diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index efc7aa7a0670..533ff5c68970 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4846,3 +4846,8 @@ xirc2ps_cs= [NET,PCMCIA] Format: ,,,,,[,[,[,]]] + + xhci-hcd.quirks [USB,KNL] + A hex value specifying bitmask with supplemental xhci + host controller quirks. Meaning of each bit can be + consulted in header drivers/usb/host/xhci.h. diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst index ab2fe0eda1d7..8f1d3de449b5 100644 --- a/Documentation/admin-guide/pm/intel_pstate.rst +++ b/Documentation/admin-guide/pm/intel_pstate.rst @@ -324,8 +324,7 @@ Global Attributes ``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to control its functionality at the system level. They are located in the -``/sys/devices/system/cpu/cpufreq/intel_pstate/`` directory and affect all -CPUs. +``/sys/devices/system/cpu/intel_pstate/`` directory and affect all CPUs. Some of them are not present if the ``intel_pstate=per_cpu_perf_limits`` argument is passed to the kernel in the command line. @@ -379,6 +378,17 @@ argument is passed to the kernel in the command line. but it affects the maximum possible value of per-policy P-state limits (see `Interpretation of Policy Attributes`_ below for details). +``hwp_dynamic_boost`` + This attribute is only present if ``intel_pstate`` works in the + `active mode with the HWP feature enabled `_ in + the processor. If set (equal to 1), it causes the minimum P-state limit + to be increased dynamically for a short time whenever a task previously + waiting on I/O is selected to run on a given logical CPU (the purpose + of this mechanism is to improve performance). + + This setting has no effect on logical CPUs whose minimum P-state limit + is directly set to the highest non-turbo P-state or above it. + .. _status_attr: ``status`` @@ -410,7 +420,7 @@ argument is passed to the kernel in the command line. That only is supported in some configurations, though (for example, if the `HWP feature is enabled in the processor `_, the operation mode of the driver cannot be changed), and if it is not - supported in the current configuration, writes to this attribute with + supported in the current configuration, writes to this attribute will fail with an appropriate error. Interpretation of Policy Attributes diff --git a/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt new file mode 100644 index 000000000000..f2ec0d4f2dff --- /dev/null +++ b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt @@ -0,0 +1,23 @@ +Spreadtrum SC27xx PMIC Vibrator + +Required properties: +- compatible: should be "sprd,sc2731-vibrator". +- reg: address of vibrator control register. 
+ +Example : + + sc2731_pmic: pmic@0 { + compatible = "sprd,sc2731"; + reg = <0>; + spi-max-frequency = <26000000>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + vibrator@eb4 { + compatible = "sprd,sc2731-vibrator"; + reg = <0xeb4>; + }; + }; diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst index bee1b9a1702f..6172f3cc3d0b 100644 --- a/Documentation/driver-api/infrastructure.rst +++ b/Documentation/driver-api/infrastructure.rst @@ -49,10 +49,10 @@ Device Drivers Base Device Drivers DMA Management ----------------------------- -.. kernel-doc:: drivers/base/dma-coherent.c +.. kernel-doc:: kernel/dma/coherent.c :export: -.. kernel-doc:: drivers/base/dma-mapping.c +.. kernel-doc:: kernel/dma/mapping.c :export: Device drivers PnP support diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 2c391338c675..37bf0a9de75c 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -441,8 +441,6 @@ prototypes: int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); - struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t); - __poll_t (*poll_mask) (struct file *, __poll_t); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); @@ -473,7 +471,7 @@ prototypes: }; locking rules: - All except for ->poll_mask may block. + All may block. ->llseek() locking has moved from llseek to the individual llseek implementations. If your fs is not using generic_file_llseek, you @@ -505,9 +503,6 @@ in sys_read() and friends. the lease within the individual filesystem to record the result of the operation -->poll_mask can be called with or without the waitqueue lock for the waitqueue -returned from ->get_poll_head. - --------------------------- dquot_operations ------------------------------- prototypes: int (*write_dquot) (struct dquot *); diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 829a7b7857a4..f608180ad59d 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -857,8 +857,6 @@ struct file_operations { ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iterate) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); - struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t); - __poll_t (*poll_mask) (struct file *, __poll_t); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); @@ -903,17 +901,6 @@ otherwise noted. activity on this file and (optionally) go to sleep until there is activity. Called by the select(2) and poll(2) system calls - get_poll_head: Returns the struct wait_queue_head that callers can - wait on. Callers need to check the returned events using ->poll_mask - once woken. Can return NULL to indicate polling is not supported, - or any error code using the ERR_PTR convention to indicate that a - grave error occured and ->poll_mask shall not be called. - - poll_mask: return the mask of EPOLL* values describing the file descriptor - state. 
Called either before going to sleep on the waitqueue returned by - get_poll_head, or after it has been woken. If ->get_poll_head and - ->poll_mask are implemented ->poll does not need to be implement. - unlocked_ioctl: called by the ioctl(2) system call. compat_ioctl: called by the ioctl(2) system call when 32 bit system calls diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt index 6c9c69ec3986..114c7ce7b58d 100644 --- a/Documentation/kbuild/kbuild.txt +++ b/Documentation/kbuild/kbuild.txt @@ -50,6 +50,11 @@ LDFLAGS_MODULE -------------------------------------------------- Additional options used for $(LD) when linking modules. +KBUILD_KCONFIG +-------------------------------------------------- +Set the top-level Kconfig file to the value of this environment +variable. The default name is "Kconfig". + KBUILD_VERBOSE -------------------------------------------------- Set the kbuild verbosity. Can be assigned same values as "V=...". @@ -88,7 +93,8 @@ In most cases the name of the architecture is the same as the directory name found in the arch/ directory. But some architectures such as x86 and sparc have aliases. x86: i386 for 32 bit, x86_64 for 64 bit -sparc: sparc for 32 bit, sparc64 for 64 bit +sh: sh for 32 bit, sh64 for 64 bit +sparc: sparc32 for 32 bit, sparc64 for 64 bit CROSS_COMPILE -------------------------------------------------- @@ -148,15 +154,6 @@ stripped after they are installed. If INSTALL_MOD_STRIP is '1', then the default option --strip-debug will be used. Otherwise, INSTALL_MOD_STRIP value will be used as the options to the strip command. -INSTALL_FW_PATH --------------------------------------------------- -INSTALL_FW_PATH specifies where to install the firmware blobs. -The default value is: - - $(INSTALL_MOD_PATH)/lib/firmware - -The value can be overridden in which case the default value is ignored. - INSTALL_HDR_PATH -------------------------------------------------- INSTALL_HDR_PATH specifies where to install user space headers when diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt index 3534a84d206c..64e0775a62d4 100644 --- a/Documentation/kbuild/kconfig-language.txt +++ b/Documentation/kbuild/kconfig-language.txt @@ -430,6 +430,12 @@ This sets the config program's title bar if the config program chooses to use it. It should be placed at the top of the configuration, before any other statement. +'#' Kconfig source file comment: + +An unquoted '#' character anywhere in a source file line indicates +the beginning of a source file comment. The remainder of that line +is a comment. + Kconfig hints ------------- diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt index 7233118f3a05..68c82914c0f3 100644 --- a/Documentation/kbuild/kconfig.txt +++ b/Documentation/kbuild/kconfig.txt @@ -2,9 +2,9 @@ This file contains some assistance for using "make *config". Use "make help" to list all of the possible configuration targets. -The xconfig ('qconf') and menuconfig ('mconf') programs also -have embedded help text. Be sure to check it for navigation, -search, and other general help text. +The xconfig ('qconf'), menuconfig ('mconf'), and nconfig ('nconf') +programs also have embedded help text. Be sure to check that for +navigation, search, and other general help text. 
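As a minimal illustration of the KBUILD_KCONFIG variable described in kbuild.txt above (the file name "Kconfig.custom" is only a placeholder, not something shipped in the tree), an alternate top-level Kconfig file can be selected for a configuration run by setting the variable in the environment::

	$ KBUILD_KCONFIG=Kconfig.custom make oldconfig

If the variable is unset, the default top-level file "Kconfig" is used, as stated above.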
====================================================================== General @@ -17,13 +17,16 @@ this happens, using a previously working .config file and running for you, so you may find that you need to see what NEW kernel symbols have been introduced. -To see a list of new config symbols when using "make oldconfig", use +To see a list of new config symbols, use cp user/some/old.config .config make listnewconfig and the config program will list any new symbols, one per line. +Alternatively, you can use the brute force method: + + make oldconfig scripts/diffconfig .config.old .config | less ______________________________________________________________________ @@ -160,7 +163,7 @@ Searching in menuconfig: This lists all config symbols that contain "hotplug", e.g., HOTPLUG_CPU, MEMORY_HOTPLUG. - For search help, enter / followed TAB-TAB-TAB (to highlight + For search help, enter / followed by TAB-TAB (to highlight ) and Enter. This will tell you that you can also use regular expressions (regexes) in the search string, so if you are not interested in MEMORY_HOTPLUG, you could try @@ -202,6 +205,39 @@ Example: make MENUCONFIG_MODE=single_menu menuconfig +====================================================================== +nconfig +-------------------------------------------------- + +nconfig is an alternate text-based configurator. It lists function +keys across the bottom of the terminal (window) that execute commands. +You can also just use the corresponding numeric key to execute the +commands unless you are in a data entry window. E.g., instead of F6 +for Save, you can just press 6. + +Use F1 for Global help or F3 for the Short help menu. + +Searching in nconfig: + + You can search either in the menu entry "prompt" strings + or in the configuration symbols. + + Use / to begin a search through the menu entries. This does + not support regular expressions. Use or for + Next hit and Previous hit, respectively. Use to + terminate the search mode. + + F8 (SymSearch) searches the configuration symbols for the + given string or regular expression (regex). + +NCONFIG_MODE +-------------------------------------------------- +This mode shows all sub-menus in one large tree. + +Example: + make NCONFIG_MODE=single_menu nconfig + + ====================================================================== xconfig -------------------------------------------------- @@ -230,8 +266,7 @@ gconfig Searching in gconfig: - None (gconfig isn't maintained as well as xconfig or menuconfig); - however, gconfig does have a few more viewing choices than - xconfig does. + There is no search command in gconfig. However, gconfig does + have several different viewing choices, modes, and options. 
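Putting the two approaches above together, a typical session after updating the kernel sources might look like this (the path to the old configuration file is only an example; use whatever .config you are carrying forward)::

	$ cp /boot/config-$(uname -r) .config
	$ make listnewconfig                    # list symbols new to this tree
	$ make oldconfig                        # prompt only for the new symbols
	$ scripts/diffconfig .config.old .config | less

"make oldconfig" keeps the previous configuration in .config.old, which is the file scripts/diffconfig compares against in the last step.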
### diff --git a/Documentation/networking/e100.rst b/Documentation/networking/e100.rst index d4d837027925..9708f5fa76de 100644 --- a/Documentation/networking/e100.rst +++ b/Documentation/networking/e100.rst @@ -1,3 +1,4 @@ +============================================================== Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters ============================================================== @@ -86,83 +87,84 @@ Event Log Message Level: The driver uses the message level flag to log events Additional Configurations ========================= - Configuring the Driver on Different Distributions - ------------------------------------------------- +Configuring the Driver on Different Distributions +------------------------------------------------- - Configuring a network driver to load properly when the system is started is - distribution dependent. Typically, the configuration process involves adding - an alias line to /etc/modprobe.d/*.conf as well as editing other system - startup scripts and/or configuration files. Many popular Linux - distributions ship with tools to make these changes for you. To learn the - proper way to configure a network device for your system, refer to your - distribution documentation. If during this process you are asked for the - driver or module name, the name for the Linux Base Driver for the Intel - PRO/100 Family of Adapters is e100. +Configuring a network driver to load properly when the system is started +is distribution dependent. Typically, the configuration process involves +adding an alias line to /etc/modprobe.d/*.conf as well as editing other +system startup scripts and/or configuration files. Many popular Linux +distributions ship with tools to make these changes for you. To learn +the proper way to configure a network device for your system, refer to +your distribution documentation. If during this process you are asked +for the driver or module name, the name for the Linux Base Driver for +the Intel PRO/100 Family of Adapters is e100. - As an example, if you install the e100 driver for two PRO/100 adapters - (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/ +As an example, if you install the e100 driver for two PRO/100 adapters +(eth0 and eth1), add the following to a configuration file in +/etc/modprobe.d/:: alias eth0 e100 alias eth1 e100 - Viewing Link Messages - --------------------- - In order to see link messages and other Intel driver information on your - console, you must set the dmesg level up to six. This can be done by - entering the following on the command line before loading the e100 driver:: +Viewing Link Messages +--------------------- + +In order to see link messages and other Intel driver information on your +console, you must set the dmesg level up to six. This can be done by +entering the following on the command line before loading the e100 +driver:: dmesg -n 6 - If you wish to see all messages issued by the driver, including debug - messages, set the dmesg level to eight. +If you wish to see all messages issued by the driver, including debug +messages, set the dmesg level to eight. - NOTE: This setting is not saved across reboots. +NOTE: This setting is not saved across reboots. +ethtool +------- - ethtool - ------- +The driver utilizes the ethtool interface for driver configuration and +diagnostics, as well as displaying statistical information. The ethtool +version 1.6 or later is required for this functionality. 
- The driver utilizes the ethtool interface for driver configuration and - diagnostics, as well as displaying statistical information. The ethtool - version 1.6 or later is required for this functionality. +The latest release of ethtool can be found from +https://www.kernel.org/pub/software/network/ethtool/ - The latest release of ethtool can be found from - https://www.kernel.org/pub/software/network/ethtool/ +Enabling Wake on LAN* (WoL) +--------------------------- +WoL is provided through the ethtool* utility. For instructions on +enabling WoL with ethtool, refer to the ethtool man page. WoL will be +enabled on the system during the next shut down or reboot. For this +driver version, in order to enable WoL, the e100 driver must be loaded +when shutting down or rebooting the system. - Enabling Wake on LAN* (WoL) - --------------------------- - WoL is provided through the ethtool* utility. For instructions on enabling - WoL with ethtool, refer to the ethtool man page. +NAPI +---- - WoL will be enabled on the system during the next shut down or reboot. For - this driver version, in order to enable WoL, the e100 driver must be - loaded when shutting down or rebooting the system. +NAPI (Rx polling mode) is supported in the e100 driver. - NAPI - ---- +See https://wiki.linuxfoundation.org/networking/napi for more +information on NAPI. - NAPI (Rx polling mode) is supported in the e100 driver. +Multiple Interfaces on Same Ethernet Broadcast Network +------------------------------------------------------ - See https://wiki.linuxfoundation.org/networking/napi for more information - on NAPI. +Due to the default ARP behavior on Linux, it is not possible to have one +system on two IP networks in the same Ethernet broadcast domain +(non-partitioned switch) behave as expected. All Ethernet interfaces +will respond to IP traffic for any IP address assigned to the system. +This results in unbalanced receive traffic. - Multiple Interfaces on Same Ethernet Broadcast Network - ------------------------------------------------------ +If you have multiple interfaces in a server, either turn on ARP +filtering by - Due to the default ARP behavior on Linux, it is not possible to have - one system on two IP networks in the same Ethernet broadcast domain - (non-partitioned switch) behave as expected. All Ethernet interfaces - will respond to IP traffic for any IP address assigned to the system. - This results in unbalanced receive traffic. +(1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter + (this only works if your kernel's version is higher than 2.4.5), or - If you have multiple interfaces in a server, either turn on ARP - filtering by - - (1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter - (this only works if your kernel's version is higher than 2.4.5), or - - (2) installing the interfaces in separate broadcast domains (either - in different switches or in a switch partitioned to VLANs). +(2) installing the interfaces in separate broadcast domains (either + in different switches or in a switch partitioned to VLANs). 
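The arp_filter setting from option (1) above can also be applied through sysctl, and made persistent across reboots by adding it to /etc/sysctl.conf or a drop-in file under /etc/sysctl.d/ (the drop-in file name below is only an example)::

	# sysctl -w net.ipv4.conf.all.arp_filter=1
	# echo "net.ipv4.conf.all.arp_filter = 1" >> /etc/sysctl.d/90-arp-filter.conf

Whether a drop-in directory is used depends on your distribution's sysctl setup.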
Support diff --git a/Documentation/networking/e1000.rst b/Documentation/networking/e1000.rst index 616848940e63..144b87eef153 100644 --- a/Documentation/networking/e1000.rst +++ b/Documentation/networking/e1000.rst @@ -1,3 +1,4 @@ +=========================================================== Linux* Base Driver for Intel(R) Ethernet Network Connection =========================================================== @@ -354,57 +355,58 @@ previously mentioned to force the adapter to the same speed and duplex. Additional Configurations ========================= - Jumbo Frames - ------------ - Jumbo Frames support is enabled by changing the MTU to a value larger than - the default of 1500. Use the ifconfig command to increase the MTU size. - For example:: +Jumbo Frames +------------ +Jumbo Frames support is enabled by changing the MTU to a value larger +than the default of 1500. Use the ifconfig command to increase the MTU +size. For example:: ifconfig eth mtu 9000 up - This setting is not saved across reboots. It can be made permanent if - you add:: +This setting is not saved across reboots. It can be made permanent if +you add:: MTU=9000 - to the file /etc/sysconfig/network-scripts/ifcfg-eth. This example - applies to the Red Hat distributions; other distributions may store this - setting in a different location. +to the file /etc/sysconfig/network-scripts/ifcfg-eth. This example +applies to the Red Hat distributions; other distributions may store this +setting in a different location. - Notes: - Degradation in throughput performance may be observed in some Jumbo frames - environments. If this is observed, increasing the application's socket buffer - size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help. - See the specific application manual and /usr/src/linux*/Documentation/ - networking/ip-sysctl.txt for more details. +Notes: Degradation in throughput performance may be observed in some +Jumbo frames environments. If this is observed, increasing the +application's socket buffer size and/or increasing the +/proc/sys/net/ipv4/tcp_*mem entry values may help. See the specific +application manual and /usr/src/linux*/Documentation/ +networking/ip-sysctl.txt for more details. - - The maximum MTU setting for Jumbo Frames is 16110. This value coincides - with the maximum Jumbo Frames size of 16128. +- The maximum MTU setting for Jumbo Frames is 16110. This value + coincides with the maximum Jumbo Frames size of 16128. - - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in - poor performance or loss of link. +- Using Jumbo frames at 10 or 100 Mbps is not supported and may result + in poor performance or loss of link. - - Adapters based on the Intel(R) 82542 and 82573V/E controller do not - support Jumbo Frames. These correspond to the following product names: - Intel(R) PRO/1000 Gigabit Server Adapter - Intel(R) PRO/1000 PM Network Connection +- Adapters based on the Intel(R) 82542 and 82573V/E controller do not + support Jumbo Frames. These correspond to the following product names: + Intel(R) PRO/1000 Gigabit Server Adapter Intel(R) PRO/1000 PM Network + Connection - ethtool - ------- - The driver utilizes the ethtool interface for driver configuration and - diagnostics, as well as displaying statistical information. The ethtool - version 1.6 or later is required for this functionality. +ethtool +------- +The driver utilizes the ethtool interface for driver configuration and +diagnostics, as well as displaying statistical information. 
The ethtool +version 1.6 or later is required for this functionality. - The latest release of ethtool can be found from - https://www.kernel.org/pub/software/network/ethtool/ +The latest release of ethtool can be found from +https://www.kernel.org/pub/software/network/ethtool/ - Enabling Wake on LAN* (WoL) - --------------------------- - WoL is configured through the ethtool* utility. +Enabling Wake on LAN* (WoL) +--------------------------- +WoL is configured through the ethtool* utility. + +WoL will be enabled on the system during the next shut down or reboot. +For this driver version, in order to enable WoL, the e1000 driver must be +loaded when shutting down or rebooting the system. - WoL will be enabled on the system during the next shut down or reboot. - For this driver version, in order to enable WoL, the e1000 driver must be - loaded when shutting down or rebooting the system. Support ======= diff --git a/Documentation/networking/strparser.txt b/Documentation/networking/strparser.txt index 13081b3decef..a7d354ddda7b 100644 --- a/Documentation/networking/strparser.txt +++ b/Documentation/networking/strparser.txt @@ -48,7 +48,7 @@ void strp_pause(struct strparser *strp) Temporarily pause a stream parser. Message parsing is suspended and no new messages are delivered to the upper layer. -void strp_pause(struct strparser *strp) +void strp_unpause(struct strparser *strp) Unpause a paused stream parser. diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.txt index e73bcf9cb5f3..7ffea6aa22e3 100644 --- a/Documentation/trace/histogram.txt +++ b/Documentation/trace/histogram.txt @@ -1729,35 +1729,35 @@ If a variable isn't a key variable or prefixed with 'vals=', the associated event field will be saved in a variable but won't be summed as a value: - # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger + # echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger Multiple variables can be assigned at the same time. The below would result in both ts0 and b being created as variables, with both common_timestamp and field1 additionally being summed as values: - # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \ + # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \ event/trigger Note that variable assignments can appear either preceding or following their use. The command below behaves identically to the command above: - # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \ + # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \ event/trigger Any number of variables not bound to a 'vals=' prefix can also be assigned by simply separating them with colons. Below is the same thing but without the values being summed in the histogram: - # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger + # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger Variables set as above can be referenced and used in expressions on another event. For example, here's how a latency can be calculated: - # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger - # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger + # echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger In the first line above, the event's timetamp is saved into the variable ts0. 
In the next line, ts0 is subtracted from the second @@ -1766,7 +1766,7 @@ yet another variable, 'wakeup_lat'. The hist trigger below in turn makes use of the wakeup_lat variable to compute a combined latency using the same key and variable from yet another event: - # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger + # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger 2.2.2 Synthetic Events ---------------------- @@ -1807,10 +1807,11 @@ the command that defined it with a '!': At this point, there isn't yet an actual 'wakeup_latency' event instantiated in the event subsytem - for this to happen, a 'hist trigger action' needs to be instantiated and bound to actual fields -and variables defined on other events (see Section 6.3.3 below). +and variables defined on other events (see Section 2.2.3 below on +how that is done using hist trigger 'onmatch' action). Once that is +done, the 'wakeup_latency' synthetic event instance is created. -Once that is done, an event instance is created, and a histogram can -be defined using it: +A histogram can now be defined for the new synthetic event: # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \ /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger @@ -1960,7 +1961,7 @@ hist trigger specification. back to that pid, the timestamp difference is calculated. If the resulting latency, stored in wakeup_lat, exceeds the current maximum latency, the values specified in the save() fields are - recoreded: + recorded: # echo 'hist:keys=pid:ts0=common_timestamp.usecs \ if comm=="cyclictest"' >> \ diff --git a/Documentation/usb/gadget_configfs.txt b/Documentation/usb/gadget_configfs.txt index 635e57493709..b8cb38a98c19 100644 --- a/Documentation/usb/gadget_configfs.txt +++ b/Documentation/usb/gadget_configfs.txt @@ -226,7 +226,7 @@ $ rm configs/./ where . specify the configuration and is a symlink to a function being removed from the configuration, e.g.: -$ rm configfs/c.1/ncm.usb0 +$ rm configs/c.1/ncm.usb0 ... ... diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 495b7742ab58..d10944e619d3 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -4610,7 +4610,7 @@ This capability indicates that kvm will implement the interfaces to handle reset, migration and nested KVM for branch prediction blocking. The stfle facility 82 should not be provided to the guest without this capability. 
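Returning to the histogram-trigger documentation above, the separate fragments can be combined into one end-to-end session. This is only a sketch assembled from the wakeup_latency example quoted there; the event names, field names and the "cyclictest" filter come from that example and may need adjusting for other use cases::

	# cd /sys/kernel/debug/tracing
	# echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> synthetic_events
	# echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' \
	        >> events/sched/sched_wakeup/trigger
	# echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid,next_prio) if next_comm=="cyclictest"' \
	        >> events/sched/sched_switch/trigger
	# echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' \
	        >> events/synthetic/wakeup_latency/trigger
	# cat events/synthetic/wakeup_latency/hist

The single quotes matter: they keep the shell from expanding $ts0 and $wakeup_lat, which are hist-trigger variables rather than shell variables.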
-8.14 KVM_CAP_HYPERV_TLBFLUSH +8.18 KVM_CAP_HYPERV_TLBFLUSH Architectures: x86 diff --git a/MAINTAINERS b/MAINTAINERS index 9d5eeff51b5f..192d7f73fd01 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -581,7 +581,7 @@ W: https://www.infradead.org/~dhowells/kafs/ AGPGART DRIVER M: David Airlie -T: git git://people.freedesktop.org/~airlied/linux (part of drm maint) +T: git git://anongit.freedesktop.org/drm/drm S: Maintained F: drivers/char/agp/ F: include/linux/agp* @@ -2971,9 +2971,13 @@ N: bcm585* N: bcm586* N: bcm88312 N: hr2 -F: arch/arm64/boot/dts/broadcom/ns2* +N: stingray +F: arch/arm64/boot/dts/broadcom/northstar2/* +F: arch/arm64/boot/dts/broadcom/stingray/* F: drivers/clk/bcm/clk-ns* +F: drivers/clk/bcm/clk-sr* F: drivers/pinctrl/bcm/pinctrl-ns* +F: include/dt-bindings/clock/bcm-sr* BROADCOM KONA GPIO DRIVER M: Ray Jui @@ -4360,12 +4364,7 @@ L: iommu@lists.linux-foundation.org T: git git://git.infradead.org/users/hch/dma-mapping.git W: http://git.infradead.org/users/hch/dma-mapping.git S: Supported -F: lib/dma-debug.c -F: lib/dma-direct.c -F: lib/dma-noncoherent.c -F: lib/dma-virt.c -F: drivers/base/dma-mapping.c -F: drivers/base/dma-coherent.c +F: kernel/dma/ F: include/asm-generic/dma-mapping.h F: include/linux/dma-direct.h F: include/linux/dma-mapping.h @@ -4461,6 +4460,7 @@ F: Documentation/blockdev/drbd/ DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS M: Greg Kroah-Hartman +R: "Rafael J. Wysocki" T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git S: Supported F: Documentation/kobject.txt @@ -4631,7 +4631,7 @@ F: include/uapi/drm/vmwgfx_drm.h DRM DRIVERS M: David Airlie L: dri-devel@lists.freedesktop.org -T: git git://people.freedesktop.org/~airlied/linux +T: git git://anongit.freedesktop.org/drm/drm B: https://bugs.freedesktop.org/ C: irc://chat.freenode.net/dri-devel S: Maintained @@ -5674,7 +5674,7 @@ F: drivers/crypto/caam/ F: Documentation/devicetree/bindings/crypto/fsl-sec4.txt FREESCALE DIU FRAMEBUFFER DRIVER -M: Timur Tabi +M: Timur Tabi L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/fsl-diu-fb.* @@ -5774,7 +5774,7 @@ S: Maintained F: drivers/net/wan/fsl_ucc_hdlc* FREESCALE QUICC ENGINE UCC UART DRIVER -M: Timur Tabi +M: Timur Tabi L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/tty/serial/ucc_uart.c @@ -5798,7 +5798,7 @@ F: drivers/net/ethernet/freescale/fs_enet/ F: include/linux/fs_enet_pd.h FREESCALE SOC SOUND DRIVERS -M: Timur Tabi +M: Timur Tabi M: Nicolin Chen M: Xiubo Li R: Fabio Estevam @@ -9756,6 +9756,11 @@ L: linux-scsi@vger.kernel.org S: Maintained F: drivers/scsi/NCR_D700.* +NCSI LIBRARY: +M: Samuel Mendoza-Jonas +S: Maintained +F: net/ncsi/ + NCT6775 HARDWARE MONITOR DRIVER M: Guenter Roeck L: linux-hwmon@vger.kernel.org @@ -9882,6 +9887,7 @@ M: Andrew Lunn M: Vivien Didelot M: Florian Fainelli S: Maintained +F: Documentation/devicetree/bindings/net/dsa/ F: net/dsa/ F: include/net/dsa.h F: include/linux/dsa/ @@ -10208,11 +10214,13 @@ F: sound/soc/codecs/sgtl5000* NXP TDA998X DRM DRIVER M: Russell King -S: Supported +S: Maintained T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes F: drivers/gpu/drm/i2c/tda998x_drv.c F: include/drm/i2c/tda998x.h +F: include/dt-bindings/display/tda998x.h +K: "nxp,tda998x" NXP TFA9879 DRIVER M: Peter Rosin @@ -11476,6 +11484,15 @@ W: http://wireless.kernel.org/en/users/Drivers/p54 S: Obsolete F: drivers/net/wireless/intersil/prism54/ +PROC FILESYSTEM +R: Alexey Dobriyan +L: 
linux-kernel@vger.kernel.org +L: linux-fsdevel@vger.kernel.org +S: Maintained +F: fs/proc/ +F: include/linux/proc_fs.h +F: tools/testing/selftests/proc/ + PROC SYSCTL M: "Luis R. Rodriguez" M: Kees Cook @@ -11808,9 +11825,9 @@ F: Documentation/devicetree/bindings/opp/kryo-cpufreq.txt F: drivers/cpufreq/qcom-cpufreq-kryo.c QUALCOMM EMAC GIGABIT ETHERNET DRIVER -M: Timur Tabi +M: Timur Tabi L: netdev@vger.kernel.org -S: Supported +S: Maintained F: drivers/net/ethernet/qualcomm/emac/ QUALCOMM HEXAGON ARCHITECTURE @@ -11821,7 +11838,7 @@ S: Supported F: arch/hexagon/ QUALCOMM HIDMA DRIVER -M: Sinan Kaya +M: Sinan Kaya L: linux-arm-kernel@lists.infradead.org L: linux-arm-msm@vger.kernel.org L: dmaengine@vger.kernel.org @@ -13648,7 +13665,7 @@ M: Konrad Rzeszutek Wilk L: iommu@lists.linux-foundation.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git S: Supported -F: lib/swiotlb.c +F: kernel/dma/swiotlb.c F: arch/*/kernel/pci-swiotlb.c F: include/linux/swiotlb.h @@ -15572,9 +15589,17 @@ M: x86@kernel.org L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core S: Maintained +F: Documentation/devicetree/bindings/x86/ F: Documentation/x86/ F: arch/x86/ +X86 ENTRY CODE +M: Andy Lutomirski +L: linux-kernel@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm +S: Maintained +F: arch/x86/entry/ + X86 MCE INFRASTRUCTURE M: Tony Luck M: Borislav Petkov @@ -15597,7 +15622,7 @@ F: drivers/platform/x86/ F: drivers/platform/olpc/ X86 VDSO -M: Andy Lutomirski +M: Andy Lutomirski L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso S: Maintained diff --git a/Makefile b/Makefile index ca2af1ab91eb..a89d8a0d3ee1 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 4 PATCHLEVEL = 18 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc5 NAME = Merciless Moray # *DOCUMENTATION* @@ -353,9 +353,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ else if [ -x /bin/bash ]; then echo /bin/bash; \ else echo sh; fi ; fi) -HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS) -HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS) -HOST_LFS_LIBS := $(shell getconf LFS_LIBS) +HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null) +HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null) +HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null) HOSTCC = gcc HOSTCXX = g++ @@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO endif -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y) - CC_CAN_LINK := y - export CC_CAN_LINK -endif - # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. # CC_VERSION_TEXT is referenced from Kconfig (so it needs export), @@ -1717,6 +1712,6 @@ endif # skip-makefile PHONY += FORCE FORCE: -# Declare the contents of the .PHONY variable as phony. We keep that +# Declare the contents of the PHONY variable as phony. We keep that # information in a variable so we can use it in if_changed and friends. .PHONY: $(PHONY) diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 0c4805a572c8..04a4a138ed13 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -555,11 +555,6 @@ config SMP If you don't know what to do here, say N. 
-config HAVE_DEC_LOCK - bool - depends on SMP - default y - config NR_CPUS int "Maximum number of CPUs (2-32)" range 2 32 diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile index 04f9729de57c..854d5e79979e 100644 --- a/arch/alpha/lib/Makefile +++ b/arch/alpha/lib/Makefile @@ -35,8 +35,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ callback_srm.o srm_puts.o srm_printk.o \ fls.o -lib-$(CONFIG_SMP) += dec_and_lock.o - # The division routines are built from single source, with different defines. AFLAGS___divqu.o = -DDIV AFLAGS___remqu.o = -DREM diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c deleted file mode 100644 index a117707f57fe..000000000000 --- a/arch/alpha/lib/dec_and_lock.c +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * arch/alpha/lib/dec_and_lock.c - * - * ll/sc version of atomic_dec_and_lock() - * - */ - -#include -#include -#include - - asm (".text \n\ - .global _atomic_dec_and_lock \n\ - .ent _atomic_dec_and_lock \n\ - .align 4 \n\ -_atomic_dec_and_lock: \n\ - .prologue 0 \n\ -1: ldl_l $1, 0($16) \n\ - subl $1, 1, $1 \n\ - beq $1, 2f \n\ - stl_c $1, 0($16) \n\ - beq $1, 4f \n\ - mb \n\ - clr $0 \n\ - ret \n\ -2: br $29, 3f \n\ -3: ldgp $29, 0($29) \n\ - br $atomic_dec_and_lock_1..ng \n\ - .subsection 2 \n\ -4: br 1b \n\ - .previous \n\ - .end _atomic_dec_and_lock"); - -static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock) -{ - /* Slow path */ - spin_lock(lock); - if (atomic_dec_and_test(atomic)) - return 1; - spin_unlock(lock); - return 0; -} -EXPORT_SYMBOL(_atomic_dec_and_lock); diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b6be2b1be75d..d7a81284c272 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1240,8 +1240,14 @@ config PCI VESA. If you have PCI, say Y, otherwise N. config PCI_DOMAINS - bool + bool "Support for multiple PCI domains" depends on PCI + help + Enable PCI domains kernel management. Say Y if your machine + has a PCI bus hierarchy that requires more than one PCI + domain (aka segment) to be correctly managed. Say N otherwise. + + If you don't know what to do here, say N. 
config PCI_DOMAINS_GENERIC def_bool PCI_DOMAINS diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index f9e8667f5886..73b514dddf65 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -168,7 +168,6 @@ AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */ AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */ AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */ AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */ - AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4) /* mcasp0_aclkr.mmc0_sdwp */ >; }; diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi index ca294914bbb1..23ea381d363f 100644 --- a/arch/arm/boot/dts/am3517.dtsi +++ b/arch/arm/boot/dts/am3517.dtsi @@ -39,6 +39,8 @@ davinci_emac: ethernet@5c000000 { ti,davinci-ctrl-ram-size = <0x2000>; ti,davinci-rmii-en = /bits/ 8 <1>; local-mac-address = [ 00 00 00 00 00 00 ]; + clocks = <&emac_ick>; + clock-names = "ick"; }; davinci_mdio: ethernet@5c030000 { @@ -49,6 +51,8 @@ davinci_mdio: ethernet@5c030000 { bus_freq = <1000000>; #address-cells = <1>; #size-cells = <0>; + clocks = <&emac_fck>; + clock-names = "fck"; }; uart4: serial@4809e000 { @@ -87,6 +91,11 @@ hecc: can@5c050000 { }; }; +/* Table Table 5-79 of the TRM shows 480ab000 is reserved */ +&usb_otg_hs { + status = "disabled"; +}; + &iva { status = "disabled"; }; diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts index 440351ad0b80..d4be3fd0b6f4 100644 --- a/arch/arm/boot/dts/am437x-sk-evm.dts +++ b/arch/arm/boot/dts/am437x-sk-evm.dts @@ -610,6 +610,8 @@ edt-ft5306@38 { touchscreen-size-x = <480>; touchscreen-size-y = <272>; + + wakeup-source; }; tlv320aic3106: tlv320aic3106@1b { diff --git a/arch/arm/boot/dts/armada-385-synology-ds116.dts b/arch/arm/boot/dts/armada-385-synology-ds116.dts index 6782ce481ac9..d8769956cbfc 100644 --- a/arch/arm/boot/dts/armada-385-synology-ds116.dts +++ b/arch/arm/boot/dts/armada-385-synology-ds116.dts @@ -139,7 +139,7 @@ gpio-fan { 3700 5 3900 6 4000 7>; - cooling-cells = <2>; + #cooling-cells = <2>; }; gpio-leds { diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 18edc9bc7927..929459c42760 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -547,7 +547,7 @@ coredivclk: clock@e4250 { thermal: thermal@e8078 { compatible = "marvell,armada380-thermal"; - reg = <0xe4078 0x4>, <0xe4074 0x4>; + reg = <0xe4078 0x4>, <0xe4070 0x8>; status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 9fe4f5a6379e..2c4df2d2d4a6 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -216,7 +216,7 @@ i2c0: i2c@18008000 { reg = <0x18008000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; @@ -245,7 +245,7 @@ i2c1: i2c@1800b000 { reg = <0x1800b000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; @@ -256,7 +256,7 @@ pcie0: pcie@18012000 { #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -278,10 +278,10 @@ msi0: msi-controller { compatible = 
"brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; }; }; @@ -291,7 +291,7 @@ pcie1: pcie@18013000 { #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <1>; @@ -313,10 +313,10 @@ msi1: msi-controller { compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; }; }; diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi index 3f9cedd8011f..3084a7c95733 100644 --- a/arch/arm/boot/dts/bcm-hr2.dtsi +++ b/arch/arm/boot/dts/bcm-hr2.dtsi @@ -264,7 +264,7 @@ i2c0: i2c@38000 { reg = <0x38000 0x50>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; }; @@ -279,7 +279,7 @@ i2c1: i2c@3b000 { reg = <0x3b000 0x50>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; }; }; @@ -300,7 +300,7 @@ pcie0: pcie@18012000 { #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -322,10 +322,10 @@ msi0: msi-controller { compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; brcm,pcie-msi-inten; }; }; @@ -336,7 +336,7 @@ pcie1: pcie@18013000 { #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <1>; @@ -358,10 +358,10 @@ msi1: msi-controller { compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; brcm,pcie-msi-inten; }; }; diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index dcc55aa84583..09ba85046322 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -391,7 +391,7 @@ i2c0: i2c@38000 { reg = <0x38000 0x50>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; dma-coherent; status = "disabled"; @@ -496,7 +496,7 @@ pcie0: pcie@18012000 { #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -519,10 +519,10 @@ msi0: msi-controller { compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; brcm,pcie-msi-inten; }; }; @@ -533,7 +533,7 @@ pcie1: pcie@18013000 { #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <1>; @@ -556,10 +556,10 @@ msi1: msi-controller { compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; brcm,pcie-msi-inten; }; }; @@ -570,7 +570,7 @@ pcie2: pcie@18014000 { #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <2>; @@ -593,10 +593,10 @@ 
msi2: msi-controller { compatible = "brcm,iproc-msi"; msi-controller; interrupt-parent = <&gic>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; brcm,pcie-msi-inten; }; }; diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 9a076c409f4e..ef995e50ee12 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -365,7 +365,7 @@ mdio: mdio@18003000 { i2c0: i2c@18009000 { compatible = "brcm,iproc-i2c"; reg = <0x18009000 0x50>; - interrupts = ; + interrupts = ; #address-cells = <1>; #size-cells = <0>; clock-frequency = <100000>; diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index f6f1597b03df..0f4f817a9e22 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi @@ -549,11 +549,7 @@ gpio: gpio@226000 { gpio-controller; #gpio-cells = <2>; reg = <0x226000 0x1000>; - interrupts = <42 IRQ_TYPE_EDGE_BOTH - 43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH - 45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH - 47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH - 49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>; + interrupts = <42 43 44 45 46 47 48 49 50>; ti,ngpio = <144>; ti,davinci-gpio-unbanked = <0>; status = "disabled"; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 9dcd14edc202..e03495a799ce 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1580,7 +1580,6 @@ usb1: usb@48890000 { dr_mode = "otg"; snps,dis_u3_susphy_quirk; snps,dis_u2_susphy_quirk; - snps,dis_metastability_quirk; }; }; @@ -1608,6 +1607,7 @@ usb2: usb@488d0000 { dr_mode = "otg"; snps,dis_u3_susphy_quirk; snps,dis_u2_susphy_quirk; + snps,dis_metastability_quirk; }; }; diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts index df9eca94d812..8a878687197b 100644 --- a/arch/arm/boot/dts/imx51-zii-rdu1.dts +++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts @@ -770,7 +770,7 @@ MX51_PAD_NANDF_D15__GPIO3_25 0x21e6 pinctrl_ts: tsgrp { fsl,pins = < - MX51_PAD_CSI1_D8__GPIO3_12 0x85 + MX51_PAD_CSI1_D8__GPIO3_12 0x04 MX51_PAD_CSI1_D9__GPIO3_13 0x85 >; }; diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi index 70483ce72ba6..77f8f030dd07 100644 --- a/arch/arm/boot/dts/imx6q.dtsi +++ b/arch/arm/boot/dts/imx6q.dtsi @@ -90,7 +90,7 @@ ecspi5: ecspi@2018000 { clocks = <&clks IMX6Q_CLK_ECSPI5>, <&clks IMX6Q_CLK_ECSPI5>; clock-names = "ipg", "per"; - dmas = <&sdma 11 7 1>, <&sdma 12 7 2>; + dmas = <&sdma 11 8 1>, <&sdma 12 8 2>; dma-names = "rx", "tx"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index d8b94f47498b..4e4a55aad5c9 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -1344,7 +1344,7 @@ pcie: pcie@8ffc000 { ranges = <0x81000000 0 0 0x08f80000 0 0x00010000 /* downstream I/O */ 0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; /* non-prefetchable memory */ num-lanes = <1>; - interrupts = ; + interrupts = ; interrupt-names = "msi"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 486d4e7433ed..b38f8c240558 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -748,13 +748,13 @@ mmc: dwmmc0@ff704000 { nand0: nand@ff900000 { #address-cells = <0x1>; #size-cells = <0x1>; - compatible = "denali,denali-nand-dt"; + compatible = "altr,socfpga-denali-nand"; reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; reg-names = "nand_data", "denali_reg"; 
interrupts = <0x0 0x90 0x4>; dma-mask = <0xffffffff>; - clocks = <&nand_clk>; + clocks = <&nand_x_clk>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index bead79e4b2aa..791ca15c799e 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -593,8 +593,7 @@ spi1: spi@ffda5000 { #size-cells = <0>; reg = <0xffda5000 0x100>; interrupts = <0 102 4>; - num-chipselect = <4>; - bus-num = <0>; + num-cs = <4>; /*32bit_access;*/ tx-dma-channel = <&pdma 16>; rx-dma-channel = <&pdma 17>; @@ -633,7 +632,7 @@ mmc: dwmmc0@ff808000 { nand: nand@ffb90000 { #address-cells = <1>; #size-cells = <1>; - compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand"; + compatible = "altr,socfpga-denali-nand"; reg = <0xffb90000 0x72000>, <0xffb80000 0x10000>; reg-names = "nand_data", "denali_reg"; diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index 1e9f7af8f70f..3157be413297 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_DMABOUNCE) += dmabounce.o obj-$(CONFIG_SHARP_LOCOMO) += locomo.o obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o obj-$(CONFIG_SHARP_SCOOP) += scoop.o -obj-$(CONFIG_SMP) += secure_cntvoff.o +obj-$(CONFIG_CPU_V7) += secure_cntvoff.o obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o CFLAGS_REMOVE_mcpm_entry.o = -pg diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig index 054591dc9a00..4cd2f4a2bff4 100644 --- a/arch/arm/configs/imx_v4_v5_defconfig +++ b/arch/arm/configs/imx_v4_v5_defconfig @@ -141,9 +141,11 @@ CONFIG_USB_STORAGE=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_ULPI=y CONFIG_NOP_USB_XCEIV=y CONFIG_USB_GADGET=y CONFIG_USB_ETH=m +CONFIG_USB_ULPI_BUS=y CONFIG_MMC=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index f70507ab91ee..200ebda47e0c 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -302,6 +302,7 @@ CONFIG_USB_STORAGE=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_ULPI=y CONFIG_USB_SERIAL=m CONFIG_USB_SERIAL_GENERIC=y CONFIG_USB_SERIAL_FTDI_SIO=m @@ -338,6 +339,7 @@ CONFIG_USB_GADGETFS=m CONFIG_USB_FUNCTIONFS=m CONFIG_USB_MASS_STORAGE=m CONFIG_USB_G_SERIAL=m +CONFIG_USB_ULPI_BUS=y CONFIG_MMC=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 7e1c543162c3..8f6be1982545 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -1,5 +1,4 @@ CONFIG_SYSVIPC=y -CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_CGROUPS=y @@ -10,20 +9,10 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y CONFIG_CMDLINE_PARTITION=y -CONFIG_ARCH_MULTI_V7=y -# CONFIG_ARCH_MULTI_V5 is not set -# CONFIG_ARCH_MULTI_V4 is not set CONFIG_ARCH_VIRT=y CONFIG_ARCH_ALPINE=y CONFIG_ARCH_ARTPEC=y CONFIG_MACH_ARTPEC6=y -CONFIG_ARCH_MVEBU=y -CONFIG_MACH_ARMADA_370=y -CONFIG_MACH_ARMADA_375=y -CONFIG_MACH_ARMADA_38X=y -CONFIG_MACH_ARMADA_39X=y -CONFIG_MACH_ARMADA_XP=y -CONFIG_MACH_DOVE=y CONFIG_ARCH_AT91=y CONFIG_SOC_SAMA5D2=y CONFIG_SOC_SAMA5D3=y @@ -32,9 +21,9 @@ CONFIG_ARCH_BCM=y CONFIG_ARCH_BCM_CYGNUS=y CONFIG_ARCH_BCM_HR2=y CONFIG_ARCH_BCM_NSP=y 
-CONFIG_ARCH_BCM_21664=y -CONFIG_ARCH_BCM_281XX=y CONFIG_ARCH_BCM_5301X=y +CONFIG_ARCH_BCM_281XX=y +CONFIG_ARCH_BCM_21664=y CONFIG_ARCH_BCM2835=y CONFIG_ARCH_BCM_63XX=y CONFIG_ARCH_BRCMSTB=y @@ -43,14 +32,14 @@ CONFIG_MACH_BERLIN_BG2=y CONFIG_MACH_BERLIN_BG2CD=y CONFIG_MACH_BERLIN_BG2Q=y CONFIG_ARCH_DIGICOLOR=y +CONFIG_ARCH_EXYNOS=y +CONFIG_EXYNOS5420_MCPM=y CONFIG_ARCH_HIGHBANK=y CONFIG_ARCH_HISI=y CONFIG_ARCH_HI3xxx=y -CONFIG_ARCH_HIX5HD2=y CONFIG_ARCH_HIP01=y CONFIG_ARCH_HIP04=y -CONFIG_ARCH_KEYSTONE=y -CONFIG_ARCH_MESON=y +CONFIG_ARCH_HIX5HD2=y CONFIG_ARCH_MXC=y CONFIG_SOC_IMX50=y CONFIG_SOC_IMX51=y @@ -60,29 +49,30 @@ CONFIG_SOC_IMX6SL=y CONFIG_SOC_IMX6SX=y CONFIG_SOC_IMX6UL=y CONFIG_SOC_IMX7D=y -CONFIG_SOC_VF610=y CONFIG_SOC_LS1021A=y +CONFIG_SOC_VF610=y +CONFIG_ARCH_KEYSTONE=y +CONFIG_ARCH_MEDIATEK=y +CONFIG_ARCH_MESON=y +CONFIG_ARCH_MVEBU=y +CONFIG_MACH_ARMADA_370=y +CONFIG_MACH_ARMADA_375=y +CONFIG_MACH_ARMADA_38X=y +CONFIG_MACH_ARMADA_39X=y +CONFIG_MACH_ARMADA_XP=y +CONFIG_MACH_DOVE=y CONFIG_ARCH_OMAP3=y CONFIG_ARCH_OMAP4=y CONFIG_SOC_OMAP5=y CONFIG_SOC_AM33XX=y CONFIG_SOC_AM43XX=y CONFIG_SOC_DRA7XX=y +CONFIG_ARCH_SIRF=y CONFIG_ARCH_QCOM=y -CONFIG_ARCH_MEDIATEK=y CONFIG_ARCH_MSM8X60=y CONFIG_ARCH_MSM8960=y CONFIG_ARCH_MSM8974=y CONFIG_ARCH_ROCKCHIP=y -CONFIG_ARCH_SOCFPGA=y -CONFIG_PLAT_SPEAR=y -CONFIG_ARCH_SPEAR13XX=y -CONFIG_MACH_SPEAR1310=y -CONFIG_MACH_SPEAR1340=y -CONFIG_ARCH_STI=y -CONFIG_ARCH_STM32=y -CONFIG_ARCH_EXYNOS=y -CONFIG_EXYNOS5420_MCPM=y CONFIG_ARCH_RENESAS=y CONFIG_ARCH_EMEV2=y CONFIG_ARCH_R7S72100=y @@ -99,40 +89,33 @@ CONFIG_ARCH_R8A7792=y CONFIG_ARCH_R8A7793=y CONFIG_ARCH_R8A7794=y CONFIG_ARCH_SH73A0=y +CONFIG_ARCH_SOCFPGA=y +CONFIG_PLAT_SPEAR=y +CONFIG_ARCH_SPEAR13XX=y +CONFIG_MACH_SPEAR1310=y +CONFIG_MACH_SPEAR1340=y +CONFIG_ARCH_STI=y +CONFIG_ARCH_STM32=y CONFIG_ARCH_SUNXI=y -CONFIG_ARCH_SIRF=y CONFIG_ARCH_TEGRA=y -CONFIG_ARCH_TEGRA_2x_SOC=y -CONFIG_ARCH_TEGRA_3x_SOC=y -CONFIG_ARCH_TEGRA_114_SOC=y -CONFIG_ARCH_TEGRA_124_SOC=y CONFIG_ARCH_UNIPHIER=y CONFIG_ARCH_U8500=y -CONFIG_MACH_HREFV60=y -CONFIG_MACH_SNOWBALL=y CONFIG_ARCH_VEXPRESS=y CONFIG_ARCH_VEXPRESS_TC2_PM=y CONFIG_ARCH_WM8850=y CONFIG_ARCH_ZYNQ=y -CONFIG_TRUSTED_FOUNDATIONS=y -CONFIG_PCI=y -CONFIG_PCI_HOST_GENERIC=y -CONFIG_PCI_DRA7XX=y -CONFIG_PCI_DRA7XX_EP=y -CONFIG_PCI_KEYSTONE=y -CONFIG_PCI_MSI=y +CONFIG_PCIEPORTBUS=y CONFIG_PCI_MVEBU=y CONFIG_PCI_TEGRA=y CONFIG_PCI_RCAR_GEN2=y CONFIG_PCIE_RCAR=y -CONFIG_PCIEPORTBUS=y +CONFIG_PCI_DRA7XX_EP=y +CONFIG_PCI_KEYSTONE=y CONFIG_PCI_ENDPOINT=y CONFIG_PCI_ENDPOINT_CONFIGFS=y CONFIG_PCI_EPF_TEST=m CONFIG_SMP=y CONFIG_NR_CPUS=16 -CONFIG_HIGHPTE=y -CONFIG_CMA=y CONFIG_SECCOMP=y CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y @@ -145,14 +128,14 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m CONFIG_CPU_FREQ_GOV_USERSPACE=m CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPUFREQ_DT=y CONFIG_ARM_IMX6Q_CPUFREQ=y CONFIG_QORIQ_CPUFREQ=y CONFIG_CPU_IDLE=y CONFIG_ARM_CPUIDLE=y -CONFIG_NEON=y -CONFIG_KERNEL_MODE_NEON=y CONFIG_ARM_ZYNQ_CPUIDLE=y CONFIG_ARM_EXYNOS_CPUIDLE=y +CONFIG_KERNEL_MODE_NEON=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -170,23 +153,13 @@ CONFIG_IPV6_MIP6=m CONFIG_IPV6_TUNNEL=m CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_NET_DSA=m -CONFIG_NET_SWITCHDEV=y CONFIG_CAN=y -CONFIG_CAN_RAW=y -CONFIG_CAN_BCM=y -CONFIG_CAN_DEV=y CONFIG_CAN_AT91=m CONFIG_CAN_FLEXCAN=m -CONFIG_CAN_RCAR=m -CONFIG_CAN_XILINXCAN=y -CONFIG_CAN_MCP251X=y -CONFIG_NET_DSA_BCM_SF2=m -CONFIG_B53=m -CONFIG_B53_SPI_DRIVER=m -CONFIG_B53_MDIO_DRIVER=m 
-CONFIG_B53_MMAP_DRIVER=m -CONFIG_B53_SRAB_DRIVER=m CONFIG_CAN_SUN4I=y +CONFIG_CAN_XILINXCAN=y +CONFIG_CAN_RCAR=m +CONFIG_CAN_MCP251X=y CONFIG_BT=m CONFIG_BT_HCIUART=m CONFIG_BT_HCIUART_BCM=y @@ -199,11 +172,9 @@ CONFIG_RFKILL_INPUT=y CONFIG_RFKILL_GPIO=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y -CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=64 CONFIG_OMAP_OCP2SCP=y CONFIG_SIMPLE_PM_BUS=y -CONFIG_SUNXI_RSB=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y @@ -236,7 +207,6 @@ CONFIG_PCI_ENDPOINT_TEST=m CONFIG_EEPROM_AT24=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_SCSI_MULTI_LUN=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y @@ -251,14 +221,20 @@ CONFIG_SATA_MV=y CONFIG_SATA_RCAR=y CONFIG_NETDEVICES=y CONFIG_VIRTIO_NET=y -CONFIG_HIX5HD2_GMAC=y +CONFIG_B53_SPI_DRIVER=m +CONFIG_B53_MDIO_DRIVER=m +CONFIG_B53_MMAP_DRIVER=m +CONFIG_B53_SRAB_DRIVER=m +CONFIG_NET_DSA_BCM_SF2=m CONFIG_SUN4I_EMAC=y -CONFIG_MACB=y CONFIG_BCMGENET=m CONFIG_BGMAC_BCMA=y CONFIG_SYSTEMPORT=m +CONFIG_MACB=y CONFIG_NET_CALXEDA_XGMAC=y CONFIG_GIANFAR=y +CONFIG_HIX5HD2_GMAC=y +CONFIG_E1000E=y CONFIG_IGB=y CONFIG_MV643XX_ETH=y CONFIG_MVNETA=y @@ -268,19 +244,17 @@ CONFIG_R8169=y CONFIG_SH_ETH=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=y -CONFIG_STMMAC_PLATFORM=y CONFIG_DWMAC_DWC_QOS_ETH=y CONFIG_TI_CPSW=y CONFIG_XILINX_EMACLITE=y CONFIG_AT803X_PHY=y -CONFIG_MARVELL_PHY=y -CONFIG_SMSC_PHY=y CONFIG_BROADCOM_PHY=y CONFIG_ICPLUS_PHY=y -CONFIG_REALTEK_PHY=y +CONFIG_MARVELL_PHY=y CONFIG_MICREL_PHY=y -CONFIG_FIXED_PHY=y +CONFIG_REALTEK_PHY=y CONFIG_ROCKCHIP_PHY=y +CONFIG_SMSC_PHY=y CONFIG_USB_PEGASUS=y CONFIG_USB_RTL8152=m CONFIG_USB_LAN78XX=m @@ -288,29 +262,29 @@ CONFIG_USB_USBNET=y CONFIG_USB_NET_SMSC75XX=y CONFIG_USB_NET_SMSC95XX=y CONFIG_BRCMFMAC=m -CONFIG_RT2X00=m -CONFIG_RT2800USB=m CONFIG_MWIFIEX=m CONFIG_MWIFIEX_SDIO=m +CONFIG_RT2X00=m +CONFIG_RT2800USB=m CONFIG_INPUT_JOYDEV=y CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_QT1070=m CONFIG_KEYBOARD_GPIO=y CONFIG_KEYBOARD_TEGRA=y -CONFIG_KEYBOARD_SPEAR=y -CONFIG_KEYBOARD_ST_KEYSCAN=y -CONFIG_KEYBOARD_CROS_EC=m CONFIG_KEYBOARD_SAMSUNG=m +CONFIG_KEYBOARD_ST_KEYSCAN=y +CONFIG_KEYBOARD_SPEAR=y +CONFIG_KEYBOARD_CROS_EC=m CONFIG_MOUSE_PS2_ELANTECH=y CONFIG_MOUSE_CYAPA=m CONFIG_MOUSE_ELAN_I2C=y CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=m CONFIG_TOUCHSCREEN_MMS114=m +CONFIG_TOUCHSCREEN_WM97XX=m CONFIG_TOUCHSCREEN_ST1232=m CONFIG_TOUCHSCREEN_STMPE=y CONFIG_TOUCHSCREEN_SUN4I=y -CONFIG_TOUCHSCREEN_WM97XX=m CONFIG_INPUT_MISC=y CONFIG_INPUT_MAX77693_HAPTIC=m CONFIG_INPUT_MAX8997_HAPTIC=m @@ -327,13 +301,12 @@ CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_8250_EM=y CONFIG_SERIAL_8250_MT6577=y CONFIG_SERIAL_8250_UNIPHIER=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_SERIAL_ATMEL=y CONFIG_SERIAL_ATMEL_CONSOLE=y CONFIG_SERIAL_ATMEL_TTYAT=y -CONFIG_SERIAL_BCM63XX=y -CONFIG_SERIAL_BCM63XX_CONSOLE=y CONFIG_SERIAL_MESON=y CONFIG_SERIAL_MESON_CONSOLE=y CONFIG_SERIAL_SAMSUNG=y @@ -345,15 +318,14 @@ CONFIG_SERIAL_IMX=y CONFIG_SERIAL_IMX_CONSOLE=y CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=20 -CONFIG_SERIAL_SH_SCI_CONSOLE=y -CONFIG_SERIAL_SH_SCI_DMA=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_VT8500=y CONFIG_SERIAL_VT8500_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_OMAP=y CONFIG_SERIAL_OMAP_CONSOLE=y +CONFIG_SERIAL_BCM63XX=y +CONFIG_SERIAL_BCM63XX_CONSOLE=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y CONFIG_SERIAL_FSL_LPUART=y @@ -365,12 +337,10 @@ 
CONFIG_SERIAL_ST_ASC_CONSOLE=y CONFIG_SERIAL_STM32=y CONFIG_SERIAL_STM32_CONSOLE=y CONFIG_SERIAL_DEV_BUS=y -CONFIG_HVC_DRIVER=y CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_ST=y CONFIG_I2C_CHARDEV=y -CONFIG_I2C_DAVINCI=y -CONFIG_I2C_MESON=y -CONFIG_I2C_MUX=y CONFIG_I2C_ARB_GPIO_CHALLENGE=m CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_MUX_PINCTRL=y @@ -378,12 +348,13 @@ CONFIG_I2C_DEMUX_PINCTRL=y CONFIG_I2C_AT91=m CONFIG_I2C_BCM2835=y CONFIG_I2C_CADENCE=y +CONFIG_I2C_DAVINCI=y CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_DIGICOLOR=m CONFIG_I2C_EMEV2=m CONFIG_I2C_GPIO=m -CONFIG_I2C_EXYNOS5=y CONFIG_I2C_IMX=y +CONFIG_I2C_MESON=y CONFIG_I2C_MV64XXX=y CONFIG_I2C_RIIC=y CONFIG_I2C_RK3X=y @@ -427,7 +398,6 @@ CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_PINCTRL_AS3722=y CONFIG_PINCTRL_PALMAS=y -CONFIG_PINCTRL_BCM2835=y CONFIG_PINCTRL_APQ8064=y CONFIG_PINCTRL_APQ8084=y CONFIG_PINCTRL_IPQ8064=y @@ -437,25 +407,33 @@ CONFIG_PINCTRL_MSM8X74=y CONFIG_PINCTRL_MSM8916=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_PINCTRL_QCOM_SSBI_PMIC=y -CONFIG_GPIO_GENERIC_PLATFORM=y CONFIG_GPIO_DAVINCI=y CONFIG_GPIO_DWAPB=y CONFIG_GPIO_EM=y CONFIG_GPIO_RCAR=y +CONFIG_GPIO_SYSCON=y CONFIG_GPIO_UNIPHIER=y CONFIG_GPIO_XILINX=y CONFIG_GPIO_ZYNQ=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y CONFIG_GPIO_PCF857X=y -CONFIG_GPIO_TWL4030=y CONFIG_GPIO_PALMAS=y -CONFIG_GPIO_SYSCON=y CONFIG_GPIO_TPS6586X=y CONFIG_GPIO_TPS65910=y +CONFIG_GPIO_TWL4030=y +CONFIG_POWER_AVS=y +CONFIG_ROCKCHIP_IODOMAIN=y +CONFIG_POWER_RESET_AS3722=y +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_ST=y +CONFIG_POWER_RESET_KEYSTONE=y +CONFIG_POWER_RESET_RMOBILE=y CONFIG_BATTERY_ACT8945A=y CONFIG_BATTERY_CPCAP=m CONFIG_BATTERY_SBS=y +CONFIG_AXP20X_POWER=m CONFIG_BATTERY_MAX17040=m CONFIG_BATTERY_MAX17042=m CONFIG_CHARGER_CPCAP=m @@ -464,15 +442,6 @@ CONFIG_CHARGER_MAX77693=m CONFIG_CHARGER_MAX8997=m CONFIG_CHARGER_MAX8998=m CONFIG_CHARGER_TPS65090=y -CONFIG_AXP20X_POWER=m -CONFIG_POWER_RESET_AS3722=y -CONFIG_POWER_RESET_GPIO=y -CONFIG_POWER_RESET_GPIO_RESTART=y -CONFIG_POWER_RESET_KEYSTONE=y -CONFIG_POWER_RESET_RMOBILE=y -CONFIG_POWER_RESET_ST=y -CONFIG_POWER_AVS=y -CONFIG_ROCKCHIP_IODOMAIN=y CONFIG_SENSORS_IIO_HWMON=y CONFIG_SENSORS_LM90=y CONFIG_SENSORS_LM95245=y @@ -480,14 +449,12 @@ CONFIG_SENSORS_NTC_THERMISTOR=m CONFIG_SENSORS_PWM_FAN=m CONFIG_SENSORS_INA2XX=m CONFIG_CPU_THERMAL=y -CONFIG_BCM2835_THERMAL=m -CONFIG_BRCMSTB_THERMAL=m CONFIG_IMX_THERMAL=y CONFIG_ROCKCHIP_THERMAL=y CONFIG_RCAR_THERMAL=y CONFIG_ARMADA_THERMAL=y -CONFIG_DAVINCI_WATCHDOG=m -CONFIG_EXYNOS_THERMAL=m +CONFIG_BCM2835_THERMAL=m +CONFIG_BRCMSTB_THERMAL=m CONFIG_ST_THERMAL_MEMMAP=y CONFIG_WATCHDOG=y CONFIG_DA9063_WATCHDOG=m @@ -495,20 +462,24 @@ CONFIG_XILINX_WATCHDOG=y CONFIG_ARM_SP805_WATCHDOG=y CONFIG_AT91SAM9X_WATCHDOG=y CONFIG_SAMA5D4_WATCHDOG=y +CONFIG_DW_WATCHDOG=y +CONFIG_DAVINCI_WATCHDOG=m CONFIG_ORION_WATCHDOG=y CONFIG_RN5T618_WATCHDOG=y -CONFIG_ST_LPC_WATCHDOG=y CONFIG_SUNXI_WATCHDOG=y CONFIG_IMX2_WDT=y +CONFIG_ST_LPC_WATCHDOG=y CONFIG_TEGRA_WATCHDOG=m CONFIG_MESON_WATCHDOG=y -CONFIG_DW_WATCHDOG=y CONFIG_DIGICOLOR_WATCHDOG=y CONFIG_RENESAS_WDT=m -CONFIG_BCM2835_WDT=y CONFIG_BCM47XX_WDT=y -CONFIG_BCM7038_WDT=m +CONFIG_BCM2835_WDT=y CONFIG_BCM_KONA_WDT=y +CONFIG_BCM7038_WDT=m +CONFIG_BCMA_HOST_SOC=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y CONFIG_MFD_ACT8945A=y CONFIG_MFD_AS3711=y CONFIG_MFD_AS3722=y @@ -516,7 +487,6 @@ CONFIG_MFD_ATMEL_FLEXCOM=y CONFIG_MFD_ATMEL_HLCDC=m CONFIG_MFD_BCM590XX=y CONFIG_MFD_AC100=y 
-CONFIG_MFD_AXP20X=y CONFIG_MFD_AXP20X_I2C=y CONFIG_MFD_AXP20X_RSB=y CONFIG_MFD_CROS_EC=m @@ -529,11 +499,11 @@ CONFIG_MFD_MAX77693=m CONFIG_MFD_MAX8907=y CONFIG_MFD_MAX8997=y CONFIG_MFD_MAX8998=y -CONFIG_MFD_RK808=y CONFIG_MFD_CPCAP=y CONFIG_MFD_PM8XXX=y CONFIG_MFD_QCOM_RPM=y CONFIG_MFD_SPMI_PMIC=y +CONFIG_MFD_RK808=y CONFIG_MFD_RN5T618=y CONFIG_MFD_SEC_CORE=y CONFIG_MFD_STMPE=y @@ -543,10 +513,10 @@ CONFIG_MFD_TPS65217=y CONFIG_MFD_TPS65218=y CONFIG_MFD_TPS6586X=y CONFIG_MFD_TPS65910=y -CONFIG_REGULATOR_ACT8945A=y -CONFIG_REGULATOR_AB8500=y CONFIG_REGULATOR_ACT8865=y +CONFIG_REGULATOR_ACT8945A=y CONFIG_REGULATOR_ANATOP=y +CONFIG_REGULATOR_AB8500=y CONFIG_REGULATOR_AS3711=y CONFIG_REGULATOR_AS3722=y CONFIG_REGULATOR_AXP20X=y @@ -554,10 +524,7 @@ CONFIG_REGULATOR_BCM590XX=y CONFIG_REGULATOR_CPCAP=y CONFIG_REGULATOR_DA9210=y CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_GPIO=y -CONFIG_MFD_SYSCON=y -CONFIG_POWER_RESET_SYSCON=y CONFIG_REGULATOR_LP872X=y CONFIG_REGULATOR_MAX14577=m CONFIG_REGULATOR_MAX8907=y @@ -571,7 +538,8 @@ CONFIG_REGULATOR_PALMAS=y CONFIG_REGULATOR_PBIAS=y CONFIG_REGULATOR_PWM=y CONFIG_REGULATOR_QCOM_RPM=y -CONFIG_REGULATOR_QCOM_SMD_RPM=y +CONFIG_REGULATOR_QCOM_SMD_RPM=m +CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_RN5T618=y CONFIG_REGULATOR_S2MPS11=y CONFIG_REGULATOR_S5M8767=y @@ -592,18 +560,17 @@ CONFIG_MEDIA_CEC_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_MEDIA_USB_SUPPORT=y -CONFIG_USB_VIDEO_CLASS=y -CONFIG_USB_GSPCA=y +CONFIG_USB_VIDEO_CLASS=m CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_SOC_CAMERA=m CONFIG_SOC_CAMERA_PLATFORM=m -CONFIG_VIDEO_RCAR_VIN=m -CONFIG_VIDEO_ATMEL_ISI=m CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m CONFIG_VIDEO_S5P_FIMC=m CONFIG_VIDEO_S5P_MIPI_CSIS=m CONFIG_VIDEO_EXYNOS_FIMC_LITE=m CONFIG_VIDEO_EXYNOS4_FIMC_IS=m +CONFIG_VIDEO_RCAR_VIN=m +CONFIG_VIDEO_ATMEL_ISI=m CONFIG_V4L_MEM2MEM_DRIVERS=y CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m CONFIG_VIDEO_SAMSUNG_S5P_MFC=m @@ -614,19 +581,15 @@ CONFIG_VIDEO_STI_DELTA=m CONFIG_VIDEO_RENESAS_JPU=m CONFIG_VIDEO_RENESAS_VSP1=m CONFIG_V4L_TEST_DRIVERS=y +CONFIG_VIDEO_VIVID=m CONFIG_CEC_PLATFORM_DRIVERS=y CONFIG_VIDEO_SAMSUNG_S5P_CEC=m # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set CONFIG_VIDEO_ADV7180=m CONFIG_VIDEO_ML86V7667=m CONFIG_DRM=y -CONFIG_DRM_I2C_ADV7511=m -CONFIG_DRM_I2C_ADV7511_AUDIO=y # CONFIG_DRM_I2C_CH7006 is not set # CONFIG_DRM_I2C_SIL164 is not set -CONFIG_DRM_DUMB_VGA_DAC=m -CONFIG_DRM_NXP_PTN3460=m -CONFIG_DRM_PARADE_PS8622=m CONFIG_DRM_NOUVEAU=m CONFIG_DRM_EXYNOS=m CONFIG_DRM_EXYNOS_FIMD=y @@ -645,13 +608,18 @@ CONFIG_DRM_RCAR_LVDS=y CONFIG_DRM_SUN4I=m CONFIG_DRM_FSL_DCU=m CONFIG_DRM_TEGRA=y +CONFIG_DRM_PANEL_SIMPLE=y CONFIG_DRM_PANEL_SAMSUNG_LD9040=m CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m -CONFIG_DRM_PANEL_SIMPLE=y +CONFIG_DRM_DUMB_VGA_DAC=m +CONFIG_DRM_NXP_PTN3460=m +CONFIG_DRM_PARADE_PS8622=m CONFIG_DRM_SII9234=m +CONFIG_DRM_I2C_ADV7511=m +CONFIG_DRM_I2C_ADV7511_AUDIO=y CONFIG_DRM_STI=m -CONFIG_DRM_VC4=y +CONFIG_DRM_VC4=m CONFIG_DRM_ETNAVIV=m CONFIG_DRM_MXSFB=m CONFIG_FB_ARMCLCD=y @@ -659,8 +627,6 @@ CONFIG_FB_EFI=y CONFIG_FB_WM8505=y CONFIG_FB_SH_MOBILE_LCDC=y CONFIG_FB_SIMPLE=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=m CONFIG_BACKLIGHT_PWM=y CONFIG_BACKLIGHT_AS3711=y @@ -668,7 +634,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_SOUND=m CONFIG_SND=m -CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_HDA_TEGRA=m CONFIG_SND_HDA_INPUT_BEEP=y 
CONFIG_SND_HDA_PATCH_LOADER=y @@ -692,7 +657,7 @@ CONFIG_SND_SOC_SNOW=m CONFIG_SND_SOC_ODROID=m CONFIG_SND_SOC_SH4_FSI=m CONFIG_SND_SOC_RCAR=m -CONFIG_SND_SIMPLE_SCU_CARD=m +CONFIG_SND_SOC_STI=m CONFIG_SND_SUN4I_CODEC=m CONFIG_SND_SOC_TEGRA=m CONFIG_SND_SOC_TEGRA20_I2S=m @@ -703,31 +668,25 @@ CONFIG_SND_SOC_TEGRA_WM8903=m CONFIG_SND_SOC_TEGRA_WM9712=m CONFIG_SND_SOC_TEGRA_TRIMSLICE=m CONFIG_SND_SOC_TEGRA_ALC5632=m -CONFIG_SND_SOC_CPCAP=m CONFIG_SND_SOC_TEGRA_MAX98090=m CONFIG_SND_SOC_AK4642=m +CONFIG_SND_SOC_CPCAP=m CONFIG_SND_SOC_SGTL5000=m CONFIG_SND_SOC_SPDIF=m -CONFIG_SND_SOC_WM8978=m -CONFIG_SND_SOC_STI=m CONFIG_SND_SOC_STI_SAS=m -CONFIG_SND_SIMPLE_CARD=m +CONFIG_SND_SOC_WM8978=m +CONFIG_SND_SIMPLE_SCU_CARD=m CONFIG_USB=y CONFIG_USB_OTG=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_MVEBU=y -CONFIG_USB_XHCI_RCAR=m CONFIG_USB_XHCI_TEGRA=m CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_MSM=m -CONFIG_USB_EHCI_EXYNOS=y -CONFIG_USB_EHCI_TEGRA=y CONFIG_USB_EHCI_HCD_STI=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_ISP1760=y +CONFIG_USB_EHCI_TEGRA=y +CONFIG_USB_EHCI_EXYNOS=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_STI=y -CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_EXYNOS=m CONFIG_USB_R8A66597_HCD=m CONFIG_USB_RENESAS_USBHS=m @@ -746,18 +705,18 @@ CONFIG_USB_TI_CPPI41_DMA=y CONFIG_USB_TUSB_OMAP_DMA=y CONFIG_USB_DWC3=y CONFIG_USB_DWC2=y -CONFIG_USB_HSIC_USB3503=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_ISP1760=y +CONFIG_USB_HSIC_USB3503=y CONFIG_AB8500_USB=y -CONFIG_KEYSTONE_USB_PHY=y +CONFIG_KEYSTONE_USB_PHY=m CONFIG_NOP_USB_XCEIV=m CONFIG_AM335X_PHY_USB=m CONFIG_TWL6030_USB=m CONFIG_USB_GPIO_VBUS=y CONFIG_USB_ISP1301=y -CONFIG_USB_MSM_OTG=m CONFIG_USB_MXS_PHY=y CONFIG_USB_GADGET=y CONFIG_USB_FSL_USB2=y @@ -793,21 +752,20 @@ CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_MMC_SDHCI_ESDHC_IMX=y CONFIG_MMC_SDHCI_DOVE=y CONFIG_MMC_SDHCI_TEGRA=y +CONFIG_MMC_SDHCI_S3C=y CONFIG_MMC_SDHCI_PXAV3=y CONFIG_MMC_SDHCI_SPEAR=y -CONFIG_MMC_SDHCI_S3C=y CONFIG_MMC_SDHCI_S3C_DMA=y CONFIG_MMC_SDHCI_BCM_KONA=y +CONFIG_MMC_MESON_MX_SDIO=y CONFIG_MMC_SDHCI_ST=y CONFIG_MMC_OMAP=y CONFIG_MMC_OMAP_HS=y CONFIG_MMC_ATMELMCI=y CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_MESON_MX_SDIO=y CONFIG_MMC_MVSDIO=y CONFIG_MMC_SDHI=y CONFIG_MMC_DW=y -CONFIG_MMC_DW_PLTFM=y CONFIG_MMC_DW_EXYNOS=y CONFIG_MMC_DW_ROCKCHIP=y CONFIG_MMC_SH_MMCIF=y @@ -847,94 +805,85 @@ CONFIG_RTC_DRV_MAX77686=y CONFIG_RTC_DRV_RK808=m CONFIG_RTC_DRV_RS5C372=m CONFIG_RTC_DRV_BQ32K=m -CONFIG_RTC_DRV_PALMAS=y -CONFIG_RTC_DRV_ST_LPC=y CONFIG_RTC_DRV_TWL4030=y +CONFIG_RTC_DRV_PALMAS=y CONFIG_RTC_DRV_TPS6586X=y CONFIG_RTC_DRV_TPS65910=y CONFIG_RTC_DRV_S35390A=m CONFIG_RTC_DRV_RX8581=m CONFIG_RTC_DRV_EM3027=y +CONFIG_RTC_DRV_S5M=m CONFIG_RTC_DRV_DA9063=m CONFIG_RTC_DRV_EFI=m CONFIG_RTC_DRV_DIGICOLOR=m -CONFIG_RTC_DRV_S5M=m CONFIG_RTC_DRV_S3C=m CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_AT91RM9200=m CONFIG_RTC_DRV_AT91SAM9=m CONFIG_RTC_DRV_VT8500=y -CONFIG_RTC_DRV_SUN6I=y CONFIG_RTC_DRV_SUNXI=y CONFIG_RTC_DRV_MV=y CONFIG_RTC_DRV_TEGRA=y +CONFIG_RTC_DRV_ST_LPC=y CONFIG_RTC_DRV_CPCAP=m CONFIG_DMADEVICES=y -CONFIG_DW_DMAC=y CONFIG_AT_HDMAC=y CONFIG_AT_XDMAC=y +CONFIG_DMA_BCM2835=y +CONFIG_DMA_SUN6I=y CONFIG_FSL_EDMA=y +CONFIG_IMX_DMA=y +CONFIG_IMX_SDMA=y CONFIG_MV_XOR=y +CONFIG_MXS_DMA=y +CONFIG_PL330_DMA=y +CONFIG_SIRF_DMA=y +CONFIG_STE_DMA40=y +CONFIG_ST_FDMA=m CONFIG_TEGRA20_APB_DMA=y +CONFIG_XILINX_DMA=y +CONFIG_QCOM_BAM_DMA=y +CONFIG_DW_DMAC=y CONFIG_SH_DMAE=y CONFIG_RCAR_DMAC=y CONFIG_RENESAS_USB_DMAC=m -CONFIG_STE_DMA40=y -CONFIG_SIRF_DMA=y 
-CONFIG_TI_EDMA=y -CONFIG_PL330_DMA=y -CONFIG_IMX_SDMA=y -CONFIG_IMX_DMA=y -CONFIG_MXS_DMA=y -CONFIG_DMA_BCM2835=y -CONFIG_DMA_OMAP=y -CONFIG_QCOM_BAM_DMA=y -CONFIG_XILINX_DMA=y -CONFIG_DMA_SUN6I=y -CONFIG_ST_FDMA=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_MMIO=y CONFIG_STAGING=y -CONFIG_SENSORS_ISL29018=y -CONFIG_SENSORS_ISL29028=y CONFIG_MFD_NVEC=y CONFIG_KEYBOARD_NVEC=y CONFIG_SERIO_NVEC_PS2=y CONFIG_NVEC_POWER=y CONFIG_NVEC_PAZ00=y -CONFIG_BCMA=y -CONFIG_BCMA_HOST_SOC=y -CONFIG_BCMA_DRIVER_GMAC_CMN=y -CONFIG_BCMA_DRIVER_GPIO=y -CONFIG_QCOM_GSBI=y -CONFIG_QCOM_PM=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMD_RPM=y -CONFIG_QCOM_SMP2P=y -CONFIG_QCOM_SMSM=y -CONFIG_QCOM_WCNSS_CTRL=m -CONFIG_ROCKCHIP_PM_DOMAINS=y -CONFIG_COMMON_CLK_QCOM=y -CONFIG_QCOM_CLK_RPM=y -CONFIG_CHROME_PLATFORMS=y CONFIG_STAGING_BOARD=y -CONFIG_CROS_EC_CHARDEV=m CONFIG_COMMON_CLK_MAX77686=y CONFIG_COMMON_CLK_RK808=m CONFIG_COMMON_CLK_S2MPS11=m +CONFIG_COMMON_CLK_QCOM=y +CONFIG_QCOM_CLK_RPM=y CONFIG_APQ_MMCC_8084=y CONFIG_MSM_GCC_8660=y CONFIG_MSM_MMCC_8960=y CONFIG_MSM_MMCC_8974=y -CONFIG_HWSPINLOCK_QCOM=y +CONFIG_BCM2835_MBOX=y CONFIG_ROCKCHIP_IOMMU=y CONFIG_TEGRA_IOMMU_GART=y CONFIG_TEGRA_IOMMU_SMMU=y CONFIG_REMOTEPROC=m CONFIG_ST_REMOTEPROC=m CONFIG_RPMSG_VIRTIO=m +CONFIG_RASPBERRYPI_POWER=y +CONFIG_QCOM_GSBI=y +CONFIG_QCOM_PM=y +CONFIG_QCOM_SMD_RPM=m +CONFIG_QCOM_WCNSS_CTRL=m +CONFIG_ROCKCHIP_PM_DOMAINS=y +CONFIG_ARCH_TEGRA_2x_SOC=y +CONFIG_ARCH_TEGRA_3x_SOC=y +CONFIG_ARCH_TEGRA_114_SOC=y +CONFIG_ARCH_TEGRA_124_SOC=y CONFIG_PM_DEVFREQ=y CONFIG_ARM_TEGRA_DEVFREQ=m -CONFIG_MEMORY=y -CONFIG_EXTCON=y CONFIG_TI_AEMIF=y CONFIG_IIO=y CONFIG_IIO_SW_TRIGGER=y @@ -947,56 +896,54 @@ CONFIG_VF610_ADC=m CONFIG_XILINX_XADC=y CONFIG_MPU3050_I2C=y CONFIG_CM36651=m +CONFIG_SENSORS_ISL29018=y +CONFIG_SENSORS_ISL29028=y CONFIG_AK8975=y -CONFIG_RASPBERRYPI_POWER=y CONFIG_IIO_HRTIMER_TRIGGER=y CONFIG_PWM=y CONFIG_PWM_ATMEL=m CONFIG_PWM_ATMEL_HLCDC_PWM=m CONFIG_PWM_ATMEL_TCB=m +CONFIG_PWM_BCM2835=y +CONFIG_PWM_BRCMSTB=m CONFIG_PWM_FSL_FTM=m CONFIG_PWM_MESON=m CONFIG_PWM_RCAR=m CONFIG_PWM_RENESAS_TPU=y CONFIG_PWM_ROCKCHIP=m CONFIG_PWM_SAMSUNG=m +CONFIG_PWM_STI=y CONFIG_PWM_SUN4I=y CONFIG_PWM_TEGRA=y CONFIG_PWM_VT8500=y +CONFIG_KEYSTONE_IRQ=y +CONFIG_PHY_SUN4I_USB=y +CONFIG_PHY_SUN9I_USB=y CONFIG_PHY_HIX5HD2_SATA=y -CONFIG_E1000E=y -CONFIG_PWM_STI=y -CONFIG_PWM_BCM2835=y -CONFIG_PWM_BRCMSTB=m +CONFIG_PHY_BERLIN_SATA=y +CONFIG_PHY_BERLIN_USB=y +CONFIG_PHY_CPCAP_USB=m +CONFIG_PHY_QCOM_APQ8064_SATA=m +CONFIG_PHY_RCAR_GEN2=m +CONFIG_PHY_ROCKCHIP_DP=m +CONFIG_PHY_ROCKCHIP_USB=y +CONFIG_PHY_SAMSUNG_USB2=m +CONFIG_PHY_MIPHY28LP=y +CONFIG_PHY_STIH407_USB=y +CONFIG_PHY_STM32_USBPHYC=y +CONFIG_PHY_TEGRA_XUSB=y CONFIG_PHY_DM816X_USB=m CONFIG_OMAP_USB2=y CONFIG_TI_PIPE3=y CONFIG_TWL4030_USB=m -CONFIG_PHY_BERLIN_USB=y -CONFIG_PHY_CPCAP_USB=m -CONFIG_PHY_BERLIN_SATA=y -CONFIG_PHY_ROCKCHIP_DP=m -CONFIG_PHY_ROCKCHIP_USB=y -CONFIG_PHY_QCOM_APQ8064_SATA=m -CONFIG_PHY_MIPHY28LP=y -CONFIG_PHY_RCAR_GEN2=m -CONFIG_PHY_STIH407_USB=y -CONFIG_PHY_STM32_USBPHYC=y -CONFIG_PHY_SUN4I_USB=y -CONFIG_PHY_SUN9I_USB=y -CONFIG_PHY_SAMSUNG_USB2=m -CONFIG_PHY_TEGRA_XUSB=y -CONFIG_PHY_BRCM_SATA=y -CONFIG_NVMEM=y CONFIG_NVMEM_IMX_OCOTP=y CONFIG_NVMEM_SUNXI_SID=y CONFIG_NVMEM_VF610_OCOTP=y -CONFIG_BCM2835_MBOX=y CONFIG_RASPBERRYPI_FIRMWARE=y -CONFIG_EFI_VARS=m -CONFIG_EFI_CAPSULE_LOADER=m CONFIG_BCM47XX_NVRAM=y CONFIG_BCM47XX_SPROM=y +CONFIG_EFI_VARS=m +CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EXT4_FS=y CONFIG_AUTOFS4_FS=y CONFIG_MSDOS_FS=y @@ -1004,7 +951,6 @@ CONFIG_VFAT_FS=y 
CONFIG_NTFS_FS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_UBIFS_FS=y -CONFIG_TMPFS=y CONFIG_SQUASHFS=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y @@ -1020,13 +966,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y -CONFIG_LOCKUP_DETECTOR=y -CONFIG_CPUFREQ_DT=y -CONFIG_KEYSTONE_IRQ=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_ST=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_SKCIPHER=m @@ -1035,27 +975,19 @@ CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_DEV_MARVELL_CESA=m CONFIG_CRYPTO_DEV_EXYNOS_RNG=m CONFIG_CRYPTO_DEV_S5P=m +CONFIG_CRYPTO_DEV_ATMEL_AES=m +CONFIG_CRYPTO_DEV_ATMEL_TDES=m +CONFIG_CRYPTO_DEV_ATMEL_SHA=m CONFIG_CRYPTO_DEV_SUN4I_SS=m CONFIG_CRYPTO_DEV_ROCKCHIP=m CONFIG_ARM_CRYPTO=y -CONFIG_CRYPTO_SHA1_ARM=m CONFIG_CRYPTO_SHA1_ARM_NEON=m CONFIG_CRYPTO_SHA1_ARM_CE=m CONFIG_CRYPTO_SHA2_ARM_CE=m -CONFIG_CRYPTO_SHA256_ARM=m CONFIG_CRYPTO_SHA512_ARM=m CONFIG_CRYPTO_AES_ARM=m CONFIG_CRYPTO_AES_ARM_BS=m CONFIG_CRYPTO_AES_ARM_CE=m -CONFIG_CRYPTO_CHACHA20_NEON=m -CONFIG_CRYPTO_CRC32_ARM_CE=m -CONFIG_CRYPTO_CRCT10DIF_ARM_CE=m CONFIG_CRYPTO_GHASH_ARM_CE=m -CONFIG_CRYPTO_DEV_ATMEL_AES=m -CONFIG_CRYPTO_DEV_ATMEL_TDES=m -CONFIG_CRYPTO_DEV_ATMEL_SHA=m -CONFIG_VIDEO_VIVID=m -CONFIG_VIRTIO=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_MMIO=y +CONFIG_CRYPTO_CRC32_ARM_CE=m +CONFIG_CRYPTO_CHACHA20_NEON=m diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S index 3c1e203e53b9..57caa742016e 100644 --- a/arch/arm/crypto/speck-neon-core.S +++ b/arch/arm/crypto/speck-neon-core.S @@ -272,9 +272,11 @@ * Allocate stack space to store 128 bytes worth of tweaks. For * performance, this space is aligned to a 16-byte boundary so that we * can use the load/store instructions that declare 16-byte alignment. + * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'. */ - sub sp, #128 - bic sp, #0xf + sub r12, sp, #128 + bic r12, #0xf + mov sp, r12 .if \n == 64 // Load first tweak diff --git a/arch/arm/firmware/Makefile b/arch/arm/firmware/Makefile index a71f16536b6c..6e41336b0bc4 100644 --- a/arch/arm/firmware/Makefile +++ b/arch/arm/firmware/Makefile @@ -1 +1,4 @@ obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o + +# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc +KCOV_INSTRUMENT := n diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index dd546d65a383..7a9b86978ee1 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -177,7 +177,7 @@ M_CLASS(streq r3, [r12, #PMSAv8_MAIR1]) bic r0, r0, #CR_I #endif mcr p15, 0, r0, c1, c0, 0 @ write control reg - isb + instr_sync #elif defined (CONFIG_CPU_V7M) #ifdef CONFIG_ARM_MPU ldreq r3, [r12, MPU_CTRL] diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index f09e9d66d605..dec130e7078c 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) * Increment event counter and perform fixup for the pre-signal * frame. 
*/ - rseq_signal_deliver(regs); + rseq_signal_deliver(ksig, regs); /* * Set up the stack frame @@ -666,7 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) } else { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); - rseq_handle_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); } } local_irq_disable(); diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index c46a728df44e..25aac6ee2ab1 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -20,6 +20,7 @@ config ARCH_BCM_IPROC select GPIOLIB select ARM_AMBA select PINCTRL + select PCI_DOMAINS if PCI help This enables support for systems based on Broadcom IPROC architected SoCs. The IPROC complex contains one or more ARM CPUs along with common diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index e22fb40e34bc..6d5beb11bd96 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -774,7 +774,7 @@ static struct gpiod_lookup_table mmc_gpios_table = { GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", GPIO_ACTIVE_LOW), GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", - GPIO_ACTIVE_LOW), + GPIO_ACTIVE_HIGH), }, }; diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 69df3620eca5..1c73694c871a 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void) static inline void omap5_erratum_workaround_801819(void) { } #endif +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +/* + * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with + * ICIALLU) to activate the workaround for secondary Core. + * NOTE: it is assumed that the primary core's configuration is done + * by the boot loader (kernel will detect a misconfiguration and complain + * if this is not done). + * + * In General Purpose(GP) devices, ACR bit settings can only be done + * by ROM code in "secure world" using the smc call and there is no + * option to update the "firmware" on such devices. This also works for + * High security(HS) devices, as a backup option in case the + * "update" is not done in the "security firmware". + */ +static void omap5_secondary_harden_predictor(void) +{ + u32 acr, acr_mask; + + asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr)); + + /* + * ACTLR[0] (Enable invalidates of BTB with ICIALLU) + */ + acr_mask = BIT(0); + + /* Do we already have it done.. 
if yes, skip expensive smc */ + if ((acr & acr_mask) == acr_mask) + return; + + acr |= acr_mask; + omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr); + + pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n", + __func__, smp_processor_id()); +} +#else +static inline void omap5_secondary_harden_predictor(void) { } +#endif + static void omap4_secondary_init(unsigned int cpu) { /* @@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu) set_cntfreq(); /* Configure ACR to disable streaming WA for 801819 */ omap5_erratum_workaround_801819(); + /* Enable ACR to allow for ICUALLU workaround */ + omap5_secondary_harden_predictor(); } /* diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 9c10248fadcc..4e8c2116808e 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c @@ -185,7 +185,7 @@ static int pxa_irq_suspend(void) { int i; - for (i = 0; i < pxa_internal_irq_nr / 32; i++) { + for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) { void __iomem *base = irq_base(i); saved_icmr[i] = __raw_readl(base + ICMR); @@ -204,7 +204,7 @@ static void pxa_irq_resume(void) { int i; - for (i = 0; i < pxa_internal_irq_nr / 32; i++) { + for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) { void __iomem *base = irq_base(i); __raw_writel(saved_icmr[i], base + ICMR); diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index d0f62eacf59d..4adb901dd5eb 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -10,6 +10,7 @@ menuconfig ARCH_SOCFPGA select HAVE_ARM_SCU select HAVE_ARM_TWD if SMP select MFD_SYSCON + select PCI_DOMAINS if PCI if ARCH_SOCFPGA config SOCFPGA_SUSPEND diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c186474422f3..0cc8e04295a4 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused) return 0; } +static int kernel_set_to_readonly __read_mostly; + void mark_rodata_ro(void) { + kernel_set_to_readonly = 1; stop_machine(__mark_rodata_ro, NULL, NULL); debug_checkwx(); } void set_kernel_text_rw(void) { + if (!kernel_set_to_readonly) + return; + set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false, current->active_mm); } void set_kernel_text_ro(void) { + if (!kernel_set_to_readonly) + return; + set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true, current->active_mm); } diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 6e8b71613039..f6a62ae44a65 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* there are 2 passes here */ bpf_jit_dump(prog->len, image_size, 2, ctx.target); - set_memory_ro((unsigned long)header, header->pages); + bpf_jit_binary_lock_ro(header); prog->bpf_func = (void *)ctx.target; prog->jited = 1; prog->jited_len = image_size; diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 8073625371f5..07060e5b5864 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -59,6 +59,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; static __read_mostly unsigned int xen_events_irq; +uint32_t xen_start_flags; +EXPORT_SYMBOL(xen_start_flags); + int xen_remap_domain_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, @@ -293,9 +296,7 @@ void __init xen_early_init(void) xen_setup_features(); if (xen_feature(XENFEAT_dom0)) - xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; - else - 
xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED); + xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; if (!console_set_on_cmdline && !xen_initial_domain()) add_preferred_console("hvc", 0, NULL); diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 45272266dafb..e7101b19d590 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -10,7 +10,7 @@ # # Copyright (C) 1995-2001 by Russell King -LDFLAGS_vmlinux :=-p --no-undefined -X +LDFLAGS_vmlinux :=--no-undefined -X CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) GZFLAGS :=-9 @@ -60,15 +60,15 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ AS += -EB -LD += -EB -LDFLAGS += -maarch64linuxb +# We must use the linux target here, since distributions don't tend to package +# the ELF linker scripts with binutils, and this results in a build failure. +LDFLAGS += -EB -maarch64linuxb UTS_MACHINE := aarch64_be else KBUILD_CPPFLAGS += -mlittle-endian CHECKFLAGS += -D__AARCH64EL__ AS += -EL -LD += -EL -LDFLAGS += -maarch64linux +LDFLAGS += -EL -maarch64linux # See comment above UTS_MACHINE := aarch64 endif diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index e6b059378dc0..67dac595dc72 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -309,8 +309,7 @@ spi0: spi@ffda4000 { interrupts = <0 99 4>; resets = <&rst SPIM0_RESET>; reg-io-width = <4>; - num-chipselect = <4>; - bus-num = <0>; + num-cs = <4>; status = "disabled"; }; @@ -322,8 +321,7 @@ spi1: spi@ffda5000 { interrupts = <0 100 4>; resets = <&rst SPIM1_RESET>; reg-io-width = <4>; - num-chipselect = <4>; - bus-num = <0>; + num-cs = <4>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts index 4b3331fbfe39..dff9b15eb3c0 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts +++ b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts @@ -66,9 +66,22 @@ wifi32k: wifi32k { ðmac { status = "okay"; - phy-mode = "rgmii"; pinctrl-0 = <ð_rgmii_y_pins>; pinctrl-names = "default"; + phy-handle = <ð_phy0>; + phy-mode = "rgmii"; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + eth_phy0: ethernet-phy@0 { + /* Realtek RTL8211F (0x001cc916) */ + reg = <0>; + eee-broken-1000t; + }; + }; }; &uart_A { diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index fee87737a201..67d7115e4eff 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -132,7 +132,7 @@ apb: apb@ffe00000 { sd_emmc_b: sd@5000 { compatible = "amlogic,meson-axg-mmc"; - reg = <0x0 0x5000 0x0 0x2000>; + reg = <0x0 0x5000 0x0 0x800>; interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_B>, @@ -144,7 +144,7 @@ sd_emmc_b: sd@5000 { sd_emmc_c: mmc@7000 { compatible = "amlogic,meson-axg-mmc"; - reg = <0x0 0x7000 0x0 0x2000>; + reg = <0x0 0x7000 0x0 0x800>; interrupts = ; status = "disabled"; clocks = <&clkc CLKID_SD_EMMC_C>, diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 3c31e21cbed7..b8dc4dbb391b 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -35,6 +35,12 @@ secmon_reserved: secmon@10000000 { no-map; }; + /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */ + secmon_reserved_alt: 
secmon@5000000 { + reg = <0x0 0x05000000 0x0 0x300000>; + no-map; + }; + linux,cma { compatible = "shared-dma-pool"; reusable; @@ -457,21 +463,21 @@ apb: apb@d0000000 { sd_emmc_a: mmc@70000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; - reg = <0x0 0x70000 0x0 0x2000>; + reg = <0x0 0x70000 0x0 0x800>; interrupts = ; status = "disabled"; }; sd_emmc_b: mmc@72000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; - reg = <0x0 0x72000 0x0 0x2000>; + reg = <0x0 0x72000 0x0 0x800>; interrupts = ; status = "disabled"; }; sd_emmc_c: mmc@74000 { compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; - reg = <0x0 0x74000 0x0 0x2000>; + reg = <0x0 0x74000 0x0 0x800>; interrupts = ; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi index eb327664a4d8..6aaafff674f9 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi @@ -6,7 +6,7 @@ &apb { mali: gpu@c0000 { - compatible = "amlogic,meson-gxbb-mali", "arm,mali-450"; + compatible = "amlogic,meson-gxl-mali", "arm,mali-450"; reg = <0x0 0xc0000 0x0 0x40000>; interrupts = , , diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 3e3eb31748a3..f63bceb88caa 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -234,9 +234,6 @@ &sd_emmc_b { bus-width = <4>; cap-sd-highspeed; - sd-uhs-sdr12; - sd-uhs-sdr25; - sd-uhs-sdr50; max-frequency = <100000000>; disable-wp; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index 0cfd701809de..a1b31013ab6e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi @@ -189,3 +189,10 @@ &uart_AO { &usb0 { status = "okay"; }; + +&usb2_phy0 { + /* + * HDMI_5V is also used as supply for the USB VBUS. 
+ */ + phy-supply = <&hdmi_5v>; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 27538eea547b..c87a80e9bcc6 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -13,14 +13,6 @@ / { compatible = "amlogic,meson-gxl"; - reserved-memory { - /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */ - secmon_reserved_alt: secmon@5000000 { - reg = <0x0 0x05000000 0x0 0x300000>; - no-map; - }; - }; - soc { usb0: usb@c9000000 { status = "disabled"; diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 4a2a6af8e752..4057197048dc 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -118,7 +118,7 @@ pcie0: pcie@20020000 { #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <0>; @@ -149,7 +149,7 @@ pcie4: pcie@50020000 { #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>; + interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>; linux,pci-domain = <4>; @@ -566,7 +566,7 @@ i2c0: i2c@66080000 { reg = <0x66080000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; @@ -594,7 +594,7 @@ i2c1: i2c@660b0000 { reg = <0x660b0000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts index eb6f08cdbd79..77efa28c4dd5 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts @@ -43,6 +43,10 @@ &gphy0 { enet-phy-lane-swap; }; +&sdio0 { + mmc-ddr-1_8v; +}; + &uart2 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts index 5084b037320f..55ba495ef56e 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts +++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts @@ -42,3 +42,7 @@ / { &gphy0 { enet-phy-lane-swap; }; + +&sdio0 { + mmc-ddr-1_8v; +}; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi index 99aaff0b6d72..b203152ad67c 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi @@ -409,7 +409,7 @@ i2c0: i2c@b0000 { reg = <0x000b0000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; @@ -453,7 +453,7 @@ i2c1: i2c@e0000 { reg = <0x000e0000 0x100>; #address-cells = <1>; #size-cells = <0>; - interrupts = ; + interrupts = ; clock-frequency = <100000>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts index c6999624ed8a..68c5a6c819ae 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts +++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts @@ -585,6 +585,8 @@ &dwmmc2 { /* WIFI */ vmmc-supply = <&wlan_en>; ti,non-removable; non-removable; + cap-power-off-card; + keep-power-in-suspend; 
#address-cells = <0x1>; #size-cells = <0x0>; status = "ok"; diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index edb4ee0b8896..7f12624f6c8e 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts @@ -322,6 +322,8 @@ dwmmc_1: dwmmc1@f723e000 { dwmmc_2: dwmmc2@f723f000 { bus-width = <0x4>; non-removable; + cap-power-off-card; + keep-power-in-suspend; vmmc-supply = <®_vdd_3v3>; mmc-pwrseq = <&wl1835_pwrseq>; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi index 7dabe25f6774..1c6ff8197a88 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi @@ -149,7 +149,7 @@ CP110_LABEL(xmdio): mdio@12a600 { CP110_LABEL(icu): interrupt-controller@1e0000 { compatible = "marvell,cp110-icu"; - reg = <0x1e0000 0x10>; + reg = <0x1e0000 0x440>; #interrupt-cells = <3>; interrupt-controller; msi-parent = <&gicp>; diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi index 0f829db33efe..4d5ef01f43a3 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi @@ -75,7 +75,7 @@ serial@75b0000 { serial@75b1000 { label = "LS-UART0"; - status = "okay"; + status = "disabled"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&blsp2_uart2_4pins_default>; pinctrl-1 = <&blsp2_uart2_4pins_sleep>; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 650f356f69ca..c2625d15a8c0 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1191,14 +1191,14 @@ ports { port@0 { reg = <0>; - etf_out: endpoint { + etf_in: endpoint { slave-mode; remote-endpoint = <&funnel0_out>; }; }; port@1 { reg = <0>; - etf_in: endpoint { + etf_out: endpoint { remote-endpoint = <&replicator_in>; }; }; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts index 9b4dc41703e3..ae3b5adf32df 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts @@ -54,7 +54,7 @@ amp_vcc_reg: reg-fixed { sound { compatible = "audio-graph-card"; label = "UniPhier LD11"; - widgets = "Headphone", "Headphone Jack"; + widgets = "Headphone", "Headphones"; dais = <&i2s_port2 &i2s_port3 &i2s_port4 diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts index fe6608ea3277..7919233c9ce2 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts @@ -54,7 +54,7 @@ amp_vcc_reg: reg-fixed { sound { compatible = "audio-graph-card"; label = "UniPhier LD20"; - widgets = "Headphone", "Headphone Jack"; + widgets = "Headphone", "Headphones"; dais = <&i2s_port2 &i2s_port3 &i2s_port4 diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 3cfa8ca26738..f9a186f6af8a 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -47,6 +47,7 @@ CONFIG_ARCH_MVEBU=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_SEATTLE=y +CONFIG_ARCH_SYNQUACER=y CONFIG_ARCH_RENESAS=y CONFIG_ARCH_R8A7795=y CONFIG_ARCH_R8A7796=y @@ -58,7 +59,6 @@ CONFIG_ARCH_R8A77995=y CONFIG_ARCH_STRATIX10=y CONFIG_ARCH_TEGRA=y CONFIG_ARCH_SPRD=y -CONFIG_ARCH_SYNQUACER=y CONFIG_ARCH_THUNDER=y 
CONFIG_ARCH_THUNDER2=y CONFIG_ARCH_UNIPHIER=y @@ -67,25 +67,23 @@ CONFIG_ARCH_XGENE=y CONFIG_ARCH_ZX=y CONFIG_ARCH_ZYNQMP=y CONFIG_PCI=y -CONFIG_HOTPLUG_PCI_PCIE=y CONFIG_PCI_IOV=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_PCI_LAYERSCAPE=y -CONFIG_PCI_HISI=y -CONFIG_PCIE_QCOM=y -CONFIG_PCIE_KIRIN=y -CONFIG_PCIE_ARMADA_8K=y -CONFIG_PCIE_HISI_STB=y CONFIG_PCI_AARDVARK=y CONFIG_PCI_TEGRA=y CONFIG_PCIE_RCAR=y -CONFIG_PCIE_ROCKCHIP=y -CONFIG_PCIE_ROCKCHIP_HOST=m CONFIG_PCI_HOST_GENERIC=y CONFIG_PCI_XGENE=y CONFIG_PCI_HOST_THUNDER_PEM=y CONFIG_PCI_HOST_THUNDER_ECAM=y +CONFIG_PCIE_ROCKCHIP_HOST=m +CONFIG_PCI_LAYERSCAPE=y +CONFIG_PCI_HISI=y +CONFIG_PCIE_QCOM=y +CONFIG_PCIE_ARMADA_8K=y +CONFIG_PCIE_KIRIN=y +CONFIG_PCIE_HISI_STB=y CONFIG_ARM64_VA_BITS_48=y CONFIG_SCHED_MC=y CONFIG_NUMA=y @@ -104,8 +102,6 @@ CONFIG_HIBERNATION=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_ARM_CPUIDLE=y CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_GOV_POWERSAVE=m CONFIG_CPU_FREQ_GOV_USERSPACE=y @@ -113,11 +109,11 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y CONFIG_CPUFREQ_DT=y +CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_ARM_ARMADA_37XX_CPUFREQ=y CONFIG_ARM_BIG_LITTLE_CPUFREQ=y CONFIG_ARM_SCPI_CPUFREQ=y CONFIG_ARM_TEGRA186_CPUFREQ=y -CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -236,11 +232,6 @@ CONFIG_SMSC911X=y CONFIG_SNI_AVE=y CONFIG_SNI_NETSEC=y CONFIG_STMMAC_ETH=m -CONFIG_DWMAC_IPQ806X=m -CONFIG_DWMAC_MESON=m -CONFIG_DWMAC_ROCKCHIP=m -CONFIG_DWMAC_SUNXI=m -CONFIG_DWMAC_SUN8I=m CONFIG_MDIO_BUS_MUX_MMIOREG=y CONFIG_AT803X_PHY=m CONFIG_MARVELL_PHY=m @@ -269,8 +260,8 @@ CONFIG_WL18XX=m CONFIG_WLCORE_SDIO=m CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_ADC=m -CONFIG_KEYBOARD_CROS_EC=y CONFIG_KEYBOARD_GPIO=y +CONFIG_KEYBOARD_CROS_EC=y CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=m CONFIG_INPUT_MISC=y @@ -296,17 +287,13 @@ CONFIG_SERIAL_SAMSUNG=y CONFIG_SERIAL_SAMSUNG_CONSOLE=y CONFIG_SERIAL_TEGRA=y CONFIG_SERIAL_SH_SCI=y -CONFIG_SERIAL_SH_SCI_NR_UARTS=11 -CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y CONFIG_SERIAL_MVEBU_UART=y CONFIG_SERIAL_DEV_BUS=y -CONFIG_SERIAL_DEV_CTRL_TTYPORT=y CONFIG_VIRTIO_CONSOLE=y -CONFIG_I2C_HID=m CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y CONFIG_I2C_MUX_PCA954x=y @@ -325,26 +312,26 @@ CONFIG_I2C_RCAR=y CONFIG_I2C_CROS_EC_TUNNEL=y CONFIG_SPI=y CONFIG_SPI_ARMADA_3700=y -CONFIG_SPI_MESON_SPICC=m -CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_BCM2835=m CONFIG_SPI_BCM2835AUX=m +CONFIG_SPI_MESON_SPICC=m +CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_ORION=y CONFIG_SPI_PL022=y -CONFIG_SPI_QUP=y CONFIG_SPI_ROCKCHIP=y +CONFIG_SPI_QUP=y CONFIG_SPI_S3C64XX=y CONFIG_SPI_SPIDEV=m CONFIG_SPMI=y -CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_SINGLE=y CONFIG_PINCTRL_MAX77620=y +CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_MSM8916=y CONFIG_PINCTRL_MSM8994=y CONFIG_PINCTRL_MSM8996=y -CONFIG_PINCTRL_MT7622=y CONFIG_PINCTRL_QDF2XXX=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_PINCTRL_MT7622=y CONFIG_GPIO_DWAPB=y CONFIG_GPIO_MB86S7X=y CONFIG_GPIO_PL061=y @@ -368,13 +355,13 @@ CONFIG_SENSORS_INA2XX=m CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y CONFIG_CPU_THERMAL=y CONFIG_THERMAL_EMULATION=y +CONFIG_ROCKCHIP_THERMAL=m +CONFIG_RCAR_GEN3_THERMAL=y CONFIG_ARMADA_THERMAL=y CONFIG_BRCMSTB_THERMAL=m CONFIG_EXYNOS_THERMAL=y -CONFIG_RCAR_GEN3_THERMAL=y -CONFIG_QCOM_TSENS=y -CONFIG_ROCKCHIP_THERMAL=m 
CONFIG_TEGRA_BPMP_THERMAL=m +CONFIG_QCOM_TSENS=y CONFIG_UNIPHIER_THERMAL=y CONFIG_WATCHDOG=y CONFIG_S3C2410_WATCHDOG=y @@ -395,9 +382,9 @@ CONFIG_MFD_MAX77620=y CONFIG_MFD_SPMI_PMIC=y CONFIG_MFD_RK808=y CONFIG_MFD_SEC_CORE=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y CONFIG_REGULATOR_HI6421V530=y CONFIG_REGULATOR_HI655X=y @@ -407,16 +394,15 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_REGULATOR_QCOM_SPMI=y CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_S2MPS11=y +CONFIG_RC_CORE=m +CONFIG_RC_DECODERS=y +CONFIG_RC_DEVICES=y +CONFIG_IR_MESON=m CONFIG_MEDIA_SUPPORT=m CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_ANALOG_TV_SUPPORT=y CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y -CONFIG_MEDIA_RC_SUPPORT=y -CONFIG_RC_CORE=m -CONFIG_RC_DEVICES=y -CONFIG_RC_DECODERS=y -CONFIG_IR_MESON=m CONFIG_VIDEO_V4L2_SUBDEV_API=y # CONFIG_DVB_NET is not set CONFIG_V4L_MEM2MEM_DRIVERS=y @@ -441,8 +427,7 @@ CONFIG_ROCKCHIP_DW_HDMI=y CONFIG_ROCKCHIP_DW_MIPI_DSI=y CONFIG_ROCKCHIP_INNO_HDMI=y CONFIG_DRM_RCAR_DU=m -CONFIG_DRM_RCAR_LVDS=y -CONFIG_DRM_RCAR_VSP=y +CONFIG_DRM_RCAR_LVDS=m CONFIG_DRM_TEGRA=m CONFIG_DRM_PANEL_SIMPLE=m CONFIG_DRM_I2C_ADV7511=m @@ -455,7 +440,6 @@ CONFIG_FB_ARMCLCD=y CONFIG_BACKLIGHT_GENERIC=m CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_LP855X=m -CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -468,6 +452,7 @@ CONFIG_SND_SOC_RCAR=m CONFIG_SND_SOC_AK4613=m CONFIG_SND_SIMPLE_CARD=m CONFIG_SND_AUDIO_GRAPH_CARD=m +CONFIG_I2C_HID=m CONFIG_USB=y CONFIG_USB_OTG=y CONFIG_USB_XHCI_HCD=y @@ -501,12 +486,12 @@ CONFIG_MMC_BLOCK_MINORS=32 CONFIG_MMC_ARMMMCI=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_ACPI=y -CONFIG_MMC_SDHCI_F_SDH30=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_OF_ARASAN=y CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_MMC_SDHCI_CADENCE=y CONFIG_MMC_SDHCI_TEGRA=y +CONFIG_MMC_SDHCI_F_SDH30=y CONFIG_MMC_MESON_GX=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_SPI=y @@ -524,11 +509,11 @@ CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y CONFIG_LEDS_PWM=y CONFIG_LEDS_SYSCON=y +CONFIG_LEDS_TRIGGER_DISK=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_CPU=y CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_LEDS_TRIGGER_PANIC=y -CONFIG_LEDS_TRIGGER_DISK=y CONFIG_EDAC=y CONFIG_EDAC_GHES=y CONFIG_RTC_CLASS=y @@ -537,13 +522,13 @@ CONFIG_RTC_DRV_RK808=m CONFIG_RTC_DRV_S5M=y CONFIG_RTC_DRV_DS3232=y CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_CROS_EC=y CONFIG_RTC_DRV_S3C=y CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_SUN6I=y CONFIG_RTC_DRV_ARMADA38X=y CONFIG_RTC_DRV_TEGRA=y CONFIG_RTC_DRV_XGENE=y -CONFIG_RTC_DRV_CROS_EC=y CONFIG_DMADEVICES=y CONFIG_DMA_BCM2835=m CONFIG_K3_DMA=y @@ -579,7 +564,6 @@ CONFIG_HWSPINLOCK_QCOM=y CONFIG_ARM_MHU=y CONFIG_PLATFORM_MHU=y CONFIG_BCM2835_MBOX=y -CONFIG_HI6220_MBOX=y CONFIG_QCOM_APCS_IPC=y CONFIG_ROCKCHIP_IOMMU=y CONFIG_TEGRA_IOMMU_SMMU=y @@ -602,7 +586,6 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_EXTCON_USB_GPIO=y CONFIG_EXTCON_USBC_CROS_EC=y CONFIG_MEMORY=y -CONFIG_TEGRA_MC=y CONFIG_IIO=y CONFIG_EXYNOS_ADC=y CONFIG_ROCKCHIP_SARADC=m @@ -618,27 +601,27 @@ CONFIG_PWM_RCAR=m CONFIG_PWM_ROCKCHIP=y CONFIG_PWM_SAMSUNG=y CONFIG_PWM_TEGRA=m +CONFIG_PHY_XGENE=y +CONFIG_PHY_SUN4I_USB=y +CONFIG_PHY_HI6220_USB=y CONFIG_PHY_HISTB_COMBPHY=y CONFIG_PHY_HISI_INNO_USB2=y -CONFIG_PHY_RCAR_GEN3_USB2=y -CONFIG_PHY_RCAR_GEN3_USB3=m -CONFIG_PHY_HI6220_USB=y -CONFIG_PHY_QCOM_USB_HS=y -CONFIG_PHY_SUN4I_USB=y CONFIG_PHY_MVEBU_CP110_COMPHY=y CONFIG_PHY_QCOM_QMP=m 
-CONFIG_PHY_ROCKCHIP_INNO_USB2=y +CONFIG_PHY_QCOM_USB_HS=y +CONFIG_PHY_RCAR_GEN3_USB2=y +CONFIG_PHY_RCAR_GEN3_USB3=m CONFIG_PHY_ROCKCHIP_EMMC=y +CONFIG_PHY_ROCKCHIP_INNO_USB2=y CONFIG_PHY_ROCKCHIP_PCIE=m CONFIG_PHY_ROCKCHIP_TYPEC=y -CONFIG_PHY_XGENE=y CONFIG_PHY_TEGRA_XUSB=y CONFIG_QCOM_L2_PMU=y CONFIG_QCOM_L3_PMU=y -CONFIG_MESON_EFUSE=m CONFIG_QCOM_QFPROM=y CONFIG_ROCKCHIP_EFUSE=y CONFIG_UNIPHIER_EFUSE=y +CONFIG_MESON_EFUSE=m CONFIG_TEE=y CONFIG_OPTEE=y CONFIG_ARM_SCPI_PROTOCOL=y @@ -647,7 +630,6 @@ CONFIG_EFI_CAPSULE_LOADER=y CONFIG_ACPI=y CONFIG_ACPI_APEI=y CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y CONFIG_ACPI_APEI_MEMORY_FAILURE=y CONFIG_ACPI_APEI_EINJ=y CONFIG_EXT2_FS=y @@ -682,7 +664,6 @@ CONFIG_DEBUG_INFO=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y -CONFIG_LOCKUP_DETECTOR=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set # CONFIG_FTRACE is not set @@ -691,20 +672,15 @@ CONFIG_SECURITY=y CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA256_ARM64=m -CONFIG_CRYPTO_SHA512_ARM64=m CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y -CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m -CONFIG_CRYPTO_CRC32_ARM64_CE=m -CONFIG_CRYPTO_AES_ARM64=m -CONFIG_CRYPTO_AES_ARM64_CE=m -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m -CONFIG_CRYPTO_CHACHA20_NEON=m -CONFIG_CRYPTO_AES_ARM64_BS=m CONFIG_CRYPTO_SHA512_ARM64_CE=m CONFIG_CRYPTO_SHA3_ARM64=m CONFIG_CRYPTO_SM3_ARM64_CE=m +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +CONFIG_CRYPTO_CRC32_ARM64_CE=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_CHACHA20_NEON=m +CONFIG_CRYPTO_AES_ARM64_BS=m diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 253188fb8cb0..e3e50950a863 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req) kernel_neon_begin(); aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, (u8 *)ctx->key_enc, rounds, blocks, walk.iv); - err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); kernel_neon_end(); + err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } if (walk.nbytes) { u8 __aligned(8) tail[AES_BLOCK_SIZE]; diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index a91933b1e2e6..4b650ec1d7dd 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -28,7 +28,12 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); void __init apply_alternatives_all(void); -void apply_alternatives(void *start, size_t length); + +#ifdef CONFIG_MODULES +void apply_alternatives_module(void *start, size_t length); +#else +static inline void apply_alternatives_module(void *start, size_t length) { } +#endif #define ALTINSTR_ENTRY(feature,cb) \ " .word 661b - .\n" /* label */ \ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index fda9a8ca48be..fe8777b12f86 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -306,6 +306,7 @@ struct kvm_vcpu_arch { #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ #define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ +#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE 
enabled for EL0 */ #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 9f82d6b53851..1bdeca8918a6 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -224,10 +224,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte) * Only if the new pte is valid and kernel, otherwise TLB maintenance * or update_mmu_cache() have the necessary barriers. */ - if (pte_valid_not_user(pte)) { + if (pte_valid_not_user(pte)) dsb(ishst); - isb(); - } } extern void __sync_icache_dcache(pte_t pteval); @@ -434,7 +432,6 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { WRITE_ONCE(*pmdp, pmd); dsb(ishst); - isb(); } static inline void pmd_clear(pmd_t *pmdp) @@ -485,7 +482,6 @@ static inline void set_pud(pud_t *pudp, pud_t pud) { WRITE_ONCE(*pudp, pud); dsb(ishst); - isb(); } static inline void pud_clear(pud_t *pudp) diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h index fa8b3fe932e6..6495cc51246f 100644 --- a/arch/arm64/include/asm/simd.h +++ b/arch/arm64/include/asm/simd.h @@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy); static __must_check inline bool may_use_simd(void) { /* - * The raw_cpu_read() is racy if called with preemption enabled. - * This is not a bug: kernel_neon_busy is only set when - * preemption is disabled, so we cannot migrate to another CPU - * while it is set, nor can we migrate to a CPU where it is set. - * So, if we find it clear on some CPU then we're guaranteed to - * find it clear on any CPU we could migrate to. - * - * If we are in between kernel_neon_begin()...kernel_neon_end(), - * the flag will be set, but preemption is also disabled, so we - * can't migrate to another CPU and spuriously see it become - * false. + * kernel_neon_busy is only set while preemption is disabled, + * and is clear whenever preemption is enabled. Since + * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy + * cannot change under our feet -- if it's set we cannot be + * migrated, and if it's clear we cannot be migrated to a CPU + * where it is set. */ return !in_irq() && !irqs_disabled() && !in_nmi() && - !raw_cpu_read(kernel_neon_busy); + !this_cpu_read(kernel_neon_busy); } #else /* ! CONFIG_KERNEL_MODE_NEON */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6171178075dc..a8f84812c6e8 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -728,6 +728,17 @@ asm( asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \ } while (0) +/* + * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the + * set mask are set. Other bits are left as-is. + */ +#define sysreg_clear_set(sysreg, clear, set) do { \ + u64 __scs_val = read_sysreg(sysreg); \ + u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \ + if (__scs_new != __scs_val) \ + write_sysreg(__scs_new, sysreg); \ +} while (0) + static inline void config_sctlr_el1(u32 clear, u32 set) { u32 val; diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 5c4bce4ac381..36fb069fd049 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -122,7 +122,30 @@ static void patch_alternative(struct alt_instr *alt, } } -static void __apply_alternatives(void *alt_region, bool use_linear_alias) +/* + * We provide our own, private D-cache cleaning function so that we don't + * accidentally call into the cache.S code, which is patched by us at + * runtime. 
+ */ +static void clean_dcache_range_nopatch(u64 start, u64 end) +{ + u64 cur, d_size, ctr_el0; + + ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0); + d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0, + CTR_DMINLINE_SHIFT); + cur = start & ~(d_size - 1); + do { + /* + * We must clean+invalidate to the PoC in order to avoid + * Cortex-A53 errata 826319, 827319, 824069 and 819472 + * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE) + */ + asm volatile("dc civac, %0" : : "r" (cur) : "memory"); + } while (cur += d_size, cur < end); +} + +static void __apply_alternatives(void *alt_region, bool is_module) { struct alt_instr *alt; struct alt_region *region = alt_region; @@ -145,7 +168,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias) pr_info_once("patching kernel code\n"); origptr = ALT_ORIG_PTR(alt); - updptr = use_linear_alias ? lm_alias(origptr) : origptr; + updptr = is_module ? origptr : lm_alias(origptr); nr_inst = alt->orig_len / AARCH64_INSN_SIZE; if (alt->cpufeature < ARM64_CB_PATCH) @@ -155,8 +178,20 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias) alt_cb(alt, origptr, updptr, nr_inst); - flush_icache_range((uintptr_t)origptr, - (uintptr_t)(origptr + nr_inst)); + if (!is_module) { + clean_dcache_range_nopatch((u64)origptr, + (u64)(origptr + nr_inst)); + } + } + + /* + * The core module code takes care of cache maintenance in + * flush_module_icache(). + */ + if (!is_module) { + dsb(ish); + __flush_icache_all(); + isb(); } } @@ -178,7 +213,7 @@ static int __apply_alternatives_multi_stop(void *unused) isb(); } else { BUG_ON(alternatives_applied); - __apply_alternatives(®ion, true); + __apply_alternatives(®ion, false); /* Barriers provided by the cache flushing */ WRITE_ONCE(alternatives_applied, 1); } @@ -192,12 +227,14 @@ void __init apply_alternatives_all(void) stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); } -void apply_alternatives(void *start, size_t length) +#ifdef CONFIG_MODULES +void apply_alternatives_module(void *start, size_t length) { struct alt_region region = { .begin = start, .end = start + length, }; - __apply_alternatives(®ion, false); + __apply_alternatives(®ion, true); } +#endif diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d2856b129097..f24892a40d2c 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -937,7 +937,7 @@ static int __init parse_kpti(char *str) __kpti_forced = enabled ? 
1 : -1; return 0; } -__setup("kpti=", parse_kpti); +early_param("kpti", parse_kpti); #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ #ifdef CONFIG_ARM64_HW_AFDBM diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 155fd91e78f4..f0f27aeefb73 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -448,9 +448,8 @@ int module_finalize(const Elf_Ehdr *hdr, const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { - if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) { - apply_alternatives((void *)s->sh_addr, s->sh_size); - } + if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) + apply_alternatives_module((void *)s->sh_addr, s->sh_size); #ifdef CONFIG_ARM64_MODULE_PLTS if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name)) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index f3e2e3aec0b0..2faa9863d2e5 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -179,7 +179,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. */ -asmlinkage void secondary_start_kernel(void) +asmlinkage notrace void secondary_start_kernel(void) { u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; struct mm_struct *mm = &init_mm; diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index dc6ecfa5a2d2..aac7808ce216 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -5,13 +5,14 @@ * Copyright 2018 Arm Limited * Author: Dave Martin */ -#include +#include #include #include #include #include #include #include +#include /* * Called on entry to KVM_RUN unless this vcpu previously ran at least @@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) { BUG_ON(!current->mm); - vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE); + vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | + KVM_ARM64_HOST_SVE_IN_USE | + KVM_ARM64_HOST_SVE_ENABLED); vcpu->arch.flags |= KVM_ARM64_FP_HOST; + if (test_thread_flag(TIF_SVE)) vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE; + + if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) + vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; } /* @@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) */ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) { - local_bh_disable(); + unsigned long flags; - update_thread_flag(TIF_SVE, - vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE); + local_irq_save(flags); if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { /* Clean guest FP state to memory and invalidate cpu view */ fpsimd_save(); fpsimd_flush_cpu_state(); - } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { - /* Ensure user trap controls are correctly restored */ - fpsimd_bind_task_to_cpu(); + } else if (system_supports_sve()) { + /* + * The FPSIMD/SVE state in the CPU has not been touched, and we + * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been + * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE + * for EL0. 
To avoid spurious traps, restore the trap state + * seen by kvm_arch_vcpu_load_fp(): + */ + if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED) + sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN); + else + sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0); } - local_bh_enable(); + update_thread_flag(TIF_SVE, + vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE); + + local_irq_restore(flags); } diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 49e217ac7e1e..61e93f0b5482 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -583,13 +583,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, size >> PAGE_SHIFT); return NULL; } - if (!coherent) - __dma_flush_area(page_to_virt(page), iosize); - addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot, __builtin_return_address(0)); - if (!addr) { + if (addr) { + memset(addr, 0, size); + if (!coherent) + __dma_flush_area(page_to_virt(page), iosize); + } else { iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs); dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 5f9a73a4452c..03646e6a2ef4 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1) .macro __idmap_kpti_put_pgtable_ent_ng, type orr \type, \type, #PTE_NG // Same bit for blocks and pages - str \type, [cur_\()\type\()p] // Update the entry and ensure it - dc civac, cur_\()\type\()p // is visible to all CPUs. + str \type, [cur_\()\type\()p] // Update the entry and ensure + dmb sy // that it is visible to all + dc civac, cur_\()\type\()p // CPUs. .endm /* diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h index 8b707c249026..12fe700632f4 100644 --- a/arch/m68k/include/asm/mcf_pgalloc.h +++ b/arch/m68k/include/asm/mcf_pgalloc.h @@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address) static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, unsigned long address) { + pgtable_page_dtor(page); __free_page(page); } @@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, return page; } -extern inline void pte_free(struct mm_struct *mm, struct page *page) +static inline void pte_free(struct mm_struct *mm, struct page *page) { + pgtable_page_dtor(page); __free_page(page); } diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug index 331a3bb66297..93a737c8d1a6 100644 --- a/arch/microblaze/Kconfig.debug +++ b/arch/microblaze/Kconfig.debug @@ -8,11 +8,4 @@ config TRACE_IRQFLAGS_SUPPORT source "lib/Kconfig.debug" -config HEART_BEAT - bool "Heart beat function for kernel" - default n - help - This option turns on/off heart beat kernel functionality. - First GPIO node is taken. 
- endmenu diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h index d5384f6f36f7..ce9b7b786156 100644 --- a/arch/microblaze/include/asm/setup.h +++ b/arch/microblaze/include/asm/setup.h @@ -19,15 +19,10 @@ extern char cmd_line[COMMAND_LINE_SIZE]; extern char *klimit; -void microblaze_heartbeat(void); -void microblaze_setup_heartbeat(void); - # ifdef CONFIG_MMU extern void mmu_reset(void); # endif /* CONFIG_MMU */ -extern void of_platform_reset_gpio_probe(void); - void time_init(void); void init_IRQ(void); void machine_early_init(const char *cmdline, unsigned int ram, diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index 9774e1d9507b..a62d09420a47 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -38,6 +38,6 @@ #endif /* __ASSEMBLY__ */ -#define __NR_syscalls 399 +#define __NR_syscalls 401 #endif /* _ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h index eb156f914793..7a9f16a76413 100644 --- a/arch/microblaze/include/uapi/asm/unistd.h +++ b/arch/microblaze/include/uapi/asm/unistd.h @@ -415,5 +415,7 @@ #define __NR_pkey_alloc 396 #define __NR_pkey_free 397 #define __NR_statx 398 +#define __NR_io_pgetevents 399 +#define __NR_rseq 400 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile index 7e99cf6984a1..dd71637437f4 100644 --- a/arch/microblaze/kernel/Makefile +++ b/arch/microblaze/kernel/Makefile @@ -8,7 +8,6 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_timer.o = -pg CFLAGS_REMOVE_intc.o = -pg CFLAGS_REMOVE_early_printk.o = -pg -CFLAGS_REMOVE_heartbeat.o = -pg CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_process.o = -pg endif @@ -17,12 +16,11 @@ extra-y := head.o vmlinux.lds obj-y += dma.o exceptions.o \ hw_exception_handler.o irq.o \ - platform.o process.o prom.o ptrace.o \ + process.o prom.o ptrace.o \ reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o obj-y += cpu/ -obj-$(CONFIG_HEART_BEAT) += heartbeat.o obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o obj-$(CONFIG_MMU) += misc.o obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c deleted file mode 100644 index 2022130139d2..000000000000 --- a/arch/microblaze/kernel/heartbeat.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2007-2009 Michal Simek - * Copyright (C) 2007-2009 PetaLogix - * Copyright (C) 2006 Atmark Techno, Inc. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include -#include -#include - -#include -#include -#include - -static unsigned int base_addr; - -void microblaze_heartbeat(void) -{ - static unsigned int cnt, period, dist; - - if (base_addr) { - if (cnt == 0 || cnt == dist) - out_be32(base_addr, 1); - else if (cnt == 7 || cnt == dist + 7) - out_be32(base_addr, 0); - - if (++cnt > period) { - cnt = 0; - /* - * The hyperbolic function below modifies the heartbeat - * period length in dependency of the current (5min) - * load. It goes through the points f(0)=126, f(1)=86, - * f(5)=51, f(inf)->30. 
- */ - period = ((672 << FSHIFT) / (5 * avenrun[0] + - (7 << FSHIFT))) + 30; - dist = period / 4; - } - } -} - -void microblaze_setup_heartbeat(void) -{ - struct device_node *gpio = NULL; - int *prop; - int j; - const char * const gpio_list[] = { - "xlnx,xps-gpio-1.00.a", - NULL - }; - - for (j = 0; gpio_list[j] != NULL; j++) { - gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]); - if (gpio) - break; - } - - if (gpio) { - base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL)); - base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE); - pr_notice("Heartbeat GPIO at 0x%x\n", base_addr); - - /* GPIO is configured as output */ - prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL); - if (prop) - out_be32(base_addr + 4, 0); - } -} diff --git a/arch/microblaze/kernel/platform.c b/arch/microblaze/kernel/platform.c deleted file mode 100644 index 2540d60610d9..000000000000 --- a/arch/microblaze/kernel/platform.c +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2008 Michal Simek - * - * based on virtex.c file - * - * Copyright 2007 Secret Lab Technologies Ltd. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include -#include -#include - -static struct of_device_id xilinx_of_bus_ids[] __initdata = { - { .compatible = "simple-bus", }, - { .compatible = "xlnx,compound", }, - {} -}; - -static int __init microblaze_device_probe(void) -{ - of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL); - of_platform_reset_gpio_probe(); - return 0; -} -device_initcall(microblaze_device_probe); diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c index bab4c8330ef4..fcbe1daf6316 100644 --- a/arch/microblaze/kernel/reset.c +++ b/arch/microblaze/kernel/reset.c @@ -18,7 +18,7 @@ static int handle; /* reset pin handle */ static unsigned int reset_val; -void of_platform_reset_gpio_probe(void) +static int of_platform_reset_gpio_probe(void) { int ret; handle = of_get_named_gpio(of_find_node_by_path("/"), @@ -27,13 +27,13 @@ void of_platform_reset_gpio_probe(void) if (!gpio_is_valid(handle)) { pr_info("Skipping unavailable RESET gpio %d (%s)\n", handle, "reset"); - return; + return -ENODEV; } ret = gpio_request(handle, "reset"); if (ret < 0) { pr_info("GPIO pin is already allocated\n"); - return; + return ret; } /* get current setup value */ @@ -51,11 +51,12 @@ void of_platform_reset_gpio_probe(void) pr_info("RESET: Registered gpio device: %d, current val: %d\n", handle, reset_val); - return; + return 0; err: gpio_free(handle); - return; + return ret; } +device_initcall(of_platform_reset_gpio_probe); static void gpio_system_reset(void) diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index 56bcf313121f..6ab650593792 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -400,3 +400,5 @@ ENTRY(sys_call_table) .long sys_pkey_alloc .long sys_pkey_free .long sys_statx + .long sys_io_pgetevents + .long sys_rseq diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index 7de941cbbd94..a6683484b3a1 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -156,9 +156,6 @@ static inline void timer_ack(void) static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &clockevent_xilinx_timer; -#ifdef CONFIG_HEART_BEAT - microblaze_heartbeat(); -#endif timer_ack(); 
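For reference, the period formula in the deleted microblaze heartbeat code above can be checked in isolation. The sketch below assumes the kernel's FSHIFT of 11 for the avenrun[] fixed-point format; everything else is illustrative userspace code, not part of the patch.

#include <stdio.h>

#define FSHIFT  11               /* bits of fraction in avenrun[] */
#define FIXED_1 (1 << FSHIFT)

static unsigned int heartbeat_period(unsigned long avenrun0)
{
	return ((672 << FSHIFT) / (5 * avenrun0 + (7 << FSHIFT))) + 30;
}

int main(void)
{
	/* loads of 0.0, 1.0 and 5.0 in fixed point */
	printf("f(0) = %u\n", heartbeat_period(0));            /* 126 */
	printf("f(1) = %u\n", heartbeat_period(1 * FIXED_1));  /* 86 */
	printf("f(5) = %u\n", heartbeat_period(5 * FIXED_1));  /* 51 */
	return 0;
}

This reproduces the points quoted in the deleted comment: f(0)=126, f(1)=86, f(5)=51, tending to 30 for large loads.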
evt->event_handler(evt); return IRQ_HANDLED; @@ -318,10 +315,6 @@ static int __init xilinx_timer_init(struct device_node *timer) return ret; } -#ifdef CONFIG_HEART_BEAT - microblaze_setup_heartbeat(); -#endif - ret = xilinx_clocksource_init(); if (ret) return ret; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 3f9deec70b92..08c10c518f83 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -65,6 +65,7 @@ config MIPS select HAVE_OPROFILE select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RSEQ select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c index 6b2c6f3baefa..75fb96ca61db 100644 --- a/arch/mips/ath79/mach-pb44.c +++ b/arch/mips/ath79/mach-pb44.c @@ -34,7 +34,7 @@ #define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL) static struct gpiod_lookup_table pb44_i2c_gpiod_table = { - .dev_id = "i2c-gpio", + .dev_id = "i2c-gpio.0", .table = { GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA, NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 6054d49e608e..8c9cbf13d32a 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void) */ if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) cpu_wait = NULL; + + /* + * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail" + * Enable ExternalSync for sync instruction to take effect + */ + set_c0_config7(MIPS_CONF7_ES); break; #endif } diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index a7d0b836f2f7..cea8ad864b3f 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \ __val = *__addr; \ slow; \ \ + /* prevent prefetching of coherent DMA data prematurely */ \ + rmb(); \ return pfx##ioswab##bwlq(__addr, __val); \ } diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index ae461d91cd1f..0bc270806ec5 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -681,6 +681,8 @@ #define MIPS_CONF7_WII (_ULCAST_(1) << 31) #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) +/* ExternalSync */ +#define MIPS_CONF7_ES (_ULCAST_(1) << 8) #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) #define MIPS_CONF7_AR (_ULCAST_(1) << 16) @@ -2765,6 +2767,7 @@ __BUILD_SET_C0(status) __BUILD_SET_C0(cause) __BUILD_SET_C0(config) __BUILD_SET_C0(config5) +__BUILD_SET_C0(config7) __BUILD_SET_C0(intcontrol) __BUILD_SET_C0(intctl) __BUILD_SET_C0(srsmap) diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index bb05e9916a5f..f25dd1d83fb7 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -388,17 +388,19 @@ #define __NR_pkey_alloc (__NR_Linux + 364) #define __NR_pkey_free (__NR_Linux + 365) #define __NR_statx (__NR_Linux + 366) +#define __NR_rseq (__NR_Linux + 367) +#define __NR_io_pgetevents (__NR_Linux + 368) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 366 +#define __NR_Linux_syscalls 368 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 366 +#define __NR_O32_Linux_syscalls 368 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -733,16 +735,18 @@ #define __NR_pkey_alloc (__NR_Linux + 324) #define __NR_pkey_free (__NR_Linux + 325) #define __NR_statx 
(__NR_Linux + 326) +#define __NR_rseq (__NR_Linux + 327) +#define __NR_io_pgetevents (__NR_Linux + 328) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 326 +#define __NR_Linux_syscalls 328 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 326 +#define __NR_64_Linux_syscalls 328 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1081,15 +1085,17 @@ #define __NR_pkey_alloc (__NR_Linux + 328) #define __NR_pkey_free (__NR_Linux + 329) #define __NR_statx (__NR_Linux + 330) +#define __NR_rseq (__NR_Linux + 331) +#define __NR_io_pgetevents (__NR_Linux + 332) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 330 +#define __NR_Linux_syscalls 332 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 330 +#define __NR_N32_Linux_syscalls 332 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 38a302919e6b..d7de8adcfcc8 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -79,6 +79,10 @@ FEXPORT(ret_from_fork) jal schedule_tail # a0 = struct task_struct *prev FEXPORT(syscall_exit) +#ifdef CONFIG_DEBUG_RSEQ + move a0, sp + jal rseq_syscall +#endif local_irq_disable # make sure need_resched and # signals dont change between # sampling and return @@ -141,6 +145,10 @@ work_notifysig: # deal with pending signals and j resume_userspace_check FEXPORT(syscall_exit_partial) +#ifdef CONFIG_DEBUG_RSEQ + move a0, sp + jal rseq_syscall +#endif local_irq_disable # make sure need_resched doesn't # change between and return LONG_L a2, TI_FLAGS($28) # current->work diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index f2ee7e1e3342..cff52b283e03 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra) EXPORT_SYMBOL(_mcount) PTR_LA t1, ftrace_stub PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ - bne t1, t2, static_trace + beq t1, t2, fgraph_trace nop + MCOUNT_SAVE_REGS + + move a0, ra /* arg1: self return address */ + jalr t2 /* (1) call *ftrace_trace_function */ + move a1, AT /* arg2: parent's return address */ + + MCOUNT_RESTORE_REGS + +fgraph_trace: #ifdef CONFIG_FUNCTION_GRAPH_TRACER + PTR_LA t1, ftrace_stub PTR_L t3, ftrace_graph_return bne t1, t3, ftrace_graph_caller nop @@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount) bne t1, t3, ftrace_graph_caller nop #endif - b ftrace_stub -#ifdef CONFIG_32BIT - addiu sp, sp, 8 -#else - nop -#endif -static_trace: - MCOUNT_SAVE_REGS - - move a0, ra /* arg1: self return address */ - jalr t2 /* (1) call *ftrace_trace_function */ - move a1, AT /* arg2: parent's return address */ - - MCOUNT_RESTORE_REGS #ifdef CONFIG_32BIT addiu sp, sp, 8 #endif + .globl ftrace_stub ftrace_stub: RETURN_BACK diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 8d85046adcc8..9670e70139fd 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp) return sp & ALMASK; } -static void arch_dump_stack(void *info) +static DEFINE_PER_CPU(call_single_data_t, backtrace_csd); +static struct cpumask backtrace_csd_busy; + +static void handle_backtrace(void *info) { - struct pt_regs *regs; + nmi_cpu_backtrace(get_irq_regs()); + cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy); +} - regs = get_irq_regs(); +static void 
raise_backtrace(cpumask_t *mask) +{ + call_single_data_t *csd; + int cpu; - if (regs) - show_regs(regs); + for_each_cpu(cpu, mask) { + /* + * If we previously sent an IPI to the target CPU & it hasn't + * cleared its bit in the busy cpumask then it didn't handle + * our previous IPI & it's not safe for us to reuse the + * call_single_data_t. + */ + if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) { + pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n", + cpu); + continue; + } - dump_stack(); + csd = &per_cpu(backtrace_csd, cpu); + csd->func = handle_backtrace; + smp_call_function_single_async(cpu, csd); + } } void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) { - long this_cpu = get_cpu(); - - if (cpumask_test_cpu(this_cpu, mask) && !exclude_self) - dump_stack(); - - smp_call_function_many(mask, arch_dump_stack, NULL, 1); - - put_cpu(); + nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace); } int mips_get_process_fp_mode(struct task_struct *task) diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index a9a7d78803cd..91d3c8c46097 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -590,3 +590,5 @@ EXPORT(sys_call_table) PTR sys_pkey_alloc PTR sys_pkey_free /* 4365 */ PTR sys_statx + PTR sys_rseq + PTR sys_io_pgetevents diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 65d5aeeb9bdb..358d9599983d 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -439,4 +439,6 @@ EXPORT(sys_call_table) PTR sys_pkey_alloc PTR sys_pkey_free /* 5325 */ PTR sys_statx + PTR sys_rseq + PTR sys_io_pgetevents .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index cbf190ef9e8a..c65eaacc1abf 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -434,4 +434,6 @@ EXPORT(sysn32_call_table) PTR sys_pkey_alloc PTR sys_pkey_free PTR sys_statx /* 6330 */ + PTR sys_rseq + PTR compat_sys_io_pgetevents .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 9ebe3e2403b1..73913f072e39 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -583,4 +583,6 @@ EXPORT(sys32_call_table) PTR sys_pkey_alloc PTR sys_pkey_free /* 4365 */ PTR sys_statx + PTR sys_rseq + PTR compat_sys_io_pgetevents .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 9e224469c788..0a9cfe7a0372 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -801,6 +801,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->regs[0] = 0; /* Don't deal with this again. 
*/ } + rseq_signal_deliver(ksig, regs); + if (sig_uses_siginfo(&ksig->ka, abi)) ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn, ksig, regs, oldset); @@ -868,6 +870,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); } user_enter(); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index d67fa74622ee..8d505a21396e 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs) void show_regs(struct pt_regs *regs) { __show_regs((struct pt_regs *)regs); + dump_stack(); } void show_registers(struct pt_regs *regs) diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 1986e09fb457..1601d90b087b 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr, return error; } +static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, + void *arg) +{ + unsigned long i; + + for (i = 0; i < nr_pages; i++) { + if (pfn_valid(start_pfn + i) && + !PageReserved(pfn_to_page(start_pfn + i))) + return 1; + } + + return 0; +} + /* * Generic mapping function (not visible outside): */ @@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr, void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags) { + unsigned long offset, pfn, last_pfn; struct vm_struct * area; - unsigned long offset; phys_addr_t last_addr; void * addr; @@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long return (void __iomem *) CKSEG1ADDR(phys_addr); /* - * Don't allow anybody to remap normal RAM that we're using.. + * Don't allow anybody to remap RAM that may be allocated by the page + * allocator, since that could lead to races & data clobbering. 
*/ - if (phys_addr < virt_to_phys(high_memory)) { - char *t_addr, *t_end; - struct page *page; - - t_addr = __va(phys_addr); - t_end = t_addr + (size - 1); - - for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) - if(!PageReserved(page)) - return NULL; + pfn = PFN_DOWN(phys_addr); + last_pfn = PFN_DOWN(last_addr); + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, + __ioremap_check_ram) == 1) { + WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", + &phys_addr, &last_addr); + return NULL; } /* diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h index 3e1a46615120..8999b9226512 100644 --- a/arch/openrisc/include/asm/pgalloc.h +++ b/arch/openrisc/include/asm/pgalloc.h @@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte) __free_page(pte); } +#define __pte_free_tlb(tlb, pte, addr) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page((tlb), (pte)); \ +} while (0) -#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte)) #define pmd_pgtable(pmd) pmd_page(pmd) #define check_pgt_cache() do { } while (0) diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 690d55272ba6..0c826ad6e994 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler) l.addi r3,r1,0 // pt_regs /* r4 set be EXCEPTION_HANDLE */ // effective address of fault - /* - * __PHX__: TODO - * - * all this can be written much simpler. look at - * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part - */ #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX l.lwz r6,PT_PC(r3) // address of an offending insn l.lwz r6,0(r6) // instruction that caused pf @@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler) #else - l.lwz r6,PT_SR(r3) // SR + l.mfspr r6,r0,SPR_SR // SR l.andi r6,r6,SPR_SR_DSX // check for delay slot exception l.sfne r6,r0 // exception happened in delay slot l.bnf 7f diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S index fb02b2a1d6f2..9fc6b60140f0 100644 --- a/arch/openrisc/kernel/head.S +++ b/arch/openrisc/kernel/head.S @@ -210,8 +210,7 @@ * r4 - EEAR exception EA * r10 - current pointing to current_thread_info struct * r12 - syscall 0, since we didn't come from syscall - * r13 - temp it actually contains new SR, not needed anymore - * r31 - handler address of the handler we'll jump to + * r30 - handler address of the handler we'll jump to * * handler has to save remaining registers to the exception * ksp frame *before* tainting them! 
@@ -244,6 +243,7 @@ /* r1 is KSP, r30 is __pa(KSP) */ ;\ tophys (r30,r1) ;\ l.sw PT_GPR12(r30),r12 ;\ + /* r4 use for tmp before EA */ ;\ l.mfspr r12,r0,SPR_EPCR_BASE ;\ l.sw PT_PC(r30),r12 ;\ l.mfspr r12,r0,SPR_ESR_BASE ;\ @@ -263,7 +263,10 @@ /* r12 == 1 if we come from syscall */ ;\ CLEAR_GPR(r12) ;\ /* ----- turn on MMU ----- */ ;\ - l.ori r30,r0,(EXCEPTION_SR) ;\ + /* Carry DSX into exception SR */ ;\ + l.mfspr r30,r0,SPR_SR ;\ + l.andi r30,r30,SPR_SR_DSX ;\ + l.ori r30,r30,(EXCEPTION_SR) ;\ l.mtspr r0,r30,SPR_ESR_BASE ;\ /* r30: EA address of handler */ ;\ LOAD_SYMBOL_2_GPR(r30,handler) ;\ diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index fac246e6f37a..d8981cbb852a 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs) return 0; } #else - return regs->sr & SPR_SR_DSX; + return mfspr(SPR_SR) & SPR_SR_DSX; #endif } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index c480770fabcd..17526bebcbd2 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -244,11 +244,11 @@ config PARISC_PAGE_SIZE_4KB config PARISC_PAGE_SIZE_16KB bool "16KB" - depends on PA8X00 + depends on PA8X00 && BROKEN config PARISC_PAGE_SIZE_64KB bool "64KB" - depends on PA8X00 + depends on PA8X00 && BROKEN endchoice @@ -347,7 +347,7 @@ config NR_CPUS int "Maximum number of CPUs (2-32)" range 2 32 depends on SMP - default "32" + default "4" endmenu diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 714284ea6cc2..5ce030266e7d 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -65,10 +65,6 @@ endif # kernel. cflags-y += -mdisable-fpregs -# Without this, "ld -r" results in .text sections that are too big -# (> 0x40000) for branches to reach stubs. -cflags-y += -ffunction-sections - # Use long jumps instead of long branches (needed if your linker fails to # link a too big vmlinux executable). Not enabled for building modules. 
ifdef CONFIG_MLONGCALLS diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h index eeb5c8858663..715c96ba2ec8 100644 --- a/arch/parisc/include/asm/signal.h +++ b/arch/parisc/include/asm/signal.h @@ -21,14 +21,6 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; -#ifndef __KERNEL__ -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - sigset_t sa_mask; /* mask last for extensibility */ -}; -#endif - #include #endif /* !__ASSEMBLY */ diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 4872e77aa96b..dc77c5a51db7 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -364,8 +364,9 @@ #define __NR_preadv2 (__NR_Linux + 347) #define __NR_pwritev2 (__NR_Linux + 348) #define __NR_statx (__NR_Linux + 349) +#define __NR_io_pgetevents (__NR_Linux + 350) -#define __NR_Linux_syscalls (__NR_statx + 1) +#define __NR_Linux_syscalls (__NR_io_pgetevents + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index e0e1c9775c32..5eb979d04b90 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -154,17 +154,14 @@ int register_parisc_driver(struct parisc_driver *driver) { /* FIXME: we need this because apparently the sti * driver can be registered twice */ - if(driver->drv.name) { - printk(KERN_WARNING - "BUG: skipping previously registered driver %s\n", - driver->name); + if (driver->drv.name) { + pr_warn("BUG: skipping previously registered driver %s\n", + driver->name); return 1; } if (!driver->probe) { - printk(KERN_WARNING - "BUG: driver %s has no probe routine\n", - driver->name); + pr_warn("BUG: driver %s has no probe routine\n", driver->name); return 1; } @@ -491,12 +488,9 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) dev = create_parisc_device(mod_path); if (dev->id.hw_type != HPHW_FAULTY) { - printk(KERN_ERR "Two devices have hardware path [%s]. " - "IODC data for second device: " - "%02x%02x%02x%02x%02x%02x\n" - "Rearranging GSC cards sometimes helps\n", - parisc_pathname(dev), iodc_data[0], iodc_data[1], - iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]); + pr_err("Two devices have hardware path [%s]. IODC data for second device: %7phN\n" + "Rearranging GSC cards sometimes helps\n", + parisc_pathname(dev), iodc_data); return NULL; } @@ -528,8 +522,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) * the keyboard controller */ if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa)) - printk("Unable to claim HPA %lx for device %s\n", - hpa, name); + pr_warn("Unable to claim HPA %lx for device %s\n", hpa, name); return dev; } @@ -875,7 +868,7 @@ static void print_parisc_device(struct parisc_device *dev) static int count; print_pa_hwpath(dev, hw_path); - printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", + pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 6308749359e4..fe3f2a49d2b1 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -445,6 +445,7 @@ ENTRY_COMP(preadv2) ENTRY_COMP(pwritev2) ENTRY_SAME(statx) + ENTRY_COMP(io_pgetevents) /* 350 */ .ifne (. 
- 90b) - (__NR_Linux_syscalls * (91b - 90b)) diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 143f90e2f9f3..2ef83d78eec4 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -25,7 +25,7 @@ /* #define DEBUG 1 */ #ifdef DEBUG -#define dbg(x...) printk(x) +#define dbg(x...) pr_debug(x) #else #define dbg(x...) #endif @@ -182,7 +182,7 @@ int __init unwind_init(void) start = (long)&__start___unwind[0]; stop = (long)&__stop___unwind[0]; - printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", + dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", start, stop, (stop - start) / sizeof(struct unwind_table_entry)); diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index bd06a3ccda31..2ea575cb3401 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -244,6 +244,7 @@ cpu-as-$(CONFIG_4xx) += -Wa,-m405 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) cpu-as-$(CONFIG_E200) += -Wa,-me200 cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 +cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) KBUILD_AFLAGS += $(cpu-as-y) KBUILD_CFLAGS += $(cpu-as-y) diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index 6a6673907e45..82e44b1a00ae 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h @@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size) } #define check_pgt_cache() do { } while (0) +#define get_hugepd_cache_index(x) (x) #ifdef CONFIG_SMP static inline void pgtable_free_tlb(struct mmu_gather *tlb, @@ -137,7 +138,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { - pgtable_page_dtor(table); pgtable_free_tlb(tlb, page_address(table), 0); } #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h index af5f2baac80f..a069dfcac9a9 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h @@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd) } #define is_hugepd(hpd) (hugepd_ok(hpd)) +/* + * 16M and 16G huge page directory tables are allocated from slab cache + * + */ +#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24) +#define H_16G_CACHE_INDEX \ + (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34) + +static inline int get_hugepd_cache_index(int index) +{ + switch (index) { + case H_16M_CACHE_INDEX: + return HTLB_16M_INDEX; + case H_16G_CACHE_INDEX: + return HTLB_16G_INDEX; + default: + BUG(); + } + /* should not reach */ +} + #else /* !CONFIG_HUGETLB_PAGE */ static inline int pmd_huge(pmd_t pmd) { return 0; } static inline int pud_huge(pud_t pud) { return 0; } diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h index fb4b3ba52339..d7ee249d6890 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h @@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd) { return 0; } + #define is_hugepd(pdep) 0 +/* + * This should never get called + */ +static inline int get_hugepd_cache_index(int index) +{ + BUG(); +} + #else /* !CONFIG_HUGETLB_PAGE */ static inline int pmd_huge(pmd_t pmd) { return 0; } static 
inline int pud_huge(pud_t pud) { return 0; } diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 63cee159022b..42aafba7a308 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -287,6 +287,11 @@ enum pgtable_index { PMD_INDEX, PUD_INDEX, PGD_INDEX, + /* + * Below are used with 4k page size and hugetlb + */ + HTLB_16M_INDEX, + HTLB_16G_INDEX, }; extern unsigned long __vmalloc_start; diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h index 0f571e0ebca1..bd9ba8defd72 100644 --- a/arch/powerpc/include/asm/nmi.h +++ b/arch/powerpc/include/asm/nmi.h @@ -8,7 +8,7 @@ extern void arch_touch_nmi_watchdog(void); static inline void arch_touch_nmi_watchdog(void) {} #endif -#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE) +#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE) extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h index 1707781d2f20..8825953c225b 100644 --- a/arch/powerpc/include/asm/nohash/32/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h @@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size) } #define check_pgt_cache() do { } while (0) +#define get_hugepd_cache_index(x) (x) #ifdef CONFIG_SMP static inline void pgtable_free_tlb(struct mmu_gather *tlb, @@ -139,7 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, unsigned long address) { tlb_flush_pgtable(tlb, address); - pgtable_page_dtor(table); pgtable_free_tlb(tlb, page_address(table), 0); } #endif /* _ASM_POWERPC_PGALLOC_32_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h index 0e693f322cb2..e2d62d033708 100644 --- a/arch/powerpc/include/asm/nohash/64/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h @@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift) } } +#define get_hugepd_cache_index(x) (x) #ifdef CONFIG_SMP static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) { diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index cfcf6a874cfa..01b5171ea189 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -393,3 +393,4 @@ SYSCALL(pkey_alloc) SYSCALL(pkey_free) SYSCALL(pkey_mprotect) SYSCALL(rseq) +COMPAT_SYS(io_pgetevents) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 1e9708632dce..c19379f0a32e 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include -#define NR_syscalls 388 +#define NR_syscalls 389 #define __NR__exit __NR_exit diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index ac5ba55066dd..985534d0b448 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -399,5 +399,6 @@ #define __NR_pkey_free 385 #define __NR_pkey_mprotect 386 #define __NR_rseq 387 +#define __NR_io_pgetevents 388 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 4be1c0de9406..96dd3d871986 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ 
b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void) cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST; cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG; cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; - } else /* DD2.1 and up have DD2_1 */ + } else if ((version & 0xffff0000) == 0x004e0000) + /* DD2.1 and up have DD2_1 */ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1; if ((version & 0xffff0000) == 0x004e0000) { diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 4f861055a852..d63b488d34d7 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -285,9 +285,6 @@ pci_bus_to_hose(int bus) * Note that the returned IO or memory base is a physical address */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus, unsigned long, devfn) { @@ -313,4 +310,3 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, return result; } -#pragma GCC diagnostic pop diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 812171c09f42..dff28f903512 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -203,9 +203,6 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose) #define IOBASE_ISA_IO 3 #define IOBASE_ISA_MEM 4 -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus, unsigned long, in_devfn) { @@ -259,7 +256,6 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus, return -EOPNOTSUPP; } -#pragma GCC diagnostic pop #ifdef CONFIG_NUMA int pcibus_to_node(struct pci_bus *bus) diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 7fb9f83dcde8..8afd146bc9c7 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1051,9 +1051,6 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, } /* We assume to be passed big endian arguments */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) { struct rtas_args args; @@ -1140,7 +1137,6 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) return 0; } -#pragma GCC diagnostic pop /* * Call early during boot, before mem init, to retrieve the RTAS diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 62b1a40d8957..40b44bb53a4e 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -700,12 +700,19 @@ EXPORT_SYMBOL(check_legacy_ioport); static int ppc_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { + /* + * panic does a local_irq_disable, but we really + * want interrupts to be hard disabled. + */ + hard_irq_disable(); + /* * If firmware-assisted dump has been registered then trigger * firmware-assisted dump and let firmware handle everything else. 
*/ crash_fadump(NULL, ptr); - ppc_md.panic(ptr); /* May not return */ + if (ppc_md.panic) + ppc_md.panic(ptr); /* May not return */ return NOTIFY_DONE; } @@ -716,7 +723,8 @@ static struct notifier_block ppc_panic_block = { void __init setup_panic(void) { - if (!ppc_md.panic) + /* PPC64 always does a hard irq disable in its panic handler */ + if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic) return; atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 7a7ce8ad455e..225bc5f91049 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -387,6 +387,14 @@ void early_setup_secondary(void) #endif /* CONFIG_SMP */ +void panic_smp_self_stop(void) +{ + hard_irq_disable(); + spin_begin(); + while (1) + spin_cpu_relax(); +} + #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE) static bool use_spinloop(void) { diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 17fe4339ba59..b3e8db376ecd 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk) /* Re-enable the breakpoints for the signal stack */ thread_change_pc(tsk, tsk->thread.regs); - rseq_signal_deliver(tsk->thread.regs); + rseq_signal_deliver(&ksig, tsk->thread.regs); if (is32) { if (ksig.ka.sa.sa_flags & SA_SIGINFO) @@ -170,7 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); - rseq_handle_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); } user_enter(); diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 5eedbb282d42..e6474a45cef5 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -1038,9 +1038,6 @@ static int do_setcontext_tm(struct ucontext __user *ucp, } #endif -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" #ifdef CONFIG_PPC64 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, struct ucontext __user *, new_ctx, int, ctx_size) @@ -1134,7 +1131,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, set_thread_flag(TIF_RESTOREALL); return 0; } -#pragma GCC diagnostic pop #ifdef CONFIG_PPC64 COMPAT_SYSCALL_DEFINE0(rt_sigreturn) @@ -1231,9 +1227,6 @@ SYSCALL_DEFINE0(rt_sigreturn) return 0; } -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" #ifdef CONFIG_PPC32 SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx, int, ndbg, struct sig_dbg_op __user *, dbg) @@ -1337,7 +1330,6 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx, return 0; } #endif -#pragma GCC diagnostic pop /* * OK, we're invoking a handler diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index d42b60020389..83d51bf586c7 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -625,9 +625,6 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp) /* * Handle {get,set,swap}_context operations */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, struct ucontext __user *, new_ctx, long, 
ctx_size) { @@ -693,7 +690,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, set_thread_flag(TIF_RESTOREALL); return 0; } -#pragma GCC diagnostic pop /* diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 5eadfffabe35..4794d6b4f4d2 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -600,9 +600,6 @@ static void nmi_stop_this_cpu(struct pt_regs *regs) nmi_ipi_busy_count--; nmi_ipi_unlock(); - /* Remove this CPU */ - set_cpu_online(smp_processor_id(), false); - spin_begin(); while (1) spin_cpu_relax(); @@ -617,9 +614,6 @@ void smp_send_stop(void) static void stop_this_cpu(void *dummy) { - /* Remove this CPU */ - set_cpu_online(smp_processor_id(), false); - hard_irq_disable(); spin_begin(); while (1) diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 07e97f289c52..e2c50b55138f 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -196,7 +196,7 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk, EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable); #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ -#ifdef CONFIG_PPC_BOOK3S_64 +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) static void handle_backtrace_ipi(struct pt_regs *regs) { nmi_cpu_backtrace(regs); @@ -242,4 +242,4 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) { nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi); } -#endif /* CONFIG_PPC64 */ +#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */ diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 083fa06962fd..466216506eb2 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -62,9 +62,6 @@ static inline long do_mmap2(unsigned long addr, size_t len, return ret; } -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) @@ -78,7 +75,6 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len, { return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT); } -#pragma GCC diagnostic pop #ifdef CONFIG_PPC32 /* diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 7c5f479c5c00..8a9a49c13865 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif if (shift >= pdshift) hugepd_free(tlb, hugepte); else - pgtable_free_tlb(tlb, hugepte, pdshift - shift); + pgtable_free_tlb(tlb, hugepte, + get_hugepd_cache_index(pdshift - shift)); } static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index c1f4ca45c93a..4afbfbb64bfd 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index) case PUD_INDEX: kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table); break; +#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE) + /* 16M hugepd directory at pud level */ + case HTLB_16M_INDEX: + BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0); + kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table); + break; + /* 16G hugepd directory at the pgd level */ + case HTLB_16G_INDEX: + 
BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0); + kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table); + break; +#endif /* We don't free pgd table via RCU callback */ default: BUG(); diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index 75cb646a79c3..9d16ee251fc0 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c @@ -186,9 +186,6 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, * in a 2-bit field won't allow writes to a page that is otherwise * write-protected. */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpragmas" -#pragma GCC diagnostic ignored "-Wattribute-alias" SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, unsigned long, len, u32 __user *, map) { @@ -272,4 +269,3 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr, up_write(&mm->mmap_sem); return err; } -#pragma GCC diagnostic pop diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 67a6e86d3e7e..1135b43a597c 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -689,22 +689,17 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range); static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2; -void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end) +static inline void __radix__flush_tlb_range(struct mm_struct *mm, + unsigned long start, unsigned long end, + bool flush_all_sizes) { - struct mm_struct *mm = vma->vm_mm; unsigned long pid; unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift; unsigned long page_size = 1UL << page_shift; unsigned long nr_pages = (end - start) >> page_shift; bool local, full; -#ifdef CONFIG_HUGETLB_PAGE - if (is_vm_hugetlb_page(vma)) - return radix__flush_hugetlb_tlb_range(vma, start, end); -#endif - pid = mm->context.id; if (unlikely(pid == MMU_NO_CONTEXT)) return; @@ -738,37 +733,64 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start, _tlbie_pid(pid, RIC_FLUSH_TLB); } } else { - bool hflush = false; + bool hflush = flush_all_sizes; + bool gflush = flush_all_sizes; unsigned long hstart, hend; + unsigned long gstart, gend; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT; - hend = end >> HPAGE_PMD_SHIFT; - if (hstart < hend) { - hstart <<= HPAGE_PMD_SHIFT; - hend <<= HPAGE_PMD_SHIFT; + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) hflush = true; + + if (hflush) { + hstart = (start + PMD_SIZE - 1) & PMD_MASK; + hend = end & PMD_MASK; + if (hstart == hend) + hflush = false; + } + + if (gflush) { + gstart = (start + PUD_SIZE - 1) & PUD_MASK; + gend = end & PUD_MASK; + if (gstart == gend) + gflush = false; } -#endif asm volatile("ptesync": : :"memory"); if (local) { __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbiel_va_range(hstart, hend, pid, - HPAGE_PMD_SIZE, MMU_PAGE_2M); + PMD_SIZE, MMU_PAGE_2M); + if (gflush) + __tlbiel_va_range(gstart, gend, pid, + PUD_SIZE, MMU_PAGE_1G); asm volatile("ptesync": : :"memory"); } else { __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize); if (hflush) __tlbie_va_range(hstart, hend, pid, - HPAGE_PMD_SIZE, MMU_PAGE_2M); + PMD_SIZE, MMU_PAGE_2M); + if (gflush) + __tlbie_va_range(gstart, gend, pid, + PUD_SIZE, MMU_PAGE_1G); fixup_tlbie(); asm volatile("eieio; tlbsync; ptesync": : :"memory"); } } preempt_enable(); } + +void radix__flush_tlb_range(struct 
vm_area_struct *vma, unsigned long start, + unsigned long end) + +{ +#ifdef CONFIG_HUGETLB_PAGE + if (is_vm_hugetlb_page(vma)) + return radix__flush_hugetlb_tlb_range(vma, start, end); +#endif + + __radix__flush_tlb_range(vma->vm_mm, start, end, false); +} EXPORT_SYMBOL(radix__flush_tlb_range); static int radix_get_mmu_psize(int page_size) @@ -837,6 +859,8 @@ void radix__tlb_flush(struct mmu_gather *tlb) int psize = 0; struct mm_struct *mm = tlb->mm; int page_size = tlb->page_size; + unsigned long start = tlb->start; + unsigned long end = tlb->end; /* * if page size is not something we understand, do a full mm flush @@ -847,15 +871,45 @@ void radix__tlb_flush(struct mmu_gather *tlb) */ if (tlb->fullmm) { __flush_all_mm(mm, true); +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) + } else if (mm_tlb_flush_nested(mm)) { + /* + * If there is a concurrent invalidation that is clearing ptes, + * then it's possible this invalidation will miss one of those + * cleared ptes and miss flushing the TLB. If this invalidate + * returns before the other one flushes TLBs, that can result + * in it returning while there are still valid TLBs inside the + * range to be invalidated. + * + * See mm/memory.c:tlb_finish_mmu() for more details. + * + * The solution to this is ensure the entire range is always + * flushed here. The problem for powerpc is that the flushes + * are page size specific, so this "forced flush" would not + * do the right thing if there are a mix of page sizes in + * the range to be invalidated. So use __flush_tlb_range + * which invalidates all possible page sizes in the range. + * + * PWC flush probably is not be required because the core code + * shouldn't free page tables in this path, but accounting + * for the possibility makes us a bit more robust. + * + * need_flush_all is an uncommon case because page table + * teardown should be done with exclusive locks held (but + * after locks are dropped another invalidate could come + * in), it could be optimized further if necessary. + */ + if (!tlb->need_flush_all) + __radix__flush_tlb_range(mm, start, end, true); + else + radix__flush_all_mm(mm); +#endif } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) { if (!tlb->need_flush_all) radix__flush_tlb_mm(mm); else radix__flush_all_mm(mm); } else { - unsigned long start = tlb->start; - unsigned long end = tlb->end; - if (!tlb->need_flush_all) radix__flush_tlb_range_psize(mm, start, end, psize); else @@ -1043,6 +1097,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm) for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) { if (sib == cpu) continue; + if (!cpu_possible(sib)) + continue; if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu) flush = true; } diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 7c968e46736f..12e6e4d30602 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -42,7 +42,11 @@ #define DBG(x...) #endif -/* Apparently the RTC stores seconds since 1 Jan 1904 */ +/* + * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU + * times wrap in 2040. If we need to handle later times, the read_time functions + * need to be changed to interpret wrapped times as post-2040. 
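The 1904-to-1970 epoch offset referred to here is easy to verify by hand: 1904 through 1969 is 66 years, 17 of them leap years (1904, 1908, ..., 1968), with no skipped century year in the range. A minimal standalone check, purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long days = 66UL * 365 + 17;
	unsigned long secs = days * 86400;

	printf("%lu\n", secs);  /* 2082844800, the RTC_OFFSET value */
	return 0;
}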
+ */ #define RTC_OFFSET 2082844800 /* @@ -97,8 +101,11 @@ static time64_t cuda_get_time(void) if (req.reply_len != 7) printk(KERN_ERR "cuda_get_time: got %d byte reply\n", req.reply_len); - now = (req.reply[3] << 24) + (req.reply[4] << 16) - + (req.reply[5] << 8) + req.reply[6]; + now = (u32)((req.reply[3] << 24) + (req.reply[4] << 16) + + (req.reply[5] << 8) + req.reply[6]); + /* it's either after year 2040, or the RTC has gone backwards */ + WARN_ON(now < RTC_OFFSET); + return now - RTC_OFFSET; } @@ -106,10 +113,10 @@ static time64_t cuda_get_time(void) static int cuda_set_rtc_time(struct rtc_time *tm) { - time64_t nowtime; + u32 nowtime; struct adb_request req; - nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET; + nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET); if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME, nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0) @@ -140,8 +147,12 @@ static time64_t pmu_get_time(void) if (req.reply_len != 4) printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n", req.reply_len); - now = (req.reply[0] << 24) + (req.reply[1] << 16) - + (req.reply[2] << 8) + req.reply[3]; + now = (u32)((req.reply[0] << 24) + (req.reply[1] << 16) + + (req.reply[2] << 8) + req.reply[3]); + + /* it's either after year 2040, or the RTC has gone backwards */ + WARN_ON(now < RTC_OFFSET); + return now - RTC_OFFSET; } @@ -149,10 +160,10 @@ static time64_t pmu_get_time(void) static int pmu_set_rtc_time(struct rtc_time *tm) { - time64_t nowtime; + u32 nowtime; struct adb_request req; - nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET; + nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET); if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0) return -ENXIO; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index f12680c9b947..4764fdeb4f1f 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -107,6 +107,7 @@ config ARCH_RV32I select GENERIC_LIB_ASHLDI3 select GENERIC_LIB_ASHRDI3 select GENERIC_LIB_LSHRDI3 + select GENERIC_LIB_UCMPDI2 config ARCH_RV64I bool "RV64I" diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h index 5cae4c30cd8e..1e0dfc36aab9 100644 --- a/arch/riscv/include/uapi/asm/elf.h +++ b/arch/riscv/include/uapi/asm/elf.h @@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t; typedef union __riscv_fp_state elf_fpregset_t; -#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32) -#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff) +#if __riscv_xlen == 64 +#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info) +#define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info) +#else +#define ELF_RISCV_R_SYM(r_info) ELF32_R_SYM(r_info) +#define ELF_RISCV_R_TYPE(r_info) ELF32_R_TYPE(r_info) +#endif /* * RISC-V relocation types diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c index b74cbfbce2d0..7bcdaed15703 100644 --- a/arch/riscv/kernel/irq.c +++ b/arch/riscv/kernel/irq.c @@ -16,10 +16,6 @@ #include #include -#ifdef CONFIG_RISCV_INTC -#include -#endif - void __init init_IRQ(void) { irqchip_init(); diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 1d5e9b934b8c..3303ed2cd419 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v) static int apply_r_riscv_branch_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; u32 imm12 = 
(offset & 0x1000) << (31 - 12); u32 imm11 = (offset & 0x800) >> (11 - 7); u32 imm10_5 = (offset & 0x7e0) << (30 - 10); @@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location, static int apply_r_riscv_jal_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; u32 imm20 = (offset & 0x100000) << (31 - 20); u32 imm19_12 = (offset & 0xff000); u32 imm11 = (offset & 0x800) << (20 - 11); @@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location, static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; u16 imm8 = (offset & 0x100) << (12 - 8); u16 imm7_6 = (offset & 0xc0) >> (6 - 5); u16 imm5 = (offset & 0x20) >> (5 - 2); @@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location, static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; u16 imm11 = (offset & 0x800) << (12 - 11); u16 imm10 = (offset & 0x400) >> (10 - 8); u16 imm9_8 = (offset & 0x300) << (12 - 11); @@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location, static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; s32 hi20; if (offset != (s32)offset) { @@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location, static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; s32 hi20; /* Always emit the got entry */ @@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; s32 fill_v = offset; u32 hi20, lo12; @@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, static int apply_r_riscv_call_rela(struct module *me, u32 *location, Elf_Addr v) { - s64 offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - (void *)location; s32 fill_v = offset; u32 hi20, lo12; @@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location, static int apply_r_riscv_add32_rela(struct module *me, u32 *location, Elf_Addr v) { - *(u32 *)location += (*(u32 *)v); + *(u32 *)location += (u32)v; return 0; } static int apply_r_riscv_sub32_rela(struct module *me, u32 *location, Elf_Addr v) { - *(u32 *)location -= (*(u32 *)v); + *(u32 *)location -= (u32)v; return 0; } @@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int j; for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) { - u64 hi20_loc = + unsigned long hi20_loc = sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[j].r_offset; u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info); @@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, Elf_Sym *hi20_sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_RISCV_R_SYM(rel[j].r_info); - u64 hi20_sym_val = + unsigned long hi20_sym_val = hi20_sym->st_value + rel[j].r_addend; 
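As a side note on the RISC-V branch relocation above: the immediate packing can be sanity-checked with a standalone round-trip test. The helpers below only reproduce the B-type bit shuffle in the same style as apply_r_riscv_branch_rela(); the imm4_1 term and the unpack routine are written from the architecture's B-type layout rather than copied from the patch, and the function names are illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t btype_pack(uint32_t off)
{
	uint32_t imm12   = (off & 0x1000) << (31 - 12);
	uint32_t imm11   = (off & 0x800)  >> (11 - 7);
	uint32_t imm10_5 = (off & 0x7e0)  << (30 - 10);
	uint32_t imm4_1  = (off & 0x1e)   << (11 - 4);

	return imm12 | imm11 | imm10_5 | imm4_1;
}

static uint32_t btype_unpack(uint32_t insn)
{
	return ((insn >> 31) & 0x1)  << 12 |
	       ((insn >> 7)  & 0x1)  << 11 |
	       ((insn >> 25) & 0x3f) << 5  |
	       ((insn >> 8)  & 0xf)  << 1;
}

int main(void)
{
	/* every even 13-bit offset survives a pack/unpack round trip */
	for (uint32_t off = 0; off < 0x2000; off += 2)
		assert(btype_unpack(btype_pack(off)) == off);

	printf("B-type immediate round-trip OK\n");
	return 0;
}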
/* Calculate lo12 */ - u64 offset = hi20_sym_val - hi20_loc; + size_t offset = hi20_sym_val - hi20_loc; if (IS_ENABLED(CONFIG_MODULE_SECTIONS) && hi20_type == R_RISCV_GOT_HI20) { offset = module_emit_got_entry( diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index ba3e80712797..9f82a7e34c64 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target, struct pt_regs *regs; regs = task_pt_regs(target); - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®s, 0, -1); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1); return ret; } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index ee44a48faf79..f0d2070866d4 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -220,8 +220,3 @@ void __init setup_arch(char **cmdline_p) riscv_fill_hwcap(); } -static int __init riscv_device_init(void) -{ - return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); -} -subsys_initcall_sync(riscv_device_init); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index c77df8142be2..58a522f9bcc3 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -28,7 +28,9 @@ static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; +#ifdef CONFIG_ZONE_DMA32 max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); +#endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; free_area_init_nodes(max_zone_pfns); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index baed39772c84..e44bb2b2873e 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -160,6 +160,7 @@ config S390 select HAVE_OPROFILE select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RSEQ select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING select MODULES_USE_ELF_RELA diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h index 0563fd3e8458..480bb02ccacd 100644 --- a/arch/s390/include/asm/css_chars.h +++ b/arch/s390/include/asm/css_chars.h @@ -6,36 +6,38 @@ struct css_general_char { u64 : 12; - u32 dynio : 1; /* bit 12 */ - u32 : 4; - u32 eadm : 1; /* bit 17 */ - u32 : 23; - u32 aif : 1; /* bit 41 */ - u32 : 3; - u32 mcss : 1; /* bit 45 */ - u32 fcs : 1; /* bit 46 */ - u32 : 1; - u32 ext_mb : 1; /* bit 48 */ - u32 : 7; - u32 aif_tdd : 1; /* bit 56 */ - u32 : 1; - u32 qebsm : 1; /* bit 58 */ - u32 : 2; - u32 aiv : 1; /* bit 61 */ - u32 : 5; - u32 aif_osa : 1; /* bit 67 */ - u32 : 12; - u32 eadm_rf : 1; /* bit 80 */ - u32 : 1; - u32 cib : 1; /* bit 82 */ - u32 : 5; - u32 fcx : 1; /* bit 88 */ - u32 : 19; - u32 alt_ssi : 1; /* bit 108 */ - u32 : 1; - u32 narf : 1; /* bit 110 */ - u32 : 12; - u32 util_str : 1;/* bit 123 */ + u64 dynio : 1; /* bit 12 */ + u64 : 4; + u64 eadm : 1; /* bit 17 */ + u64 : 23; + u64 aif : 1; /* bit 41 */ + u64 : 3; + u64 mcss : 1; /* bit 45 */ + u64 fcs : 1; /* bit 46 */ + u64 : 1; + u64 ext_mb : 1; /* bit 48 */ + u64 : 7; + u64 aif_tdd : 1; /* bit 56 */ + u64 : 1; + u64 qebsm : 1; /* bit 58 */ + u64 : 2; + u64 aiv : 1; /* bit 61 */ + u64 : 2; + + u64 : 3; + u64 aif_osa : 1; /* bit 67 */ + u64 : 12; + u64 eadm_rf : 1; /* bit 80 */ + u64 : 1; + u64 cib : 1; /* bit 82 */ + u64 : 5; + u64 fcx : 1; /* bit 88 */ + u64 : 19; + u64 alt_ssi : 1; /* bit 108 */ + u64 : 1; + u64 narf : 1; /* bit 110 */ + u64 : 12; + u64 util_str : 1;/* bit 123 */ } __packed; extern struct css_general_char css_general_characteristics; diff --git 
a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 607c5e9fba3d..2ce28bf0c5ec 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -183,3 +183,4 @@ COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags) +COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index f03402efab4b..150130c897c3 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -357,6 +357,10 @@ ENTRY(system_call) stg %r2,__PT_R2(%r11) # store return value .Lsysc_return: +#ifdef CONFIG_DEBUG_RSEQ + lgr %r2,%r11 + brasl %r14,rseq_syscall +#endif LOCKDEP_SYS_EXIT .Lsysc_tif: TSTMSK __PT_FLAGS(%r11),_PIF_WORK @@ -1265,7 +1269,7 @@ cleanup_critical: jl 0f clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end jl .Lcleanup_load_fpu_regs -0: BR_EX %r14 +0: BR_EX %r14,%r11 .align 8 .Lcleanup_table: @@ -1301,7 +1305,7 @@ cleanup_critical: ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit - BR_EX %r14 + BR_EX %r14,%r11 #endif .Lcleanup_system_call: diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 2d2960ab3e10..22f08245aa5d 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -498,7 +498,7 @@ void do_signal(struct pt_regs *regs) } /* No longer in a system call */ clear_pt_regs_flag(regs, PIF_SYSCALL); - + rseq_signal_deliver(&ksig, regs); if (is_compat_task()) handle_signal32(&ksig, oldset, regs); else @@ -537,4 +537,5 @@ void do_notify_resume(struct pt_regs *regs) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); } diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index 8b210ead7956..022fc099b628 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -389,3 +389,5 @@ 379 common statx sys_statx compat_sys_statx 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi 381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load +382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents +383 common rseq sys_rseq compat_sys_rseq diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 84bd6329a88d..e3bd5627afef 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -252,6 +252,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) spin_unlock_bh(&mm->context.lock); if (mask != 0) return; + } else { + atomic_xor_bits(&page->_refcount, 3U << 24); } pgtable_page_dtor(page); @@ -304,6 +306,8 @@ static void __tlb_remove_table(void *_table) break; /* fallthrough */ case 3: /* 4K page table with pgstes */ + if (mask & 3) + atomic_xor_bits(&page->_refcount, 3 << 24); pgtable_page_dtor(page); __free_page(page); break; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index d2db8acb1a55..5f0234ec8038 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ 
-1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) goto free_addrs; } if (bpf_jit_prog(&jit, fp)) { + bpf_jit_binary_free(header); fp = orig_fp; goto free_addrs; } diff --git a/arch/x86/Makefile b/arch/x86/Makefile index f0a6ea22429d..a08e82856563 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -258,11 +258,6 @@ archscripts: scripts_basic archheaders: $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all -archprepare: -ifeq ($(CONFIG_KEXEC_FILE),y) - $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c -endif - ### # Kernel objects @@ -327,7 +322,6 @@ archclean: $(Q)rm -rf $(objtree)/arch/x86_64 $(Q)$(MAKE) $(clean)=$(boot) $(Q)$(MAKE) $(clean)=arch/x86/tools - $(Q)$(MAKE) $(clean)=arch/x86/purgatory define archhelp echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)' diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index a8a8642d2b0b..e98522ea6f09 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -114,18 +114,12 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) struct pci_setup_rom *rom = NULL; efi_status_t status; unsigned long size; - uint64_t attributes, romsize; + uint64_t romsize; void *romimage; - status = efi_call_proto(efi_pci_io_protocol, attributes, pci, - EfiPciIoAttributeOperationGet, 0, 0, - &attributes); - if (status != EFI_SUCCESS) - return status; - /* - * Some firmware images contain EFI function pointers at the place where the - * romimage and romsize fields are supposed to be. Typically the EFI + * Some firmware images contain EFI function pointers at the place where + * the romimage and romsize fields are supposed to be. Typically the EFI * code is mapped at high addresses, translating to an unrealistically * large romsize. The UEFI spec limits the size of option ROMs to 16 * MiB so we reject any ROMs over 16 MiB in size to catch this. 
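The __setup_efi_pci() hunk above removes the EfiPciIoAttributeOperationGet call and relies on the size sanity check described in the retained comment. As a hedged aside (standalone C, not kernel code; all names below are made up), this sketch shows the same pattern of refusing to trust a firmware-reported option ROM size larger than the 16 MiB limit the UEFI spec allows:

/*
 * Illustrative sketch only: treat an implausibly large, firmware-reported
 * option ROM size as invalid before using it for any allocation or copy.
 * The 16 MiB cap mirrors the UEFI limit mentioned in the comment above.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define ROM_SIZE_MAX (16UL * 1024 * 1024)   /* 16 MiB, per UEFI spec */

static bool rom_size_plausible(uint64_t romsize)
{
        /* Zero or oversized values indicate garbage fields in firmware. */
        return romsize != 0 && romsize <= ROM_SIZE_MAX;
}

int main(void)
{
        /* A value that looks like a code pointer rather than a size. */
        uint64_t reported = 0x7f8e244000ULL;

        printf("romsize 0x%llx plausible: %d\n",
               (unsigned long long)reported, rom_size_plausible(reported));
        return 0;
}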
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S index 9254e0b6cc06..717bf0776421 100644 --- a/arch/x86/crypto/aegis128-aesni-asm.S +++ b/arch/x86/crypto/aegis128-aesni-asm.S @@ -535,6 +535,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail) movdqu STATE3, 0x40(STATEP) FRAME_END + ret ENDPROC(crypto_aegis128_aesni_enc_tail) .macro decrypt_block a s0 s1 s2 s3 s4 i diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S b/arch/x86/crypto/aegis128l-aesni-asm.S index 9263c344f2c7..4eda2b8db9e1 100644 --- a/arch/x86/crypto/aegis128l-aesni-asm.S +++ b/arch/x86/crypto/aegis128l-aesni-asm.S @@ -645,6 +645,7 @@ ENTRY(crypto_aegis128l_aesni_enc_tail) state_store0 FRAME_END + ret ENDPROC(crypto_aegis128l_aesni_enc_tail) /* diff --git a/arch/x86/crypto/aegis256-aesni-asm.S b/arch/x86/crypto/aegis256-aesni-asm.S index 1d977d515bf9..32aae8397268 100644 --- a/arch/x86/crypto/aegis256-aesni-asm.S +++ b/arch/x86/crypto/aegis256-aesni-asm.S @@ -543,6 +543,7 @@ ENTRY(crypto_aegis256_aesni_enc_tail) state_store0 FRAME_END + ret ENDPROC(crypto_aegis256_aesni_enc_tail) /* diff --git a/arch/x86/crypto/morus1280-avx2-asm.S b/arch/x86/crypto/morus1280-avx2-asm.S index 37d422e77931..07653d4582a6 100644 --- a/arch/x86/crypto/morus1280-avx2-asm.S +++ b/arch/x86/crypto/morus1280-avx2-asm.S @@ -453,6 +453,7 @@ ENTRY(crypto_morus1280_avx2_enc_tail) vmovdqu STATE4, (4 * 32)(%rdi) FRAME_END + ret ENDPROC(crypto_morus1280_avx2_enc_tail) /* diff --git a/arch/x86/crypto/morus1280-sse2-asm.S b/arch/x86/crypto/morus1280-sse2-asm.S index 1fe637c7be9d..bd1aa1b60869 100644 --- a/arch/x86/crypto/morus1280-sse2-asm.S +++ b/arch/x86/crypto/morus1280-sse2-asm.S @@ -652,6 +652,7 @@ ENTRY(crypto_morus1280_sse2_enc_tail) movdqu STATE4_HI, (9 * 16)(%rdi) FRAME_END + ret ENDPROC(crypto_morus1280_sse2_enc_tail) /* diff --git a/arch/x86/crypto/morus640-sse2-asm.S b/arch/x86/crypto/morus640-sse2-asm.S index 71c72a0a0862..efa02816d921 100644 --- a/arch/x86/crypto/morus640-sse2-asm.S +++ b/arch/x86/crypto/morus640-sse2-asm.S @@ -437,6 +437,7 @@ ENTRY(crypto_morus640_sse2_enc_tail) movdqu STATE4, (4 * 16)(%rdi) FRAME_END + ret ENDPROC(crypto_morus640_sse2_enc_tail) /* diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 92190879b228..3b2490b81918 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -164,7 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) if (cached_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); - rseq_handle_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); } if (cached_flags & _TIF_USER_RETURN_NOTIFY) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 2582881d19ce..c371bfee137a 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -477,7 +477,7 @@ ENTRY(entry_SYSENTER_32) * whereas POPF does not.) 
*/ addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */ - btr $X86_EFLAGS_IF_BIT, (%esp) + btrl $X86_EFLAGS_IF_BIT, (%esp) popfl /* diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 9de7f1e1dede..7d0df78db727 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat) pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ - pushq %r8 /* pt_regs->r8 */ + pushq $0 /* pt_regs->r8 = 0 */ xorl %r8d, %r8d /* nospec r8 */ - pushq %r9 /* pt_regs->r9 */ + pushq $0 /* pt_regs->r9 = 0 */ xorl %r9d, %r9d /* nospec r9 */ - pushq %r10 /* pt_regs->r10 */ + pushq $0 /* pt_regs->r10 = 0 */ xorl %r10d, %r10d /* nospec r10 */ - pushq %r11 /* pt_regs->r11 */ + pushq $0 /* pt_regs->r11 = 0 */ xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ xorl %ebx, %ebx /* nospec rbx */ @@ -374,13 +374,13 @@ ENTRY(entry_INT80_compat) pushq %rcx /* pt_regs->cx */ xorl %ecx, %ecx /* nospec cx */ pushq $-ENOSYS /* pt_regs->ax */ - pushq $0 /* pt_regs->r8 = 0 */ + pushq %r8 /* pt_regs->r8 */ xorl %r8d, %r8d /* nospec r8 */ - pushq $0 /* pt_regs->r9 = 0 */ + pushq %r9 /* pt_regs->r9 */ xorl %r9d, %r9d /* nospec r9 */ - pushq $0 /* pt_regs->r10 = 0 */ + pushq %r10 /* pt_regs->r10*/ xorl %r10d, %r10d /* nospec r10 */ - pushq $0 /* pt_regs->r11 = 0 */ + pushq %r11 /* pt_regs->r11 */ xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ xorl %ebx, %ebx /* nospec rbx */ diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index f68855499391..402338365651 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); } + if (nr_bank < 0) + goto ipi_mask_ex_done; if (!nr_bank) ipi_arg->vp_set.format = HV_GENERIC_SET_ALL; @@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) for_each_cpu(cur_cpu, mask) { vcpu = hv_cpu_number_to_vp_number(cur_cpu); + if (vcpu == VP_INVAL) + goto ipi_mask_done; + /* * This particular version of the IPI hypercall can * only target upto 64 CPUs. 
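The Hyper-V IPI hunks above add early bail-outs when cpumask_to_vpset() fails or a CPU maps to an invalid VP number (VP_INVAL), so the fast hypercall path is abandoned instead of being issued with bogus targets. A minimal sketch of that guard pattern, using stand-in names (this is not the driver code):

/*
 * Illustrative sketch only: abort the fast path as soon as a CPU maps to an
 * invalid VP number, letting the caller fall back to a safer per-CPU path.
 * VP_INVAL and cpu_to_vp[] are stand-ins for the real mapping.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VP_INVAL UINT32_MAX

/* Pretend mapping table; the hole models a CPU whose VP number is unknown. */
static const uint32_t cpu_to_vp[] = { 0, 1, VP_INVAL, 3 };

static bool send_ipi_fast(const uint32_t *cpus, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                if (cpu_to_vp[cpus[i]] == VP_INVAL)
                        return false;   /* caller falls back to slow path */
        }
        /* ...issue the single hypercall covering all targets here... */
        return true;
}

int main(void)
{
        uint32_t targets[] = { 0, 2 };

        if (!send_ipi_fast(targets, 2))
                printf("fell back to per-CPU slow path\n");
        return 0;
}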
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 4c431e1c1eff..1ff420217298 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -265,7 +265,7 @@ void __init hyperv_init(void) { u64 guest_id, required_msrs; union hv_x64_msr_hypercall_contents hypercall_msr; - int cpuhp; + int cpuhp, i; if (x86_hyper_type != X86_HYPER_MS_HYPERV) return; @@ -293,6 +293,9 @@ void __init hyperv_init(void) if (!hv_vp_index) return; + for (i = 0; i < num_possible_cpus(); i++) + hv_vp_index[i] = VP_INVAL; + hv_vp_assist_page = kcalloc(num_possible_cpus(), sizeof(*hv_vp_assist_page), GFP_KERNEL); if (!hv_vp_assist_page) { diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 219faaec51df..990770f9e76b 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -46,6 +46,65 @@ #define _ASM_SI __ASM_REG(si) #define _ASM_DI __ASM_REG(di) +#ifndef __x86_64__ +/* 32 bit */ + +#define _ASM_ARG1 _ASM_AX +#define _ASM_ARG2 _ASM_DX +#define _ASM_ARG3 _ASM_CX + +#define _ASM_ARG1L eax +#define _ASM_ARG2L edx +#define _ASM_ARG3L ecx + +#define _ASM_ARG1W ax +#define _ASM_ARG2W dx +#define _ASM_ARG3W cx + +#define _ASM_ARG1B al +#define _ASM_ARG2B dl +#define _ASM_ARG3B cl + +#else +/* 64 bit */ + +#define _ASM_ARG1 _ASM_DI +#define _ASM_ARG2 _ASM_SI +#define _ASM_ARG3 _ASM_DX +#define _ASM_ARG4 _ASM_CX +#define _ASM_ARG5 r8 +#define _ASM_ARG6 r9 + +#define _ASM_ARG1Q rdi +#define _ASM_ARG2Q rsi +#define _ASM_ARG3Q rdx +#define _ASM_ARG4Q rcx +#define _ASM_ARG5Q r8 +#define _ASM_ARG6Q r9 + +#define _ASM_ARG1L edi +#define _ASM_ARG2L esi +#define _ASM_ARG3L edx +#define _ASM_ARG4L ecx +#define _ASM_ARG5L r8d +#define _ASM_ARG6L r9d + +#define _ASM_ARG1W di +#define _ASM_ARG2W si +#define _ASM_ARG3W dx +#define _ASM_ARG4W cx +#define _ASM_ARG5W r8w +#define _ASM_ARG6W r9w + +#define _ASM_ARG1B dil +#define _ASM_ARG2B sil +#define _ASM_ARG3B dl +#define _ASM_ARG4B cl +#define _ASM_ARG5B r8b +#define _ASM_ARG6B r9b + +#endif + /* * Macros to generate condition code outputs from inline assembly, * The output operand must be type "bool". diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 042b5e892ed1..14de0432d288 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, { unsigned long mask; - asm ("cmp %1,%2; sbb %0,%0;" + asm volatile ("cmp %1,%2; sbb %0,%0;" :"=r" (mask) :"g"(size),"r" (index) :"cc"); diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 89f08955fff7..c4fc17220df9 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -13,7 +13,7 @@ * Interrupt control: */ -static inline unsigned long native_save_fl(void) +extern inline unsigned long native_save_fl(void) { unsigned long flags; diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 3cd14311edfa..5a7375ed5f7c 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -9,6 +9,8 @@ #include #include +#define VP_INVAL U32_MAX + struct ms_hyperv_info { u32 features; u32 misc_features; @@ -20,7 +22,6 @@ struct ms_hyperv_info { extern struct ms_hyperv_info ms_hyperv; - /* * Generate the guest ID. 
*/ @@ -281,6 +282,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, */ for_each_cpu(cpu, cpus) { vcpu = hv_cpu_number_to_vp_number(cpu); + if (vcpu == VP_INVAL) + return -1; vcpu_bank = vcpu / 64; vcpu_offset = vcpu % 64; __set_bit(vcpu_offset, (unsigned long *) diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index ada6410fd2ec..fbd578daa66e 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -184,6 +184,9 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) { + if (!pgtable_l5_enabled()) + return; + BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); free_page((unsigned long)p4d); } diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 99ecde23c3ec..5715647fc4fe 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -898,7 +898,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) /* to find an entry in a page-table-directory. */ -static __always_inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) { if (!pgtable_l5_enabled()) return (p4d_t *)pgd; diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 0fdcd21dadbd..3c5385f9a88f 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) } #endif -static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) +static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) { pgd_t pgd; @@ -230,7 +230,7 @@ static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) *p4dp = native_make_p4d(native_pgd_val(pgd)); } -static __always_inline void native_p4d_clear(p4d_t *p4d) +static inline void native_p4d_clear(p4d_t *p4d) { native_set_p4d(p4d, native_make_p4d(0)); } diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 425e6b8b9547..6aa8499e1f62 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -114,6 +114,7 @@ #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f #define VMX_MISC_SAVE_EFER_LMA 0x00000020 #define VMX_MISC_ACTIVITY_HLT 0x00000040 +#define VMX_MISC_ZERO_LEN_INS 0x40000000 /* VMFUNC functions */ #define VMX_VMFUNC_EPTP_SWITCHING 0x00000001 @@ -351,11 +352,13 @@ enum vmcs_field { #define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ +#define INTR_TYPE_RESERVED (1 << 8) /* reserved */ #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ #define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */ #define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ +#define INTR_TYPE_OTHER_EVENT (7 << 8) /* other event */ /* GUEST_INTERRUPTIBILITY_INFO flags. 
*/ #define GUEST_INTR_STATE_STI 0x00000001 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 02d6f5cf4e70..8824d01c0c35 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -61,6 +61,7 @@ obj-y += alternative.o i8253.o hw_breakpoint.o obj-y += tsc.o tsc_msr.o io_delay.o rtc.o obj-y += pci-iommu_table.o obj-y += resource.o +obj-y += irqflags.o obj-y += process.o obj-y += fpu/ diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index efaf2d4f9c3c..d492752f79e1 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -392,6 +393,51 @@ extern int uv_hub_info_version(void) } EXPORT_SYMBOL(uv_hub_info_version); +/* Default UV memory block size is 2GB */ +static unsigned long mem_block_size = (2UL << 30); + +/* Kernel parameter to specify UV mem block size */ +static int parse_mem_block_size(char *ptr) +{ + unsigned long size = memparse(ptr, NULL); + + /* Size will be rounded down by set_block_size() below */ + mem_block_size = size; + return 0; +} +early_param("uv_memblksize", parse_mem_block_size); + +static __init int adj_blksize(u32 lgre) +{ + unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT; + unsigned long size; + + for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1) + if (IS_ALIGNED(base, size)) + break; + + if (size >= mem_block_size) + return 0; + + mem_block_size = size; + return 1; +} + +static __init void set_block_size(void) +{ + unsigned int order = ffs(mem_block_size); + + if (order) { + /* adjust for ffs return of 1..64 */ + set_memory_block_size_order(order - 1); + pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size); + } else { + /* bad or zero value, default to 1UL << 31 (2GB) */ + pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size); + set_memory_block_size_order(31); + } +} + /* Build GAM range lookup table: */ static __init void build_uv_gr_table(void) { @@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr) << UV_GAM_RANGE_SHFT); int order = 0; char suffix[] = " KMGTPE"; + int flag = ' '; while (size > 9999 && order < sizeof(suffix)) { size /= 1024; order++; } + /* adjust max block size to current range start */ + if (gre->type == 1 || gre->type == 2) + if (adj_blksize(lgre)) + flag = '*'; + if (!index) { pr_info("UV: GAM Range Table...\n"); - pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); + pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); } - pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n", + pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n", index++, (unsigned long)lgre << UV_GAM_RANGE_SHFT, (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, - size, suffix[order], + flag, size, suffix[order], gre->type, gre->nasid, gre->sockid, gre->pnode); + /* update to next range start */ lgre = gre->limit; if (sock_min > gre->sockid) sock_min = gre->sockid; @@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void) build_socket_tables(); build_uv_gr_table(); + set_block_size(); uv_init_hub_info(&hub_info); uv_possible_blades = num_possible_nodes(); if (!_node_to_pnode) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 082d7875cef8..38915fbfae73 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -543,7 +543,9 @@ static void bsp_init_amd(struct 
cpuinfo_x86 *c) nodes_per_socket = ((value >> 3) & 7) + 1; } - if (c->x86 >= 0x15 && c->x86 <= 0x17) { + if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && + !boot_cpu_has(X86_FEATURE_VIRT_SSBD) && + c->x86 >= 0x15 && c->x86 <= 0x17) { unsigned int bit; switch (c->x86) { diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index cd0fda1fff6d..5c0ea39311fe 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -27,6 +27,7 @@ #include #include #include +#include static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); @@ -154,7 +155,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; /* SSBD controlled in MSR_SPEC_CTRL */ - if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) + if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || + static_cpu_has(X86_FEATURE_AMD_SSBD)) hostval |= ssbd_tif_to_spec_ctrl(ti->flags); if (hostval != guestval) { @@ -532,9 +534,10 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may * use a completely different MSR and bit dependent on family. */ - if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) + if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && + !static_cpu_has(X86_FEATURE_AMD_SSBD)) { x86_amd_ssb_disable(); - else { + } else { x86_spec_ctrl_base |= SPEC_CTRL_SSBD; x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); @@ -664,6 +667,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr if (boot_cpu_has(X86_FEATURE_PTI)) return sprintf(buf, "Mitigation: PTI\n"); + if (hypervisor_is_type(X86_HYPER_XEN_PV)) + return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); + break; case X86_BUG_SPECTRE_V1: diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 38354c66df81..0c5fcbd998cf 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -671,7 +671,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) num_sharing_cache = ((eax >> 14) & 0xfff) + 1; if (num_sharing_cache) { - int bits = get_count_order(num_sharing_cache) - 1; + int bits = get_count_order(num_sharing_cache); per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0df7151cfef4..eb4cb3efd20e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1,3 +1,6 @@ +/* cpu_feature_enabled() cannot be used this early */ +#define USE_EARLY_PGTABLE_L5 + #include #include #include diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index 5bbd06f38ff6..f34d89c01edc 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -160,6 +160,11 @@ static struct severity { SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), USER ), + MCESEV( + PANIC, "Data load in unrecoverable area of kernel", + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), + KERNEL + ), #endif MCESEV( PANIC, "Action required: unknown MCACOD", diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index e4cf6ff1c2e1..c102ad51025e 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -772,23 +772,25 @@ 
EXPORT_SYMBOL_GPL(machine_check_poll); static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, struct pt_regs *regs) { - int i, ret = 0; char *tmp; + int i; for (i = 0; i < mca_cfg.banks; i++) { m->status = mce_rdmsrl(msr_ops.status(i)); - if (m->status & MCI_STATUS_VAL) { - __set_bit(i, validp); - if (quirk_no_way_out) - quirk_no_way_out(i, m, regs); - } + if (!(m->status & MCI_STATUS_VAL)) + continue; + + __set_bit(i, validp); + if (quirk_no_way_out) + quirk_no_way_out(i, m, regs); if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + mce_read_aux(m, i); *msg = tmp; - ret = 1; + return 1; } } - return ret; + return 0; } /* @@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* + * Local machine check may already know that we have to panic. + * Broadcast machine check begins rendezvous in mce_start() * Go through all banks in exclusion of the other CPUs. This way we * don't report duplicated events on shared banks because the first one - * to see it will clear it. If this is a Local MCE, then no need to - * perform rendezvous. + * to see it will clear it. */ - if (!lmce) + if (lmce) { + if (no_way_out) + mce_panic("Fatal local machine check", &m, msg); + } else { order = mce_start(&no_way_out); + } for (i = 0; i < cfg->banks; i++) { __clear_bit(i, toclear); @@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code) no_way_out = worst >= MCE_PANIC_SEVERITY; } else { /* - * Local MCE skipped calling mce_reign() - * If we found a fatal error, we need to panic here. + * If there was a fatal machine check we should have + * already called mce_panic earlier in this function. + * Since we re-read the banks, we might have found + * something new. Check again to see if we found a + * fatal error. We call "mce_severity()" again to + * make sure we have the right "msg". 
*/ - if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) - mce_panic("Machine check from unknown source", - NULL, NULL); + if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { + mce_severity(&m, cfg->tolerant, &msg, true); + mce_panic("Local fatal machine check!", &m, msg); + } } /* diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 1c2cfa0644aa..97ccf4c3b45b 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size) p = memdup_patch(data, size); if (!p) pr_err("Error allocating buffer %p\n", data); - else + else { list_replace(&iter->plist, &p->plist); + kfree(iter->data); + kfree(iter); + } } } diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 4021d3859499..40eee6cc4124 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) memset(line, 0, LINE_SIZE); - length = strncpy_from_user(line, buf, LINE_SIZE - 1); + len = min_t(size_t, len, LINE_SIZE - 1); + length = strncpy_from_user(line, buf, len); if (length < 0) return length; diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index d1f25c831447..c88c23c658c1 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void) { int i; u64 end; + u64 addr = 0; /* * The bootstrap memblock region count maximum is 128 entries @@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void) struct e820_entry *entry = &e820_table->entries[i]; end = entry->addr + entry->size; + if (addr < entry->addr) + memblock_reserve(addr, entry->addr - addr); + addr = end; if (end != (resource_size_t)end) continue; + /* + * all !E820_TYPE_RAM ranges (including gap ranges) are put + * into memblock.reserved to make sure that struct pages in + * such regions are not left uninitialized after bootup. 
+ */ if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) - continue; - - memblock_add(entry->addr, entry->size); + memblock_reserve(entry->addr, entry->size); + else + memblock_add(entry->addr, entry->size); } /* Throw away partial pages: */ diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index a21d6ace648e..8047379e575a 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -44,7 +44,7 @@ static unsigned int __initdata next_early_pgt; pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); #ifdef CONFIG_X86_5LEVEL -unsigned int __pgtable_l5_enabled __initdata; +unsigned int __pgtable_l5_enabled __ro_after_init; unsigned int pgdir_shift __ro_after_init = 39; EXPORT_SYMBOL(pgdir_shift); unsigned int ptrs_per_p4d __ro_after_init = 1; diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S new file mode 100644 index 000000000000..ddeeaac8adda --- /dev/null +++ b/arch/x86/kernel/irqflags.S @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include + +/* + * unsigned long native_save_fl(void) + */ +ENTRY(native_save_fl) + pushf + pop %_ASM_AX + ret +ENDPROC(native_save_fl) +EXPORT_SYMBOL(native_save_fl) + +/* + * void native_restore_fl(unsigned long flags) + * %eax/%rdi: flags + */ +ENTRY(native_restore_fl) + push %_ASM_ARG1 + popf + ret +ENDPROC(native_restore_fl) +EXPORT_SYMBOL(native_restore_fl) diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 697a4ce04308..736348ead421 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev) /* Skylake */ static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev) { - u32 capid0; + u32 capid0, capid5; pci_read_config_dword(pdev, 0x84, &capid0); + pci_read_config_dword(pdev, 0x98, &capid5); - if ((capid0 & 0xc0) == 0xc0) + /* + * CAPID0{7:6} indicate whether this is an advanced RAS SKU + * CAPID5{8:5} indicate that various NVDIMM usage modes are + * enabled, so memory machine check recovery is also enabled. + */ + if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0)) static_branch_inc(&mcsafe_key); + } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap); diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 445ca11ff863..92a3b312a53c 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) * Increment event counter and perform fixup for the pre-signal * frame. */ - rseq_signal_deliver(regs); + rseq_signal_deliver(ksig, regs); /* Set up the stack frame */ if (is_ia32_frame(ksig)) { diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c2f7d1d2a5c3..db9656e13ea0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -221,6 +221,11 @@ static void notrace start_secondary(void *unused) #ifdef CONFIG_X86_32 /* switch away from the initial page table */ load_cr3(swapper_pg_dir); + /* + * Initialize the CR4 shadow before doing anything that could + * try to read it. 
+ */ + cr4_init_shadow(); __flush_tlb_all(); #endif load_current_idt(); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index a535dd64de63..e6db475164ed 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -835,16 +835,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : "simd exception"; - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) - return; cond_local_irq_enable(regs); if (!user_mode(regs)) { - if (!fixup_exception(regs, trapnr)) { - task->thread.error_code = error_code; - task->thread.trap_nr = trapnr; + if (fixup_exception(regs, trapnr)) + return; + + task->thread.error_code = error_code; + task->thread.trap_nr = trapnr; + + if (notify_die(DIE_TRAP, str, regs, error_code, + trapnr, SIGFPE) != NOTIFY_STOP) die(str, regs, error_code); - } return; } diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 58d8d800875d..deb576b23b7c 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -293,7 +293,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64); /* has the side-effect of processing the entire instruction */ insn_get_length(insn); - if (WARN_ON_ONCE(!insn_complete(insn))) + if (!insn_complete(insn)) return -ENOEXEC; if (is_prefix_bad(insn)) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 559a12b6184d..1689f433f3a0 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1705,6 +1705,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu) MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS; } +static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu) +{ + return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS; +} + +static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu) +{ + return to_vmx(vcpu)->nested.msrs.procbased_ctls_high & + CPU_BASED_MONITOR_TRAP_FLAG; +} + static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) { return vmcs12->cpu_based_vm_exec_control & bit; @@ -11620,6 +11631,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) !nested_cr3_valid(vcpu, vmcs12->host_cr3)) return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; + /* + * From the Intel SDM, volume 3: + * Fields relevant to VM-entry event injection must be set properly. + * These fields are the VM-entry interruption-information field, the + * VM-entry exception error code, and the VM-entry instruction length. 
+ */ + if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { + u32 intr_info = vmcs12->vm_entry_intr_info_field; + u8 vector = intr_info & INTR_INFO_VECTOR_MASK; + u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; + bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; + bool should_have_error_code; + bool urg = nested_cpu_has2(vmcs12, + SECONDARY_EXEC_UNRESTRICTED_GUEST); + bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; + + /* VM-entry interruption-info field: interruption type */ + if (intr_type == INTR_TYPE_RESERVED || + (intr_type == INTR_TYPE_OTHER_EVENT && + !nested_cpu_supports_monitor_trap_flag(vcpu))) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + + /* VM-entry interruption-info field: vector */ + if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || + (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || + (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + + /* VM-entry interruption-info field: deliver error code */ + should_have_error_code = + intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && + x86_exception_has_error_code(vector); + if (has_error_code != should_have_error_code) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + + /* VM-entry exception error code */ + if (has_error_code && + vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + + /* VM-entry interruption-info field: reserved bits */ + if (intr_info & INTR_INFO_RESVD_BITS_MASK) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + + /* VM-entry instruction length */ + switch (intr_type) { + case INTR_TYPE_SOFT_EXCEPTION: + case INTR_TYPE_SOFT_INTR: + case INTR_TYPE_PRIV_SW_EXCEPTION: + if ((vmcs12->vm_entry_instruction_len > 15) || + (vmcs12->vm_entry_instruction_len == 0 && + !nested_cpu_has_zero_length_injection(vcpu))) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + } + } + return 0; } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 331993c49dae..257f27620bc2 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu) #endif } +static inline bool x86_exception_has_error_code(unsigned int vector) +{ + static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) | + BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) | + BIT(PF_VECTOR) | BIT(AC_VECTOR); + + return (1U << vector) & exception_has_error_code; +} + static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) { return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9a84a0d08727..2aafa6ab6103 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -641,11 +641,6 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) return 0; } -static const char nx_warning[] = KERN_CRIT -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; -static const char smep_warning[] = KERN_CRIT -"unable to execute userspace code (SMEP?) (uid: %d)\n"; - static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address) @@ -664,20 +659,18 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, pte = lookup_address_in_pgd(pgd, address, &level); if (pte && pte_present(*pte) && !pte_exec(*pte)) - printk(nx_warning, from_kuid(&init_user_ns, current_uid())); + pr_crit("kernel tried to execute NX-protected page - exploit attempt? 
(uid: %d)\n", + from_kuid(&init_user_ns, current_uid())); if (pte && pte_present(*pte) && pte_exec(*pte) && (pgd_flags(*pgd) & _PAGE_USER) && (__read_cr4() & X86_CR4_SMEP)) - printk(smep_warning, from_kuid(&init_user_ns, current_uid())); + pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n", + from_kuid(&init_user_ns, current_uid())); } - printk(KERN_ALERT "BUG: unable to handle kernel "); - if (address < PAGE_SIZE) - printk(KERN_CONT "NULL pointer dereference"); - else - printk(KERN_CONT "paging request"); - - printk(KERN_CONT " at %px\n", (void *) address); + pr_alert("BUG: unable to handle kernel %s at %px\n", + address < PAGE_SIZE ? "NULL pointer dereference" : "paging request", + (void *)address); dump_pagetable(address); } diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 045f492d5f68..a688617c727e 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1350,16 +1350,28 @@ int kern_addr_valid(unsigned long addr) /* Amount of ram needed to start using large blocks */ #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30) +/* Adjustable memory block size */ +static unsigned long set_memory_block_size; +int __init set_memory_block_size_order(unsigned int order) +{ + unsigned long size = 1UL << order; + + if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE) + return -EINVAL; + + set_memory_block_size = size; + return 0; +} + static unsigned long probe_memory_block_size(void) { unsigned long boot_mem_end = max_pfn << PAGE_SHIFT; unsigned long bz; - /* If this is UV system, always set 2G block size */ - if (is_uv_system()) { - bz = MAX_BLOCK_SIZE; + /* If memory block size has been set, then use it */ + bz = set_memory_block_size; + if (bz) goto done; - } /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */ if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) { diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index e01f7ceb9e7a..77873ce700ae 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE); set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) + if (!pgd_present(*pgd)) continue; for (i = 0; i < PTRS_PER_P4D; i++) { p4d = p4d_offset(pgd, pgd_idx * PGDIR_SIZE + i * P4D_SIZE); - if (!(p4d_val(*p4d) & _PAGE_PRESENT)) + if (!p4d_present(*p4d)) continue; pud = (pud_t *)p4d_page_vaddr(*p4d); diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 2e9ee023e6bc..81a8e33115ad 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string targets += $(purgatory-y) PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) -$(obj)/sha256.o: $(srctree)/lib/sha256.c +$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE $(call if_changed_rule,cc_o_c) LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c9081c6671f0..3b5318505c69 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -64,6 +64,13 @@ struct shared_info xen_dummy_shared_info; __read_mostly int xen_have_vector_callback; EXPORT_SYMBOL_GPL(xen_have_vector_callback); +/* + * NB: needs to live in .data because it's used by xen_prepare_pvh which runs + * before clearing the bss. 
+ */ +uint32_t xen_start_flags __attribute__((section(".data"))) = 0; +EXPORT_SYMBOL(xen_start_flags); + /* * Point at some empty memory to start with. We map the real shared_info * page as soon as fixmap is up and running. diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 357969a3697c..439a94bf89ad 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1203,15 +1203,24 @@ asmlinkage __visible void __init xen_start_kernel(void) return; xen_domain_type = XEN_PV_DOMAIN; + xen_start_flags = xen_start_info->flags; xen_setup_features(); - xen_setup_machphys_mapping(); - /* Install Xen paravirt ops */ pv_info = xen_info; pv_init_ops.patch = paravirt_patch_default; pv_cpu_ops = xen_cpu_ops; + xen_init_irq_ops(); + + /* + * Setup xen_vcpu early because it is needed for + * local_irq_disable(), irqs_disabled(), e.g. in printk(). + * + * Don't do the full vcpu_info placement stuff until we have + * the cpu_possible_mask and a non-dummy shared_info. + */ + xen_vcpu_info_reset(0); x86_platform.get_nmi_reason = xen_get_nmi_reason; @@ -1224,10 +1233,12 @@ asmlinkage __visible void __init xen_start_kernel(void) * Set up some pagetable state before starting to set any ptes. */ + xen_setup_machphys_mapping(); xen_init_mmu_ops(); /* Prevent unwanted bits from being set in PTEs. */ __supported_pte_mask &= ~_PAGE_GLOBAL; + __default_kernel_pte_mask &= ~_PAGE_GLOBAL; /* * Prevent page tables from being allocated in highmem, even @@ -1248,20 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void) get_cpu_cap(&boot_cpu_data); x86_configure_nx(); - xen_init_irq_ops(); - /* Let's presume PV guests always boot on vCPU with id 0. */ per_cpu(xen_vcpu_id, 0) = 0; - /* - * Setup xen_vcpu early because idt_setup_early_handler needs it for - * local_irq_disable(), irqs_disabled(). - * - * Don't do the full vcpu_info placement stuff until we have - * the cpu_possible_mask and a non-dummy shared_info. - */ - xen_vcpu_info_reset(0); - idt_setup_early_handler(); xen_init_capabilities(); diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index aa1c6a6831a9..c85d1a88f476 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void) } xen_pvh = 1; + xen_start_flags = pvh_start_info.flags; msr = cpuid_ebx(xen_cpuid_base() + 2); pfn = __pa(hypercall_page); diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 74179852e46c..7515a19fd324 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c @@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = { void __init xen_init_irq_ops(void) { - /* For PVH we use default pv_irq_ops settings. 
*/ - if (!xen_feature(XENFEAT_hvm_callback_vector)) - pv_irq_ops = xen_irq_ops; + pv_irq_ops = xen_irq_ops; x86_init.irqs.intr_init = xen_init_IRQ; } diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 2e20ae2fa2d6..e3b18ad49889 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -32,6 +32,7 @@ #include #include +#include #include #include @@ -70,6 +71,8 @@ static void cpu_bringup(void) cpu_data(cpu).x86_max_cores = 1; set_cpu_sibling_map(cpu); + speculative_store_bypass_ht_init(); + xen_setup_cpu_clockevents(); notify_cpu_starting(cpu); @@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus) } set_cpu_sibling_map(0); + speculative_store_bypass_ht_init(); + xen_pmu_init(0); if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0)) diff --git a/block/bio.c b/block/bio.c index 9710e275f230..67eff5eddc49 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1807,9 +1807,6 @@ void bio_endio(struct bio *bio) if (!bio_integrity_endio(bio)) return; - if (WARN_ONCE(bio->bi_next, "driver left bi_next not NULL")) - bio->bi_next = NULL; - /* * Need to have a real endio function for chained bios, otherwise * various corner cases will break (like stacking block devices that diff --git a/block/blk-core.c b/block/blk-core.c index cf0ee764b908..f84a9b7b6f5a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -273,10 +273,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio, bio_advance(bio, nbytes); /* don't actually finish bio if it's part of flush sequence */ - /* - * XXX this code looks suspicious - it's not consistent with advancing - * req->bio in caller - */ if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) bio_endio(bio); } @@ -3081,10 +3077,8 @@ bool blk_update_request(struct request *req, blk_status_t error, struct bio *bio = req->bio; unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); - if (bio_bytes == bio->bi_iter.bi_size) { + if (bio_bytes == bio->bi_iter.bi_size) req->bio = bio->bi_next; - bio->bi_next = NULL; - } /* Completion has already been traced */ bio_clear_flag(bio, BIO_TRACE_COMPLETION); @@ -3479,6 +3473,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) dst->cpu = src->cpu; dst->__sector = blk_rq_pos(src); dst->__data_len = blk_rq_bytes(src); + if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { + dst->rq_flags |= RQF_SPECIAL_PAYLOAD; + dst->special_vec = src->special_vec; + } dst->nr_phys_segments = src->nr_phys_segments; dst->ioprio = src->ioprio; dst->extra_len = src->extra_len; diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index ffa622366922..1c4532e92938 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -356,7 +356,7 @@ static const char *const blk_mq_rq_state_name_array[] = { static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state) { - if (WARN_ON_ONCE((unsigned int)rq_state > + if (WARN_ON_ONCE((unsigned int)rq_state >= ARRAY_SIZE(blk_mq_rq_state_name_array))) return "(?)"; return blk_mq_rq_state_name_array[rq_state]; diff --git a/block/blk-mq.c b/block/blk-mq.c index 70c65bb6c013..95919268564b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -781,7 +781,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved) WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); } - req->rq_flags &= ~RQF_TIMED_OUT; blk_add_timer(req); } @@ -1076,6 +1075,9 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx, #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ +/* + * Returns true if we did some work AND can potentially do more. 
+ */ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, bool got_budget) { @@ -1206,8 +1208,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, blk_mq_run_hw_queue(hctx, true); else if (needs_restart && (ret == BLK_STS_RESOURCE)) blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); + + return false; } + /* + * If the host/device is unable to accept more work, inform the + * caller of that. + */ + if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) + return false; + return (queued + errors) != 0; } diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 01e2b353a2b9..15c1f5e12eb8 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -144,6 +144,7 @@ void __blk_complete_request(struct request *req) local_irq_restore(flags); } +EXPORT_SYMBOL(__blk_complete_request); /** * blk_complete_request - end I/O on a request diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 4b8a48d48ba1..f2cfd56e1606 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -210,6 +210,7 @@ void blk_add_timer(struct request *req) if (!req->timeout) req->timeout = q->rq_timeout; + req->rq_flags &= ~RQF_TIMED_OUT; blk_rq_set_deadline(req, jiffies + req->timeout); /* diff --git a/block/bsg.c b/block/bsg.c index 66602c489956..3da540faf673 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -267,8 +267,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode) } else if (hdr->din_xfer_len) { ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp), hdr->din_xfer_len, GFP_KERNEL); - } else { - ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL); } if (ret) diff --git a/block/sed-opal.c b/block/sed-opal.c index 945f4b8610e0..e0de4dd448b3 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n, return 0; } - if (n > resp->num) { + if (n >= resp->num) { pr_debug("Response has %d tokens. Can't access %d\n", resp->num, n); return 0; @@ -916,7 +916,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n) return 0; } - if (n > resp->num) { + if (n >= resp->num) { pr_debug("Response has %d tokens. 
Can't access %d\n", resp->num, n); return 0; diff --git a/certs/blacklist.h b/certs/blacklist.h index 150d82da8e99..1efd6fa0dc60 100644 --- a/certs/blacklist.h +++ b/certs/blacklist.h @@ -1,3 +1,3 @@ #include -extern const char __initdata *const blacklist_hashes[]; +extern const char __initconst *const blacklist_hashes[]; diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 49fa8582138b..314c52c967e5 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1060,12 +1060,19 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err) } EXPORT_SYMBOL_GPL(af_alg_async_cb); -__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events) +/** + * af_alg_poll - poll system call handler + */ +__poll_t af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; - __poll_t mask = 0; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; if (!ctx->more || ctx->used) mask |= EPOLLIN | EPOLLRDNORM; @@ -1075,7 +1082,7 @@ __poll_t af_alg_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL_GPL(af_alg_poll_mask); +EXPORT_SYMBOL_GPL(af_alg_poll); /** * af_alg_alloc_areq - allocate struct af_alg_async_req diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 825524f27438..c40a8c7ee8ae 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -375,7 +375,7 @@ static struct proto_ops algif_aead_ops = { .sendmsg = aead_sendmsg, .sendpage = af_alg_sendpage, .recvmsg = aead_recvmsg, - .poll_mask = af_alg_poll_mask, + .poll = af_alg_poll, }; static int aead_check_key(struct socket *sock) @@ -471,7 +471,7 @@ static struct proto_ops algif_aead_ops_nokey = { .sendmsg = aead_sendmsg_nokey, .sendpage = aead_sendpage_nokey, .recvmsg = aead_recvmsg_nokey, - .poll_mask = af_alg_poll_mask, + .poll = af_alg_poll, }; static void *aead_bind(const char *name, u32 type, u32 mask) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 4c04eb9888ad..cfdaab2b7d76 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -206,7 +206,7 @@ static struct proto_ops algif_skcipher_ops = { .sendmsg = skcipher_sendmsg, .sendpage = af_alg_sendpage, .recvmsg = skcipher_recvmsg, - .poll_mask = af_alg_poll_mask, + .poll = af_alg_poll, }; static int skcipher_check_key(struct socket *sock) @@ -302,7 +302,7 @@ static struct proto_ops algif_skcipher_ops_nokey = { .sendmsg = skcipher_sendmsg_nokey, .sendpage = skcipher_sendpage_nokey, .recvmsg = skcipher_recvmsg_nokey, - .poll_mask = af_alg_poll_mask, + .poll = af_alg_poll, }; static void *skcipher_bind(const char *name, u32 type, u32 mask) diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 7d81e6bb461a..b6cabac4b62b 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen, return -EINVAL; } + if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) { + /* Discard the BIT STRING metadata */ + if (vlen < 1 || *(const u8 *)value != 0) + return -EBADMSG; + + value++; + vlen--; + } + ctx->cert->raw_sig = value; ctx->cert->raw_sig_size = vlen; return 0; diff --git a/crypto/morus640.c b/crypto/morus640.c index 9fbcde307daf..5eede3749e64 100644 --- a/crypto/morus640.c +++ b/crypto/morus640.c @@ -274,8 +274,9 @@ static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst, union morus640_block_in tail; 
memcpy(tail.bytes, src, size); + memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); - crypto_morus640_load_a(&m, src); + crypto_morus640_load_a(&m, tail.bytes); crypto_morus640_core(state, &m); crypto_morus640_store_a(tail.bytes, &m); memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 264ec12c0b9c..7f6735d9003f 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -152,7 +152,7 @@ static SHA3_INLINE void keccakf_round(u64 st[25]) st[24] ^= bc[ 4]; } -static void __optimize("O3") keccakf(u64 st[25]) +static void keccakf(u64 st[25]) { int round; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 38a286975c31..f8fecfec5df9 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "internal.h" @@ -946,9 +947,10 @@ static void lpss_iosf_exit_d3_state(void) mutex_unlock(&lpss_iosf_mutex); } -static int acpi_lpss_suspend(struct device *dev, bool wakeup) +static int acpi_lpss_suspend(struct device *dev, bool runtime) { struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); + bool wakeup = runtime || device_may_wakeup(dev); int ret; if (pdata->dev_desc->flags & LPSS_SAVE_CTX) @@ -961,13 +963,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup) * wrong status for devices being about to be powered off. See * lpss_iosf_enter_d3_state() for further information. */ - if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + if ((runtime || !pm_suspend_via_firmware()) && + lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) lpss_iosf_enter_d3_state(); return ret; } -static int acpi_lpss_resume(struct device *dev) +static int acpi_lpss_resume(struct device *dev, bool runtime) { struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; @@ -976,7 +979,8 @@ static int acpi_lpss_resume(struct device *dev) * This call is kept first to be in symmetry with * acpi_lpss_runtime_suspend() one. */ - if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) + if ((runtime || !pm_resume_via_firmware()) && + lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) lpss_iosf_exit_d3_state(); ret = acpi_dev_resume(dev); @@ -1000,12 +1004,12 @@ static int acpi_lpss_suspend_late(struct device *dev) return 0; ret = pm_generic_suspend_late(dev); - return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev)); + return ret ? ret : acpi_lpss_suspend(dev, false); } static int acpi_lpss_resume_early(struct device *dev) { - int ret = acpi_lpss_resume(dev); + int ret = acpi_lpss_resume(dev, false); return ret ? ret : pm_generic_resume_early(dev); } @@ -1020,7 +1024,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev) static int acpi_lpss_runtime_resume(struct device *dev) { - int ret = acpi_lpss_resume(dev); + int ret = acpi_lpss_resume(dev, true); return ret ? 
ret : pm_generic_runtime_resume(dev); } diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index fc0c2e2328cd..fe9d46d81750 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) return_ACPI_STATUS(status); } - /* - * 1) Disable all GPEs - * 2) Enable all wakeup GPEs - */ + /* Disable all GPEs */ status = acpi_hw_disable_all_gpes(); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } + /* + * If the target sleep state is S5, clear all GPEs and fixed events too + */ + if (sleep_state == ACPI_STATE_S5) { + status = acpi_hw_clear_acpi_status(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } acpi_gbl_system_awake_and_running = FALSE; + /* Enable all wakeup GPEs */ status = acpi_hw_enable_all_wakeup_gpes(); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index 5a64ddaed8a3..e47430272692 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c @@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name, switch (lookup_status) { case AE_ALREADY_EXISTS: - acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); + acpi_os_printf(ACPI_MSG_BIOS_ERROR); message = "Failure creating"; break; case AE_NOT_FOUND: - acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); + acpi_os_printf(ACPI_MSG_BIOS_ERROR); message = "Could not resolve"; break; default: - acpi_os_printf("\n" ACPI_MSG_ERROR); + acpi_os_printf(ACPI_MSG_ERROR); message = "Failure resolving"; break; } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index b0113a5802a3..d79ad844c78f 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook) */ pr_err("extension failed to load: %s", hook->name); __battery_hook_unregister(hook, 0); - return; + goto end; } } pr_info("new extension: %s\n", hook->name); +end: mutex_unlock(&hook_mutex); } EXPORT_SYMBOL_GPL(battery_hook_register); @@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register); */ static void battery_hook_add_battery(struct acpi_battery *battery) { - struct acpi_battery_hook *hook_node; + struct acpi_battery_hook *hook_node, *tmp; mutex_lock(&hook_mutex); INIT_LIST_HEAD(&battery->list); @@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery) * when a battery gets hotplugged or initialized * during the battery module initialization. */ - list_for_each_entry(hook_node, &battery_hook_list, list) { + list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) { if (hook_node->add_battery(battery->bat)) { /* * The notification of the extensions has failed, to * prevent further errors we will unload the extension. 
*/ - __battery_hook_unregister(hook_node, 0); pr_err("error in extension, unloading: %s", hook_node->name); + __battery_hook_unregister(hook_node, 0); } } mutex_unlock(&hook_mutex); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index bb94cf0731fe..442a9e24f439 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2037,6 +2037,17 @@ static inline void acpi_ec_query_exit(void) } } +static const struct dmi_system_id acpi_ec_no_wakeup[] = { + { + .ident = "Thinkpad X1 Carbon 6th", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"), + }, + }, + { }, +}; + int __init acpi_ec_init(void) { int result; @@ -2047,6 +2058,15 @@ int __init acpi_ec_init(void) if (result) return result; + /* + * Disable EC wakeup on following systems to prevent periodic + * wakeup from EC GPE. + */ + if (dmi_check_system(acpi_ec_no_wakeup)) { + ec_no_wakeup = true; + pr_debug("Disabling EC wakeup on suspend-to-idle\n"); + } + /* Drivers must be started after acpi_ec_query_init() */ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); /* diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index d15814e1727f..7c479002e798 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, const guid_t *guid; int rc, i; + if (cmd_rc) + *cmd_rc = -EINVAL; func = cmd; if (cmd == ND_CMD_CALL) { call_pkg = buf; @@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, * If we return an error (like elsewhere) then caller wouldn't * be able to rely upon data returned to make calculation. */ + if (cmd_rc) + *cmd_rc = 0; return 0; } @@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev, mutex_lock(&acpi_desc->init_mutex); rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, - work_busy(&acpi_desc->dwork.work) + acpi_desc->scrub_busy && !acpi_desc->cancel ? 
"+\n" : "\n"); mutex_unlock(&acpi_desc->init_mutex); } @@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, return 0; } +static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) +{ + lockdep_assert_held(&acpi_desc->init_mutex); + + acpi_desc->scrub_busy = 1; + /* note this should only be set from within the workqueue */ + if (tmo) + acpi_desc->scrub_tmo = tmo; + queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); +} + +static void sched_ars(struct acpi_nfit_desc *acpi_desc) +{ + __sched_ars(acpi_desc, 0); +} + +static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) +{ + lockdep_assert_held(&acpi_desc->init_mutex); + + acpi_desc->scrub_busy = 0; + acpi_desc->scrub_count++; + if (acpi_desc->scrub_count_state) + sysfs_notify_dirent(acpi_desc->scrub_count_state); +} + static void acpi_nfit_scrub(struct work_struct *work) { struct acpi_nfit_desc *acpi_desc; @@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work) mutex_lock(&acpi_desc->init_mutex); query_rc = acpi_nfit_query_poison(acpi_desc); tmo = __acpi_nfit_scrub(acpi_desc, query_rc); - if (tmo) { - queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); - acpi_desc->scrub_tmo = tmo; - } else { - acpi_desc->scrub_count++; - if (acpi_desc->scrub_count_state) - sysfs_notify_dirent(acpi_desc->scrub_count_state); - } + if (tmo) + __sched_ars(acpi_desc, tmo); + else + notify_ars_done(acpi_desc); memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); mutex_unlock(&acpi_desc->init_mutex); } @@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) break; } - queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); + sched_ars(acpi_desc); return 0; } @@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags) } } if (scheduled) { - queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0); + sched_ars(acpi_desc); dev_dbg(dev, "ars_scan triggered\n"); } mutex_unlock(&acpi_desc->init_mutex); diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 7d15856a739f..a97ff42fe311 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -203,6 +203,7 @@ struct acpi_nfit_desc { unsigned int max_ars; unsigned int scrub_count; unsigned int scrub_mode; + unsigned int scrub_busy:1; unsigned int cancel:1; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 7ca41bf023c9..8df9abfa947b 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -45,6 +45,8 @@ #include #include +#include "acpica/accommon.h" +#include "acpica/acnamesp.h" #include "internal.h" #define _COMPONENT ACPI_OS_SERVICES @@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n, } EXPORT_SYMBOL(acpi_check_region); +static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level, + void *_res, void **return_value) +{ + struct acpi_mem_space_context **mem_ctx; + union acpi_operand_object *handler_obj; + union acpi_operand_object *region_obj2; + union acpi_operand_object *region_obj; + struct resource *res = _res; + acpi_status status; + + region_obj = acpi_ns_get_attached_object(handle); + if (!region_obj) + return AE_OK; + + handler_obj = region_obj->region.handler; + if (!handler_obj) + return AE_OK; + + if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) + return AE_OK; + + if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) + return AE_OK; + + region_obj2 = 
acpi_ns_get_secondary_object(region_obj); + if (!region_obj2) + return AE_OK; + + mem_ctx = (void *)®ion_obj2->extra.region_context; + + if (!(mem_ctx[0]->address >= res->start && + mem_ctx[0]->address < res->end)) + return AE_OK; + + status = handler_obj->address_space.setup(region_obj, + ACPI_REGION_DEACTIVATE, + NULL, (void **)mem_ctx); + if (ACPI_SUCCESS(status)) + region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); + + return status; +} + +/** + * acpi_release_memory - Release any mappings done to a memory region + * @handle: Handle to namespace node + * @res: Memory resource + * @level: A level that terminates the search + * + * Walks through @handle and unmaps all SystemMemory Operation Regions that + * overlap with @res and that have already been activated (mapped). + * + * This is a helper that allows drivers to place special requirements on memory + * region that may overlap with operation regions, primarily allowing them to + * safely map the region as non-cached memory. + * + * The unmapped Operation Regions will be automatically remapped next time they + * are called, so the drivers do not need to do anything else. + */ +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, + u32 level) +{ + if (!(res->flags & IORESOURCE_MEM)) + return AE_TYPE; + + return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level, + acpi_deactivate_mem_region, NULL, res, NULL); +} +EXPORT_SYMBOL_GPL(acpi_release_memory); + /* * Let drivers know whether the resource checks are effective */ diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index e5ea1974d1e3..d1e26cb599bf 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, if (cpu_node) { cpu_node = acpi_find_processor_package_id(table, cpu_node, level, flag); - /* Only the first level has a guaranteed id */ - if (level == 0) + /* + * As per specification if the processor structure represents + * an actual processor, then ACPI processor ID must be valid. + * For processor containers ACPI_PPTT_ACPI_PROCESSOR_ID_VALID + * should be set if the UID is valid + */ + if (level == 0 || + cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID) return cpu_node->acpi_processor_id; return ACPI_PTR_DIFF(cpu_node, table); } diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 2b16e7c8fff3..39b181d6bd0d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG config SATA_HIGHBANK tristate "Calxeda Highbank SATA support" - depends on HAS_DMA depends on ARCH_HIGHBANK || COMPILE_TEST help This option enables support for the Calxeda Highbank SoC's @@ -408,7 +407,6 @@ config SATA_HIGHBANK config SATA_MV tristate "Marvell SATA support" - depends on HAS_DMA depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \ ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST select GENERIC_PHY diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 738fb22978dd..b2b9eba1d214 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. 
AHCI */ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */ + { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) return strcmp(buf, dmi->driver_data) < 0; } +static bool ahci_broken_lpm(struct pci_dev *pdev) +{ + static const struct dmi_system_id sysids[] = { + /* Various Lenovo 50 series have LPM issues with older BIOSen */ + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"), + }, + .driver_data = "20180406", /* 1.31 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"), + }, + .driver_data = "20180420", /* 1.28 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"), + }, + .driver_data = "20180315", /* 1.33 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"), + }, + /* + * Note date based on release notes, 2.35 has been + * reported to be good, but I've been unable to get + * a hold of the reporter to get the DMI BIOS date. + * TODO: fix this. + */ + .driver_data = "20180310", /* 2.35 */ + }, + { } /* terminate list */ + }; + const struct dmi_system_id *dmi = dmi_first_match(sysids); + int year, month, date; + char buf[9]; + + if (!dmi) + return false; + + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); + snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); + + return strcmp(buf, dmi->driver_data) < 0; +} + static bool ahci_broken_online(struct pci_dev *pdev) { #define ENCODE_BUSDEVFN(bus, slot, func) \ @@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "quirky BIOS, skipping spindown on poweroff\n"); } + if (ahci_broken_lpm(pdev)) { + pi.flags |= ATA_FLAG_NO_LPM; + dev_warn(&pdev->dev, + "BIOS update required for Link Power Management support\n"); + } + if (ahci_broken_suspend(pdev)) { hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; dev_warn(&pdev->dev, diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index 0045dacd814b..72d90b4c3aae 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) * * Return: 0 on success; Error code otherwise. 
*/ -int ahci_mvebu_stop_engine(struct ata_port *ap) +static int ahci_mvebu_stop_engine(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); u32 tmp, port_fbs; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 965842a08743..09620c2ffa0f 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, /* get the slot number from the message */ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; - if (pmp < EM_MAX_SLOTS) + if (pmp < EM_MAX_SLOTS) { + pmp = array_index_nospec(pmp, EM_MAX_SLOTS); emp = &pp->em_priv[pmp]; - else + } else { return -EINVAL; + } /* mask off the activity bits if we are in sw_activity * mode, user should turn off sw_activity before setting diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 27d15ed7fa3d..cc71c63df381 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev) (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) dev->horkage |= ATA_HORKAGE_NOLPM; + if (ap->flags & ATA_FLAG_NO_LPM) + dev->horkage |= ATA_HORKAGE_NOLPM; + if (dev->horkage & ATA_HORKAGE_NOLPM) { ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index d5412145d76d..01306c018398 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { struct ata_queued_cmd *qc; - for (i = 0; i < ATA_MAX_QUEUE; i++) { - qc = __ata_qc_from_tag(ap, i); + ata_qc_for_each_raw(ap, qc, i) { if (qc->flags & ATA_QCFLAG_ACTIVE && qc->scsicmd == scmd) break; @@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh); static int ata_eh_nr_in_flight(struct ata_port *ap) { + struct ata_queued_cmd *qc; unsigned int tag; int nr = 0; /* count only non-internal commands */ - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - if (ata_tag_internal(tag)) - continue; - if (ata_qc_from_tag(ap, tag)) + ata_qc_for_each(ap, qc, tag) { + if (qc) nr++; } @@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t) goto out_unlock; if (cnt == ap->fastdrain_cnt) { + struct ata_queued_cmd *qc; unsigned int tag; /* No progress during the last interval, tag all * in-flight qcs as timed out and freeze the port. */ - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); + ata_qc_for_each(ap, qc, tag) { if (qc) qc->err_mask |= AC_ERR_TIMEOUT; } @@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap) static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) { + struct ata_queued_cmd *qc; int tag, nr_aborted = 0; WARN_ON(!ap->ops->error_handler); @@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) ata_eh_set_pending(ap, 0); /* include internal tag in iteration */ - for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); - + ata_qc_for_each_with_internal(ap, qc, tag) { if (qc && (!link || qc->dev->link == link)) { qc->flags |= ATA_QCFLAG_FAILED; ata_qc_complete(qc); @@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) return; /* has LLDD analyzed already? 
*/ - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - qc = __ata_qc_from_tag(ap, tag); - + ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_FAILED)) continue; @@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; + struct ata_queued_cmd *qc; struct ata_device *dev; unsigned int all_err_mask = 0, eflags = 0; int tag, nr_failed = 0, nr_quiet = 0; @@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) all_err_mask |= ehc->i.err_mask; - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); - + ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_FAILED) || ata_dev_phys_link(qc->dev) != link) continue; @@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; + struct ata_queued_cmd *qc; const char *frozen, *desc; char tries_buf[6] = ""; int tag, nr_failed = 0; @@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link) if (ehc->i.desc[0] != '\0') desc = ehc->i.desc; - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); - + ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_FAILED) || ata_dev_phys_link(qc->dev) != link || ((qc->flags & ATA_QCFLAG_QUIET) && @@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link) ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); #endif - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); + ata_qc_for_each_raw(ap, qc, tag) { struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; char data_buf[20] = ""; char cdb_buf[70] = ""; @@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, */ void ata_eh_finish(struct ata_port *ap) { + struct ata_queued_cmd *qc; int tag; /* retry or finish qcs */ - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); - + ata_qc_for_each_raw(ap, qc, tag) { if (!(qc->flags & ATA_QCFLAG_FAILED)) continue; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 6a91d04351d9..aad1b01447de 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) */ goto invalid_param_len; } - if (block > dev->n_sectors) - goto out_of_range; all = cdb[14] & 0x1; + if (all) { + /* + * Ignore the block address (zone ID) as defined by ZBC. + */ + block = 0; + } else if (block >= dev->n_sectors) { + /* + * Block must be a valid zone ID (a zone start LBA). 
+ */ + fp = 2; + goto invalid_fld; + } if (ata_ncq_enabled(qc->dev) && ata_fpdma_zac_mgmt_out_supported(qc->dev)) { @@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) invalid_fld: ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); return 1; - out_of_range: - /* "Logical Block Address out of range" */ - ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00); - return 1; invalid_param_len: /* "Parameter list length error" */ ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index b8d9cfc60374..4dc528bf8e85 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag, { /* We let libATA core do actual (queue) tag allocation */ - /* all non NCQ/queued commands should have tag#0 */ - if (ata_tag_internal(tag)) { - DPRINTK("mapping internal cmds to tag#0\n"); - return 0; - } - if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) { DPRINTK("tag %d invalid : out of range\n", tag); return 0; @@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) /* Workaround for data length mismatch errata */ if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) { - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { - qc = ata_qc_from_tag(ap, tag); + ata_qc_for_each_with_internal(ap, qc, tag) { if (qc && ata_is_atapi(qc->tf.protocol)) { u32 hcontrol; /* Set HControl[27] to clear error registers */ diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 10ae11aa1926..72c9b922a77b 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev) struct ata_port *ap = ata_shost_to_port(sdev->host); struct nv_adma_port_priv *pp = ap->private_data; struct nv_adma_port_priv *port0, *port1; - struct scsi_device *sdev0, *sdev1; struct pci_dev *pdev = to_pci_dev(ap->host->dev); unsigned long segment_boundary, flags; unsigned short sg_tablesize; @@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev) port0 = ap->host->ports[0]->private_data; port1 = ap->host->ports[1]->private_data; - sdev0 = ap->host->ports[0]->link.device[0].sdev; - sdev1 = ap->host->ports[1]->link.device[0].sdev; if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { /* diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index ff81a576347e..82532c299bb5 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev) skb_queue_head_init(&iadev->rx_dma_q); iadev->rx_free_desc_qhead = NULL; - iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL); + iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL); if (!iadev->rx_open) { printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n", dev->number); diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index a8d2eb0ceb8d..2c288d1f42bb 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) return -EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return -EINVAL; + pool = array_index_nospec(pool, + ZATM_LAST_POOL + 1); if (copy_from_user(&info, &((struct zatm_pool_req __user *) arg)->info, sizeof(info))) return -EFAULT; diff --git a/drivers/base/Makefile b/drivers/base/Makefile index b074f242a435..704f44295810 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -8,10 
+8,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \ topology.o container.o property.o cacheinfo.o \ devcon.o obj-$(CONFIG_DEVTMPFS) += devtmpfs.o -obj-$(CONFIG_DMA_CMA) += dma-contiguous.o obj-y += power/ -obj-$(CONFIG_HAS_DMA) += dma-mapping.o -obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o obj-$(CONFIG_ISA_BUS_API) += isa.o obj-y += firmware_loader/ obj-$(CONFIG_NUMA) += node.o diff --git a/drivers/base/core.c b/drivers/base/core.c index 36622b52e419..df3e1a44707a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -236,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer, link->rpm_active = true; } pm_runtime_new_link(consumer); + /* + * If the link is being added by the consumer driver at probe + * time, balance the decrementation of the supplier's runtime PM + * usage counter after consumer probe in driver_probe_device(). + */ + if (consumer->links.status == DL_DEV_PROBING) + pm_runtime_get_noresume(supplier); } get_device(supplier); link->supplier = supplier; @@ -255,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer, switch (consumer->links.status) { case DL_DEV_PROBING: /* - * Balance the decrementation of the supplier's - * runtime PM usage counter after consumer probe - * in driver_probe_device(). + * Some callers expect the link creation during + * consumer driver probe to resume the supplier + * even without DL_FLAG_RPM_ACTIVE. */ if (flags & DL_FLAG_PM_RUNTIME) - pm_runtime_get_sync(supplier); + pm_runtime_resume(supplier); link->status = DL_STATE_CONSUMER_PROBE; break; diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 4925af5c4cf0..9e8484189034 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev) } static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, - unsigned int index) + unsigned int index, bool power_on) { struct of_phandle_args pd_args; struct generic_pm_domain *pd; @@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - genpd_lock(pd); - ret = genpd_power_on(pd, 0); - genpd_unlock(pd); + if (power_on) { + genpd_lock(pd); + ret = genpd_power_on(pd, 0); + genpd_unlock(pd); + } if (ret) genpd_remove_device(pd, dev); @@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev) "#power-domain-cells") != 1) return 0; - return __genpd_dev_pm_attach(dev, dev->of_node, 0); + return __genpd_dev_pm_attach(dev, dev->of_node, 0, true); } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); @@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, } /* Try to attach the device to the PM domain at the specified index. */ - ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index); + ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false); if (ret < 1) { device_unregister(genpd_dev); return ret ? ERR_PTR(ret) : NULL; } - pm_runtime_set_active(genpd_dev); pm_runtime_enable(genpd_dev); + genpd_queue_power_off_work(dev_to_genpd(genpd_dev)); return genpd_dev; } @@ -2487,10 +2489,9 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); * power domain corresponding to a DT node's "required-opps" property. * * @dev: Device for which the performance-state needs to be found. - * @opp_node: DT node where the "required-opps" property is present. This can be + * @np: DT node where the "required-opps" property is present. 
This can be * the device node itself (if it doesn't have an OPP table) or a node * within the OPP table of a device (if device has an OPP table). - * @state: Pointer to return performance state. * * Returns performance state corresponding to the "required-opps" property of * a DT node. This calls platform specific genpd->opp_to_performance_state() @@ -2499,7 +2500,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); * Returns performance state on success and 0 on failure. */ unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *opp_node) + struct device_node *np) { struct generic_pm_domain *genpd; struct dev_pm_opp *opp; @@ -2514,7 +2515,7 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev, genpd_lock(genpd); - opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node); + opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np); if (IS_ERR(opp)) { dev_err(dev, "Failed to find required OPP: %ld\n", PTR_ERR(opp)); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index a47e4987ee46..d146fedc38bb 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long _drbd_start_io_acct(device, req); /* process discards always from our submitter thread */ - if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) || - (bio_op(bio) & REQ_OP_DISCARD)) + if (bio_op(bio) == REQ_OP_WRITE_ZEROES || + bio_op(bio) == REQ_OP_DISCARD) goto queue_for_submitter_thread; if (rw == WRITE && req->private_bio && req->i.size diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 1476cb3439f4..5e793dd7adfb 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio) what = COMPLETED_OK; } - bio_put(req->private_bio); req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); + bio_put(bio); /* not req_mod(), we need irqsave here! 
*/ spin_lock_irqsave(&device->resource->req_lock, flags); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index d6b6f434fd4b..4cb1d1be3cfb 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, arg = (unsigned long) compat_ptr(arg); case LOOP_SET_FD: case LOOP_CHANGE_FD: + case LOOP_SET_BLOCK_SIZE: err = lo_ioctl(bdev, mode, cmd, arg); break; default: diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 3b7083b8ecbb..74a05561b620 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -76,6 +76,7 @@ struct link_dead_args { #define NBD_HAS_CONFIG_REF 4 #define NBD_BOUND 5 #define NBD_DESTROY_ON_DISCONNECT 6 +#define NBD_DISCONNECT_ON_CLOSE 7 struct nbd_config { u32 flags; @@ -138,6 +139,7 @@ static void nbd_config_put(struct nbd_device *nbd); static void nbd_connect_reply(struct genl_info *info, int index); static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info); static void nbd_dead_link_work(struct work_struct *work); +static void nbd_disconnect_and_put(struct nbd_device *nbd); static inline struct device *nbd_to_dev(struct nbd_device *nbd) { @@ -1305,6 +1307,12 @@ static int nbd_open(struct block_device *bdev, fmode_t mode) static void nbd_release(struct gendisk *disk, fmode_t mode) { struct nbd_device *nbd = disk->private_data; + struct block_device *bdev = bdget_disk(disk, 0); + + if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && + bdev->bd_openers == 0) + nbd_disconnect_and_put(nbd); + nbd_config_put(nbd); nbd_put(nbd); } @@ -1705,6 +1713,10 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) &config->runtime_flags); put_dev = true; } + if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { + set_bit(NBD_DISCONNECT_ON_CLOSE, + &config->runtime_flags); + } } if (info->attrs[NBD_ATTR_SOCKETS]) { @@ -1749,6 +1761,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) return ret; } +static void nbd_disconnect_and_put(struct nbd_device *nbd) +{ + mutex_lock(&nbd->config_lock); + nbd_disconnect(nbd); + nbd_clear_sock(nbd); + mutex_unlock(&nbd->config_lock); + if (test_and_clear_bit(NBD_HAS_CONFIG_REF, + &nbd->config->runtime_flags)) + nbd_config_put(nbd); +} + static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) { struct nbd_device *nbd; @@ -1781,13 +1804,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) nbd_put(nbd); return 0; } - mutex_lock(&nbd->config_lock); - nbd_disconnect(nbd); - nbd_clear_sock(nbd); - mutex_unlock(&nbd->config_lock); - if (test_and_clear_bit(NBD_HAS_CONFIG_REF, - &nbd->config->runtime_flags)) - nbd_config_put(nbd); + nbd_disconnect_and_put(nbd); nbd_config_put(nbd); nbd_put(nbd); return 0; @@ -1798,7 +1815,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) struct nbd_device *nbd = NULL; struct nbd_config *config; int index; - int ret = -EINVAL; + int ret = 0; bool put_dev = false; if (!netlink_capable(skb, CAP_SYS_ADMIN)) @@ -1838,6 +1855,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) !nbd->task_recv) { dev_err(nbd_to_dev(nbd), "not configured, cannot reconfigure\n"); + ret = -EINVAL; goto out; } @@ -1862,6 +1880,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) &config->runtime_flags)) refcount_inc(&nbd->refs); } + + if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { + set_bit(NBD_DISCONNECT_ON_CLOSE, + &config->runtime_flags); + } 
else { + clear_bit(NBD_DISCONNECT_ON_CLOSE, + &config->runtime_flags); + } } if (info->attrs[NBD_ATTR_SOCKETS]) { diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 7948049f6c43..042c778e5a4e 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1365,7 +1365,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq) { pr_info("null: rq %p timed out\n", rq); - blk_mq_complete_request(rq); + __blk_complete_request(rq); return BLK_EH_DONE; } diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index 14d159e2042d..2dc33e65d2d0 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 1cc29629d238..80d60f43db56 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata) const char *name; int nr_fck = 0, nr_ick = 0, i, error = 0; - ddata->clock_roles = devm_kzalloc(ddata->dev, - sizeof(*ddata->clock_roles) * + ddata->clock_roles = devm_kcalloc(ddata->dev, SYSC_MAX_CLOCKS, + sizeof(*ddata->clock_roles), GFP_KERNEL); if (!ddata->clock_roles) return -ENOMEM; @@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata) return -EINVAL; } - ddata->clocks = devm_kzalloc(ddata->dev, - sizeof(*ddata->clocks) * ddata->nr_clocks, + ddata->clocks = devm_kcalloc(ddata->dev, + ddata->nr_clocks, sizeof(*ddata->clocks), GFP_KERNEL); if (!ddata->clocks) return -ENOMEM; diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 91bb98c42a1c..aaf9e5afaad4 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register); void hwrng_unregister(struct hwrng *rng) { + int err; + mutex_lock(&rng_mutex); list_del(&rng->list); - if (current_rng == rng) - enable_best_rng(); + if (current_rng == rng) { + err = enable_best_rng(); + if (err) { + drop_current_rng(); + cur_rng_set_by_user = 0; + } + } if (list_empty(&rng_list)) { mutex_unlock(&rng_mutex); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index ad353be871bf..90ec010bffbd 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi) return 0; out_err: - ipmi_unregister_smi(new_smi->intf); - new_smi->intf = NULL; + if (new_smi->intf) { + ipmi_unregister_smi(new_smi->intf); + new_smi->intf = NULL; + } kfree(init_name); diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c index fbfc05e3f3d1..bb882ab161fe 100644 --- a/drivers/char/ipmi/kcs_bmc.c +++ b/drivers/char/ipmi/kcs_bmc.c @@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc) int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc) { unsigned long flags; - int ret = 0; + int ret = -ENODATA; u8 status; spin_lock_irqsave(&kcs_bmc->lock, flags); - if (!kcs_bmc->running) { - kcs_force_abort(kcs_bmc); - ret = -ENODEV; - goto out_unlock; + status = read_status(kcs_bmc); + if (status & KCS_STATUS_IBF) { + if (!kcs_bmc->running) + kcs_force_abort(kcs_bmc); + else if (status & KCS_STATUS_CMD_DAT) + kcs_bmc_handle_cmd(kcs_bmc); + else + kcs_bmc_handle_data(kcs_bmc); + + ret = 0; } - status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT); - - switch (status) { - case 
KCS_STATUS_IBF | KCS_STATUS_CMD_DAT: - kcs_bmc_handle_cmd(kcs_bmc); - break; - - case KCS_STATUS_IBF: - kcs_bmc_handle_data(kcs_bmc); - break; - - default: - ret = -ENODATA; - break; - } - -out_unlock: spin_unlock_irqrestore(&kcs_bmc->lock, flags); return ret; diff --git a/drivers/char/random.c b/drivers/char/random.c index a8fb0020ba5c..cd888d4ee605 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -402,7 +402,8 @@ static struct poolinfo { /* * Static global variables */ -static DECLARE_WAIT_QUEUE_HEAD(random_wait); +static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); +static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); static struct fasync_struct *fasync; static DEFINE_SPINLOCK(random_ready_list_lock); @@ -721,8 +722,8 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) /* should we wake readers? */ if (entropy_bits >= random_read_wakeup_bits && - wq_has_sleeper(&random_wait)) { - wake_up_interruptible_poll(&random_wait, POLLIN); + wq_has_sleeper(&random_read_wait)) { + wake_up_interruptible(&random_read_wait); kill_fasync(&fasync, SIGIO, POLL_IN); } /* If the input pool is getting full, send some @@ -1396,7 +1397,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, trace_debit_entropy(r->name, 8 * ibytes); if (ibytes && (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) { - wake_up_interruptible_poll(&random_wait, POLLOUT); + wake_up_interruptible(&random_write_wait); kill_fasync(&fasync, SIGIO, POLL_OUT); } @@ -1838,7 +1839,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes) if (nonblock) return -EAGAIN; - wait_event_interruptible(random_wait, + wait_event_interruptible(random_read_wait, ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits); if (signal_pending(current)) @@ -1875,17 +1876,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) return ret; } -static struct wait_queue_head * -random_get_poll_head(struct file *file, __poll_t events) -{ - return &random_wait; -} - static __poll_t -random_poll_mask(struct file *file, __poll_t events) +random_poll(struct file *file, poll_table * wait) { - __poll_t mask = 0; + __poll_t mask; + poll_wait(file, &random_read_wait, wait); + poll_wait(file, &random_write_wait, wait); + mask = 0; if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits) mask |= EPOLLIN | EPOLLRDNORM; if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) @@ -1992,8 +1990,7 @@ static int random_fasync(int fd, struct file *filp, int on) const struct file_operations random_fops = { .read = random_read, .write = random_write, - .get_poll_head = random_get_poll_head, - .poll_mask = random_poll_mask, + .poll = random_poll, .unlocked_ioctl = random_ioctl, .fasync = random_fasync, .llseek = noop_llseek, @@ -2326,7 +2323,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, * We'll be woken up again once below random_write_wakeup_thresh, * or when the calling thread is about to terminate. 
*/ - wait_event_interruptible(random_wait, kthread_should_stop() || + wait_event_interruptible(random_write_wait, kthread_should_stop() || ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); mix_pool_bytes(poolp, buffer, count); credit_entropy_bits(poolp, entropy); diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index ae40cbe770f0..0bb25dd009d1 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD) += sprd/ obj-$(CONFIG_ARCH_STI) += st/ obj-$(CONFIG_ARCH_STRATIX10) += socfpga/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ -obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/ +obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ obj-$(CONFIG_CLK_UNIPHIER) += uniphier/ diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c index aae62a5b8734..d1bbee19ed0f 100644 --- a/drivers/clk/davinci/da8xx-cfgchip.c +++ b/drivers/clk/davinci/da8xx-cfgchip.c @@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap) usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap); if (IS_ERR(usb1)) { - if (PTR_ERR(usb0) == -EPROBE_DEFER) + if (PTR_ERR(usb1) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n", diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h index 6a42529d31a9..cc5614567a70 100644 --- a/drivers/clk/davinci/psc.h +++ b/drivers/clk/davinci/psc.h @@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data; #ifdef CONFIG_ARCH_DAVINCI_DM355 extern const struct davinci_psc_init_data dm355_psc_init_data; #endif -#ifdef CONFIG_ARCH_DAVINCI_DM356 +#ifdef CONFIG_ARCH_DAVINCI_DM365 extern const struct davinci_psc_init_data dm365_psc_init_data; #endif #ifdef CONFIG_ARCH_DAVINCI_DM644x diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile index acaa14cfa25c..49454700f2e5 100644 --- a/drivers/clk/sunxi-ng/Makefile +++ b/drivers/clk/sunxi-ng/Makefile @@ -1,24 +1,24 @@ # SPDX-License-Identifier: GPL-2.0 # Common objects -lib-$(CONFIG_SUNXI_CCU) += ccu_common.o -lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o -lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o +obj-y += ccu_common.o +obj-y += ccu_mmc_timing.o +obj-y += ccu_reset.o # Base clock types -lib-$(CONFIG_SUNXI_CCU) += ccu_div.o -lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o -lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o -lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o -lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o -lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o -lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o +obj-y += ccu_div.o +obj-y += ccu_frac.o +obj-y += ccu_gate.o +obj-y += ccu_mux.o +obj-y += ccu_mult.o +obj-y += ccu_phase.o +obj-y += ccu_sdm.o # Multi-factor clocks -lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o -lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o -lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o -lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o -lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o +obj-y += ccu_nk.o +obj-y += ccu_nkm.o +obj-y += ccu_nkmp.o +obj-y += ccu_nm.o +obj-y += ccu_mp.o # SoC support obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o @@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o - -# The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our -# case, we want to use that goal, but even though lib.a will be properly -# generated, it will not be linked in, eventually resulting in a linker error -# for missing symbols. 
-# -# We can work around that by explicitly adding lib.a to the obj-y goal. This is -# an undocumented behaviour, but works well for now. -obj-$(CONFIG_SUNXI_CCU) += lib.a diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 57cb2f00fc07..d8c7f5750cdb 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type, clk->features |= CLOCK_EVT_FEAT_DYNIRQ; clk->name = "arch_mem_timer"; clk->rating = 400; - clk->cpumask = cpu_all_mask; + clk->cpumask = cpu_possible_mask; if (arch_timer_mem_use_virtual) { clk->set_state_shutdown = arch_timer_shutdown_virt_mem; clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c index e5cdc3af684c..2717f88c7904 100644 --- a/drivers/clocksource/timer-stm32.c +++ b/drivers/clocksource/timer-stm32.c @@ -304,8 +304,10 @@ static int __init stm32_timer_init(struct device_node *node) to->private_data = kzalloc(sizeof(struct stm32_timer_private), GFP_KERNEL); - if (!to->private_data) + if (!to->private_data) { + ret = -ENOMEM; goto deinit; + } rstc = of_reset_control_get(node, NULL); if (!IS_ERR(rstc)) { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 1de5ec8d5ea3..ece120da3353 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -294,6 +294,7 @@ struct pstate_funcs { static struct pstate_funcs pstate_funcs __read_mostly; static int hwp_active __read_mostly; +static int hwp_mode_bdw __read_mostly; static bool per_cpu_limits __read_mostly; static bool hwp_boost __read_mostly; @@ -1413,7 +1414,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); cpu->pstate.scaling = pstate_funcs.get_scaling(); cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; - cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + + if (hwp_active && !hwp_mode_bdw) { + unsigned int phy_max, current_max; + + intel_pstate_get_hwp_max(cpu->cpu, &phy_max, ¤t_max); + cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; + } else { + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + } if (pstate_funcs.get_aperf_mperf_shift) cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); @@ -2467,28 +2476,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; } static inline void intel_pstate_request_control_from_smm(void) {} #endif /* CONFIG_ACPI */ +#define INTEL_PSTATE_HWP_BROADWELL 0x01 + +#define ICPU_HWP(model, hwp_mode) \ + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode } + static const struct x86_cpu_id hwp_support_ids[] __initconst = { - { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP }, + ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), + ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL), + ICPU_HWP(X86_MODEL_ANY, 0), {} }; static int __init intel_pstate_init(void) { + const struct x86_cpu_id *id; int rc; if (no_load) return -ENODEV; - if (x86_match_cpu(hwp_support_ids)) { + id = x86_match_cpu(hwp_support_ids); + if (id) { copy_cpu_funcs(&core_funcs); if (!no_hwp) { hwp_active++; + hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; goto hwp_cpu_matched; } } else { - const struct x86_cpu_id *id; - id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) return -ENODEV; diff --git 
a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c index d049fe4b80c4..29389accf3e9 100644 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c @@ -42,6 +42,8 @@ enum _msm8996_version { NUM_OF_MSM8996_VERSIONS, }; +struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; + static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) { size_t len; @@ -74,7 +76,6 @@ static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) { struct opp_table *opp_tables[NR_CPUS] = {0}; - struct platform_device *cpufreq_dt_pdev; enum _msm8996_version msm8996_version; struct nvmem_cell *speedbin_nvmem; struct device_node *np; @@ -86,8 +87,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) int ret; cpu_dev = get_cpu_device(0); - if (NULL == cpu_dev) - ret = -ENODEV; + if (!cpu_dev) + return -ENODEV; msm8996_version = qcom_cpufreq_kryo_get_msm_id(); if (NUM_OF_MSM8996_VERSIONS == msm8996_version) { @@ -96,8 +97,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) } np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); - if (IS_ERR(np)) - return PTR_ERR(np); + if (!np) + return -ENOENT; ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu"); if (!ret) { @@ -115,6 +116,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) speedbin = nvmem_cell_read(speedbin_nvmem, &len); nvmem_cell_put(speedbin_nvmem); + if (IS_ERR(speedbin)) + return PTR_ERR(speedbin); switch (msm8996_version) { case MSM8996_V3: @@ -127,6 +130,7 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) BUG(); break; } + kfree(speedbin); for_each_possible_cpu(cpu) { cpu_dev = get_cpu_device(cpu); @@ -162,8 +166,15 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) return ret; } +static int qcom_cpufreq_kryo_remove(struct platform_device *pdev) +{ + platform_device_unregister(cpufreq_dt_pdev); + return 0; +} + static struct platform_driver qcom_cpufreq_kryo_driver = { .probe = qcom_cpufreq_kryo_probe, + .remove = qcom_cpufreq_kryo_remove, .driver = { .name = "qcom-cpufreq-kryo", }, @@ -198,8 +209,9 @@ static int __init qcom_cpufreq_kryo_init(void) if (unlikely(ret < 0)) return ret; - ret = PTR_ERR_OR_ZERO(platform_device_register_simple( - "qcom-cpufreq-kryo", -1, NULL, 0)); + kryo_cpufreq_pdev = platform_device_register_simple( + "qcom-cpufreq-kryo", -1, NULL, 0); + ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev); if (0 == ret) return 0; @@ -208,5 +220,12 @@ static int __init qcom_cpufreq_kryo_init(void) } module_init(qcom_cpufreq_kryo_init); +static void __init qcom_cpufreq_kryo_exit(void) +{ + platform_device_unregister(kryo_cpufreq_pdev); + platform_driver_unregister(&qcom_cpufreq_kryo_driver); +} +module_exit(qcom_cpufreq_kryo_exit); + MODULE_DESCRIPTION("Qualcomm Technologies, Inc. 
Kryo CPUfreq driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 00c7aab8e7d0..afebbd87c4aa 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1548,15 +1548,14 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, tp->urg_data = 0; if ((avail + offset) >= skb->len) { - if (likely(skb)) - chtls_free_skb(sk, skb); - buffers_freed++; if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) { tp->copied_seq += skb->len; hws->rcvpld = skb->hdr_len; } else { tp->copied_seq += hws->rcvpld; } + chtls_free_skb(sk, skb); + buffers_freed++; hws->copied_seq = 0; if (copied >= target && !skb_peek(&sk->sk_receive_queue)) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index de2f8297a210..108c37fca782 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, /* prevent private mappings from being established */ if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { - dev_info(dev, "%s: %s: fail, attempted private mapping\n", + dev_info_ratelimited(dev, + "%s: %s: fail, attempted private mapping\n", current->comm, func); return -EINVAL; } mask = dax_region->align - 1; if (vma->vm_start & mask || vma->vm_end & mask) { - dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", + dev_info_ratelimited(dev, + "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", current->comm, func, vma->vm_start, vma->vm_end, mask); return -EINVAL; @@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV && (vma->vm_flags & VM_DONTCOPY) == 0) { - dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n", + dev_info_ratelimited(dev, + "%s: %s: fail, dax range requires MADV_DONTFORK\n", current->comm, func); return -EINVAL; } if (!vma_is_dax(vma)) { - dev_info(dev, "%s: %s: fail, vma is not DAX capable\n", + dev_info_ratelimited(dev, + "%s: %s: fail, vma is not DAX capable\n", current->comm, func); return -EINVAL; } diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 903d9c473749..45276abf03aa 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) { struct dax_device *dax_dev; bool dax_enabled = false; + struct request_queue *q; pgoff_t pgoff; int err, id; void *kaddr; @@ -99,6 +100,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) return false; } + q = bdev_get_queue(bdev); + if (!q || !blk_queue_dax(q)) { + pr_debug("%s: error: request queue doesn't support dax\n", + bdevname(bdev, buf)); + return false; + } + err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); if (err) { pr_debug("%s: error: unaligned partition for dax\n", diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index fa31cccbe04f..6bfa217ed6d0 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct k3_dma_dev *d = ofdma->of_dma_data; unsigned int request = dma_spec->args[0]; - if (request > d->dma_requests) + if (request >= d->dma_requests) return NULL; return dma_get_slave_channel(&(d->chans[request].vc.chan)); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index defcdde4d358..de0957fe9668 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ 
-3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->src_addr_widths = PL330_DMA_BUSWIDTHS; pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ? 1 : PL330_MAX_BURST); diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c index 9b5ca8691f27..a4a931ddf6f6 100644 --- a/drivers/dma/ti/omap-dma.c +++ b/drivers/dma/ti/omap-dma.c @@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev) od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + if (__dma_omap15xx(od->plat->dma_attr)) + od->ddev.residue_granularity = + DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + else + od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ od->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&od->ddev.channels); diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index caa37a6dd9d4..a90b0b8fc69a 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -64,7 +64,7 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg) efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID; efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; efi_status_t status; - efi_physical_addr_t log_location, log_last_entry; + efi_physical_addr_t log_location = 0, log_last_entry = 0; struct linux_efi_tpm_eventlog *log_tbl = NULL; unsigned long first_entry_addr, last_entry_addr; size_t log_size, last_entry_size; diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index dd4edd8f22ce..7fa793672a7a 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev, mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name, &altera_cvp_ops, conf); - if (!mgr) - return -ENOMEM; + if (!mgr) { + ret = -ENOMEM; + goto err_unmap; + } pci_set_drvdata(pdev, mgr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a59c07590cee..7dcbac8af9a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -190,6 +190,7 @@ struct amdgpu_job; struct amdgpu_irq_src; struct amdgpu_fpriv; struct amdgpu_bo_va_mapping; +struct amdgpu_atif; enum amdgpu_cp_irq { AMDGPU_CP_IRQ_GFX_EOP = 0, @@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch { /* * ACPI */ -struct amdgpu_atif_notification_cfg { - bool enabled; - int command_code; -}; - -struct amdgpu_atif_notifications { - bool display_switch; - bool expansion_mode_change; - bool thermal_state; - bool forced_power_state; - bool system_power_state; - bool display_conf_change; - bool px_gfx_switch; - bool brightness_change; - bool dgpu_display_event; -}; - -struct amdgpu_atif_functions { - bool system_params; - bool sbios_requests; - bool select_active_disp; - bool lid_state; - bool get_tv_standard; - bool set_tv_standard; - bool get_panel_expansion_mode; - bool set_panel_expansion_mode; - bool temperature_change; - bool graphics_device_types; -}; - -struct amdgpu_atif { - struct amdgpu_atif_notifications notifications; - struct amdgpu_atif_functions functions; - struct 
amdgpu_atif_notification_cfg notification_cfg; - struct amdgpu_encoder *encoder_for_bl; -}; - struct amdgpu_atcs_functions { bool get_ext_state; bool pcie_perf_req; @@ -1466,7 +1430,7 @@ struct amdgpu_device { #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; #endif - struct amdgpu_atif atif; + struct amdgpu_atif *atif; struct amdgpu_atcs atcs; struct mutex srbm_mutex; /* GRBM index mutex. Protects concurrent access to GRBM index */ @@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; static inline bool amdgpu_has_atpx(void) { return false; } #endif +#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI) +void *amdgpu_atpx_get_dhandle(void); +#else +static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } +#endif + /* * KMS */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 8fa850a070e0..0d8c3fc6eace 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -34,6 +34,45 @@ #include "amd_acpi.h" #include "atom.h" +struct amdgpu_atif_notification_cfg { + bool enabled; + int command_code; +}; + +struct amdgpu_atif_notifications { + bool display_switch; + bool expansion_mode_change; + bool thermal_state; + bool forced_power_state; + bool system_power_state; + bool display_conf_change; + bool px_gfx_switch; + bool brightness_change; + bool dgpu_display_event; +}; + +struct amdgpu_atif_functions { + bool system_params; + bool sbios_requests; + bool select_active_disp; + bool lid_state; + bool get_tv_standard; + bool set_tv_standard; + bool get_panel_expansion_mode; + bool set_panel_expansion_mode; + bool temperature_change; + bool graphics_device_types; +}; + +struct amdgpu_atif { + acpi_handle handle; + + struct amdgpu_atif_notifications notifications; + struct amdgpu_atif_functions functions; + struct amdgpu_atif_notification_cfg notification_cfg; + struct amdgpu_encoder *encoder_for_bl; +}; + /* Call the ATIF method */ /** @@ -46,8 +85,9 @@ * Executes the requested ATIF function (all asics). * Returns a pointer to the acpi output buffer. */ -static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, - struct acpi_buffer *params) +static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, + int function, + struct acpi_buffer *params) { acpi_status status; union acpi_object atif_arg_elements[2]; @@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, atif_arg_elements[1].integer.value = 0; } - status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); + status = acpi_evaluate_object(atif->handle, NULL, &atif_arg, + &buffer); /* Fail only if calling the method fails and ATIF is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { @@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas * (all asics). * returns 0 on success, error on failure. 
*/ -static int amdgpu_atif_verify_interface(acpi_handle handle, - struct amdgpu_atif *atif) +static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif) { union acpi_object *info; struct atif_verify_interface output; size_t size; int err = 0; - info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); + info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); if (!info) return -EIO; @@ -176,6 +216,35 @@ static int amdgpu_atif_verify_interface(acpi_handle handle, return err; } +static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle) +{ + acpi_handle handle = NULL; + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name }; + acpi_status status; + + /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace; on dGPU-only + * systems, ATIF is in the dGPU's namespace. + */ + status = acpi_get_handle(dhandle, "ATIF", &handle); + if (ACPI_SUCCESS(status)) + goto out; + + if (amdgpu_has_atpx()) { + status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF", + &handle); + if (ACPI_SUCCESS(status)) + goto out; + } + + DRM_DEBUG_DRIVER("No ATIF handle found\n"); + return NULL; +out: + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name); + return handle; +} + /** * amdgpu_atif_get_notification_params - determine notify configuration * @@ -188,15 +257,16 @@ static int amdgpu_atif_verify_interface(acpi_handle handle, * where n is specified in the result if a notifier is used. * Returns 0 on success, error on failure.
*/ -static int amdgpu_atif_get_sbios_requests(acpi_handle handle, - struct atif_sbios_requests *req) +static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif, + struct atif_sbios_requests *req) { union acpi_object *info; size_t size; int count = 0; - info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); + info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, + NULL); if (!info) return -EIO; @@ -290,11 +361,10 @@ static int amdgpu_atif_get_sbios_requests(acpi_handle handle, * Returns NOTIFY code */ static int amdgpu_atif_handler(struct amdgpu_device *adev, - struct acpi_bus_event *event) + struct acpi_bus_event *event) { - struct amdgpu_atif *atif = &adev->atif; + struct amdgpu_atif *atif = adev->atif; struct atif_sbios_requests req; - acpi_handle handle; int count; DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", @@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) return NOTIFY_DONE; - if (!atif->notification_cfg.enabled || + if (!atif || + !atif->notification_cfg.enabled || event->type != atif->notification_cfg.command_code) /* Not our event */ return NOTIFY_DONE; /* Check pending SBIOS requests */ - handle = ACPI_HANDLE(&adev->pdev->dev); - count = amdgpu_atif_get_sbios_requests(handle, &req); + count = amdgpu_atif_get_sbios_requests(atif, &req); if (count <= 0) return NOTIFY_DONE; @@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb, */ int amdgpu_acpi_init(struct amdgpu_device *adev) { - acpi_handle handle; - struct amdgpu_atif *atif = &adev->atif; + acpi_handle handle, atif_handle; + struct amdgpu_atif *atif; struct amdgpu_atcs *atcs = &adev->atcs; int ret; @@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); } - /* Call the ATIF method */ - ret = amdgpu_atif_verify_interface(handle, atif); - if (ret) { - DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); + /* Probe for ATIF, and initialize it if found */ + atif_handle = amdgpu_atif_probe_handle(handle); + if (!atif_handle) + goto out; + + atif = kzalloc(sizeof(*atif), GFP_KERNEL); + if (!atif) { + DRM_WARN("Not enough memory to initialize ATIF\n"); goto out; } + atif->handle = atif_handle; + + /* Call the ATIF method */ + ret = amdgpu_atif_verify_interface(atif); + if (ret) { + DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); + kfree(atif); + goto out; + } + adev->atif = atif; if (atif->notifications.brightness_change) { struct drm_encoder *tmp; @@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) } if (atif->functions.system_params) { - ret = amdgpu_atif_get_notification_params(handle, - &atif->notification_cfg); + ret = amdgpu_atif_get_notification_params(atif); if (ret) { DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", ret); @@ -720,4 +803,6 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) void amdgpu_acpi_fini(struct amdgpu_device *adev) { unregister_acpi_notifier(&adev->acpi_nb); + if (adev->atif) + kfree(adev->atif); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index daa06e7c5bb7..9ab89371d9e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; } 
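An illustrative aside (not part of the patch): the pathname argument of acpi_evaluate_object() becomes NULL in the hunk above because the ATIF method handle is now resolved once by amdgpu_atif_probe_handle() (possibly from the ATPX namespace on PX/HG systems) and cached in struct amdgpu_atif. A minimal sketch of the two call forms, assuming a prepared argument list and output buffer:

	/* old form: resolve the "ATIF" name under the device handle on every call */
	status = acpi_evaluate_object(dev_handle, "ATIF", &atif_arg, &buffer);

	/* new form: evaluate the cached handle directly; a NULL pathname asks
	 * ACPICA to evaluate the object the handle itself refers to */
	status = acpi_evaluate_object(atif->handle, NULL, &atif_arg, &buffer);

Either form returns an acpi_status, and as in the patch, AE_NOT_FOUND is treated as "ATIF not supported" while any other failure is an error.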
+#if defined(CONFIG_ACPI) +void *amdgpu_atpx_get_dhandle(void) { + return amdgpu_atpx_priv.dhandle; +} +#endif + /** * amdgpu_atpx_call - call an ATPX method * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3317d1536f4f..6e5284e6c028 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) switch (asic_type) { #if defined(CONFIG_DRM_AMD_DC) case CHIP_BONAIRE: - case CHIP_HAWAII: case CHIP_KAVERI: case CHIP_KABINI: case CHIP_MULLINS: + /* + * We have systems in the wild with these ASICs that require + * LVDS and VGA support which is not supported with DC. + * + * Fallback to the non-DC driver here by default so as not to + * cause regressions. + */ + return amdgpu_dc > 0; + case CHIP_HAWAII: case CHIP_CARRIZO: case CHIP_STONEY: case CHIP_POLARIS10: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 39ec6b8890a1..e74d620d9699 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; uint64_t index; - if (ring != &adev->uvd.inst[ring->me].ring) { + if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) { ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index f70eeed9ed76..7aaa263ad8c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE) fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY; + /* wrap the last IB with fence */ + if (job && job->uf_addr) { + amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, + fence_flags | AMDGPU_FENCE_FLAG_64BIT); + } + r = amdgpu_fence_emit(ring, f, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); @@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->insert_end) ring->funcs->insert_end(ring); - /* wrap the last IB with fence */ - if (job && job->uf_addr) { - amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, - fence_flags | AMDGPU_FENCE_FLAG_64BIT); - } - if (patch_offset != ~0 && ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 5e4e1bd90383..3526efa8960e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); if (domain == AMDGPU_GEM_DOMAIN_VRAM) { adev->vram_pin_size += amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - adev->invisible_pin_size += amdgpu_bo_size(bo); + adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo); } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { adev->gart_pin_size += amdgpu_bo_size(bo); } @@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) bo->pin_count--; if (bo->pin_count) return 0; + + if (bo->tbo.mem.mem_type 
== TTM_PL_VRAM) { + adev->vram_pin_size -= amdgpu_bo_size(bo); + adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo); + } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { + adev->gart_pin_size -= amdgpu_bo_size(bo); + } + for (i = 0; i < bo->placement.num_placement; i++) { bo->placements[i].lpfn = 0; bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (unlikely(r)) { + if (unlikely(r)) dev_err(adev->dev, "%p validate failed for unpin\n", bo); - goto error; - } - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { - adev->vram_pin_size -= amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - adev->invisible_pin_size -= amdgpu_bo_size(bo); - } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { - adev->gart_pin_size -= amdgpu_bo_size(bo); - } - -error: return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index b455da487782..fc818b4d849c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1882,7 +1882,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) if (!amdgpu_device_has_dc_support(adev)) { mutex_lock(&adev->pm.mutex); amdgpu_dpm_get_active_displays(adev); - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs; + adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index e969c879d87e..e5da4654b630 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); +u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo); uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index bcf68f80bbf0..3ff08e326838 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) unsigned version_major, version_minor, family_id; int i, j, r; - INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); + INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK @@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) void *ptr; int i, j; + cancel_delayed_work_sync(&adev->uvd.idle_work); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { if (adev->uvd.inst[j].vcpu_bo == NULL) continue; - cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work); - /* only valid for physical mode */ if (adev->asic_type < CHIP_POLARIS10) { for (i = 0; i < adev->uvd.max_handles; ++i) @@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = - container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); + container_of(work, struct 
amdgpu_device, uvd.idle_work.work); unsigned fences = 0, i, j; for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { @@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) AMD_CG_STATE_GATE); } } else { - schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); } } @@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) if (amdgpu_sriov_vf(adev)) return; - set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); + set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, true); @@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) { if (!amdgpu_sriov_vf(ring->adev)) - schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index b1579fba134c..8b23a1b00c76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -44,7 +44,6 @@ struct amdgpu_uvd_inst { void *saved_bo; atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; - struct delayed_work idle_work; struct amdgpu_ring ring; struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; struct amdgpu_irq_src irq; @@ -62,6 +61,7 @@ struct amdgpu_uvd { bool address_64_bit; bool use_ctx_buf; struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; + struct delayed_work idle_work; }; int amdgpu_uvd_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 127e87b470ff..1b4ad9b2a755 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; - unsigned version_major, version_minor, family_id; + unsigned char fw_check; int r; INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); @@ -83,12 +83,33 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vcn.fw->data; adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); - family_id = le32_to_cpu(hdr->ucode_version) & 0xff; - version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; - version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; - DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n", - version_major, version_minor, family_id); + /* Bits 20-23 hold the encode major version and are non-zero for the new naming + * convention. This field is part of the version minor and DRM_DISABLED_FLAG in the + * old naming convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG + * is zero in the old naming convention, this field is always zero so far. + * These four bits are used to tell which naming convention is present.
+ */ + fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf; + if (fw_check) { + unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev; + + fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff; + enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff; + enc_major = fw_check; + dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf; + vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf; + DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n", + enc_major, enc_minor, dec_ver, vep, fw_rev); + } else { + unsigned int version_major, version_minor, family_id; + + family_id = le32_to_cpu(hdr->ucode_version) & 0xff; + version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; + version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; + DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n", + version_major, version_minor, family_id); + } bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b0eb2f537392..edf16b2b957a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1463,7 +1463,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, uint64_t count; max_entries = min(max_entries, 16ull * 1024ull); - for (count = 1; count < max_entries; ++count) { + for (count = 1; + count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); + ++count) { uint64_t idx = pfn + count; if (pages_addr[idx] != @@ -1476,7 +1478,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, dma_addr = pages_addr; } else { addr = pages_addr[pfn]; - max_entries = count; + max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); } } else if (flags & AMDGPU_PTE_VALID) { @@ -1491,7 +1493,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, if (r) return r; - pfn += last - start + 1; + pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); if (nodes && nodes->size == pfn) { pfn = 0; ++nodes; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 9aca653bec07..b6333f92ba45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -96,6 +96,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, adev->gmc.visible_vram_size : end) - start; } +/** + * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size + * + * @bo: &amdgpu_bo buffer object (must be in VRAM) + * + * Returns: + * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM. 
+ */ +u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_mem_reg *mem = &bo->tbo.mem; + struct drm_mm_node *nodes = mem->mm_node; + unsigned pages = mem->num_pages; + u64 usage = 0; + + if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size) + return 0; + + if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) + return amdgpu_bo_size(bo); + + while (nodes && pages) { + usage += nodes->size << PAGE_SHIFT; + usage -= amdgpu_vram_mgr_vis_size(adev, nodes); + pages -= nodes->size; + ++nodes; + } + + return usage; +} + /** * amdgpu_vram_mgr_new - allocate new ranges * @@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); } - nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL); + nodes = kvmalloc_array(num_nodes, sizeof(*nodes), + GFP_KERNEL | __GFP_ZERO); if (!nodes) return -ENOMEM; @@ -190,7 +223,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, drm_mm_remove_node(&nodes[i]); spin_unlock(&mgr->lock); - kfree(nodes); + kvfree(nodes); return r == -ENOSPC ? 0 : r; } @@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, atomic64_sub(usage, &mgr->usage); atomic64_sub(vis_usage, &mgr->vis_usage); - kfree(mem->mm_node); + kvfree(mem->mm_node); mem->mm_node = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 0999c843f623..a71b97519cc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { .emit_frame_size = 4 + /* vce_v3_0_emit_pipeline_sync */ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ - .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ + .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ .emit_ib = amdgpu_vce_ring_emit_ib, .emit_fence = amdgpu_vce_ring_emit_fence, .test_ring = amdgpu_vce_ring_test_ring, @@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { 6 + /* vce_v3_0_emit_vm_flush */ 4 + /* vce_v3_0_emit_pipeline_sync */ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ - .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ + .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ .emit_ib = vce_v3_0_ring_emit_ib, .emit_vm_flush = vce_v3_0_emit_vm_flush, .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f9add85157e7..770c6b24be0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2175,6 +2175,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) return color_space; } +static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) +{ + if (timing_out->display_color_depth <= COLOR_DEPTH_888) + return; + + timing_out->display_color_depth--; +} + +static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, + const struct drm_display_info *info) +{ + int normalized_clk; + if (timing_out->display_color_depth <= COLOR_DEPTH_888) + return; + do { + normalized_clk = timing_out->pix_clk_khz; + /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ + if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) + normalized_clk /= 2; + /* Adjusting pix clock following on HDMI spec based on colour depth */ + switch 
(timing_out->display_color_depth) { + case COLOR_DEPTH_101010: + normalized_clk = (normalized_clk * 30) / 24; + break; + case COLOR_DEPTH_121212: + normalized_clk = (normalized_clk * 36) / 24; + break; + case COLOR_DEPTH_161616: + normalized_clk = (normalized_clk * 48) / 24; + break; + default: + return; + } + if (normalized_clk <= info->max_tmds_clock) + return; + reduce_mode_colour_depth(timing_out); + + } while (timing_out->display_color_depth > COLOR_DEPTH_888); + +} /*****************************************************************************/ static void @@ -2183,6 +2223,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, const struct drm_connector *connector) { struct dc_crtc_timing *timing_out = &stream->timing; + const struct drm_display_info *info = &connector->display_info; memset(timing_out, 0, sizeof(struct dc_crtc_timing)); @@ -2191,8 +2232,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, timing_out->v_border_top = 0; timing_out->v_border_bottom = 0; /* TODO: un-hardcode */ - - if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) + if (drm_mode_is_420_only(info, mode_in) + && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; else @@ -2228,6 +2271,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + adjust_colour_depth_from_display_info(timing_out, info); } static void fill_audio_info(struct audio_info *audio_info, @@ -3928,10 +3973,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, if (acrtc->base.state->event) prepare_flip_isr(acrtc); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0]; surface_updates->flip_addr = &addr; - dc_commit_updates_for_stream(adev->dm.dc, surface_updates, 1, @@ -3944,9 +3990,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, __func__, addr.address.grph.addr.high_part, addr.address.grph.addr.low_part); - - - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } /* @@ -4206,6 +4249,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct drm_connector *connector; struct drm_connector_state *old_con_state, *new_con_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + int crtc_disable_count = 0; drm_atomic_helper_update_legacy_modeset_state(dev, state); @@ -4410,6 +4454,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); bool modeset_needed; + if (old_crtc_state->active && !new_crtc_state->active) + crtc_disable_count++; + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); modeset_needed = modeset_required( @@ -4463,11 +4510,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * so we can put the GPU into runtime suspend if we're not driving any * displays anymore */ + for (i = 0; i < crtc_disable_count; i++) + pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); - 
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (old_crtc_state->active && !new_crtc_state->active) - pm_runtime_put_autosuspend(dev->dev); - } } diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 092d800b703a..33b4de4ad66e 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1 uint8_t acggfxclkspreadpercent; uint16_t acggfxclkspreadfreq; - uint32_t boardreserved[10]; + uint8_t Vr2_I2C_address; + uint8_t padding_vr2[3]; + + uint32_t boardreserved[9]; }; /* diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index 5325661fedff..d27c1c9df286 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI return 0; } +static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_bios_boot_up_values *boot_values, + struct atom_firmware_info_v3_2 *fw_info) +{ + uint32_t frequency = 0; + + boot_values->ulRevision = fw_info->firmware_revision; + boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; + boot_values->ulUClk = fw_info->bootup_mclk_in10khz; + boot_values->usVddc = fw_info->bootup_vddc_mv; + boot_values->usVddci = fw_info->bootup_vddci_mv; + boot_values->usMvddc = fw_info->bootup_mvddc_mv; + boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; + boot_values->ucCoolingID = fw_info->coolingsolution_id; + boot_values->ulSocClk = 0; + boot_values->ulDCEFClk = 0; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency)) + boot_values->ulSocClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency)) + boot_values->ulDCEFClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency)) + boot_values->ulEClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency)) + boot_values->ulVClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency)) + boot_values->ulDClk = frequency; +} + +static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_bios_boot_up_values *boot_values, + struct atom_firmware_info_v3_1 *fw_info) +{ + uint32_t frequency = 0; + + boot_values->ulRevision = fw_info->firmware_revision; + boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; + boot_values->ulUClk = fw_info->bootup_mclk_in10khz; + boot_values->usVddc = fw_info->bootup_vddc_mv; + boot_values->usVddci = fw_info->bootup_vddci_mv; + boot_values->usMvddc = fw_info->bootup_mvddc_mv; + boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; + boot_values->ucCoolingID = fw_info->coolingsolution_id; + boot_values->ulSocClk = 0; + boot_values->ulDCEFClk = 0; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) + boot_values->ulSocClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) + boot_values->ulDCEFClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency)) + boot_values->ulEClk = frequency; + + if 
(!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency)) + boot_values->ulVClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency)) + boot_values->ulDClk = frequency; +} + int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_bios_boot_up_values *boot_values) { - struct atom_firmware_info_v3_1 *info = NULL; + struct atom_firmware_info_v3_2 *fwinfo_3_2; + struct atom_firmware_info_v3_1 *fwinfo_3_1; + struct atom_common_table_header *info = NULL; uint16_t ix; ix = GetIndexIntoMasterDataTable(firmwareinfo); - info = (struct atom_firmware_info_v3_1 *) + info = (struct atom_common_table_header *) smu_atom_get_data_table(hwmgr->adev, ix, NULL, NULL, NULL); @@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, return -EINVAL; } - boot_values->ulRevision = info->firmware_revision; - boot_values->ulGfxClk = info->bootup_sclk_in10khz; - boot_values->ulUClk = info->bootup_mclk_in10khz; - boot_values->usVddc = info->bootup_vddc_mv; - boot_values->usVddci = info->bootup_vddci_mv; - boot_values->usMvddc = info->bootup_mvddc_mv; - boot_values->usVddGfx = info->bootup_vddgfx_mv; - boot_values->ucCoolingID = info->coolingsolution_id; - boot_values->ulSocClk = 0; - boot_values->ulDCEFClk = 0; + if ((info->format_revision == 3) && (info->content_revision == 2)) { + fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info; + pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr, + boot_values, fwinfo_3_2); + } else if ((info->format_revision == 3) && (info->content_revision == 1)) { + fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info; + pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr, + boot_values, fwinfo_3_1); + } else { + pr_info("Fw info table revision does not match!"); + return -EINVAL; + } return 0; } @@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, param->acggfxclkspreadpercent = info->acggfxclkspreadpercent; param->acggfxclkspreadfreq = info->acggfxclkspreadfreq; + param->Vr2_I2C_address = info->Vr2_I2C_address; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index fe10aa4db5e6..22e21668c93a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values { uint32_t ulUClk; uint32_t ulSocClk; uint32_t ulDCEFClk; + uint32_t ulEClk; + uint32_t ulVClk; + uint32_t ulDClk; uint16_t usVddc; uint16_t usVddci; uint16_t usMvddc; @@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters uint8_t acggfxclkspreadenabled; uint8_t acggfxclkspreadpercent; uint16_t acggfxclkspreadfreq; + + uint8_t Vr2_I2C_address; }; int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index dbe4b1f66784..22364875a943 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr) static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = hwmgr->adev; - int result; + int result = 0; uint32_t num_se = 0; uint32_t count, data; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 782e2098824d..c98e5de777cd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.disallowed_features = 0x0; data->registry_data.od_state_in_dc_support = 0; + data->registry_data.thermal_support = 1; data->registry_data.skip_baco_hardware = 0; data->registry_data.log_avfs_param = 0; @@ -803,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; + data->vbios_boot_state.eclock = boot_up_values.ulEClk; + data->vbios_boot_state.dclock = boot_up_values.ulDClk; + data->vbios_boot_state.vclock = boot_up_values.ulVClk; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h index e81ded1ec198..49b38df8c7f2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h @@ -167,6 +167,9 @@ struct vega12_vbios_boot_state { uint32_t mem_clock; uint32_t soc_clock; uint32_t dcef_clock; + uint32_t eclock; + uint32_t dclock; + uint32_t vclock; }; #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c index 888ddca902d8..29914700ee82 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c @@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF; } + ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index 2f8a3b983cce..b08526fd1619 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -499,7 +499,10 @@ typedef struct { uint8_t AcgGfxclkSpreadPercent; uint16_t AcgGfxclkSpreadFreq; - uint32_t BoardReserved[10]; + uint8_t Vr2_I2C_address; + uint8_t padding_vr2[3]; + + uint32_t BoardReserved[9]; uint32_t MmHubPadding[7]; diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 8d20faa198cf..0a788d76ed5f 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -278,7 +278,6 @@ static int malidp_init(struct drm_device *drm) static void malidp_fini(struct drm_device *drm) { - drm_atomic_helper_shutdown(drm); drm_mode_config_cleanup(drm); } @@ -646,6 +645,7 @@ static int malidp_bind(struct device *dev) malidp_de_irq_fini(drm); drm->irq_enabled = false; irq_init_fail: + drm_atomic_helper_shutdown(drm); component_unbind_all(dev, drm); bind_fail: of_node_put(malidp->crtc.port); @@ -681,6 +681,7 @@ static void malidp_unbind(struct device *dev) malidp_se_irq_fini(drm); malidp_de_irq_fini(drm); drm->irq_enabled = false; + drm_atomic_helper_shutdown(drm); component_unbind_all(dev, drm); of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; diff --git 
a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index d789b46dc817..069783e715f1 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -634,7 +634,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { .vsync_irq = MALIDP500_DE_IRQ_VSYNC, }, .se_irq_map = { - .irq_mask = MALIDP500_SE_IRQ_CONF_MODE, + .irq_mask = MALIDP500_SE_IRQ_CONF_MODE | + MALIDP500_SE_IRQ_GLOBAL, .vsync_irq = 0, }, .dc_irq_map = { diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 7a44897c50fe..29409a65d864 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -23,6 +23,7 @@ /* Layer specific register offsets */ #define MALIDP_LAYER_FORMAT 0x000 +#define LAYER_FORMAT_MASK 0x3f #define MALIDP_LAYER_CONTROL 0x004 #define LAYER_ENABLE (1 << 0) #define LAYER_FLOWCFG_MASK 7 @@ -235,8 +236,8 @@ static int malidp_de_plane_check(struct drm_plane *plane, if (state->rotation & MALIDP_ROTATED_MASK) { int val; - val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h, - state->crtc_w, + val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w, + state->crtc_h, fb->format->format); if (val < 0) return val; @@ -337,7 +338,9 @@ static void malidp_de_plane_update(struct drm_plane *plane, dest_w = plane->state->crtc_w; dest_h = plane->state->crtc_h; - malidp_hw_write(mp->hwdev, ms->format, mp->layer->base); + val = malidp_hw_read(mp->hwdev, mp->layer->base); + val = (val & ~LAYER_FORMAT_MASK) | ms->format; + malidp_hw_write(mp->hwdev, val, mp->layer->base); for (i = 0; i < ms->n_planes; i++) { /* calculate the offset for the layer's plane registers */ diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 73c875db45f4..47e0992f3908 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -839,7 +839,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane) return ret; } - if (desc->layout.xstride && desc->layout.pstride) { + if (desc->layout.xstride[0] && desc->layout.pstride[0]) { int ret; ret = drm_plane_create_rotation_property(&plane->base, diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 7ab36042a822..a6e8f4591e63 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -36,8 +37,11 @@ #define SII8620_BURST_BUF_LEN 288 #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) -#define MHL1_MAX_LCLK 225000 -#define MHL3_MAX_LCLK 600000 + +#define MHL1_MAX_PCLK 75000 +#define MHL1_MAX_PCLK_PP_MODE 150000 +#define MHL3_MAX_PCLK 200000 +#define MHL3_MAX_PCLK_PP_MODE 300000 enum sii8620_mode { CM_DISCONNECTED, @@ -69,9 +73,7 @@ struct sii8620 { struct regulator_bulk_data supplies[2]; struct mutex lock; /* context lock, protects fields below */ int error; - int pixel_clock; unsigned int use_packed_pixel:1; - int video_code; enum sii8620_mode mode; enum sii8620_sink_type sink_type; u8 cbus_status; @@ -79,7 +81,9 @@ struct sii8620 { u8 xstat[MHL_XDS_SIZE]; u8 devcap[MHL_DCAP_SIZE]; u8 xdevcap[MHL_XDC_SIZE]; - u8 avif[HDMI_INFOFRAME_SIZE(AVI)]; + bool feature_complete; + bool devcap_read; + bool sink_detected; struct edid *edid; unsigned int gen2_write_burst:1; enum sii8620_mt_state mt_state; @@ -476,7 +480,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count) } } 
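An illustrative aside (not part of the patch): the new MHL1/MHL3 constants above are per-mode pixel-clock limits rather than link-clock limits, with a separate, higher limit when packed-pixel mode is available. A small, self-contained sketch of the resulting decision (mirroring sii8620_is_packing_required() further below) for a hypothetical 1080p60 mode with a 148500 kHz pixel clock on an MHL1 link; the helper and main() are illustrative only:

#include <stdio.h>

#define MHL1_MAX_PCLK          75000	/* kHz, normal (unpacked) mode */
#define MHL1_MAX_PCLK_PP_MODE 150000	/* kHz, packed-pixel mode */

/* 0: no packing needed, 1: packing required, -1: pixel clock too high */
static int is_packing_required_mhl1(int clock_khz)
{
	if (clock_khz < MHL1_MAX_PCLK)
		return 0;
	else if (clock_khz < MHL1_MAX_PCLK_PP_MODE)
		return 1;
	else
		return -1;
}

int main(void)
{
	/* 1080p60: 148500 kHz -> returns 1, so mode_valid() accepts the mode
	 * only if the sink's DEVCAP advertises MHL_DCAP_VID_LINK_PPIXEL */
	printf("%d\n", is_packing_required_mhl1(148500));
	return 0;
}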
-static void sii8620_sink_detected(struct sii8620 *ctx, int ret) +static void sii8620_identify_sink(struct sii8620 *ctx) { static const char * const sink_str[] = { [SINK_NONE] = "NONE", @@ -487,7 +491,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret) char sink_name[20]; struct device *dev = ctx->dev; - if (ret < 0) + if (!ctx->sink_detected || !ctx->devcap_read) return; sii8620_fetch_edid(ctx); @@ -496,6 +500,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret) sii8620_mhl_disconnected(ctx); return; } + sii8620_set_upstream_edid(ctx); if (drm_detect_hdmi_monitor(ctx->edid)) ctx->sink_type = SINK_HDMI; @@ -508,53 +513,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret) sink_str[ctx->sink_type], sink_name); } -static void sii8620_hsic_init(struct sii8620 *ctx) -{ - if (!sii8620_is_mhl3(ctx)) - return; - - sii8620_write(ctx, REG_FCGC, - BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE); - sii8620_setbits(ctx, REG_HRXCTRL3, - BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0); - sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4); - sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0); - sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0); - sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST); - sii8620_write_seq_static(ctx, - REG_TDMLLCTL, 0, - REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST | - BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST, - REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST, - REG_HRXINTL, 0xff, - REG_HRXINTH, 0xff, - REG_TTXINTL, 0xff, - REG_TTXINTH, 0xff, - REG_TRXINTL, 0xff, - REG_TRXINTH, 0xff, - REG_HTXINTL, 0xff, - REG_HTXINTH, 0xff, - REG_FCINTR0, 0xff, - REG_FCINTR1, 0xff, - REG_FCINTR2, 0xff, - REG_FCINTR3, 0xff, - REG_FCINTR4, 0xff, - REG_FCINTR5, 0xff, - REG_FCINTR6, 0xff, - REG_FCINTR7, 0xff - ); -} - -static void sii8620_edid_read(struct sii8620 *ctx, int ret) -{ - if (ret < 0) - return; - - sii8620_set_upstream_edid(ctx); - sii8620_hsic_init(ctx); - sii8620_enable_hpd(ctx); -} - static void sii8620_mr_devcap(struct sii8620 *ctx) { u8 dcap[MHL_DCAP_SIZE]; @@ -570,6 +528,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx) dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L], dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]); sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); + ctx->devcap_read = true; + sii8620_identify_sink(ctx); } static void sii8620_mr_xdevcap(struct sii8620 *ctx) @@ -807,6 +767,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx) static void sii8620_fetch_edid(struct sii8620 *ctx) { u8 lm_ddc, ddc_cmd, int3, cbus; + unsigned long timeout; int fetched, i; int edid_len = EDID_LENGTH; u8 *edid; @@ -856,23 +817,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx) REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK ); - do { - int3 = sii8620_readb(ctx, REG_INTR3); + int3 = 0; + timeout = jiffies + msecs_to_jiffies(200); + for (;;) { cbus = sii8620_readb(ctx, REG_CBUS_STATUS); - - if (int3 & BIT_DDC_CMD_DONE) - break; - - if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) { + if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) { + kfree(edid); + edid = NULL; + goto end; + } + if (int3 & BIT_DDC_CMD_DONE) { + if (sii8620_readb(ctx, REG_DDC_DOUT_CNT) + >= FETCH_SIZE) + break; + } else { + int3 = sii8620_readb(ctx, REG_INTR3); + } + if (time_is_before_jiffies(timeout)) { + ctx->error = -ETIMEDOUT; + dev_err(ctx->dev, "timeout during EDID read\n"); kfree(edid); edid = NULL; goto end; } - } while (1); - - 
sii8620_readb(ctx, REG_DDC_STATUS); - while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE) usleep_range(10, 20); + } sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); if (fetched + FETCH_SIZE == EDID_LENGTH) { @@ -971,8 +940,17 @@ static int sii8620_hw_on(struct sii8620 *ctx) ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); if (ret) return ret; + usleep_range(10000, 20000); - return clk_prepare_enable(ctx->clk_xtal); + ret = clk_prepare_enable(ctx->clk_xtal); + if (ret) + return ret; + + msleep(100); + gpiod_set_value(ctx->gpio_reset, 0); + msleep(100); + + return 0; } static int sii8620_hw_off(struct sii8620 *ctx) @@ -982,17 +960,6 @@ static int sii8620_hw_off(struct sii8620 *ctx) return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); } -static void sii8620_hw_reset(struct sii8620 *ctx) -{ - usleep_range(10000, 20000); - gpiod_set_value(ctx->gpio_reset, 0); - usleep_range(5000, 20000); - gpiod_set_value(ctx->gpio_reset, 1); - usleep_range(10000, 20000); - gpiod_set_value(ctx->gpio_reset, 0); - msleep(300); -} - static void sii8620_cbus_reset(struct sii8620 *ctx) { sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST @@ -1055,23 +1022,23 @@ static void sii8620_set_format(struct sii8620 *ctx) BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, ctx->use_packed_pixel ? ~0 : 0); } else { - if (ctx->use_packed_pixel) + if (ctx->use_packed_pixel) { sii8620_write_seq_static(ctx, REG_VID_MODE, BIT_VID_MODE_M1080P, REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1, REG_MHLTX_CTL6, 0x60 ); - else + } else { sii8620_write_seq_static(ctx, REG_VID_MODE, 0, REG_MHL_TOP_CTL, 1, REG_MHLTX_CTL6, 0xa0 ); + } } if (ctx->use_packed_pixel) - out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) | - BIT_TPI_OUTPUT_CSCMODE709; + out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL); else out_fmt = VAL_TPI_FORMAT(RGB, FULL); @@ -1128,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame, return frm_len; } -static void sii8620_set_infoframes(struct sii8620 *ctx) +static void sii8620_set_infoframes(struct sii8620 *ctx, + struct drm_display_mode *mode) { struct mhl3_infoframe mhl_frm; union hdmi_infoframe frm; u8 buf[31]; int ret; + ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, + mode, + true); + if (ctx->use_packed_pixel) + frm.avi.colorspace = HDMI_COLORSPACE_YUV422; + + if (!ret) + ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf)); + if (ret > 0) + sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3); + if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) { sii8620_write(ctx, REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); - sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3, - ARRAY_SIZE(ctx->avif) - 3); sii8620_write(ctx, REG_PKT_FILTER_0, BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | BIT_PKT_FILTER_0_DROP_MPEG_PKT | @@ -1148,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx) return; } - ret = hdmi_avi_infoframe_init(&frm.avi); - frm.avi.colorspace = HDMI_COLORSPACE_YUV422; - frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; - frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9; - frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709; - frm.avi.video_code = ctx->video_code; - if (!ret) - ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf)); - if (ret > 0) - sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3); sii8620_write(ctx, REG_PKT_FILTER_0, BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | BIT_PKT_FILTER_0_DROP_MPEG_PKT | @@ -1177,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx) static void 
sii8620_start_video(struct sii8620 *ctx) { + struct drm_display_mode *mode = + &ctx->bridge.encoder->crtc->state->adjusted_mode; + if (!sii8620_is_mhl3(ctx)) sii8620_stop_video(ctx); @@ -1195,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx) sii8620_set_format(ctx); if (!sii8620_is_mhl3(ctx)) { - sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), - MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED); + u8 link_mode = MHL_DST_LM_PATH_ENABLED; + + if (ctx->use_packed_pixel) + link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL; + else + link_mode |= MHL_DST_LM_CLK_MODE_NORMAL; + + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode); sii8620_set_auto_zone(ctx); } else { static const struct { @@ -1213,10 +1189,10 @@ static void sii8620_start_video(struct sii8620 *ctx) MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 }, }; u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN; - int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3); + int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3); int i; - for (i = 0; i < ARRAY_SIZE(clk_spec); ++i) + for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i) if (clk < clk_spec[i].max_clk) break; @@ -1242,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx) clk_spec[i].link_rate); } - sii8620_set_infoframes(ctx); + sii8620_set_infoframes(ctx, mode); } static void sii8620_disable_hpd(struct sii8620 *ctx) @@ -1534,6 +1510,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode) ); } +static void sii8620_hpd_unplugged(struct sii8620 *ctx) +{ + sii8620_disable_hpd(ctx); + ctx->sink_type = SINK_NONE; + ctx->sink_detected = false; + ctx->feature_complete = false; + kfree(ctx->edid); + ctx->edid = NULL; +} + static void sii8620_disconnect(struct sii8620 *ctx) { sii8620_disable_gen2_write_burst(ctx); @@ -1561,7 +1547,7 @@ static void sii8620_disconnect(struct sii8620 *ctx) REG_MHL_DP_CTL6, 0x2A, REG_MHL_DP_CTL7, 0x03 ); - sii8620_disable_hpd(ctx); + sii8620_hpd_unplugged(ctx); sii8620_write_seq_static(ctx, REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, REG_MHL_COC_CTL1, 0x07, @@ -1609,10 +1595,8 @@ static void sii8620_disconnect(struct sii8620 *ctx) memset(ctx->xstat, 0, sizeof(ctx->xstat)); memset(ctx->devcap, 0, sizeof(ctx->devcap)); memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); + ctx->devcap_read = false; ctx->cbus_status = 0; - ctx->sink_type = SINK_NONE; - kfree(ctx->edid); - ctx->edid = NULL; sii8620_mt_cleanup(ctx); } @@ -1699,17 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx) static void sii8620_status_changed_path(struct sii8620 *ctx) { - if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) { - sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), - MHL_DST_LM_CLK_MODE_NORMAL - | MHL_DST_LM_PATH_ENABLED); - if (!sii8620_is_mhl3(ctx)) - sii8620_mt_read_devcap(ctx, false); - sii8620_mt_set_cont(ctx, sii8620_sink_detected); - } else { - sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), - MHL_DST_LM_CLK_MODE_NORMAL); - } + u8 link_mode; + + if (ctx->use_packed_pixel) + link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL; + else + link_mode = MHL_DST_LM_CLK_MODE_NORMAL; + + if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) + link_mode |= MHL_DST_LM_PATH_ENABLED; + + sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), + link_mode); } static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) @@ -1722,9 +1707,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); - if 
(ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) + if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] & + MHL_DST_CONN_DCAP_RDY) { sii8620_status_dcap_ready(ctx); + if (!sii8620_is_mhl3(ctx)) + sii8620_mt_read_devcap(ctx, false); + } + if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) sii8620_status_changed_path(ctx); } @@ -1808,8 +1798,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx) } if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ) sii8620_send_features(ctx); - if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) - sii8620_edid_read(ctx, 0); + if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) { + ctx->feature_complete = true; + if (ctx->edid) + sii8620_enable_hpd(ctx); + } } static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) @@ -1884,6 +1877,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx) if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) sii8620_msc_mr_write_stat(ctx); + if (stat & BIT_CBUS_HPD_CHG) { + if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) { + ctx->sink_detected = true; + sii8620_identify_sink(ctx); + } else { + sii8620_hpd_unplugged(ctx); + } + } + if (stat & BIT_CBUS_MSC_MR_SET_INT) sii8620_msc_mr_set_int(ctx); @@ -1931,14 +1933,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx) ctx->mt_state = MT_STATE_DONE; } -static void sii8620_scdt_high(struct sii8620 *ctx) -{ - sii8620_write_seq_static(ctx, - REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI, - REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI, - ); -} - static void sii8620_irq_scdt(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_INTR5); @@ -1946,53 +1940,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx) if (stat & BIT_INTR_SCDT_CHANGE) { u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); - if (cstat & BIT_TMDS_CSTAT_P3_SCDT) { - if (ctx->sink_type == SINK_HDMI) - /* enable infoframe interrupt */ - sii8620_scdt_high(ctx); - else - sii8620_start_video(ctx); - } + if (cstat & BIT_TMDS_CSTAT_P3_SCDT) + sii8620_start_video(ctx); } sii8620_write(ctx, REG_INTR5, stat); } -static void sii8620_new_vsi(struct sii8620 *ctx) -{ - u8 vsif[11]; - - sii8620_write(ctx, REG_RX_HDMI_CTRL2, - VAL_RX_HDMI_CTRL2_DEFVAL | - BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI); - sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif, - ARRAY_SIZE(vsif)); -} - -static void sii8620_new_avi(struct sii8620 *ctx) -{ - sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL); - sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif, - ARRAY_SIZE(ctx->avif)); -} - -static void sii8620_irq_infr(struct sii8620 *ctx) -{ - u8 stat = sii8620_readb(ctx, REG_INTR8) - & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI); - - sii8620_write(ctx, REG_INTR8, stat); - - if (stat & BIT_CEA_NEW_VSI) - sii8620_new_vsi(ctx); - - if (stat & BIT_CEA_NEW_AVI) - sii8620_new_avi(ctx); - - if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI)) - sii8620_start_video(ctx); -} - static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret) { if (ret < 0) @@ -2043,11 +1997,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx) if (stat & BIT_DDC_CMD_DONE) { sii8620_write(ctx, REG_INTR3_MASK, 0); - if (sii8620_is_mhl3(ctx)) + if (sii8620_is_mhl3(ctx) && !ctx->feature_complete) sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_FEAT_REQ); else - sii8620_edid_read(ctx, 0); + sii8620_enable_hpd(ctx); } sii8620_write(ctx, REG_INTR3, stat); } @@ -2074,7 +2028,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data) { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc }, { 
BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, - { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr }, }; struct sii8620 *ctx = data; u8 stats[LEN_FAST_INTR_STAT]; @@ -2112,7 +2065,6 @@ static void sii8620_cable_in(struct sii8620 *ctx) dev_err(dev, "Error powering on, %d.\n", ret); return; } - sii8620_hw_reset(ctx); sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); ret = sii8620_clear_error(ctx); @@ -2268,17 +2220,43 @@ static void sii8620_detach(struct drm_bridge *bridge) rc_unregister_device(ctx->rc_dev); } +static int sii8620_is_packing_required(struct sii8620 *ctx, + const struct drm_display_mode *mode) +{ + int max_pclk, max_pclk_pp_mode; + + if (sii8620_is_mhl3(ctx)) { + max_pclk = MHL3_MAX_PCLK; + max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE; + } else { + max_pclk = MHL1_MAX_PCLK; + max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE; + } + + if (mode->clock < max_pclk) + return 0; + else if (mode->clock < max_pclk_pp_mode) + return 1; + else + return -1; +} + static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge, const struct drm_display_mode *mode) { struct sii8620 *ctx = bridge_to_sii8620(bridge); + int pack_required = sii8620_is_packing_required(ctx, mode); bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL; - unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : - MHL1_MAX_LCLK; - max_pclk /= can_pack ? 2 : 3; - return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK; + switch (pack_required) { + case 0: + return MODE_OK; + case 1: + return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH; + default: + return MODE_CLOCK_HIGH; + } } static bool sii8620_mode_fixup(struct drm_bridge *bridge, @@ -2286,43 +2264,14 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge, struct drm_display_mode *adjusted_mode) { struct sii8620 *ctx = bridge_to_sii8620(bridge); - int max_lclk; - bool ret = true; mutex_lock(&ctx->lock); - max_lclk = sii8620_is_mhl3(ctx) ? 
MHL3_MAX_LCLK : MHL1_MAX_LCLK; - if (max_lclk > 3 * adjusted_mode->clock) { - ctx->use_packed_pixel = 0; - goto end; - } - if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) && - max_lclk > 2 * adjusted_mode->clock) { - ctx->use_packed_pixel = 1; - goto end; - } - ret = false; -end: - if (ret) { - u8 vic = drm_match_cea_mode(adjusted_mode); + ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode); - if (!vic) { - union hdmi_infoframe frm; - u8 mhl_vic[] = { 0, 95, 94, 93, 98 }; - - /* FIXME: We need the connector here */ - drm_hdmi_vendor_infoframe_from_display_mode( - &frm.vendor.hdmi, NULL, adjusted_mode); - vic = frm.vendor.hdmi.vic; - if (vic >= ARRAY_SIZE(mhl_vic)) - vic = 0; - vic = mhl_vic[vic]; - } - ctx->video_code = vic; - ctx->pixel_clock = adjusted_mode->clock; - } mutex_unlock(&ctx->lock); - return ret; + + return true; } static const struct drm_bridge_funcs sii8620_bridge_funcs = { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index b553a6f2ff0e..7af748ed1c58 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -369,13 +369,6 @@ EXPORT_SYMBOL(drm_dev_exit); */ void drm_dev_unplug(struct drm_device *dev) { - drm_dev_unregister(dev); - - mutex_lock(&drm_global_mutex); - if (dev->open_count == 0) - drm_dev_put(dev); - mutex_unlock(&drm_global_mutex); - /* * After synchronizing any critical read section is guaranteed to see * the new value of ->unplugged, and any critical section which might @@ -384,6 +377,13 @@ void drm_dev_unplug(struct drm_device *dev) */ dev->unplugged = true; synchronize_srcu(&drm_unplug_srcu); + + drm_dev_unregister(dev); + + mutex_lock(&drm_global_mutex); + if (dev->open_count == 0) + drm_dev_put(dev); + mutex_unlock(&drm_global_mutex); } EXPORT_SYMBOL(drm_dev_unplug); diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 1f8031e30f53..cdb10f885a4f 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref) drm_mode_object_unregister(blob->dev, &blob->base); - kfree(blob); + kvfree(blob); } /** @@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); - blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); + blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); if (!blob) return ERR_PTR(-ENOMEM); @@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB, true, drm_property_free_blob); if (ret) { - kfree(blob); + kvfree(blob); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index e5013a999147..540b59fb4103 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -631,8 +631,11 @@ static struct platform_driver etnaviv_platform_driver = { }, }; +static struct platform_device *etnaviv_drm; + static int __init etnaviv_init(void) { + struct platform_device *pdev; int ret; struct device_node *np; @@ -644,7 +647,7 @@ static int __init etnaviv_init(void) ret = platform_driver_register(&etnaviv_platform_driver); if (ret != 0) - platform_driver_unregister(&etnaviv_gpu_driver); + goto unregister_gpu_driver; /* * If the DT contains at least one available GPU device, instantiate @@ -653,20 +656,33 @@ static int __init 
etnaviv_init(void) for_each_compatible_node(np, NULL, "vivante,gc") { if (!of_device_is_available(np)) continue; - - platform_device_register_simple("etnaviv", -1, NULL, 0); + pdev = platform_device_register_simple("etnaviv", -1, + NULL, 0); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + of_node_put(np); + goto unregister_platform_driver; + } + etnaviv_drm = pdev; of_node_put(np); break; } + return 0; + +unregister_platform_driver: + platform_driver_unregister(&etnaviv_platform_driver); +unregister_gpu_driver: + platform_driver_unregister(&etnaviv_gpu_driver); return ret; } module_init(etnaviv_init); static void __exit etnaviv_exit(void) { - platform_driver_unregister(&etnaviv_gpu_driver); + platform_device_unregister(etnaviv_drm); platform_driver_unregister(&etnaviv_platform_driver); + platform_driver_unregister(&etnaviv_gpu_driver); } module_exit(etnaviv_exit); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index dd430f0f8ff5..90f17ff7888e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -131,6 +131,9 @@ struct etnaviv_gpu { struct work_struct sync_point_work; int sync_point_event; + /* hang detection */ + u32 hangcheck_dma_addr; + void __iomem *mmio; int irq; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index a74eb57af15b..50d6b88cb7aa 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -10,6 +10,7 @@ #include "etnaviv_gem.h" #include "etnaviv_gpu.h" #include "etnaviv_sched.h" +#include "state.xml.h" static int etnaviv_job_hang_limit = 0; module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444); @@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) { struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); struct etnaviv_gpu *gpu = submit->gpu; + u32 dma_addr; + int change; + + /* + * If the GPU managed to complete this job's fence, the timeout is + * spurious. Bail out. + */ + if (fence_completed(gpu, submit->out_fence->seqno)) + return; + + /* + * If the GPU is still making forward progress on the front-end (which + * should never loop) we shift out the timeout to give it a chance to + * finish the job.
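(Aside: a rough standalone sketch of the progress check described in the comment above, for illustration only; the names and types below are hypothetical and not the etnaviv driver's API.)

/* Illustrative sketch, not driver code: has the GPU front-end advanced
 * since the previous timeout check?
 */
#include <stdbool.h>
#include <stdint.h>

struct hangcheck {
	uint32_t last_dma_addr;	/* FE DMA address sampled at the last timeout */
};

static bool fe_made_progress(struct hangcheck *hc, uint32_t dma_addr)
{
	int32_t change = (int32_t)(dma_addr - hc->last_dma_addr);

	/* Anything but a small forward step is treated as progress, so the
	 * timeout is pushed out instead of resetting the GPU.
	 */
	if (change < 0 || change > 16) {
		hc->last_dma_addr = dma_addr;
		return true;
	}
	return false;	/* stuck within a 16 byte window: recover the GPU */
}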
+ */ + dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); + change = dma_addr - gpu->hangcheck_dma_addr; + if (change < 0 || change > 16) { + gpu->hangcheck_dma_addr = dma_addr; + schedule_delayed_work(&sched_job->work_tdr, + sched_job->sched->timeout); + return; + } /* block scheduler */ kthread_park(gpu->sched.thread); diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 82c95c34447f..e868773ea509 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, unsigned long val; val = readl(ctx->addr + DECON_WINCONx(win)); - val &= ~WINCONx_BPPMODE_MASK; + val &= WINCONx_ENWIN_F; switch (fb->format->format) { case DRM_FORMAT_XRGB1555: @@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, writel(val, ctx->addr + DECON_VIDOSDxB(win)); } - val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | - VIDOSD_Wx_ALPHA_B_F(0x0); + val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) | + VIDOSD_Wx_ALPHA_B_F(0xff); writel(val, ctx->addr + DECON_VIDOSDxC(win)); val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index a81b4a5e24a7..ed3cc2989f93 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -420,7 +420,7 @@ static int exynos_drm_bind(struct device *dev) err_free_private: kfree(private); err_free_drm: - drm_dev_unref(drm); + drm_dev_put(drm); return ret; } @@ -444,7 +444,7 @@ static void exynos_drm_unbind(struct device *dev) drm->dev_private = NULL; dev_set_drvdata(dev, NULL); - drm_dev_unref(drm); + drm_dev_put(drm); } static const struct component_master_ops exynos_drm_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 7fcc1a7ab1a0..27b7d34d776c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -138,7 +138,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, err: while (i--) - drm_gem_object_unreference_unlocked(&exynos_gem[i]->base); + drm_gem_object_put_unlocked(&exynos_gem[i]->base); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 6127ef25acd6..e8d0670bb5f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation) static void fimc_set_window(struct fimc_context *ctx, struct exynos_drm_ipp_buffer *buf) { + unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; u32 cfg, h1, h2, v1, v2; /* cropped image */ h1 = buf->rect.x; - h2 = buf->buf.width - buf->rect.w - buf->rect.x; + h2 = real_width - buf->rect.w - buf->rect.x; v1 = buf->rect.y; v2 = buf->buf.height - buf->rect.h - buf->rect.y; DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h, - buf->buf.width, buf->buf.height); + real_width, buf->buf.height); DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); /* @@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx, static void fimc_src_set_size(struct fimc_context *ctx, struct exynos_drm_ipp_buffer *buf) { + unsigned int real_width = buf->buf.pitch[0] / 
buf->format->cpp[0]; u32 cfg; - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height); /* original size */ - cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) | + cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) | EXYNOS_ORGISIZE_VERTICAL(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_ORGISIZE); @@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx, * for now, we support only ITU601 8 bit mode */ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | - EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) | + EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) | EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_CISRCFMT); @@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc) static void fimc_dst_set_size(struct fimc_context *ctx, struct exynos_drm_ipp_buffer *buf) { + unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; u32 cfg, cfg_ext; - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height); /* original size */ - cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) | + cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) | EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_ORGOSIZE); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 6e1494fa71b4..bdf5a7655228 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); /* drop reference from allocate - handle holds it now. */ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } @@ -186,7 +186,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, exynos_gem = to_exynos_gem(obj); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return exynos_gem->size; } @@ -329,13 +329,13 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, return; } - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); /* * decrease obj->refcount one more time because we has already * increased it at exynos_drm_gem_get_dma_addr(). 
*/ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); } static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, @@ -383,7 +383,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, args->flags = exynos_gem->flags; args->size = exynos_gem->size; - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 35ac66730563..7ba414b52faa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt) GSC_IN_CHROMA_ORDER_CRCB); break; case DRM_FORMAT_NV21: + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P); + break; case DRM_FORMAT_NV61: - cfg |= (GSC_IN_CHROMA_ORDER_CRCB | - GSC_IN_YUV420_2P); + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P); break; case DRM_FORMAT_YUV422: cfg |= GSC_IN_YUV422_3P; break; case DRM_FORMAT_YUV420: + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P); + break; case DRM_FORMAT_YVU420: - cfg |= GSC_IN_YUV420_3P; + cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P); break; case DRM_FORMAT_NV12: + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P); + break; case DRM_FORMAT_NV16: - cfg |= (GSC_IN_CHROMA_ORDER_CBCR | - GSC_IN_YUV420_2P); + cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P); break; } @@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation) switch (degree) { case DRM_MODE_ROTATE_0: - if (rotation & DRM_MODE_REFLECT_Y) - cfg |= GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg |= GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg |= GSC_IN_ROT_YFLIP; break; case DRM_MODE_ROTATE_90: cfg |= GSC_IN_ROT_90; - if (rotation & DRM_MODE_REFLECT_Y) - cfg |= GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg |= GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg |= GSC_IN_ROT_YFLIP; break; case DRM_MODE_ROTATE_180: cfg |= GSC_IN_ROT_180; - if (rotation & DRM_MODE_REFLECT_Y) - cfg &= ~GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg &= ~GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~GSC_IN_ROT_YFLIP; break; case DRM_MODE_ROTATE_270: cfg |= GSC_IN_ROT_270; - if (rotation & DRM_MODE_REFLECT_Y) - cfg &= ~GSC_IN_ROT_XFLIP; if (rotation & DRM_MODE_REFLECT_X) + cfg &= ~GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~GSC_IN_ROT_YFLIP; break; } @@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx, cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | GSC_SRCIMG_WIDTH_MASK); - cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) | + cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) | GSC_SRCIMG_HEIGHT(buf->buf.height)); gsc_write(cfg, GSC_SRCIMG_SIZE); @@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt) GSC_OUT_CHROMA_ORDER_CRCB); break; case DRM_FORMAT_NV21: - case DRM_FORMAT_NV61: cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P); break; + case DRM_FORMAT_NV61: + cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P); + break; case DRM_FORMAT_YUV422: + cfg |= GSC_OUT_YUV422_3P; + break; case DRM_FORMAT_YUV420: + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P); + break; case DRM_FORMAT_YVU420: - cfg |= GSC_OUT_YUV420_3P; + cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P); break; case DRM_FORMAT_NV12: + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P); + break; case 
DRM_FORMAT_NV16: - cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | - GSC_OUT_YUV420_2P); + cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P); break; } @@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx, /* original size */ cfg = gsc_read(GSC_DSTIMG_SIZE); cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK); - cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) | + cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) | GSC_DSTIMG_HEIGHT(buf->buf.height); gsc_write(cfg, GSC_DSTIMG_SIZE); @@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = { }; static const struct drm_exynos_ipp_limit gsc_5433_limits[] = { - { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) }, + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) }, { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) }, { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) }, { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 26374e58c557..b435db8fc916 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -345,27 +345,6 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf, int ret = 0; int i; - /* basic checks */ - if (buf->buf.width == 0 || buf->buf.height == 0) - return -EINVAL; - buf->format = drm_format_info(buf->buf.fourcc); - for (i = 0; i < buf->format->num_planes; i++) { - unsigned int width = (i == 0) ? buf->buf.width : - DIV_ROUND_UP(buf->buf.width, buf->format->hsub); - - if (buf->buf.pitch[i] == 0) - buf->buf.pitch[i] = width * buf->format->cpp[i]; - if (buf->buf.pitch[i] < width * buf->format->cpp[i]) - return -EINVAL; - if (!buf->buf.gem_id[i]) - return -ENOENT; - } - - /* pitch for additional planes must match */ - if (buf->format->num_planes > 2 && - buf->buf.pitch[1] != buf->buf.pitch[2]) - return -EINVAL; - /* get GEM buffers and check their size */ for (i = 0; i < buf->format->num_planes; i++) { unsigned int height = (i == 0) ? buf->buf.height : @@ -428,7 +407,7 @@ enum drm_ipp_size_id { IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX }; -static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = { +static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = { [IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, [IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, @@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf, enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA; struct drm_ipp_limit l; struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; + int real_width = buf->buf.pitch[0] / buf->format->cpp[0]; if (!limits) return 0; __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l); - if (!__size_limit_check(buf->buf.width, &l.h) || + if (!__size_limit_check(real_width, &l.h) || !__size_limit_check(buf->buf.height, &l.v)) return -EINVAL; @@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits( return 0; } +static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task, + struct exynos_drm_ipp_buffer *buf, + struct exynos_drm_ipp_buffer *src, + struct exynos_drm_ipp_buffer *dst, + bool rotate, bool swap) +{ + const struct exynos_drm_ipp_formats *fmt; + int ret, i; + + fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier, + buf == src ? 
DRM_EXYNOS_IPP_FORMAT_SOURCE : + DRM_EXYNOS_IPP_FORMAT_DESTINATION); + if (!fmt) { + DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task, + buf == src ? "src" : "dst"); + return -EINVAL; + } + + /* basic checks */ + if (buf->buf.width == 0 || buf->buf.height == 0) + return -EINVAL; + + buf->format = drm_format_info(buf->buf.fourcc); + for (i = 0; i < buf->format->num_planes; i++) { + unsigned int width = (i == 0) ? buf->buf.width : + DIV_ROUND_UP(buf->buf.width, buf->format->hsub); + + if (buf->buf.pitch[i] == 0) + buf->buf.pitch[i] = width * buf->format->cpp[i]; + if (buf->buf.pitch[i] < width * buf->format->cpp[i]) + return -EINVAL; + if (!buf->buf.gem_id[i]) + return -ENOENT; + } + + /* pitch for additional planes must match */ + if (buf->format->num_planes > 2 && + buf->buf.pitch[1] != buf->buf.pitch[2]) + return -EINVAL; + + /* check driver limits */ + ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits, + fmt->num_limits, + rotate, + buf == dst ? swap : false); + if (ret) + return ret; + ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, + fmt->limits, + fmt->num_limits, swap); + return ret; +} + static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) { struct exynos_drm_ipp *ipp = task->ipp; - const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt; struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; unsigned int rotation = task->transform.rotation; int ret = 0; @@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) return -EINVAL; } - src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier, - DRM_EXYNOS_IPP_FORMAT_SOURCE); - if (!src_fmt) { - DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task); - return -EINVAL; - } - ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits, - src_fmt->num_limits, - rotate, false); - if (ret) - return ret; - ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, - src_fmt->limits, - src_fmt->num_limits, swap); + ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap); if (ret) return ret; - dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier, - DRM_EXYNOS_IPP_FORMAT_DESTINATION); - if (!dst_fmt) { - DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task); - return -EINVAL; - } - ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits, - dst_fmt->num_limits, - false, swap); - if (ret) - return ret; - ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, - dst_fmt->limits, - dst_fmt->num_limits, swap); + ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap); if (ret) return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 38a2a7f1204b..7098c6d35266 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane) if (plane->state) { exynos_state = to_exynos_plane_state(plane->state); if (exynos_state->base.fb) - drm_framebuffer_unreference(exynos_state->base.fb); + drm_framebuffer_put(exynos_state->base.fb); kfree(exynos_state); plane->state = NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 1a76dd3d52e1..a820a68429b9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot, val &= 
~ROT_CONTROL_FLIP_MASK; if (rotation & DRM_MODE_REFLECT_X) - val |= ROT_CONTROL_FLIP_HORIZONTAL; - if (rotation & DRM_MODE_REFLECT_Y) val |= ROT_CONTROL_FLIP_VERTICAL; + if (rotation & DRM_MODE_REFLECT_Y) + val |= ROT_CONTROL_FLIP_HORIZONTAL; val &= ~ROT_CONTROL_ROT_MASK; diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 91d4382343d0..0ddb6eec7b11 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -30,6 +30,7 @@ #define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset)) #define SCALER_MAX_CLK 4 #define SCALER_AUTOSUSPEND_DELAY 2000 +#define SCALER_RESET_WAIT_RETRIES 100 struct scaler_data { const char *clk_name[SCALER_MAX_CLK]; @@ -51,9 +52,9 @@ struct scaler_context { static u32 scaler_get_format(u32 drm_fmt) { switch (drm_fmt) { - case DRM_FORMAT_NV21: - return SCALER_YUV420_2P_UV; case DRM_FORMAT_NV12: + return SCALER_YUV420_2P_UV; + case DRM_FORMAT_NV21: return SCALER_YUV420_2P_VU; case DRM_FORMAT_YUV420: return SCALER_YUV420_3P; @@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt) return SCALER_YUV422_1P_UYVY; case DRM_FORMAT_YVYU: return SCALER_YUV422_1P_YVYU; - case DRM_FORMAT_NV61: - return SCALER_YUV422_2P_UV; case DRM_FORMAT_NV16: + return SCALER_YUV422_2P_UV; + case DRM_FORMAT_NV61: return SCALER_YUV422_2P_VU; case DRM_FORMAT_YUV422: return SCALER_YUV422_3P; - case DRM_FORMAT_NV42: - return SCALER_YUV444_2P_UV; case DRM_FORMAT_NV24: + return SCALER_YUV444_2P_UV; + case DRM_FORMAT_NV42: return SCALER_YUV444_2P_VU; case DRM_FORMAT_YUV444: return SCALER_YUV444_3P; @@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt) return 0; } +static inline int scaler_reset(struct scaler_context *scaler) +{ + int retry = SCALER_RESET_WAIT_RETRIES; + + scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); + do { + cpu_relax(); + } while (--retry > 1 && + scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); + do { + cpu_relax(); + scaler_write(1, SCALER_INT_EN); + } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1); + + return retry ?
0 : -EIO; +} + static inline void scaler_enable_int(struct scaler_context *scaler) { u32 val; @@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp, u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc); struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect; - scaler->task = task; - pm_runtime_get_sync(scaler->dev); + if (scaler_reset(scaler)) { + pm_runtime_put(scaler->dev); + return -EIO; + } + + scaler->task = task; scaler_set_src_fmt(scaler, src_fmt); scaler_set_src_base(scaler, &task->src); @@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler) static inline u32 scaler_get_int_status(struct scaler_context *scaler) { - return scaler_read(SCALER_INT_STATUS); + u32 val = scaler_read(SCALER_INT_STATUS); + + scaler_write(val, SCALER_INT_STATUS); + + return val; } static inline int scaler_task_done(u32 val) diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h index 4704a993cbb7..16b39734115c 100644 --- a/drivers/gpu/drm/exynos/regs-gsc.h +++ b/drivers/gpu/drm/exynos/regs-gsc.h @@ -138,6 +138,7 @@ #define GSC_OUT_YUV420_3P (3 << 4) #define GSC_OUT_YUV422_1P (4 << 4) #define GSC_OUT_YUV422_2P (5 << 4) +#define GSC_OUT_YUV422_3P (6 << 4) #define GSC_OUT_YUV444 (7 << 4) #define GSC_OUT_TILE_TYPE_MASK (1 << 2) #define GSC_OUT_TILE_C_16x8 (0 << 2) diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6d8180e8d1e2..4b072ade8c38 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 23296547da95..4efec8fa6c1d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); } + mm->ggtt_mm.last_partial_off = -1UL; return mm; } @@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); + mm->ggtt_mm.last_partial_off = -1UL; } vgpu_free_mm(mm); @@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); + /* If ggtt 
entry size is 8 bytes, and it's split into two 4-byte + * writes, we assume the two 4-byte writes are consecutive. + * Otherwise, we abort and report an error + */ + if (bytes < info->gtt_entry_size) { + if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { + /* the first partial part */ + ggtt_mm->ggtt_mm.last_partial_off = off; + ggtt_mm->ggtt_mm.last_partial_data = e.val64; + return 0; + } else if ((g_gtt_index == + (ggtt_mm->ggtt_mm.last_partial_off >> + info->gtt_entry_size_shift)) && + (off != ggtt_mm->ggtt_mm.last_partial_off)) { + /* the second partial part */ + + int last_off = ggtt_mm->ggtt_mm.last_partial_off & + (info->gtt_entry_size - 1); + + memcpy((void *)&e.val64 + last_off, + (void *)&ggtt_mm->ggtt_mm.last_partial_data + + last_off, bytes); + + ggtt_mm->ggtt_mm.last_partial_off = -1UL; + } else { + int last_offset; + + gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", + ggtt_mm->ggtt_mm.last_partial_off, off, + bytes, info->gtt_entry_size); + + /* set host ggtt entry to scratch page and clear + * virtual ggtt entry as not present for the last + * partially written offset + */ + last_offset = ggtt_mm->ggtt_mm.last_partial_off & + (~(info->gtt_entry_size - 1)); + + ggtt_get_host_entry(ggtt_mm, &m, last_offset); + ggtt_invalidate_pte(vgpu, &m); + ops->set_pfn(&m, gvt->gtt.scratch_mfn); + ops->clear_present(&m); + ggtt_set_host_entry(ggtt_mm, &m, last_offset); + ggtt_invalidate(gvt->dev_priv); + + ggtt_get_guest_entry(ggtt_mm, &e, last_offset); + ops->clear_present(&e); + ggtt_set_guest_entry(ggtt_mm, &e, last_offset); + + ggtt_mm->ggtt_mm.last_partial_off = off; + ggtt_mm->ggtt_mm.last_partial_data = e.val64; + + return 0; + } + } + if (ops->test_present(&e)) { gfn = ops->get_pfn(&e); m = e; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 3792f2b7f4ff..97e62647418a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -150,6 +150,8 @@ struct intel_vgpu_mm { } ppgtt_mm; struct { void *virtual_ggtt; + unsigned long last_partial_off; + u64 last_partial_data; } ggtt_mm; }; }; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 34c125e2d90c..52f3b91d14fd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -340,14 +340,21 @@ struct drm_i915_file_private { unsigned int bsd_engine; -/* Client can have a maximum of 3 contexts banned before - * it is denied of creating new contexts. As one context - * ban needs 4 consecutive hangs, and more if there is - * progress in between, this is a last resort stop gap measure - * to limit the badly behaving clients access to gpu. +/* + * Every context ban increments the per-client ban score. Hangs in + * short succession also increment the ban score. If the ban threshold + * is reached, the client is considered banned and submitting more work + * will fail. This is a stop-gap measure to limit a badly behaving + * client's access to the GPU. Note that unbannable contexts never + * increment the client ban score. */ -#define I915_MAX_CLIENT_CONTEXT_BANS 3 - atomic_t context_bans; +#define I915_CLIENT_SCORE_HANG_FAST 1 +#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ) +#define I915_CLIENT_SCORE_CONTEXT_BAN 3 +#define I915_CLIENT_SCORE_BANNED 9 + /** ban_score: Accumulated score of all ctx bans and fast hangs.
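(Aside: a simplified illustration of the scoring policy spelled out in the comment above; the names below, client_score and client_mark_guilty, are made up for the sketch. The real driver uses atomics and an xchg() on the hang timestamp, and only bannable contexts feed the score.)

#include <stdbool.h>

#define SCORE_HANG_FAST     1	/* another hang inside the fast-hang window */
#define SCORE_CONTEXT_BAN   3	/* a context of this client got banned */
#define SCORE_BANNED        9	/* from here on, context creation is refused */

struct client_score {
	unsigned int ban_score;
	unsigned long last_hang;	/* timestamp of the previous hang */
};

/* Called for the owning client whenever one of its contexts is found guilty. */
static void client_mark_guilty(struct client_score *c, bool context_banned,
			       unsigned long now, unsigned long fast_window)
{
	unsigned int score = context_banned ? SCORE_CONTEXT_BAN : 0;

	if (now - c->last_hang < fast_window)
		score += SCORE_HANG_FAST;
	c->last_hang = now;
	c->ban_score += score;
}

static bool client_is_banned(const struct client_score *c)
{
	return c->ban_score >= SCORE_BANNED;
}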
*/ + atomic_t ban_score; + unsigned long hang_timestamp; }; /* Interface history: @@ -2238,9 +2245,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg) **/ static inline struct scatterlist *__sg_next(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif return sg_is_last(sg) ? NULL : ____sg_next(sg); } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3704f4c0c2c9..17c5097721e8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf) bool write = !!(vmf->flags & FAULT_FLAG_WRITE); struct i915_vma *vma; pgoff_t page_offset; - unsigned int flags; int ret; /* We don't use vmf->pgoff since that has the fake offset */ @@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf) goto err_unlock; } - /* If the object is smaller than a couple of partial vma, it is - * not worth only creating a single partial vma - we may as well - * clear enough space for the full object. - */ - flags = PIN_MAPPABLE; - if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) - flags |= PIN_NONBLOCK | PIN_NONFAULT; /* Now pin it into the GTT as needed */ - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + PIN_MAPPABLE | + PIN_NONBLOCK | + PIN_NONFAULT); if (IS_ERR(vma)) { /* Use a partial view if it is bigger than available space */ struct i915_ggtt_view view = compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); + unsigned int flags; - /* Userspace is now writing through an untracked VMA, abandon + flags = PIN_MAPPABLE; + if (view.type == I915_GGTT_VIEW_NORMAL) + flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ + + /* + * Userspace is now writing through an untracked VMA, abandon * all hope that the hardware is able to track future writes. 
*/ obj->frontbuffer_ggtt_origin = ORIGIN_CPU; - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + if (IS_ERR(vma) && !view.type) { + flags = PIN_MAPPABLE; + view.type = I915_GGTT_VIEW_PARTIAL; + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + } } if (IS_ERR(vma)) { ret = PTR_ERR(vma); @@ -2933,32 +2939,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, return 0; } +static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv, + const struct i915_gem_context *ctx) +{ + unsigned int score; + unsigned long prev_hang; + + if (i915_gem_context_is_banned(ctx)) + score = I915_CLIENT_SCORE_CONTEXT_BAN; + else + score = 0; + + prev_hang = xchg(&file_priv->hang_timestamp, jiffies); + if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) + score += I915_CLIENT_SCORE_HANG_FAST; + + if (score) { + atomic_add(score, &file_priv->ban_score); + + DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n", + ctx->name, score, + atomic_read(&file_priv->ban_score)); + } +} + static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) { - bool banned; + unsigned int score; + bool banned, bannable; atomic_inc(&ctx->guilty_count); - banned = false; - if (i915_gem_context_is_bannable(ctx)) { - unsigned int score; + bannable = i915_gem_context_is_bannable(ctx); + score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); + banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; - score = atomic_add_return(CONTEXT_SCORE_GUILTY, - &ctx->ban_score); - banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; + DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n", + ctx->name, atomic_read(&ctx->guilty_count), + score, yesno(banned && bannable)); - DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? 
%s\n", - ctx->name, score, yesno(banned)); - } - if (!banned) + /* Cool contexts don't accumulate client ban score */ + if (!bannable) return; - i915_gem_context_set_banned(ctx); - if (!IS_ERR_OR_NULL(ctx->file_priv)) { - atomic_inc(&ctx->file_priv->context_bans); - DRM_DEBUG_DRIVER("client %s has had %d context banned\n", - ctx->name, atomic_read(&ctx->file_priv->context_bans)); - } + if (banned) + i915_gem_context_set_banned(ctx); + + if (!IS_ERR_OR_NULL(ctx->file_priv)) + i915_gem_client_mark_guilty(ctx->file_priv, ctx); } static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) @@ -5736,6 +5764,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) INIT_LIST_HEAD(&file_priv->mm.request_list); file_priv->bsd_engine = -1; + file_priv->hang_timestamp = jiffies; ret = i915_gem_context_open(i915, file); if (ret) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 33f8a4b3c981..060335d3d9e0 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -652,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv) static bool client_is_banned(struct drm_i915_file_private *file_priv) { - return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS; + return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index f627a8c47c58..22df17c8ca9b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -489,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb, } static int -eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) +eb_add_vma(struct i915_execbuffer *eb, + unsigned int i, unsigned batch_idx, + struct i915_vma *vma) { struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; int err; @@ -522,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) eb->flags[i] = entry->flags; vma->exec_flags = &eb->flags[i]; + /* + * SNA is doing fancy tricks with compressing batch buffers, which leads + * to negative relocation deltas. Usually that works out ok since the + * relocate address is still positive, except when the batch is placed + * very low in the GTT. Ensure this doesn't happen. + * + * Note that actual hangs have only been observed on gen7, but for + * paranoia do it everywhere. 
+ */ + if (i == batch_idx) { + if (!(eb->flags[i] & EXEC_OBJECT_PINNED)) + eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; + if (eb->reloc_cache.has_fence) + eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; + + eb->batch = vma; + } + err = 0; if (eb_pin_vma(eb, entry, vma)) { if (entry->offset != vma->node.start) { @@ -716,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) { struct radix_tree_root *handles_vma = &eb->ctx->handles_vma; struct drm_i915_gem_object *obj; - unsigned int i; + unsigned int i, batch; int err; if (unlikely(i915_gem_context_is_closed(eb->ctx))) @@ -728,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) INIT_LIST_HEAD(&eb->relocs); INIT_LIST_HEAD(&eb->unbound); + batch = eb_batch_index(eb); + for (i = 0; i < eb->buffer_count; i++) { u32 handle = eb->exec[i].handle; struct i915_lut_handle *lut; @@ -770,33 +792,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) lut->handle = handle; add_vma: - err = eb_add_vma(eb, i, vma); + err = eb_add_vma(eb, i, batch, vma); if (unlikely(err)) goto err_vma; GEM_BUG_ON(vma != eb->vma[i]); GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); + GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && + eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i])); } - /* take note of the batch buffer before we might reorder the lists */ - i = eb_batch_index(eb); - eb->batch = eb->vma[i]; - GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]); - - /* - * SNA is doing fancy tricks with compressing batch buffers, which leads - * to negative relocation deltas. Usually that works out ok since the - * relocate address is still positive, except when the batch is placed - * very low in the GTT. Ensure this doesn't happen. - * - * Note that actual hangs have only been observed on gen7, but for - * paranoia do it everywhere. - */ - if (!(eb->flags[i] & EXEC_OBJECT_PINNED)) - eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; - if (eb->reloc_cache.has_fence) - eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; - eb->args->flags |= __EXEC_VALIDATED; return eb_reserve(eb); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f9bc3aaa90d0..4a02747ac658 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1893,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, /* * Clear the PIPE*STAT regs before the IIR + * + * Toggle the enable bits to make sure we get an + * edge in the ISR pipe event bit if we don't clear + * all the enabled status bits. Otherwise the edge + * triggered IIR on i965/g4x wouldn't notice that + * an interrupt is still pending. */ - if (pipe_stats[pipe]) - I915_WRITE(reg, enable_mask | pipe_stats[pipe]); + if (pipe_stats[pipe]) { + I915_WRITE(reg, pipe_stats[pipe]); + I915_WRITE(reg, enable_mask); + } } spin_unlock(&dev_priv->irq_lock); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f11bb213ec07..7720569f2024 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2425,12 +2425,17 @@ enum i915_power_well_id { #define _3D_CHICKEN _MMIO(0x2084) #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) #define _3D_CHICKEN2 _MMIO(0x208c) + +#define FF_SLICE_CHICKEN _MMIO(0x2088) +#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1) + /* Disables pipelining of read flushes past the SF-WIZ interface. * Required on all Ironlake steppings according to the B-Spec, but the * particular danger of not doing so is not specified. 
*/ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 _MMIO(0x2090) +#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12) #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) #define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 9324d476e0a7..0531c01c3604 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj, obj->base.size >> PAGE_SHIFT)); vma->size = view->partial.size; vma->size <<= PAGE_SHIFT; - GEM_BUG_ON(vma->size >= obj->base.size); + GEM_BUG_ON(vma->size > obj->base.size); } else if (view->type == I915_GGTT_VIEW_ROTATED) { vma->size = intel_rotation_info_size(&view->rotated); vma->size <<= PAGE_SHIFT; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index de0e22322c76..072b326d5ee0 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector, int max_dotclk = dev_priv->max_dotclk_freq; int max_clock; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + if (mode->clock < 25000) return MODE_CLOCK_LOW; @@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + return true; } @@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + pipe_config->has_pch_encoder = true; return true; @@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; pipe_config->has_pch_encoder = true; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dee3a8e659f1..2cc6faa1daa8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14469,12 +14469,22 @@ static enum drm_mode_status intel_mode_valid(struct drm_device *dev, const struct drm_display_mode *mode) { + /* + * Can't reject DBLSCAN here because Xorg ddxen can add piles + * of DBLSCAN modes to the output's mode list when they detect + * the scaling mode property on the connector. And they don't + * ask the kernel to validate those modes in any way until + * modeset time at which point the client gets a protocol error. + * So in order to not upset those clients we silently ignore the + * DBLSCAN flag on such connectors. For other connectors we will + * reject modes with the DBLSCAN flag in encoder->compute_config(). + * And we always reject DBLSCAN modes in connector->mode_valid() + * as we never want such modes on the connector's mode list. 
+ */ + if (mode->vscan > 1) return MODE_NO_VSCAN; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; - if (mode->flags & DRM_MODE_FLAG_HSKEW) return MODE_H_ILLEGAL; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8320f0e8e3be..16faea30114a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -420,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector, int max_rate, mode_rate, max_lanes, max_link_clock; int max_dotclk; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); if (intel_dp_is_edp(intel_dp) && fixed_mode) { @@ -1862,7 +1865,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, conn_state->scaling_mode); } - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + + if (HAS_GMCH_DISPLAY(dev_priv) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) return false; @@ -2782,16 +2788,6 @@ static void intel_disable_dp(struct intel_encoder *encoder, static void g4x_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) -{ - intel_disable_dp(encoder, old_crtc_state, old_conn_state); - - /* disable the port before the pipe on g4x */ - intel_dp_link_down(encoder, old_crtc_state); -} - -static void ilk_disable_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) { intel_disable_dp(encoder, old_crtc_state, old_conn_state); } @@ -2807,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder, intel_disable_dp(encoder, old_crtc_state, old_conn_state); } -static void ilk_post_disable_dp(struct intel_encoder *encoder, +static void g4x_post_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = encoder->port; + /* + * Bspec does not list a specific disable sequence for g4x DP. + * Follow the ilk+ sequence (disable pipe before the port) for + * g4x DP as it does not suffer from underruns like the normal + * g4x modeset sequence (disable pipe after the port). 
+ */ intel_dp_link_down(encoder, old_crtc_state); /* Only ilk+ has port A */ @@ -6337,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) + if (!HAS_GMCH_DISPLAY(dev_priv)) connector->interlace_allowed = true; connector->doublescan_allowed = 0; @@ -6436,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->enable = vlv_enable_dp; intel_encoder->disable = vlv_disable_dp; intel_encoder->post_disable = vlv_post_disable_dp; - } else if (INTEL_GEN(dev_priv) >= 5) { - intel_encoder->pre_enable = g4x_pre_enable_dp; - intel_encoder->enable = g4x_enable_dp; - intel_encoder->disable = ilk_disable_dp; - intel_encoder->post_disable = ilk_post_disable_dp; } else { intel_encoder->pre_enable = g4x_pre_enable_dp; intel_encoder->enable = g4x_enable_dp; intel_encoder->disable = g4x_disable_dp; + intel_encoder->post_disable = g4x_post_disable_dp; } intel_dig_port->dp.output_reg = output_reg; diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 9e6956c08688..5890500a3a8b 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_LIMITED_M_N); + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + pipe_config->has_pch_encoder = false; bpp = 24; if (intel_dp->compliance.test_data.bpc) { @@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, if (!intel_dp) return MODE_ERROR; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index cf39ca90d887..f349b3920199 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, conn_state->scaling_mode); } + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + /* DSI uses short packets for sync events, so clear mode flags for DSI */ adjusted_mode->flags = 0; @@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector, DRM_DEBUG_KMS("\n"); + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + if (fixed_mode) { if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index a70d767313aa..61d908e0df0e 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector, int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; int target_clock = mode->clock; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + /* XXX: Validate clock range */ if (fixed_mode) { @@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, if (fixed_mode) intel_fixed_panel_mode(fixed_mode, adjusted_mode); + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + return true; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ee929f31f7db..d8cb53ef4351 100644 --- 
a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector, bool force_dvi = READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + clock = mode->clock; if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) @@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, int desired_bpp; bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; if (pipe_config->has_hdmi_sink) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 15434cad5430..7c4c8fb1dae4 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1545,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ batch = gen8_emit_flush_coherentl3_wa(engine, batch); + *batch++ = MI_LOAD_REGISTER_IMM(3); + /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ - *batch++ = MI_LOAD_REGISTER_IMM(1); *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2); *batch++ = _MASKED_BIT_DISABLE( GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE); + + /* BSpec: 11391 */ + *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN); + *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX); + + /* BSpec: 11299 */ + *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3); + *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX); + *batch++ = MI_NOOP; /* WaClearSlmSpaceAtContextSwitch:kbl */ @@ -2641,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, context_size += LRC_HEADER_PAGES * PAGE_SIZE; ctx_obj = i915_gem_object_create(ctx->i915, context_size); - if (IS_ERR(ctx_obj)) { - ret = PTR_ERR(ctx_obj); - goto error_deref_obj; - } + if (IS_ERR(ctx_obj)) + return PTR_ERR(ctx_obj); vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL); if (IS_ERR(vma)) { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index d278f24ba6ae..48f618dc9abb 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > fixed_mode->vdisplay) @@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, intel_fixed_panel_mode(intel_connector->panel.fixed_mode, adjusted_mode); + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + if (HAS_PCH_SPLIT(dev_priv)) { pipe_config->has_pch_encoder = true; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 25005023c243..26975df4e593 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, adjusted_mode); } + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + /* * Make the CRTC code factor in 
the SDVO pixel multiplier. The * SDVO device will factor out the multiplier during mode_set. @@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector, struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + if (intel_sdvo->pixel_clock_min > mode->clock) return MODE_CLOCK_LOW; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 885fc3809f7f..b55b5c157e38 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector, const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + if (mode->clock > max_dotclk) return MODE_CLOCK_HIGH; @@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; if (!tv_mode) return false; - pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock; + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return false; + + adjusted_mode->crtc_clock = tv_mode->clock; DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); pipe_config->pipe_bpp = 8*3; /* TV has it's own notion of sync and other mode flags, so clear them. */ - pipe_config->base.adjusted_mode.flags = 0; + adjusted_mode->flags = 0; /* * FIXME: We don't check whether the input mode is actually what we want diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 32b1a6cdecfc..d3443125e661 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -197,8 +197,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) priv->io_base = regs; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi"); - if (!res) - return -EINVAL; + if (!res) { + ret = -EINVAL; + goto free_drm; + } /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); if (!regs) { @@ -215,8 +217,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); - if (!res) - return -EINVAL; + if (!res) { + ret = -EINVAL; + goto free_drm; + } /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); if (!regs) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c index 291c08117ab6..397143b639c6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c +++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c @@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm, nvif_object_map(&wndw->wimm.base.user, NULL, 0); wndw->immd = func; - wndw->ctxdma.parent = &disp->core->chan.base.user; + wndw->ctxdma.parent = NULL; return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 224963b533a6..c5a9bc1af5af 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) if (ret) return ret; - ctxdma = 
nv50_wndw_ctxdma_new(wndw, fb); - if (IS_ERR(ctxdma)) { - nouveau_bo_unpin(fb->nvbo); - return PTR_ERR(ctxdma); + if (wndw->ctxdma.parent) { + ctxdma = nv50_wndw_ctxdma_new(wndw, fb); + if (IS_ERR(ctxdma)) { + nouveau_bo_unpin(fb->nvbo); + return PTR_ERR(ctxdma); + } + + asyw->image.handle[0] = ctxdma->object.handle; } asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv); - asyw->image.handle[0] = ctxdma->object.handle; asyw->image.offset[0] = fb->nvbo->bo.offset; if (wndw->func->prepare) { diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index b8cda9449241..768207fbbae3 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, struct qxl_cursor_cmd *cmd; struct qxl_cursor *cursor; struct drm_gem_object *obj; - struct qxl_bo *cursor_bo = NULL, *user_bo = NULL; + struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL; int ret; void *user_ptr; int size = 64*64*4; @@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, cursor_bo, 0); cmd->type = QXL_CURSOR_SET; - qxl_bo_unref(&qcrtc->cursor_bo); + old_cursor_bo = qcrtc->cursor_bo; qcrtc->cursor_bo = cursor_bo; cursor_bo = NULL; } else { @@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + if (old_cursor_bo) + qxl_bo_unref(&old_cursor_bo); + qxl_bo_unref(&cursor_bo); return; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 08747fc3ee71..8232b39e16ca 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -17,7 +17,6 @@ #include #include #include -#include #include @@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon, static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, const struct drm_display_mode *mode) { - struct drm_panel *panel = tcon->panel; - struct drm_connector *connector = panel->connector; - struct drm_display_info display_info = connector->display_info; unsigned int bp, hsync, vsync; u8 clk_delay; u32 val = 0; @@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, if (mode->flags & DRM_MODE_FLAG_PVSYNC) val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; - /* - * On A20 and similar SoCs, the only way to achieve Positive Edge - * (Rising Edge), is setting dclk clock phase to 2/3(240°). - * By default TCON works in Negative Edge(Falling Edge), - * this is why phase is set to 0 in that case. - * Unfortunately there's no way to logically invert dclk through - * IO_POL register. - * The only acceptable way to work, triple checked with scope, - * is using clock phase set to 0° for Negative Edge and set to 240° - * for Positive Edge. - * On A33 and similar SoCs there would be a 90° phase option, - * but it divides also dclk by 2. - * Following code is a way to avoid quirks all around TCON - * and DOTCLOCK drivers. 
- */ - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) - clk_set_phase(tcon->dclk, 240); - - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) - clk_set_phase(tcon->dclk, 0); - regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE, val); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 2ebdc6d5a76e..d5583190f3e4 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, if (cmd > (char *) urb->transfer_buffer) { /* Send partial buffer remaining before exiting */ - int len = cmd - (char *) urb->transfer_buffer; + int len; + if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) + *cmd++ = 0xAF; + len = cmd - (char *) urb->transfer_buffer; ret = udl_submit_urb(dev, urb, len); bytes_sent += len; } else diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 0c87b1ac6b68..b992644c17e6 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -153,11 +153,11 @@ static void udl_compress_hline16( raw_pixels_count_byte = cmd++; /* we'll know this later */ raw_pixel_start = pixel; - cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, - min((int)(pixel_end - pixel) / bpp, - (int)(cmd_buffer_end - cmd) / 2))) * bpp; + cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, + (unsigned long)(pixel_end - pixel) / bpp, + (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; - prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); + prefetch_range((void *) pixel, cmd_pixel_end - pixel); pixel_val16 = get_pixel_val16(pixel, bpp); while (pixel < cmd_pixel_end) { @@ -193,6 +193,9 @@ static void udl_compress_hline16( if (pixel > raw_pixel_start) { /* finalize last RAW span */ *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; + } else { + /* undo unused byte */ + cmd--; } *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f858cc72011d..3942ee61bd1c 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1952,6 +1952,8 @@ static int hid_device_probe(struct device *dev) } hdev->io_started = false; + clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); + if (!hdev->driver) { id = hid_match_device(hdev, hdrv); if (id == NULL) { @@ -2215,7 +2217,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data) struct hid_device *hdev = to_hid_device(dev); if (hdev->driver == hdrv && - !hdrv->match(hdev, hid_ignore_special_drivers)) + !hdrv->match(hdev, hid_ignore_special_drivers) && + !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) return device_reprobe(dev); return 0; diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 8469b6964ff6..b48100236df8 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1154,6 +1154,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, goto out; if (list->tail > list->head) { len = list->tail - list->head; + if (len > count) + len = count; if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { ret = -EFAULT; @@ -1163,6 +1165,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, list->head += len; } else { len = HID_DEBUG_BUFSIZE - list->head; + if (len > count) + len = count; if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { ret = 
-EFAULT; @@ -1170,7 +1174,9 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, } list->head = 0; ret += len; - goto copy_rest; + count -= len; + if (count > 0) + goto copy_rest; } } diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 7b8e17b03cb8..6bf4da7ad63a 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = { USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) }, { } }; MODULE_DEVICE_TABLE(hid, hammer_devices); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index a85634fe033f..c7981ddd8776 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -452,6 +452,7 @@ #define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE 0x5028 #define USB_DEVICE_ID_GOOGLE_STAFF 0x502b #define USB_DEVICE_ID_GOOGLE_WAND 0x502d +#define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index cb86cc834201..0422ec2b13d2 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -573,7 +573,7 @@ static bool steam_is_valve_interface(struct hid_device *hdev) static int steam_client_ll_parse(struct hid_device *hdev) { - struct steam_device *steam = hid_get_drvdata(hdev); + struct steam_device *steam = hdev->driver_data; return hid_parse_report(hdev, steam->hdev->dev_rdesc, steam->hdev->dev_rsize); @@ -590,7 +590,7 @@ static void steam_client_ll_stop(struct hid_device *hdev) static int steam_client_ll_open(struct hid_device *hdev) { - struct steam_device *steam = hid_get_drvdata(hdev); + struct steam_device *steam = hdev->driver_data; int ret; ret = hid_hw_open(steam->hdev); @@ -605,7 +605,7 @@ static int steam_client_ll_open(struct hid_device *hdev) static void steam_client_ll_close(struct hid_device *hdev) { - struct steam_device *steam = hid_get_drvdata(hdev); + struct steam_device *steam = hdev->driver_data; mutex_lock(&steam->mutex); steam->client_opened = false; @@ -623,7 +623,7 @@ static int steam_client_ll_raw_request(struct hid_device *hdev, size_t count, unsigned char report_type, int reqtype) { - struct steam_device *steam = hid_get_drvdata(hdev); + struct steam_device *steam = hdev->driver_data; return hid_hw_raw_request(steam->hdev, reportnum, buf, count, report_type, reqtype); @@ -710,7 +710,7 @@ static int steam_probe(struct hid_device *hdev, ret = PTR_ERR(steam->client_hdev); goto client_hdev_fail; } - hid_set_drvdata(steam->client_hdev, steam); + steam->client_hdev->driver_data = steam; /* * With the real steam controller interface, do not connect hidraw. 
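For reference, the hid-debug change above caps each copy segment at the caller's remaining count before copying data out of the circular event buffer; without the cap, a read() smaller than the buffered data could overrun the user buffer. A simplified standalone sketch of that clamping pattern (plain memcpy in place of copy_to_user, illustrative names, not the driver code itself):

#include <stddef.h>
#include <string.h>

#define RING_SIZE 512			/* stand-in for HID_DEBUG_BUFSIZE */

/*
 * Copy at most 'count' bytes out of a wrapping ring buffer.
 * Returns the number of bytes copied; 'head' is advanced in place.
 */
static size_t ring_read(const char *ring, size_t *head, size_t tail,
			char *out, size_t count)
{
	size_t copied = 0;
	size_t len;

	if (tail > *head) {			/* data is contiguous */
		len = tail - *head;
		if (len > count)		/* clamp to the caller's request */
			len = count;
		memcpy(out, ring + *head, len);
		*head += len;
		copied = len;
	} else if (tail < *head) {		/* data wraps past the end */
		len = RING_SIZE - *head;
		if (len > count)
			len = count;
		memcpy(out, ring + *head, len);
		*head = (*head + len) % RING_SIZE;
		copied = len;
		count -= len;
		if (count > 0) {		/* second segment from index 0 */
			len = tail < count ? tail : count;
			memcpy(out + copied, ring, len);
			*head = len;
			copied += len;
		}
	}
	return copied;
}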
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index c1652bb7bd15..eae0cb3ddec6 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) return; } - if ((ret_size > size) || (ret_size <= 2)) { + if ((ret_size > size) || (ret_size < 2)) { dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", __func__, size, ret_size); return; diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 582e449be9fe..a2c53ea3b5ed 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev) kfree(ishtp_dev); } -#ifdef CONFIG_PM -static struct device *ish_resume_device; +static struct device __maybe_unused *ish_resume_device; /* 50ms to get resume response */ #define WAIT_FOR_RESUME_ACK_MS 50 @@ -220,7 +219,7 @@ static struct device *ish_resume_device; * in that case a simple resume message is enough, others we need * a reset sequence. */ -static void ish_resume_handler(struct work_struct *work) +static void __maybe_unused ish_resume_handler(struct work_struct *work) { struct pci_dev *pdev = to_pci_dev(ish_resume_device); struct ishtp_device *dev = pci_get_drvdata(pdev); @@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work) * * Return: 0 to the pm core */ -static int ish_suspend(struct device *device) +static int __maybe_unused ish_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct ishtp_device *dev = pci_get_drvdata(pdev); @@ -288,7 +287,7 @@ static int ish_suspend(struct device *device) return 0; } -static DECLARE_WORK(resume_work, ish_resume_handler); +static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler); /** * ish_resume() - ISH resume callback * @device: device pointer @@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler); * * Return: 0 to the pm core */ -static int ish_resume(struct device *device) +static int __maybe_unused ish_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct ishtp_device *dev = pci_get_drvdata(pdev); @@ -311,21 +310,14 @@ static int ish_resume(struct device *device) return 0; } -static const struct dev_pm_ops ish_pm_ops = { - .suspend = ish_suspend, - .resume = ish_resume, -}; -#define ISHTP_ISH_PM_OPS (&ish_pm_ops) -#else -#define ISHTP_ISH_PM_OPS NULL -#endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume); static struct pci_driver ish_driver = { .name = KBUILD_MODNAME, .id_table = ish_pci_tbl, .probe = ish_probe, .remove = ish_remove, - .driver.pm = ISHTP_ISH_PM_OPS, + .driver.pm = &ish_pm_ops, }; module_pci_driver(ish_driver); diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index e3ce233f8bdc..23872d08308c 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "usbhid.h" #ifdef CONFIG_USB_DYNAMIC_MINORS @@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, if (uref->field_index >= report->maxfield) goto inval; + uref->field_index = array_index_nospec(uref->field_index, + report->maxfield); field = report->field[uref->field_index]; if (uref->usage_index >= field->maxusage) goto inval; + uref->usage_index = array_index_nospec(uref->usage_index, + field->maxusage); uref->usage_code = 
field->usage[uref->usage_index].hid; @@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, if (uref->field_index >= report->maxfield) goto inval; + uref->field_index = array_index_nospec(uref->field_index, + report->maxfield); field = report->field[uref->field_index]; @@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (finfo.field_index >= report->maxfield) break; + finfo.field_index = array_index_nospec(finfo.field_index, + report->maxfield); field = report->field[finfo.field_index]; memset(&finfo, 0, sizeof(finfo)); @@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (cinfo.index >= hid->maxcollection) break; + cinfo.index = array_index_nospec(cinfo.index, + hid->maxcollection); cinfo.type = hid->collection[cinfo.index].type; cinfo.usage = hid->collection[cinfo.index].usage; diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index c101369b51de..d6797535fff9 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev, } } + /* 2nd-generation Intuos Pro Large has incorrect Y maximum */ + if (hdev->vendor == USB_VENDOR_ID_WACOM && + hdev->product == 0x0358 && + WACOM_PEN_FIELD(field) && + wacom_equivalent_usage(usage->hid) == HID_GD_Y) { + field->logical_maximum = 43200; + } + switch (usage->hid) { case HID_GD_X: features->x_max = field->logical_maximum; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 0bb44d0088ed..ad7afa74d365 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom) if (features->type >= INTUOSHT && features->type <= BAMBOO_PT) features->device_type |= WACOM_DEVICETYPE_PAD; - features->x_max = 4096; - features->y_max = 4096; + if (features->type == INTUOSHT2) { + features->x_max = features->x_max / 10; + features->y_max = features->y_max / 10; + } + else { + features->x_max = 4096; + features->y_max = 4096; + } } else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { features->device_type |= WACOM_DEVICETYPE_PAD; diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index bf3bb7e1adab..9d3ef879dc51 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -1074,6 +1074,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"), }, }, + { + .ident = "Dell XPS13 9333", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"), + }, + }, { } }; diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 155d4d1d1585..f9d1349c3286 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -4175,7 +4175,7 @@ static int nct6775_probe(struct platform_device *pdev) * The temperature is already monitored if the respective bit in * is set. 
*/ - for (i = 0; i < 32; i++) { + for (i = 0; i < 31; i++) { if (!(data->temp_mask & BIT(i + 1))) continue; if (!reg_temp_alternate[i]) diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 4a34f311e1ff..6ec65adaba49 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -647,10 +647,10 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap, if (bit_adap->getscl == NULL) adap->quirks = &i2c_bit_quirk_no_clk_stretch; - /* Bring bus to a known state. Looks like STOP if bus is not free yet */ - setscl(bit_adap, 1); - udelay(bit_adap->udelay); - setsda(bit_adap, 1); + /* + * We tried forcing SCL/SDA to an initial state here. But that caused a + * regression, sadly. Check Bugzilla #200045 for details. + */ ret = add_adapter(adap); if (ret < 0) diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index 44cffad43701..c4d176f5ed79 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c @@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = { .name = "cht_wc_ext_chrg_irq_chip", }; -static const char * const bq24190_suppliers[] = { "fusb302-typec-source" }; +static const char * const bq24190_suppliers[] = { + "tcpm-source-psy-i2c-fusb302" }; static const struct property_entry bq24190_props[] = { PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers), diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index 005e6e0330c2..66f85bbf3591 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev) * required for an I2C bus. */ if (pdata->scl_is_open_drain) - gflags = GPIOD_OUT_LOW; + gflags = GPIOD_OUT_HIGH; else - gflags = GPIOD_OUT_LOW_OPEN_DRAIN; + gflags = GPIOD_OUT_HIGH_OPEN_DRAIN; priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags); if (IS_ERR(priv->scl)) return PTR_ERR(priv->scl); diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index e866c481bfc3..fce52bdab2b7 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c @@ -127,7 +127,7 @@ enum stu300_error { /* * The number of address send athemps tried before giving up. - * If the first one failes it seems like 5 to 8 attempts are required. + * If the first one fails it seems like 5 to 8 attempts are required. */ #define NUM_ADDR_RESEND_ATTEMPTS 12 diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 5fccd1f1bca8..797def5319f1 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -545,6 +545,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev) { u32 cnfg; + /* + * NACK interrupt is generated before the I2C controller generates + * the STOP condition on the bus. So wait for 2 clock periods + * before disabling the controller so that the STOP condition has + * been delivered properly. + */ + udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); + cnfg = i2c_readl(i2c_dev, I2C_CNFG); if (cnfg & I2C_CNFG_PACKET_MODE_EN) i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG); @@ -706,15 +714,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) return 0; - /* - * NACK interrupt is generated before the I2C controller generates - * the STOP condition on the bus. So wait for 2 clock periods - * before resetting the controller so that the STOP condition has - * been delivered properly. 
- */ - if (i2c_dev->msg_err == I2C_ERR_NO_ACK) - udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); - tegra_i2c_init(i2c_dev); if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { if (msg->flags & I2C_M_IGNORE_NAK) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 31d16ada6e7d..301285c54603 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) val = !val; bri->set_scl(adap, val); - ndelay(RECOVERY_NDELAY); + + /* + * If we can set SDA, we will always create STOP here to ensure + * the additional pulses will do no harm. This is achieved by + * letting SDA follow SCL half a cycle later. + */ + ndelay(RECOVERY_NDELAY / 2); + if (bri->set_sda) + bri->set_sda(adap, val); + ndelay(RECOVERY_NDELAY / 2); } /* check if recovery actually succeeded */ diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index f3f683041e7f..51970bae3c4a 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -465,15 +465,18 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, status = i2c_transfer(adapter, msg, num); if (status < 0) - return status; - if (status != num) - return -EIO; + goto cleanup; + if (status != num) { + status = -EIO; + goto cleanup; + } + status = 0; /* Check PEC if last message is a read */ if (i && (msg[num-1].flags & I2C_M_RD)) { status = i2c_smbus_check_pec(partial_pec, &msg[num-1]); if (status < 0) - return status; + goto cleanup; } if (read_write == I2C_SMBUS_READ) @@ -499,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, break; } +cleanup: if (msg[0].flags & I2C_M_DMA_SAFE) kfree(msg[0].buf); if (msg[1].flags & I2C_M_DMA_SAFE) kfree(msg[1].buf); - return 0; + return status; } /** diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 7e3d82cff3d5..c149c9c360fc 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p) if (src < 0) return IRQ_NONE; - if (!(src & data->chip_info->enabled_events)) + if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY))) return IRQ_NONE; if (src & MMA8452_INT_DRDY) { diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index f9c0624505a2..42618fe4f83e 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -959,6 +959,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, } irq_type = irqd_get_trigger_type(desc); + if (!irq_type) + irq_type = IRQF_TRIGGER_RISING; if (irq_type == IRQF_TRIGGER_RISING) st->irq_mask = INV_MPU6050_ACTIVE_HIGH; else if (irq_type == IRQF_TRIGGER_FALLING) diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c index 34d42a2504c9..df5b2a0da96c 100644 --- a/drivers/iio/light/tsl2772.c +++ b/drivers/iio/light/tsl2772.c @@ -582,6 +582,8 @@ static int tsl2772_als_calibrate(struct iio_dev *indio_dev) "%s: failed to get lux\n", __func__); return lux_val; } + if (lux_val == 0) + return -ERANGE; ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) / lux_val; diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 5ec3e41b65f2..fe87d27779d9 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -415,10 +415,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, 
int *val2) } comp_humidity = bmp280_compensate_humidity(data, adc_humidity); - *val = comp_humidity; - *val2 = 1024; + *val = comp_humidity * 1000 / 1024; - return IIO_VAL_FRACTIONAL; + return IIO_VAL_INT; } static int bmp280_read_raw(struct iio_dev *indio_dev, diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 3e90b6a1d9d2..cc06e8404e9b 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3488,8 +3488,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, struct ib_flow_attr *flow_attr; struct ib_qp *qp; struct ib_uflow_resources *uflow_res; + struct ib_uverbs_flow_spec_hdr *kern_spec; int err = 0; - void *kern_spec; void *ib_spec; int i; @@ -3538,8 +3538,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, if (!kern_flow_attr) return -ENOMEM; - memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); - err = ib_copy_from_udata(kern_flow_attr + 1, ucore, + *kern_flow_attr = cmd.flow_attr; + err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore, cmd.flow_attr.size); if (err) goto err_free_attr; @@ -3559,6 +3559,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, goto err_uobj; } + if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) { + err = -EINVAL; + goto err_put; + } + flow_attr = kzalloc(struct_size(flow_attr, flows, cmd.flow_attr.num_of_specs), GFP_KERNEL); if (!flow_attr) { @@ -3578,21 +3583,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, flow_attr->flags = kern_flow_attr->flags; flow_attr->size = sizeof(*flow_attr); - kern_spec = kern_flow_attr + 1; + kern_spec = kern_flow_attr->flow_specs; ib_spec = flow_attr + 1; for (i = 0; i < flow_attr->num_of_specs && - cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && - cmd.flow_attr.size >= - ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { - err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec, - uflow_res); + cmd.flow_attr.size >= sizeof(*kern_spec) && + cmd.flow_attr.size >= kern_spec->size; + i++) { + err = kern_spec_to_ib_spec( + file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec, + ib_spec, uflow_res); if (err) goto err_free; flow_attr->size += ((union ib_flow_spec *) ib_spec)->size; - cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; - kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size; + cmd.flow_attr.size -= kern_spec->size; + kern_spec = ((void *)kern_spec) + kern_spec->size; ib_spec += ((union ib_flow_spec *) ib_spec)->size; } if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 3ae2339dd27a..2094d136513d 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -736,10 +736,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, if (ret) return ret; - if (!file->ucontext && - (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) - return -EINVAL; - if (extended) { if (count < (sizeof(hdr) + sizeof(ex_hdr))) return -EINVAL; @@ -759,6 +755,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, goto out; } + /* + * Must be after the ib_dev check, as once the RCU clears ib_dev == + * NULL means ucontext == NULL + */ + if (!file->ucontext && + (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) { + ret = -EINVAL; + goto out; + } + if (!verify_command_mask(ib_dev, command, extended)) { ret = -EOPNOTSUPP; goto 
out; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 0b56828c1319..9d6beb948535 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp); /* Completion queues */ -struct ib_cq *ib_create_cq(struct ib_device *device, - ib_comp_handler comp_handler, - void (*event_handler)(struct ib_event *, void *), - void *cq_context, - const struct ib_cq_init_attr *cq_attr) +struct ib_cq *__ib_create_cq(struct ib_device *device, + ib_comp_handler comp_handler, + void (*event_handler)(struct ib_event *, void *), + void *cq_context, + const struct ib_cq_init_attr *cq_attr, + const char *caller) { struct ib_cq *cq; @@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device, cq->cq_context = cq_context; atomic_set(&cq->usecnt, 0); cq->res.type = RDMA_RESTRACK_CQ; + cq->res.kern_name = caller; rdma_restrack_add(&cq->res); } return cq; } -EXPORT_SYMBOL(ib_create_cq); +EXPORT_SYMBOL(__ib_create_cq); int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) { diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 1445918e3239..7b76e6f81aeb 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr) { struct c4iw_mr *mhp = to_c4iw_mr(ibmr); - if (unlikely(mhp->mpl_len == mhp->max_mpl_len)) + if (unlikely(mhp->mpl_len == mhp->attr.pbl_size)) return -ENOMEM; mhp->mpl[mhp->mpl_len++] = addr; diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 1a1a47ac53c6..f15c93102081 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) lockdep_assert_held(&qp->s_lock); ps->s_txreq = get_txreq(ps->dev, qp); - if (IS_ERR(ps->s_txreq)) + if (!ps->s_txreq) goto bail_no_tx; if (priv->hdr_type == HFI1_PKT_TYPE_9B) { diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index b7b671017e59..e254dcec6f64 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015, 2016 Intel Corporation. + * Copyright(c) 2015 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) int middle = 0; ps->s_txreq = get_txreq(ps->dev, qp); - if (IS_ERR(ps->s_txreq)) + if (!ps->s_txreq) goto bail_no_tx; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 1ab332f1866e..70d39fc450a1 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015, 2016 Intel Corporation. + * Copyright(c) 2015 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) u32 lid; ps->s_txreq = get_txreq(ps->dev, qp); - if (IS_ERR(ps->s_txreq)) + if (!ps->s_txreq) goto bail_no_tx; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index 873e48ea923f..c4ab2d5b4502 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2016 - 2017 Intel Corporation. + * Copyright(c) 2016 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) __must_hold(&qp->s_lock) { - struct verbs_txreq *tx = ERR_PTR(-EBUSY); + struct verbs_txreq *tx = NULL; write_seqlock(&dev->txwait_lock); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 729244c3086c..1c19bbc764b2 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2016 Intel Corporation. + * Copyright(c) 2016 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, if (unlikely(!tx)) { /* call slow path to get the lock */ tx = __get_txreq(dev, qp); - if (IS_ERR(tx)) + if (!tx) return tx; } tx->qp = qp; diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index ed1f253faf97..c7c85c22e4e3 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -486,8 +486,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, } if (flags & IB_MR_REREG_ACCESS) { - if (ib_access_writable(mr_access_flags) && !mmr->umem->writable) - return -EPERM; + if (ib_access_writable(mr_access_flags) && + !mmr->umem->writable) { + err = -EPERM; + goto release_mpt_entry; + } err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, convert_access(mr_access_flags)); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index e52dd21519b4..b3ba9a222550 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3199,8 +3199,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters, if (!mcounters->hw_cntrs_hndl) { mcounters->hw_cntrs_hndl = mlx5_fc_create( to_mdev(ibcounters->device)->mdev, false); - if (!mcounters->hw_cntrs_hndl) { - ret = -ENOMEM; + if (IS_ERR(mcounters->hw_cntrs_hndl)) { + ret = PTR_ERR(mcounters->hw_cntrs_hndl); goto free; } hw_hndl = true; @@ -3546,29 +3546,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, return ERR_PTR(-ENOMEM); err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz); - if (err) { - kfree(ucmd); - return ERR_PTR(err); - } + if (err) + goto free_ucmd; } - if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) - return ERR_PTR(-ENOMEM); + if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) { + err = -ENOMEM; + goto free_ucmd; + } if (domain != IB_FLOW_DOMAIN_USER || flow_attr->port > dev->num_ports || (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | - IB_FLOW_ATTR_FLAGS_EGRESS))) - return ERR_PTR(-EINVAL); + IB_FLOW_ATTR_FLAGS_EGRESS))) { + err 
= -EINVAL; + goto free_ucmd; + } if (is_egress && (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || - flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) - return ERR_PTR(-EINVAL); + flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { + err = -EINVAL; + goto free_ucmd; + } dst = kzalloc(sizeof(*dst), GFP_KERNEL); - if (!dst) - return ERR_PTR(-ENOMEM); + if (!dst) { + err = -ENOMEM; + goto free_ucmd; + } mutex_lock(&dev->flow_db->lock); @@ -3637,8 +3643,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, unlock: mutex_unlock(&dev->flow_db->lock); kfree(dst); +free_ucmd: kfree(ucmd); - kfree(handler); return ERR_PTR(err); } @@ -6107,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports), MLX5_CAP_GEN(mdev, num_vhca_ports)); - if (MLX5_VPORT_MANAGER(mdev) && + if (MLX5_ESWITCH_MANAGER(mdev) && mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 0af7b7905550..f5de5adc9b1a 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); - if (desc_size == 0 || srq->msrq.max_gs > desc_size) - return ERR_PTR(-EINVAL); + if (desc_size == 0 || srq->msrq.max_gs > desc_size) { + err = -EINVAL; + goto err_srq; + } desc_size = roundup_pow_of_two(desc_size); desc_size = max_t(size_t, 32, desc_size); - if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) - return ERR_PTR(-EINVAL); + if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) { + err = -EINVAL; + goto err_srq; + } srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / sizeof(struct mlx5_wqe_data_seg); srq->msrq.wqe_shift = ilog2(desc_size); buf_size = srq->msrq.max * desc_size; - if (buf_size < desc_size) - return ERR_PTR(-EINVAL); + if (buf_size < desc_size) { + err = -EINVAL; + goto err_srq; + } in.type = init_attr->srq_type; if (pd->uobject) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index f7ac8fc9b531..f07b8df96f43 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1957,6 +1957,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, } if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) { + if (rdma_protocol_iwarp(&dev->ibdev, 1)) + return -EINVAL; + if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index f30eeba3f772..8be27238a86e 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -645,6 +645,9 @@ int rxe_requester(void *arg) } else { goto exit; } + if ((wqe->wr.send_flags & IB_SEND_SIGNALED) || + qp->sq_sig_type == IB_SIGNAL_ALL_WR) + rxe_run_task(&qp->comp.task, 1); qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index); goto next_wqe; @@ -709,6 +712,7 @@ int rxe_requester(void *arg) if (fill_packet(qp, wqe, &pkt, skb, payload)) { pr_debug("qp#%d Error during fill packet\n", qp_num(qp)); + kfree_skb(skb); goto err; } @@ -740,7 +744,6 @@ int rxe_requester(void *arg) goto next_wqe; err: - kfree_skb(skb); wqe->status = IB_WC_LOC_PROT_ERR; wqe->state = wqe_state_error; __rxe_do_task(&qp->comp.task); diff --git a/drivers/input/input-mt.c 
b/drivers/input/input-mt.c index cf30523c6ef6..6c7326c93721 100644 --- a/drivers/input/input-mt.c +++ b/drivers/input/input-mt.c @@ -131,8 +131,10 @@ EXPORT_SYMBOL(input_mt_destroy_slots); * inactive, or if the tool type is changed, a new tracking id is * assigned to the slot. The tool type is only reported if the * corresponding absbit field is set. + * + * Returns true if contact is active. */ -void input_mt_report_slot_state(struct input_dev *dev, +bool input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active) { struct input_mt *mt = dev->mt; @@ -140,22 +142,24 @@ void input_mt_report_slot_state(struct input_dev *dev, int id; if (!mt) - return; + return false; slot = &mt->slots[mt->slot]; slot->frame = mt->frame; if (!active) { input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); - return; + return false; } id = input_mt_get_value(slot, ABS_MT_TRACKING_ID); - if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type) + if (id < 0) id = input_mt_new_trkid(mt); input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id); input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type); + + return true; } EXPORT_SYMBOL(input_mt_report_slot_state); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 48e36acbeb49..cd620e009bad 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -125,7 +125,7 @@ static const struct xpad_device { u8 mapping; u8 xtype; } xpad_device[] = { - { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 }, + { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c index f6e643b589b6..e8dae6195b30 100644 --- a/drivers/input/keyboard/goldfish_events.c +++ b/drivers/input/keyboard/goldfish_events.c @@ -45,7 +45,7 @@ struct event_dev { static irqreturn_t events_interrupt(int irq, void *dev_id) { struct event_dev *edev = dev_id; - unsigned type, code, value; + unsigned int type, code, value; type = __raw_readl(edev->addr + REG_READ); code = __raw_readl(edev->addr + REG_READ); @@ -57,7 +57,7 @@ static irqreturn_t events_interrupt(int irq, void *dev_id) } static void events_import_bits(struct event_dev *edev, - unsigned long bits[], unsigned type, size_t count) + unsigned long bits[], unsigned int type, size_t count) { void __iomem *addr = edev->addr; int i, j; @@ -99,6 +99,7 @@ static void events_import_abs_params(struct event_dev *edev) for (j = 0; j < ARRAY_SIZE(val); j++) { int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32); + val[j] = __raw_readl(edev->addr + REG_DATA + offset); } @@ -112,7 +113,7 @@ static int events_probe(struct platform_device *pdev) struct input_dev *input_dev; struct event_dev *edev; struct resource *res; - unsigned keymapnamelen; + unsigned int keymapnamelen; void __iomem *addr; int irq; int i; @@ -150,7 +151,7 @@ static int events_probe(struct platform_device *pdev) for (i = 0; i < keymapnamelen; i++) edev->name[i] = __raw_readb(edev->addr + REG_DATA + i); - pr_debug("events_probe() keymap=%s\n", edev->name); + pr_debug("%s: keymap=%s\n", __func__, edev->name); input_dev->name = edev->name; input_dev->id.bustype = BUS_HOST; diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index c25606e00693..ca59a2be9bc5 100644 --- a/drivers/input/misc/Kconfig +++ 
b/drivers/input/misc/Kconfig @@ -841,4 +841,14 @@ config INPUT_RAVE_SP_PWRBUTTON To compile this driver as a module, choose M here: the module will be called rave-sp-pwrbutton. +config INPUT_SC27XX_VIBRA + tristate "Spreadtrum sc27xx vibrator support" + depends on MFD_SC27XX_PMIC || COMPILE_TEST + select INPUT_FF_MEMLESS + help + This option enables support for Spreadtrum sc27xx vibrator driver. + + To compile this driver as a module, choose M here. The module will + be called sc27xx_vibra. + endif diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 72cde28649e2..9d0f9d1ff68f 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -66,6 +66,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o +obj-$(CONFIG_INPUT_SC27XX_VIBRA) += sc27xx-vibra.o obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o diff --git a/drivers/input/misc/sc27xx-vibra.c b/drivers/input/misc/sc27xx-vibra.c new file mode 100644 index 000000000000..295251abbdac --- /dev/null +++ b/drivers/input/misc/sc27xx-vibra.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Spreadtrum Communications Inc. + */ + +#include +#include +#include +#include +#include +#include + +#define CUR_DRV_CAL_SEL GENMASK(13, 12) +#define SLP_LDOVIBR_PD_EN BIT(9) +#define LDO_VIBR_PD BIT(8) + +struct vibra_info { + struct input_dev *input_dev; + struct work_struct play_work; + struct regmap *regmap; + u32 base; + u32 strength; + bool enabled; +}; + +static void sc27xx_vibra_set(struct vibra_info *info, bool on) +{ + if (on) { + regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, 0); + regmap_update_bits(info->regmap, info->base, + SLP_LDOVIBR_PD_EN, 0); + info->enabled = true; + } else { + regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, + LDO_VIBR_PD); + regmap_update_bits(info->regmap, info->base, + SLP_LDOVIBR_PD_EN, SLP_LDOVIBR_PD_EN); + info->enabled = false; + } +} + +static int sc27xx_vibra_hw_init(struct vibra_info *info) +{ + return regmap_update_bits(info->regmap, info->base, CUR_DRV_CAL_SEL, 0); +} + +static void sc27xx_vibra_play_work(struct work_struct *work) +{ + struct vibra_info *info = container_of(work, struct vibra_info, + play_work); + + if (info->strength && !info->enabled) + sc27xx_vibra_set(info, true); + else if (info->strength == 0 && info->enabled) + sc27xx_vibra_set(info, false); +} + +static int sc27xx_vibra_play(struct input_dev *input, void *data, + struct ff_effect *effect) +{ + struct vibra_info *info = input_get_drvdata(input); + + info->strength = effect->u.rumble.weak_magnitude; + schedule_work(&info->play_work); + + return 0; +} + +static void sc27xx_vibra_close(struct input_dev *input) +{ + struct vibra_info *info = input_get_drvdata(input); + + cancel_work_sync(&info->play_work); + if (info->enabled) + sc27xx_vibra_set(info, false); +} + +static int sc27xx_vibra_probe(struct platform_device *pdev) +{ + struct vibra_info *info; + int error; + + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!info->regmap) { + dev_err(&pdev->dev, "failed to get vibrator regmap.\n"); + return -ENODEV; + } + + error = device_property_read_u32(&pdev->dev, "reg", 
&info->base); + if (error) { + dev_err(&pdev->dev, "failed to get vibrator base address.\n"); + return error; + } + + info->input_dev = devm_input_allocate_device(&pdev->dev); + if (!info->input_dev) { + dev_err(&pdev->dev, "failed to allocate input device.\n"); + return -ENOMEM; + } + + info->input_dev->name = "sc27xx:vibrator"; + info->input_dev->id.version = 0; + info->input_dev->close = sc27xx_vibra_close; + + input_set_drvdata(info->input_dev, info); + input_set_capability(info->input_dev, EV_FF, FF_RUMBLE); + INIT_WORK(&info->play_work, sc27xx_vibra_play_work); + info->enabled = false; + + error = sc27xx_vibra_hw_init(info); + if (error) { + dev_err(&pdev->dev, "failed to initialize the vibrator.\n"); + return error; + } + + error = input_ff_create_memless(info->input_dev, NULL, + sc27xx_vibra_play); + if (error) { + dev_err(&pdev->dev, "failed to register vibrator to FF.\n"); + return error; + } + + error = input_register_device(info->input_dev); + if (error) { + dev_err(&pdev->dev, "failed to register input device.\n"); + return error; + } + + return 0; +} + +static const struct of_device_id sc27xx_vibra_of_match[] = { + { .compatible = "sprd,sc2731-vibrator", }, + {} +}; +MODULE_DEVICE_TABLE(of, sc27xx_vibra_of_match); + +static struct platform_driver sc27xx_vibra_driver = { + .driver = { + .name = "sc27xx-vibrator", + .of_match_table = sc27xx_vibra_of_match, + }, + .probe = sc27xx_vibra_probe, +}; + +module_platform_driver(sc27xx_vibra_driver); + +MODULE_DESCRIPTION("Spreadtrum SC27xx Vibrator Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Xiaotong Lu "); diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 599544c1a91c..243e0fa6e3e3 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -27,6 +27,8 @@ #define ETP_DISABLE_POWER 0x0001 #define ETP_PRESSURE_OFFSET 25 +#define ETP_CALIBRATE_MAX_LEN 3 + /* IAP Firmware handling */ #define ETP_PRODUCT_ID_FORMAT_STRING "%d.0" #define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin" diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 8ff75114e762..1f9cd7d8b7ad 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -613,7 +613,7 @@ static ssize_t calibrate_store(struct device *dev, int tries = 20; int retval; int error; - u8 val[3]; + u8 val[ETP_CALIBRATE_MAX_LEN]; retval = mutex_lock_interruptible(&data->sysfs_mutex); if (retval) @@ -1345,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN060C", 0 }, { "ELAN0611", 0 }, { "ELAN0612", 0 }, + { "ELAN0618", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index cfcb32559925..c060d270bc4d 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -56,7 +56,7 @@ static int elan_smbus_initialize(struct i2c_client *client) { u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 }; - u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 }; + u8 values[I2C_SMBUS_BLOCK_MAX] = {0}; int len, error; /* Get hello packet */ @@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client) static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val) { int error; + u8 buf[I2C_SMBUS_BLOCK_MAX] = {0}; + + BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf)); error = i2c_smbus_read_block_data(client, - ETP_SMBUS_CALIBRATE_QUERY, val); + ETP_SMBUS_CALIBRATE_QUERY, buf); if (error < 0) 
return error; + memcpy(val, buf, ETP_CALIBRATE_MAX_LEN); return 0; } @@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report) { int len; + BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN); + len = i2c_smbus_read_block_data(client, ETP_SMBUS_PACKET_QUERY, &report[ETP_SMBUS_REPORT_OFFSET]); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index fb4d902c4403..dd85b16dc6f8 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -799,7 +799,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) else if (ic_version == 7 && etd->info.samples[1] == 0x2A) sanity_check = ((packet[3] & 0x1c) == 0x10); else - sanity_check = ((packet[0] & 0x0c) == 0x04 && + sanity_check = ((packet[0] & 0x08) == 0x00 && (packet[3] & 0x1c) == 0x10); if (!sanity_check) @@ -1175,6 +1175,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { { } }; +static const char * const middle_button_pnp_ids[] = { + "LEN2131", /* ThinkPad P52 w/ NFC */ + "LEN2132", /* ThinkPad P52 */ + NULL +}; + /* * Set the appropriate event bits for the input subsystem */ @@ -1194,7 +1200,8 @@ static int elantech_set_input_params(struct psmouse *psmouse) __clear_bit(EV_REL, dev->evbit); __set_bit(BTN_LEFT, dev->keybit); - if (dmi_check_system(elantech_dmi_has_middle_button)) + if (dmi_check_system(elantech_dmi_has_middle_button) || + psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids)) __set_bit(BTN_MIDDLE, dev->keybit); __set_bit(BTN_RIGHT, dev->keybit); diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 5ff5b1952be0..d3ff1fc09af7 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse) else input_report_rel(dev, REL_WHEEL, -wheel); - input_report_key(dev, BTN_SIDE, BIT(4)); - input_report_key(dev, BTN_EXTRA, BIT(5)); + input_report_key(dev, BTN_SIDE, packet[3] & BIT(4)); + input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5)); break; } break; @@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse) input_report_rel(dev, REL_WHEEL, -(s8) packet[3]); /* Extra buttons on Genius NewNet 3D */ - input_report_key(dev, BTN_SIDE, BIT(6)); - input_report_key(dev, BTN_EXTRA, BIT(7)); + input_report_key(dev, BTN_SIDE, packet[0] & BIT(6)); + input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7)); break; case PSMOUSE_THINKPS: /* Extra button on ThinkingMouse */ - input_report_key(dev, BTN_EXTRA, BIT(3)); + input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3)); /* * Without this bit of weirdness moving up gives wildly @@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse) * Cortron PS2 Trackball reports SIDE button in the * 4th bit of the first byte. */ - input_report_key(dev, BTN_SIDE, BIT(3)); + input_report_key(dev, BTN_SIDE, packet[0] & BIT(3)); packet[0] |= BIT(3); break; diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig index 7172b88cd064..fad2eae4a118 100644 --- a/drivers/input/rmi4/Kconfig +++ b/drivers/input/rmi4/Kconfig @@ -3,6 +3,7 @@ # config RMI4_CORE tristate "Synaptics RMI4 bus support" + select IRQ_DOMAIN help Say Y here if you want to support the Synaptics RMI4 bus. This is required for all RMI4 device support. 
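The elan_i2c_smbus changes above apply a general SMBus rule: i2c_smbus_read_block_data() may write up to I2C_SMBUS_BLOCK_MAX (32) bytes into its destination, so the destination must be a full-size scratch buffer even when only a few bytes are expected, and the useful bytes are copied out afterwards. A minimal sketch of that pattern (hypothetical command and result length, not the driver's actual code):

#include <linux/build_bug.h>
#include <linux/i2c.h>
#include <linux/string.h>

#define MY_QUERY_CMD	0x0c	/* hypothetical SMBus command */
#define MY_RESULT_LEN	3	/* bytes the caller actually wants */

static int my_read_result(struct i2c_client *client, u8 *out)
{
	u8 buf[I2C_SMBUS_BLOCK_MAX] = { 0 };	/* device may return up to 32 bytes */
	int len;

	BUILD_BUG_ON(MY_RESULT_LEN > sizeof(buf));

	len = i2c_smbus_read_block_data(client, MY_QUERY_CMD, buf);
	if (len < 0)
		return len;			/* propagate the I2C/SMBus error */

	memcpy(out, buf, MY_RESULT_LEN);	/* hand back only what was asked for */
	return 0;
}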
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c index 8bb866c7b985..8eeffa066022 100644 --- a/drivers/input/rmi4/rmi_2d_sensor.c +++ b/drivers/input/rmi4/rmi_2d_sensor.c @@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor, if (obj->type == RMI_2D_OBJECT_NONE) return; - if (axis_align->swap_axes) - swap(obj->x, obj->y); - if (axis_align->flip_x) obj->x = sensor->max_x - obj->x; if (axis_align->flip_y) obj->y = sensor->max_y - obj->y; + if (axis_align->swap_axes) + swap(obj->x, obj->y); + /* * Here checking if X offset or y offset are specified is * redundant. We just add the offsets or clip the values. @@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y) x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x)); y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y)); - if (axis_align->swap_axes) - swap(x, y); - if (axis_align->flip_x) x = min(RMI_2D_REL_POS_MAX, -x); if (axis_align->flip_y) y = min(RMI_2D_REL_POS_MAX, -y); + if (axis_align->swap_axes) + swap(x, y); + if (x || y) { input_report_rel(sensor->input, REL_X, x); input_report_rel(sensor->input, REL_Y, y); @@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor) struct input_dev *input = sensor->input; int res_x; int res_y; + int max_x, max_y; int input_flags = 0; if (sensor->report_abs) { - if (sensor->axis_align.swap_axes) { - swap(sensor->max_x, sensor->max_y); - swap(sensor->axis_align.clip_x_low, - sensor->axis_align.clip_y_low); - swap(sensor->axis_align.clip_x_high, - sensor->axis_align.clip_y_high); - } - sensor->min_x = sensor->axis_align.clip_x_low; if (sensor->axis_align.clip_x_high) sensor->max_x = min(sensor->max_x, @@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor) sensor->axis_align.clip_y_high); set_bit(EV_ABS, input->evbit); - input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x, - 0, 0); - input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y, - 0, 0); + + max_x = sensor->max_x; + max_y = sensor->max_y; + if (sensor->axis_align.swap_axes) + swap(max_x, max_y); + input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0); if (sensor->x_mm && sensor->y_mm) { res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm; res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm; + if (sensor->axis_align.swap_axes) + swap(res_x, res_y); input_abs_set_res(input, ABS_X, res_x); input_abs_set_res(input, ABS_Y, res_y); diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c index c5fa53adba8d..bd0d5ff01b08 100644 --- a/drivers/input/rmi4/rmi_bus.c +++ b/drivers/input/rmi4/rmi_bus.c @@ -9,6 +9,8 @@ #include #include +#include +#include #include #include #include @@ -167,6 +169,39 @@ static inline void rmi_function_of_probe(struct rmi_function *fn) {} #endif +static struct irq_chip rmi_irq_chip = { + .name = "rmi4", +}; + +static int rmi_create_function_irq(struct rmi_function *fn, + struct rmi_function_handler *handler) +{ + struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev); + int i, error; + + for (i = 0; i < fn->num_of_irqs; i++) { + set_bit(fn->irq_pos + i, fn->irq_mask); + + fn->irq[i] = irq_create_mapping(drvdata->irqdomain, + fn->irq_pos + i); + + irq_set_chip_data(fn->irq[i], fn); + irq_set_chip_and_handler(fn->irq[i], &rmi_irq_chip, + handle_simple_irq); + irq_set_nested_thread(fn->irq[i], 1); + + error = 
devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL, + handler->attention, IRQF_ONESHOT, + dev_name(&fn->dev), fn); + if (error) { + dev_err(&fn->dev, "Error %d registering IRQ\n", error); + return error; + } + } + + return 0; +} + static int rmi_function_probe(struct device *dev) { struct rmi_function *fn = to_rmi_function(dev); @@ -178,7 +213,14 @@ static int rmi_function_probe(struct device *dev) if (handler->probe) { error = handler->probe(fn); - return error; + if (error) + return error; + } + + if (fn->num_of_irqs && handler->attention) { + error = rmi_create_function_irq(fn, handler); + if (error) + return error; } return 0; @@ -230,12 +272,18 @@ int rmi_register_function(struct rmi_function *fn) void rmi_unregister_function(struct rmi_function *fn) { + int i; + rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n", fn->fd.function_number); device_del(&fn->dev); of_node_put(fn->dev.of_node); put_device(&fn->dev); + + for (i = 0; i < fn->num_of_irqs; i++) + irq_dispose_mapping(fn->irq[i]); + } /** diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h index b7625a9ac66a..96383eab41ba 100644 --- a/drivers/input/rmi4/rmi_bus.h +++ b/drivers/input/rmi4/rmi_bus.h @@ -14,6 +14,12 @@ struct rmi_device; +/* + * The interrupt source count in the function descriptor can represent up to + * 6 interrupt sources in the normal manner. + */ +#define RMI_FN_MAX_IRQS 6 + /** * struct rmi_function - represents the implementation of an RMI4 * function for a particular device (basically, a driver for that RMI4 function) @@ -26,6 +32,7 @@ struct rmi_device; * @irq_pos: The position in the irq bitfield this function holds * @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN * interrupt handling. + * @irqs: assigned virq numbers (up to num_of_irqs) * * @node: entry in device's list of functions */ @@ -36,6 +43,7 @@ struct rmi_function { struct list_head node; unsigned int num_of_irqs; + int irq[RMI_FN_MAX_IRQS]; unsigned int irq_pos; unsigned long irq_mask[]; }; @@ -76,7 +84,7 @@ struct rmi_function_handler { void (*remove)(struct rmi_function *fn); int (*config)(struct rmi_function *fn); int (*reset)(struct rmi_function *fn); - int (*attention)(struct rmi_function *fn, unsigned long *irq_bits); + irqreturn_t (*attention)(int irq, void *ctx); int (*suspend)(struct rmi_function *fn); int (*resume)(struct rmi_function *fn); }; diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 7d29053dfb0f..fc3ab93b7aea 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include "rmi_bus.h" @@ -127,28 +128,11 @@ static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev) return 0; } -static void process_one_interrupt(struct rmi_driver_data *data, - struct rmi_function *fn) -{ - struct rmi_function_handler *fh; - - if (!fn || !fn->dev.driver) - return; - - fh = to_rmi_function_handler(fn->dev.driver); - if (fh->attention) { - bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask, - data->irq_count); - if (!bitmap_empty(data->fn_irq_bits, data->irq_count)) - fh->attention(fn, data->fn_irq_bits); - } -} - static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev) { struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); struct device *dev = &rmi_dev->dev; - struct rmi_function *entry; + int i; int error; if (!data) @@ -173,16 +157,8 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev) */ 
mutex_unlock(&data->irq_mutex); - /* - * It would be nice to be able to use irq_chip to handle these - * nested IRQs. Unfortunately, most of the current customers for - * this driver are using older kernels (3.0.x) that don't support - * the features required for that. Once they've shifted to more - * recent kernels (say, 3.3 and higher), this should be switched to - * use irq_chip. - */ - list_for_each_entry(entry, &data->function_list, node) - process_one_interrupt(data, entry); + for_each_set_bit(i, data->irq_status, data->irq_count) + handle_nested_irq(irq_find_mapping(data->irqdomain, i)); if (data->input) input_sync(data->input); @@ -1001,9 +977,13 @@ EXPORT_SYMBOL_GPL(rmi_driver_resume); static int rmi_driver_remove(struct device *dev) { struct rmi_device *rmi_dev = to_rmi_device(dev); + struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); rmi_disable_irq(rmi_dev, false); + irq_domain_remove(data->irqdomain); + data->irqdomain = NULL; + rmi_f34_remove_sysfs(rmi_dev); rmi_free_function_list(rmi_dev); @@ -1035,7 +1015,8 @@ int rmi_probe_interrupts(struct rmi_driver_data *data) { struct rmi_device *rmi_dev = data->rmi_dev; struct device *dev = &rmi_dev->dev; - int irq_count; + struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode; + int irq_count = 0; size_t size; int retval; @@ -1046,7 +1027,6 @@ int rmi_probe_interrupts(struct rmi_driver_data *data) * being accessed. */ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__); - irq_count = 0; data->bootloader_mode = false; retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs); @@ -1058,6 +1038,15 @@ int rmi_probe_interrupts(struct rmi_driver_data *data) if (data->bootloader_mode) dev_warn(dev, "Device in bootloader mode.\n"); + /* Allocate and register a linear revmap irq_domain */ + data->irqdomain = irq_domain_create_linear(fwnode, irq_count, + &irq_domain_simple_ops, + data); + if (!data->irqdomain) { + dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n"); + return -ENOMEM; + } + data->irq_count = irq_count; data->num_of_irq_regs = (data->irq_count + 7) / 8; @@ -1080,10 +1069,9 @@ int rmi_init_functions(struct rmi_driver_data *data) { struct rmi_device *rmi_dev = data->rmi_dev; struct device *dev = &rmi_dev->dev; - int irq_count; + int irq_count = 0; int retval; - irq_count = 0; rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__); retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function); if (retval < 0) { diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c index 8a07ae147df6..4edaa14fe878 100644 --- a/drivers/input/rmi4/rmi_f01.c +++ b/drivers/input/rmi4/rmi_f01.c @@ -681,9 +681,9 @@ static int rmi_f01_resume(struct rmi_function *fn) return 0; } -static int rmi_f01_attention(struct rmi_function *fn, - unsigned long *irq_bits) +static irqreturn_t rmi_f01_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct rmi_device *rmi_dev = fn->rmi_dev; int error; u8 device_status; @@ -692,7 +692,7 @@ static int rmi_f01_attention(struct rmi_function *fn, if (error) { dev_err(&fn->dev, "Failed to read device status: %d.\n", error); - return error; + return IRQ_RETVAL(error); } if (RMI_F01_STATUS_BOOTLOADER(device_status)) @@ -704,11 +704,11 @@ static int rmi_f01_attention(struct rmi_function *fn, error = rmi_dev->driver->reset_handler(rmi_dev); if (error) { dev_err(&fn->dev, "Device reset failed: %d\n", error); - return error; + return IRQ_RETVAL(error); } } - return 0; + return IRQ_HANDLED; } struct rmi_function_handler rmi_f01_handler = { diff --git 
a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c index 88822196d6b7..aaa1edc95522 100644 --- a/drivers/input/rmi4/rmi_f03.c +++ b/drivers/input/rmi4/rmi_f03.c @@ -244,8 +244,9 @@ static int rmi_f03_config(struct rmi_function *fn) return 0; } -static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) +static irqreturn_t rmi_f03_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct rmi_device *rmi_dev = fn->rmi_dev; struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f03_data *f03 = dev_get_drvdata(&fn->dev); @@ -262,7 +263,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) /* First grab the data passed by the transport device */ if (drvdata->attn_data.size < ob_len) { dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n"); - return 0; + return IRQ_HANDLED; } memcpy(obs, drvdata->attn_data.data, ob_len); @@ -277,7 +278,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) "%s: Failed to read F03 output buffers: %d\n", __func__, error); serio_interrupt(f03->serio, 0, SERIO_TIMEOUT); - return error; + return IRQ_RETVAL(error); } } @@ -303,7 +304,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) serio_interrupt(f03->serio, ob_data, serio_flags); } - return 0; + return IRQ_HANDLED; } static void rmi_f03_remove(struct rmi_function *fn) diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c index 12a233251793..df64d6aed4f7 100644 --- a/drivers/input/rmi4/rmi_f11.c +++ b/drivers/input/rmi4/rmi_f11.c @@ -570,9 +570,7 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger) } static void rmi_f11_finger_handler(struct f11_data *f11, - struct rmi_2d_sensor *sensor, - unsigned long *irq_bits, int num_irq_regs, - int size) + struct rmi_2d_sensor *sensor, int size) { const u8 *f_state = f11->data.f_state; u8 finger_state; @@ -581,12 +579,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11, int rel_fingers; int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES; - int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask, - num_irq_regs * 8); - int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask, - num_irq_regs * 8); - - if (abs_bits) { + if (sensor->report_abs) { if (abs_size > size) abs_fingers = size / RMI_F11_ABS_BYTES; else @@ -604,19 +597,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11, rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i], finger_state, i); } - } - if (rel_bits) { - if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size) - rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES; - else - rel_fingers = sensor->nbr_fingers; - - for (i = 0; i < rel_fingers; i++) - rmi_f11_rel_pos_report(f11, i); - } - - if (abs_bits) { /* * the absolute part is made in 2 parts to allow the kernel * tracking to take place. 
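For readers less familiar with the input core, the absolute branch kept above ultimately feeds the slotted multi-touch API. A self-contained sketch of that reporting pattern follows; the contact structure and values are invented for illustration, while input_mt_slot(), input_mt_report_slot_state() and input_mt_sync_frame() are the real kernel helpers:

#include <linux/input/mt.h>

struct example_contact {
        bool active;
        int x, y;
};

/* Report one frame of contacts through the slotted MT protocol. */
static void example_report_frame(struct input_dev *input,
                                 const struct example_contact *c, int nr)
{
        int i;

        for (i = 0; i < nr; i++) {
                input_mt_slot(input, i);
                input_mt_report_slot_state(input, MT_TOOL_FINGER, c[i].active);
                if (c[i].active) {
                        input_report_abs(input, ABS_MT_POSITION_X, c[i].x);
                        input_report_abs(input, ABS_MT_POSITION_Y, c[i].y);
                }
        }

        input_mt_sync_frame(input);
        input_sync(input);
}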
@@ -638,7 +619,16 @@ static void rmi_f11_finger_handler(struct f11_data *f11, } input_mt_sync_frame(sensor->input); + } else if (sensor->report_rel) { + if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size) + rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES; + else + rel_fingers = sensor->nbr_fingers; + + for (i = 0; i < rel_fingers; i++) + rmi_f11_rel_pos_report(f11, i); } + } static int f11_2d_construct_data(struct f11_data *f11) @@ -1276,8 +1266,9 @@ static int rmi_f11_config(struct rmi_function *fn) return 0; } -static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits) +static irqreturn_t rmi_f11_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct rmi_device *rmi_dev = fn->rmi_dev; struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f11_data *f11 = dev_get_drvdata(&fn->dev); @@ -1303,13 +1294,12 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits) data_base_addr, f11->sensor.data_pkt, f11->sensor.pkt_size); if (error < 0) - return error; + return IRQ_RETVAL(error); } - rmi_f11_finger_handler(f11, &f11->sensor, irq_bits, - drvdata->num_of_irq_regs, valid_bytes); + rmi_f11_finger_handler(f11, &f11->sensor, valid_bytes); - return 0; + return IRQ_HANDLED; } static int rmi_f11_resume(struct rmi_function *fn) diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c index a3d1aa88f2a9..5c7f48915779 100644 --- a/drivers/input/rmi4/rmi_f12.c +++ b/drivers/input/rmi4/rmi_f12.c @@ -197,10 +197,10 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size) rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i); } -static int rmi_f12_attention(struct rmi_function *fn, - unsigned long *irq_nr_regs) +static irqreturn_t rmi_f12_attention(int irq, void *ctx) { int retval; + struct rmi_function *fn = ctx; struct rmi_device *rmi_dev = fn->rmi_dev; struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f12_data *f12 = dev_get_drvdata(&fn->dev); @@ -222,7 +222,7 @@ static int rmi_f12_attention(struct rmi_function *fn, if (retval < 0) { dev_err(&fn->dev, "Failed to read object data. 
Code: %d.\n", retval); - return retval; + return IRQ_RETVAL(retval); } } @@ -232,7 +232,7 @@ static int rmi_f12_attention(struct rmi_function *fn, input_mt_sync_frame(sensor->input); - return 0; + return IRQ_HANDLED; } static int rmi_f12_write_control_regs(struct rmi_function *fn) diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c index 82e0f0d43d55..5e3ed5ac0c3e 100644 --- a/drivers/input/rmi4/rmi_f30.c +++ b/drivers/input/rmi4/rmi_f30.c @@ -122,8 +122,9 @@ static void rmi_f30_report_button(struct rmi_function *fn, } } -static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits) +static irqreturn_t rmi_f30_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct f30_data *f30 = dev_get_drvdata(&fn->dev); struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev); int error; @@ -134,7 +135,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits) if (drvdata->attn_data.size < f30->register_count) { dev_warn(&fn->dev, "F30 interrupted, but data is missing\n"); - return 0; + return IRQ_HANDLED; } memcpy(f30->data_regs, drvdata->attn_data.data, f30->register_count); @@ -147,7 +148,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits) dev_err(&fn->dev, "%s: Failed to read F30 data registers: %d\n", __func__, error); - return error; + return IRQ_RETVAL(error); } } @@ -159,7 +160,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits) rmi_f03_commit_buttons(f30->f03); } - return 0; + return IRQ_HANDLED; } static int rmi_f30_config(struct rmi_function *fn) diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c index f1f5ac539d5d..87a7d4ba382d 100644 --- a/drivers/input/rmi4/rmi_f34.c +++ b/drivers/input/rmi4/rmi_f34.c @@ -100,8 +100,9 @@ static int rmi_f34_command(struct f34_data *f34, u8 command, return 0; } -static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits) +static irqreturn_t rmi_f34_attention(int irq, void *ctx) { + struct rmi_function *fn = ctx; struct f34_data *f34 = dev_get_drvdata(&fn->dev); int ret; u8 status; @@ -126,7 +127,7 @@ static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits) complete(&f34->v7.cmd_done); } - return 0; + return IRQ_HANDLED; } static int rmi_f34_write_blocks(struct f34_data *f34, const void *data, diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index e8a59d164019..a6f515bcab22 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -610,11 +610,6 @@ static void rmi_f54_work(struct work_struct *work) mutex_unlock(&f54->data_mutex); } -static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits) -{ - return 0; -} - static int rmi_f54_config(struct rmi_function *fn) { struct rmi_driver *drv = fn->rmi_dev->driver; @@ -756,6 +751,5 @@ struct rmi_function_handler rmi_f54_handler = { .func = 0x54, .probe = rmi_f54_probe, .config = rmi_f54_config, - .attention = rmi_f54_attention, .remove = rmi_f54_remove, }; diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index ff7043f74a3d..d196ac3d8b8c 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = { { "GSL3692", 0 }, { "MSSL1680", 0 }, { "MSSL0001", 0 }, + { "MSSL0002", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 
e055d228bfb9..689ffe538370 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -142,7 +142,6 @@ config DMAR_TABLE config INTEL_IOMMU bool "Support for Intel IOMMU using DMA Remapping Devices" depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) - select DMA_DIRECT_OPS select IOMMU_API select IOMMU_IOVA select NEED_DMA_MAP_STATE diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 14e4b3722428..b344a883f116 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -3713,30 +3712,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { - void *vaddr; + struct page *page = NULL; + int order; - vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs); - if (iommu_no_mapping(dev) || !vaddr) - return vaddr; + size = PAGE_ALIGN(size); + order = get_order(size); - *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr), - PAGE_ALIGN(size), DMA_BIDIRECTIONAL, - dev->coherent_dma_mask); - if (!*dma_handle) - goto out_free_pages; - return vaddr; + if (!iommu_no_mapping(dev)) + flags &= ~(GFP_DMA | GFP_DMA32); + else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) { + if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) + flags |= GFP_DMA; + else + flags |= GFP_DMA32; + } + + if (gfpflags_allow_blocking(flags)) { + unsigned int count = size >> PAGE_SHIFT; + + page = dma_alloc_from_contiguous(dev, count, order, flags); + if (page && iommu_no_mapping(dev) && + page_to_phys(page) + size > dev->coherent_dma_mask) { + dma_release_from_contiguous(dev, page, count); + page = NULL; + } + } + + if (!page) + page = alloc_pages(flags, order); + if (!page) + return NULL; + memset(page_address(page), 0, size); + + *dma_handle = __intel_map_single(dev, page_to_phys(page), size, + DMA_BIDIRECTIONAL, + dev->coherent_dma_mask); + if (*dma_handle) + return page_address(page); + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) + __free_pages(page, order); -out_free_pages: - dma_direct_free(dev, size, vaddr, *dma_handle, attrs); return NULL; } static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { - if (!iommu_no_mapping(dev)) - intel_unmap(dev, dma_handle, PAGE_ALIGN(size)); - dma_direct_free(dev, size, vaddr, dma_handle, attrs); + int order; + struct page *page = virt_to_page(vaddr); + + size = PAGE_ALIGN(size); + order = get_order(size); + + intel_unmap(dev, dma_handle, size); + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) + __free_pages(page, order); } static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 0f52d44b3f69..f5fe0100f9ff 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -199,7 +199,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, fail: irq_domain_free_irqs_parent(domain, virq, nr_irqs); - gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs)); + gicv2m_unalloc_msi(v2m, hwirq, nr_irqs); return err; } diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c index 4eca5c763766..606efa64adff 100644 --- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c @@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain 
*msi_domain, */ info->scratchpad[0].ul = mc_bus_dev->icid; msi_info = msi_get_domain_info(msi_domain->parent); + + /* Allocate at least 32 MSIs, and always as a power of 2 */ + nvec = max_t(int, 32, roundup_pow_of_two(nvec)); return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info); } diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index 25a98de5cfb2..8d6d009d1d58 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, { struct pci_dev *pdev, *alias_dev; struct msi_domain_info *msi_info; - int alias_count = 0; + int alias_count = 0, minnvec = 1; if (!dev_is_pci(dev)) return -EINVAL; @@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, /* ITS specific DeviceID, as the core ITS ignores dev. */ info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev); - return msi_info->ops->msi_prepare(domain->parent, - dev, max(nvec, alias_count), info); + /* + * Always allocate a power of 2, and special case device 0 for + * broken systems where the DevID is not wired (and all devices + * appear as DevID 0). For that reason, we generously allocate a + * minimum of 32 MSIs for DevID 0. If you want more because all + * your devices are aliasing to DevID 0, consider fixing your HW. + */ + nvec = max(nvec, alias_count); + if (!info->scratchpad[0].ul) + minnvec = 32; + nvec = max_t(int, minnvec, roundup_pow_of_two(nvec)); + return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); } static struct msi_domain_ops its_pci_msi_ops = { diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 8881a053c173..7b8e87b493fe 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, /* ITS specific DeviceID, as the core ITS ignores dev. */ info->scratchpad[0].ul = dev_id; + /* Allocate at least 32 MSIs, and always as a power of 2 */ + nvec = max_t(int, 32, roundup_pow_of_two(nvec)); return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 5377d7e2afba..316a57530f6d 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -23,6 +23,8 @@ #include #include #include +#include +#include #include #include #include @@ -160,7 +162,7 @@ static struct { } vpe_proxy; static LIST_HEAD(its_nodes); -static DEFINE_SPINLOCK(its_lock); +static DEFINE_RAW_SPINLOCK(its_lock); static struct rdists *gic_rdists; static struct irq_domain *its_parent; @@ -182,6 +184,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev, return its->collections + its_dev->event_map.col_map[event]; } +static struct its_collection *valid_col(struct its_collection *col) +{ + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15))) + return NULL; + + return col; +} + +static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) +{ + if (valid_col(its->collections + vpe->col_idx)) + return vpe; + + return NULL; +} + /* * ITS command descriptors - parameters to be encoded in a command * block. 
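The msi_prepare hooks above all apply the same sizing rule before handing the request to the parent ITS domain. A few concrete values (illustrative only, and assuming the PCI alias count is not the larger term) show what actually gets allocated:

/*
 * its_pci_msi_prepare():  nvec = 3,  DevID != 0  ->  roundup_pow_of_two(3)  = 4
 *                         nvec = 30, DevID != 0  ->  roundup_pow_of_two(30) = 32
 *                         nvec = 1,  DevID == 0  ->  max(32, 1)             = 32
 * fsl-mc / platform-msi:  nvec = 3               ->  max(32, 4)             = 32
 *                         nvec = 100             ->  max(32, 128)           = 128
 *
 * So the ITS always sizes its tables for a power-of-two number of events,
 * and systems whose devices all alias to DevID 0 get at least 32 vectors
 * to share.
 */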
@@ -439,7 +457,7 @@ static struct its_collection *its_build_mapti_cmd(struct its_node *its, its_fixup_cmd(cmd); - return col; + return valid_col(col); } static struct its_collection *its_build_movi_cmd(struct its_node *its, @@ -458,7 +476,7 @@ static struct its_collection *its_build_movi_cmd(struct its_node *its, its_fixup_cmd(cmd); - return col; + return valid_col(col); } static struct its_collection *its_build_discard_cmd(struct its_node *its, @@ -476,7 +494,7 @@ static struct its_collection *its_build_discard_cmd(struct its_node *its, its_fixup_cmd(cmd); - return col; + return valid_col(col); } static struct its_collection *its_build_inv_cmd(struct its_node *its, @@ -494,7 +512,7 @@ static struct its_collection *its_build_inv_cmd(struct its_node *its, its_fixup_cmd(cmd); - return col; + return valid_col(col); } static struct its_collection *its_build_int_cmd(struct its_node *its, @@ -512,7 +530,7 @@ static struct its_collection *its_build_int_cmd(struct its_node *its, its_fixup_cmd(cmd); - return col; + return valid_col(col); } static struct its_collection *its_build_clear_cmd(struct its_node *its, @@ -530,7 +548,7 @@ static struct its_collection *its_build_clear_cmd(struct its_node *its, its_fixup_cmd(cmd); - return col; + return valid_col(col); } static struct its_collection *its_build_invall_cmd(struct its_node *its, @@ -554,7 +572,7 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, its_fixup_cmd(cmd); - return desc->its_vinvall_cmd.vpe; + return valid_vpe(its, desc->its_vinvall_cmd.vpe); } static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, @@ -576,7 +594,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, its_fixup_cmd(cmd); - return desc->its_vmapp_cmd.vpe; + return valid_vpe(its, desc->its_vmapp_cmd.vpe); } static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, @@ -599,7 +617,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, its_fixup_cmd(cmd); - return desc->its_vmapti_cmd.vpe; + return valid_vpe(its, desc->its_vmapti_cmd.vpe); } static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, @@ -622,7 +640,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, its_fixup_cmd(cmd); - return desc->its_vmovi_cmd.vpe; + return valid_vpe(its, desc->its_vmovi_cmd.vpe); } static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, @@ -640,7 +658,7 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, its_fixup_cmd(cmd); - return desc->its_vmovp_cmd.vpe; + return valid_vpe(its, desc->its_vmovp_cmd.vpe); } static u64 its_cmd_ptr_to_offset(struct its_node *its, @@ -1405,112 +1423,176 @@ static struct irq_chip its_irq_chip = { .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, }; + /* * How we allocate LPIs: * - * The GIC has id_bits bits for interrupt identifiers. From there, we - * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as - * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 - * bits to the right. + * lpi_range_list contains ranges of LPIs that are available to + * allocate from. To allocate LPIs, just pick the first range that + * fits the required allocation, and reduce it by the required + * amount. Once empty, remove the range from the list. * - * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. + * To free a range of LPIs, add a free range to the list, sort it and + * merge the result if the new range happens to be adjacent to an + * already free block.
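A worked example of the new allocator, with purely illustrative numbers and assuming it was seeded with the usual LPI space starting at INTID 8192:

/*
 *   lpi_range_list: { base_id 8192, span 57344 }
 *
 *   alloc_lpi_range(32, &base) -> base = 8192, list: { base_id 8224, span 57312 }
 *   alloc_lpi_range(8,  &base) -> base = 8224, list: { base_id 8232, span 57304 }
 *
 * Freeing goes the other way: free_lpi_range() adds the released block as a
 * new lpi_range entry, re-sorts the list, and merge_lpi_ranges() folds
 * entries that end up adjacent back into a single range.
 */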
+ * + * The consequence of the above is that allocation cost is low, but + * freeing is expensive. We assume that freeing rarely occurs. */ -#define IRQS_PER_CHUNK_SHIFT 5 -#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) -#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ -static unsigned long *lpi_bitmap; -static u32 lpi_chunks; -static DEFINE_SPINLOCK(lpi_lock); +static DEFINE_MUTEX(lpi_range_lock); +static LIST_HEAD(lpi_range_list); -static int its_lpi_to_chunk(int lpi) +struct lpi_range { + struct list_head entry; + u32 base_id; + u32 span; +}; + +static struct lpi_range *mk_lpi_range(u32 base, u32 span) { - return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; + struct lpi_range *range; + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (range) { + INIT_LIST_HEAD(&range->entry); + range->base_id = base; + range->span = span; + } + + return range; } -static int its_chunk_to_lpi(int chunk) +static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) { - return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; + struct lpi_range *ra, *rb; + + ra = container_of(a, struct lpi_range, entry); + rb = container_of(b, struct lpi_range, entry); + + return rb->base_id - ra->base_id; +} + +static void merge_lpi_ranges(void) +{ + struct lpi_range *range, *tmp; + + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (!list_is_last(&range->entry, &lpi_range_list) && + (tmp->base_id == (range->base_id + range->span))) { + tmp->base_id = range->base_id; + tmp->span += range->span; + list_del(&range->entry); + kfree(range); + } + } +} + +static int alloc_lpi_range(u32 nr_lpis, u32 *base) +{ + struct lpi_range *range, *tmp; + int err = -ENOSPC; + + mutex_lock(&lpi_range_lock); + + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (range->span >= nr_lpis) { + *base = range->base_id; + range->base_id += nr_lpis; + range->span -= nr_lpis; + + if (range->span == 0) { + list_del(&range->entry); + kfree(range); + } + + err = 0; + break; + } + } + + mutex_unlock(&lpi_range_lock); + + pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); + return err; +} + +static int free_lpi_range(u32 base, u32 nr_lpis) +{ + struct lpi_range *new; + int err = 0; + + mutex_lock(&lpi_range_lock); + + new = mk_lpi_range(base, nr_lpis); + if (!new) { + err = -ENOMEM; + goto out; + } + + list_add(&new->entry, &lpi_range_list); + list_sort(NULL, &lpi_range_list, lpi_range_cmp); + merge_lpi_ranges(); +out: + mutex_unlock(&lpi_range_lock); + return err; } static int __init its_lpi_init(u32 id_bits) { - lpi_chunks = its_lpi_to_chunk(1UL << id_bits); + u32 lpis = (1UL << id_bits) - 8192; + u32 numlpis; + int err; - lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long), - GFP_KERNEL); - if (!lpi_bitmap) { - lpi_chunks = 0; - return -ENOMEM; + numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); + + if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { + lpis = numlpis; + pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", + lpis); } - pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); - return 0; + /* + * Initializing the allocator is just the same as freeing the + * full range of LPIs.
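To make the seeding arithmetic below concrete (illustrative numbers, assuming GICD_TYPER advertises 16 ID bits and no tighter hypervisor-imposed num_LPIs limit):

/*
 *   lpis    = (1UL << 16) - 8192 = 57344   i.e. INTIDs 8192..65535
 *   numlpis = 1UL << GICD_TYPER_NUM_LPIS(gicd_typer), only used when it
 *             describes a smaller, non-trivial (> 2) range
 *
 * so the single free_lpi_range(8192, lpis) call that follows is all the
 * initialisation the allocator needs.
 */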
+ */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; } -static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) { unsigned long *bitmap = NULL; - int chunk_id; - int nr_chunks; - int i; - - nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK); - - spin_lock(&lpi_lock); + int err = 0; do { - chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks, - 0, nr_chunks, 0); - if (chunk_id < lpi_chunks) + err = alloc_lpi_range(nr_irqs, base); + if (!err) break; - nr_chunks--; - } while (nr_chunks > 0); + nr_irqs /= 2; + } while (nr_irqs > 0); - if (!nr_chunks) + if (err) goto out; - bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK), - sizeof(long), - GFP_ATOMIC); + bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC); if (!bitmap) goto out; - for (i = 0; i < nr_chunks; i++) - set_bit(chunk_id + i, lpi_bitmap); - - *base = its_chunk_to_lpi(chunk_id); - *nr_ids = nr_chunks * IRQS_PER_CHUNK; + *nr_ids = nr_irqs; out: - spin_unlock(&lpi_lock); - if (!bitmap) *base = *nr_ids = 0; return bitmap; } -static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids) +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) { - int lpi; - - spin_lock(&lpi_lock); - - for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { - int chunk = its_lpi_to_chunk(lpi); - - BUG_ON(chunk > lpi_chunks); - if (test_bit(chunk, lpi_bitmap)) { - clear_bit(chunk, lpi_bitmap); - } else { - pr_err("Bad LPI chunk %d\n", chunk); - } - } - - spin_unlock(&lpi_lock); - + WARN_ON(free_lpi_range(base, nr_ids)); kfree(bitmap); } @@ -1543,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void) { phys_addr_t paddr; - lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS); + lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer); gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); if (!gic_rdists->prop_page) { pr_err("Failed to allocate PROPBASE\n"); @@ -1824,11 +1906,16 @@ static int its_alloc_tables(struct its_node *its) static int its_alloc_collections(struct its_node *its) { + int i; + its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), GFP_KERNEL); if (!its->collections) return -ENOMEM; + for (i = 0; i < nr_cpu_ids; i++) + its->collections[i].target_address = ~0ULL; + return 0; } @@ -1976,12 +2063,12 @@ static void its_cpu_init_collections(void) { struct its_node *its; - spin_lock(&its_lock); + raw_spin_lock(&its_lock); list_for_each_entry(its, &its_nodes, entry) its_cpu_init_collection(its); - spin_unlock(&its_lock); + raw_spin_unlock(&its_lock); } static struct its_device *its_find_device(struct its_node *its, u32 dev_id) @@ -2113,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, if (!its_alloc_device_table(its, dev_id)) return NULL; + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); /* - * We allocate at least one chunk worth of LPIs bet device, - * and thus that many ITEs. The device may require less though. + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). 
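Because nvecs now arrives as a power of two, the ITT sizing that follows stays simple. Taking an 8-byte ITE and the usual 256-byte ITS_ITT_ALIGN purely as illustrative values:

/*
 *   nvecs = 1  ->  nr_ites = max(2, 1)  = 2   ->  sz = 2 * 8   = 16
 *   nvecs = 32 ->  nr_ites = max(2, 32) = 32  ->  sz = 32 * 8  = 256
 *
 * sz is then padded to at least ITS_ITT_ALIGN plus alignment slack
 * (max(sz, 256) + 255), so even a single-LPI device gets a small but
 * properly aligned ITT.
 */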
*/ - nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); + nr_ites = max(2, nvecs); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kzalloc(sz, GFP_KERNEL); if (alloc_lpis) { - lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); if (lpi_map) col_map = kcalloc(nr_lpis, sizeof(*col_map), GFP_KERNEL); @@ -2310,7 +2400,14 @@ static int its_irq_domain_activate(struct irq_domain *domain, cpu_mask = cpumask_of_node(its_dev->its->numa_node); /* Bind the LPI to the first possible CPU */ - cpu = cpumask_first(cpu_mask); + cpu = cpumask_first_and(cpu_mask, cpu_online_mask); + if (cpu >= nr_cpu_ids) { + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) + return -EINVAL; + + cpu = cpumask_first(cpu_online_mask); + } + its_dev->event_map.col_map[event] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -2351,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, /* If all interrupts have been freed, start mopping the floor */ if (bitmap_empty(its_dev->event_map.lpi_map, its_dev->event_map.nr_lpis)) { - its_lpi_free_chunks(its_dev->event_map.lpi_map, - its_dev->event_map.lpi_base, - its_dev->event_map.nr_lpis); + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); kfree(its_dev->event_map.col_map); /* Unmap device/itt */ @@ -2752,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, } if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { - its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); } } @@ -2767,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq BUG_ON(!vm); - bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids); + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) return -ENOMEM; if (nr_ids < nr_irqs) { - its_lpi_free_chunks(bitmap, base, nr_ids); + its_lpi_free(bitmap, base, nr_ids); return -ENOMEM; } vprop_page = its_allocate_prop_table(GFP_KERNEL); if (!vprop_page) { - its_lpi_free_chunks(bitmap, base, nr_ids); + its_lpi_free(bitmap, base, nr_ids); return -ENOMEM; } @@ -2805,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (i > 0) its_vpe_irq_domain_free(domain, virq, i - 1); - its_lpi_free_chunks(bitmap, base, nr_ids); + its_lpi_free(bitmap, base, nr_ids); its_free_prop_table(vprop_page); } @@ -3042,7 +3139,7 @@ static int its_save_disable(void) struct its_node *its; int err = 0; - spin_lock(&its_lock); + raw_spin_lock(&its_lock); list_for_each_entry(its, &its_nodes, entry) { void __iomem *base; @@ -3074,7 +3171,7 @@ static int its_save_disable(void) writel_relaxed(its->ctlr_save, base + GITS_CTLR); } } - spin_unlock(&its_lock); + raw_spin_unlock(&its_lock); return err; } @@ -3084,7 +3181,7 @@ static void its_restore_enable(void) struct its_node *its; int ret; - spin_lock(&its_lock); + raw_spin_lock(&its_lock); list_for_each_entry(its, &its_nodes, entry) { void __iomem *base; int i; @@ -3136,7 +3233,7 @@ static void its_restore_enable(void) GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) its_cpu_init_collection(its); } - spin_unlock(&its_lock); + raw_spin_unlock(&its_lock); } static struct syscore_ops its_syscore_ops = { @@ -3370,9 +3467,9 @@ static int __init its_probe_one(struct resource *res, if (err) goto 
out_free_tables; - spin_lock(&its_lock); + raw_spin_lock(&its_lock); list_add(&its->entry, &its_nodes); - spin_unlock(&its_lock); + raw_spin_unlock(&its_lock); return 0; @@ -3399,6 +3496,16 @@ static int redist_disable_lpis(void) u64 timeout = USEC_PER_SEC; u64 val; + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. Detect this case + * by checking the allocation of the pending table for the + * current CPU. + */ + if (gic_data_rdist()->pend_page) + return 0; + if (!gic_rdists_supports_plpis()) { pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); return -ENXIO; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 76ea56d779a1..e214181b77b7 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = { .flags = IRQCHIP_SET_TYPE_MASKED, }; -#define GIC_ID_NR (1U << gic_data.rdists.id_bits) +#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) @@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base, * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) */ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); - gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer); + gic_data.rdists.gicd_typer = typer; gic_irqs = GICD_TYPER_IRQS(typer); if (gic_irqs > 1020) gic_irqs = 1020; diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c index 1ec3bfe56693..c671b3212010 100644 --- a/drivers/irqchip/irq-ls-scfg-msi.c +++ b/drivers/irqchip/irq-ls-scfg-msi.c @@ -93,8 +93,12 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) msg->address_lo = lower_32_bits(msi_data->msiir_addr); msg->data = data->hwirq; - if (msi_affinity_flag) - msg->data |= cpumask_first(data->common->affinity); + if (msi_affinity_flag) { + const struct cpumask *mask; + + mask = irq_data_get_effective_affinity_mask(data); + msg->data |= cpumask_first(mask); + } iommu_dma_map_msi_msg(data->irq, msg); } @@ -121,7 +125,7 @@ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, return -EINVAL; } - cpumask_copy(irq_data->common->affinity, mask); + irq_data_update_effective_affinity(irq_data, cpumask_of(cpu)); return IRQ_SET_MASK_OK; } diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 98f90aadd141..18c0a1281914 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = { .getname = data_sock_getname, .sendmsg = mISDN_sock_sendmsg, .recvmsg = mISDN_sock_recvmsg, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = data_sock_setsockopt, diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig index 10c08982185a..9c03f35d9df1 100644 --- a/drivers/lightnvm/Kconfig +++ b/drivers/lightnvm/Kconfig @@ -4,7 +4,7 @@ menuconfig NVM bool "Open-Channel SSD target support" - depends on BLOCK && HAS_DMA && PCI + depends on BLOCK && PCI select BLK_DEV_NVME help Say Y here to get to enable Open-channel SSDs. 
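The irq-ls-scfg-msi change above is a common pattern: the set_affinity callback records the CPU it actually programmed via irq_data_update_effective_affinity(), and the message composer reads that same mask back instead of the user-requested one. A condensed sketch (the handler names and the hardware programming step are illustrative; the irq_data helpers are the real APIs):

#include <linux/irq.h>
#include <linux/msi.h>

static int example_set_affinity(struct irq_data *d,
                                const struct cpumask *mask, bool force)
{
        int cpu = cpumask_any_and(mask, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /* ... program the interrupt to target @cpu here ... */
        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK;
}

static void example_compose_msg(struct irq_data *d, struct msi_msg *msg)
{
        const struct cpumask *mask = irq_data_get_effective_affinity_mask(d);

        /* encode the CPU that was really chosen, not the requested mask */
        msg->data |= cpumask_first(mask);
}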
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index ab13fcec3fca..75df4c9d8b54 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout) } /* Return md raid10 algorithm for @name */ -static const int raid10_name_to_format(const char *name) +static int raid10_name_to_format(const char *name) { if (!strcasecmp(name, "near")) return ALGORITHM_RAID10_NEAR; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 938766794c2e..3d0e2c198f06 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type); static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); - - return q && blk_queue_dax(q); + return bdev_dax_supported(dev->bdev, PAGE_SIZE); } static bool dm_table_supports_dax(struct dm_table *t) @@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (dm_table_supports_dax(t)) blk_queue_flag_set(QUEUE_FLAG_DAX, q); + else + blk_queue_flag_clear(QUEUE_FLAG_DAX, q); + if (dm_table_supports_dax_write_cache(t)) dax_write_cache(t->md->dax_dev, true); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 36ef284ad086..72142021b5c9 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd) static int __commit_transaction(struct dm_pool_metadata *pmd) { int r; - size_t metadata_len, data_len; struct thin_disk_superblock *disk_super; struct dm_block *sblock; @@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) if (r < 0) return r; - r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); - if (r < 0) - return r; - - r = dm_sm_root_size(pmd->data_sm, &data_len); - if (r < 0) - return r; - r = save_sm_roots(pmd); if (r < 0) return r; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 7945238df1c0..b900723bbd0f 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1386,6 +1386,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); +static void requeue_bios(struct pool *pool); + static void check_for_space(struct pool *pool) { int r; @@ -1398,8 +1400,10 @@ static void check_for_space(struct pool *pool) if (r) return; - if (nr_free) + if (nr_free) { set_pool_mode(pool, PM_WRITE); + requeue_bios(pool); + } } /* @@ -1476,7 +1480,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) r = dm_pool_alloc_data_block(pool->pmd, result); if (r) { - metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); + if (r == -ENOSPC) + set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); + else + metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); return r; } diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 5961c7794ef3..07ea6a48aac6 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -259,7 +259,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) if (da != p) { long i; wc->memory_map = NULL; - pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL); + pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL); if (!pages) { r = -ENOMEM; goto err2; @@ -859,7 +859,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc) if (wc->entries) 
return 0; - wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks); + wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks)); if (!wc->entries) return -ENOMEM; for (b = 0; b < wc->n_blocks; b++) { @@ -1481,9 +1481,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba wb->bio.bi_iter.bi_sector = read_original_sector(wc, e); wb->page_offset = PAGE_SIZE; if (max_pages <= WB_LIST_INLINE || - unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *), - GFP_NOIO | __GFP_NORETRY | - __GFP_NOMEMALLOC | __GFP_NOWARN)))) { + unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *), + GFP_NOIO | __GFP_NORETRY | + __GFP_NOMEMALLOC | __GFP_NOWARN)))) { wb->wc_list = wb->wc_list_inline; max_pages = WB_LIST_INLINE; } diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 3c0e45f4dcf5..a44183ff4be0 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Chunk BIO work */ mutex_init(&dmz->chunk_lock); - INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL); + INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO); dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 0, dev->name); if (!dmz->chunk_wq) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e65429a29c06..b0dd7027848b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, if (len < 1) goto out; nr_pages = min(len, nr_pages); - if (ti->type->direct_access) - ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); + ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); out: dm_put_live_table(md, srcu_idx); @@ -1606,10 +1605,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, * the usage of io->orig_bio in dm_remap_zone_report() * won't be affected by this reassignment. */ - struct bio *b = bio_clone_bioset(bio, GFP_NOIO, - &md->queue->bio_split); + struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, + GFP_NOIO, &md->queue->bio_split); ci.io->orig_bio = b; - bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9); bio_chain(b, bio); ret = generic_make_request(bio); break; diff --git a/drivers/md/md.c b/drivers/md/md.c index 29b0cd9ec951..994aed2f9dff 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev) else pr_warn("md: personality for level %s is not loaded!\n", mddev->clevel); - return -EINVAL; + err = -EINVAL; + goto abort; } spin_unlock(&pers_lock); if (mddev->level != pers->level) { @@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev) pers->start_reshape == NULL) { /* This personality cannot handle reshaping... 
*/ module_put(pers->owner); - return -EINVAL; + err = -EINVAL; + goto abort; } if (pers->sync_request) { @@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev) mddev->private = NULL; module_put(pers->owner); bitmap_destroy(mddev); - return err; + goto abort; } if (mddev->queue) { bool nonrot = true; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 478cf446827f..35bd3a62451b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev) disk->rdev->saved_raid_disk < 0) conf->fullsync = 1; } + + if (disk->replacement && + !test_bit(In_sync, &disk->replacement->flags) && + disk->replacement->saved_raid_disk < 0) { + conf->fullsync = 1; + } + disk->recovery_disabled = mddev->recovery_disabled - 1; } diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 40826bba06b6..fcfab6635f9c 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev) bpf_prog_array_free(rcdev->raw->progs); } -int lirc_prog_attach(const union bpf_attr *attr) +int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) { - struct bpf_prog *prog; struct rc_dev *rcdev; int ret; if (attr->attach_flags) return -EINVAL; - prog = bpf_prog_get_type(attr->attach_bpf_fd, - BPF_PROG_TYPE_LIRC_MODE2); - if (IS_ERR(prog)) - return PTR_ERR(prog); - rcdev = rc_dev_get_from_fd(attr->target_fd); - if (IS_ERR(rcdev)) { - bpf_prog_put(prog); + if (IS_ERR(rcdev)) return PTR_ERR(rcdev); - } ret = lirc_bpf_attach(rcdev, prog); - if (ret) - bpf_prog_put(prog); put_device(&rcdev->dev); diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index e05c3245930a..fa840666bdd1 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file) static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { void __iomem *address = (void __iomem *)file->private_data; - unsigned char *page; - int retval; int len = 0; unsigned int value; - - if (*offset < 0) - return -EINVAL; - if (count == 0 || count > 1024) - return 0; - if (*offset != 0) - return 0; - - page = (unsigned char *)__get_free_page(GFP_KERNEL); - if (!page) - return -ENOMEM; + char lbuf[20]; value = readl(address); - len = sprintf(page, "%d\n", value); + len = snprintf(lbuf, sizeof(lbuf), "%d\n", value); - if (copy_to_user(buf, page, len)) { - retval = -EFAULT; - goto exit; - } - *offset += len; - retval = len; - -exit: - free_page((unsigned long)page); - return retval; + return simple_read_from_buffer(buf, count, offset, lbuf, len); } static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index b0b8f18a85e3..6649f0d56d2f 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -310,8 +310,11 @@ int mei_irq_read_handler(struct mei_device *dev, if (&cl->link == &dev->file_list) { /* A message for not connected fixed address clients * should be silently discarded + * On power down client may be force cleaned, + * silently discard such messages */ - if (hdr_is_fixed(mei_hdr)) { + if (hdr_is_fixed(mei_hdr) || + dev->dev_state == MEI_DEV_POWER_DOWN) { mei_irq_discard_msg(dev, mei_hdr); ret = 0; goto reset_slots; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c 
index efd733472a35..56c6f79a5c5a 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) { unsigned long status; - unsigned long pfn = page_to_pfn(b->page); + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); STATS_INC(b->stats.lock[is_2m_pages]); @@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) { unsigned long status; - unsigned long pfn = page_to_pfn(b->page); + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); STATS_INC(b->stats.unlock[is_2m_pages]); diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index ef05e0039378..2a833686784b 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -27,8 +27,8 @@ struct mmc_gpio { bool override_cd_active_level; irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); char *ro_label; - char cd_label[0]; u32 cd_debounce_delay_ms; + char cd_label[]; }; static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 623f4d27fa01..80dc2fd6576c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) * It's used when HS400 mode is enabled. */ if (data->flags & MMC_DATA_WRITE && - !(host->timing != MMC_TIMING_MMC_HS400)) - return; + host->timing != MMC_TIMING_MMC_HS400) + goto disable; if (data->flags & MMC_DATA_WRITE) enable = SDMMC_CARD_WR_THR_EN; @@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) enable = SDMMC_CARD_RD_THR_EN; if (host->timing != MMC_TIMING_MMC_HS200 && - host->timing != MMC_TIMING_UHS_SDR104) + host->timing != MMC_TIMING_UHS_SDR104 && + host->timing != MMC_TIMING_MMC_HS400) goto disable; blksz_depth = blksz / (1 << host->data_shift); diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index f7f9773d161f..d032bd63444d 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -139,8 +139,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) { renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, RST_RESERVED_BITS | val); - if (host->data && host->data->flags & MMC_DATA_READ) - clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); renesas_sdhi_internal_dmac_enable_dma(host, true); } @@ -164,17 +163,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, goto force_pio; /* This DMAC cannot handle if buffer is not 8-bytes alignment */ - if (!IS_ALIGNED(sg_dma_address(sg), 8)) { - dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, - mmc_get_dma_dir(data)); - goto force_pio; - } + if (!IS_ALIGNED(sg_dma_address(sg), 8)) + goto force_pio_with_unmap; if (data->flags & MMC_DATA_READ) { dtran_mode |= DTRAN_MODE_CH_NUM_CH1; if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) - goto force_pio; + goto force_pio_with_unmap; } else { dtran_mode |= DTRAN_MODE_CH_NUM_CH0; } @@ -189,6 +185,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host, return; +force_pio_with_unmap: + dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, 
mmc_get_dma_dir(data)); + force_pio: host->force_pio = true; renesas_sdhi_internal_dmac_enable_dma(host, false); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d6aef70d34fa..4eb3d29ecde1 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -312,6 +312,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) if (imx_data->socdata->flags & ESDHC_FLAG_HS400) val |= SDHCI_SUPPORT_HS400; + + /* + * Do not advertise faster UHS modes if there are no + * pinctrl states for 100MHz/200MHz. + */ + if (IS_ERR_OR_NULL(imx_data->pins_100mhz) || + IS_ERR_OR_NULL(imx_data->pins_200mhz)) + val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50 + | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400); } } @@ -1158,18 +1167,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, ESDHC_PINCTRL_STATE_100MHZ); imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, ESDHC_PINCTRL_STATE_200MHZ); - if (IS_ERR(imx_data->pins_100mhz) || - IS_ERR(imx_data->pins_200mhz)) { - dev_warn(mmc_dev(host->mmc), - "could not get ultra high speed state, work on normal mode\n"); - /* - * fall back to not supporting uhs by specifying no - * 1.8v quirk - */ - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - } - } else { - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; } /* call to generic mmc_of_parse to support additional capabilities */ diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index e7472590f2ed..8e7f3e35ee3d 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1446,6 +1446,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev) sunxi_mmc_init_host(host); sunxi_mmc_set_bus_width(host, mmc->ios.bus_width); sunxi_mmc_set_clk(host, &mmc->ios); + enable_irq(host->irq); return 0; } @@ -1455,6 +1456,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev) struct mmc_host *mmc = dev_get_drvdata(dev); struct sunxi_mmc_host *host = mmc_priv(mmc); + /* + * When clocks are off, it's possible receiving + * fake interrupts, which will stall the system. + * Disabling the irq will prevent this. 
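The sunxi-mmc comment above explains the problem: with the controller clocks gated, the interrupt line can raise spurious events, so the runtime-PM callbacks now bracket the low-power window with disable_irq()/enable_irq(). A condensed sketch of that pairing (the host structure is hypothetical; only the ordering mirrors the change):

#include <linux/device.h>
#include <linux/interrupt.h>

struct example_host {
        int irq;
};

static int example_runtime_suspend(struct device *dev)
{
        struct example_host *host = dev_get_drvdata(dev);

        disable_irq(host->irq);         /* no spurious IRQs while clocks are off */
        /* ... reset the controller and gate its clocks ... */

        return 0;
}

static int example_runtime_resume(struct device *dev)
{
        struct example_host *host = dev_get_drvdata(dev);

        /* ... ungate clocks and re-initialise the controller ... */
        enable_irq(host->irq);          /* only once the hardware is sane again */

        return 0;
}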
+ */ + disable_irq(host->irq); sunxi_mmc_reset_host(host); sunxi_mmc_disable(host); diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index a0c655628d6d..1b64ac8c5bc8 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -2526,7 +2526,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) struct ppb_lock { struct flchip *chip; - loff_t offset; + unsigned long adr; int locked; }; @@ -2544,8 +2544,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, unsigned long timeo; int ret; + adr += chip->start; mutex_lock(&chip->mutex); - ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); + ret = get_chip(map, chip, adr, FL_LOCKING); if (ret) { mutex_unlock(&chip->mutex); return ret; @@ -2563,8 +2564,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { chip->state = FL_LOCKING; - map_write(map, CMD(0xA0), chip->start + adr); - map_write(map, CMD(0x00), chip->start + adr); + map_write(map, CMD(0xA0), adr); + map_write(map, CMD(0x00), adr); } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { /* * Unlocking of one specific sector is not supported, so we @@ -2602,7 +2603,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, map_write(map, CMD(0x00), chip->start); chip->state = FL_READY; - put_chip(map, chip, adr + chip->start); + put_chip(map, chip, adr); mutex_unlock(&chip->mutex); return ret; @@ -2659,9 +2660,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, * sectors shall be unlocked, so lets keep their locking * status at "unlocked" (locked=0) for the final re-locking. */ - if ((adr < ofs) || (adr >= (ofs + len))) { + if ((offset < ofs) || (offset >= (ofs + len))) { sect[sectors].chip = &cfi->chips[chipnum]; - sect[sectors].offset = offset; + sect[sectors].adr = adr; sect[sectors].locked = do_ppb_xxlock( map, &cfi->chips[chipnum], adr, 0, DO_XXLOCK_ONEBLOCK_GETLOCK); @@ -2675,6 +2676,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, i++; if (adr >> cfi->chipshift) { + if (offset >= (ofs + len)) + break; adr = 0; chipnum++; @@ -2705,7 +2708,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, */ for (i = 0; i < sectors; i++) { if (sect[i].locked) - do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, + do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0, DO_XXLOCK_ONEBLOCK_LOCK); } diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 3a6f450d1093..53febe8a68c3 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -733,8 +733,8 @@ static struct flash_info dataflash_data[] = { { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS}, { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS}, - { "AT45DB641E", 0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS}, - { "at45db641e", 0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS}, + { "AT45DB641E", 0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS}, + { "at45db641e", 0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS}, }; static struct flash_info *jedec_lookup(struct spi_device *spi, diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c index cfd33e6ca77f..5869e90cc14b 100644 --- a/drivers/mtd/nand/raw/denali_dt.c +++ b/drivers/mtd/nand/raw/denali_dt.c @@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev) if (ret) return 
ret; - denali->clk_x_rate = clk_get_rate(dt->clk); + /* + * Hardcode the clock rate for the backward compatibility. + * This works for both SOCFPGA and UniPhier. + */ + denali->clk_x_rate = 200000000; ret = denali_init(denali); if (ret) diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 45786e707b7b..26cef218bb43 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -48,7 +48,7 @@ #define NFC_V1_V2_CONFIG (host->regs + 0x0a) #define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c) #define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e) -#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10) +#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10) #define NFC_V1_V2_WRPROT (host->regs + 0x12) #define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) #define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) @@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd) writew(config1, NFC_V1_V2_CONFIG1); /* preset operation */ + /* spare area size in 16-bit half-words */ + writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA); + /* Unlock the internal RAM Buffer */ writew(0x2, NFC_V1_V2_CONFIG); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 10c4f9919850..b01d15ec4c56 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs) for (; page < page_end; page++) { res = chip->ecc.read_oob(mtd, chip, page); - if (res) + if (res < 0) return res; bad = chip->oob_poi[chip->badblockpos]; diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c index 7ed1f87e742a..49c546c97c6f 100644 --- a/drivers/mtd/nand/raw/nand_macronix.c +++ b/drivers/mtd/nand/raw/nand_macronix.c @@ -17,23 +17,47 @@ #include +/* + * Macronix AC series does not support using SET/GET_FEATURES to change + * the timings unlike what is declared in the parameter page. Unflag + * this feature to avoid unnecessary downturns. + */ +static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip) +{ + unsigned int i; + static const char * const broken_get_timings[] = { + "MX30LF1G18AC", + "MX30LF1G28AC", + "MX30LF2G18AC", + "MX30LF2G28AC", + "MX30LF4G18AC", + "MX30LF4G28AC", + "MX60LF8G18AC", + }; + + if (!chip->parameters.supports_set_get_features) + return; + + for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) { + if (!strcmp(broken_get_timings[i], chip->parameters.model)) + break; + } + + if (i == ARRAY_SIZE(broken_get_timings)) + return; + + bitmap_clear(chip->parameters.get_feature_list, + ONFI_FEATURE_ADDR_TIMING_MODE, 1); + bitmap_clear(chip->parameters.set_feature_list, + ONFI_FEATURE_ADDR_TIMING_MODE, 1); +} + static int macronix_nand_init(struct nand_chip *chip) { if (nand_is_slc(chip)) chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; - /* - * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change - * the timings unlike what is declared in the parameter page. Unflag - * this feature to avoid unnecessary downturns. 
- */ - if (chip->parameters.supports_set_get_features && - !strcmp("MX30LF2G18AC", chip->parameters.model)) { - bitmap_clear(chip->parameters.get_feature_list, - ONFI_FEATURE_ADDR_TIMING_MODE, 1); - bitmap_clear(chip->parameters.set_feature_list, - ONFI_FEATURE_ADDR_TIMING_MODE, 1); - } + macronix_nand_fix_broken_get_timings(chip); return 0; } diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 0af45b134c0c..5ec4c90a637d 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip) if (p->supports_set_get_features) { set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list); + set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list); set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list); + set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list); } return 0; diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index c3f7aaa5d18f..d7e10b36a0b9 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -926,10 +926,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to, if (ret) return ret; - if (f_pdata->use_direct_mode) + if (f_pdata->use_direct_mode) { memcpy_toio(cqspi->ahb_base + to, buf, len); - else + ret = cqspi_wait_idle(cqspi); + } else { ret = cqspi_indirect_write_execute(nor, to, buf, len); + } if (ret) return ret; diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index d5c15e8bb3de..f273af136fc7 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -173,7 +173,7 @@ config SUNLANCE config AMD_XGBE tristate "AMD 10GbE Ethernet driver" - depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA + depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM depends on X86 || ARM64 || COMPILE_TEST select BITREVERSE select CRC32 diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig index 1205861b6318..eedd3f3dd22e 100644 --- a/drivers/net/ethernet/apm/xgene-v2/Kconfig +++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig @@ -1,6 +1,5 @@ config NET_XGENE_V2 tristate "APM X-Gene SoC Ethernet-v2 Driver" - depends on HAS_DMA depends on ARCH_XGENE || COMPILE_TEST help This is the Ethernet driver for the on-chip ethernet interface diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig index afccb033177b..e4e33c900b57 100644 --- a/drivers/net/ethernet/apm/xgene/Kconfig +++ b/drivers/net/ethernet/apm/xgene/Kconfig @@ -1,6 +1,5 @@ config NET_XGENE tristate "APM X-Gene SoC Ethernet Driver" - depends on HAS_DMA depends on ARCH_XGENE || COMPILE_TEST select PHYLIB select MDIO_XGENE diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index e743ddf46343..5d0ab8e74b68 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -24,7 +24,8 @@ config ARC_EMAC_CORE config ARC_EMAC tristate "ARC EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET && HAS_DMA && (ARC || COMPILE_TEST) + depends on OF_IRQ && OF_NET + depends on ARC || COMPILE_TEST ---help--- On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x non-standard on-chip ethernet device ARC EMAC 10/100 is used. 
@@ -33,7 +34,8 @@ config ARC_EMAC config EMAC_ROCKCHIP tristate "Rockchip EMAC support" select ARC_EMAC_CORE - depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA && (ARCH_ROCKCHIP || COMPILE_TEST) + depends on OF_IRQ && OF_NET && REGULATOR + depends on ARCH_ROCKCHIP || COMPILE_TEST ---help--- Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers. This selects Rockchip SoC glue layer support for the diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 567ee54504bc..5e5022fa1d04 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; + int err; alx_reset_phy(hw); if (!netif_running(alx->dev)) return 0; netif_device_attach(alx->dev); - return __alx_open(alx, true); + + rtnl_lock(); + err = __alx_open(alx, true); + rtnl_unlock(); + + return err; } static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index af75156919ed..4c3bfde6e8de 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -157,7 +157,6 @@ config BGMAC config BGMAC_BCMA tristate "Broadcom iProc GBit BCMA support" depends on BCMA && BCMA_HOST_SOC - depends on HAS_DMA depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST select BGMAC select PHYLIB @@ -170,7 +169,6 @@ config BGMAC_BCMA config BGMAC_PLATFORM tristate "Broadcom iProc GBit platform support" - depends on HAS_DMA depends on ARCH_BCM_IPROC || COMPILE_TEST depends on OF select BGMAC diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d847e1b9c37b..be1506169076 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1533,6 +1533,7 @@ struct bnx2x { struct link_vars link_vars; u32 link_cnt; struct bnx2x_link_report_data last_reported_link; + bool force_link_down; struct mdio_if_info mdio; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8cd73ff5debc..af7b5a4d8ba0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp) { struct bnx2x_link_report_data cur_data; + if (bp->force_link_down) { + bp->link_vars.link_up = 0; + return; + } + /* reread mf_cfg */ if (IS_PF(bp) && !CHIP_IS_E1(bp)) bnx2x_read_mf_cfg(bp); @@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->pending_max = 0; } + bp->force_link_down = false; if (bp->port.pmf) { rc = bnx2x_initial_phy_init(bp, load_mode); if (rc) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5b1ed240bf18..57348f2b49a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bp->sp_rtnl_state = 0; smp_mb(); + /* Immediately indicate link as down */ + bp->link_vars.link_up = 0; + bp->force_link_down = true; + netif_carrier_off(bp->dev); + BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); /* When ret value shows failure of allocation 
failure, * the nic is rebooted again. If open still fails, a error diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 30273a7717e2..4fd829b5e65d 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id, id_tbl->max = size; id_tbl->next = next; spin_lock_init(&id_tbl->lock); - id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL); + id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL); if (!id_tbl->table) return -ENOMEM; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 3e93df5d4e3b..96cc03a6d942 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3726,6 +3726,8 @@ static int at91ether_init(struct platform_device *pdev) int err; u32 reg; + bp->queues[0].bp = bp; + dev->netdev_ops = &at91ether_netdev_ops; dev->ethtool_ops = &macb_ethtool_ops; diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 2220c771092b..678835136bf8 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) if (delta > TSU_NSEC_MAX_VAL) { gem_tsu_get_time(&bp->ptp_clock_info, &now); - if (sign) - now = timespec64_sub(now, then); - else - now = timespec64_add(now, then); + now = timespec64_add(now, then); gem_tsu_set_time(&bp->ptp_clock_info, (const struct timespec64 *)&now); diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig index 07d2201530d2..9fdd496b90ff 100644 --- a/drivers/net/ethernet/calxeda/Kconfig +++ b/drivers/net/ethernet/calxeda/Kconfig @@ -1,6 +1,6 @@ config NET_CALXEDA_XGMAC tristate "Calxeda 1G/10G XGMAC Ethernet driver" - depends on HAS_IOMEM && HAS_DMA + depends on HAS_IOMEM depends on ARCH_HIGHBANK || COMPILE_TEST select CRC32 help diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index dd04a2f89ce6..bc03c175a3cd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -263,7 +263,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n", enable ? "set" : "unset", pi->port_id, i, -err); else - txq->dcb_prio = value; + txq->dcb_prio = enable ? 
value : 0; } } diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c index 973c1fb70d09..99038dfc7fbe 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic) enic->rfs_h.max = enic->config.num_arfs; enic->rfs_h.free = enic->rfs_h.max; enic->rfs_h.toclean = 0; - enic_rfs_timer_start(enic); } void enic_rfs_flw_tbl_free(struct enic *enic) @@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic) enic_rfs_timer_stop(enic); spin_lock_bh(&enic->rfs_h.lock); - enic->rfs_h.free = 0; for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { struct hlist_head *hhead; struct hlist_node *tmp; @@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic) enic_delfltr(enic, n->fltr_id); hlist_del(&n->node); kfree(n); + enic->rfs_h.free++; } } spin_unlock_bh(&enic->rfs_h.lock); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 30d2eaa18c04..90c645b8538e 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1920,7 +1920,7 @@ static int enic_open(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); unsigned int i; - int err; + int err, ret; err = enic_request_intr(enic); if (err) { @@ -1971,16 +1971,15 @@ static int enic_open(struct net_device *netdev) vnic_intr_unmask(&enic->intr[i]); enic_notify_timer_start(enic); - enic_rfs_flw_tbl_init(enic); + enic_rfs_timer_start(enic); return 0; err_out_free_rq: for (i = 0; i < enic->rq_count; i++) { - err = vnic_rq_disable(&enic->rq[i]); - if (err) - return err; - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + ret = vnic_rq_disable(&enic->rq[i]); + if (!ret) + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); } enic_dev_notify_unset(enic); err_out_free_intr: @@ -2904,6 +2903,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) timer_setup(&enic->notify_timer, enic_notify_timer, 0); + enic_rfs_flw_tbl_init(enic); enic_set_rx_coal_setting(enic); INIT_WORK(&enic->reset, enic_reset); INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 78db8e62a83f..ed6c76d20b45 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1735,8 +1735,8 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd) if (unlikely(nd->state != ncsi_dev_state_functional)) return; - netdev_info(nd->dev, "NCSI interface %s\n", - nd->link_up ? "up" : "down"); + netdev_dbg(nd->dev, "NCSI interface %s\n", + nd->link_up ? 
"up" : "down"); } static void ftgmac100_setup_clk(struct ftgmac100 *priv) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 5f4e1ffa7b95..ab02057ac730 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); /* Default alignment for start of data in an Rx FD */ #define DPAA_FD_DATA_ALIGNMENT 16 +/* The DPAA requires 256 bytes reserved and mapped for the SGT */ +#define DPAA_SGT_SIZE 256 + /* Values for the L3R field of the FM Parse Results */ /* L3 Type field: First IP Present IPv4 */ @@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { nr_frags = skb_shinfo(skb)->nr_frags; - dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + - sizeof(struct qm_sg_entry) * (1 + nr_frags), + dma_unmap_single(dev, addr, + qm_fd_get_offset(fd) + DPAA_SGT_SIZE, dma_dir); /* The sgt buffer has been allocated with netdev_alloc_frag(), @@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, void *sgt_buf; /* get a page frag to store the SGTable */ - sz = SKB_DATA_ALIGN(priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags)); + sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE); sgt_buf = netdev_alloc_frag(sz); if (unlikely(!sgt_buf)) { netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", @@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, skbh = (struct sk_buff **)buffer_start; *skbh = skb; - addr = dma_map_single(dev, buffer_start, priv->tx_headroom + - sizeof(struct qm_sg_entry) * (1 + nr_frags), - dma_dir); + addr = dma_map_single(dev, buffer_start, + priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); if (unlikely(dma_mapping_error(dev, addr))) { dev_err(dev, "DMA mapping failed"); err = -EINVAL; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ce6e24c74978..ecbf6187e13a 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -324,6 +324,10 @@ struct fman_port_qmi_regs { #define HWP_HXS_PHE_REPORT 0x00000800 #define HWP_HXS_PCAC_PSTAT 0x00000100 #define HWP_HXS_PCAC_PSTOP 0x00000001 +#define HWP_HXS_TCP_OFFSET 0xA +#define HWP_HXS_UDP_OFFSET 0xB +#define HWP_HXS_SH_PAD_REM 0x80000000 + struct fman_port_hwp_regs { struct { u32 ssa; /* Soft Sequence Attachment */ @@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port) iowrite32be(0xffffffff, ®s->pmda[i].lcv); } + /* Short packet padding removal from checksum calculation */ + iowrite32be(HWP_HXS_SH_PAD_REM, ®s->pmda[HWP_HXS_TCP_OFFSET].ssa); + iowrite32be(HWP_HXS_SH_PAD_REM, ®s->pmda[HWP_HXS_UDP_OFFSET].ssa); + start_port_hwp(port); } diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 8bcf470ff5f3..fb1a7251f45d 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_HISILICON bool "Hisilicon devices" default y - depends on (OF || ACPI) && HAS_DMA + depends on OF || ACPI depends on ARM || ARM64 || COMPILE_TEST ---help--- If you have a network (Ethernet) card belonging to this class, say Y. 
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index e2e5cdc7119c..4c0f7eda1166 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq) { struct hinic_rq *rq = rxq->rq; + irq_set_affinity_hint(rq->irq, NULL); free_irq(rq->irq, rxq); rx_del_napi(rxq); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 8ffb7454e67c..b151ae316546 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2103,9 +2103,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(I40E_SKB_PAD + - (xdp->data_end - - xdp->data_hard_start)); + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); #endif struct sk_buff *skb; @@ -2124,7 +2123,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start)); + skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, xdp->data_end - xdp->data); if (metasize) skb_metadata_set(skb, metasize); @@ -2200,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, return true; } -#define I40E_XDP_PASS 0 -#define I40E_XDP_CONSUMED 1 -#define I40E_XDP_TX 2 +#define I40E_XDP_PASS 0 +#define I40E_XDP_CONSUMED BIT(0) +#define I40E_XDP_TX BIT(1) +#define I40E_XDP_REDIR BIT(2) static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring); @@ -2249,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, break; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED; + result = !err ? 
I40E_XDP_REDIR : I40E_XDP_CONSUMED; break; default: bpf_warn_invalid_xdp_action(act); @@ -2312,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - bool failure = false, xdp_xmit = false; + unsigned int xdp_xmit = 0; + bool failure = false; struct xdp_buff xdp; xdp.rxq = &rx_ring->xdp_rxq; @@ -2373,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) } if (IS_ERR(skb)) { - if (PTR_ERR(skb) == -I40E_XDP_TX) { - xdp_xmit = true; + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { + xdp_xmit |= xdp_res; i40e_rx_buffer_flip(rx_ring, rx_buffer, size); } else { rx_buffer->pagecnt_bias++; @@ -2428,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) total_rx_packets++; } - if (xdp_xmit) { + if (xdp_xmit & I40E_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & I40E_XDP_TX) { struct i40e_ring *xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; i40e_xdp_ring_update_tail(xdp_ring); - xdp_do_flush_map(); } rx_ring->skb = skb; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3e87dbbc9024..62e57b05a0ae 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -#define IXGBE_XDP_PASS 0 -#define IXGBE_XDP_CONSUMED 1 -#define IXGBE_XDP_TX 2 +#define IXGBE_XDP_PASS 0 +#define IXGBE_XDP_CONSUMED BIT(0) +#define IXGBE_XDP_TX BIT(1) +#define IXGBE_XDP_REDIR BIT(2) static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, struct xdp_frame *xdpf); @@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); if (!err) - result = IXGBE_XDP_TX; + result = IXGBE_XDP_REDIR; else result = IXGBE_XDP_CONSUMED; break; @@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); - bool xdp_xmit = false; + unsigned int xdp_xmit = 0; struct xdp_buff xdp; xdp.rxq = &rx_ring->xdp_rxq; @@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } if (IS_ERR(skb)) { - if (PTR_ERR(skb) == -IXGBE_XDP_TX) { - xdp_xmit = true; + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { + xdp_xmit |= xdp_res; ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); } else { rx_buffer->pagecnt_bias++; @@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, total_rx_packets++; } - if (xdp_xmit) { + if (xdp_xmit & IXGBE_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & IXGBE_XDP_TX) { struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; /* Force memory writes to complete before letting h/w @@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ wmb(); writel(ring->next_to_use, ring->tail); - - xdp_do_flush_map(); } u64_stats_update_begin(&rx_ring->syncp); diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index cc2f7701e71e..f33fd22b351c 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -18,8 +18,8 @@ if 
NET_VENDOR_MARVELL config MV643XX_ETH tristate "Marvell Discovery (643XX) and Orion ethernet support" - depends on (MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST) && INET - depends on HAS_DMA + depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST + depends on INET select PHYLIB select MVMDIO ---help--- @@ -58,7 +58,6 @@ config MVNETA_BM_ENABLE config MVNETA tristate "Marvell Armada 370/38x/XP/37xx network interface support" depends on ARCH_MVEBU || COMPILE_TEST - depends on HAS_DMA select MVMDIO select PHYLINK ---help--- @@ -84,7 +83,6 @@ config MVNETA_BM config MVPP2 tristate "Marvell Armada 375/7K/8K network interface support" depends on ARCH_MVEBU || COMPILE_TEST - depends on HAS_DMA select MVMDIO select PHYLINK ---help--- @@ -93,7 +91,7 @@ config MVPP2 config PXA168_ETH tristate "Marvell pxa168 ethernet support" - depends on HAS_IOMEM && HAS_DMA + depends on HAS_IOMEM depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST select PHYLIB ---help--- diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 17a904cc6a5e..0ad2f3f7da85 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1932,7 +1932,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo, rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); index = rx_desc - rxq->descs; data = rxq->buf_virt_addr[index]; - phys_addr = rx_desc->buf_phys_addr; + phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction; if (!mvneta_rxq_desc_is_first_last(rx_status) || (rx_status & MVNETA_RXD_ERR_SUMMARY)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 487388aed98f..384c1fa49081 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work) unsigned long flags; bool poll_cmd = ent->polling; int alloc_ret; + int cmd_mode; sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; down(sem); @@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work) set_signature(ent, !cmd->checksum_disabled); dump_command(dev, ent, 1); ent->ts1 = ktime_get_ns(); + cmd_mode = cmd->mode; if (ent->callback) schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); @@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work) iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); mmiowb(); /* if not in polling don't use ent after this point */ - if (cmd->mode == CMD_MODE_POLLING || poll_cmd) { + if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); @@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, { struct mlx5_core_dev *dev = filp->private_data; struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - char outlen_str[8]; + char outlen_str[8] = {0}; int outlen; void *ptr; int err; @@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, if (copy_from_user(outlen_str, buf, count)) return -EFAULT; - outlen_str[7] = 0; - err = sscanf(outlen_str, "%d", &outlen); if (err < 0) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 56c1b6f5593e..dae4156a710d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); @@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { mlx5e_redirect_rqts_to_drop(priv); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_remove_sqs_fwd_rules(priv); /* FIXME: This is a W/A only for tx timeout watch dog false alarm when @@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); #if IS_ENABLED(CONFIG_MLX5_ESWITCH) - if (MLX5_VPORT_MANAGER(mdev)) + if (MLX5_ESWITCH_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_enable_async_events(priv); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_register_vport_reps(priv); if (netdev->reg_state != NETREG_REGISTERED) @@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) queue_work(priv->wq, &priv->set_rx_mode_work); - if (MLX5_VPORT_MANAGER(priv->mdev)) + if (MLX5_ESWITCH_MANAGER(priv->mdev)) mlx5e_unregister_vport_reps(priv); mlx5e_disable_async_events(priv); @@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) return NULL; #ifdef CONFIG_MLX5_ESWITCH - if (MLX5_VPORT_MANAGER(mdev)) { + if (MLX5_ESWITCH_MANAGER(mdev)) { rpriv = mlx5e_alloc_nic_rep_priv(mdev); if (!rpriv) { mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 57987f6546e8..2b8040a3cdbd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) struct 
mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep; - if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) + if (!MLX5_ESWITCH_MANAGER(priv->mdev)) return false; rep = rpriv->rep; @@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_eswitch_rep *rep; + if (!MLX5_ESWITCH_MANAGER(priv->mdev)) + return false; + + rep = rpriv->rep; if (rep && rep->vport != FDB_UPLINK_VPORT) return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f63dfbcd29fe..b79d74860a30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) } /* Public E-Switch API */ -#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev)) +#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev)) + int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; int i, enabled_events; - if (!ESW_ALLOWED(esw)) - return 0; - - if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || + if (!ESW_ALLOWED(esw) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); return -EOPNOTSUPP; @@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u64 node_guid; int err = 0; - if (!ESW_ALLOWED(esw)) + if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) return -EINVAL; @@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, { struct mlx5_vport *evport; - if (!ESW_ALLOWED(esw)) + if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index cecd201f0b73..91f1209886ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return -EOPNOTSUPP; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; + if(!MLX5_ESWITCH_MANAGER(dev)) + return -EPERM; if (dev->priv.eswitch->mode == SRIOV_NONE) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 49a75d31185e..f1a86cea86a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -32,6 +32,7 @@ #include #include +#include #include "mlx5_core.h" #include "fs_core.h" @@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { err = init_fdb_root_ns(steering); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index afd9f4fa22f4..41ad24f0de2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -32,6 +32,7 @@ #include #include 
+#include #include #include "mlx5_core.h" #include "../../mlxfw/mlxfw.h" @@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) } if (MLX5_CAP_GEN(dev, vport_group_manager) && - MLX5_CAP_GEN(dev, eswitch_flow_table)) { + MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); if (err) return err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 7cb67122e8b5..98359559c77e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "mlx5_core.h" #include "lib/mpfs.h" @@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); struct mlx5_mpfs *mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); @@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) { struct mlx5_mpfs *mpfs = dev->priv.mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return; WARN_ON(!hlist_empty(mpfs->hash)); @@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) u32 index; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); @@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index fa9d0760dd36..31a9cbd85689 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc); static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + u32 out[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; @@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 2a8b529ce6dd..a0674962f02c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return -EBUSY; } + if (!MLX5_ESWITCH_MANAGER(dev)) + goto enable_vfs_hca; + err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); if (err) { mlx5_core_warn(dev, @@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return err; } +enable_vfs_hca: for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); if (err) { @@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) } out: - mlx5_eswitch_disable_sriov(dev->priv.eswitch); + if (MLX5_ESWITCH_MANAGER(dev)) + 
mlx5_eswitch_disable_sriov(dev->priv.eswitch); if (mlx5_wait_for_vf_pages(dev)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 719cecb182c6..7eecd5b07bb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, return -EINVAL; if (!MLX5_CAP_GEN(mdev, vport_group_manager)) return -EACCES; - if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) - return -EOPNOTSUPP; in = kvzalloc(inlen, GFP_KERNEL); if (!in) diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index f4d9c9975ac3..82827a8d3d67 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -30,7 +30,7 @@ config MLXSW_CORE_THERMAL config MLXSW_PCI tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" - depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE + depends on PCI && HAS_IOMEM && MLXSW_CORE default m ---help--- This is PCI bus implementation for Mellanox Technologies Switch ASICs. diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index fb2c8f8071e6..776a8a9be8e3 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -344,10 +344,9 @@ static int ocelot_port_stop(struct net_device *dev) static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) { ifh[0] = IFH_INJ_BYPASS; - ifh[1] = (0xff00 & info->port) >> 8; + ifh[1] = (0xf00 & info->port) >> 8; ifh[2] = (0xff & info->port) << 24; - ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) | - (info->tag_type << 16) | info->vid; + ifh[3] = (info->tag_type << 16) | info->vid; return 0; } @@ -370,11 +369,13 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); info.port = BIT(port->chip_port); - info.cpuq = 0xff; + info.tag_type = IFH_TAG_TYPE_C; + info.vid = skb_vlan_tag_get(skb); ocelot_gen_ifh(ifh, &info); for (i = 0; i < IFH_LEN; i++) - ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); + ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]), + QS_INJ_WR, grp); count = (skb->len + 3) / 4; last = skb->len % 4; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index fcdfb8e7fdea..40216d56dddc 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, ret = nfp_net_bpf_offload(nn, prog, running, extack); /* Stop offload if replace not possible */ - if (ret && prog) - nfp_bpf_xdp_offload(app, nn, NULL, extack); + if (ret) + return ret; - nn->dp.bpf_offload_xdp = prog && !ret; + nn->dp.bpf_offload_xdp = !!prog; return ret; } @@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; + if (tcf_block_shared(f->block)) + return -EOPNOTSUPP; + switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 91935405f586..84f7a5dbea9d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ 
-123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame, NFP_FLOWER_MASK_MPLS_Q; frame->mpls_lse = cpu_to_be32(t_mpls); + } else if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC)) { + /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q + * bit, which indicates an mpls ether type but without any + * mpls fields. + */ + struct flow_dissector_key_basic *key_basic; + + key_basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + flow->key); + if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || + key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) + frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c42e64f32333..525057bee0ed 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, case cpu_to_be16(ETH_P_ARP): return -EOPNOTSUPP; + case cpu_to_be16(ETH_P_MPLS_UC): + case cpu_to_be16(ETH_P_MPLS_MC): + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { + key_layer |= NFP_FLOWER_LAYER_MAC; + key_size += sizeof(struct nfp_flower_mac_mpls); + } + break; + /* Will be included in layer 2. */ case cpu_to_be16(ETH_P_8021Q): break; @@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; + if (tcf_block_shared(f->block)) + return -EOPNOTSUPP; + switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 46b76d5a726c..152283d7e59c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -240,7 +240,6 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); pf->limit_vfs = ~0; - pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */ /* Allow any setting for backwards compatibility if symbol not found */ if (err == -ENOENT) return 0; @@ -668,7 +667,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, err = nfp_net_pci_probe(pf); if (err) - goto err_sriov_unlimit; + goto err_fw_unload; err = nfp_hwmon_register(pf); if (err) { @@ -680,8 +679,6 @@ static int nfp_pci_probe(struct pci_dev *pdev, err_net_remove: nfp_net_pci_remove(pf); -err_sriov_unlimit: - pci_sriov_set_totalvfs(pf->pdev, 0); err_fw_unload: kfree(pf->rtbl); nfp_mip_close(pf->mip); @@ -715,7 +712,6 @@ static void nfp_pci_remove(struct pci_dev *pdev) nfp_hwmon_unregister(pf); nfp_pcie_sriov_disable(pdev); - pci_sriov_set_totalvfs(pf->pdev, 0); nfp_net_pci_remove(pf); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index cd34097b79f1..37a6d7822a38 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp) err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res), nfp_resource_address(state->res), fwinf, sizeof(*fwinf)); - if (err < sizeof(*fwinf)) + if (err < (int)sizeof(*fwinf)) goto err_release; if (!nffw_res_flg_init_get(fwinf)) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 
8f31406ec894..e0680ce91328 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; - DP_ERR(p_hwfn, - "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n", - id, app_prio_bitmap); + DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); return false; } @@ -710,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, - ARRAY_SIZE(p_local->local_chassis_id)); + sizeof(p_local->local_chassis_id)); memcpy(params->lldp_local.local_port_id, p_local->local_port_id, - ARRAY_SIZE(p_local->local_port_id)); + sizeof(p_local->local_port_id)); } static void @@ -724,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, - ARRAY_SIZE(p_remote->peer_chassis_id)); + sizeof(p_remote->peer_chassis_id)); memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, - ARRAY_SIZE(p_remote->peer_port_id)); + sizeof(p_remote->peer_port_id)); } static int @@ -1479,8 +1478,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap) *cap = 0x80; break; case DCB_CAP_ATTR_DCBX: - *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE | - DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC); + *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE | + DCB_CAP_DCBX_STATIC); break; default: *cap = false; @@ -1548,8 +1547,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev) if (!dcbx_info) return 0; - if (dcbx_info->operational.enabled) - mode |= DCB_CAP_DCBX_LLD_MANAGED; if (dcbx_info->operational.ieee) mode |= DCB_CAP_DCBX_VER_IEEE; if (dcbx_info->operational.cee) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 329781cda77f..e5249b4741d0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) DP_INFO(p_hwfn, "Failed to update driver state\n"); rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, - QED_OV_ESWITCH_VEB); + QED_OV_ESWITCH_NONE); if (rc) DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c97ebd681c47..012973d75ad0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) skb = build_skb(buffer->data, 0); if (!skb) { - rc = -ENOMEM; - goto out_post; + DP_INFO(cdev, "Failed to build SKB\n"); + kfree(buffer->data); + goto out_post1; } data->u.placement_offset += NET_SKB_PAD; @@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, data->opaque_data_0, data->opaque_data_1); + } else { + DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA | + QED_MSG_LL2 | QED_MSG_STORAGE), + "Dropping the packet\n"); + kfree(buffer->data); } +out_post1: /* Update Buffer information and update FW producer */ buffer->data = new_data; 
buffer->phys_addr = new_phys_addr; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index b04d57ca5176..0cbc74d6ca8b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -567,8 +567,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance) /* Fastpath interrupts */ for (j = 0; j < 64; j++) { if ((0x2ULL << j) & status) { - hwfn->simd_proto_handler[j].func( - hwfn->simd_proto_handler[j].token); + struct qed_simd_fp_handler *p_handler = + &hwfn->simd_proto_handler[j]; + + if (p_handler->func) + p_handler->func(p_handler->token); + else + DP_NOTICE(hwfn, + "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", + j, status); + status &= ~(0x2ULL << j); rc = IRQ_HANDLED; } @@ -781,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, /* We want a minimum of one slowpath and one fastpath vector per hwfn */ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; + if (is_kdump_kernel()) { + DP_INFO(cdev, + "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", + cdev->int_params.in.min_msix_cnt); + cdev->int_params.in.num_vectors = + cdev->int_params.in.min_msix_cnt; + } + rc = qed_set_int_mode(cdev, false); if (rc) { DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index f01bf52bc381..fd59cf45f4be 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, static int qed_sriov_enable(struct qed_dev *cdev, int num) { struct qed_iov_vf_init_params params; + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; int i, j, rc; if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { @@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) /* Initialize HW for VF access */ for_each_hwfn(cdev, j) { - struct qed_hwfn *hwfn = &cdev->hwfns[j]; - struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + hwfn = &cdev->hwfns[j]; + ptt = qed_ptt_acquire(hwfn); /* Make sure not to use more than 16 queues per VF */ params.num_queues = min_t(int, @@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) goto err; } + hwfn = QED_LEADING_HWFN(cdev); + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + rc = -EBUSY; + goto err; + } + + rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); + if (rc) + DP_INFO(cdev, "Failed to update eswitch mode\n"); + qed_ptt_release(hwfn, ptt); + return num; err: diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 02adb513f475..013ff567283c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; - if (!ptp) - return -EIO; + if (!ptp) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + + return 0; + } info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 75dfac0248f4..f4cae2be0fda 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ 
b/drivers/net/ethernet/realtek/r8169.c @@ -7148,7 +7148,7 @@ static void rtl8169_netpoll(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev); + rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); } #endif diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 27be51f0a421..f3f7477043ce 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -17,7 +17,6 @@ if NET_VENDOR_RENESAS config SH_ETH tristate "Renesas SuperH Ethernet support" - depends on HAS_DMA depends on ARCH_RENESAS || SUPERH || COMPILE_TEST select CRC32 select MII @@ -31,7 +30,6 @@ config SH_ETH config RAVB tristate "Renesas Ethernet AVB support" - depends on HAS_DMA depends on ARCH_RENESAS || COMPILE_TEST select CRC32 select MII diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index ad4a354ce570..570ec72266f3 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3180,6 +3180,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, return true; } +static struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, const struct efx_filter_spec *spec) { diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 8edf20967c82..e045a5d6b938 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx) if (!state) return -ENOMEM; efx->filter_state = state; + init_rwsem(&state->lock); table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; table->id = EFX_FARCH_FILTER_TABLE_RX_IP; diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index cb5b0f58c395..edf20361ea5f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" default ARCH_SOCFPGA - depends on OF && (ARCH_SOCFPGA || COMPILE_TEST) + depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) select MFD_SYSCON help Support for ethernet controller on Altera SOCFPGA diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 6e359572b9f0..5b3b06a0a3bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -55,6 +55,7 @@ struct socfpga_dwmac { struct device *dev; struct regmap *sys_mgr_base_addr; struct reset_control *stmmac_rst; + struct reset_control *stmmac_ocp_rst; void __iomem *splitter_base; bool f2h_ptp_ref_clk; struct tse_pcs pcs; @@ -262,8 +263,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; /* Assert reset to the enet controller before changing the phy mode */ - if (dwmac->stmmac_rst) - reset_control_assert(dwmac->stmmac_rst); + reset_control_assert(dwmac->stmmac_ocp_rst); + reset_control_assert(dwmac->stmmac_rst); regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); @@ -288,8 +289,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) /* Deassert reset for the phy configuration to be sampled by * the enet controller, and operation to start in requested mode */ - if (dwmac->stmmac_rst) - reset_control_deassert(dwmac->stmmac_rst); + 
reset_control_deassert(dwmac->stmmac_ocp_rst); + reset_control_deassert(dwmac->stmmac_rst); if (phymode == PHY_INTERFACE_MODE_SGMII) { if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) { dev_err(dwmac->dev, "Unable to initialize TSE PCS"); @@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp"); + if (IS_ERR(dwmac->stmmac_ocp_rst)) { + ret = PTR_ERR(dwmac->stmmac_ocp_rst); + dev_err(dev, "error getting reset control of ocp %d\n", ret); + goto err_remove_config_dt; + } + + reset_control_deassert(dwmac->stmmac_ocp_rst); + ret = socfpga_dwmac_parse_data(dwmac, dev); if (ret) { dev_err(dev, "Unable to parse OF data\n"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d37f17ca62fe..65bc3556bd8f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) } } +static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); + + value &= ~DMA_RBSZ_MASK; + value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK; + + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); +} + const struct stmmac_dma_ops dwmac4_dma_ops = { .reset = dwmac4_dma_reset, .init = dwmac4_dma_init, @@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .set_bfsize = dwmac4_set_bfsize, }; const struct stmmac_dma_ops dwmac410_dma_ops = { @@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .set_bfsize = dwmac4_set_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index c63c1fe3f26b..22a4a6dbb1a4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -120,6 +120,8 @@ /* DMA Rx Channel X Control register defines */ #define DMA_CONTROL_SR BIT(0) +#define DMA_RBSZ_MASK GENMASK(14, 1) +#define DMA_RBSZ_SHIFT 1 /* Interrupt status per channel */ #define DMA_CHAN_STATUS_REB GENMASK(21, 19) diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e44e7b26ce82..fe8b536b13f8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -183,6 +183,7 @@ struct stmmac_dma_ops { void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); + void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); }; #define stmmac_reset(__priv, __args...) \ @@ -235,6 +236,8 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) #define stmmac_enable_tso(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_tso, __args) +#define stmmac_set_dma_bfsize(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, dma, set_bfsize, __args) struct mac_device_info; struct net_device; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e79b0d7b388a..60f59abab009 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -928,6 +928,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) static int stmmac_init_phy(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_cnt = priv->plat->tx_queues_to_use; struct phy_device *phydev; char phy_id_fmt[MII_BUS_ID_SIZE + 3]; char bus_id[MII_BUS_ID_SIZE]; @@ -968,6 +969,15 @@ static int stmmac_init_phy(struct net_device *dev) phydev->advertising &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); + /* + * Half-duplex mode not supported with multiqueue + * half-duplex can only works with single queue + */ + if (tx_cnt > 1) + phydev->supported &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Half); + /* * Broken HW is sometimes missing the pull-up resistor on the * MDIO line, which results in reads to non-existent devices returning @@ -1794,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, qmode); + stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, + chan); } for (chan = 0; chan < tx_channels_count; chan++) { diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 7a16d40a72d1..b9221fc1674d 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -60,8 +60,7 @@ #include #include "sungem.h" -/* Stripping FCS is causing problems, disabled for now */ -#undef STRIP_FCS +#define STRIP_FCS #define DEFAULT_MSG (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ @@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp) writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) writel(((5 & RXDMA_BLANK_IPKTS) | @@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do) struct net_device *dev = gp->dev; int entry, drops, work_done = 0; u32 done; - __sum16 csum; if (netif_msg_rx_status(gp)) printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", @@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do) skb = copy_skb; } - csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); - skb->csum = csum_unfold(csum); - skb->ip_summed = CHECKSUM_COMPLETE; + if (likely(dev->features & NETIF_F_RXCSUM)) { + __sum16 csum; + + csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); + skb->csum = csum_unfold(csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } skb->protocol = eth_type_trans(skb, gp->dev); napi_gro_receive(&gp->napi, skb); @@ -1761,7 +1763,7 @@ static void gem_init_dma(struct gem *gp) writel(0, gp->regs + TXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); @@ -2985,8 +2987,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); /* We can do 
scatter/gather and HW checksum */ - dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; - dev->features |= dev->hw_features | NETIF_F_RXCSUM; + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + dev->features = dev->hw_features; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index cdbddf16dd29..4f1267477aa4 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -205,7 +205,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr) * devices (e.g. cpsw switches) use plain old memory. Descriptor pools * abstract out these details */ -int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr) +static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr) { struct cpdma_params *cpdma_params = &ctlr->params; struct cpdma_desc_pool *pool; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 06d7c9e4dcda..f270beebb428 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1385,6 +1385,15 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) return -EOPNOTSUPP; } +static int match_first_device(struct device *dev, void *data) +{ + if (dev->parent && dev->parent->of_node) + return of_device_is_compatible(dev->parent->of_node, + "ti,davinci_mdio"); + + return !strncmp(dev_name(dev), "davinci_mdio", 12); +} + /** * emac_dev_open - EMAC device open * @ndev: The DaVinci EMAC network adapter @@ -1484,8 +1493,14 @@ static int emac_dev_open(struct net_device *ndev) /* use the first phy on the bus if pdata did not give us a phy id */ if (!phydev && !priv->phy_id) { - phy = bus_find_device_by_name(&mdio_bus_type, NULL, - "davinci_mdio"); + /* NOTE: we can't use bus_find_device_by_name() here because + * the device name is not guaranteed to be 'davinci_mdio'. On + * some systems it can be 'davinci_mdio.0' so we need to use + * strncmp() against the first part of the string to correctly + * match it. 
+ */ + phy = bus_find_device(&mdio_bus_type, NULL, NULL, + match_first_device); if (phy) { priv->phy_id = dev_name(phy); if (!priv->phy_id || !*priv->phy_id) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 750eaa53bf0c..ada33c2d9ac2 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index f347fd9c5b28..777fa59f5e0c 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -89,10 +89,6 @@ static const char banner[] __initconst = KERN_INFO \ "AX.25: bpqether driver version 004\n"; -static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF}; - -static char bpq_eth_addr[6]; - static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static int bpq_device_event(struct notifier_block *, unsigned long, void *); @@ -501,8 +497,8 @@ static int bpq_new_device(struct net_device *edev) bpq->ethdev = edev; bpq->axdev = ndev; - memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr)); - memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr)); + eth_broadcast_addr(bpq->dest_addr); + eth_broadcast_addr(bpq->acpt_addr); err = register_netdevice(ndev); if (err) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 1a924b867b07..4b6e308199d2 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); -void rndis_set_subchannel(struct work_struct *w); +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 5d5bd513847f..8e9d0ee1572b 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) VM_PKT_DATA_INBAND, 0); } +/* Worker to setup sub channels on initial setup + * Initial hotplug event occurs in softirq context + * and can't wait for channels. 
+ */ +static void netvsc_subchan_work(struct work_struct *w) +{ + struct netvsc_device *nvdev = + container_of(w, struct netvsc_device, subchan_work); + struct rndis_device *rdev; + int i, ret; + + /* Avoid deadlock with device removal already under RTNL */ + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + + rdev = nvdev->extension; + if (rdev) { + ret = rndis_set_subchannel(rdev->ndev, nvdev); + if (ret == 0) { + netif_device_attach(rdev->ndev); + } else { + /* fallback to only primary channel */ + for (i = 1; i < nvdev->num_chn; i++) + netif_napi_del(&nvdev->chan_table[i].napi); + + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + + rtnl_unlock(); +} + static struct netvsc_device *alloc_net_device(void) { struct netvsc_device *net_device; @@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void) init_completion(&net_device->channel_init_wait); init_waitqueue_head(&net_device->subchan_open); - INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); + INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); return net_device; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fe2256bf1d13..dd1d6e115145 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev, if (IS_ERR(nvdev)) return PTR_ERR(nvdev); - /* Note: enable and attach happen when sub-channels setup */ + if (nvdev->num_chn > 1) { + ret = rndis_set_subchannel(ndev, nvdev); + /* if unavailable, just proceed with one queue */ + if (ret) { + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + + /* In any case device is now ready */ + netif_device_attach(ndev); + + /* Note: enable and attach happen when sub-channels setup */ netif_carrier_off(ndev); if (netif_running(ndev)) { @@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev, memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + if (nvdev->num_chn > 1) + schedule_work(&nvdev->subchan_work); + /* hw_features computed in rndis_netdev_set_hwcaps() */ net->features = net->hw_features | NETIF_F_HIGHDMA | NETIF_F_SG | diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 5428bb261102..9b4e3c3787e5 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) * This breaks overlap of processing the host message for the * new primary channel with the initialization of sub-channels. 
*/ -void rndis_set_subchannel(struct work_struct *w) +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) { - struct netvsc_device *nvdev - = container_of(w, struct netvsc_device, subchan_work); struct nvsp_message *init_packet = &nvdev->channel_init_pkt; - struct net_device_context *ndev_ctx; - struct rndis_device *rdev; - struct net_device *ndev; - struct hv_device *hv_dev; + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hv_dev = ndev_ctx->device_ctx; + struct rndis_device *rdev = nvdev->extension; int i, ret; - if (!rtnl_trylock()) { - schedule_work(w); - return; - } - - rdev = nvdev->extension; - if (!rdev) - goto unlock; /* device was removed */ - - ndev = rdev->ndev; - ndev_ctx = netdev_priv(ndev); - hv_dev = ndev_ctx->device_ctx; + ASSERT_RTNL(); memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; @@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w) VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) { netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); - goto failed; + return ret; } wait_for_completion(&nvdev->channel_init_wait); if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { netdev_err(ndev, "sub channel request failed\n"); - goto failed; + return -EIO; } nvdev->num_chn = 1 + @@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w) for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) ndev_ctx->tx_table[i] = i % nvdev->num_chn; - netif_device_attach(ndev); - rtnl_unlock(); - return; - -failed: - /* fallback to only primary channel */ - for (i = 1; i < nvdev->num_chn; i++) - netif_napi_del(&nvdev->chan_table[i].napi); - - nvdev->max_chn = 1; - nvdev->num_chn = 1; - - netif_device_attach(ndev); -unlock: - rtnl_unlock(); + return 0; } static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, @@ -1360,21 +1332,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, netif_napi_add(net, &net_device->chan_table[i].napi, netvsc_poll, NAPI_POLL_WEIGHT); - if (net_device->num_chn > 1) - schedule_work(&net_device->subchan_work); + return net_device; out: - /* if unavailable, just proceed with one queue */ - if (ret) { - net_device->max_chn = 1; - net_device->num_chn = 1; - } - - /* No sub channels, device is ready */ - if (net_device->num_chn == 1) - netif_device_attach(net); - - return net_device; + /* setting up multiple channels failed */ + net_device->max_chn = 1; + net_device->num_chn = 1; err_dev_remv: rndis_filter_device_remove(dev, net_device); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 4377c26f714d..4a949569ec4c 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) { struct ipvl_dev *ipvlan; struct net_device *mdev = port->dev; - int err = 0; + unsigned int flags; + int err; ASSERT_RTNL(); if (port->mode != nval) { + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + flags = ipvlan->dev->flags; + if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) { + err = dev_change_flags(ipvlan->dev, + flags | IFF_NOARP); + } else { + err = dev_change_flags(ipvlan->dev, + flags & ~IFF_NOARP); + } + if (unlikely(err)) + goto fail; + } if (nval == IPVLAN_MODE_L3S) { /* New mode is L3S */ err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); @@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) 
mdev->l3mdev_ops = &ipvl_l3mdev_ops; mdev->priv_flags |= IFF_L3MDEV_MASTER; } else - return err; + goto fail; } else if (port->mode == IPVLAN_MODE_L3S) { /* Old mode was L3S */ mdev->priv_flags &= ~IFF_L3MDEV_MASTER; ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); mdev->l3mdev_ops = NULL; } - list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) - ipvlan->dev->flags |= IFF_NOARP; - else - ipvlan->dev->flags &= ~IFF_NOARP; - } port->mode = nval; } + return 0; + +fail: + /* Undo the flags changes that have been done so far. */ + list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) { + flags = ipvlan->dev->flags; + if (port->mode == IPVLAN_MODE_L3 || + port->mode == IPVLAN_MODE_L3S) + dev_change_flags(ipvlan->dev, flags | IFF_NOARP); + else + dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP); + } + return err; } @@ -594,7 +614,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan->phy_dev = phy_dev; ipvlan->dev = dev; ipvlan->sfeatures = IPVLAN_FEATURES; - ipvlan_adjust_mtu(ipvlan, phy_dev); + if (!tb[IFLA_MTU]) + ipvlan_adjust_mtu(ipvlan, phy_dev); INIT_LIST_HEAD(&ipvlan->addrs); spin_lock_init(&ipvlan->addrs_lock); @@ -693,6 +714,7 @@ void ipvlan_link_setup(struct net_device *dev) { ether_setup(dev); + dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; dev->netdev_ops = &ipvlan_netdev_ops; diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 83f7420ddea5..4f390fa557e4 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -527,7 +527,7 @@ static int net_failover_slave_register(struct net_device *slave_dev, netif_addr_lock_bh(failover_dev); dev_uc_sync_multiple(slave_dev, failover_dev); - dev_uc_sync_multiple(slave_dev, failover_dev); + dev_mc_sync_multiple(slave_dev, failover_dev); netif_addr_unlock_bh(failover_dev); err = vlan_vids_add_by_dev(slave_dev, failover_dev); diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 081d99aa3985..49ac678eb2dc 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); + err = phy_write(phydev, MII_DP83811_INT_STAT2, 0); } return err; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index de51e8f70f44..ce61231e96ea 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pppoe_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index b0e8b9613054..1eaec648bd1f 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -967,8 +967,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) atomic_set(&ctx->stop, 1); - if (hrtimer_active(&ctx->tx_timer)) - hrtimer_cancel(&ctx->tx_timer); + hrtimer_cancel(&ctx->tx_timer); tasklet_kill(&ctx->bh); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 8dff87ec6d99..2e4130746c40 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -64,6 +64,7 @@ #define DEFAULT_RX_CSUM_ENABLE (true) #define 
DEFAULT_TSO_CSUM_ENABLE (true) #define DEFAULT_VLAN_FILTER_ENABLE (true) +#define DEFAULT_VLAN_RX_OFFLOAD (true) #define TX_OVERHEAD (8) #define RXW_PADDING 2 @@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; - ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); + ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); netdev->mtu = new_mtu; @@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev, } if (features & NETIF_F_HW_VLAN_CTAG_RX) + pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_; + else + pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_; + + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; else pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; @@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev) buf |= FCT_TX_CTL_EN_; ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); - ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); + ret = lan78xx_set_rx_max_frame_length(dev, + dev->net->mtu + VLAN_ETH_HLEN); ret = lan78xx_read_reg(dev, MAC_RX, &buf); buf |= MAC_RX_RXEN_; @@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) if (DEFAULT_TSO_CSUM_ENABLE) dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; + if (DEFAULT_VLAN_RX_OFFLOAD) + dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX; + + if (DEFAULT_VLAN_FILTER_ENABLE) + dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->net->hw_features = dev->net->features; ret = lan78xx_setup_irq_domain(dev); @@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, struct sk_buff *skb, u32 rx_cmd_a, u32 rx_cmd_b) { + /* HW Checksum offload appears to be flawed if used when not stripping + * VLAN headers. Drop back to S/W checksums under these conditions. 
+ */ if (!(dev->net->features & NETIF_F_RXCSUM) || - unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) { + unlikely(rx_cmd_a & RX_CMD_A_ICSM_) || + ((rx_cmd_a & RX_CMD_A_FVTG_) && + !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) { skb->ip_summed = CHECKSUM_NONE; } else { skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); @@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, } } +static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, + struct sk_buff *skb, + u32 rx_cmd_a, u32 rx_cmd_b) +{ + if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) && + (rx_cmd_a & RX_CMD_A_FVTG_)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + (rx_cmd_b & 0xffff)); +} + static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) { int status; @@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) if (skb->len == size) { lan78xx_rx_csum_offload(dev, skb, rx_cmd_a, rx_cmd_b); + lan78xx_rx_vlan_offload(dev, skb, + rx_cmd_a, rx_cmd_b); skb_trim(skb, skb->len - 4); /* remove fcs */ skb->truesize = size + sizeof(struct sk_buff); @@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) skb_set_tail_pointer(skb2, size); lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); + lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b); skb_trim(skb2, skb2->len - 4); /* remove fcs */ skb2->truesize = size + sizeof(struct sk_buff); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8e8b51f171f4..8fac8e132c5b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1246,6 +1246,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 86f7196f9d91..2a58607a6aea 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); #endif - napi_disable(&tp->napi); + if (!test_bit(RTL8152_UNPLUG, &tp->flags)) + napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b6c9a2af3732..53085c63277b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644); /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ #define VIRTIO_XDP_HEADROOM 256 +/* Separating two types of XDP xmit */ +#define VIRTIO_XDP_TX BIT(0) +#define VIRTIO_XDP_REDIR BIT(1) + /* RX packet size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. 
As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- @@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev, struct receive_queue *rq, void *buf, void *ctx, unsigned int len, - bool *xdp_xmit) + unsigned int *xdp_xmit) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev, trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; } - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_TX; rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: err = xdp_do_redirect(dev, &xdp, xdp_prog); if (err) goto err_xdp; - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_REDIR; rcu_read_unlock(); goto xdp_xmit; default: @@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *buf, void *ctx, unsigned int len, - bool *xdp_xmit) + unsigned int *xdp_xmit) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(xdp_page); goto err_xdp; } - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_TX; if (unlikely(xdp_page != page)) put_page(page); rcu_read_unlock(); @@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(xdp_page); goto err_xdp; } - *xdp_xmit = true; + *xdp_xmit |= VIRTIO_XDP_REDIR; if (unlikely(xdp_page != page)) put_page(page); rcu_read_unlock(); @@ -939,7 +943,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len, void **ctx, bool *xdp_xmit) + void *buf, unsigned int len, void **ctx, + unsigned int *xdp_xmit) { struct net_device *dev = vi->dev; struct sk_buff *skb; @@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work) } } -static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) +static int virtnet_receive(struct receive_queue *rq, int budget, + unsigned int *xdp_xmit) { struct virtnet_info *vi = rq->vq->vdev->priv; unsigned int len, received = 0, bytes = 0; @@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) struct virtnet_info *vi = rq->vq->vdev->priv; struct send_queue *sq; unsigned int received, qp; - bool xdp_xmit = false; + unsigned int xdp_xmit = 0; virtnet_poll_cleantx(rq); @@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget) if (received < budget) virtqueue_napi_complete(napi, rq->vq, received); - if (xdp_xmit) { + if (xdp_xmit & VIRTIO_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_xmit & VIRTIO_XDP_TX) { qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); sq = &vi->sq[qp]; virtqueue_kick(sq->vq); - xdp_do_flush_map(); } return received; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index aee0e60471f1..f6bb1d54d4bd 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, flush = 0; out: - skb_gro_remcsum_cleanup(skb, &grc); - skb->remcsum_offload = 0; - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final_remcsum(skb, pp, flush, &grc); return pp; } diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig index 9d99eb42d917..6acba67bca07 100644 --- a/drivers/net/wireless/broadcom/brcm80211/Kconfig +++ 
b/drivers/net/wireless/broadcom/brcm80211/Kconfig @@ -60,7 +60,6 @@ config BRCMFMAC_PCIE bool "PCIE bus interface support for FullMAC driver" depends on BRCMFMAC depends on PCI - depends on HAS_DMA select BRCMFMAC_PROTO_MSGBUF select FW_LOADER ---help--- diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig index 025fa6018550..8d1492a90bd1 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig +++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig @@ -7,7 +7,7 @@ config QTNFMAC config QTNFMAC_PEARL_PCIE tristate "Quantenna QSR10g PCIe support" default n - depends on HAS_DMA && PCI && CFG80211 + depends on PCI && CFG80211 select QTNFMAC select FW_LOADER select CRC32 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 922ce0abf5cf..a57daecf1d57 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1810,7 +1810,7 @@ static int talk_to_netback(struct xenbus_device *dev, err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); - goto out; + goto out_unlocked; } rtnl_lock(); @@ -1925,6 +1925,7 @@ static int talk_to_netback(struct xenbus_device *dev, xennet_destroy_queues(info); out: rtnl_unlock(); +out_unlocked: device_unregister(&dev->dev); return err; } @@ -1950,10 +1951,6 @@ static int xennet_connect(struct net_device *dev) /* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; - rtnl_lock(); - netdev_update_features(dev); - rtnl_unlock(); - if (dev->reg_state == NETREG_UNINITIALIZED) { err = register_netdev(dev); if (err) { @@ -1963,6 +1960,10 @@ static int xennet_connect(struct net_device *dev) } } + rtnl_lock(); + netdev_update_features(dev); + rtnl_unlock(); + /* * All public and private state should now be sane. 
Get * ready to start sending and receiving packets and give the driver diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index d5553c47014f..5d823e965883 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb) struct sk_buff *skb = NULL; if (!urb->status) { - skb = alloc_skb(urb->actual_length, GFP_KERNEL); + skb = alloc_skb(urb->actual_length, GFP_ATOMIC); if (!skb) { nfc_err(&phy->udev->dev, "failed to alloc memory\n"); } else { @@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev, if (dev->protocol_type == PN533_PROTO_REQ_RESP) { /* request for response for sent packet directly */ - rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC); + rc = pn533_submit_urb_for_response(phy, GFP_KERNEL); if (rc) goto error; } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) { diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 2e96b34bc936..fb667bf469c7 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, return -EIO; if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0) return -EIO; + return 0; } if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 68940356cad3..8b1fd7f1a224 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev, blk_queue_logical_block_size(q, pmem_sector_size(ndns)); blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_DAX, q); + if (pmem->pfn_flags & PFN_MAP) + blk_queue_flag_set(QUEUE_FLAG_DAX, q); q->queuedata = pmem; disk = alloc_disk_node(0, nid); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 21710a7460c8..46df030b2c3f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1808,6 +1808,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, u32 max_segments = (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; + max_segments = min_not_zero(max_segments, ctrl->max_segments); blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index b528a2f5826c..41d45a1b5c62 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2790,6 +2790,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) /* re-enable the admin_q so anything new can fast fail */ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + /* resume the io queues so that things will fast fail */ + nvme_start_queues(&ctrl->ctrl); + nvme_fc_ctlr_inactive_on_rport(ctrl); } @@ -2804,9 +2807,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) * waiting for io to terminate */ nvme_fc_delete_association(ctrl); - - /* resume the io queues so that things will fast fail */ - nvme_start_queues(nctrl); } static void diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 231807cbc849..0c4a33df3b2f 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -170,6 +170,7 @@ struct nvme_ctrl { u64 cap; u32 page_size; u32 max_hw_sectors; + u32 max_segments; u16 oncs; u16 oacs; u16 nssa; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index fc33804662e7..ba943f211687 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -38,6 +38,13 @@ #define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct 
nvme_sgl_desc)) +/* + * These can be higher, but we need to ensure that any command doesn't + * require an sg allocation that needs more than a page of data. + */ +#define NVME_MAX_KB_SZ 4096 +#define NVME_MAX_SEGS 127 + static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0); @@ -100,6 +107,8 @@ struct nvme_dev { struct nvme_ctrl ctrl; struct completion ioq_wait; + mempool_t *iod_mempool; + /* shadow doorbell buffer support: */ u32 *dbbuf_dbs; dma_addr_t dbbuf_dbs_dma_addr; @@ -477,10 +486,7 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev) iod->use_sgl = nvme_pci_use_sgls(dev, rq); if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { - size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg, - iod->use_sgl); - - iod->sg = kmalloc(alloc_size, GFP_ATOMIC); + iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); if (!iod->sg) return BLK_STS_RESOURCE; } else { @@ -526,7 +532,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req) } if (iod->sg != iod->inline_sg) - kfree(iod->sg); + mempool_free(iod->sg, dev->iod_mempool); } #ifdef CONFIG_BLK_DEV_INTEGRITY @@ -2280,6 +2286,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) blk_put_queue(dev->ctrl.admin_q); kfree(dev->queues); free_opal_dev(dev->ctrl.opal_dev); + mempool_destroy(dev->iod_mempool); kfree(dev); } @@ -2289,6 +2296,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status) nvme_get_ctrl(&dev->ctrl); nvme_dev_disable(dev, false); + nvme_kill_queues(&dev->ctrl); if (!queue_work(nvme_wq, &dev->remove_work)) nvme_put_ctrl(&dev->ctrl); } @@ -2333,6 +2341,13 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; + /* + * Limit the max command size to prevent iod->sg allocations going + * over a single page. + */ + dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; + dev->ctrl.max_segments = NVME_MAX_SEGS; + result = nvme_init_identify(&dev->ctrl); if (result) goto out; @@ -2405,7 +2420,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work) struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); struct pci_dev *pdev = to_pci_dev(dev->dev); - nvme_kill_queues(&dev->ctrl); if (pci_get_drvdata(pdev)) device_release_driver(&pdev->dev); nvme_put_ctrl(&dev->ctrl); @@ -2509,6 +2523,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) int node, result = -ENOMEM; struct nvme_dev *dev; unsigned long quirks = id->driver_data; + size_t alloc_size; node = dev_to_node(&pdev->dev); if (node == NUMA_NO_NODE) @@ -2546,6 +2561,23 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto release_pools; + /* + * Double check that our mempool alloc size will cover the biggest + * command we support. 
+ */ + alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ, + NVME_MAX_SEGS, true); + WARN_ON_ONCE(alloc_size > PAGE_SIZE); + + dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, + mempool_kfree, + (void *) alloc_size, + GFP_KERNEL, node); + if (!dev->iod_mempool) { + result = -ENOMEM; + goto release_pools; + } + dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); nvme_get_ctrl(&dev->ctrl); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index c9424da0d23e..518c5b09038c 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) return; - if (nvme_rdma_queue_idx(queue) == 0) { - nvme_rdma_free_qe(queue->device->dev, - &queue->ctrl->async_event_sqe, - sizeof(struct nvme_command), DMA_TO_DEVICE); - } - nvme_rdma_destroy_queue_ib(queue); rdma_destroy_id(queue->cm_id); } @@ -698,7 +692,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, set = &ctrl->tag_set; memset(set, 0, sizeof(*set)); set->ops = &nvme_rdma_mq_ops; - set->queue_depth = nctrl->opts->queue_size; + set->queue_depth = nctrl->sqsize + 1; set->reserved_tags = 1; /* fabric connect */ set->numa_node = NUMA_NO_NODE; set->flags = BLK_MQ_F_SHOULD_MERGE; @@ -734,11 +728,15 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { - nvme_rdma_stop_queue(&ctrl->queues[0]); if (remove) { blk_cleanup_queue(ctrl->ctrl.admin_q); nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); } + if (ctrl->async_event_sqe.data) { + nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + ctrl->async_event_sqe.data = NULL; + } nvme_rdma_free_queue(&ctrl->queues[0]); } @@ -755,11 +753,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev); + error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); + if (error) + goto out_free_queue; + if (new) { ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); if (IS_ERR(ctrl->ctrl.admin_tagset)) { error = PTR_ERR(ctrl->ctrl.admin_tagset); - goto out_free_queue; + goto out_free_async_qe; } ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); @@ -795,12 +798,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, if (error) goto out_stop_queue; - error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev, - &ctrl->async_event_sqe, sizeof(struct nvme_command), - DMA_TO_DEVICE); - if (error) - goto out_stop_queue; - return 0; out_stop_queue: @@ -811,6 +808,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, out_free_tagset: if (new) nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); +out_free_async_qe: + nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, + sizeof(struct nvme_command), DMA_TO_DEVICE); out_free_queue: nvme_rdma_free_queue(&ctrl->queues[0]); return error; @@ -819,7 +819,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { - nvme_rdma_stop_io_queues(ctrl); if (remove) { blk_cleanup_queue(ctrl->ctrl.connect_q); nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset); @@ 
-888,9 +887,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) list_del(&ctrl->list); mutex_unlock(&nvme_rdma_ctrl_mutex); - kfree(ctrl->queues); nvmf_free_options(nctrl->opts); free_ctrl: + kfree(ctrl->queues); kfree(ctrl); } @@ -949,6 +948,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) return; destroy_admin: + nvme_rdma_stop_queue(&ctrl->queues[0]); nvme_rdma_destroy_admin_queue(ctrl, false); requeue: dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", @@ -965,12 +965,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, false); } blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_rdma_stop_queue(&ctrl->queues[0]); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, &ctrl->ctrl); nvme_rdma_destroy_admin_queue(ctrl, false); @@ -1736,6 +1738,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) { if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, shutdown); @@ -1747,6 +1750,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_rdma_stop_queue(&ctrl->queues[0]); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, &ctrl->ctrl); blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); @@ -1932,11 +1936,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, goto out_free_ctrl; } - ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, - 0 /* no quirks, we're perfect! */); - if (ret) - goto out_free_ctrl; - INIT_DELAYED_WORK(&ctrl->reconnect_work, nvme_rdma_reconnect_ctrl_work); INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); @@ -1950,14 +1949,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), GFP_KERNEL); if (!ctrl->queues) - goto out_uninit_ctrl; + goto out_free_ctrl; + + ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, + 0 /* no quirks, we're perfect! 
*/); + if (ret) + goto out_kfree_queues; changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); WARN_ON_ONCE(!changed); ret = nvme_rdma_configure_admin_queue(ctrl, true); if (ret) - goto out_kfree_queues; + goto out_uninit_ctrl; /* sanity check icdoff */ if (ctrl->ctrl.icdoff) { @@ -1974,20 +1978,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, goto out_remove_admin_queue; } - if (opts->queue_size > ctrl->ctrl.maxcmd) { - /* warn if maxcmd is lower than queue_size */ - dev_warn(ctrl->ctrl.device, - "queue_size %zu > ctrl maxcmd %u, clamping down\n", - opts->queue_size, ctrl->ctrl.maxcmd); - opts->queue_size = ctrl->ctrl.maxcmd; - } - + /* only warn if argument is too large here, will clamp later */ if (opts->queue_size > ctrl->ctrl.sqsize + 1) { - /* warn if sqsize is lower than queue_size */ dev_warn(ctrl->ctrl.device, "queue_size %zu > ctrl sqsize %u, clamping down\n", opts->queue_size, ctrl->ctrl.sqsize + 1); - opts->queue_size = ctrl->ctrl.sqsize + 1; + } + + /* warn if maxcmd is lower than sqsize+1 */ + if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { + dev_warn(ctrl->ctrl.device, + "sqsize %u > ctrl maxcmd %u, clamping down\n", + ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); + ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; } if (opts->nr_io_queues) { @@ -2013,15 +2016,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, return &ctrl->ctrl; out_remove_admin_queue: + nvme_rdma_stop_queue(&ctrl->queues[0]); nvme_rdma_destroy_admin_queue(ctrl, true); -out_kfree_queues: - kfree(ctrl->queues); out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); +out_kfree_queues: + kfree(ctrl->queues); out_free_ctrl: kfree(ctrl); return ERR_PTR(ret); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index a03da764ecae..74d4b785d2da 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -686,6 +686,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) } ctrl->csts = NVME_CSTS_RDY; + + /* + * Controllers that are not yet enabled should not really enforce the + * keep alive timeout, but we still want to track a timeout and cleanup + * in case a host died before it enabled the controller. Hence, simply + * reset the keep alive timer when the controller is enabled. + */ + mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); } static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index b5b0cdc21d01..514d1dfc5630 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -936,6 +936,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) return cell; } + /* NULL cell_id only allowed for device tree; invalid otherwise */ + if (!cell_id) + return ERR_PTR(-EINVAL); + return nvmem_cell_get_from_list(cell_id); } EXPORT_SYMBOL_GPL(nvmem_cell_get); diff --git a/drivers/opp/core.c b/drivers/opp/core.c index ab2f3fead6b1..31ff03dbeb83 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -598,7 +598,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, } /* Scaling up? 
Scale voltage before frequency */ - if (freq > old_freq) { + if (freq >= old_freq) { ret = _set_opp_voltage(dev, reg, new_supply); if (ret) goto restore_voltage; diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 535201984b8b..1b2cfe51e8d7 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -28,10 +28,10 @@ obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o obj-$(CONFIG_PCI_ECAM) += ecam.o obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o -obj-y += controller/ -obj-y += switch/ - # Endpoint library must be initialized before its users obj-$(CONFIG_PCI_ENDPOINT) += endpoint/ +obj-y += controller/ +obj-y += switch/ + ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 18fa09b3ac8f..cc9fa02d32a0 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -96,7 +96,6 @@ config PCI_HOST_GENERIC depends on OF select PCI_HOST_COMMON select IRQ_DOMAIN - select PCI_DOMAINS help Say Y here if you want to support a simple generic PCI host controller, such as the one emulated by kvmtool. @@ -138,7 +137,6 @@ config PCI_VERSATILE config PCIE_IPROC tristate - select PCI_DOMAINS help This enables the iProc PCIe core controller support for Broadcom's iProc family of SoCs. An appropriate bus interface driver needs @@ -176,7 +174,6 @@ config PCIE_IPROC_MSI config PCIE_ALTERA bool "Altera PCIe controller" depends on ARM || NIOS2 || COMPILE_TEST - select PCI_DOMAINS help Say Y here if you want to enable PCIe controller support on Altera FPGA. diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 16f52c626b4b..91b0194240a5 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -58,7 +58,6 @@ config PCIE_DW_PLAT_HOST depends on PCI && PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST select PCIE_DW_PLAT - default y help Enables support for the PCIe controller in the Designware IP to work in host mode. 
There are two instances of PCIe controller in diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index a1ebe9ed441f..20bb2564a6b3 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -355,11 +355,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) irq = of_irq_get(intc, 0); if (irq <= 0) { dev_err(p->dev, "failed to get parent IRQ\n"); + of_node_put(intc); return irq ?: -EINVAL; } p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, &faraday_pci_irqdomain_ops, p); + of_node_put(intc); if (!p->irqdomain) { dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); return -EINVAL; diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index 874d75c9ee4a..c8febb009454 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c @@ -680,7 +680,11 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie) if (err) return err; - return phy_power_on(pcie->phy); + err = phy_power_on(pcie->phy); + if (err) + phy_exit(pcie->phy); + + return err; } static int rcar_msi_alloc(struct rcar_msi *chip) @@ -1165,7 +1169,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) if (rcar_pcie_hw_init(pcie)) { dev_info(dev, "PCIe link down\n"); err = -ENODEV; - goto err_clk_disable; + goto err_phy_shutdown; } data = rcar_pci_read_reg(pcie, MACSR); @@ -1177,7 +1181,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) dev_err(dev, "failed to enable MSI support: %d\n", err); - goto err_clk_disable; + goto err_phy_shutdown; } } @@ -1191,6 +1195,12 @@ static int rcar_pcie_probe(struct platform_device *pdev) if (IS_ENABLED(CONFIG_PCI_MSI)) rcar_pcie_teardown_msi(pcie); +err_phy_shutdown: + if (pcie->phy) { + phy_power_off(pcie->phy); + phy_exit(pcie->phy); + } + err_clk_disable: clk_disable_unprepare(pcie->bus_clk); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 6a4bbb5b3de0..fb32840ce8e6 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -559,7 +559,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) PCI_NUM_INTX, &legacy_domain_ops, pcie); - + of_node_put(legacy_intc_node); if (!pcie->legacy_irq_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index b110a3a814e3..7b1389d8e2a5 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -509,6 +509,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &intx_domain_ops, port); + of_node_put(pcie_intc_node); if (!port->leg_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENODEV; diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 523a8cab3bfb..bf53fad636a5 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -145,10 +145,10 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space); */ void pci_epf_unregister_driver(struct pci_epf_driver *driver) { - struct config_group *group; + struct config_group *group, *tmp; mutex_lock(&pci_epf_mutex); - list_for_each_entry(group, &driver->epf_group, group_entry) + list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) pci_ep_cfs_remove_epf_group(group); list_del(&driver->epf_group); 
mutex_unlock(&pci_epf_mutex); diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 3979f89b250a..5bd6c1573295 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -7,7 +7,6 @@ * All rights reserved. * * Send feedback to - * */ #include @@ -87,8 +86,17 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev) return 0; /* If _OSC exists, we should not evaluate OSHP */ + + /* + * If there's no ACPI host bridge (i.e., ACPI support is compiled + * into the kernel but the hardware platform doesn't support ACPI), + * there's nothing to do here. + */ host = pci_find_host_bridge(pdev->bus); root = acpi_pci_find_root(ACPI_HANDLE(&host->dev)); + if (!root) + return 0; + if (root->osc_support_set) goto no_control; diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index d0d73dbbd5ca..0f04ae648cf1 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -574,6 +574,22 @@ void pci_iov_release(struct pci_dev *dev) sriov_release(dev); } +/** + * pci_iov_remove - clean up SR-IOV state after PF driver is detached + * @dev: the PCI device + */ +void pci_iov_remove(struct pci_dev *dev) +{ + struct pci_sriov *iov = dev->sriov; + + if (!dev->is_physfn) + return; + + iov->driver_max_VFs = iov->total_VFs; + if (iov->num_VFs) + pci_warn(dev, "driver left SR-IOV enabled after remove\n"); +} + /** * pci_iov_update_resource - update a VF BAR * @dev: the PCI device diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 65113b6eed14..89ee6a2b6eb8 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -629,6 +629,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev) { struct acpi_device *adev = ACPI_COMPANION(&dev->dev); + /* + * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over + * system-wide suspend/resume confuses the platform firmware, so avoid + * doing that, unless the bridge has a driver that should take care of + * the PM handling. According to Section 16.1.6 of ACPI 6.2, endpoint + * devices are expected to be in D3 before invoking the S3 entry path + * from the firmware, so they should not be affected by this issue. 
+ */ + if (pci_is_bridge(dev) && !dev->driver && + acpi_target_system_state() != ACPI_STATE_S0) + return true; + if (!adev || !acpi_device_power_manageable(adev)) return false; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index c125d53033c6..6792292b5fc7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -445,6 +445,7 @@ static int pci_device_remove(struct device *dev) } pcibios_free_irq(pci_dev); pci_dev->driver = NULL; + pci_iov_remove(pci_dev); } /* Undo the runtime PM settings in local_pci_probe() */ diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index c358e7a07f3f..882f1f9596df 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -311,6 +311,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev) #ifdef CONFIG_PCI_IOV int pci_iov_init(struct pci_dev *dev); void pci_iov_release(struct pci_dev *dev); +void pci_iov_remove(struct pci_dev *dev); void pci_iov_update_resource(struct pci_dev *dev, int resno); resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); void pci_restore_iov_state(struct pci_dev *dev); @@ -323,6 +324,9 @@ static inline int pci_iov_init(struct pci_dev *dev) } static inline void pci_iov_release(struct pci_dev *dev) +{ +} +static inline void pci_iov_remove(struct pci_dev *dev) { } static inline void pci_restore_iov_state(struct pci_dev *dev) diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 6bdb1dad805f..0e31f1392a53 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id) case PMU_TYPE_IOB: return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id); case PMU_TYPE_IOB_SLOW: - return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id); + return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id); case PMU_TYPE_MCB: return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id); case PMU_TYPE_MC: diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c index 76243caa08c6..b5c880b50bb3 100644 --- a/drivers/pinctrl/actions/pinctrl-owl.c +++ b/drivers/pinctrl/actions/pinctrl-owl.c @@ -333,7 +333,7 @@ static int owl_pin_config_set(struct pinctrl_dev *pctrldev, unsigned long flags; unsigned int param; u32 reg, bit, width, arg; - int ret, i; + int ret = 0, i; info = &pctrl->soc->padinfo[pin]; diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index b601039d6c69..c4aa411f5935 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np) } static int dt_to_map_one_config(struct pinctrl *p, - struct pinctrl_dev *pctldev, + struct pinctrl_dev *hog_pctldev, const char *statename, struct device_node *np_config) { + struct pinctrl_dev *pctldev = NULL; struct device_node *np_pctldev; const struct pinctrl_ops *ops; int ret; @@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p, return -EPROBE_DEFER; } /* If we're creating a hog we can use the passed pctldev */ - if (pctldev && (np_pctldev == p->dev->of_node)) + if (hog_pctldev && (np_pctldev == p->dev->of_node)) { + pctldev = hog_pctldev; break; + } pctldev = get_pinctrl_dev_from_of_node(np_pctldev); if (pctldev) break; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c index ad6da1184c9f..e3f1ab2290fc 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c @@ -1459,6 +1459,9 @@ static int 
mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) struct mtk_pinctrl *hw = gpiochip_get_data(chip); unsigned long eint_n; + if (!hw->eint) + return -ENOTSUPP; + eint_n = offset; return mtk_eint_find_irq(hw->eint, eint_n); @@ -1471,7 +1474,8 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset, unsigned long eint_n; u32 debounce; - if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) + if (!hw->eint || + pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) return -ENOTSUPP; debounce = pinconf_to_config_argument(config); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index b3799695d8db..16ff56f93501 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1000,11 +1000,6 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Unable to get eint resource\n"); - return -ENODEV; - } - pctl->eint->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pctl->eint->base)) return PTR_ERR(pctl->eint->base); diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index b3153c095199..e5647dac0818 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1590,8 +1590,11 @@ static int pcs_save_context(struct pcs_device *pcs) mux_bytes = pcs->width / BITS_PER_BYTE; - if (!pcs->saved_vals) + if (!pcs->saved_vals) { pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC); + if (!pcs->saved_vals) + return -ENOMEM; + } switch (pcs->width) { case 64: @@ -1651,8 +1654,13 @@ static int pinctrl_single_suspend(struct platform_device *pdev, if (!pcs) return -EINVAL; - if (pcs->flags & PCS_CONTEXT_LOSS_OFF) - pcs_save_context(pcs); + if (pcs->flags & PCS_CONTEXT_LOSS_OFF) { + int ret; + + ret = pcs_save_context(pcs); + if (ret < 0) + return ret; + } return pinctrl_force_sleep(pcs->pctl); } diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 767c485af59b..547dbdac9d54 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -221,7 +221,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) } pct = &sysoff->ts[0]; for (i = 0; i < sysoff->n_samples; i++) { - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); pct->sec = ts.tv_sec; pct->nsec = ts.tv_nsec; pct++; @@ -230,7 +230,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) pct->nsec = ts.tv_nsec; pct++; } - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); pct->sec = ts.tv_sec; pct->nsec = ts.tv_nsec; if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff))) diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index 1468a1642b49..e8652c148c52 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -374,7 +374,7 @@ static int qoriq_ptp_probe(struct platform_device *dev) pr_err("ioremap ptp registers failed\n"); goto no_ioremap; } - getnstimeofday64(&now); + ktime_get_real_ts64(&now); ptp_qoriq_settime(&qoriq_ptp->caps, &now); tmr_ctrl = diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 6d4012dd6922..bac1eeb3d312 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -265,8 +265,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) return err; /* full-function RTCs won't have such missing fields */ - if 
(rtc_valid_tm(&alarm->time) == 0) + if (rtc_valid_tm(&alarm->time) == 0) { + rtc_add_offset(rtc, &alarm->time); return 0; + } /* get the "after" timestamp, to detect wrapped fields */ err = rtc_read_time(rtc, &now); @@ -409,7 +411,6 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) if (err) return err; - rtc_subtract_offset(rtc, &alarm->time); scheduled = rtc_tm_to_time64(&alarm->time); /* Make sure we're not setting alarms in the past */ @@ -426,6 +427,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) * over right here, before we set the alarm. */ + rtc_subtract_offset(rtc, &alarm->time); + if (!rtc->ops) err = -ENODEV; else if (!rtc->ops->set_alarm) @@ -467,7 +470,6 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) mutex_unlock(&rtc->ops_lock); - rtc_add_offset(rtc, &alarm->time); return err; } EXPORT_SYMBOL_GPL(rtc_set_alarm); diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index 097a4d4e2aba..1925aaf09093 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c @@ -367,10 +367,8 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, } retval = rtc_register_device(mrst_rtc.rtc); - if (retval) { - retval = PTR_ERR(mrst_rtc.rtc); + if (retval) goto cleanup0; - } dev_dbg(dev, "initialised\n"); return 0; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 73cce3ecb97f..a9f60d0ee02e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -41,6 +41,15 @@ #define DASD_DIAG_MOD "dasd_diag_mod" +static unsigned int queue_depth = 32; +static unsigned int nr_hw_queues = 4; + +module_param(queue_depth, uint, 0444); +MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices"); + +module_param(nr_hw_queues, uint, 0444); +MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices"); + /* * SECTION: exported variables of dasd.c */ @@ -1222,80 +1231,37 @@ static void dasd_hosts_init(struct dentry *base_dentry, device->hosts_dentry = pde; } -/* - * Allocate memory for a channel program with 'cplength' channel - * command words and 'datasize' additional space. There are two - * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed - * memory and 2) dasd_smalloc_request uses the static ccw memory - * that gets allocated for each device. 
- */ -struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, - int datasize, - struct dasd_device *device) -{ - struct dasd_ccw_req *cqr; - - /* Sanity checks */ - BUG_ON(datasize > PAGE_SIZE || - (cplength*sizeof(struct ccw1)) > PAGE_SIZE); - - cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); - if (cqr == NULL) - return ERR_PTR(-ENOMEM); - cqr->cpaddr = NULL; - if (cplength > 0) { - cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), - GFP_ATOMIC | GFP_DMA); - if (cqr->cpaddr == NULL) { - kfree(cqr); - return ERR_PTR(-ENOMEM); - } - } - cqr->data = NULL; - if (datasize > 0) { - cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); - if (cqr->data == NULL) { - kfree(cqr->cpaddr); - kfree(cqr); - return ERR_PTR(-ENOMEM); - } - } - cqr->magic = magic; - set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); - dasd_get_device(device); - return cqr; -} -EXPORT_SYMBOL(dasd_kmalloc_request); - -struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, - int datasize, - struct dasd_device *device) +struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, + struct dasd_device *device, + struct dasd_ccw_req *cqr) { unsigned long flags; - struct dasd_ccw_req *cqr; - char *data; - int size; + char *data, *chunk; + int size = 0; - size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; if (cplength > 0) size += cplength * sizeof(struct ccw1); if (datasize > 0) size += datasize; + if (!cqr) + size += (sizeof(*cqr) + 7L) & -8L; + spin_lock_irqsave(&device->mem_lock, flags); - cqr = (struct dasd_ccw_req *) - dasd_alloc_chunk(&device->ccw_chunks, size); + data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size); spin_unlock_irqrestore(&device->mem_lock, flags); - if (cqr == NULL) + if (!chunk) return ERR_PTR(-ENOMEM); - memset(cqr, 0, sizeof(struct dasd_ccw_req)); - data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); - cqr->cpaddr = NULL; - if (cplength > 0) { - cqr->cpaddr = (struct ccw1 *) data; - data += cplength*sizeof(struct ccw1); - memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); + if (!cqr) { + cqr = (void *) data; + data += (sizeof(*cqr) + 7L) & -8L; + } + memset(cqr, 0, sizeof(*cqr)); + cqr->mem_chunk = chunk; + if (cplength > 0) { + cqr->cpaddr = data; + data += cplength * sizeof(struct ccw1); + memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); } - cqr->data = NULL; if (datasize > 0) { cqr->data = data; memset(cqr->data, 0, datasize); @@ -1307,33 +1273,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, } EXPORT_SYMBOL(dasd_smalloc_request); -/* - * Free memory of a channel program. This function needs to free all the - * idal lists that might have been created by dasd_set_cda and the - * struct dasd_ccw_req itself. - */ -void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) -{ - struct ccw1 *ccw; - - /* Clear any idals used for the request. 
*/ - ccw = cqr->cpaddr; - do { - clear_normalized_cda(ccw); - } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); - kfree(cqr->cpaddr); - kfree(cqr->data); - kfree(cqr); - dasd_put_device(device); -} -EXPORT_SYMBOL(dasd_kfree_request); - void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) { unsigned long flags; spin_lock_irqsave(&device->mem_lock, flags); - dasd_free_chunk(&device->ccw_chunks, cqr); + dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk); spin_unlock_irqrestore(&device->mem_lock, flags); dasd_put_device(device); } @@ -1885,6 +1830,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device, } } +static void __dasd_process_cqr(struct dasd_device *device, + struct dasd_ccw_req *cqr) +{ + char errorstring[ERRORLENGTH]; + + switch (cqr->status) { + case DASD_CQR_SUCCESS: + cqr->status = DASD_CQR_DONE; + break; + case DASD_CQR_ERROR: + cqr->status = DASD_CQR_NEED_ERP; + break; + case DASD_CQR_CLEARED: + cqr->status = DASD_CQR_TERMINATED; + break; + default: + /* internal error 12 - wrong cqr status*/ + snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); + dev_err(&device->cdev->dev, + "An error occurred in the DASD device driver, " + "reason=%s\n", errorstring); + BUG(); + } + if (cqr->callback) + cqr->callback(cqr, cqr->callback_data); +} + /* * the cqrs from the final queue are returned to the upper layer * by setting a dasd_block state and calling the callback function @@ -1895,40 +1867,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device, struct list_head *l, *n; struct dasd_ccw_req *cqr; struct dasd_block *block; - void (*callback)(struct dasd_ccw_req *, void *data); - void *callback_data; - char errorstring[ERRORLENGTH]; list_for_each_safe(l, n, final_queue) { cqr = list_entry(l, struct dasd_ccw_req, devlist); list_del_init(&cqr->devlist); block = cqr->block; - callback = cqr->callback; - callback_data = cqr->callback_data; - if (block) + if (!block) { + __dasd_process_cqr(device, cqr); + } else { spin_lock_bh(&block->queue_lock); - switch (cqr->status) { - case DASD_CQR_SUCCESS: - cqr->status = DASD_CQR_DONE; - break; - case DASD_CQR_ERROR: - cqr->status = DASD_CQR_NEED_ERP; - break; - case DASD_CQR_CLEARED: - cqr->status = DASD_CQR_TERMINATED; - break; - default: - /* internal error 12 - wrong cqr status*/ - snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); - dev_err(&device->cdev->dev, - "An error occurred in the DASD device driver, " - "reason=%s\n", errorstring); - BUG(); - } - if (cqr->callback != NULL) - (callback)(cqr, callback_data); - if (block) + __dasd_process_cqr(device, cqr); spin_unlock_bh(&block->queue_lock); + } } } @@ -3041,7 +2991,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, cqr->callback_data = req; cqr->status = DASD_CQR_FILLED; cqr->dq = dq; - *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr; blk_mq_start_request(req); spin_lock(&block->queue_lock); @@ -3072,7 +3021,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) unsigned long flags; int rc = 0; - cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)); + cqr = blk_mq_rq_to_pdu(req); if (!cqr) return BLK_EH_DONE; @@ -3174,9 +3123,9 @@ static int dasd_alloc_queue(struct dasd_block *block) int rc; block->tag_set.ops = &dasd_mq_ops; - block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *); - block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; - block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; + block->tag_set.cmd_size = 
sizeof(struct dasd_ccw_req); + block->tag_set.nr_hw_queues = nr_hw_queues; + block->tag_set.queue_depth = queue_depth; block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; rc = blk_mq_alloc_tag_set(&block->tag_set); @@ -4038,7 +3987,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, struct ccw1 *ccw; unsigned long *idaw; - cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); + cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device, + NULL); if (IS_ERR(cqr)) { /* internal error 13 - Allocating the RDC request failed*/ diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 5e963fe0e38d..e36a114354fc 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device, int rc; unsigned long flags; - cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, (sizeof(struct dasd_psf_prssd_data)), - device); + device, NULL); if (IS_ERR(cqr)) return PTR_ERR(cqr); cqr->startdev = device; @@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device, lcu->flags |= NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); } - dasd_kfree_request(cqr, cqr->memdev); + dasd_sfree_request(cqr, cqr->memdev); return rc; } diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 131f1989f6f3..e1fe02477ea8 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, /* Build the request */ datasize = sizeof(struct dasd_diag_req) + count*sizeof(struct dasd_diag_bio); - cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev); + cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev, + blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index be208e7adcb4..bbf95b78ef5d 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device, } cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, 0, /* use rcd_buf as data ara */ - device); + device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Could not allocate RCD request"); @@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, (sizeof(struct dasd_psf_prssd_data) + sizeof(struct dasd_rssd_features)), - device); + device, NULL); if (IS_ERR(cqr)) { DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not " "allocate initialization request"); @@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , sizeof(struct dasd_psf_ssc_data), - device); + device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", @@ -1815,7 +1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) cplength = 8; datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device, + NULL); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; @@ -2092,7 +2093,8 @@ 
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, */ itcw_size = itcw_calc_size(0, count, 0); - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, + NULL); if (IS_ERR(cqr)) return cqr; @@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, cplength += count; cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, - startdev); + startdev, NULL); if (IS_ERR(cqr)) return cqr; @@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base, } /* Allocate the format ccw request. */ fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, - datasize, startdev); + datasize, startdev, NULL); if (IS_ERR(fcp)) return fcp; @@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( } /* Allocate the ccw request. */ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, - startdev); + startdev, blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; @@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( /* Allocate the ccw request. */ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, - startdev); + startdev, blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; @@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( /* Allocate the ccw request. */ itcw_size = itcw_calc_size(0, ctidaw, 0); - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, + blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; @@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, /* Allocate the ccw request. 
*/ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, - datasize, startdev); + datasize, startdev, blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; @@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device) return -EACCES; useglobal = 0; - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); if (IS_ERR(cqr)) { mutex_lock(&dasd_reserve_mutex); useglobal = 1; @@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device) return -EACCES; useglobal = 0; - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); if (IS_ERR(cqr)) { mutex_lock(&dasd_reserve_mutex); useglobal = 1; @@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) return -EACCES; useglobal = 0; - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); if (IS_ERR(cqr)) { mutex_lock(&dasd_reserve_mutex); useglobal = 1; @@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device, useglobal = 0; cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, - sizeof(struct dasd_snid_data), device); + sizeof(struct dasd_snid_data), device, + NULL); if (IS_ERR(cqr)) { mutex_lock(&dasd_reserve_mutex); useglobal = 1; @@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, (sizeof(struct dasd_psf_prssd_data) + sizeof(struct dasd_rssd_perf_stats_t)), - device); + device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Could not allocate initialization request"); @@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) psf1 = psf_data[1]; /* setup CCWs for PSF + RSSD */ - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Could not allocate initialization request"); @@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, (sizeof(struct dasd_psf_prssd_data) + sizeof(struct dasd_rssd_messages)), - device); + device, NULL); if (IS_ERR(cqr)) { DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not allocate read message buffer request"); @@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device, cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, sizeof(struct dasd_psf_prssd_data) + 1, - device); + device, NULL); if (IS_ERR(cqr)) { DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not allocate read message buffer request"); @@ -5284,8 +5288,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response, int rc; cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , - sizeof(struct dasd_psf_cuir_response), - device); + sizeof(struct dasd_psf_cuir_response), + device, NULL); if (IS_ERR(cqr)) { DBF_DEV_EVENT(DBF_WARNING, device, "%s", diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 0af8c5295b65..6ef8714dc693 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) * is a new ccw in device->eer_cqr. Free the "old" * snss request now. 
*/ - dasd_kfree_request(cqr, device); + dasd_sfree_request(cqr, device); } /* @@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device) if (rc) goto out; - cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, - SNSS_DATA_SIZE, device); + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, + SNSS_DATA_SIZE, device, NULL); if (IS_ERR(cqr)) { rc = -ENOMEM; cqr = NULL; @@ -505,7 +505,7 @@ int dasd_eer_enable(struct dasd_device *device) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (cqr) - dasd_kfree_request(cqr, device); + dasd_sfree_request(cqr, device); return rc; } @@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device) in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (cqr && !in_use) - dasd_kfree_request(cqr, device); + dasd_sfree_request(cqr, device); } /* diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index a6b132f7e869..56007a3e7f11 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard( datasize = sizeof(struct DE_fba_data) + nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1)); - cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); + cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev, + blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; @@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular( datasize += (count - 1)*sizeof(struct LO_fba_data); } /* Allocate the ccw request. */ - cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); + cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev, + blk_mq_rq_to_pdu(req)); if (IS_ERR(cqr)) return cqr; ccw = cqr->cpaddr; diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 96709b1a7bf8..de6b96036aa4 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -158,40 +158,33 @@ do { \ struct dasd_ccw_req { unsigned int magic; /* Eye catcher */ + int intrc; /* internal error, e.g. from start_IO */ struct list_head devlist; /* for dasd_device request queue */ struct list_head blocklist; /* for dasd_block request queue */ - - /* Where to execute what... */ struct dasd_block *block; /* the originating block device */ struct dasd_device *memdev; /* the device used to allocate this */ struct dasd_device *startdev; /* device the request is started on */ struct dasd_device *basedev; /* base device if no block->base */ void *cpaddr; /* address of ccw or tcw */ + short retries; /* A retry counter */ unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */ char status; /* status of this request */ - short retries; /* A retry counter */ + char lpm; /* logical path mask */ unsigned long flags; /* flags of this request */ struct dasd_queue *dq; - - /* ... and how */ unsigned long starttime; /* jiffies time of request start */ unsigned long expires; /* expiration period in jiffies */ - char lpm; /* logical path mask */ void *data; /* pointer to data area */ - - /* these are important for recovering erroneous requests */ - int intrc; /* internal error, e.g. from start_IO */ struct irb irb; /* device status in case of an error */ struct dasd_ccw_req *refers; /* ERP-chain queueing. 
*/ void *function; /* originating ERP action */ + void *mem_chunk; - /* these are for statistics only */ unsigned long buildclk; /* TOD-clock of request generation */ unsigned long startclk; /* TOD-clock of request start */ unsigned long stopclk; /* TOD-clock of request interrupt */ unsigned long endclk; /* TOD-clock of request termination */ - /* Callback that is called after reaching final status. */ void (*callback)(struct dasd_ccw_req *, void *data); void *callback_data; }; @@ -235,14 +228,6 @@ struct dasd_ccw_req { #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ #define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ -/* - * There is no reliable way to determine the number of available CPUs on - * LPAR but there is no big performance difference between 1 and the - * maximum CPU number. - * 64 is a good trade off performance wise. - */ -#define DASD_NR_HW_QUEUES 64 -#define DASD_MAX_LCU_DEV 256 #define DASD_REQ_PER_DEV 4 /* Signature for error recovery functions. */ @@ -714,19 +699,10 @@ extern const struct block_device_operations dasd_device_operations; extern struct kmem_cache *dasd_page_cache; struct dasd_ccw_req * -dasd_kmalloc_request(int , int, int, struct dasd_device *); -struct dasd_ccw_req * -dasd_smalloc_request(int , int, int, struct dasd_device *); -void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); +dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *); void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); void dasd_wakeup_cb(struct dasd_ccw_req *, void *); -static inline int -dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device) -{ - return set_normalized_cda(ccw, cda); -} - struct dasd_device *dasd_alloc_device(void); void dasd_free_device(struct dasd_device *); diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index a070ef0efe65..f230516abb96 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -5,6 +5,7 @@ # The following is required for define_trace.h to find ./trace.h CFLAGS_trace.o := -I$(src) +CFLAGS_vfio_ccw_fsm.o := -I$(src) obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index dce92b2a895d..dbe7c7ac9ac8 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -23,9 +23,13 @@ #define CCWCHAIN_LEN_MAX 256 struct pfn_array { + /* Starting guest physical I/O address. */ unsigned long pa_iova; + /* Array that stores PFNs of the pages need to pin. */ unsigned long *pa_iova_pfn; + /* Array that receives PFNs of the pages pinned. */ unsigned long *pa_pfn; + /* Number of pages pinned from @pa_iova. */ int pa_nr; }; @@ -46,70 +50,33 @@ struct ccwchain { }; /* - * pfn_array_pin() - pin user pages in memory + * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory * @pa: pfn_array on which to perform the operation * @mdev: the mediated device to perform pin/unpin operations + * @iova: target guest physical address + * @len: number of bytes that should be pinned from @iova * - * Attempt to pin user pages in memory. + * Attempt to allocate memory for PFNs, and pin user pages in memory. * * Usage of pfn_array: - * @pa->pa_iova starting guest physical I/O address. Assigned by caller. - * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated - * by caller. 
- * @pa->pa_pfn array that receives PFNs of the pages pinned. Allocated by - * caller. - * @pa->pa_nr number of pages from @pa->pa_iova to pin. Assigned by - * caller. - * number of pages pinned. Assigned by callee. + * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in + * this structure will be filled in by this function. * * Returns: * Number of pages pinned on success. - * If @pa->pa_nr is 0 or negative, returns 0. + * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially, + * returns -EINVAL. * If no pages were pinned, returns -errno. */ -static int pfn_array_pin(struct pfn_array *pa, struct device *mdev) -{ - int i, ret; - - if (pa->pa_nr <= 0) { - pa->pa_nr = 0; - return 0; - } - - pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; - for (i = 1; i < pa->pa_nr; i++) - pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1; - - ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr, - IOMMU_READ | IOMMU_WRITE, pa->pa_pfn); - - if (ret > 0 && ret != pa->pa_nr) { - vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret); - pa->pa_nr = 0; - return 0; - } - - return ret; -} - -/* Unpin the pages before releasing the memory. */ -static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev) -{ - vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr); - pa->pa_nr = 0; - kfree(pa->pa_iova_pfn); -} - -/* Alloc memory for PFNs, then pin pages with them. */ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, u64 iova, unsigned int len) { - int ret = 0; + int i, ret = 0; if (!len) return 0; - if (pa->pa_nr) + if (pa->pa_nr || pa->pa_iova_pfn) return -EINVAL; pa->pa_iova = iova; @@ -126,16 +93,37 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, return -ENOMEM; pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; - ret = pfn_array_pin(pa, mdev); + pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; + for (i = 1; i < pa->pa_nr; i++) + pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1; - if (ret > 0) - return ret; - else if (!ret) + ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr, + IOMMU_READ | IOMMU_WRITE, pa->pa_pfn); + + if (ret < 0) { + goto err_out; + } else if (ret > 0 && ret != pa->pa_nr) { + vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret); ret = -EINVAL; - - kfree(pa->pa_iova_pfn); + goto err_out; + } return ret; + +err_out: + pa->pa_nr = 0; + kfree(pa->pa_iova_pfn); + pa->pa_iova_pfn = NULL; + + return ret; +} + +/* Unpin the pages before releasing the memory. */ +static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev) +{ + vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr); + pa->pa_nr = 0; + kfree(pa->pa_iova_pfn); } static int pfn_array_table_init(struct pfn_array_table *pat, int nr) @@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp) * This is the chain length not considering any TICs. * You need to do a new round for each TIC target. * + * The program is also validated for absence of not yet supported + * indirect data addressing scenarios. + * * Returns: the length of the ccw chain or -errno. */ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) @@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) do { cnt++; + /* + * As we don't want to fail direct addressing even if the + * orb specified one of the unsupported formats, we defer + * checking for IDAWs in unsupported formats to here. 
+ */ + if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) + return -EOPNOTSUPP; + if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) break; @@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, struct ccw1 *ccw; struct pfn_array_table *pat; unsigned long *idaws; - int idaw_nr; + int ret; ccw = chain->ch_ccw + idx; @@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, * needed when translating a direct ccw to a idal ccw. */ pat = chain->ch_pat + idx; - if (pfn_array_table_init(pat, 1)) - return -ENOMEM; - idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, - ccw->cda, ccw->count); - if (idaw_nr < 0) - return idaw_nr; + ret = pfn_array_table_init(pat, 1); + if (ret) + goto out_init; + + ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count); + if (ret < 0) + goto out_init; /* Translate this direct ccw to a idal ccw. */ - idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL); + idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL); if (!idaws) { - pfn_array_table_unpin_free(pat, cp->mdev); - return -ENOMEM; + ret = -ENOMEM; + goto out_unpin; } ccw->cda = (__u32) virt_to_phys(idaws); ccw->flags |= CCW_FLAG_IDA; @@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, pfn_array_table_idal_create_words(pat, idaws); return 0; + +out_unpin: + pfn_array_table_unpin_free(pat, cp->mdev); +out_init: + ccw->cda = 0; + return ret; } static int ccwchain_fetch_idal(struct ccwchain *chain, @@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain, pat = chain->ch_pat + idx; ret = pfn_array_table_init(pat, idaw_nr); if (ret) - return ret; + goto out_init; /* Translate idal ccw to use new allocated idaws. */ idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL); @@ -603,6 +609,8 @@ static int ccwchain_fetch_idal(struct ccwchain *chain, kfree(idaws); out_unpin: pfn_array_table_unpin_free(pat, cp->mdev); +out_init: + ccw->cda = 0; return ret; } @@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) /* * XXX: * Only support prefetch enable mode now. - * Only support 64bit addressing idal. - * Only support 4k IDAW. */ - if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k) + if (!orb->cmd.pfch) return -EOPNOTSUPP; INIT_LIST_HEAD(&cp->ccwchain_list); @@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) ret = ccwchain_loop_tic(chain, cp); if (ret) cp_unpin_free(cp); + /* It is safe to force: if not set but idals used + * ccwchain_calc_length returns an error. + */ + cp->orb.cmd.c64 = 1; return ret; } diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index ea6a2d0b2894..770fa9cfc310 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) { struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); unsigned long flags; + int rc = -EAGAIN; spin_lock_irqsave(sch->lock, flags); if (!device_is_registered(&sch->dev)) @@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) if (cio_update_schib(sch)) { vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); + rc = 0; goto out_unlock; } @@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) private->state = private->mdev ? 
VFIO_CCW_STATE_IDLE : VFIO_CCW_STATE_STANDBY; } + rc = 0; out_unlock: spin_unlock_irqrestore(sch->lock, flags); - return 0; + return rc; } static struct css_device_id vfio_ccw_sch_ids[] = { diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index 3c800642134e..797a82731159 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -13,6 +13,9 @@ #include "ioasm.h" #include "vfio_ccw_private.h" +#define CREATE_TRACE_POINTS +#include "vfio_ccw_trace.h" + static int fsm_io_helper(struct vfio_ccw_private *private) { struct subchannel *sch; @@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private, */ cio_disable_subchannel(sch); } +inline struct subchannel_id get_schid(struct vfio_ccw_private *p) +{ + return p->sch->schid; +} /* * Deal with the ccw command request from the userspace. @@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, union scsw *scsw = &private->scsw; struct ccw_io_region *io_region = &private->io_region; struct mdev_device *mdev = private->mdev; + char *errstr = "request"; private->state = VFIO_CCW_STATE_BOXED; @@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private, /* Don't try to build a cp if transport mode is specified. */ if (orb->tm.b) { io_region->ret_code = -EOPNOTSUPP; + errstr = "transport mode"; goto err_out; } io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), orb); - if (io_region->ret_code) + if (io_region->ret_code) { + errstr = "cp init"; goto err_out; + } io_region->ret_code = cp_prefetch(&private->cp); if (io_region->ret_code) { + errstr = "cp prefetch"; cp_free(&private->cp); goto err_out; } @@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, /* Start channel program and wait for I/O interrupt. */ io_region->ret_code = fsm_io_helper(private); if (io_region->ret_code) { + errstr = "cp fsm_io_helper"; cp_free(&private->cp); goto err_out; } @@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private, err_out: private->state = VFIO_CCW_STATE_IDLE; + trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private), + io_region->ret_code, errstr); } /* diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h new file mode 100644 index 000000000000..b1da53ddec1f --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_trace.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Tracepoints for vfio_ccw driver + * + * Copyright IBM Corp. 2018 + * + * Author(s): Dong Jia Shi + * Halil Pasic + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM vfio_ccw + +#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ) +#define _VFIO_CCW_TRACE_ + +#include <linux/tracepoint.h> + +TRACE_EVENT(vfio_ccw_io_fctl, + TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr), + TP_ARGS(fctl, schid, errno, errstr), + + TP_STRUCT__entry( + __field(int, fctl) + __field_struct(struct subchannel_id, schid) + __field(int, errno) + __field(char*, errstr) + ), + + TP_fast_assign( + __entry->fctl = fctl; + __entry->schid = schid; + __entry->errno = errno; + __entry->errstr = errstr; + ), + + TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s", + __entry->schid.cssid, + __entry->schid.ssid, + __entry->schid.sch_no, + __entry->fctl, + __entry->errno, + __entry->errstr) +); + +#endif /* _VFIO_CCW_TRACE_ */ + +/* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE vfio_ccw_trace + +#include <trace/define_trace.h> diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 2a5fec55bf60..a246a618f9a4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -829,6 +829,17 @@ struct qeth_trap_id { /*some helper functions*/ #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") +static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, + unsigned int elements) +{ + unsigned int i; + + for (i = 0; i < elements; i++) + memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element)); + buf->element[14].sflags = 0; + buf->element[15].sflags = 0; +} + /** * qeth_get_elements_for_range() - find number of SBALEs to cover range. * @start: Start of the address range. @@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, __u16, __u16, enum qeth_prot_versions); int qeth_set_features(struct net_device *, netdev_features_t); -void qeth_recover_features(struct net_device *dev); +void qeth_enable_hw_features(struct net_device *dev); netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 8e1474f1ffac..d01ac29fd986 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); -static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - enum qeth_qdio_buffer_states newbufstate); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); struct workqueue_struct *qeth_wq; @@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; + unsigned int i; aob = (struct qaob *) phys_to_virt(phys_aob_addr); QETH_CARD_TEXT(card, 5, "haob"); @@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, qeth_notify_skbs(buffer->q, buffer, notification); buffer->aob = NULL; - qeth_clear_output_buffer(buffer->q, buffer, - QETH_QDIO_BUF_HANDLED_DELAYED); + /* Free dangling allocations. The attached skbs are handled by + * qeth_cleanup_handled_pending(). 
+ */ + for (i = 0; + i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); + i++) { + if (aob->sba[i] && buffer->is_header[i]) + kmem_cache_free(qeth_core_header_cache, + (void *) aob->sba[i]); + } + atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); - /* from here on: do not touch buffer anymore */ qdio_release_aob(aob); } @@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, QETH_CARD_TEXT(queue->card, 5, "aob"); QETH_CARD_TEXT_(queue->card, 5, "%lx", virt_to_phys(buffer->aob)); + + /* prepare the queue slot for re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, + QETH_MAX_BUFFER_ELEMENTS(card)); if (qeth_init_qdio_out_buf(queue, bidx)) { QETH_CARD_TEXT(card, 2, "outofbuf"); qeth_schedule_recovery(card); @@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card) goto out; } - ccw_device_get_id(CARD_RDEV(card), &id); + ccw_device_get_id(CARD_DDEV(card), &id); request->resp_buf_len = sizeof(*response); request->resp_version = DIAG26C_VERSION2; request->op_code = DIAG26C_GET_MAC; @@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ NETIF_F_IPV6_CSUM) /** - * qeth_recover_features() - Restore device features after recovery - * @dev: the recovering net_device - * - * Caller must hold rtnl lock. + * qeth_enable_hw_features() - (Re-)Enable HW functions for device features + * @dev: a net_device */ -void qeth_recover_features(struct net_device *dev) +void qeth_enable_hw_features(struct net_device *dev) { - netdev_features_t features = dev->features; struct qeth_card *card = dev->ml_priv; + netdev_features_t features; + rtnl_lock(); + features = dev->features; /* force-off any feature that needs an IPA sequence. * netdev_update_features() will restart them. */ dev->features &= ~QETH_HW_FEATURES; netdev_update_features(dev); - - if (features == dev->features) - return; - dev_warn(&card->gdev->dev, - "Device recovery failed to restore all offload features\n"); + if (features != dev->features) + dev_warn(&card->gdev->dev, + "Device recovery failed to restore all offload features\n"); + rtnl_unlock(); } -EXPORT_SYMBOL_GPL(qeth_recover_features); +EXPORT_SYMBOL_GPL(qeth_enable_hw_features); int qeth_set_features(struct net_device *dev, netdev_features_t features) { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a7cb37da6a21..2487f0aeb165 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) { - enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; int rc; @@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) { - enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? 
IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; int rc; @@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -ERESTARTSYS; } + /* avoid racing against concurrent state change: */ + if (!mutex_trylock(&card->conf_mutex)) + return -EAGAIN; + if (!qeth_card_hw_is_reachable(card)) { ether_addr_copy(dev->dev_addr, addr->sa_data); - return 0; + goto out_unlock; } /* don't register the same address twice */ if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; + goto out_unlock; /* add the new address, switch over, drop the old */ rc = qeth_l2_send_setmac(card, addr->sa_data); if (rc) - return rc; + goto out_unlock; ether_addr_copy(old_addr, dev->dev_addr); ether_addr_copy(dev->dev_addr, addr->sa_data); if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) qeth_l2_remove_mac(card, old_addr); card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - return 0; + +out_unlock: + mutex_unlock(&card->conf_mutex); + return rc; } static void qeth_promisc_to_bridge(struct qeth_card *card) @@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) netif_carrier_off(card->dev); qeth_set_allowed_threads(card, 0xffffffff, 0); + + qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { if (recovery_mode && card->info.type != QETH_CARD_TYPE_OSN) { @@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) } /* this also sets saved unicast addresses */ qeth_l2_set_rx_mode(card->dev); - rtnl_lock(); - qeth_recover_features(card->dev); - rtnl_unlock(); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e7fa479adf47..5905dc63e256 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) netif_carrier_on(card->dev); else netif_carrier_off(card->dev); + + qeth_enable_hw_features(card->dev); if (recover_flag == CARD_STATE_RECOVER) { rtnl_lock(); if (recovery_mode) @@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) else dev_open(card->dev); qeth_l3_set_rx_mode(card->dev); - qeth_recover_features(card->dev); rtnl_unlock(); } qeth_trace_features(card); diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a9831bd37a73..a57f3a7d4748 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev) u32 lun_count, nexus; u32 i, bus, target; u8 expose_flag, attribs; - u8 devtype; lun_count = aac_get_safw_phys_lun_count(dev); @@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev) continue; if (expose_flag != 0) { - devtype = AAC_DEVTYPE_RAID_MEMBER; - goto update_devtype; + dev->hba_map[bus][target].devtype = + AAC_DEVTYPE_RAID_MEMBER; + continue; } if (nexus != 0 && (attribs & 8)) { - devtype = AAC_DEVTYPE_NATIVE_RAW; + dev->hba_map[bus][target].devtype = + AAC_DEVTYPE_NATIVE_RAW; dev->hba_map[bus][target].rmw_nexus = nexus; } else - devtype = AAC_DEVTYPE_ARC_RAW; + dev->hba_map[bus][target].devtype = + AAC_DEVTYPE_ARC_RAW; dev->hba_map[bus][target].scan_counter = dev->scan_counter; aac_set_safw_target_qd(dev, bus, target); - -update_devtype: - 
dev->hba_map[bus][target].devtype = devtype; } } diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 0a9b8b387bd2..02d65dce74e5 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->hrrq[i].allow_interrupts = 0; spin_unlock(&ioa_cfg->hrrq[i]._lock); } - wmb(); /* Set interrupt mask to stop all new interrupts */ if (ioa_cfg->sis64) @@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) ioa_cfg->hrrq[i].allow_interrupts = 1; spin_unlock(&ioa_cfg->hrrq[i]._lock); } - wmb(); if (ioa_cfg->sis64) { /* Set the adapter to the correct endian mode. */ writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0fea2e2326be..1027b0cb7fa3 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess) void qlt_schedule_sess_for_deletion(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; - struct qla_hw_data *ha = sess->vha->hw; unsigned long flags; if (sess->disc_state == DSC_DELETE_PEND) @@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) return; } - spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (sess->deleted == QLA_SESS_DELETED) sess->logout_on_delete = 0; + spin_lock_irqsave(&sess->vha->work_lock, flags); if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + spin_unlock_irqrestore(&sess->vha->work_lock, flags); return; } sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + spin_unlock_irqrestore(&sess->vha->work_lock, flags); sess->disc_state = DSC_DELETE_PEND; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 24d7496cd9e2..364e71861bfd 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void) int k = sdebug_add_host; stop_all_queued(); - free_all_queued(); for (; k; k--) sdebug_remove_adapter(); + free_all_queued(); driver_unregister(&sdebug_driverfs_driver); bus_unregister(&pseudo_lld_bus); root_device_unregister(pseudo_primary); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 1da3d71e9f61..13948102ca29 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3592,7 +3592,7 @@ fc_bsg_job_timeout(struct request *req) /* the blk_end_sync_io() doesn't check the error */ if (inflight) - blk_mq_complete_request(req); + __blk_complete_request(req); return BLK_EH_DONE; } diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 53ae52dbff84..cd2fdac000c9 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ #include #include #include +#include <linux/cred.h> /* for sg_check_file_access() */ #include "scsi.h" #include @@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref); sdev_prefix_printk(prefix, (sdp)->device, \ (sdp)->disk->disk_name, fmt, ##a) +/* + * The SCSI interfaces that use read() and write() as an asynchronous variant of + * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways + * to trigger read() and write() calls from various contexts with elevated + * privileges. This can lead to kernel memory corruption (e.g. 
if these + * interfaces are called through splice()) and privilege escalation inside + * userspace (e.g. if a process with access to such a device passes a file + * descriptor to a SUID binary as stdin/stdout/stderr). + * + * This function provides protection for the legacy API by restricting the + * calling context. + */ +static int sg_check_file_access(struct file *filp, const char *caller) +{ + if (filp->f_cred != current_real_cred()) { + pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", + caller, task_tgid_vnr(current), current->comm); + return -EPERM; + } + if (uaccess_kernel()) { + pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n", + caller, task_tgid_vnr(current), current->comm); + return -EACCES; + } + return 0; +} + static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = filp->private_data; @@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) struct sg_header *old_hdr = NULL; int retval = 0; + /* + * This could cause a response to be stranded. Close the associated + * file descriptor to free up any resources being held. + */ + retval = sg_check_file_access(filp, __func__); + if (retval) + return retval; + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, @@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) struct sg_header old_hdr; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; + int retval; - if (unlikely(uaccess_kernel())) - return -EINVAL; + retval = sg_check_file_access(filp, __func__); + if (retval) + return retval; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index 36f59a1be7e9..61389bdc7926 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) static int scsifront_sdev_configure(struct scsi_device *sdev) { struct vscsifrnt_info *info = shost_priv(sdev->host); + int err; - if (info && current == info->curr) - xenbus_printf(XBT_NIL, info->dev->nodename, + if (info && current == info->curr) { + err = xenbus_printf(XBT_NIL, info->dev->nodename, info->dev_state_path, "%d", XenbusStateConnected); + if (err) { + xenbus_dev_error(info->dev, err, + "%s: writing dev_state_path", __func__); + return err; + } + } return 0; } @@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev) static void scsifront_sdev_destroy(struct scsi_device *sdev) { struct vscsifrnt_info *info = shost_priv(sdev->host); + int err; - if (info && current == info->curr) - xenbus_printf(XBT_NIL, info->dev->nodename, + if (info && current == info->curr) { + err = xenbus_printf(XBT_NIL, info->dev->nodename, info->dev_state_path, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(info->dev, err, + "%s: writing dev_state_path", __func__); + } } static struct scsi_host_template scsifront_sht = { @@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) if (scsi_add_device(info->host, chn, tgt, lun)) { dev_err(&dev->dev, "scsi_add_device\n"); - xenbus_printf(XBT_NIL, dev->nodename, + err = xenbus_printf(XBT_NIL, dev->nodename, info->dev_state_path, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(dev, err, + "%s: 
writing dev_state_path", __func__); } break; case VSCSIFRONT_OP_DEL_LUN: @@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) } break; case VSCSIFRONT_OP_READD_LUN: - if (device_state == XenbusStateConnected) - xenbus_printf(XBT_NIL, dev->nodename, + if (device_state == XenbusStateConnected) { + err = xenbus_printf(XBT_NIL, dev->nodename, info->dev_state_path, "%d", XenbusStateConnected); + if (err) + xenbus_dev_error(dev, err, + "%s: writing dev_state_path", __func__); + } break; default: break; diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index f4e3bd40c72e..6ef18cf8f243 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -39,10 +39,15 @@ #define GPC_M4_PU_PDN_FLG 0x1bc - -#define PGC_MIPI 4 -#define PGC_PCIE 5 -#define PGC_USB_HSIC 8 +/* + * The PGC offset values in Reference Manual + * (Rev. 1, 01/2018 and the older ones) GPC chapter's + * GPC_PGC memory map are incorrect, below offset + * values are from design RTL. + */ +#define PGC_MIPI 16 +#define PGC_PCIE 17 +#define PGC_USB_HSIC 20 #define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40) #define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 9dc02f390ba3..5856e792d09c 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -5,7 +5,8 @@ menu "Qualcomm SoC drivers" config QCOM_COMMAND_DB bool "Qualcomm Command DB" - depends on (ARCH_QCOM && OF) || COMPILE_TEST + depends on ARCH_QCOM || COMPILE_TEST + depends on OF_RESERVED_MEM help Command DB queries shared memory by key string for shared system resources. Platform drivers that require to set state of a shared diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index 95120acc4d80..50d03d8b4f9a 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c @@ -194,11 +194,12 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd) static bool has_cpg_mstp; -static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) +static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) { struct generic_pm_domain *genpd = &pd->genpd; const char *name = pd->genpd.name; struct dev_power_governor *gov = &simple_qos_governor; + int error; if (pd->flags & PD_CPU) { /* @@ -251,7 +252,11 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) rcar_sysc_power_up(&pd->ch); finalize: - pm_genpd_init(genpd, gov, false); + error = pm_genpd_init(genpd, gov, false); + if (error) + pr_err("Failed to init PM domain %s: %d\n", name, error); + + return error; } static const struct of_device_id rcar_sysc_matches[] __initconst = { @@ -375,6 +380,9 @@ static int __init rcar_sysc_pd_init(void) pr_debug("%pOF: syscier = 0x%08x\n", np, syscier); iowrite32(syscier, base + SYSCIER); + /* + * First, create all PM domains + */ for (i = 0; i < info->num_areas; i++) { const struct rcar_sysc_area *area = &info->areas[i]; struct rcar_sysc_pd *pd; @@ -397,14 +405,29 @@ static int __init rcar_sysc_pd_init(void) pd->ch.isr_bit = area->isr_bit; pd->flags = area->flags; - rcar_sysc_pd_setup(pd); - if (area->parent >= 0) - pm_genpd_add_subdomain(domains->domains[area->parent], - &pd->genpd); + error = rcar_sysc_pd_setup(pd); + if (error) + goto out_put; domains->domains[area->isr_bit] = &pd->genpd; } + /* + * Second, link all PM domains to their parents + */ + for (i = 0; i < info->num_areas; i++) { + const struct rcar_sysc_area *area = &info->areas[i]; + + if (!area->name || area->parent < 0) + continue; + + 
error = pm_genpd_add_subdomain(domains->domains[area->parent], + domains->domains[area->isr_bit]); + if (error) + pr_warn("Failed to add PM subdomain %s to parent %u\n", + area->name, area->parent); + } + error = of_genpd_add_provider_onecell(np, &domains->onecell_data); out_put: diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index e8c440329708..31db510018a9 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -30,7 +30,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap, struct page **tmp = pages; if (!pages) - return NULL; + return ERR_PTR(-ENOMEM); if (buffer->flags & ION_FLAG_CACHED) pgprot = PAGE_KERNEL; diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c index ea194aa01a64..257b0daff01f 100644 --- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c +++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c @@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev, /* Make sure D/A update mode is direct update */ outb(0, dev->iobase + DAQP_AUX_REG); - for (i = 0; i > insn->n; i++) { + for (i = 0; i < insn->n; i++) { unsigned int val = data[i]; int ret; diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c index 45c05527a57a..faf4b4158cfa 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ap.c +++ b/drivers/staging/rtl8723bs/core/rtw_ap.c @@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) return _FAIL; - if (len > MAX_IE_SZ) + if (len < 0 || len > MAX_IE_SZ) return _FAIL; pbss_network->IELength = len; diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c index 7947edb239a1..88ba5b2fea6a 100644 --- a/drivers/staging/rtlwifi/rtl8822be/hw.c +++ b/drivers/staging/rtlwifi/rtl8822be/hw.c @@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw) return; pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp); - pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7)); + pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3); pci_read_config_byte(rtlpci->pdev, 0x719, &tmp); pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4)); diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h index 012fb618840b..a45f0eb69d3f 100644 --- a/drivers/staging/rtlwifi/wifi.h +++ b/drivers/staging/rtlwifi/wifi.h @@ -88,6 +88,7 @@ #define RTL_USB_MAX_RX_COUNT 100 #define QBSS_LOAD_SIZE 5 #define MAX_WMMELE_LENGTH 64 +#define ASPM_L1_LATENCY 7 #define TOTAL_CAM_ENTRY 32 diff --git a/drivers/staging/typec/Kconfig b/drivers/staging/typec/Kconfig index 3aa981fbc8f5..e45ed08a5166 100644 --- a/drivers/staging/typec/Kconfig +++ b/drivers/staging/typec/Kconfig @@ -11,6 +11,7 @@ config TYPEC_TCPCI config TYPEC_RT1711H tristate "Richtek RT1711H Type-C chip driver" + depends on I2C select TYPEC_TCPCI help Richtek RT1711H Type-C chip driver that works with diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 01ac306131c1..10db5656fd5d 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) * Check for overflow of 8byte PRI READ_KEYS payload and * next reservation key list descriptor. 
*/ - if ((add_len + 8) > (cmd->data_length - 8)) - break; - - put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); - off += 8; + if (off + 8 <= cmd->data_length) { + put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); + off += 8; + } + /* + * SPC5r17: 6.16.2 READ KEYS service action + * The ADDITIONAL LENGTH field indicates the number of bytes in + * the Reservation key list. The contents of the ADDITIONAL + * LENGTH field are not altered based on the allocation length + */ add_len += 8; } spin_unlock(&dev->t10_pr.registration_lock); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 7f96dfa32b9c..d8dc3d22051f 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev, } static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, - bool bidi) + bool bidi, uint32_t read_len) { struct se_cmd *se_cmd = cmd->se_cmd; int i, dbi; @@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, for_each_sg(data_sg, sg, data_nents, i) { int sg_remaining = sg->length; to = kmap_atomic(sg_page(sg)) + sg->offset; - while (sg_remaining > 0) { + while (sg_remaining > 0 && read_len > 0) { if (block_remaining == 0) { if (from) kunmap_atomic(from); @@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, } copy_bytes = min_t(size_t, sg_remaining, block_remaining); + if (read_len < copy_bytes) + copy_bytes = read_len; offset = DATA_BLOCK_SIZE - block_remaining; tcmu_flush_dcache_range(from, copy_bytes); memcpy(to + sg->length - sg_remaining, from + offset, @@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, sg_remaining -= copy_bytes; block_remaining -= copy_bytes; + read_len -= copy_bytes; } kunmap_atomic(to - sg->offset); + if (read_len == 0) + break; } if (from) kunmap_atomic(from); @@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * { struct se_cmd *se_cmd = cmd->se_cmd; struct tcmu_dev *udev = cmd->tcmu_dev; + bool read_len_valid = false; + uint32_t read_len = se_cmd->data_length; /* * cmd has been completed already from timeout, just reclaim @@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", cmd->se_cmd); entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; - } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { + goto done; + } + + if (se_cmd->data_direction == DMA_FROM_DEVICE && + (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { + read_len_valid = true; + if (entry->rsp.read_len < read_len) + read_len = entry->rsp.read_len; + } + + if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); - } else if (se_cmd->se_cmd_flags & SCF_BIDI) { + if (!read_len_valid ) + goto done; + else + se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; + } + if (se_cmd->se_cmd_flags & SCF_BIDI) { /* Get Data-In buffer before clean up */ - gather_data_area(udev, cmd, true); + gather_data_area(udev, cmd, true, read_len); } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { - gather_data_area(udev, cmd, false); + gather_data_area(udev, cmd, false, read_len); } else if (se_cmd->data_direction == DMA_TO_DEVICE) { /* TODO: */ } else if (se_cmd->data_direction != DMA_NONE) { @@ -1070,7 +1092,13 @@ static void 
tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * se_cmd->data_direction); } - target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); +done: + if (read_len_valid) { + pr_debug("read_len = %d\n", read_len); + target_complete_cmd_with_length(cmd->se_cmd, + entry->rsp.scsi_status, read_len); + } else + target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); out: cmd->se_cmd = NULL; @@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev) /* Initialise the mailbox of the ring buffer */ mb = udev->mb_addr; mb->version = TCMU_MAILBOX_VERSION; - mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC; + mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN; mb->cmdr_off = CMDR_OFF; mb->cmdr_size = udev->cmdr_size; diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index 6281266b8ec0..a923ebdeb73c 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -213,6 +213,10 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr, goto err_free_acl; } ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); + if (!ret) { + /* Notify userspace about the change */ + kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE); + } mutex_unlock(&tb->lock); err_free_acl: diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cbe98bc2b998..431742201709 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -124,6 +124,8 @@ struct n_tty_data { struct mutex output_lock; }; +#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1)) + static inline size_t read_cnt(struct n_tty_data *ldata) { return ldata->read_head - ldata->read_tail; @@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i) static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i) { + smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */ return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; } @@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) static void reset_buffer_flags(struct n_tty_data *ldata) { ldata->read_head = ldata->canon_head = ldata->read_tail = 0; - ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; ldata->commit_head = 0; - ldata->echo_mark = 0; ldata->line_start = 0; ldata->erasing = 0; @@ -617,12 +618,19 @@ static size_t __process_echoes(struct tty_struct *tty) old_space = space = tty_write_room(tty); tail = ldata->echo_tail; - while (ldata->echo_commit != tail) { + while (MASK(ldata->echo_commit) != MASK(tail)) { c = echo_buf(ldata, tail); if (c == ECHO_OP_START) { unsigned char op; int no_space_left = 0; + /* + * Since add_echo_byte() is called without holding + * output_lock, we might see only portion of multi-byte + * operation. 
+ */ + if (MASK(ldata->echo_commit) == MASK(tail + 1)) + goto not_yet_stored; /* * If the buffer byte is the start of a multi-byte * operation, get the next byte, which is either the @@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty) unsigned int num_chars, num_bs; case ECHO_OP_ERASE_TAB: + if (MASK(ldata->echo_commit) == MASK(tail + 2)) + goto not_yet_stored; num_chars = echo_buf(ldata, tail + 2); /* @@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty) /* If the echo buffer is nearly full (so that the possibility exists * of echo overrun before the next commit), then discard enough * data at the tail to prevent a subsequent overrun */ - while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { + while (ldata->echo_commit > tail && + ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { if (echo_buf(ldata, tail) == ECHO_OP_START) { if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) tail += 3; @@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty) tail++; } + not_yet_stored: ldata->echo_tail = tail; return old_space - space; } @@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty) size_t nr, old, echoed; size_t head; + mutex_lock(&ldata->output_lock); head = ldata->echo_head; ldata->echo_mark = head; old = ldata->echo_commit - ldata->echo_tail; @@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty) * is over the threshold (and try again each time another * block is accumulated) */ nr = head - ldata->echo_tail; - if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK)) + if (nr < ECHO_COMMIT_WATERMARK || + (nr % ECHO_BLOCK > old % ECHO_BLOCK)) { + mutex_unlock(&ldata->output_lock); return; + } - mutex_lock(&ldata->output_lock); ldata->echo_commit = head; echoed = __process_echoes(tty); mutex_unlock(&ldata->output_lock); @@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty) static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata) { - *echo_buf_addr(ldata, ldata->echo_head++) = c; + *echo_buf_addr(ldata, ldata->echo_head) = c; + smp_wmb(); /* Matches smp_rmb() in echo_buf(). */ + ldata->echo_head++; } /** @@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty) } seen_alnums = 0; - while (ldata->read_head != ldata->canon_head) { + while (MASK(ldata->read_head) != MASK(ldata->canon_head)) { head = ldata->read_head; /* erase a single possibly multibyte character */ do { head--; c = read_buf(ldata, head); - } while (is_continuation(c, tty) && head != ldata->canon_head); + } while (is_continuation(c, tty) && + MASK(head) != MASK(ldata->canon_head)); /* do not partially erase */ if (is_continuation(c, tty)) @@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) * This info is used to go back the correct * number of columns. 
*/ - while (tail != ldata->canon_head) { + while (MASK(tail) != MASK(ldata->canon_head)) { tail--; c = read_buf(ldata, tail); if (c == '\t') { @@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) finish_erasing(ldata); echo_char(c, tty); echo_char_raw('\n', ldata); - while (tail != ldata->read_head) { + while (MASK(tail) != MASK(ldata->read_head)) { echo_char(read_buf(ldata, tail), tty); tail++; } @@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty) struct n_tty_data *ldata; /* Currently a malloc failure here can panic */ - ldata = vmalloc(sizeof(*ldata)); + ldata = vzalloc(sizeof(*ldata)); if (!ldata) - goto err; + return -ENOMEM; ldata->overrun_time = jiffies; mutex_init(&ldata->atomic_read_lock); mutex_init(&ldata->output_lock); tty->disc_data = ldata; - reset_buffer_flags(tty->disc_data); - ldata->column = 0; - ldata->canon_column = 0; - ldata->num_overrun = 0; - ldata->no_room = 0; - ldata->lnext = 0; tty->closing = 0; /* indicate buffer work may resume */ clear_bit(TTY_LDISC_HALTED, &tty->flags); n_tty_set_termios(tty, NULL); tty_unthrottle(tty); - return 0; -err: - return -ENOMEM; } static inline int input_available_p(struct tty_struct *tty, int poll) @@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata) tail = ldata->read_tail; nr = head - tail; /* Skip EOF-chars.. */ - while (head != tail) { + while (MASK(head) != MASK(tail)) { if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) && read_buf(ldata, tail) == __DISABLED_CHAR) nr--; diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index df93b727e984..9e59f4788589 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register); static void __exit serdev_exit(void) { bus_unregister(&serdev_bus_type); + ida_destroy(&ctrl_ida); } module_exit(serdev_exit); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 3296a05cda2d..f80a300b5d68 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = { /* multi-io cards handled by parport_serial */ { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */ { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */ - { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */ { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */ - { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */ /* Moxa Smartio MUE boards handled by 8250_moxa */ { PCI_VDEVICE(MOXA, 0x1024), }, diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 1eb1a376a041..15eb6c829d39 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); - vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); + vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_screen_size > (4 << 20)) return -EINVAL; - newscreen = kmalloc(new_screen_size, GFP_USER); + newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) return -ENOMEM; diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index e8f4ac9400ea..5d421d7e8904 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -215,7 +215,20 @@ static ssize_t name_show(struct device *dev, struct device_attribute 
*attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", idev->info->name); + int ret; + + mutex_lock(&idev->info_lock); + if (!idev->info) { + ret = -EINVAL; + dev_err(dev, "the device has been unregistered\n"); + goto out; + } + + ret = sprintf(buf, "%s\n", idev->info->name); + +out: + mutex_unlock(&idev->info_lock); + return ret; } static DEVICE_ATTR_RO(name); @@ -223,7 +236,20 @@ static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", idev->info->version); + int ret; + + mutex_lock(&idev->info_lock); + if (!idev->info) { + ret = -EINVAL; + dev_err(dev, "the device has been unregistered\n"); + goto out; + } + + ret = sprintf(buf, "%s\n", idev->info->version); + +out: + mutex_unlock(&idev->info_lock); + return ret; } static DEVICE_ATTR_RO(version); @@ -415,11 +441,15 @@ EXPORT_SYMBOL_GPL(uio_event_notify); static irqreturn_t uio_interrupt(int irq, void *dev_id) { struct uio_device *idev = (struct uio_device *)dev_id; - irqreturn_t ret = idev->info->handler(irq, idev->info); + irqreturn_t ret; + mutex_lock(&idev->info_lock); + + ret = idev->info->handler(irq, idev->info); if (ret == IRQ_HANDLED) uio_event_notify(idev->info); + mutex_unlock(&idev->info_lock); return ret; } @@ -433,7 +463,6 @@ static int uio_open(struct inode *inode, struct file *filep) struct uio_device *idev; struct uio_listener *listener; int ret = 0; - unsigned long flags; mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); @@ -460,10 +489,16 @@ static int uio_open(struct inode *inode, struct file *filep) listener->event_count = atomic_read(&idev->event); filep->private_data = listener; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); + if (!idev->info) { + mutex_unlock(&idev->info_lock); + ret = -EINVAL; + goto err_alloc_listener; + } + if (idev->info && idev->info->open) ret = idev->info->open(idev->info, inode); - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); if (ret) goto err_infoopen; @@ -495,12 +530,11 @@ static int uio_release(struct inode *inode, struct file *filep) int ret = 0; struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; - unsigned long flags; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); if (idev->info && idev->info->release) ret = idev->info->release(idev->info, inode); - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); module_put(idev->owner); kfree(listener); @@ -513,12 +547,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait) struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; __poll_t ret = 0; - unsigned long flags; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); if (!idev->info || !idev->info->irq) ret = -EIO; - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); if (ret) return ret; @@ -537,12 +570,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf, DECLARE_WAITQUEUE(wait, current); ssize_t retval = 0; s32 event_count; - unsigned long flags; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); if (!idev->info || !idev->info->irq) retval = -EIO; - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); if (retval) return retval; @@ -592,9 +624,13 @@ static ssize_t 
uio_write(struct file *filep, const char __user *buf, struct uio_device *idev = listener->dev; ssize_t retval; s32 irq_on; - unsigned long flags; - spin_lock_irqsave(&idev->info_lock, flags); + mutex_lock(&idev->info_lock); + if (!idev->info) { + retval = -EINVAL; + goto out; + } + if (!idev->info || !idev->info->irq) { retval = -EIO; goto out; @@ -618,7 +654,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, retval = idev->info->irqcontrol(idev->info, irq_on); out: - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); return retval ? retval : sizeof(s32); } @@ -640,10 +676,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) struct page *page; unsigned long offset; void *addr; + int ret = 0; + int mi; - int mi = uio_find_mem_index(vmf->vma); - if (mi < 0) - return VM_FAULT_SIGBUS; + mutex_lock(&idev->info_lock); + if (!idev->info) { + ret = VM_FAULT_SIGBUS; + goto out; + } + + mi = uio_find_mem_index(vmf->vma); + if (mi < 0) { + ret = VM_FAULT_SIGBUS; + goto out; + } /* * We need to subtract mi because userspace uses offset = N*PAGE_SIZE @@ -658,7 +704,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) page = vmalloc_to_page(addr); get_page(page); vmf->page = page; - return 0; + +out: + mutex_unlock(&idev->info_lock); + + return ret; } static const struct vm_operations_struct uio_logical_vm_ops = { @@ -683,6 +733,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma) struct uio_device *idev = vma->vm_private_data; int mi = uio_find_mem_index(vma); struct uio_mem *mem; + if (mi < 0) return -EINVAL; mem = idev->info->mem + mi; @@ -724,30 +775,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) vma->vm_private_data = idev; + mutex_lock(&idev->info_lock); + if (!idev->info) { + ret = -EINVAL; + goto out; + } + mi = uio_find_mem_index(vma); - if (mi < 0) - return -EINVAL; + if (mi < 0) { + ret = -EINVAL; + goto out; + } requested_pages = vma_pages(vma); actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; - if (requested_pages > actual_pages) - return -EINVAL; + if (requested_pages > actual_pages) { + ret = -EINVAL; + goto out; + } if (idev->info->mmap) { ret = idev->info->mmap(idev->info, vma); - return ret; + goto out; } switch (idev->info->mem[mi].memtype) { case UIO_MEM_PHYS: - return uio_mmap_physical(vma); + ret = uio_mmap_physical(vma); + break; case UIO_MEM_LOGICAL: case UIO_MEM_VIRTUAL: - return uio_mmap_logical(vma); + ret = uio_mmap_logical(vma); + break; default: - return -EINVAL; + ret = -EINVAL; } + +out: + mutex_unlock(&idev->info_lock); + return 0; } static const struct file_operations uio_fops = { @@ -865,7 +932,7 @@ int __uio_register_device(struct module *owner, idev->owner = owner; idev->info = info; - spin_lock_init(&idev->info_lock); + mutex_init(&idev->info_lock); init_waitqueue_head(&idev->wait); atomic_set(&idev->event, 0); @@ -902,8 +969,9 @@ int __uio_register_device(struct module *owner, * FDs at the time of unregister and therefore may not be * freed until they are released. 
*/ - ret = request_irq(info->irq, uio_interrupt, - info->irq_flags, info->name, idev); + ret = request_threaded_irq(info->irq, NULL, uio_interrupt, + info->irq_flags, info->name, idev); + if (ret) goto err_request_irq; } @@ -928,7 +996,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device); void uio_unregister_device(struct uio_info *info) { struct uio_device *idev; - unsigned long flags; if (!info || !info->uio_dev) return; @@ -937,14 +1004,14 @@ void uio_unregister_device(struct uio_info *info) uio_free_minor(idev); + mutex_lock(&idev->info_lock); uio_dev_del_attributes(idev); if (info->irq && info->irq != UIO_IRQ_CUSTOM) free_irq(info->irq, idev); - spin_lock_irqsave(&idev->info_lock, flags); idev->info = NULL; - spin_unlock_irqrestore(&idev->info_lock, flags); + mutex_unlock(&idev->info_lock); device_unregister(&idev->dev); diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index af45aa3222b5..4638d9b066be 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -124,8 +124,11 @@ static int host_start(struct ci_hdrc *ci) hcd->power_budget = ci->platdata->power_budget; hcd->tpl_support = ci->platdata->tpl_support; - if (ci->phy || ci->usb_phy) + if (ci->phy || ci->usb_phy) { hcd->skip_phy_initialization = 1; + if (ci->usb_phy) + hcd->usb_phy = ci->usb_phy; + } ehci = hcd_to_ehci(hcd); ehci->caps = ci->hw_bank.cap; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 7b366a6c0b49..998b32d0167e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ .driver_info = SINGLE_RX_URB, }, + { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index c55def2f1320..097057d2eacf 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Corsair K70 RGB */ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Corsair Strafe */ + { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + /* Corsair Strafe RGB */ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | USB_QUIRK_DELAY_CTRL_MSG }, diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 4a56ac772a3c..71b3b08ad516 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup { * @frame_list_sz: Frame list size * @desc_gen_cache: Kmem cache for generic descriptors * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors + * @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buf * * These are for peripheral mode: * @@ -1177,6 +1178,8 @@ struct dwc2_hsotg { u32 frame_list_sz; struct kmem_cache *desc_gen_cache; struct kmem_cache *desc_hsisoc_cache; + struct kmem_cache *unaligned_cache; +#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024 #endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */ diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index f0d9ccf1d665..a0f82cca2d9a 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -812,6 +812,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, 
u32 index; u32 maxsize = 0; u32 mask = 0; + u8 pid = 0; maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); @@ -840,7 +841,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, ((len << DEV_DMA_NBYTES_SHIFT) & mask)); if (hs_ep->dir_in) { - desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) & + if (len) + pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket); + else + pid = 1; + desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) & DEV_DMA_ISOC_PID_MASK) | ((len % hs_ep->ep.maxpacket) ? DEV_DMA_SHORT : 0) | @@ -884,6 +889,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) struct dwc2_dma_desc *desc; if (list_empty(&hs_ep->queue)) { + hs_ep->target_frame = TARGET_FRAME_INITIAL; dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__); return; } @@ -2755,8 +2761,6 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep) */ tmp = dwc2_hsotg_read_frameno(hsotg); - dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0); - if (using_desc_dma(hsotg)) { if (ep->target_frame == TARGET_FRAME_INITIAL) { /* Start first ISO Out */ @@ -2817,9 +2821,6 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) tmp = dwc2_hsotg_read_frameno(hsotg); if (using_desc_dma(hsotg)) { - dwc2_hsotg_complete_request(hsotg, hs_ep, - get_ep_head(hs_ep), 0); - hs_ep->target_frame = tmp; dwc2_gadget_incr_frame_num(hs_ep); dwc2_gadget_start_isoc_ddma(hs_ep); @@ -4739,9 +4740,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg) } ret = usb_add_gadget_udc(dev, &hsotg->gadget); - if (ret) + if (ret) { + dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, + hsotg->ctrl_req); return ret; - + } dwc2_hsotg_dump(hsotg); return 0; @@ -4755,6 +4758,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg) int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg) { usb_del_gadget_udc(&hsotg->gadget); + dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req); return 0; } diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index edaf0b6af4f0..b1104be3429c 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, } if (hsotg->params.host_dma) { - dwc2_writel((u32)chan->xfer_dma, - hsotg->regs + HCDMA(chan->hc_num)); + dma_addr_t dma_addr; + + if (chan->align_buf) { + if (dbg_hc(chan)) + dev_vdbg(hsotg->dev, "align_buf\n"); + dma_addr = chan->align_buf; + } else { + dma_addr = chan->xfer_dma; + } + dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num)); + if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", - (unsigned long)chan->xfer_dma, chan->hc_num); + (unsigned long)dma_addr, chan->hc_num); } /* Start the split */ @@ -2625,6 +2634,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, } } +static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh, + struct dwc2_host_chan *chan) +{ + if (!hsotg->unaligned_cache || + chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE) + return -ENOMEM; + + if (!qh->dw_align_buf) { + qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache, + GFP_ATOMIC | GFP_DMA); + if (!qh->dw_align_buf) + return -ENOMEM; + } + + qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf, + DWC2_KMEM_UNALIGNED_BUF_SIZE, + DMA_FROM_DEVICE); + + if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) { + dev_err(hsotg->dev, "can't map align_buf\n"); + chan->align_buf = 0; + return -EINVAL; + } + + chan->align_buf = qh->dw_align_buf_dma; + return 0; +} + #define DWC2_USB_DMA_ALIGN 
4 struct dma_aligned_buffer { @@ -2802,6 +2840,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) /* Set the transfer attributes */ dwc2_hc_init_xfer(hsotg, chan, qtd); + /* For non-dword aligned buffers */ + if (hsotg->params.host_dma && qh->do_split && + chan->ep_is_in && (chan->xfer_dma & 0x3)) { + dev_vdbg(hsotg->dev, "Non-aligned buffer\n"); + if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) { + dev_err(hsotg->dev, + "Failed to allocate memory to handle non-aligned buffer\n"); + /* Add channel back to free list */ + chan->align_buf = 0; + chan->multi_count = 0; + list_add_tail(&chan->hc_list_entry, + &hsotg->free_hc_list); + qtd->in_process = 0; + qh->channel = NULL; + return -ENOMEM; + } + } else { + /* + * We assume that DMA is always aligned in non-split + * case or split out case. Warn if not. + */ + WARN_ON_ONCE(hsotg->params.host_dma && + (chan->xfer_dma & 0x3)); + chan->align_buf = 0; + } + if (chan->ep_type == USB_ENDPOINT_XFER_INT || chan->ep_type == USB_ENDPOINT_XFER_ISOC) /* @@ -5246,6 +5310,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) } } + if (hsotg->params.host_dma) { + /* + * Create kmem caches to handle non-aligned buffer + * in Buffer DMA mode. + */ + hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma", + DWC2_KMEM_UNALIGNED_BUF_SIZE, 4, + SLAB_CACHE_DMA, NULL); + if (!hsotg->unaligned_cache) + dev_err(hsotg->dev, + "unable to create dwc2 unaligned cache\n"); + } + hsotg->otg_port = 1; hsotg->frame_list = NULL; hsotg->frame_list_dma = 0; @@ -5280,8 +5357,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) return 0; error4: - kmem_cache_destroy(hsotg->desc_gen_cache); + kmem_cache_destroy(hsotg->unaligned_cache); kmem_cache_destroy(hsotg->desc_hsisoc_cache); + kmem_cache_destroy(hsotg->desc_gen_cache); error3: dwc2_hcd_release(hsotg); error2: @@ -5322,8 +5400,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) usb_remove_hcd(hcd); hsotg->priv = NULL; - kmem_cache_destroy(hsotg->desc_gen_cache); + kmem_cache_destroy(hsotg->unaligned_cache); kmem_cache_destroy(hsotg->desc_hsisoc_cache); + kmem_cache_destroy(hsotg->desc_gen_cache); dwc2_hcd_release(hsotg); usb_put_hcd(hcd); @@ -5435,7 +5514,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg) dwc2_writel(hprt0, hsotg->regs + HPRT0); /* Wait for the HPRT0.PrtSusp register field to be set */ - if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300)) + if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000)) dev_warn(hsotg->dev, "Suspend wasn't generated\n"); /* @@ -5616,6 +5695,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup, return ret; } + dwc2_hcd_rem_wakeup(hsotg); + hsotg->hibernated = 0; hsotg->bus_suspended = 0; hsotg->lx_state = DWC2_L0; diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 7db1ee7e7a77..5502a501f516 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -76,6 +76,8 @@ struct dwc2_qh; * (micro)frame * @xfer_buf: Pointer to current transfer buffer position * @xfer_dma: DMA address of xfer_buf + * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not + * DWORD aligned * @xfer_len: Total number of bytes to transfer * @xfer_count: Number of bytes transferred so far * @start_pkt_count: Packet count at start of transfer @@ -133,6 +135,7 @@ struct dwc2_host_chan { u8 *xfer_buf; dma_addr_t xfer_dma; + dma_addr_t align_buf; u32 xfer_len; u32 xfer_count; u16 start_pkt_count; @@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time { * speed. 
Note that this is in "schedule slice" which * is tightly packed. * @ntd: Actual number of transfer descriptors in a list + * @dw_align_buf: Used instead of original buffer if its physical address + * is not dword-aligned + * @dw_align_buf_dma: DMA address for dw_align_buf * @qtd_list: List of QTDs for this QH * @channel: Host channel currently processing transfers for this QH * @qh_list_entry: Entry for QH in either the periodic or non-periodic @@ -350,6 +356,8 @@ struct dwc2_qh { struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES]; u32 ls_start_schedule_slice; u16 ntd; + u8 *dw_align_buf; + dma_addr_t dw_align_buf_dma; struct list_head qtd_list; struct dwc2_host_chan *channel; struct list_head qh_list_entry; diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index fbea5e3fb947..ed7f05cf4906 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -942,14 +942,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg, frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index]; len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd, DWC2_HC_XFER_COMPLETE, NULL); - if (!len) { + if (!len && !qtd->isoc_split_offset) { qtd->complete_split = 0; - qtd->isoc_split_offset = 0; return 0; } frame_desc->actual_length += len; + if (chan->align_buf) { + dev_vdbg(hsotg->dev, "non-aligned buffer\n"); + dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma, + DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE); + memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma), + chan->qh->dw_align_buf, len); + } + qtd->isoc_split_offset += len; hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index d7c3d6c776d8..301ced1618f8 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, /* Get the map and adjust if this is a multi_tt hub */ map = qh->dwc_tt->periodic_bitmaps; if (qh->dwc_tt->usb_tt->multi) - map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; + map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1); return map; } @@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) if (qh->desc_list) dwc2_hcd_qh_free_ddma(hsotg, qh); + else if (hsotg->unaligned_cache && qh->dw_align_buf) + kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf); + kfree(qh); } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index ea91310113b9..103807587dc6 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1272,7 +1272,6 @@ static int dwc3_probe(struct platform_device *pdev) if (!dwc->clks) return -ENOMEM; - dwc->num_clks = ARRAY_SIZE(dwc3_core_clks); dwc->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1307,15 +1306,19 @@ static int dwc3_probe(struct platform_device *pdev) if (IS_ERR(dwc->reset)) return PTR_ERR(dwc->reset); - ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks); - if (ret == -EPROBE_DEFER) - return ret; - /* - * Clocks are optional, but new DT platforms should support all clocks - * as required by the DT-binding. - */ - if (ret) - dwc->num_clks = 0; + if (dev->of_node) { + dwc->num_clks = ARRAY_SIZE(dwc3_core_clks); + + ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks); + if (ret == -EPROBE_DEFER) + return ret; + /* + * Clocks are optional, but new DT platforms should support all + * clocks as required by the DT-binding. 
+ */ + if (ret) + dwc->num_clks = 0; + } ret = reset_control_deassert(dwc->reset); if (ret) diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 6b3ccd542bd7..dbeff5e6ad14 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -165,8 +165,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev) reset_control_put(simple->resets); - pm_runtime_put_sync(dev); pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + pm_runtime_set_suspended(dev); return 0; } diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index c961a94d136b..f57e7c94b8e5 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -34,6 +34,7 @@ #define PCI_DEVICE_ID_INTEL_GLK 0x31aa #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e +#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 @@ -289,6 +290,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, { } /* Terminating Entry */ }; diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index b0e67ab2f98c..a6d0203e40b6 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev) qcom->dwc3 = of_find_device_by_node(dwc3_np); if (!qcom->dwc3) { dev_err(&pdev->dev, "failed to get dwc3 platform device\n"); + ret = -ENODEV; goto depopulate; } @@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int dwc3_qcom_pm_suspend(struct device *dev) +static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); int ret = 0; @@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev) return ret; } -static int dwc3_qcom_pm_resume(struct device *dev) +static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); int ret; @@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev) return ret; } -#endif -#ifdef CONFIG_PM -static int dwc3_qcom_runtime_suspend(struct device *dev) +static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); return dwc3_qcom_suspend(qcom); } -static int dwc3_qcom_runtime_resume(struct device *dev) +static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); return dwc3_qcom_resume(qcom); } -#endif static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index f242c2bcea81..d2fa071c21b1 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) */ if (w_value && !f->get_alt) break; + + spin_lock(&cdev->lock); value = f->set_alt(f, w_index, w_value); if (value == USB_GADGET_DELAYED_STATUS) { DBG(cdev, @@ -1728,6 
+1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) DBG(cdev, "delayed_status count %d\n", cdev->delayed_status); } + spin_unlock(&cdev->lock); break; case USB_REQ_GET_INTERFACE: if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index dce9d12c7981..33e2030503fa 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -215,6 +215,7 @@ struct ffs_io_data { struct mm_struct *mm; struct work_struct work; + struct work_struct cancellation_work; struct usb_ep *ep; struct usb_request *req; @@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file) return 0; } +static void ffs_aio_cancel_worker(struct work_struct *work) +{ + struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, + cancellation_work); + + ENTER(); + + usb_ep_dequeue(io_data->ep, io_data->req); +} + static int ffs_aio_cancel(struct kiocb *kiocb) { struct ffs_io_data *io_data = kiocb->private; - struct ffs_epfile *epfile = kiocb->ki_filp->private_data; + struct ffs_data *ffs = io_data->ffs; int value; ENTER(); - spin_lock_irq(&epfile->ffs->eps_lock); - - if (likely(io_data && io_data->ep && io_data->req)) - value = usb_ep_dequeue(io_data->ep, io_data->req); - else + if (likely(io_data && io_data->ep && io_data->req)) { + INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker); + queue_work(ffs->io_completion_wq, &io_data->cancellation_work); + value = -EINPROGRESS; + } else { value = -EINVAL; - - spin_unlock_irq(&epfile->ffs->eps_lock); + } return value; } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig index f0cdf89b8503..83ba8a2eb6af 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig +++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig @@ -2,6 +2,7 @@ config USB_ASPEED_VHUB tristate "Aspeed vHub UDC driver" depends on ARCH_ASPEED || COMPILE_TEST + depends on USB_LIBCOMPOSITE help USB peripheral controller for the Aspeed AST2500 family SoCs supporting the "vHub" functionality and USB2.0 diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index 1fbfd89d0a0f..387f124a8334 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci) return 0; } -static void xhci_do_dbc_stop(struct xhci_hcd *xhci) +static int xhci_do_dbc_stop(struct xhci_hcd *xhci) { struct xhci_dbc *dbc = xhci->dbc; if (dbc->state == DS_DISABLED) - return; + return -1; writel(0, &dbc->regs->control); xhci_dbc_mem_cleanup(xhci); dbc->state = DS_DISABLED; + + return 0; } static int xhci_dbc_start(struct xhci_hcd *xhci) @@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci) static void xhci_dbc_stop(struct xhci_hcd *xhci) { + int ret; unsigned long flags; struct xhci_dbc *dbc = xhci->dbc; struct dbc_port *port = &dbc->port; @@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci) xhci_dbc_tty_unregister_device(xhci); spin_lock_irqsave(&dbc->lock, flags); - xhci_do_dbc_stop(xhci); + ret = xhci_do_dbc_stop(xhci); spin_unlock_irqrestore(&dbc->lock, flags); - pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); + if (!ret) + pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); } static void diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index acbd3d7b8828..ef350c33dc4a 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c 
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring( if (!ep->stream_info) return NULL; - if (stream_id > ep->stream_info->num_streams) + if (stream_id >= ep->stream_info->num_streams) return NULL; return ep->stream_info->stream_rings[stream_id]; } @@ -886,12 +886,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) dev = xhci->devs[slot_id]; - trace_xhci_free_virt_device(dev); - xhci->dcbaa->dev_context_ptrs[slot_id] = 0; if (!dev) return; + trace_xhci_free_virt_device(dev); + if (dev->tt_info) old_active_eps = dev->tt_info->active_eps; diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index a8c1d073cba0..4b463e5202a4 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -481,7 +481,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra, unsigned long mask; unsigned int port; bool idle, enable; - int err; + int err = 0; memset(&rsp, 0, sizeof(rsp)); @@ -1223,10 +1223,10 @@ static int tegra_xusb_probe(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); usb_put_hcd(tegra->hcd); disable_xusbc: - if (!&pdev->dev.pm_domain) + if (!pdev->dev.pm_domain) tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC); disable_xusba: - if (!&pdev->dev.pm_domain) + if (!pdev->dev.pm_domain) tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA); put_padctl: tegra_xusb_padctl_put(tegra->padctl); diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h index 410544ffe78f..88b427434bd8 100644 --- a/drivers/usb/host/xhci-trace.h +++ b/drivers/usb/host/xhci-trace.h @@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue, TP_ARGS(ring, trb) ); +DECLARE_EVENT_CLASS(xhci_log_free_virt_dev, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev), + TP_STRUCT__entry( + __field(void *, vdev) + __field(unsigned long long, out_ctx) + __field(unsigned long long, in_ctx) + __field(u8, fake_port) + __field(u8, real_port) + __field(u16, current_mel) + + ), + TP_fast_assign( + __entry->vdev = vdev; + __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma; + __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma; + __entry->fake_port = (u8) vdev->fake_port; + __entry->real_port = (u8) vdev->real_port; + __entry->current_mel = (u16) vdev->current_mel; + ), + TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d", + __entry->vdev, __entry->in_ctx, __entry->out_ctx, + __entry->fake_port, __entry->real_port, __entry->current_mel + ) +); + +DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev) +); + DECLARE_EVENT_CLASS(xhci_log_virt_dev, TP_PROTO(struct xhci_virt_device *vdev), TP_ARGS(vdev), @@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device, TP_ARGS(vdev) ); -DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device, - TP_PROTO(struct xhci_virt_device *vdev), - TP_ARGS(vdev) -); - DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device, TP_PROTO(struct xhci_virt_device *vdev), TP_ARGS(vdev) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 8c8da2d657fa..2f4850f25e82 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -908,6 +908,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) spin_unlock_irqrestore(&xhci->lock, flags); } +static bool xhci_pending_portevent(struct xhci_hcd *xhci) +{ + struct xhci_port **ports; + int port_index; + u32 status; + u32 portsc; + + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) + return true; + /* 
+ * Checking STS_EINT is not enough as there is a lag between a change + * bit being set and the Port Status Change Event that it generated + * being written to the Event Ring. See note in xhci 1.1 section 4.19.2. + */ + + port_index = xhci->usb2_rhub.num_ports; + ports = xhci->usb2_rhub.ports; + while (port_index--) { + portsc = readl(ports[port_index]->addr); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + port_index = xhci->usb3_rhub.num_ports; + ports = xhci->usb3_rhub.ports; + while (port_index--) { + portsc = readl(ports[port_index]->addr); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + return false; +} + /* * Stop HC (not bus-specific) * @@ -1009,7 +1044,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend); */ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { - u32 command, temp = 0, status; + u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; @@ -1043,8 +1078,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) command = readl(&xhci->op_regs->command); command |= CMD_CRS; writel(command, &xhci->op_regs->command); + /* + * Some controllers take up to 55+ ms to complete the controller + * restore so setting the timeout to 100ms. Xhci specification + * doesn't mention any timeout value. + */ if (xhci_handshake(&xhci->op_regs->status, - STS_RESTORE, 0, 10 * 1000)) { + STS_RESTORE, 0, 100 * 1000)) { xhci_warn(xhci, "WARN: xHC restore state timeout\n"); spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; @@ -1134,8 +1174,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { /* Resume root hubs only when have pending events. */ - status = readl(&xhci->op_regs->status); - if (status & STS_EINT) { + if (xhci_pending_portevent(xhci)) { usb_hcd_resume_root_hub(xhci->shared_hcd); usb_hcd_resume_root_hub(hcd); } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 939e2f86b595..841e89ffe2e9 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -382,6 +382,10 @@ struct xhci_op_regs { #define PORT_PLC (1 << 22) /* port configure error change - port failed to configure its link partner */ #define PORT_CEC (1 << 23) +#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ + PORT_RC | PORT_PLC | PORT_CEC) + + /* Cold Attach Status - xHC can set this bit to report device attached during * Sx state. Warm port reset should be perfomed to clear this bit and move port * to connected state. 
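For reference, a minimal standalone sketch of the PORTSC test that the new xhci_pending_portevent() performs: a port counts as pending when any change bit in PORT_CHANGE_MASK is latched or its link state field reads Resume. The constant values mirror drivers/usb/host/xhci.h; the helper name and the flat portsc[] array are illustrative only and not part of the patch above.

#include <stdbool.h>
#include <stdint.h>

/* PORTSC bit layout, mirroring drivers/usb/host/xhci.h */
#define PORT_CSC	(1 << 17)	/* connect status change */
#define PORT_PEC	(1 << 18)	/* port enable/disable change */
#define PORT_WRC	(1 << 19)	/* warm port reset change */
#define PORT_OCC	(1 << 20)	/* over-current change */
#define PORT_RC		(1 << 21)	/* port reset change */
#define PORT_PLC	(1 << 22)	/* port link state change */
#define PORT_CEC	(1 << 23)	/* port config error change */
#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
			  PORT_RC | PORT_PLC | PORT_CEC)
#define PORT_PLS_MASK	(0xf << 5)	/* port link state field */
#define XDEV_RESUME	(0xf << 5)	/* link state encoding for Resume */

/* True if any root-hub port has a latched change or is signalling resume. */
static bool any_port_event_pending(const uint32_t *portsc, unsigned int nports)
{
	unsigned int i;

	for (i = 0; i < nports; i++) {
		if ((portsc[i] & PORT_CHANGE_MASK) ||
		    (portsc[i] & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

xhci_resume() falls back to this per-port scan because, as the comment in the hunk above notes, STS_EINT can still be clear for a short window after a port change bit has been latched.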
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 8abb6cbbd98a..3be40eaa1ac9 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct usb_yurex *dev; - int retval = 0; - int bytes_read = 0; + int len = 0; char in_buffer[20]; unsigned long flags; @@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, mutex_lock(&dev->io_mutex); if (!dev->interface) { /* already disconnected */ - retval = -ENODEV; - goto exit; + mutex_unlock(&dev->io_mutex); + return -ENODEV; } spin_lock_irqsave(&dev->lock, flags); - bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu); + len = snprintf(in_buffer, 20, "%lld\n", dev->bbu); spin_unlock_irqrestore(&dev->lock, flags); - - if (*ppos < bytes_read) { - if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos)) - retval = -EFAULT; - else { - retval = bytes_read - *ppos; - *ppos += bytes_read; - } - } - -exit: mutex_unlock(&dev->io_mutex); - return retval; + + return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); } static ssize_t yurex_write(struct file *file, const char __user *user_buffer, diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index bdd7a5ad3bf1..3bb1fff02bed 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev, r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, buf, bufsize, DEFAULT_TIMEOUT); - if (r < bufsize) { + if (r < (int)bufsize) { if (r >= 0) { dev_err(&dev->dev, "short control message received (%d < %u)\n", diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index eb6c26cbe579..626a29d9aa58 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ + { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */ + { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */ + { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ @@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ + { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */ + { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */ + { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */ { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ @@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x84B6) 
}, /* Starizona Hyperion */ + { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */ { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ + { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ @@ -134,17 +142,24 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ + { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ + { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ + { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ + { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */ + { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */ { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 5169624d8b11..38d43c4b7ce5 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial, 3, /* get pins */ USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN, 0, 0, data, 1, 2000); - if (rc >= 0) + if (rc == 1) *value = *data; + else if (rc >= 0) + rc = -EIO; kfree(data); return rc; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index fdceb46d9fc6..b580b4c7fa48 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb) } dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); + if (urb->actual_length < 1) + goto out; + dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__, mos7840_port->MsrLsr, mos7840_port->port_num); data = urb->transfer_buffer; diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c index 8a201dd53d36..150f43668bec 100644 --- a/drivers/usb/typec/tcpm.c +++ b/drivers/usb/typec/tcpm.c @@ -418,17 +418,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args) u64 ts_nsec = local_clock(); unsigned long rem_nsec; + 
mutex_lock(&port->logbuffer_lock); if (!port->logbuffer[port->logbuffer_head]) { port->logbuffer[port->logbuffer_head] = kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL); - if (!port->logbuffer[port->logbuffer_head]) + if (!port->logbuffer[port->logbuffer_head]) { + mutex_unlock(&port->logbuffer_lock); return; + } } vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args); - mutex_lock(&port->logbuffer_lock); - if (tcpm_log_full(port)) { port->logbuffer_head = max(port->logbuffer_head - 1, 0); strcpy(tmpbuffer, "overflow"); @@ -724,6 +725,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv) tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma); + port->supply_voltage = mv; + port->current_limit = max_ma; + if (port->tcpc->set_current_limit) ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); @@ -2594,8 +2598,6 @@ static void tcpm_reset_port(struct tcpm_port *port) tcpm_set_attached_state(port, false); port->try_src_count = 0; port->try_snk_count = 0; - port->supply_voltage = 0; - port->current_limit = 0; port->usb_type = POWER_SUPPLY_USB_TYPE_C; power_supply_changed(port->psy); @@ -3043,7 +3045,8 @@ static void run_state_machine(struct tcpm_port *port) tcpm_port_is_sink(port) && time_is_after_jiffies(port->delayed_runtime)) { tcpm_set_state(port, SNK_DISCOVERY, - port->delayed_runtime - jiffies); + jiffies_to_msecs(port->delayed_runtime - + jiffies)); break; } tcpm_set_state(port, unattached_state(port), 0); diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index bd5cca5632b3..8d0a6fe748bd 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work) } if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) { + typec_set_pwr_role(con->port, con->status.pwr_dir); + + switch (con->status.partner_type) { + case UCSI_CONSTAT_PARTNER_TYPE_UFP: + typec_set_data_role(con->port, TYPEC_HOST); + break; + case UCSI_CONSTAT_PARTNER_TYPE_DFP: + typec_set_data_role(con->port, TYPEC_DEVICE); + break; + default: + break; + } + if (con->status.connected) ucsi_register_partner(con); else diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 44eb4e1ea817..a18112a83fae 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev) return -ENODEV; } + /* This will make sure we can use ioremap_nocache() */ + status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1); + if (ACPI_FAILURE(status)) + return -ENOMEM; + /* * NOTE: The memory region for the data structures is used also in an * operation region, which means ACPI has already reserved it. Therefore diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 24ee2605b9f0..42dc1d3d71cf 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -28,5 +28,13 @@ config VFIO_PCI_INTX def_bool y if !S390 config VFIO_PCI_IGD - depends on VFIO_PCI - def_bool y if X86 + bool "VFIO PCI extensions for Intel graphics (GVT-d)" + depends on VFIO_PCI && X86 + default y + help + Support for Intel IGD specific extensions to enable direct + assignment to virtual machines. This includes exposing an IGD + specific firmware table and read-only copies of the host bridge + and LPC bridge config space. + + To enable Intel IGD assignment through vfio-pci, say Y. 
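One note on the tcpm.c SNK_DISCOVERY hunk above: tcpm_set_state() expects its delay in milliseconds, while port->delayed_runtime is a jiffies timestamp, so the remaining wait has to be converted rather than passed as a raw jiffies difference. A minimal sketch of that conversion, with an illustrative helper name of our own choosing:

#include <linux/jiffies.h>

/*
 * Remaining delay, in milliseconds, until a jiffies deadline expires.
 * The patch open-codes the same jiffies_to_msecs() conversion when it
 * re-arms SNK_DISCOVERY.
 */
static unsigned int remaining_delay_ms(unsigned long deadline)
{
	if (time_is_before_eq_jiffies(deadline))
		return 0;	/* deadline already passed, nothing left to wait */

	return jiffies_to_msecs(deadline - jiffies);
}

Passing the raw jiffies difference, as the earlier code did, makes the delay too short on any configuration with HZ below 1000.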
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 2c75b33db4ac..3e5b17710a4f 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, struct page *page[1]; struct vm_area_struct *vma; struct vm_area_struct *vmas[1]; + unsigned int flags = 0; int ret; + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + down_read(&mm->mmap_sem); if (mm == current->mm) { - ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE), - page, vmas); + ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas); } else { - unsigned int flags = 0; - - if (prot & IOMMU_WRITE) - flags |= FOLL_WRITE; - - down_read(&mm->mmap_sem); ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, vmas, NULL); /* @@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, ret = -EOPNOTSUPP; put_page(page[0]); } - up_read(&mm->mmap_sem); } + up_read(&mm->mmap_sem); if (ret == 1) { *pfn = page_to_pfn(page[0]); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 686dc670fd29..29756d88799b 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1226,7 +1226,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) if (ubufs) vhost_net_ubuf_put_wait_and_free(ubufs); err_ubufs: - sockfd_put(sock); + if (sock) + sockfd_put(sock); err_vq: mutex_unlock(&vq->mutex); err: diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 451e833f5931..48b154276179 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -41,4 +41,4 @@ obj-$(CONFIG_XEN_PVCALLS_FRONTEND) += pvcalls-front.o xen-evtchn-y := evtchn.o xen-gntdev-y := gntdev.o xen-gntalloc-y := gntalloc.o -xen-privcmd-y := privcmd.o +xen-privcmd-y := privcmd.o privcmd-buf.o diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 762378f1811c..08e4af04d6f2 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq) xen_irq_info_cleanup(info); } - BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND); - xen_free_irq(irq); } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 2473b0a9e6e4..ba9f3eec2bd0 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -799,7 +799,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages) return 0; } -EXPORT_SYMBOL(gnttab_alloc_pages); +EXPORT_SYMBOL_GPL(gnttab_alloc_pages); /** * gnttab_free_pages - free pages allocated by gnttab_alloc_pages() @@ -820,7 +820,7 @@ void gnttab_free_pages(int nr_pages, struct page **pages) } free_xenballooned_pages(nr_pages, pages); } -EXPORT_SYMBOL(gnttab_free_pages); +EXPORT_SYMBOL_GPL(gnttab_free_pages); /* Handling of paged out grant targets (GNTST_eagain) */ #define MAX_DELAY 256 diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 8835065029d3..c93d8ef8df34 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -289,8 +289,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path, return; } - if (sysrq_key != '\0') - xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); + if (sysrq_key != '\0') { + err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); + if (err) { + pr_err("%s: Error %d writing sysrq in control/sysrq\n", + __func__, err); + xenbus_transaction_end(xbt, 1); + return; + } + } err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) @@ -342,7 +349,12 @@ static int 
setup_shutdown_watcher(void) continue; snprintf(node, FEATURE_PATH_SIZE, "feature-%s", shutdown_handlers[idx].command); - xenbus_printf(XBT_NIL, "control", node, "%u", 1); + err = xenbus_printf(XBT_NIL, "control", node, "%u", 1); + if (err) { + pr_err("%s: Error %d writing %s\n", __func__, + err, node); + return err; + } } return 0; diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c new file mode 100644 index 000000000000..df1ed37c3269 --- /dev/null +++ b/drivers/xen/privcmd-buf.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT + +/****************************************************************************** + * privcmd-buf.c + * + * Mmap of hypercall buffers. + * + * Copyright (c) 2018 Juergen Gross + */ + +#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include "privcmd.h" + +MODULE_LICENSE("GPL"); + +static unsigned int limit = 64; +module_param(limit, uint, 0644); +MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by " + "the privcmd-buf device per open file"); + +struct privcmd_buf_private { + struct mutex lock; + struct list_head list; + unsigned int allocated; +}; + +struct privcmd_buf_vma_private { + struct privcmd_buf_private *file_priv; + struct list_head list; + unsigned int users; + unsigned int n_pages; + struct page *pages[]; +}; + +static int privcmd_buf_open(struct inode *ino, struct file *file) +{ + struct privcmd_buf_private *file_priv; + + file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); + if (!file_priv) + return -ENOMEM; + + mutex_init(&file_priv->lock); + INIT_LIST_HEAD(&file_priv->list); + + file->private_data = file_priv; + + return 0; +} + +static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv) +{ + unsigned int i; + + vma_priv->file_priv->allocated -= vma_priv->n_pages; + + list_del(&vma_priv->list); + + for (i = 0; i < vma_priv->n_pages; i++) + if (vma_priv->pages[i]) + __free_page(vma_priv->pages[i]); + + kfree(vma_priv); +} + +static int privcmd_buf_release(struct inode *ino, struct file *file) +{ + struct privcmd_buf_private *file_priv = file->private_data; + struct privcmd_buf_vma_private *vma_priv; + + mutex_lock(&file_priv->lock); + + while (!list_empty(&file_priv->list)) { + vma_priv = list_first_entry(&file_priv->list, + struct privcmd_buf_vma_private, + list); + privcmd_buf_vmapriv_free(vma_priv); + } + + mutex_unlock(&file_priv->lock); + + kfree(file_priv); + + return 0; +} + +static void privcmd_buf_vma_open(struct vm_area_struct *vma) +{ + struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data; + + if (!vma_priv) + return; + + mutex_lock(&vma_priv->file_priv->lock); + vma_priv->users++; + mutex_unlock(&vma_priv->file_priv->lock); +} + +static void privcmd_buf_vma_close(struct vm_area_struct *vma) +{ + struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data; + struct privcmd_buf_private *file_priv; + + if (!vma_priv) + return; + + file_priv = vma_priv->file_priv; + + mutex_lock(&file_priv->lock); + + vma_priv->users--; + if (!vma_priv->users) + privcmd_buf_vmapriv_free(vma_priv); + + mutex_unlock(&file_priv->lock); +} + +static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf) +{ + pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", + vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end, + vmf->pgoff, (void *)vmf->address); + + return VM_FAULT_SIGBUS; +} + +static const struct vm_operations_struct privcmd_buf_vm_ops = { + .open = privcmd_buf_vma_open, + .close = privcmd_buf_vma_close, + 
.fault = privcmd_buf_vma_fault, +}; + +static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct privcmd_buf_private *file_priv = file->private_data; + struct privcmd_buf_vma_private *vma_priv; + unsigned long count = vma_pages(vma); + unsigned int i; + int ret = 0; + + if (!(vma->vm_flags & VM_SHARED) || count > limit || + file_priv->allocated + count > limit) + return -EINVAL; + + vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *), + GFP_KERNEL); + if (!vma_priv) + return -ENOMEM; + + vma_priv->n_pages = count; + count = 0; + for (i = 0; i < vma_priv->n_pages; i++) { + vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!vma_priv->pages[i]) + break; + count++; + } + + mutex_lock(&file_priv->lock); + + file_priv->allocated += count; + + vma_priv->file_priv = file_priv; + vma_priv->users = 1; + + vma->vm_flags |= VM_IO | VM_DONTEXPAND; + vma->vm_ops = &privcmd_buf_vm_ops; + vma->vm_private_data = vma_priv; + + list_add(&vma_priv->list, &file_priv->list); + + if (vma_priv->n_pages != count) + ret = -ENOMEM; + else + for (i = 0; i < vma_priv->n_pages; i++) { + ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE, + vma_priv->pages[i]); + if (ret) + break; + } + + if (ret) + privcmd_buf_vmapriv_free(vma_priv); + + mutex_unlock(&file_priv->lock); + + return ret; +} + +const struct file_operations xen_privcmdbuf_fops = { + .owner = THIS_MODULE, + .open = privcmd_buf_open, + .release = privcmd_buf_release, + .mmap = privcmd_buf_mmap, +}; +EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops); + +struct miscdevice xen_privcmdbuf_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "xen/hypercall", + .fops = &xen_privcmdbuf_fops, +}; diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 8ae0349d9f0a..7e6e682104dc 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -1007,12 +1007,21 @@ static int __init privcmd_init(void) pr_err("Could not register Xen privcmd device\n"); return err; } + + err = misc_register(&xen_privcmdbuf_dev); + if (err != 0) { + pr_err("Could not register Xen hypercall-buf device\n"); + misc_deregister(&privcmd_dev); + return err; + } + return 0; } static void __exit privcmd_exit(void) { misc_deregister(&privcmd_dev); + misc_deregister(&xen_privcmdbuf_dev); } module_init(privcmd_init); diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h index 14facaeed36f..0dd9f8f67ee3 100644 --- a/drivers/xen/privcmd.h +++ b/drivers/xen/privcmd.h @@ -1,3 +1,6 @@ #include extern const struct file_operations xen_privcmd_fops; +extern const struct file_operations xen_privcmdbuf_fops; + +extern struct miscdevice xen_privcmdbuf_dev; diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 7bc88fd43cfc..e2f3e8b0fba9 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, { struct v2p_entry *entry; unsigned long flags; + int err; if (try) { spin_lock_irqsave(&info->v2p_lock, flags); @@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, scsiback_del_translation_entry(info, vir); } } else if (!try) { - xenbus_printf(XBT_NIL, info->dev->nodename, state, + err = xenbus_printf(XBT_NIL, info->dev->nodename, state, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(info->dev, err, + "%s: writing %s", __func__, state); } } @@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op, snprintf(str, sizeof(str), 
"vscsi-devs/%s/p-dev", ent); val = xenbus_read(XBT_NIL, dev->nodename, str, NULL); if (IS_ERR(val)) { - xenbus_printf(XBT_NIL, dev->nodename, state, + err = xenbus_printf(XBT_NIL, dev->nodename, state, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(info->dev, err, + "%s: writing %s", __func__, state); return; } strlcpy(phy, val, VSCSI_NAMELEN); @@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op, err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u", &vir.hst, &vir.chn, &vir.tgt, &vir.lun); if (XENBUS_EXIST_ERR(err)) { - xenbus_printf(XBT_NIL, dev->nodename, state, + err = xenbus_printf(XBT_NIL, dev->nodename, state, "%d", XenbusStateClosed); + if (err) + xenbus_dev_error(info->dev, err, + "%s: writing %s", __func__, state); return; } diff --git a/fs/aio.c b/fs/aio.c index e1d20124ec0e..210df9da1283 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -5,7 +5,6 @@ * Implements an efficient asynchronous io interface. * * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. - * Copyright 2018 Christoph Hellwig. * * See ../COPYING for licensing terms. */ @@ -165,22 +164,10 @@ struct fsync_iocb { bool datasync; }; -struct poll_iocb { - struct file *file; - __poll_t events; - struct wait_queue_head *head; - - union { - struct wait_queue_entry wait; - struct work_struct work; - }; -}; - struct aio_kiocb { union { struct kiocb rw; struct fsync_iocb fsync; - struct poll_iocb poll; }; struct kioctx *ki_ctx; @@ -1590,6 +1577,7 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync) if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)) return -EINVAL; + req->file = fget(iocb->aio_fildes); if (unlikely(!req->file)) return -EBADF; @@ -1604,137 +1592,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync) return 0; } -/* need to use list_del_init so we can check if item was present */ -static inline bool __aio_poll_remove(struct poll_iocb *req) -{ - if (list_empty(&req->wait.entry)) - return false; - list_del_init(&req->wait.entry); - return true; -} - -static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask) -{ - fput(iocb->poll.file); - aio_complete(iocb, mangle_poll(mask), 0); -} - -static void aio_poll_work(struct work_struct *work) -{ - struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work); - - if (!list_empty_careful(&iocb->ki_list)) - aio_remove_iocb(iocb); - __aio_poll_complete(iocb, iocb->poll.events); -} - -static int aio_poll_cancel(struct kiocb *iocb) -{ - struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); - struct poll_iocb *req = &aiocb->poll; - struct wait_queue_head *head = req->head; - bool found = false; - - spin_lock(&head->lock); - found = __aio_poll_remove(req); - spin_unlock(&head->lock); - - if (found) { - req->events = 0; - INIT_WORK(&req->work, aio_poll_work); - schedule_work(&req->work); - } - return 0; -} - -static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, - void *key) -{ - struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); - struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); - struct file *file = req->file; - __poll_t mask = key_to_poll(key); - - assert_spin_locked(&req->head->lock); - - /* for instances that support it check for an event match first: */ - if (mask && !(mask & req->events)) - return 0; - - mask = file->f_op->poll_mask(file, req->events) & req->events; - if (!mask) - return 0; - - 
__aio_poll_remove(req); - - /* - * Try completing without a context switch if we can acquire ctx_lock - * without spinning. Otherwise we need to defer to a workqueue to - * avoid a deadlock due to the lock order. - */ - if (spin_trylock(&iocb->ki_ctx->ctx_lock)) { - list_del_init(&iocb->ki_list); - spin_unlock(&iocb->ki_ctx->ctx_lock); - - __aio_poll_complete(iocb, mask); - } else { - req->events = mask; - INIT_WORK(&req->work, aio_poll_work); - schedule_work(&req->work); - } - - return 1; -} - -static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb) -{ - struct kioctx *ctx = aiocb->ki_ctx; - struct poll_iocb *req = &aiocb->poll; - __poll_t mask; - - /* reject any unknown events outside the normal event mask. */ - if ((u16)iocb->aio_buf != iocb->aio_buf) - return -EINVAL; - /* reject fields that are not defined for poll */ - if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags) - return -EINVAL; - - req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; - req->file = fget(iocb->aio_fildes); - if (unlikely(!req->file)) - return -EBADF; - if (!file_has_poll_mask(req->file)) - goto out_fail; - - req->head = req->file->f_op->get_poll_head(req->file, req->events); - if (!req->head) - goto out_fail; - if (IS_ERR(req->head)) { - mask = EPOLLERR; - goto done; - } - - init_waitqueue_func_entry(&req->wait, aio_poll_wake); - aiocb->ki_cancel = aio_poll_cancel; - - spin_lock_irq(&ctx->ctx_lock); - spin_lock(&req->head->lock); - mask = req->file->f_op->poll_mask(req->file, req->events) & req->events; - if (!mask) { - __add_wait_queue(req->head, &req->wait); - list_add_tail(&aiocb->ki_list, &ctx->active_reqs); - } - spin_unlock(&req->head->lock); - spin_unlock_irq(&ctx->ctx_lock); -done: - if (mask) - __aio_poll_complete(aiocb, mask); - return 0; -out_fail: - fput(req->file); - return -EINVAL; /* same as no support for IOCB_CMD_POLL */ -} - static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, bool compat) { @@ -1808,9 +1665,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, case IOCB_CMD_FDSYNC: ret = aio_fsync(&req->fsync, &iocb, true); break; - case IOCB_CMD_POLL: - ret = aio_poll(req, &iocb); - break; default: pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode); ret = -EINVAL; diff --git a/fs/autofs/Makefile b/fs/autofs/Makefile index 43fedde15c26..1f85d35ec8b7 100644 --- a/fs/autofs/Makefile +++ b/fs/autofs/Makefile @@ -2,6 +2,6 @@ # Makefile for the linux autofs-filesystem routines. 
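The deleted aio poll code above leans on list_del_init() so that a later list_empty() check can tell whether an entry is still queued. As a minimal, illustrative sketch of that idiom (the struct and function names below are invented for the example and are not part of this patch):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Hypothetical item that may or may not be on a queue. */
	struct demo_item {
		struct list_head entry;	/* must be INIT_LIST_HEAD()ed first */
	};

	static LIST_HEAD(demo_queue);
	static DEFINE_SPINLOCK(demo_lock);

	static void demo_queue_item(struct demo_item *item)
	{
		spin_lock(&demo_lock);
		list_add_tail(&item->entry, &demo_queue);
		spin_unlock(&demo_lock);
	}

	/*
	 * Returns true if the item was still queued.  list_del_init() leaves
	 * the entry pointing at itself, so list_empty(&item->entry) later
	 * distinguishes "removed here" from "already removed" - the same
	 * check the removed __aio_poll_remove() relied on.
	 */
	static bool demo_dequeue_item(struct demo_item *item)
	{
		bool was_queued;

		spin_lock(&demo_lock);
		was_queued = !list_empty(&item->entry);
		if (was_queued)
			list_del_init(&item->entry);
		spin_unlock(&demo_lock);

		return was_queued;
	}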
# -obj-$(CONFIG_AUTOFS_FS) += autofs.o +obj-$(CONFIG_AUTOFS_FS) += autofs4.o -autofs-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o +autofs4-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c index ea4ca1445ab7..86eafda4a652 100644 --- a/fs/autofs/dev-ioctl.c +++ b/fs/autofs/dev-ioctl.c @@ -135,6 +135,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param) cmd); goto out; } + } else { + unsigned int inr = _IOC_NR(cmd); + + if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD || + inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD || + inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) { + err = -EINVAL; + goto out; + } } err = 0; @@ -271,7 +280,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp, dev_t devid; int err, fd; - /* param->path has already been checked */ + /* param->path has been checked in validate_dev_ioctl() */ + if (!param->openmount.devid) return -EINVAL; @@ -433,10 +443,7 @@ static int autofs_dev_ioctl_requester(struct file *fp, dev_t devid; int err = -ENOENT; - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) { - err = -EINVAL; - goto out; - } + /* param->path has been checked in validate_dev_ioctl() */ devid = sbi->sb->s_dev; @@ -521,10 +528,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp, unsigned int devid, magic; int err = -ENOENT; - if (param->size <= AUTOFS_DEV_IOCTL_SIZE) { - err = -EINVAL; - goto out; - } + /* param->path has been checked in validate_dev_ioctl() */ name = param->path; type = param->ismountpoint.in.type; diff --git a/fs/autofs/init.c b/fs/autofs/init.c index cc9447e1903f..79ae07d9592f 100644 --- a/fs/autofs/init.c +++ b/fs/autofs/init.c @@ -23,7 +23,7 @@ static struct file_system_type autofs_fs_type = { .kill_sb = autofs_kill_sb, }; MODULE_ALIAS_FS("autofs"); -MODULE_ALIAS("autofs4"); +MODULE_ALIAS("autofs"); static int __init init_autofs_fs(void) { diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 0ac456b52bdd..816cc921cf36 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1259,9 +1259,8 @@ static int load_elf_library(struct file *file) goto out_free_ph; } - len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + - ELF_MIN_ALIGN - 1); - bss = eppnt->p_memsz + eppnt->p_vaddr; + len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr); + bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr); if (bss > len) { error = vm_brk(len, bss - len); if (error) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index cce6087d6880..e55843f536bc 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4542,8 +4542,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, offset_in_extent = em_start - em->start; em_end = extent_map_end(em); em_len = em_end - em_start; - disko = em->block_start + offset_in_extent; flags = 0; + if (em->block_start < EXTENT_MAP_LAST_BYTE) + disko = em->block_start + offset_in_extent; + else + disko = 0; /* * bump off for our next call to get_extent diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e9482f0db9d0..eba61bcb9bb3 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -9005,13 +9005,14 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) unlock_extent_cached(io_tree, page_start, page_end, &cached_state); -out_unlock: if (!ret2) { btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true); sb_end_pagefault(inode->i_sb); extent_changeset_free(data_reserved); return VM_FAULT_LOCKED; } + +out_unlock: unlock_page(page); out: btrfs_delalloc_release_extents(BTRFS_I(inode), 
PAGE_SIZE, (ret != 0)); @@ -9443,6 +9444,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, u64 new_idx = 0; u64 root_objectid; int ret; + int ret2; bool root_log_pinned = false; bool dest_log_pinned = false; @@ -9639,7 +9641,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, dest_log_pinned = false; } } - ret = btrfs_end_transaction(trans); + ret2 = btrfs_end_transaction(trans); + ret = ret ? ret : ret2; out_notrans: if (new_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index c2837a32d689..43ecbe620dea 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3577,7 +3577,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN, dst, dst_loff, &cmp); if (ret) - goto out_unlock; + goto out_free; loff += BTRFS_MAX_DEDUPE_LEN; dst_loff += BTRFS_MAX_DEDUPE_LEN; @@ -3587,16 +3587,16 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff, &cmp); +out_free: + kvfree(cmp.src_pages); + kvfree(cmp.dst_pages); + out_unlock: if (same_inode) inode_unlock(src); else btrfs_double_inode_unlock(src, dst); -out_free: - kvfree(cmp.src_pages); - kvfree(cmp.dst_pages); - return ret; } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 1874a6d2e6f5..c25dc47210a3 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2680,8 +2680,10 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, free_extent_buffer(scratch_leaf); } - if (done && !ret) + if (done && !ret) { ret = 1; + fs_info->qgroup_rescan_progress.objectid = (u64)-1; + } return ret; } @@ -2784,13 +2786,20 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, if (!init_flags) { /* we're resuming qgroup rescan at mount time */ - if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)) + if (!(fs_info->qgroup_flags & + BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { btrfs_warn(fs_info, "qgroup rescan init failed, qgroup is not enabled"); - else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) + ret = -EINVAL; + } else if (!(fs_info->qgroup_flags & + BTRFS_QGROUP_STATUS_FLAG_ON)) { btrfs_warn(fs_info, "qgroup rescan init failed, qgroup rescan is not queued"); - return -EINVAL; + ret = -EINVAL; + } + + if (ret) + return ret; } mutex_lock(&fs_info->qgroup_rescan_lock); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index ee764ac352ab..a866be999216 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1135,6 +1135,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in) if (IS_ERR(realdn)) { pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", PTR_ERR(realdn), dn, in, ceph_vinop(in)); + dput(dn); dn = realdn; /* note realdn contains the error */ goto out; } else if (realdn) { diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index bd78da59a4fd..c923c7854027 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -423,7 +423,7 @@ struct smb_version_operations { void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int, bool *); /* create lease context buffer for CREATE request */ - char * (*create_lease_buf)(u8 *, u8); + char * (*create_lease_buf)(u8 *lease_key, u8 oplock); /* parse lease context buffer and return oplock/epoch info */ __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey); ssize_t (*copychunk_range)(const unsigned int, @@ -1416,6 +1416,7 @@ typedef int (mid_handle_t)(struct 
TCP_Server_Info *server, /* one of these for every pending CIFS request to the server */ struct mid_q_entry { struct list_head qhead; /* mids waiting on reply from this server */ + struct kref refcount; struct TCP_Server_Info *server; /* server corresponding to this mid */ __u64 mid; /* multiplex id */ __u32 pid; /* process id */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 03018be17283..1890f534c88b 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -82,6 +82,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server); extern void DeleteMidQEntry(struct mid_q_entry *midEntry); extern void cifs_delete_mid(struct mid_q_entry *mid); +extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry); extern void cifs_wake_up_task(struct mid_q_entry *mid); extern int cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index d352da325de3..93408eab92e7 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -157,8 +157,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) * greater than cifs socket timeout which is 7 seconds */ while (server->tcpStatus == CifsNeedReconnect) { - wait_event_interruptible_timeout(server->response_q, - (server->tcpStatus != CifsNeedReconnect), 10 * HZ); + rc = wait_event_interruptible_timeout(server->response_q, + (server->tcpStatus != CifsNeedReconnect), + 10 * HZ); + if (rc < 0) { + cifs_dbg(FYI, "%s: aborting reconnect due to a received" + " signal by the process\n", __func__); + return -ERESTARTSYS; + } /* are we still trying to reconnect? */ if (server->tcpStatus != CifsNeedReconnect) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a57da1b88bdf..5df2c0698cda 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -924,6 +924,7 @@ cifs_demultiplex_thread(void *p) server->pdu_size = next_offset; } + mid_entry = NULL; if (server->ops->is_transform_hdr && server->ops->receive_transform && server->ops->is_transform_hdr(buf)) { @@ -938,8 +939,11 @@ cifs_demultiplex_thread(void *p) length = mid_entry->receive(server, mid_entry); } - if (length < 0) + if (length < 0) { + if (mid_entry) + cifs_mid_q_entry_release(mid_entry); continue; + } if (server->large_buf) buf = server->bigbuf; @@ -956,6 +960,8 @@ cifs_demultiplex_thread(void *p) if (!mid_entry->multiRsp || mid_entry->multiEnd) mid_entry->callback(mid_entry); + + cifs_mid_q_entry_release(mid_entry); } else if (server->ops->is_oplock_break && server->ops->is_oplock_break(buf, server)) { cifs_dbg(FYI, "Received oplock break\n"); diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index aff8ce8ba34d..646dcd149de1 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -107,6 +107,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer) if (compare_mid(mid->mid, buf) && mid->mid_state == MID_REQUEST_SUBMITTED && le16_to_cpu(mid->command) == buf->Command) { + kref_get(&mid->refcount); spin_unlock(&GlobalMid_Lock); return mid; } diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 788412675723..4ed10dd086e6 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -41,7 +41,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, int rc; __le16 *smb2_path; struct smb2_file_all_info *smb2_data = NULL; - __u8 smb2_oplock[17]; + __u8 smb2_oplock; struct cifs_fid *fid = oparms->fid; struct network_resiliency_req nr_ioctl_req; @@ -59,12 +59,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms 
*oparms, } oparms->desired_access |= FILE_READ_ATTRIBUTES; - *smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH; + smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH; - if (oparms->tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) - memcpy(smb2_oplock + 1, fid->lease_key, SMB2_LEASE_KEY_SIZE); - - rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL, + rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, NULL); if (rc) goto out; @@ -101,7 +98,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, move_smb2_info_to_cifs(buf, smb2_data); } - *oplock = *smb2_oplock; + *oplock = smb2_oplock; out: kfree(smb2_data); kfree(smb2_path); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 0356b5559c71..ea92a38b2f08 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -203,6 +203,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf) if ((mid->mid == wire_mid) && (mid->mid_state == MID_REQUEST_SUBMITTED) && (mid->command == shdr->Command)) { + kref_get(&mid->refcount); spin_unlock(&GlobalMid_Lock); return mid; } @@ -855,6 +856,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea, len); + kfree(ea); + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); return rc; @@ -2219,8 +2222,7 @@ smb2_create_lease_buf(u8 *lease_key, u8 oplock) if (!buf) return NULL; - buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key)); - buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8))); + memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); buf->lcontext.LeaseState = map_oplock_to_lease(oplock); buf->ccontext.DataOffset = cpu_to_le16(offsetof @@ -2246,8 +2248,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock) if (!buf) return NULL; - buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key)); - buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8))); + memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE); buf->lcontext.LeaseState = map_oplock_to_lease(oplock); buf->ccontext.DataOffset = cpu_to_le16(offsetof @@ -2284,8 +2285,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS) return SMB2_OPLOCK_LEVEL_NOCHANGE; if (lease_key) - memcpy(lease_key, &lc->lcontext.LeaseKeyLow, - SMB2_LEASE_KEY_SIZE); + memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); return le32_to_cpu(lc->lcontext.LeaseState); } @@ -2521,7 +2521,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq, if (!tr_hdr) goto err_free_iov; - orig_len = smb2_rqst_len(old_rq, false); + orig_len = smb_rqst_len(server, old_rq); /* fill the 2nd iov with a transform header */ fill_transform_hdr(tr_hdr, orig_len, old_rq); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 810b85787c91..3c92678cb45b 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -155,7 +155,7 @@ smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd, static int smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) { - int rc = 0; + int rc; struct nls_table *nls_codepage; struct cifs_ses *ses; struct TCP_Server_Info *server; @@ -166,10 +166,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) * for those three - in the calling routine. 
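The mid_q_entry hunks in the cifs changes above add a kref so that the demultiplex thread and the request issuer can each hold the structure safely. A rough, self-contained sketch of that reference-counting pattern, with names invented for illustration rather than taken from fs/cifs:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_req {
		struct kref refcount;
		/* ... request state ... */
	};

	static void demo_req_release(struct kref *kref)
	{
		struct demo_req *req = container_of(kref, struct demo_req,
						    refcount);

		kfree(req);
	}

	static struct demo_req *demo_req_alloc(void)
	{
		struct demo_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

		if (req)
			kref_init(&req->refcount);	/* refcount = 1 */
		return req;
	}

	/* A lookup path takes its own reference before dropping the lock. */
	static struct demo_req *demo_req_get(struct demo_req *req)
	{
		kref_get(&req->refcount);
		return req;
	}

	/* Issuer and receive path each drop their reference here. */
	static void demo_req_put(struct demo_req *req)
	{
		kref_put(&req->refcount, demo_req_release);
	}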
*/ if (tcon == NULL) - return rc; + return 0; if (smb2_command == SMB2_TREE_CONNECT) - return rc; + return 0; if (tcon->tidStatus == CifsExiting) { /* @@ -212,8 +212,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) return -EAGAIN; } - wait_event_interruptible_timeout(server->response_q, - (server->tcpStatus != CifsNeedReconnect), 10 * HZ); + rc = wait_event_interruptible_timeout(server->response_q, + (server->tcpStatus != CifsNeedReconnect), + 10 * HZ); + if (rc < 0) { + cifs_dbg(FYI, "%s: aborting reconnect due to a received" + " signal by the process\n", __func__); + return -ERESTARTSYS; + } /* are we still trying to reconnect? */ if (server->tcpStatus != CifsNeedReconnect) @@ -231,7 +237,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) } if (!tcon->ses->need_reconnect && !tcon->need_reconnect) - return rc; + return 0; nls_codepage = load_nls_default(); @@ -340,7 +346,10 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, return rc; /* BB eventually switch this to SMB2 specific small buf size */ - *request_buf = cifs_small_buf_get(); + if (smb2_command == SMB2_SET_INFO) + *request_buf = cifs_buf_get(); + else + *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; @@ -1707,12 +1716,12 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp, static int add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, - unsigned int *num_iovec, __u8 *oplock) + unsigned int *num_iovec, u8 *lease_key, __u8 *oplock) { struct smb2_create_req *req = iov[0].iov_base; unsigned int num = *num_iovec; - iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock); + iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock); if (iov[num].iov_base == NULL) return -ENOMEM; iov[num].iov_len = server->vals->create_lease_size; @@ -2172,7 +2181,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, *oplock == SMB2_OPLOCK_LEVEL_NONE) req->RequestedOplockLevel = *oplock; else { - rc = add_lease_context(server, iov, &n_iov, oplock); + rc = add_lease_context(server, iov, &n_iov, + oparms->fid->lease_key, oplock); if (rc) { cifs_small_buf_release(req); kfree(copy_path); @@ -3720,7 +3730,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); - cifs_small_buf_release(req); + cifs_buf_release(req); rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; if (rc != 0) { diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 824dddeee3f2..a671adcc44a6 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -678,16 +678,14 @@ struct create_context { #define SMB2_LEASE_KEY_SIZE 16 struct lease_context { - __le64 LeaseKeyLow; - __le64 LeaseKeyHigh; + u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; __le32 LeaseState; __le32 LeaseFlags; __le64 LeaseDuration; } __packed; struct lease_context_v2 { - __le64 LeaseKeyLow; - __le64 LeaseKeyHigh; + u8 LeaseKey[SMB2_LEASE_KEY_SIZE]; __le32 LeaseState; __le32 LeaseFlags; __le64 LeaseDuration; diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 3ae208ac2a77..6e6a4f2ec890 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -113,8 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile, extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile); extern void smb2_reconnect_server(struct work_struct *work); extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server); 
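The reconnect paths above stop ignoring the return value of wait_event_interruptible_timeout(). Its convention is: negative when interrupted by a signal, zero when the timeout expires with the condition still false, positive otherwise. A minimal sketch of the intended usage, with a placeholder wait queue and flag (not the actual cifs names):

	#include <linux/wait.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_response_q);
	static bool demo_need_reconnect;

	static int demo_wait_for_reconnect(void)
	{
		long rc;

		rc = wait_event_interruptible_timeout(demo_response_q,
						      !demo_need_reconnect,
						      10 * HZ);
		if (rc < 0)		/* interrupted by a signal */
			return -ERESTARTSYS;
		if (rc == 0)		/* timed out, still not reconnected */
			return -EAGAIN;
		return 0;		/* condition became true */
	}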
-extern unsigned long -smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker); +extern unsigned long smb_rqst_len(struct TCP_Server_Info *server, + struct smb_rqst *rqst); /* * SMB2 Worker functions - most of protocol specific implementation details diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 51b9437c3c7b..719d55e63d88 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -173,6 +173,8 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) struct kvec *iov = rqst->rq_iov; struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base; struct cifs_ses *ses; + struct shash_desc *shash = &server->secmech.sdeschmacsha256->shash; + struct smb_rqst drqst; ses = smb2_find_smb_ses(server, shdr->SessionId); if (!ses) { @@ -190,21 +192,39 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) } rc = crypto_shash_setkey(server->secmech.hmacsha256, - ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE); + ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not update with response\n", __func__); return rc; } - rc = crypto_shash_init(&server->secmech.sdeschmacsha256->shash); + rc = crypto_shash_init(shash); if (rc) { cifs_dbg(VFS, "%s: Could not init sha256", __func__); return rc; } - rc = __cifs_calc_signature(rqst, server, sigptr, - &server->secmech.sdeschmacsha256->shash); + /* + * For SMB2+, __cifs_calc_signature() expects to sign only the actual + * data, that is, iov[0] should not contain a rfc1002 length. + * + * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to + * __cifs_calc_signature(). + */ + drqst = *rqst; + if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) { + rc = crypto_shash_update(shash, iov[0].iov_base, + iov[0].iov_len); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with payload\n", + __func__); + return rc; + } + drqst.rq_iov++; + drqst.rq_nvec--; + } + rc = __cifs_calc_signature(&drqst, server, sigptr, shash); if (!rc) memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); @@ -408,12 +428,14 @@ generate_smb311signingkey(struct cifs_ses *ses) int smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) { - int rc = 0; + int rc; unsigned char smb3_signature[SMB2_CMACAES_SIZE]; unsigned char *sigptr = smb3_signature; struct kvec *iov = rqst->rq_iov; struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base; struct cifs_ses *ses; + struct shash_desc *shash = &server->secmech.sdesccmacaes->shash; + struct smb_rqst drqst; ses = smb2_find_smb_ses(server, shdr->SessionId); if (!ses) { @@ -425,8 +447,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE); rc = crypto_shash_setkey(server->secmech.cmacaes, - ses->smb3signingkey, SMB2_CMACAES_SIZE); - + ses->smb3signingkey, SMB2_CMACAES_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__); return rc; @@ -437,15 +458,33 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) * so unlike smb2 case we do not have to check here if secmech are * initialized */ - rc = crypto_shash_init(&server->secmech.sdesccmacaes->shash); + rc = crypto_shash_init(shash); if (rc) { cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__); return rc; } - rc = __cifs_calc_signature(rqst, server, sigptr, - &server->secmech.sdesccmacaes->shash); + /* + * For SMB2+, __cifs_calc_signature() expects to sign only the actual + * data, that is, iov[0] should not contain a 
rfc1002 length. + * + * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to + * __cifs_calc_signature(). + */ + drqst = *rqst; + if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) { + rc = crypto_shash_update(shash, iov[0].iov_base, + iov[0].iov_len); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with payload\n", + __func__); + return rc; + } + drqst.rq_iov++; + drqst.rq_nvec--; + } + rc = __cifs_calc_signature(&drqst, server, sigptr, shash); if (!rc) memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); @@ -548,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr, temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); memset(temp, 0, sizeof(struct mid_q_entry)); + kref_init(&temp->refcount); temp->mid = le64_to_cpu(shdr->MessageId); temp->pid = current->pid; temp->command = shdr->Command; /* Always LE */ diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index 6fd94d9ffac2..c55ea4e6201b 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -2083,8 +2083,9 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg) * rqst: the data to write * return value: 0 if successfully write, otherwise error code */ -int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) +int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) { + struct smbd_connection *info = server->smbd_conn; struct kvec vec; int nvecs; int size; @@ -2118,7 +2119,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and * ends at page boundary */ - buflen = smb2_rqst_len(rqst, true); + buflen = smb_rqst_len(server, rqst); if (buflen + sizeof(struct smbd_data_transfer) > info->max_fragmented_send_size) { diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h index 1e419c21dc60..a11096254f29 100644 --- a/fs/cifs/smbdirect.h +++ b/fs/cifs/smbdirect.h @@ -292,7 +292,7 @@ void smbd_destroy(struct smbd_connection *info); /* Interface for carrying upper layer I/O through send/recv */ int smbd_recv(struct smbd_connection *info, struct msghdr *msg); -int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst); +int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst); enum mr_state { MR_READY, @@ -332,7 +332,7 @@ static inline void *smbd_get_connection( static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; } static inline void smbd_destroy(struct smbd_connection *info) {} static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; } -static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; } +static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; } #endif #endif diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index fb57dfbfb749..a341ec839c83 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -61,6 +61,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); memset(temp, 0, sizeof(struct mid_q_entry)); + kref_init(&temp->refcount); temp->mid = get_mid(smb_buffer); temp->pid = current->pid; temp->command = cpu_to_le16(smb_buffer->Command); @@ -82,6 +83,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) return temp; } +static void _cifs_mid_q_entry_release(struct kref *refcount) +{ + struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry, + refcount); + + mempool_free(mid, cifs_mid_poolp); +} + +void 
cifs_mid_q_entry_release(struct mid_q_entry *midEntry) +{ + spin_lock(&GlobalMid_Lock); + kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); + spin_unlock(&GlobalMid_Lock); +} + void DeleteMidQEntry(struct mid_q_entry *midEntry) { @@ -110,7 +126,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) } } #endif - mempool_free(midEntry, cifs_mid_poolp); + cifs_mid_q_entry_release(midEntry); } void @@ -202,14 +218,15 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, } unsigned long -smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker) +smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst) { unsigned int i; struct kvec *iov; int nvec; unsigned long buflen = 0; - if (skip_rfc1002_marker && rqst->rq_iov[0].iov_len == 4) { + if (server->vals->header_preamble_size == 0 && + rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) { iov = &rqst->rq_iov[1]; nvec = rqst->rq_nvec - 1; } else { @@ -260,7 +277,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, __be32 rfc1002_marker; if (cifs_rdma_enabled(server) && server->smbd_conn) { - rc = smbd_send(server->smbd_conn, rqst); + rc = smbd_send(server, rqst); goto smbd_done; } if (ssocket == NULL) @@ -271,7 +288,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, (char *)&val, sizeof(val)); for (j = 0; j < num_rqst; j++) - send_length += smb2_rqst_len(&rqst[j], true); + send_length += smb_rqst_len(server, &rqst[j]); rfc1002_marker = cpu_to_be32(send_length); /* Generate a rfc1002 marker for SMB2+ */ diff --git a/fs/eventfd.c b/fs/eventfd.c index ceb1031f1cac..08d3bd602f73 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -101,20 +101,14 @@ static int eventfd_release(struct inode *inode, struct file *file) return 0; } -static struct wait_queue_head * -eventfd_get_poll_head(struct file *file, __poll_t events) -{ - struct eventfd_ctx *ctx = file->private_data; - - return &ctx->wqh; -} - -static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask) +static __poll_t eventfd_poll(struct file *file, poll_table *wait) { struct eventfd_ctx *ctx = file->private_data; __poll_t events = 0; u64 count; + poll_wait(file, &ctx->wqh, wait); + /* * All writes to ctx->count occur within ctx->wqh.lock. 
This read * can be done outside ctx->wqh.lock because we know that poll_wait @@ -156,11 +150,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask) count = READ_ONCE(ctx->count); if (count > 0) - events |= (EPOLLIN & eventmask); + events |= EPOLLIN; if (count == ULLONG_MAX) events |= EPOLLERR; if (ULLONG_MAX - 1 > count) - events |= (EPOLLOUT & eventmask); + events |= EPOLLOUT; return events; } @@ -311,8 +305,7 @@ static const struct file_operations eventfd_fops = { .show_fdinfo = eventfd_show_fdinfo, #endif .release = eventfd_release, - .get_poll_head = eventfd_get_poll_head, - .poll_mask = eventfd_poll_mask, + .poll = eventfd_poll, .read = eventfd_read, .write = eventfd_write, .llseek = noop_llseek, diff --git a/fs/eventpoll.c b/fs/eventpoll.c index ea4436f409fb..67db22fe99c5 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -922,18 +922,14 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head return 0; } -static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file, - __poll_t eventmask) -{ - struct eventpoll *ep = file->private_data; - return &ep->poll_wait; -} - -static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask) +static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait) { struct eventpoll *ep = file->private_data; int depth = 0; + /* Insert inside our poll wait queue */ + poll_wait(file, &ep->poll_wait, wait); + /* * Proceed to find out if wanted events are really available inside * the ready list. @@ -972,8 +968,7 @@ static const struct file_operations eventpoll_fops = { .show_fdinfo = ep_show_fdinfo, #endif .release = ep_eventpoll_release, - .get_poll_head = ep_eventpoll_get_poll_head, - .poll_mask = ep_eventpoll_poll_mask, + .poll = ep_eventpoll_poll, .llseek = noop_llseek, }; diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index cc40802ddfa8..00e759f05161 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -748,7 +748,6 @@ extern void ext2_free_blocks (struct inode *, unsigned long, unsigned long); extern unsigned long ext2_count_free_blocks (struct super_block *); extern unsigned long ext2_count_dirs (struct super_block *); -extern void ext2_check_blocks_bitmap (struct super_block *); extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb, unsigned int block_group, struct buffer_head ** bh); @@ -771,7 +770,6 @@ extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *); extern void ext2_free_inode (struct inode *); extern unsigned long ext2_count_free_inodes (struct super_block *); -extern void ext2_check_inodes_bitmap (struct super_block *); extern unsigned long ext2_count_free (struct buffer_head *, unsigned); /* inode.c */ diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 25ab1274090f..8ff53f8da3bc 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -557,6 +557,9 @@ static int parse_options(char *options, struct super_block *sb, set_opt (opts->s_mount_opt, NO_UID32); break; case Opt_nocheck: + ext2_msg(sb, KERN_WARNING, + "Option nocheck/check=none is deprecated and" + " will be removed in June 2020."); clear_opt (opts->s_mount_opt, CHECK); break; case Opt_debug: @@ -1335,9 +1338,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data) new_opts.s_resgid = sbi->s_resgid; spin_unlock(&sbi->s_lock); - /* - * Allow the "check" option to be passed as a remount option. 
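The eventfd and eventpoll hunks above drop the experimental ->get_poll_head()/->poll_mask() pair and return to a single ->poll() method. For reference, a bare-bones sketch of that classic pattern for a hypothetical character device (all names here are invented):

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/poll.h>
	#include <linux/wait.h>

	struct demo_dev {
		wait_queue_head_t wq;
		bool data_ready;
	};

	static __poll_t demo_poll(struct file *file, poll_table *wait)
	{
		struct demo_dev *dev = file->private_data;
		__poll_t events = 0;

		/* Register the waiter; this call does not block by itself. */
		poll_wait(file, &dev->wq, wait);

		if (dev->data_ready)
			events |= EPOLLIN | EPOLLRDNORM;

		return events;
	}

	/* The producer side wakes pollers after new data arrives. */
	static void demo_post_data(struct demo_dev *dev)
	{
		dev->data_ready = true;
		wake_up_interruptible(&dev->wq);
	}

	static const struct file_operations demo_fops = {
		.owner	= THIS_MODULE,
		.poll	= demo_poll,
	};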
- */ if (!parse_options(data, sb, &new_opts)) return -EINVAL; diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index b00481c475cb..e68cefe08261 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb, unsigned int bit, bit_max; struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t start, tmp; - int flex_bg = 0; J_ASSERT_BH(bh, buffer_locked(bh)); @@ -207,22 +206,19 @@ static int ext4_init_block_bitmap(struct super_block *sb, start = ext4_group_first_block_no(sb, block_group); - if (ext4_has_feature_flex_bg(sb)) - flex_bg = 1; - /* Set bits for block and inode bitmaps, and inode table */ tmp = ext4_block_bitmap(sb, gdp); - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) + if (ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); tmp = ext4_inode_bitmap(sb, gdp); - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) + if (ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); tmp = ext4_inode_table(sb, gdp); for (; tmp < ext4_inode_table(sb, gdp) + sbi->s_itb_per_group; tmp++) { - if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) + if (ext4_block_in_group(sb, tmp, block_group)) ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); } @@ -442,7 +438,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) goto verify; } ext4_lock_group(sb, block_group); - if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { + if (block_group == 0) { + ext4_unlock_group(sb, block_group); + unlock_buffer(bh); + ext4_error(sb, "Block bitmap for bg 0 marked " + "uninitialized"); + err = -EFSCORRUPTED; + goto out; + } err = ext4_init_block_bitmap(sb, bh, block_group, desc); set_bitmap_uptodate(bh); set_buffer_uptodate(bh); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0b127853c584..7c7123f265c2 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1114,6 +1114,7 @@ struct ext4_inode_info { #define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */ #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ +#define EXT4_MOUNT_WARN_ON_ERROR 0x2000000 /* Trigger WARN_ON on error */ #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ @@ -1507,11 +1508,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode) static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) { return ino == EXT4_ROOT_INO || - ino == EXT4_USR_QUOTA_INO || - ino == EXT4_GRP_QUOTA_INO || - ino == EXT4_BOOT_LOADER_INO || - ino == EXT4_JOURNAL_INO || - ino == EXT4_RESIZE_INO || (ino >= EXT4_FIRST_INO(sb) && ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); } @@ -3018,9 +3014,6 @@ extern int ext4_inline_data_fiemap(struct inode *inode, struct iomap; extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap); -extern int ext4_try_to_evict_inline_data(handle_t *handle, - struct inode *inode, - int needed); extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline); extern int ext4_convert_inline_data(struct inode *inode); diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 
98fb0c119c68..adf6668b596f 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -91,6 +91,7 @@ struct ext4_extent_header { }; #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) +#define EXT4_MAX_EXTENT_DEPTH 5 #define EXT4_EXTENT_TAIL_OFFSET(hdr) \ (sizeof(struct ext4_extent_header) + \ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0057fe3f248d..8ce6fd5b10dd 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -869,6 +869,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, eh = ext_inode_hdr(inode); depth = ext_depth(inode); + if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) { + EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d", + depth); + ret = -EFSCORRUPTED; + goto err; + } if (path) { ext4_ext_drop_refs(path); diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index f525f909b559..fb83750c1a14 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -150,7 +150,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) } ext4_lock_group(sb, block_group); - if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) { + if (block_group == 0) { + ext4_unlock_group(sb, block_group); + unlock_buffer(bh); + ext4_error(sb, "Inode bitmap for bg 0 marked " + "uninitialized"); + err = -EFSCORRUPTED; + goto out; + } memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, bh->b_data); @@ -994,7 +1003,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, /* recheck and clear flag under lock if we still need to */ ext4_lock_group(sb, group); - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); ext4_free_group_clusters_set(sb, gdp, ext4_free_clusters_after_init(sb, group, gdp)); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 285ed1588730..e55a8bc870bd 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -437,6 +437,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle, memset((void *)ext4_raw_inode(&is.iloc)->i_block, 0, EXT4_MIN_INLINE_DATA_SIZE); + memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE); if (ext4_has_feature_extents(inode->i_sb)) { if (S_ISDIR(inode->i_mode) || @@ -886,11 +887,11 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping, flags |= AOP_FLAG_NOFS; if (ret == -ENOSPC) { + ext4_journal_stop(handle); ret = ext4_da_convert_inline_data_to_extent(mapping, inode, flags, fsdata); - ext4_journal_stop(handle); if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_journal; @@ -1890,42 +1891,6 @@ int ext4_inline_data_fiemap(struct inode *inode, return (error < 0 ? error : 0); } -/* - * Called during xattr set, and if we can sparse space 'needed', - * just create the extent tree evict the data to the outer block. - * - * We use jbd2 instead of page cache to move data to the 1st block - * so that the whole transaction can be committed as a whole and - * the data isn't lost because of the delayed page cache write. 
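The ext4 hunks above add bounds checks on values read from disk (extent tree depth, group-0 bitmap flags, the first inode number) and fail with -EFSCORRUPTED rather than trusting them. A generic sketch of that defensive style, using a made-up on-disk header purely for illustration:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define DEMO_MAX_DEPTH	5	/* arbitrary bound for the example */

	struct demo_disk_header {
		__le16 depth;		/* value read straight from the media */
	};

	/*
	 * Never index arrays or size allocations with an on-disk field before
	 * bounding it; corrupted or crafted images can contain anything.
	 */
	static int demo_check_header(const struct demo_disk_header *hdr)
	{
		unsigned int depth = le16_to_cpu(hdr->depth);

		if (depth > DEMO_MAX_DEPTH)
			return -EUCLEAN;	/* ext4 spells this -EFSCORRUPTED */

		return 0;
	}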
- */ -int ext4_try_to_evict_inline_data(handle_t *handle, - struct inode *inode, - int needed) -{ - int error; - struct ext4_xattr_entry *entry; - struct ext4_inode *raw_inode; - struct ext4_iloc iloc; - - error = ext4_get_inode_loc(inode, &iloc); - if (error) - return error; - - raw_inode = ext4_raw_inode(&iloc); - entry = (struct ext4_xattr_entry *)((void *)raw_inode + - EXT4_I(inode)->i_inline_off); - if (EXT4_XATTR_LEN(entry->e_name_len) + - EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) { - error = -ENOSPC; - goto out; - } - - error = ext4_convert_inline_data_nolock(handle, inode, &iloc); -out: - brelse(iloc.bh); - return error; -} - int ext4_inline_data_truncate(struct inode *inode, int *has_inline) { handle_t *handle; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2ea07efbe016..7d6c10017bdf 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -402,9 +402,9 @@ static int __check_block_validity(struct inode *inode, const char *func, if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, map->m_len)) { ext4_error_inode(inode, func, line, map->m_pblk, - "lblock %lu mapped to illegal pblock " + "lblock %lu mapped to illegal pblock %llu " "(length %d)", (unsigned long) map->m_lblk, - map->m_len); + map->m_pblk, map->m_len); return -EFSCORRUPTED; } return 0; @@ -4506,7 +4506,8 @@ static int __ext4_get_inode_loc(struct inode *inode, int inodes_per_block, inode_offset; iloc->bh = NULL; - if (!ext4_valid_inum(sb, inode->i_ino)) + if (inode->i_ino < EXT4_ROOT_INO || + inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) return -EFSCORRUPTED; iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 6eae2b91aafa..f7ab34088162 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2423,7 +2423,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, * initialize bb_free to be able to skip * empty groups without initialization */ - if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { meta_group_info[i]->bb_free = ext4_free_clusters_after_init(sb, group, desc); } else { @@ -2989,7 +2990,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, #endif ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len); - if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { + if (ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); ext4_free_group_clusters_set(sb, gdp, ext4_free_clusters_after_init(sb, diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 0c4c2201b3aa..ba2396a7bd04 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -405,6 +405,9 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) static void ext4_handle_error(struct super_block *sb) { + if (test_opt(sb, WARN_ON_ERROR)) + WARN_ON_ONCE(1); + if (sb_rdonly(sb)) return; @@ -740,6 +743,9 @@ __acquires(bitlock) va_end(args); } + if (test_opt(sb, WARN_ON_ERROR)) + WARN_ON_ONCE(1); + if (test_opt(sb, ERRORS_CONT)) { ext4_commit_super(sb, 0); return; @@ -1371,7 +1377,8 @@ enum { Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax, - Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, + Opt_stripe, Opt_delalloc, Opt_nodelalloc, 
Opt_warn_on_error, + Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize, Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, Opt_inode_readahead_blks, Opt_journal_ioprio, @@ -1438,6 +1445,8 @@ static const match_table_t tokens = { {Opt_dax, "dax"}, {Opt_stripe, "stripe=%u"}, {Opt_delalloc, "delalloc"}, + {Opt_warn_on_error, "warn_on_error"}, + {Opt_nowarn_on_error, "nowarn_on_error"}, {Opt_lazytime, "lazytime"}, {Opt_nolazytime, "nolazytime"}, {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"}, @@ -1602,6 +1611,8 @@ static const struct mount_opts { MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_CLEAR}, + {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET}, + {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR}, {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, @@ -2331,6 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb, struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); ext4_fsblk_t last_block; + ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; ext4_fsblk_t block_bitmap; ext4_fsblk_t inode_bitmap; ext4_fsblk_t inode_table; @@ -2363,6 +2375,14 @@ static int ext4_check_descriptors(struct super_block *sb, if (!sb_rdonly(sb)) return 0; } + if (block_bitmap >= sb_block + 1 && + block_bitmap <= last_bg_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Block bitmap for group %u overlaps " + "block group descriptors", i); + if (!sb_rdonly(sb)) + return 0; + } if (block_bitmap < first_block || block_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Block bitmap for group %u not in group " @@ -2377,6 +2397,14 @@ static int ext4_check_descriptors(struct super_block *sb, if (!sb_rdonly(sb)) return 0; } + if (inode_bitmap >= sb_block + 1 && + inode_bitmap <= last_bg_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode bitmap for group %u overlaps " + "block group descriptors", i); + if (!sb_rdonly(sb)) + return 0; + } if (inode_bitmap < first_block || inode_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode bitmap for group %u not in group " @@ -2391,6 +2419,14 @@ static int ext4_check_descriptors(struct super_block *sb, if (!sb_rdonly(sb)) return 0; } + if (inode_table >= sb_block + 1 && + inode_table <= last_bg_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode table for group %u overlaps " + "block group descriptors", i); + if (!sb_rdonly(sb)) + return 0; + } if (inode_table < first_block || inode_table + sbi->s_itb_per_group - 1 > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " @@ -3097,13 +3133,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; struct ext4_group_desc *gdp = NULL; + if (!ext4_has_group_desc_csum(sb)) + return ngroups; + for (group = 0; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) continue; - if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) + if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) + continue; + if (group != 0) break; + ext4_error(sb, "Inode table for bg 0 marked as " + "needing zeroing"); + if (sb_rdonly(sb)) + return ngroups; } return group; @@ -3742,6 +3787,13 @@ static int ext4_fill_super(struct super_block 
*sb, void *data, int silent) le32_to_cpu(es->s_log_block_size)); goto failed_mount; } + if (le32_to_cpu(es->s_log_cluster_size) > + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { + ext4_msg(sb, KERN_ERR, + "Invalid log cluster size: %u", + le32_to_cpu(es->s_log_cluster_size)); + goto failed_mount; + } if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { ext4_msg(sb, KERN_ERR, @@ -3806,6 +3858,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); + if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { + ext4_msg(sb, KERN_ERR, "invalid first ino: %u", + sbi->s_first_ino); + goto failed_mount; + } if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { @@ -3882,13 +3939,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "block size (%d)", clustersize, blocksize); goto failed_mount; } - if (le32_to_cpu(es->s_log_cluster_size) > - (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { - ext4_msg(sb, KERN_ERR, - "Invalid log cluster size: %u", - le32_to_cpu(es->s_log_cluster_size)); - goto failed_mount; - } sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); sbi->s_clusters_per_group = @@ -3909,10 +3959,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } } else { if (clustersize != blocksize) { - ext4_warning(sb, "fragment/cluster size (%d) != " - "block size (%d)", clustersize, - blocksize); - clustersize = blocksize; + ext4_msg(sb, KERN_ERR, + "fragment/cluster size (%d) != " + "block size (%d)", clustersize, blocksize); + goto failed_mount; } if (sbi->s_blocks_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, @@ -3966,6 +4016,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_blocks_count(es)); goto failed_mount; } + if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && + (sbi->s_cluster_ratio == 1)) { + ext4_msg(sb, KERN_WARNING, "bad geometry: first data " + "block is 0 with a 1k block and cluster size"); + goto failed_mount; + } + blocks_count = (ext4_blocks_count(es) - le32_to_cpu(es->s_first_data_block) + EXT4_BLOCKS_PER_GROUP(sb) - 1); @@ -4001,6 +4058,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ret = -ENOMEM; goto failed_mount; } + if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != + le32_to_cpu(es->s_inodes_count)) { + ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", + le32_to_cpu(es->s_inodes_count), + ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); + ret = -EINVAL; + goto failed_mount; + } bgl_lock_init(sbi->s_blockgroup_lock); @@ -4736,6 +4801,14 @@ static int ext4_commit_super(struct super_block *sb, int sync) if (!sbh || block_device_ejected(sb)) return error; + + /* + * The superblock bh should be mapped, but it might not be if the + * device was hot-removed. Not much we can do but fail the I/O. + */ + if (!buffer_mapped(sbh)) + return error; + /* * If the file system is mounted read-only, don't update the * superblock write time. 
This avoids updating the superblock diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index fc4ced59c565..723df14f4084 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -230,12 +230,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh, { int error = -EFSCORRUPTED; - if (buffer_verified(bh)) - return 0; - if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) goto errout; + if (buffer_verified(bh)) + return 0; + error = -EFSBADCRC; if (!ext4_xattr_block_csum_verify(inode, bh)) goto errout; @@ -1560,7 +1560,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, handle_t *handle, struct inode *inode, bool is_block) { - struct ext4_xattr_entry *last; + struct ext4_xattr_entry *last, *next; struct ext4_xattr_entry *here = s->here; size_t min_offs = s->end - s->base, name_len = strlen(i->name); int in_inode = i->in_inode; @@ -1595,7 +1595,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, /* Compute min_offs and last. */ last = s->first; - for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { + for (; !IS_LAST_ENTRY(last); last = next) { + next = EXT4_XATTR_NEXT(last); + if ((void *)next >= s->end) { + EXT4_ERROR_INODE(inode, "corrupted xattr entries"); + ret = -EFSCORRUPTED; + goto out; + } if (!last->e_value_inum && last->e_value_size) { size_t offs = le16_to_cpu(last->e_value_offs); if (offs < min_offs) @@ -2206,23 +2212,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, if (EXT4_I(inode)->i_extra_isize == 0) return -ENOSPC; error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); - if (error) { - if (error == -ENOSPC && - ext4_has_inline_data(inode)) { - error = ext4_try_to_evict_inline_data(handle, inode, - EXT4_XATTR_LEN(strlen(i->name) + - EXT4_XATTR_SIZE(i->value_len))); - if (error) - return error; - error = ext4_xattr_ibody_find(inode, i, is); - if (error) - return error; - error = ext4_xattr_set_entry(i, s, handle, inode, - false /* is_block */); - } - if (error) - return error; - } + if (error) + return error; header = IHDR(inode, ext4_raw_inode(&is->iloc)); if (!IS_LAST_ENTRY(s->first)) { header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); @@ -2651,6 +2642,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode, last = IFIRST(header); /* Find the entry best suited to be pushed into EA block */ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) { + /* never move system.data out of the inode */ + if ((last->e_name_len == 4) && + (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) && + !memcmp(last->e_name, "data", 4)) + continue; total_size = EXT4_XATTR_LEN(last->e_name_len); if (!last->e_value_inum) total_size += EXT4_XATTR_SIZE( diff --git a/fs/inode.c b/fs/inode.c index 2c300e981796..8c86c809ca17 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1999,8 +1999,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir, inode->i_uid = current_fsuid(); if (dir && dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; + + /* Directories are special, and always inherit S_ISGID */ if (S_ISDIR(mode)) mode |= S_ISGID; + else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) && + !in_group_p(inode->i_gid) && + !capable_wrt_inode_uidgid(dir, CAP_FSETID)) + mode &= ~S_ISGID; } else inode->i_gid = current_fsgid(); inode->i_mode = mode; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 51dd68e67b0f..c0b66a7a795b 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1361,6 +1361,13 @@ int 
jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) if (jh->b_transaction == transaction && jh->b_jlist != BJ_Metadata) { jbd_lock_bh_state(bh); + if (jh->b_transaction == transaction && + jh->b_jlist != BJ_Metadata) + pr_err("JBD2: assertion failure: h_type=%u " + "h_line_no=%u block_no=%llu jlist=%u\n", + handle->h_type, handle->h_line_no, + (unsigned long long) bh->b_blocknr, + jh->b_jlist); J_ASSERT_JH(jh, jh->b_transaction != transaction || jh->b_jlist == BJ_Metadata); jbd_unlock_bh_state(bh); @@ -1380,11 +1387,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) * of the transaction. This needs to be done * once a transaction -bzzz */ - jh->b_modified = 1; if (handle->h_buffer_credits <= 0) { ret = -ENOSPC; goto out_unlock_bh; } + jh->b_modified = 1; handle->h_buffer_credits--; } diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index c60f3d32ee91..a6797986b625 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) if (size > PSIZE) { /* * To keep the rest of the code simple. Allocate a - * contiguous buffer to work with + * contiguous buffer to work with. Make the buffer large + * enough to make use of the whole extent. */ - ea_buf->xattr = kmalloc(size, GFP_KERNEL); + ea_buf->max_size = (size + sb->s_blocksize - 1) & + ~(sb->s_blocksize - 1); + + ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL); if (ea_buf->xattr == NULL) return -ENOMEM; ea_buf->flag = EA_MALLOC; - ea_buf->max_size = (size + sb->s_blocksize - 1) & - ~(sb->s_blocksize - 1); if (ea_size == 0) return 0; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index bbd0465535eb..f033f3a69a3b 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -883,8 +883,10 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { res = nfs_delegation_find_inode_server(server, fhandle); - if (res != ERR_PTR(-ENOENT)) + if (res != ERR_PTR(-ENOENT)) { + rcu_read_unlock(); return res; + } } rcu_read_unlock(); return ERR_PTR(-ENOENT); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index d4a07acad598..8f003792ccde 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1243,17 +1243,18 @@ static int ff_layout_read_done_cb(struct rpc_task *task, hdr->ds_clp, hdr->lseg, hdr->pgio_mirror_idx); + clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); + clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); switch (err) { case -NFS4ERR_RESET_TO_PNFS: if (ff_layout_choose_best_ds_for_read(hdr->lseg, hdr->pgio_mirror_idx + 1, &hdr->pgio_mirror_idx)) goto out_eagain; - ff_layout_read_record_layoutstats_done(task, hdr); - pnfs_read_resend_pnfs(hdr); + set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); return task->tk_status; case -NFS4ERR_RESET_TO_MDS: - ff_layout_reset_read(hdr); + set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); return task->tk_status; case -EAGAIN: goto out_eagain; @@ -1403,6 +1404,10 @@ static void ff_layout_read_release(void *data) struct nfs_pgio_header *hdr = data; ff_layout_read_record_layoutstats_done(&hdr->task, hdr); + if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) + pnfs_read_resend_pnfs(hdr); + else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) + ff_layout_reset_read(hdr); pnfs_generic_rw_release(data); } @@ -1423,12 +1428,14 @@ static int ff_layout_write_done_cb(struct rpc_task *task, hdr->ds_clp, hdr->lseg, 
hdr->pgio_mirror_idx); + clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); + clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); switch (err) { case -NFS4ERR_RESET_TO_PNFS: - ff_layout_reset_write(hdr, true); + set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); return task->tk_status; case -NFS4ERR_RESET_TO_MDS: - ff_layout_reset_write(hdr, false); + set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); return task->tk_status; case -EAGAIN: return -EAGAIN; @@ -1575,6 +1582,10 @@ static void ff_layout_write_release(void *data) struct nfs_pgio_header *hdr = data; ff_layout_write_record_layoutstats_done(&hdr->task, hdr); + if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) + ff_layout_reset_write(hdr, true); + else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) + ff_layout_reset_write(hdr, false); pnfs_generic_rw_release(data); } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ed45090e4df6..6dd146885da9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3294,6 +3294,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) struct nfs4_closedata *calldata = data; struct nfs4_state *state = calldata->state; struct inode *inode = calldata->inode; + struct pnfs_layout_hdr *lo; bool is_rdonly, is_wronly, is_rdwr; int call_close = 0; @@ -3337,6 +3338,12 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) goto out_wait; } + lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; + if (lo && !pnfs_layout_is_valid(lo)) { + calldata->arg.lr_args = NULL; + calldata->res.lr_res = NULL; + } + if (calldata->arg.fmode == 0) task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; @@ -5972,12 +5979,19 @@ static void nfs4_delegreturn_release(void *calldata) static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) { struct nfs4_delegreturndata *d_data; + struct pnfs_layout_hdr *lo; d_data = (struct nfs4_delegreturndata *)data; if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) return; + lo = d_data->args.lr_args ? 
d_data->args.lr_args->layout : NULL; + if (lo && !pnfs_layout_is_valid(lo)) { + d_data->args.lr_args = NULL; + d_data->res.lr_res = NULL; + } + nfs4_setup_sequence(d_data->res.server->nfs_client, &d_data->args.seq_args, &d_data->res.seq_res, @@ -8650,6 +8664,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); + nfs4_sequence_free_slot(&lgp->res.seq_res); + switch (nfs4err) { case 0: goto out; @@ -8714,7 +8730,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, goto out; } - nfs4_sequence_free_slot(&lgp->res.seq_res); err = nfs4_handle_exception(server, nfs4err, exception); if (!status) { if (exception->retry) @@ -8786,20 +8801,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout) if (IS_ERR(task)) return ERR_CAST(task); status = rpc_wait_for_completion_task(task); - if (status == 0) { + if (status != 0) + goto out; + + /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ + if (task->tk_status < 0 || lgp->res.layoutp->len == 0) { status = nfs4_layoutget_handle_exception(task, lgp, &exception); *timeout = exception.timeout; - } - + } else + lseg = pnfs_layout_process(lgp); +out: trace_nfs4_layoutget(lgp->args.ctx, &lgp->args.range, &lgp->res.range, &lgp->res.stateid, status); - /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ - if (status == 0 && lgp->res.layoutp->len) - lseg = pnfs_layout_process(lgp); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); if (status) @@ -8817,6 +8834,8 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) &lrp->args.seq_args, &lrp->res.seq_res, task); + if (!pnfs_layout_is_valid(lrp->args.layout)) + rpc_exit(task, 0); } static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index a8f5e6b16749..3fe81424337d 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -801,6 +801,11 @@ static inline void nfs4_lgopen_release(struct nfs4_layoutget *lgp) { } +static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo) +{ + return false; +} + #endif /* CONFIG_NFS_V4_1 */ #if IS_ENABLED(CONFIG_NFS_V4_2) diff --git a/fs/pipe.c b/fs/pipe.c index bb0840e234f3..39d6f431da83 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -509,22 +509,19 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } } -static struct wait_queue_head * -pipe_get_poll_head(struct file *filp, __poll_t events) -{ - struct pipe_inode_info *pipe = filp->private_data; - - return &pipe->wait; -} - /* No kernel lock held - fine */ -static __poll_t pipe_poll_mask(struct file *filp, __poll_t events) +static __poll_t +pipe_poll(struct file *filp, poll_table *wait) { + __poll_t mask; struct pipe_inode_info *pipe = filp->private_data; - int nrbufs = pipe->nrbufs; - __poll_t mask = 0; + int nrbufs; + + poll_wait(filp, &pipe->wait, wait); /* Reading only -- no need for acquiring the semaphore. */ + nrbufs = pipe->nrbufs; + mask = 0; if (filp->f_mode & FMODE_READ) { mask = (nrbufs > 0) ? 
EPOLLIN | EPOLLRDNORM : 0; if (!pipe->writers && filp->f_version != pipe->w_counter) @@ -1023,8 +1020,7 @@ const struct file_operations pipefifo_fops = { .llseek = no_llseek, .read_iter = pipe_read, .write_iter = pipe_write, - .get_poll_head = pipe_get_poll_head, - .poll_mask = pipe_poll_mask, + .poll = pipe_poll, .unlocked_ioctl = pipe_ioctl, .release = pipe_release, .fasync = pipe_fasync, diff --git a/fs/proc/base.c b/fs/proc/base.c index b6572944efc3..aaffc0c30216 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -235,6 +235,10 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf, if (env_start != arg_end || env_start >= env_end) env_start = env_end = arg_end; + /* .. and limit it to a maximum of one page of slop */ + if (env_end >= arg_end + PAGE_SIZE) + env_end = arg_end + PAGE_SIZE - 1; + /* We're not going to care if "*ppos" has high bits set */ pos = arg_start + *ppos; @@ -254,10 +258,19 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf, while (count) { int got; size_t size = min_t(size_t, PAGE_SIZE, count); + long offset; - got = access_remote_vm(mm, pos, page, size, FOLL_ANON); - if (got <= 0) + /* + * Are we already starting past the official end? + * We always include the last byte that is *supposed* + * to be NUL + */ + offset = (pos >= arg_end) ? pos - arg_end + 1 : 0; + + got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON); + if (got <= offset) break; + got -= offset; /* Don't walk past a NUL character once you hit arg_end */ if (pos + got >= arg_end) { @@ -276,12 +289,17 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf, n = arg_end - pos - 1; /* Cut off at first NUL after 'n' */ - got = n + strnlen(page+n, got-n); - if (!got) + got = n + strnlen(page+n, offset+got-n); + if (got < offset) break; + got -= offset; + + /* Include the NUL if it existed */ + if (got < size) + got++; } - got -= copy_to_user(buf, page, got); + got -= copy_to_user(buf, page+offset, got); if (unlikely(!got)) { if (!len) len = -EFAULT; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 6ac1c92997ea..bb1c1625b158 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -564,11 +564,20 @@ static int proc_seq_open(struct inode *inode, struct file *file) return seq_open(file, de->seq_ops); } +static int proc_seq_release(struct inode *inode, struct file *file) +{ + struct proc_dir_entry *de = PDE(inode); + + if (de->state_size) + return seq_release_private(inode, file); + return seq_release(inode, file); +} + static const struct file_operations proc_seq_fops = { .open = proc_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = proc_seq_release, }; struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index e9679016271f..dfd73a4616ce 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -831,7 +831,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) SEQ_PUT_DEC(" kB\nSwap: ", mss->swap); SEQ_PUT_DEC(" kB\nSwapPss: ", mss->swap_pss >> PSS_SHIFT); - SEQ_PUT_DEC(" kB\nLocked: ", mss->pss >> PSS_SHIFT); + SEQ_PUT_DEC(" kB\nLocked: ", + mss->pss_locked >> PSS_SHIFT); seq_puts(m, " kB\n"); } if (!rollup_mode) { diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index d88231e3b2be..fc20e06c56ba 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -711,21 +711,18 @@ EXPORT_SYMBOL(dquot_quota_sync); static unsigned long dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 
{ - struct list_head *head; struct dquot *dquot; unsigned long freed = 0; spin_lock(&dq_list_lock); - head = free_dquots.prev; - while (head != &free_dquots && sc->nr_to_scan) { - dquot = list_entry(head, struct dquot, dq_free); + while (!list_empty(&free_dquots) && sc->nr_to_scan) { + dquot = list_first_entry(&free_dquots, struct dquot, dq_free); remove_dquot_hash(dquot); remove_free_dquot(dquot); remove_inuse(dquot); do_destroy_dquot(dquot); sc->nr_to_scan--; freed++; - head = free_dquots.prev; } spin_unlock(&dq_list_lock); return freed; diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 7e288d97adcb..9fed1c05f1f4 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key) } /* %k */ -static void sprintf_le_key(char *buf, struct reiserfs_key *key) +static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key) { if (key) - sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id), - le32_to_cpu(key->k_objectid), le_offset(key), - le_type(key)); + return scnprintf(buf, size, "[%d %d %s %s]", + le32_to_cpu(key->k_dir_id), + le32_to_cpu(key->k_objectid), le_offset(key), + le_type(key)); else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } /* %K */ -static void sprintf_cpu_key(char *buf, struct cpu_key *key) +static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key) { if (key) - sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id, - key->on_disk_key.k_objectid, reiserfs_cpu_offset(key), - cpu_type(key)); + return scnprintf(buf, size, "[%d %d %s %s]", + key->on_disk_key.k_dir_id, + key->on_disk_key.k_objectid, + reiserfs_cpu_offset(key), cpu_type(key)); else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } -static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh) +static int scnprintf_de_head(char *buf, size_t size, + struct reiserfs_de_head *deh) { if (deh) - sprintf(buf, - "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]", - deh_offset(deh), deh_dir_id(deh), deh_objectid(deh), - deh_location(deh), deh_state(deh)); + return scnprintf(buf, size, + "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]", + deh_offset(deh), deh_dir_id(deh), + deh_objectid(deh), deh_location(deh), + deh_state(deh)); else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } -static void sprintf_item_head(char *buf, struct item_head *ih) +static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih) { if (ih) { - strcpy(buf, - (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*"); - sprintf_le_key(buf + strlen(buf), &(ih->ih_key)); - sprintf(buf + strlen(buf), ", item_len %d, item_location %d, " - "free_space(entry_count) %d", - ih_item_len(ih), ih_location(ih), ih_free_space(ih)); + char *p = buf; + char * const end = buf + size; + + p += scnprintf(p, end - p, "%s", + (ih_version(ih) == KEY_FORMAT_3_6) ? + "*3.6* " : "*3.5*"); + + p += scnprintf_le_key(p, end - p, &ih->ih_key); + + p += scnprintf(p, end - p, + ", item_len %d, item_location %d, free_space(entry_count) %d", + ih_item_len(ih), ih_location(ih), + ih_free_space(ih)); + return p - buf; } else - sprintf(buf, "[NULL]"); + return scnprintf(buf, size, "[NULL]"); } -static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de) +static int scnprintf_direntry(char *buf, size_t size, + struct reiserfs_dir_entry *de) { char name[20]; memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen); name[de->de_namelen > 19 ? 
19 : de->de_namelen] = 0; - sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid); + return scnprintf(buf, size, "\"%s\"==>[%d %d]", + name, de->de_dir_id, de->de_objectid); } -static void sprintf_block_head(char *buf, struct buffer_head *bh) +static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh) { - sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ", - B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh)); + return scnprintf(buf, size, + "level=%d, nr_items=%d, free_space=%d rdkey ", + B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh)); } -static void sprintf_buffer_head(char *buf, struct buffer_head *bh) +static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh) { - sprintf(buf, - "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", - bh->b_bdev, bh->b_size, - (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)), - bh->b_state, bh->b_page, - buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE", - buffer_dirty(bh) ? "DIRTY" : "CLEAN", - buffer_locked(bh) ? "LOCKED" : "UNLOCKED"); + return scnprintf(buf, size, + "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", + bh->b_bdev, bh->b_size, + (unsigned long long)bh->b_blocknr, + atomic_read(&(bh->b_count)), + bh->b_state, bh->b_page, + buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE", + buffer_dirty(bh) ? "DIRTY" : "CLEAN", + buffer_locked(bh) ? "LOCKED" : "UNLOCKED"); } -static void sprintf_disk_child(char *buf, struct disk_child *dc) +static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc) { - sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc), - dc_size(dc)); + return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]", + dc_block_number(dc), dc_size(dc)); } static char *is_there_reiserfs_struct(char *fmt, int *what) @@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args) char *fmt1 = fmt_buf; char *k; char *p = error_buf; + char * const end = &error_buf[sizeof(error_buf)]; int what; spin_lock(&error_lock); - strcpy(fmt1, fmt); + if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) { + strscpy(error_buf, "format string too long", end - error_buf); + goto out_unlock; + } while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) { *k = 0; - p += vsprintf(p, fmt1, args); + p += vscnprintf(p, end - p, fmt1, args); switch (what) { case 'k': - sprintf_le_key(p, va_arg(args, struct reiserfs_key *)); + p += scnprintf_le_key(p, end - p, + va_arg(args, struct reiserfs_key *)); break; case 'K': - sprintf_cpu_key(p, va_arg(args, struct cpu_key *)); + p += scnprintf_cpu_key(p, end - p, + va_arg(args, struct cpu_key *)); break; case 'h': - sprintf_item_head(p, va_arg(args, struct item_head *)); + p += scnprintf_item_head(p, end - p, + va_arg(args, struct item_head *)); break; case 't': - sprintf_direntry(p, - va_arg(args, - struct reiserfs_dir_entry *)); + p += scnprintf_direntry(p, end - p, + va_arg(args, struct reiserfs_dir_entry *)); break; case 'y': - sprintf_disk_child(p, - va_arg(args, struct disk_child *)); + p += scnprintf_disk_child(p, end - p, + va_arg(args, struct disk_child *)); break; case 'z': - sprintf_block_head(p, - va_arg(args, struct buffer_head *)); + p += scnprintf_block_head(p, end - p, + va_arg(args, struct buffer_head *)); break; case 'b': - sprintf_buffer_head(p, - va_arg(args, struct buffer_head *)); + p += scnprintf_buffer_head(p, end - p, + va_arg(args, struct buffer_head *)); break; case 'a': - sprintf_de_head(p, - va_arg(args, - struct 
reiserfs_de_head *)); + p += scnprintf_de_head(p, end - p, + va_arg(args, struct reiserfs_de_head *)); break; } - p += strlen(p); fmt1 = k + 2; } - vsprintf(p, fmt1, args); + p += vscnprintf(p, end - p, fmt1, args); +out_unlock: spin_unlock(&error_lock); } diff --git a/fs/select.c b/fs/select.c index 317891ff8165..4a6b6e4b21cb 100644 --- a/fs/select.c +++ b/fs/select.c @@ -34,29 +34,6 @@ #include -__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) -{ - if (file->f_op->poll) { - return file->f_op->poll(file, pt); - } else if (file_has_poll_mask(file)) { - unsigned int events = poll_requested_events(pt); - struct wait_queue_head *head; - - if (pt && pt->_qproc) { - head = file->f_op->get_poll_head(file, events); - if (!head) - return DEFAULT_POLLMASK; - if (IS_ERR(head)) - return EPOLLERR; - pt->_qproc(file, head, pt); - } - - return file->f_op->poll_mask(file, events); - } else { - return DEFAULT_POLLMASK; - } -} -EXPORT_SYMBOL_GPL(vfs_poll); /* * Estimate expected accuracy in ns from a timeval. diff --git a/fs/timerfd.c b/fs/timerfd.c index d84a2bee4f82..cdad49da3ff7 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -226,20 +226,21 @@ static int timerfd_release(struct inode *inode, struct file *file) kfree_rcu(ctx, rcu); return 0; } - -static struct wait_queue_head *timerfd_get_poll_head(struct file *file, - __poll_t eventmask) + +static __poll_t timerfd_poll(struct file *file, poll_table *wait) { struct timerfd_ctx *ctx = file->private_data; + __poll_t events = 0; + unsigned long flags; - return &ctx->wqh; -} + poll_wait(file, &ctx->wqh, wait); -static __poll_t timerfd_poll_mask(struct file *file, __poll_t eventmask) -{ - struct timerfd_ctx *ctx = file->private_data; + spin_lock_irqsave(&ctx->wqh.lock, flags); + if (ctx->ticks) + events |= EPOLLIN; + spin_unlock_irqrestore(&ctx->wqh.lock, flags); - return ctx->ticks ? 
EPOLLIN : 0; + return events; } static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, @@ -363,8 +364,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg static const struct file_operations timerfd_fops = { .release = timerfd_release, - .get_poll_head = timerfd_get_poll_head, - .poll_mask = timerfd_poll_mask, + .poll = timerfd_poll, .read = timerfd_read, .llseek = noop_llseek, .show_fdinfo = timerfd_show, diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 1b961b1d9699..fcda0fc97b90 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -533,8 +533,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb, udf_write_aext(table, &epos, &eloc, (etype << 30) | elen, 1); } else - udf_delete_aext(table, epos, eloc, - (etype << 30) | elen); + udf_delete_aext(table, epos); } else { alloc_count = 0; } @@ -630,7 +629,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb, if (goal_elen) udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1); else - udf_delete_aext(table, goal_epos, goal_eloc, goal_elen); + udf_delete_aext(table, goal_epos); brelse(goal_epos.bh); udf_add_free_space(sb, partition, -1); diff --git a/fs/udf/directory.c b/fs/udf/directory.c index 0a98a2369738..d9523013096f 100644 --- a/fs/udf/directory.c +++ b/fs/udf/directory.c @@ -141,10 +141,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos, fibh->ebh->b_data, sizeof(struct fileIdentDesc) + fibh->soffset); - fi_len = (sizeof(struct fileIdentDesc) + - cfi->lengthFileIdent + - le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3; - + fi_len = udf_dir_entry_len(cfi); *nf_pos += fi_len - (fibh->eoffset - fibh->soffset); fibh->eoffset = fibh->soffset + fi_len; } else { @@ -152,6 +149,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos, sizeof(struct fileIdentDesc)); } } + /* Got last entry outside of dir size - fs is corrupted! 
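The udf_dir_entry_len() helper introduced by this series replaces the open-coded "(sizeof(...) + lengths + 3) & ~3" computation just above, which is plain round-up-to-4 padding of the on-disk entry length. A minimal stand-alone sketch of that arithmetic, assuming UDF_NAME_PAD is 4; ALIGN_UP and the 38/11 byte figures are illustrative only, not taken from the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round x up to a multiple of a (a power of two), like the kernel's
 * ALIGN() that udf_dir_entry_len() is built on. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	/* Hypothetical entry: 38-byte fixed fileIdentDesc, 11-byte name,
	 * no implementation-use area. */
	uint32_t raw = 38 + 11 + 0;
	uint32_t padded = ALIGN_UP(raw, 4);	/* UDF_NAME_PAD assumed == 4 */

	assert(padded == 52);
	assert(padded == ((raw + 3) & ~3u));	/* matches the old open-coding */
	printf("raw %u -> padded %u\n", raw, padded);
	return 0;
}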
*/ + if (*nf_pos > dir->i_size) + return NULL; return fi; } diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 7f39d17352c9..9915a58fbabd 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1147,8 +1147,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr if (startnum > endnum) { for (i = 0; i < (startnum - endnum); i++) - udf_delete_aext(inode, *epos, laarr[i].extLocation, - laarr[i].extLength); + udf_delete_aext(inode, *epos); } else if (startnum < endnum) { for (i = 0; i < (endnum - startnum); i++) { udf_insert_aext(inode, *epos, laarr[i].extLocation, @@ -2176,14 +2175,15 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos, return (nelen >> 30); } -int8_t udf_delete_aext(struct inode *inode, struct extent_position epos, - struct kernel_lb_addr eloc, uint32_t elen) +int8_t udf_delete_aext(struct inode *inode, struct extent_position epos) { struct extent_position oepos; int adsize; int8_t etype; struct allocExtDesc *aed; struct udf_inode_info *iinfo; + struct kernel_lb_addr eloc; + uint32_t elen; if (epos.bh) { get_bh(epos.bh); diff --git a/fs/udf/namei.c b/fs/udf/namei.c index c586026508db..06f37ddd2997 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -351,8 +351,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, loff_t f_pos; loff_t size = udf_ext0_offset(dir) + dir->i_size; int nfidlen; - uint8_t lfi; - uint16_t liu; udf_pblk_t block; struct kernel_lb_addr eloc; uint32_t elen = 0; @@ -383,7 +381,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, namelen = 0; } - nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3; + nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD); f_pos = udf_ext0_offset(dir); @@ -424,12 +422,8 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, goto out_err; } - liu = le16_to_cpu(cfi->lengthOfImpUse); - lfi = cfi->lengthFileIdent; - if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { - if (((sizeof(struct fileIdentDesc) + - liu + lfi + 3) & ~3) == nfidlen) { + if (udf_dir_entry_len(cfi) == nfidlen) { cfi->descTag.tagSerialNum = cpu_to_le16(1); cfi->fileVersionNum = cpu_to_le16(1); cfi->fileCharacteristics = 0; @@ -1201,9 +1195,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, if (dir_fi) { dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location); - udf_update_tag((char *)dir_fi, - (sizeof(struct fileIdentDesc) + - le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3); + udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi)); if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(old_inode); else diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index bae311b59400..84c47dde4d26 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h @@ -132,6 +132,12 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb, extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, struct fileIdentDesc *, struct udf_fileident_bh *, uint8_t *, uint8_t *); +static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi) +{ + return ALIGN(sizeof(struct fileIdentDesc) + + le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent, + UDF_NAME_PAD); +} /* file.c */ extern long udf_ioctl(struct file *, unsigned int, unsigned long); @@ -167,8 +173,7 @@ extern int udf_add_aext(struct inode *, struct extent_position *, struct kernel_lb_addr *, uint32_t, int); extern void udf_write_aext(struct inode *, struct extent_position *, struct kernel_lb_addr *, uint32_t, int); -extern 
int8_t udf_delete_aext(struct inode *, struct extent_position, - struct kernel_lb_addr, uint32_t); +extern int8_t udf_delete_aext(struct inode *, struct extent_position); extern int8_t udf_next_aext(struct inode *, struct extent_position *, struct kernel_lb_addr *, uint32_t *, int); extern int8_t udf_current_aext(struct inode *, struct extent_position *, diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 123bf7d516fc..594d192b2331 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -222,24 +222,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, unsigned long reason) { struct mm_struct *mm = ctx->mm; - pte_t *pte; + pte_t *ptep, pte; bool ret = true; VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); - pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); - if (!pte) + ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); + + if (!ptep) goto out; ret = false; + pte = huge_ptep_get(ptep); /* * Lockless access: we're in a wait_event so it's ok if it * changes under us. */ - if (huge_pte_none(*pte)) + if (huge_pte_none(pte)) ret = true; - if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP)) + if (!huge_pte_write(pte) && (reason & VM_UFFD_WP)) ret = true; out: return ret; diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c index 84db76e0e3e3..fecd187fcf2c 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.c +++ b/fs/xfs/libxfs/xfs_ag_resv.c @@ -157,6 +157,7 @@ __xfs_ag_resv_free( error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true); resv->ar_reserved = 0; resv->ar_asked = 0; + resv->ar_orig_reserved = 0; if (error) trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno, @@ -189,13 +190,34 @@ __xfs_ag_resv_init( struct xfs_mount *mp = pag->pag_mount; struct xfs_ag_resv *resv; int error; - xfs_extlen_t reserved; + xfs_extlen_t hidden_space; if (used > ask) ask = used; - reserved = ask - used; - error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true); + switch (type) { + case XFS_AG_RESV_RMAPBT: + /* + * Space taken by the rmapbt is not subtracted from fdblocks + * because the rmapbt lives in the free space. Here we must + * subtract the entire reservation from fdblocks so that we + * always have blocks available for rmapbt expansion. + */ + hidden_space = ask; + break; + case XFS_AG_RESV_METADATA: + /* + * Space taken by all other metadata btrees are accounted + * on-disk as used space. We therefore only hide the space + * that is reserved but not used by the trees. + */ + hidden_space = ask - used; + break; + default: + ASSERT(0); + return -EINVAL; + } + error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true); if (error) { trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno, error, _RET_IP_); @@ -216,7 +238,8 @@ __xfs_ag_resv_init( resv = xfs_perag_resv(pag, type); resv->ar_asked = ask; - resv->ar_reserved = resv->ar_orig_reserved = reserved; + resv->ar_orig_reserved = hidden_space; + resv->ar_reserved = ask - used; trace_xfs_ag_resv_init(pag, type, ask); return 0; diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 01628f0c9a0c..7205268b30bc 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -5780,6 +5780,32 @@ xfs_bmap_collapse_extents( return error; } +/* Make sure we won't be right-shifting an extent past the maximum bound. 
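The masked compare used below, "((startoff + shift) & BMBT_STARTOFF_MASK) < startoff", works because an offset that still fits in the field can never shrink when masked; only a sum that overflows the field wraps around to a smaller value. A small stand-alone check of that reasoning, assuming BMBT_STARTOFF_BITLEN is 54; STARTOFF_MASK and shift_overflows are illustrative names, not the kernel's:

#include <assert.h>
#include <stdint.h>

#define STARTOFF_MASK ((1ULL << 54) - 1)	/* mirrors BMBT_STARTOFF_MASK */

/* True when off + shift no longer fits in the 54-bit field; valid as long
 * as off and shift are each <= STARTOFF_MASK themselves. */
static int shift_overflows(uint64_t off, uint64_t shift)
{
	return ((off + shift) & STARTOFF_MASK) < off;
}

int main(void)
{
	assert(!shift_overflows(100, 100));		/* still fits */
	assert(shift_overflows(STARTOFF_MASK - 1, 2));	/* wraps past the field */
	return 0;
}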
*/ +int +xfs_bmap_can_insert_extents( + struct xfs_inode *ip, + xfs_fileoff_t off, + xfs_fileoff_t shift) +{ + struct xfs_bmbt_irec got; + int is_empty; + int error = 0; + + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return -EIO; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty); + if (!error && !is_empty && got.br_startoff >= off && + ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff) + error = -EINVAL; + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + return error; +} + int xfs_bmap_insert_extents( struct xfs_trans *tp, diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index 99dddbd0fcc6..9b49ddf99c41 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -227,6 +227,8 @@ int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb, bool *done, xfs_fsblock_t *firstblock, struct xfs_defer_ops *dfops); +int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off, + xfs_fileoff_t shift); int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb, bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock, diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 1c5a8aaf2bfc..059bc44c27e8 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -962,6 +962,9 @@ typedef enum xfs_dinode_fmt { XFS_DFORK_DSIZE(dip, mp) : \ XFS_DFORK_ASIZE(dip, mp)) +#define XFS_DFORK_MAXEXT(dip, mp, w) \ + (XFS_DFORK_SIZE(dip, mp, w) / sizeof(struct xfs_bmbt_rec)) + /* * Return pointers to the data or attribute forks. */ @@ -1526,6 +1529,8 @@ typedef struct xfs_bmdr_block { #define BMBT_STARTBLOCK_BITLEN 52 #define BMBT_BLOCKCOUNT_BITLEN 21 +#define BMBT_STARTOFF_MASK ((1ULL << BMBT_STARTOFF_BITLEN) - 1) + typedef struct xfs_bmbt_rec { __be64 l0, l1; } xfs_bmbt_rec_t; diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index d38d724534c4..33dc34655ac3 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -374,6 +374,47 @@ xfs_log_dinode_to_disk( } } +static xfs_failaddr_t +xfs_dinode_verify_fork( + struct xfs_dinode *dip, + struct xfs_mount *mp, + int whichfork) +{ + uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork); + + switch (XFS_DFORK_FORMAT(dip, whichfork)) { + case XFS_DINODE_FMT_LOCAL: + /* + * no local regular files yet + */ + if (whichfork == XFS_DATA_FORK) { + if (S_ISREG(be16_to_cpu(dip->di_mode))) + return __this_address; + if (be64_to_cpu(dip->di_size) > + XFS_DFORK_SIZE(dip, mp, whichfork)) + return __this_address; + } + if (di_nextents) + return __this_address; + break; + case XFS_DINODE_FMT_EXTENTS: + if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork)) + return __this_address; + break; + case XFS_DINODE_FMT_BTREE: + if (whichfork == XFS_ATTR_FORK) { + if (di_nextents > MAXAEXTNUM) + return __this_address; + } else if (di_nextents > MAXEXTNUM) { + return __this_address; + } + break; + default: + return __this_address; + } + return NULL; +} + xfs_failaddr_t xfs_dinode_verify( struct xfs_mount *mp, @@ -441,24 +482,9 @@ xfs_dinode_verify( case S_IFREG: case S_IFLNK: case S_IFDIR: - switch (dip->di_format) { - case XFS_DINODE_FMT_LOCAL: - /* - * no local regular files yet - */ - if (S_ISREG(mode)) - return __this_address; - if (di_size > XFS_DFORK_DSIZE(dip, mp)) - return __this_address; - if (dip->di_nextents) - 
return __this_address; - /* fall through */ - case XFS_DINODE_FMT_EXTENTS: - case XFS_DINODE_FMT_BTREE: - break; - default: - return __this_address; - } + fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK); + if (fa) + return fa; break; case 0: /* Uninitialized inode ok. */ @@ -468,17 +494,9 @@ xfs_dinode_verify( } if (XFS_DFORK_Q(dip)) { - switch (dip->di_aformat) { - case XFS_DINODE_FMT_LOCAL: - if (dip->di_anextents) - return __this_address; - /* fall through */ - case XFS_DINODE_FMT_EXTENTS: - case XFS_DINODE_FMT_BTREE: - break; - default: - return __this_address; - } + fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK); + if (fa) + return fa; } else { /* * If there is no fork offset, this may be a freshly-made inode diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c index 65fc4ed2e9a1..b228c821bae6 100644 --- a/fs/xfs/libxfs/xfs_rtbitmap.c +++ b/fs/xfs/libxfs/xfs_rtbitmap.c @@ -1029,8 +1029,8 @@ xfs_rtalloc_query_range( if (low_rec->ar_startext >= mp->m_sb.sb_rextents || low_rec->ar_startext == high_rec->ar_startext) return 0; - if (high_rec->ar_startext >= mp->m_sb.sb_rextents) - high_rec->ar_startext = mp->m_sb.sb_rextents - 1; + if (high_rec->ar_startext > mp->m_sb.sb_rextents) + high_rec->ar_startext = mp->m_sb.sb_rextents; /* Iterate the bitmap, looking for discrepancies. */ rtstart = low_rec->ar_startext; diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index c35009a86699..83b1e8c6c18f 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -685,12 +685,10 @@ xfs_getbmap( } /* - * dead simple method of punching delalyed allocation blocks from a range in - * the inode. Walks a block at a time so will be slow, but is only executed in - * rare error cases so the overhead is not critical. This will always punch out - * both the start and end blocks, even if the ranges only partially overlap - * them, so it is up to the caller to ensure that partial blocks are not - * passed in. + * Dead simple method of punching delalyed allocation blocks from a range in + * the inode. This will always punch out both the start and end blocks, even + * if the ranges only partially overlap them, so it is up to the caller to + * ensure that partial blocks are not passed in. */ int xfs_bmap_punch_delalloc_range( @@ -698,63 +696,44 @@ xfs_bmap_punch_delalloc_range( xfs_fileoff_t start_fsb, xfs_fileoff_t length) { - xfs_fileoff_t remaining = length; + struct xfs_ifork *ifp = &ip->i_df; + xfs_fileoff_t end_fsb = start_fsb + length; + struct xfs_bmbt_irec got, del; + struct xfs_iext_cursor icur; int error = 0; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - do { - int done; - xfs_bmbt_irec_t imap; - int nimaps = 1; - xfs_fsblock_t firstblock; - struct xfs_defer_ops dfops; - - /* - * Map the range first and check that it is a delalloc extent - * before trying to unmap the range. Otherwise we will be - * trying to remove a real extent (which requires a - * transaction) or a hole, which is probably a bad idea... 
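The new XFS_DFORK_MAXEXT() bound in the fork verifier above is simple division: an inline extent list can hold at most fork-size / record-size entries, and an xfs_bmbt_rec is two __be64 words, i.e. 16 bytes. A toy version of that check, with a made-up 224-byte fork and di_nextents value; bmbt_rec, fork_size and di_nextents here are illustrative only:

#include <assert.h>
#include <stdint.h>

/* On-disk bmbt extent record: two 64-bit words, 16 bytes. */
struct bmbt_rec { uint64_t l0, l1; };

int main(void)
{
	uint32_t fork_size = 224;	/* hypothetical inline data fork */
	uint32_t max_ext = fork_size / sizeof(struct bmbt_rec);

	assert(max_ext == 14);

	/* An extents-format inode claiming more records than fit in its
	 * fork cannot be valid; that is what the new
	 * di_nextents > XFS_DFORK_MAXEXT() test rejects. */
	uint32_t di_nextents = 20;
	assert(di_nextents > max_ext);
	return 0;
}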
- */ - error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps, - XFS_BMAPI_ENTIRE); - - if (error) { - /* something screwed, just bail */ - if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { - xfs_alert(ip->i_mount, - "Failed delalloc mapping lookup ino %lld fsb %lld.", - ip->i_ino, start_fsb); - } - break; - } - if (!nimaps) { - /* nothing there */ - goto next_block; - } - if (imap.br_startblock != DELAYSTARTBLOCK) { - /* been converted, ignore */ - goto next_block; - } - WARN_ON(imap.br_blockcount == 0); - - /* - * Note: while we initialise the firstblock/dfops pair, they - * should never be used because blocks should never be - * allocated or freed for a delalloc extent and hence we need - * don't cancel or finish them after the xfs_bunmapi() call. - */ - xfs_defer_init(&dfops, &firstblock); - error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock, - &dfops, &done); + if (!(ifp->if_flags & XFS_IFEXTENTS)) { + error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); if (error) - break; + return error; + } - ASSERT(!xfs_defer_has_unfinished_work(&dfops)); -next_block: - start_fsb++; - remaining--; - } while(remaining > 0); + if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got)) + return 0; + + while (got.br_startoff + got.br_blockcount > start_fsb) { + del = got; + xfs_trim_extent(&del, start_fsb, length); + + /* + * A delete can push the cursor forward. Step back to the + * previous extent on non-delalloc or extents outside the + * target range. + */ + if (!del.br_blockcount || + !isnullstartblock(del.br_startblock)) { + if (!xfs_iext_prev_extent(ifp, &icur, &got)) + break; + continue; + } + + error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur, + &got, &del); + if (error || !xfs_iext_get_extent(ifp, &icur, &got)) + break; + } return error; } @@ -1208,7 +1187,22 @@ xfs_free_file_space( return 0; if (offset + len > XFS_ISIZE(ip)) len = XFS_ISIZE(ip) - offset; - return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops); + error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops); + if (error) + return error; + + /* + * If we zeroed right up to EOF and EOF straddles a page boundary we + * must make sure that the post-EOF area is also zeroed because the + * page could be mmap'd and iomap_zero_range doesn't do that for us. + * Writeback of the eof page will do this, albeit clumsily. 
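The rewritten punch loop above works an extent at a time: it looks up the extent covering the end of the range and repeatedly trims what it found to the punch range, skipping anything that is not delalloc. The xfs_trim_extent() step is just interval intersection; a stand-alone sketch of that clamping, where struct extent and trim_extent are illustrative rather than the kernel types:

#include <assert.h>
#include <stdint.h>

struct extent { uint64_t start, count; };

/* Clamp an extent to [rstart, rstart + rcount), leaving count == 0 when
 * there is no overlap. */
static void trim_extent(struct extent *e, uint64_t rstart, uint64_t rcount)
{
	uint64_t end = e->start + e->count;
	uint64_t rend = rstart + rcount;

	if (e->start < rstart)
		e->start = rstart;
	if (end > rend)
		end = rend;
	e->count = end > e->start ? end - e->start : 0;
}

int main(void)
{
	struct extent e = { .start = 8, .count = 16 };	/* blocks 8..23 */

	trim_extent(&e, 12, 4);				/* punch blocks 12..15 */
	assert(e.start == 12 && e.count == 4);
	return 0;
}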
+ */ + if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) { + error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, + (offset + len) & ~PAGE_MASK, LLONG_MAX); + } + + return error; } /* @@ -1404,6 +1398,10 @@ xfs_insert_file_space( trace_xfs_insert_file_space(ip); + error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb); + if (error) + return error; + error = xfs_prepare_shift(ip, offset); if (error) return error; diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index c34fa9c342f2..c7157bc48bd1 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -513,8 +513,8 @@ xfs_getfsmap_rtdev_rtbitmap_query( struct xfs_trans *tp, struct xfs_getfsmap_info *info) { - struct xfs_rtalloc_rec alow; - struct xfs_rtalloc_rec ahigh; + struct xfs_rtalloc_rec alow = { 0 }; + struct xfs_rtalloc_rec ahigh = { 0 }; int error; xfs_ilock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED); diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index a7afcad6b711..3f2bd6032cf8 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -387,7 +387,7 @@ xfs_reserve_blocks( do { free = percpu_counter_sum(&mp->m_fdblocks) - mp->m_alloc_set_aside; - if (!free) + if (free <= 0) break; delta = request - mp->m_resblks; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 7a96c4e0ab5c..5df4de666cc1 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3236,7 +3236,6 @@ xfs_iflush_cluster( struct xfs_inode *cip; int nr_found; int clcount = 0; - int bufwasdelwri; int i; pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); @@ -3360,37 +3359,22 @@ xfs_iflush_cluster( * inode buffer and shut down the filesystem. */ rcu_read_unlock(); - /* - * Clean up the buffer. If it was delwri, just release it -- - * brelse can handle it with no problems. If not, shut down the - * filesystem before releasing the buffer. - */ - bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q); - if (bufwasdelwri) - xfs_buf_relse(bp); - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); - if (!bufwasdelwri) { - /* - * Just like incore_relse: if we have b_iodone functions, - * mark the buffer as an error and call them. Otherwise - * mark it as stale and brelse. - */ - if (bp->b_iodone) { - bp->b_flags &= ~XBF_DONE; - xfs_buf_stale(bp); - xfs_buf_ioerror(bp, -EIO); - xfs_buf_ioend(bp); - } else { - xfs_buf_stale(bp); - xfs_buf_relse(bp); - } - } - /* - * Unlocks the flush lock + * We'll always have an inode attached to the buffer for completion + * process by the time we are called from xfs_iflush(). Hence we have + * always need to do IO completion processing to abort the inodes + * attached to the buffer. handle them just like the shutdown case in + * xfs_buf_submit(). */ + ASSERT(bp->b_iodone); + bp->b_flags &= ~XBF_DONE; + xfs_buf_stale(bp); + xfs_buf_ioerror(bp, -EIO); + xfs_buf_ioend(bp); + + /* abort the corrupt inode, as it was not attached to the buffer */ xfs_iflush_abort(cip, false); kmem_free(cilist); xfs_perag_put(pag); @@ -3486,12 +3470,17 @@ xfs_iflush( xfs_log_force(mp, 0); /* - * inode clustering: - * see if other inodes can be gathered into this write + * inode clustering: try to gather other inodes into this write + * + * Note: Any error during clustering will result in the filesystem + * being shut down and completion callbacks run on the cluster buffer. + * As we have already flushed and attached this inode to the buffer, + * it has already been aborted and released by xfs_iflush_cluster() and + * so we have no further error handling to do here. 
*/ error = xfs_iflush_cluster(ip, bp); if (error) - goto cluster_corrupt_out; + return error; *bpp = bp; return 0; @@ -3500,12 +3489,8 @@ xfs_iflush( if (bp) xfs_buf_relse(bp); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); -cluster_corrupt_out: - error = -EFSCORRUPTED; abort_out: - /* - * Unlocks the flush lock - */ + /* abort the corrupt inode, as it was not attached to the buffer */ xfs_iflush_abort(ip, false); return error; } diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 49f5492eed3b..55876dd02f0c 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -963,12 +963,13 @@ xfs_ilock_for_iomap( unsigned *lockmode) { unsigned mode = XFS_ILOCK_SHARED; + bool is_write = flags & (IOMAP_WRITE | IOMAP_ZERO); /* * COW writes may allocate delalloc space or convert unwritten COW * extents, so we need to make sure to take the lock exclusively here. */ - if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) { + if (xfs_is_reflink_inode(ip) && is_write) { /* * FIXME: It could still overwrite on unshared extents and not * need allocation. @@ -989,6 +990,7 @@ xfs_ilock_for_iomap( mode = XFS_ILOCK_EXCL; } +relock: if (flags & IOMAP_NOWAIT) { if (!xfs_ilock_nowait(ip, mode)) return -EAGAIN; @@ -996,6 +998,17 @@ xfs_ilock_for_iomap( xfs_ilock(ip, mode); } + /* + * The reflink iflag could have changed since the earlier unlocked + * check, so if we got ILOCK_SHARED for a write and but we're now a + * reflink inode we have to switch to ILOCK_EXCL and relock. + */ + if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) { + xfs_iunlock(ip, mode); + mode = XFS_ILOCK_EXCL; + goto relock; + } + *lockmode = mode; return 0; } diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index e040af120b69..524f543c5b82 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -258,7 +258,12 @@ xfs_trans_alloc( if (!(flags & XFS_TRANS_NO_WRITECOUNT)) sb_start_intwrite(mp->m_super); - WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); + /* + * Zero-reservation ("empty") transactions can't modify anything, so + * they're allowed to run while we're frozen. 
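The relock dance added to xfs_ilock_for_iomap() above follows the usual optimistic-locking shape: pick a lock mode from an unlocked check, take the lock, re-check, and if the state changed in the window, drop the lock and retake the stronger one. A userspace analogue with a pthread rwlock, where the flag variable is only a stand-in for the reflink re-check and nothing here is the kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static bool flag;	/* stand-in for "inode became reflinked" */

static void locked_op(void)
{
	bool exclusive = false;

	pthread_rwlock_rdlock(&lock);
relock:
	if (!exclusive && flag) {
		/*
		 * The state may have changed between the unlocked check and
		 * taking the shared lock, so drop it, retake it exclusively
		 * and test again, the same shape as the new "goto relock".
		 */
		pthread_rwlock_unlock(&lock);
		pthread_rwlock_wrlock(&lock);
		exclusive = true;
		goto relock;
	}
	/* ... work under the appropriate lock mode ... */
	pthread_rwlock_unlock(&lock);
}

int main(void)
{
	flag = true;
	locked_op();
	puts("done");
	return 0;
}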
+ */ + WARN_ON(resp->tr_logres > 0 && + mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); atomic_inc(&mp->m_active_trans); tp = kmem_zone_zalloc(xfs_trans_zone, diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 40a916efd7c0..1194a4c78d55 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void) { return; } -static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr, +static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) { static unsigned int printout = 1; @@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr, "Consider compiling CPUfreq support into your kernel.\n"); printout = 0; } - return 0; } static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) { diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h index 0763f065b975..d10f1e7d6ba8 100644 --- a/include/asm-generic/qspinlock_types.h +++ b/include/asm-generic/qspinlock_types.h @@ -63,7 +63,7 @@ typedef struct qspinlock { /* * Initializier */ -#define __ARCH_SPIN_LOCK_UNLOCKED { .val = ATOMIC_INIT(0) } +#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } } /* * Bitfields in the atomic value: diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index faddde44de8c..3063125197ad 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, * For now w.r.t page table cache, mark the range_size as PAGE_SIZE */ +#ifndef pte_free_tlb #define pte_free_tlb(tlb, ptep, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ __pte_free_tlb(tlb, ptep, address); \ } while (0) +#endif +#ifndef pmd_free_tlb #define pmd_free_tlb(tlb, pmdp, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ __pmd_free_tlb(tlb, pmdp, address); \ } while (0) +#endif #ifndef __ARCH_HAS_4LEVEL_HACK +#ifndef pud_free_tlb #define pud_free_tlb(tlb, pudp, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ __pud_free_tlb(tlb, pudp, address); \ } while (0) #endif +#endif #ifndef __ARCH_HAS_5LEVEL_HACK +#ifndef p4d_free_tlb #define p4d_free_tlb(tlb, pudp, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ __p4d_free_tlb(tlb, pudp, address); \ } while (0) #endif +#endif #define tlb_migrate_finish(mm) do {} while (0) diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index cc414db9da0a..482461d8931d 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); void af_alg_free_resources(struct af_alg_async_req *areq); void af_alg_async_cb(struct crypto_async_request *_req, int err); -__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events); +__poll_t af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait); struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen); int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h index 9564597cbfac..0aa1d9c3e0b9 100644 --- a/include/dt-bindings/clock/imx6ul-clock.h +++ b/include/dt-bindings/clock/imx6ul-clock.h @@ -235,27 +235,25 @@ #define IMX6UL_CLK_CSI_PODF 222 #define IMX6UL_CLK_PLL3_120M 223 #define IMX6UL_CLK_KPP 
224 -#define IMX6UL_CLK_CKO1_SEL 225 -#define IMX6UL_CLK_CKO1_PODF 226 -#define IMX6UL_CLK_CKO1 227 -#define IMX6UL_CLK_CKO2_SEL 228 -#define IMX6UL_CLK_CKO2_PODF 229 -#define IMX6UL_CLK_CKO2 230 -#define IMX6UL_CLK_CKO 231 - -/* For i.MX6ULL */ -#define IMX6ULL_CLK_ESAI_PRED 232 -#define IMX6ULL_CLK_ESAI_PODF 233 -#define IMX6ULL_CLK_ESAI_EXTAL 234 -#define IMX6ULL_CLK_ESAI_MEM 235 -#define IMX6ULL_CLK_ESAI_IPG 236 -#define IMX6ULL_CLK_DCP_CLK 237 -#define IMX6ULL_CLK_EPDC_PRE_SEL 238 -#define IMX6ULL_CLK_EPDC_SEL 239 -#define IMX6ULL_CLK_EPDC_PODF 240 -#define IMX6ULL_CLK_EPDC_ACLK 241 -#define IMX6ULL_CLK_EPDC_PIX 242 -#define IMX6ULL_CLK_ESAI_SEL 243 +#define IMX6ULL_CLK_ESAI_PRED 225 +#define IMX6ULL_CLK_ESAI_PODF 226 +#define IMX6ULL_CLK_ESAI_EXTAL 227 +#define IMX6ULL_CLK_ESAI_MEM 228 +#define IMX6ULL_CLK_ESAI_IPG 229 +#define IMX6ULL_CLK_DCP_CLK 230 +#define IMX6ULL_CLK_EPDC_PRE_SEL 231 +#define IMX6ULL_CLK_EPDC_SEL 232 +#define IMX6ULL_CLK_EPDC_PODF 233 +#define IMX6ULL_CLK_EPDC_ACLK 234 +#define IMX6ULL_CLK_EPDC_PIX 235 +#define IMX6ULL_CLK_ESAI_SEL 236 +#define IMX6UL_CLK_CKO1_SEL 237 +#define IMX6UL_CLK_CKO1_PODF 238 +#define IMX6UL_CLK_CKO1 239 +#define IMX6UL_CLK_CKO2_SEL 240 +#define IMX6UL_CLK_CKO2_PODF 241 +#define IMX6UL_CLK_CKO2 242 +#define IMX6UL_CLK_CKO 243 #define IMX6UL_CLK_END 244 #endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4b35a66383f9..e54f40974eb0 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res); int acpi_check_region(resource_size_t start, resource_size_t n, const char *name); +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, + u32 level); + int acpi_resources_are_enforced(void); #ifdef CONFIG_HIBERNATION diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 0c27515d2cf6..8124815eb121 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -214,6 +214,7 @@ struct atmphy_ops { struct atm_skb_data { struct atm_vcc *vcc; /* ATM VCC */ unsigned long atm_options; /* ATM layer options */ + unsigned int acct_truesize; /* truesize accounted to vcc */ }; #define VCC_HTABLE_SIZE 32 @@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk); void atm_dev_release_vccs(struct atm_dev *dev); +static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb) +{ + /* + * Because ATM skbs may not belong to a sock (and we don't + * necessarily want to), skb->truesize may be adjusted, + * escaping the hack in pskb_expand_head() which avoids + * doing so for some cases. So stash the value of truesize + * at the time we accounted it, and atm_pop_raw() can use + * that value later, in case it changes. 
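The comment above is about keeping the memory accounting symmetric: charge skb->truesize once, remember exactly what was charged in acct_truesize, and have the pop path release that remembered value rather than whatever truesize happens to be later. A minimal stand-alone model of that idea; acct_skb, account_tx and pop_tx are illustrative names, not the kernel interfaces:

#include <assert.h>
#include <stddef.h>

struct acct_skb {
	size_t truesize;	/* may be adjusted after the charge */
	size_t acct_truesize;	/* what was actually charged */
};

static long wmem;	/* stand-in for sk_wmem_alloc */

static void account_tx(struct acct_skb *skb)
{
	wmem += skb->truesize;
	skb->acct_truesize = skb->truesize;	/* remember the charge */
}

static void pop_tx(struct acct_skb *skb)
{
	wmem -= skb->acct_truesize;	/* release what was charged */
}

int main(void)
{
	struct acct_skb skb = { .truesize = 768 };

	account_tx(&skb);
	skb.truesize = 1024;	/* e.g. grown later by pskb_expand_head() */
	pop_tx(&skb);
	assert(wmem == 0);	/* accounting stays balanced */
	return 0;
}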
+ */ + refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); + ATM_SKB(skb)->acct_truesize = skb->truesize; + ATM_SKB(skb)->atm_options = vcc->atm_options; +} static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) { diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 0bd432a4d7bd..24251762c20c 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -22,7 +22,6 @@ struct dentry; */ enum wb_state { WB_registered, /* bdi_register() was done */ - WB_shutting_down, /* wb_shutdown() in progress */ WB_writeback_running, /* Writeback is in progress */ WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */ WB_start_all, /* nr_pages == 0 (all) work pending */ @@ -189,6 +188,7 @@ struct backing_dev_info { #ifdef CONFIG_CGROUP_WRITEBACK struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ struct rb_root cgwb_congested_tree; /* their congested states */ + struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ #else struct bdi_writeback_congested *wb_congested; #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9154570edf29..79226ca8f80f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1119,8 +1119,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, if (!q->limits.chunk_sectors) return q->limits.max_sectors; - return q->limits.chunk_sectors - - (offset & (q->limits.chunk_sectors - 1)); + return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors - + (offset & (q->limits.chunk_sectors - 1)))); } static inline unsigned int blk_rq_get_max_sectors(struct request *rq, diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 975fb4cf1bb7..79795c5fa7c3 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -188,12 +188,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, \ __ret; \ }) +int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, struct bpf_prog *prog); +int cgroup_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype); +int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr); #else +struct bpf_prog; struct cgroup_bpf {}; static inline void cgroup_bpf_put(struct cgroup *cgrp) {} static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } +static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EINVAL; +} + +static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} + #define cgroup_bpf_enabled (0) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 995c3b1e59bf..8827e797ff97 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); /* Map specifics */ struct xdp_buff; +struct sk_buff; struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); void __dev_map_insert_ctx(struct bpf_map *map, u32 index); void __dev_map_flush(struct bpf_map *map); int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx); +int 
dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, + struct bpf_prog *xdp_prog); struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); void __cpu_map_insert_ctx(struct bpf_map *map, u32 index); @@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, return 0; } +struct sk_buff; + +static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, + struct sk_buff *skb, + struct bpf_prog *xdp_prog) +{ + return 0; +} + static inline struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) { @@ -684,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); +int sockmap_get_from_fd(const union bpf_attr *attr, int type, + struct bpf_prog *prog); #else static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) { @@ -702,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map, { return -EOPNOTSUPP; } + +static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, + struct bpf_prog *prog) +{ + return -EINVAL; +} #endif #if defined(CONFIG_XDP_SOCKETS) diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h index 5f8a4283092d..9d9ff755ec29 100644 --- a/include/linux/bpf_lirc.h +++ b/include/linux/bpf_lirc.h @@ -5,11 +5,12 @@ #include #ifdef CONFIG_BPF_LIRC_MODE2 -int lirc_prog_attach(const union bpf_attr *attr); +int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); int lirc_prog_detach(const union bpf_attr *attr); int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); #else -static inline int lirc_prog_attach(const union bpf_attr *attr) +static inline int lirc_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) { return -EINVAL; } diff --git a/include/linux/compat.h b/include/linux/compat.h index b1a5562b3215..c68acc47da57 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -72,6 +72,9 @@ */ #ifndef COMPAT_SYSCALL_DEFINEx #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments");\ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ __attribute__((alias(__stringify(__se_compat_sys##name)))); \ @@ -80,8 +83,11 @@ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ { \ - return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ + long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ } \ + __diag_pop(); \ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #endif /* COMPAT_SYSCALL_DEFINEx */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index f1a7492a5cc8..573f5a7d42d4 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -65,6 +65,18 @@ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) #endif +/* + * Feature detection for gnu_inline (gnu89 extern inline semantics). 
Either + * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics, + * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not + * defined so the gnu89 semantics are the default. + */ +#ifdef __GNUC_STDC_INLINE__ +# define __gnu_inline __attribute__((gnu_inline)) +#else +# define __gnu_inline +#endif + /* * Force always-inline if the user requests it so via the .config, * or if gcc is too old. @@ -72,19 +84,22 @@ * -Wunused-function. This turns out to avoid the need for complex #ifdef * directives. Suppress the warning in clang as well by using "unused" * function attribute, which is redundant but not harmful for gcc. + * Prefer gnu_inline, so that extern inline functions do not emit an + * externally visible function. This makes extern inline behave as per gnu89 + * semantics rather than c99. This prevents multiple symbol definition errors + * of extern inline functions at link time. + * A lot of inline functions can cause havoc with function tracing. */ #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) -#define inline inline __attribute__((always_inline,unused)) notrace -#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace -#define __inline __inline __attribute__((always_inline,unused)) notrace +#define inline \ + inline __attribute__((always_inline, unused)) notrace __gnu_inline #else -/* A lot of inline functions can cause havoc with function tracing */ -#define inline inline __attribute__((unused)) notrace -#define __inline__ __inline__ __attribute__((unused)) notrace -#define __inline __inline __attribute__((unused)) notrace +#define inline inline __attribute__((unused)) notrace __gnu_inline #endif +#define __inline__ inline +#define __inline inline #define __always_inline inline __attribute__((always_inline)) #define noinline __attribute__((noinline)) @@ -347,3 +362,28 @@ #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif + +/* + * Turn individual warnings and errors on and off locally, depending + * on version. 
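As a worked example of the machinery defined just below: assuming gcc 8 or newer, the suppression used by the syscall wrapper macros

	__diag_ignore(GCC, 8, "-Wattribute-alias",
		      "Type aliasing is used to sanitize syscall arguments")

expands step by step to

	__diag_GCC(8, ignore, "-Wattribute-alias")
	__diag_GCC_8(ignored "-Wattribute-alias")
	_Pragma("GCC diagnostic ignored \"-Wattribute-alias\"")

while on gcc older than 8, or on non-GCC compilers, the same invocation expands to nothing; the trailing comment string is documentation only and is always discarded.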
+ */ +#define __diag_GCC(version, severity, s) \ + __diag_GCC_ ## version(__diag_GCC_ ## severity s) + +/* Severity used in pragma directives */ +#define __diag_GCC_ignore ignored +#define __diag_GCC_warn warning +#define __diag_GCC_error error + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#endif + +#if GCC_VERSION >= 80000 +#define __diag_GCC_8(s) __diag(s) +#else +#define __diag_GCC_8(s) +#endif diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 6b79a9bba9a7..a8ba6b04152c 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -271,4 +271,22 @@ struct ftrace_likely_data { # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) #endif +#ifndef __diag +#define __diag(string) +#endif + +#ifndef __diag_GCC +#define __diag_GCC(version, severity, string) +#endif + +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) + +#define __diag_ignore(compiler, version, option, comment) \ + __diag_ ## compiler(version, ignore, option) +#define __diag_warn(compiler, version, option, comment) \ + __diag_ ## compiler(version, warn, option) +#define __diag_error(compiler, version, option, comment) \ + __diag_ ## compiler(version, error, option) + #endif /* __LINUX_COMPILER_TYPES_H */ diff --git a/include/linux/dax.h b/include/linux/dax.h index 3855e3800f48..deb0f663252f 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); -int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, +vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, pfn_t *pfnp, int *errp, const struct iomap_ops *ops); vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size, pfn_t pfn); diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index b67bf6ac907d..3c5a4cb3eb95 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -48,7 +48,7 @@ * CMA should not be used by the device drivers directly. It is * only a helper framework for dma-mapping subsystem. * - * For more information, see kernel-docs in drivers/base/dma-contiguous.c + * For more information, see kernel-docs in kernel/dma/contiguous.c */ #ifdef __KERNEL__ diff --git a/include/linux/filter.h b/include/linux/filter.h index 45fc0f5000d8..300baad62c88 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -469,15 +470,16 @@ struct sock_fprog_kern { }; struct bpf_binary_header { - unsigned int pages; - u8 image[]; + u32 pages; + /* Some arches need word alignment for their instructions */ + u8 image[] __aligned(4); }; struct bpf_prog { u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? */ jit_requested:1,/* archs need to JIT the prog */ - locked:1, /* Program image locked? */ + undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? 
*/ @@ -671,50 +673,27 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) -#ifdef CONFIG_ARCH_HAS_SET_MEMORY static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { - fp->locked = 1; - WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages)); + fp->undo_set_mem = 1; + set_memory_ro((unsigned long)fp, fp->pages); } static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { - if (fp->locked) { - WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages)); - /* In case set_memory_rw() fails, we want to be the first - * to crash here instead of some random place later on. - */ - fp->locked = 0; - } + if (fp->undo_set_mem) + set_memory_rw((unsigned long)fp, fp->pages); } static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { - WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages)); + set_memory_ro((unsigned long)hdr, hdr->pages); } static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) { - WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages)); + set_memory_rw((unsigned long)hdr, hdr->pages); } -#else -static inline void bpf_prog_lock_ro(struct bpf_prog *fp) -{ -} - -static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) -{ -} - -static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) -{ -} - -static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) -{ -} -#endif /* CONFIG_ARCH_HAS_SET_MEMORY */ static inline struct bpf_binary_header * bpf_jit_binary_hdr(const struct bpf_prog *fp) @@ -786,6 +765,21 @@ static inline bool bpf_dump_raw_ok(void) struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); +static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, + struct net_device *fwd) +{ + unsigned int len; + + if (unlikely(!(fwd->flags & IFF_UP))) + return -ENETDOWN; + + len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; + if (skb->len > len) + return -EMSGSIZE; + + return 0; +} + /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the * same cpu context. Further for best results no more than a single map * for the do_redirect/do_flush pair should be used. 
This limitation is @@ -961,6 +955,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) } #endif /* CONFIG_BPF_JIT */ +void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp); +void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); + #define BPF_ANC BIT(15) static inline bool bpf_needs_clear_a(const struct sock_filter *first) diff --git a/include/linux/fs.h b/include/linux/fs.h index 5c91108846db..d78d146a98da 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1720,8 +1720,6 @@ struct file_operations { int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); - struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t); - __poll_t (*poll_mask) (struct file *, __poll_t); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 8154f4920fcb..ebb77674be90 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type; */ int register_ftrace_function(struct ftrace_ops *ops); int unregister_ftrace_function(struct ftrace_ops *ops); -void clear_ftrace_function(void); extern void ftrace_stub(unsigned long a0, unsigned long a1, struct ftrace_ops *op, struct pt_regs *regs); @@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void) { return 0; } -static inline void clear_ftrace_function(void) { } static inline void ftrace_kill(void) { } static inline void ftrace_free_init_mem(void) { } static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } diff --git a/include/linux/hid.h b/include/linux/hid.h index 41a3d5775394..773bcb1d4044 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -511,6 +511,7 @@ struct hid_output_fifo { #define HID_STAT_ADDED BIT(0) #define HID_STAT_PARSED BIT(1) #define HID_STAT_DUP_DETECTED BIT(2) +#define HID_STAT_REPROBED BIT(3) struct hid_input { struct list_head list; @@ -579,7 +580,7 @@ struct hid_device { /* device report descriptor */ bool battery_avoid_query; #endif - unsigned int status; /* see STAT flags above */ + unsigned long status; /* see STAT flags above */ unsigned claimed; /* Claimed by hidinput, hiddev? 
*/ unsigned quirks; /* Various quirks the device can pull on us */ bool io_started; /* If IO has started */ diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 767467d886de..67c75372b691 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, char __user *user_buffer); size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); -int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length); +int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length); int iio_dma_buffer_request_update(struct iio_buffer *buffer); int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h index d7188de4db96..3f4bf60b0bb5 100644 --- a/include/linux/input/mt.h +++ b/include/linux/input/mt.h @@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis) return axis == ABS_MT_SLOT || input_is_mt_value(axis); } -void input_mt_report_slot_state(struct input_dev *dev, +bool input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active); void input_mt_report_finger_count(struct input_dev *dev, int count); diff --git a/include/linux/irq.h b/include/linux/irq.h index 4bd2f34947f4..201de12a9957 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -503,6 +503,7 @@ struct irq_chip { * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode + * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index cbb872c1b607..9d2ea3e907d0 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -73,6 +73,7 @@ #define GICD_TYPER_MBIS (1U << 16) #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) #define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) @@ -576,8 +577,8 @@ struct rdists { phys_addr_t phys_base; } __percpu *rdist; struct page *prop_page; - int id_bits; u64 flags; + u32 gicd_typer; bool has_vlpis; bool has_direct_lpi; }; diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 25b33b664537..dd1e40ddac7d 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc) return desc->irq_common_data.handler_data; } -static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) -{ - return desc->irq_common_data.msi_desc; -} - /* * Architectures call this to let the generic IRQ layer * handle an interrupt. diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d23123238534..941dc0a5a877 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -666,7 +666,7 @@ do { \ * your code. (Extra memory is used for special buffers that are * allocated when trace_printk() is used.) * - * A little optization trick is done here. If there's only one + * A little optimization trick is done here. If there's only one * argument, there's no need to scan the string for printf formats. 
* The trace_puts() will suffice. But how can we take advantage of * using trace_puts() when trace_printk() has only one argument? diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 2803264c512f..c1961761311d 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k); int kthread_park(struct task_struct *k); void kthread_unpark(struct task_struct *k); void kthread_parkme(void); -void kthread_park_complete(struct task_struct *k); int kthreadd(void *unused); extern struct task_struct *kthreadd_task; diff --git a/include/linux/libata.h b/include/linux/libata.h index 8b8946dd63b9..32f247cb5e9e 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -210,6 +210,7 @@ enum { ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ /* (doesn't imply presence) */ ATA_FLAG_SATA = (1 << 1), + ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */ ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ @@ -1495,6 +1496,29 @@ static inline bool ata_tag_valid(unsigned int tag) return tag < ATA_MAX_QUEUE || ata_tag_internal(tag); } +#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \ + for ((tag) = 0; (tag) < (max_tag) && \ + ({ qc = fn((ap), (tag)); 1; }); (tag)++) \ + +/* + * Internal use only, iterate commands ignoring error handling and + * status of 'qc'. + */ +#define ata_qc_for_each_raw(ap, qc, tag) \ + __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag) + +/* + * Iterate all potential commands that can be queued + */ +#define ata_qc_for_each(ap, qc, tag) \ + __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag) + +/* + * Like ata_qc_for_each, but with the internal tag included + */ +#define ata_qc_for_each_with_internal(ap, qc, tag) \ + __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag) + /* * device helpers */ diff --git a/include/linux/memory.h b/include/linux/memory.h index 31ca3e28b0eb..a6ddefc60517 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -38,6 +38,7 @@ struct memory_block { int arch_get_memory_phys_device(unsigned long start_pfn); unsigned long memory_block_size_bytes(void); +int set_memory_block_size_order(unsigned int order); /* These states are exposed to userspace as text strings in sysfs */ #define MEM_ONLINE (1<<0) /* exposed to userspace */ diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index d3c9db492b30..fab5121ffb8f 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -8,6 +8,8 @@ #include +#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) + enum { SRIOV_NONE, SRIOV_LEGACY, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 27134c4fcb76..ac281f5ec9b8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vnic_env_queue_counters[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; - u8 eswitch_flow_table[0x1]; + u8 eswitch_manager[0x1]; u8 device_memory[0x1]; u8 mcam_reg[0x1]; u8 pcam_reg[0x1]; diff --git a/include/linux/net.h b/include/linux/net.h index 08b6eb964dd6..6554d3ba4396 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -147,7 +147,6 @@ struct proto_ops { int (*getname) (struct socket *sock, struct sockaddr *addr, int peer); - __poll_t (*poll_mask) (struct socket *sock, __poll_t events); __poll_t 
(*poll) (struct file *file, struct socket *sock, struct poll_table_struct *wait); int (*ioctl) (struct socket *sock, unsigned int cmd, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3ec9850c7936..3d0cc0b5cec2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, if (PTR_ERR(pp) != -EINPROGRESS) NAPI_GRO_CB(skb)->flush |= flush; } +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, + struct sk_buff **pp, + int flush, + struct gro_remcsum *grc) +{ + if (PTR_ERR(pp) != -EINPROGRESS) { + NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_remcsum_cleanup(skb, grc); + skb->remcsum_offload = 0; + } +} #else static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) { NAPI_GRO_CB(skb)->flush |= flush; } +static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, + struct sk_buff **pp, + int flush, + struct gro_remcsum *grc) +{ + NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_remcsum_cleanup(skb, grc); + skb->remcsum_offload = 0; +} #endif static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9dee3c23895d..712eed156d09 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1438,6 +1438,8 @@ enum { NFS_IOHDR_EOF, NFS_IOHDR_REDO, NFS_IOHDR_STAT, + NFS_IOHDR_RESEND_PNFS, + NFS_IOHDR_RESEND_MDS, }; struct nfs_io_completion; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 9206a4fef9ac..cb8d84090cfb 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -234,7 +234,7 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n); unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *opp_node); + struct device_node *np); int genpd_dev_pm_attach(struct device *dev); struct device *genpd_dev_pm_attach_by_id(struct device *dev, @@ -274,9 +274,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn, static inline unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *opp_node) + struct device_node *np) { - return -ENODEV; + return 0; } static inline int genpd_dev_pm_attach(struct device *dev) diff --git a/include/linux/poll.h b/include/linux/poll.h index fdf86b4cbc71..7e0fdcf905d2 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -74,17 +74,17 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) pt->_key = ~(__poll_t)0; /* all events enabled */ } -static inline bool file_has_poll_mask(struct file *file) -{ - return file->f_op->get_poll_head && file->f_op->poll_mask; -} - static inline bool file_can_poll(struct file *file) { - return file->f_op->poll || file_has_poll_mask(file); + return file->f_op->poll; } -__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt); +static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) +{ + if (unlikely(!file->f_op->poll)) + return DEFAULT_POLLMASK; + return file->f_op->poll(file, pt); +} struct poll_table_entry { struct file *filp; diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 4193c41e383a..a685da2c4522 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -98,5 +98,7 @@ extern __must_check bool 
refcount_dec_if_one(refcount_t *r); extern __must_check bool refcount_dec_not_one(refcount_t *r); extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); - +extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, + spinlock_t *lock, + unsigned long *flags); #endif /* _LINUX_REFCOUNT_H */ diff --git a/include/linux/rmi.h b/include/linux/rmi.h index 64125443f8a6..5ef5c7c412a7 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h @@ -354,6 +354,8 @@ struct rmi_driver_data { struct mutex irq_mutex; struct input_dev *input; + struct irq_domain *irqdomain; + u8 pdt_props; u8 num_rx_electrodes; diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 51f52020ad5f..093aa57120b0 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -9,9 +9,6 @@ #include struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif unsigned long page_link; unsigned int offset; unsigned int length; @@ -64,7 +61,6 @@ struct sg_table { * */ -#define SG_MAGIC 0x87654321 #define SG_CHAIN 0x01UL #define SG_END 0x02UL @@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page) */ BUG_ON((unsigned long) page & (SG_CHAIN | SG_END)); #ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif sg->page_link = page_link | (unsigned long) page; @@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page, static inline struct page *sg_page(struct scatterlist *sg) { #ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END)); @@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, **/ static inline void sg_mark_end(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif /* * Set termination bit, clear potential chain bit */ @@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg) **/ static inline void sg_unmark_end(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif sg->page_link &= ~SG_END; } @@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg) static inline void sg_init_marker(struct scatterlist *sgl, unsigned int nents) { -#ifdef CONFIG_DEBUG_SG - unsigned int i; - - for (i = 0; i < nents; i++) - sgl[i].sg_magic = SG_MAGIC; -#endif sg_mark_end(&sgl[nents - 1]); } diff --git a/include/linux/sched.h b/include/linux/sched.h index 87bf02d93a27..43731fe51c97 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -118,7 +118,7 @@ struct task_group; * the comment with set_special_state(). 
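TASK_PARKED is added to the special states below because kthread parking now sets it via set_special_state(); a simplified, illustrative sketch of that call site (the real loop lives in kernel/kthread.c):

	/* A racing wakeup must not be able to clobber this state. */
	set_special_state(TASK_PARKED);
	if (test_bit(KTHREAD_SHOULD_PARK, &self->flags))
		schedule();
	__set_current_state(TASK_RUNNING);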
*/ #define is_special_task_state(state) \ - ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD)) + ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) #define __set_current_state(state_value) \ do { \ @@ -1799,20 +1799,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t) set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); } -void __rseq_handle_notify_resume(struct pt_regs *regs); +void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); -static inline void rseq_handle_notify_resume(struct pt_regs *regs) +static inline void rseq_handle_notify_resume(struct ksignal *ksig, + struct pt_regs *regs) { if (current->rseq) - __rseq_handle_notify_resume(regs); + __rseq_handle_notify_resume(ksig, regs); } -static inline void rseq_signal_deliver(struct pt_regs *regs) +static inline void rseq_signal_deliver(struct ksignal *ksig, + struct pt_regs *regs) { preempt_disable(); __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask); preempt_enable(); - rseq_handle_notify_resume(regs); + rseq_handle_notify_resume(ksig, regs); } /* rseq_preempt() requires preemption to be disabled. */ @@ -1831,9 +1833,7 @@ static inline void rseq_migrate(struct task_struct *t) /* * If parent process has a registered restartable sequences area, the - * child inherits. Only applies when forking a process, not a thread. In - * case a parent fork() in the middle of a restartable sequence, set the - * resume notifier to force the child to retry. + * child inherits. Only applies when forking a process, not a thread. */ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) { @@ -1847,7 +1847,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) t->rseq_len = current->rseq_len; t->rseq_sig = current->rseq_sig; t->rseq_event_mask = current->rseq_event_mask; - rseq_preempt(t); } } @@ -1864,10 +1863,12 @@ static inline void rseq_execve(struct task_struct *t) static inline void rseq_set_notify_resume(struct task_struct *t) { } -static inline void rseq_handle_notify_resume(struct pt_regs *regs) +static inline void rseq_handle_notify_resume(struct ksignal *ksig, + struct pt_regs *regs) { } -static inline void rseq_signal_deliver(struct pt_regs *regs) +static inline void rseq_signal_deliver(struct ksignal *ksig, + struct pt_regs *regs) { } static inline void rseq_preempt(struct task_struct *t) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c86885954994..164cdedf6012 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3252,7 +3252,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, int *peeked, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); -__poll_t datagram_poll_mask(struct socket *sock, __poll_t events); +__poll_t datagram_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); int skb_copy_datagram_iter(const struct sk_buff *from, int offset, struct iov_iter *to, int size); static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 09fa2c6f0e68..3a1a1dbc6f49 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -155,8 +155,12 @@ struct kmem_cache { #ifdef CONFIG_SYSFS #define SLAB_SUPPORTS_SYSFS +void sysfs_slab_unlink(struct kmem_cache *); void sysfs_slab_release(struct kmem_cache *); #else +static inline void sysfs_slab_unlink(struct kmem_cache *s) +{ +} static inline void 
sysfs_slab_release(struct kmem_cache *s) { } diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 1e8a46435838..fd57888d4942 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -427,6 +427,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) +extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, + unsigned long *flags); +#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \ + __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))) + int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, size_t max_size, unsigned int cpu_mult, gfp_t gfp); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 73810808cdf2..a368a68cb667 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -231,6 +231,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) */ #ifndef __SYSCALL_DEFINEx #define __SYSCALL_DEFINEx(x, name, ...) \ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments");\ asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ __attribute__((alias(__stringify(__se_sys##name)))); \ ALLOW_ERROR_INJECTION(sys##name, ERRNO); \ @@ -243,6 +246,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ return ret; \ } \ + __diag_pop(); \ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) #endif /* __SYSCALL_DEFINEx */ diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 6c5f2074e14f..6f8b68cd460f 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -75,7 +75,7 @@ struct uio_device { struct fasync_struct *async_queue; wait_queue_head_t wait; struct uio_info *info; - spinlock_t info_lock; + struct mutex info_lock; struct kobject *map_dir; struct kobject *portio_dir; }; diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 53ce8176c313..ec9d6bc65855 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags); int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags); -__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events); +__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); int bt_sock_wait_ready(struct sock *sk, unsigned long flags); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 5cba71d2dc44..71b9043aa0e7 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -170,6 +170,7 @@ struct fib6_info { unused:3; struct fib6_nh fib6_nh; + struct rcu_head rcu; }; struct rt6_info { @@ -273,7 +274,7 @@ static inline void ip6_rt_put(struct rt6_info *rt) } struct fib6_info *fib6_info_alloc(gfp_t gfp_flags); -void fib6_info_destroy(struct fib6_info *f6i); +void fib6_info_destroy_rcu(struct rcu_head *head); static inline void fib6_info_hold(struct fib6_info *f6i) { @@ -283,7 +284,7 @@ static inline void fib6_info_hold(struct fib6_info *f6i) static inline void fib6_info_release(struct fib6_info *f6i) { if (f6i && 
atomic_dec_and_test(&f6i->fib6_ref)) - fib6_info_destroy(f6i); + call_rcu(&f6i->rcu, fib6_info_destroy_rcu); } enum fib6_walk_state { diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index b0eaeb02d46d..f4c21b5a1242 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -153,6 +153,8 @@ struct iucv_sock_list { atomic_t autobind_name; }; +__poll_t iucv_sock_poll(struct file *file, struct socket *sock, + poll_table *wait); void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); void iucv_accept_enqueue(struct sock *parent, struct sock *sk); diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 47e35cce3b64..a71264d75d7f 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -128,6 +128,7 @@ struct net { #endif #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct netns_nf_frag nf_frag; + struct ctl_table_header *nf_frag_frags_hdr; #endif struct sock *nfnl; struct sock *nfnl_stash; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index c978a31b0f84..762ac9931b62 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -109,7 +109,6 @@ struct netns_ipv6 { #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct netns_nf_frag { - struct netns_sysctl_ipv6 sysctl; struct netns_frags frags; }; #endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a3c1a2c47cd4..20b059574e60 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, { } +static inline bool tcf_block_shared(struct tcf_block *block) +{ + return false; +} + static inline struct Qdisc *tcf_block_q(struct tcf_block *block) { return NULL; diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 30b3e2fe240a..8c2caa370e0f 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); int sctp_inet_listen(struct socket *sock, int backlog); void sctp_write_space(struct sock *sk); void sctp_data_ready(struct sock *sk); -__poll_t sctp_poll_mask(struct socket *sock, __poll_t events); +__poll_t sctp_poll(struct file *file, struct socket *sock, + poll_table *wait); void sctp_sock_rfree(struct sk_buff *skb); void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc); diff --git a/include/net/tcp.h b/include/net/tcp.h index 0448e7c5d2b4..800582b5dd54 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -388,7 +388,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst); void tcp_close(struct sock *sk, long timeout); void tcp_init_sock(struct sock *sk); void tcp_init_transfer(struct sock *sk, int bpf_op); -__poll_t tcp_poll_mask(struct socket *sock, __poll_t events); +__poll_t tcp_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int tcp_setsockopt(struct sock *sk, int level, int optname, diff --git a/include/net/tls.h b/include/net/tls.h index 7f84ea3e217c..70c273777fe9 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -109,7 +109,8 @@ struct tls_sw_context_rx { struct strparser strp; void (*saved_data_ready)(struct sock *sk); - __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events); + unsigned int (*sk_poll)(struct file *file, struct socket *sock, + struct poll_table_struct *wait); 
struct sk_buff *recv_pkt; u8 control; bool decrypted; @@ -224,7 +225,8 @@ void tls_sw_free_resources_tx(struct sock *sk); void tls_sw_free_resources_rx(struct sock *sk); int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); -__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events); +unsigned int tls_sw_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); diff --git a/include/net/udp.h b/include/net/udp.h index b1ea8b0f5e6a..81afdacd4fff 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk); int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int __udp_disconnect(struct sock *sk, int flags); int udp_disconnect(struct sock *sk, int flags); -__poll_t udp_poll_mask(struct socket *sock, __poll_t events); +__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait); struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, bool is_ipv6); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 4c6241bc2039..6c003995347a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -3391,11 +3391,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget); * * Users can examine the cq structure to determine the actual CQ size. */ -struct ib_cq *ib_create_cq(struct ib_device *device, - ib_comp_handler comp_handler, - void (*event_handler)(struct ib_event *, void *), - void *cq_context, - const struct ib_cq_init_attr *cq_attr); +struct ib_cq *__ib_create_cq(struct ib_device *device, + ib_comp_handler comp_handler, + void (*event_handler)(struct ib_event *, void *), + void *cq_context, + const struct ib_cq_init_attr *cq_attr, + const char *caller); +#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \ + __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME) /** * ib_resize_cq - Modifies the capacity of the CQ. diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h index d00221345c19..3c5038b587ba 100644 --- a/include/uapi/linux/aio_abi.h +++ b/include/uapi/linux/aio_abi.h @@ -39,8 +39,10 @@ enum { IOCB_CMD_PWRITE = 1, IOCB_CMD_FSYNC = 2, IOCB_CMD_FDSYNC = 3, - /* 4 was the experimental IOCB_CMD_PREADX */ - IOCB_CMD_POLL = 5, + /* These two are experimental. + * IOCB_CMD_PREADX = 4, + * IOCB_CMD_POLL = 5, + */ IOCB_CMD_NOOP = 6, IOCB_CMD_PREADV = 7, IOCB_CMD_PWRITEV = 8, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 59b19b6a40d7..b7db3261c62d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1857,7 +1857,8 @@ union bpf_attr { * is resolved), the nexthop address is returned in ipv4_dst * or ipv6_dst based on family, smac is set to mac address of * egress device, dmac is set to nexthop mac address, rt_metric - * is set to metric from route (IPv4/IPv6 only). + * is set to metric from route (IPv4/IPv6 only), and ifindex + * is set to the device index of the nexthop from the FIB lookup. * * *plen* argument is the size of the passed in struct. * *flags* argument can be a combination of one or more of the @@ -1873,9 +1874,10 @@ union bpf_attr { * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. 
* Return - * Egress device index on success, 0 if packet needs to continue - * up the stack for further processing or a negative error in case - * of failure. + * * < 0 if any input argument is invalid + * * 0 on success (packet is forwarded, nexthop neighbor exists) + * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the + * * packet is not forwarded or needs assist from full stack * * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) * Description @@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args { #define BPF_FIB_LOOKUP_DIRECT BIT(0) #define BPF_FIB_LOOKUP_OUTPUT BIT(1) +enum { + BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ + BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ + BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ + BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ + BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ + BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ + BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ + BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ + BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ +}; + struct bpf_fib_lookup { /* input: network family for lookup (AF_INET, AF_INET6) * output: network family of egress nexthop @@ -2625,7 +2639,11 @@ struct bpf_fib_lookup { /* total length of packet from network header - used for MTU check */ __u16 tot_len; - __u32 ifindex; /* L3 device index for lookup */ + + /* input: L3 device index for lookup + * output: device index from FIB lookup + */ + __u32 ifindex; union { /* inputs to lookup */ diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h index 85a3fb65e40a..20d6cc91435d 100644 --- a/include/uapi/linux/nbd.h +++ b/include/uapi/linux/nbd.h @@ -53,6 +53,9 @@ enum { /* These are client behavior specific flags. */ #define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on disconnect. */ +#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on + * close by last opener. + */ /* userspace doesn't need the nbd_device structure */ diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h index d620fa43756c..9a402fdb60e9 100644 --- a/include/uapi/linux/rseq.h +++ b/include/uapi/linux/rseq.h @@ -10,13 +10,8 @@ * Copyright (c) 2015-2018 Mathieu Desnoyers */ -#ifdef __KERNEL__ -# include -#else -# include -#endif - -#include +#include +#include enum rseq_cpu_id_state { RSEQ_CPU_ID_UNINITIALIZED = -1, @@ -52,10 +47,10 @@ struct rseq_cs { __u32 version; /* enum rseq_cs_flags */ __u32 flags; - LINUX_FIELD_u32_u64(start_ip); + __u64 start_ip; /* Offset from start_ip. */ - LINUX_FIELD_u32_u64(post_commit_offset); - LINUX_FIELD_u32_u64(abort_ip); + __u64 post_commit_offset; + __u64 abort_ip; } __attribute__((aligned(4 * sizeof(__u64)))); /* @@ -67,28 +62,30 @@ struct rseq_cs { struct rseq { /* * Restartable sequences cpu_id_start field. Updated by the - * kernel, and read by user-space with single-copy atomicity - * semantics. Aligned on 32-bit. Always contains a value in the - * range of possible CPUs, although the value may not be the - * actual current CPU (e.g. if rseq is not initialized). This - * CPU number value should always be compared against the value - * of the cpu_id field before performing a rseq commit or - * returning a value read from a data structure indexed using - * the cpu_id_start value. + * kernel. 
Read by user-space with single-copy atomicity + * semantics. This field should only be read by the thread which + * registered this data structure. Aligned on 32-bit. Always + * contains a value in the range of possible CPUs, although the + * value may not be the actual current CPU (e.g. if rseq is not + * initialized). This CPU number value should always be compared + * against the value of the cpu_id field before performing a rseq + * commit or returning a value read from a data structure indexed + * using the cpu_id_start value. */ __u32 cpu_id_start; /* - * Restartable sequences cpu_id field. Updated by the kernel, - * and read by user-space with single-copy atomicity semantics. - * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and - * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the - * former means "rseq uninitialized", and latter means "rseq - * initialization failed". This value is meant to be read within - * rseq critical sections and compared with the cpu_id_start - * value previously read, before performing the commit instruction, - * or read and compared with the cpu_id_start value before returning - * a value loaded from a data structure indexed using the - * cpu_id_start value. + * Restartable sequences cpu_id field. Updated by the kernel. + * Read by user-space with single-copy atomicity semantics. This + * field should only be read by the thread which registered this + * data structure. Aligned on 32-bit. Values + * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED + * have a special semantic: the former means "rseq uninitialized", + * and latter means "rseq initialization failed". This value is + * meant to be read within rseq critical sections and compared + * with the cpu_id_start value previously read, before performing + * the commit instruction, or read and compared with the + * cpu_id_start value before returning a value loaded from a data + * structure indexed using the cpu_id_start value. */ __u32 cpu_id; /* @@ -105,27 +102,44 @@ struct rseq { * targeted by the rseq_cs. Also needs to be set to NULL by user-space * before reclaiming memory that contains the targeted struct rseq_cs. * - * Read and set by the kernel with single-copy atomicity semantics. - * Set by user-space with single-copy atomicity semantics. Aligned - * on 64-bit. + * Read and set by the kernel. Set by user-space with single-copy + * atomicity semantics. This field should only be updated by the + * thread which registered this data structure. Aligned on 64-bit. */ - LINUX_FIELD_u32_u64(rseq_cs); + union { + __u64 ptr64; +#ifdef __LP64__ + __u64 ptr; +#else + struct { +#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN) + __u32 padding; /* Initialized to zero. */ + __u32 ptr32; +#else /* LITTLE */ + __u32 ptr32; + __u32 padding; /* Initialized to zero. */ +#endif /* ENDIAN */ + } ptr; +#endif + } rseq_cs; + /* - * - RSEQ_DISABLE flag: + * Restartable sequences flags field. + * + * This field should only be updated by the thread which + * registered this data structure. Read by the kernel. + * Mainly used for single-stepping through rseq critical sections + * with debuggers. * - * Fallback fast-track flag for single-stepping. - * Set by user-space if lack of progress is detected. - * Cleared by user-space after rseq finish. - * Read by the kernel. * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT - * Inhibit instruction sequence block restart and event - * counter increment on preemption for this thread. 
+ * Inhibit instruction sequence block restart on preemption + * for this thread. * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL - * Inhibit instruction sequence block restart and event - * counter increment on signal delivery for this thread. + * Inhibit instruction sequence block restart on signal + * delivery for this thread. * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE - * Inhibit instruction sequence block restart and event - * counter increment on migration for this thread. + * Inhibit instruction sequence block restart on migration for + * this thread. */ __u32 flags; } __attribute__((aligned(4 * sizeof(__u64)))); diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index 6e299349b158..b7b57967d90f 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -44,6 +44,7 @@ #define TCMU_MAILBOX_VERSION 2 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */ #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */ +#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */ struct tcmu_mailbox { __u16 version; @@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr { __u16 cmd_id; __u8 kflags; #define TCMU_UFLAG_UNKNOWN_OP 0x1 +#define TCMU_UFLAG_READ_LEN 0x2 __u8 uflags; } __packed; @@ -119,7 +121,7 @@ struct tcmu_cmd_entry { __u8 scsi_status; __u8 __pad1; __u16 __pad2; - __u32 __pad3; + __u32 read_len; char sense_buffer[TCMU_SENSE_BUFFERSIZE]; } rsp; }; diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h deleted file mode 100644 index 0a87ace34a57..000000000000 --- a/include/uapi/linux/types_32_64.h +++ /dev/null @@ -1,50 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ -#ifndef _UAPI_LINUX_TYPES_32_64_H -#define _UAPI_LINUX_TYPES_32_64_H - -/* - * linux/types_32_64.h - * - * Integer type declaration for pointers across 32-bit and 64-bit systems. 
- * - * Copyright (c) 2015-2018 Mathieu Desnoyers - */ - -#ifdef __KERNEL__ -# include -#else -# include -#endif - -#include - -#ifdef __BYTE_ORDER -# if (__BYTE_ORDER == __BIG_ENDIAN) -# define LINUX_BYTE_ORDER_BIG_ENDIAN -# else -# define LINUX_BYTE_ORDER_LITTLE_ENDIAN -# endif -#else -# ifdef __BIG_ENDIAN -# define LINUX_BYTE_ORDER_BIG_ENDIAN -# else -# define LINUX_BYTE_ORDER_LITTLE_ENDIAN -# endif -#endif - -#ifdef __LP64__ -# define LINUX_FIELD_u32_u64(field) __u64 field -# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v -#else -# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN -# define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field -# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \ - field ## _padding = 0, field = (intptr_t)v -# else -# define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding -# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \ - field = (intptr_t)v, field ## _padding = 0 -# endif -#endif - -#endif /* _UAPI_LINUX_TYPES_32_64_H */ diff --git a/include/xen/xen.h b/include/xen/xen.h index 9d4340c907d1..1e1d9bd0bd37 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -25,12 +25,16 @@ extern bool xen_pvh; #define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) #define xen_pvh_domain() (xen_pvh) +#include + +extern uint32_t xen_start_flags; + #ifdef CONFIG_XEN_DOM0 #include #include #define xen_initial_domain() (xen_domain() && \ - xen_start_info && xen_start_info->flags & SIF_INITDOMAIN) + (xen_start_flags & SIF_INITDOMAIN)) #else /* !CONFIG_XEN_DOM0 */ #define xen_initial_domain() (0) #endif /* CONFIG_XEN_DOM0 */ diff --git a/init/Kconfig b/init/Kconfig index 5a52f07259a2..041f3a022122 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1051,10 +1051,9 @@ config LD_DEAD_CODE_DATA_ELIMINATION depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION depends on EXPERT help - Select this if the architecture wants to do dead code and - data elimination with the linker by compiling with - -ffunction-sections -fdata-sections, and linking with - --gc-sections. + Enable this if you want to do dead code and data elimination with + the linker by compiling with -ffunction-sections -fdata-sections, + and linking with --gc-sections. 
This can reduce on disk and in-memory size of the kernel code and static data, particularly for small configs and @@ -1719,10 +1718,6 @@ source "arch/Kconfig" endmenu # General setup -config HAVE_GENERIC_DMA_COHERENT - bool - default n - config RT_MUTEXES bool diff --git a/kernel/Makefile b/kernel/Makefile index d2001624fe7a..04bc07c2b42a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -41,6 +41,7 @@ obj-y += printk/ obj-y += irq/ obj-y += rcu/ obj-y += livepatch/ +obj-y += dma/ obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o obj-$(CONFIG_FREEZER) += freezer.o diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index f7c00bd6f8e4..3d83ee7df381 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, return ret; } +int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, struct bpf_prog *prog) +{ + struct cgroup *cgrp; + int ret; + + cgrp = cgroup_get_from_fd(attr->target_fd); + if (IS_ERR(cgrp)) + return PTR_ERR(cgrp); + + ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, + attr->attach_flags); + cgroup_put(cgrp); + return ret; +} + +int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) +{ + struct bpf_prog *prog; + struct cgroup *cgrp; + int ret; + + cgrp = cgroup_get_from_fd(attr->target_fd); + if (IS_ERR(cgrp)) + return PTR_ERR(cgrp); + + prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); + if (IS_ERR(prog)) + prog = NULL; + + ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); + if (prog) + bpf_prog_put(prog); + + cgroup_put(cgrp); + return ret; +} + +int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + struct cgroup *cgrp; + int ret; + + cgrp = cgroup_get_from_fd(attr->query.target_fd); + if (IS_ERR(cgrp)) + return PTR_ERR(cgrp); + + ret = cgroup_bpf_query(cgrp, attr, uattr); + + cgroup_put(cgrp); + return ret; +} + /** * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering * @sk: The socket sending or receiving traffic diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 9f1493705f40..1e5625d46414 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, return prog_adj; } +void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) +{ + int i; + + for (i = 0; i < fp->aux->func_cnt; i++) + bpf_prog_kallsyms_del(fp->aux->func[i]); +} + +void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) +{ + bpf_prog_kallsyms_del_subprogs(fp); + bpf_prog_kallsyms_del(fp); +} + #ifdef CONFIG_BPF_JIT /* All BPF JIT sysctl knobs here. 
*/ int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON); @@ -1434,6 +1448,17 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) return 0; } +static void bpf_prog_select_func(struct bpf_prog *fp) +{ +#ifndef CONFIG_BPF_JIT_ALWAYS_ON + u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); + + fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; +#else + fp->bpf_func = __bpf_prog_ret0_warn; +#endif +} + /** * bpf_prog_select_runtime - select exec runtime for BPF program * @fp: bpf_prog populated with internal BPF program @@ -1444,13 +1469,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) */ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) { -#ifndef CONFIG_BPF_JIT_ALWAYS_ON - u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); + /* In case of BPF to BPF calls, verifier did all the prep + * work with regards to JITing, etc. + */ + if (fp->bpf_func) + goto finalize; - fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; -#else - fp->bpf_func = __bpf_prog_ret0_warn; -#endif + bpf_prog_select_func(fp); /* eBPF JITs can rewrite the program in case constant * blinding is active. However, in case of error during @@ -1471,6 +1496,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) if (*err) return fp; } + +finalize: bpf_prog_lock_ro(fp); /* The tail call compatibility check can only be done at diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index a7cc7b3494a9..642c97f6d1b8 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -345,6 +345,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, return bq_enqueue(dst, xdpf, dev_rx); } +int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, + struct bpf_prog *xdp_prog) +{ + int err; + + err = __xdp_generic_ok_fwd_dev(skb, dst->dev); + if (unlikely(err)) + return err; + skb->dev = dst->dev; + generic_xdp_tx(skb, xdp_prog); + + return 0; +} + static void *dev_map_lookup_elem(struct bpf_map *map, void *key) { struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key); diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 52a91d816c0e..cf7b6a6dbd1f 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -72,6 +72,7 @@ struct bpf_htab { u32 n_buckets; u32 elem_size; struct bpf_sock_progs progs; + struct rcu_head rcu; }; struct htab_elem { @@ -89,8 +90,8 @@ enum smap_psock_state { struct smap_psock_map_entry { struct list_head list; struct sock **entry; - struct htab_elem *hash_link; - struct bpf_htab *htab; + struct htab_elem __rcu *hash_link; + struct bpf_htab __rcu *htab; }; struct smap_psock { @@ -120,6 +121,7 @@ struct smap_psock { struct bpf_prog *bpf_parse; struct bpf_prog *bpf_verdict; struct list_head maps; + spinlock_t maps_lock; /* Back reference used when sock callback trigger sockmap operations */ struct sock *sock; @@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); static int bpf_tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); +static void bpf_tcp_close(struct sock *sk, long timeout); static inline struct smap_psock *smap_psock_sk(const struct sock *sk) { @@ -161,7 +164,42 @@ static bool bpf_tcp_stream_read(const struct sock *sk) return !empty; } -static struct proto tcp_bpf_proto; +enum { + SOCKMAP_IPV4, + SOCKMAP_IPV6, + SOCKMAP_NUM_PROTS, +}; + +enum { + SOCKMAP_BASE, + 
SOCKMAP_TX, + SOCKMAP_NUM_CONFIGS, +}; + +static struct proto *saved_tcpv6_prot __read_mostly; +static DEFINE_SPINLOCK(tcpv6_prot_lock); +static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS]; +static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], + struct proto *base) +{ + prot[SOCKMAP_BASE] = *base; + prot[SOCKMAP_BASE].close = bpf_tcp_close; + prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; + prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; + + prot[SOCKMAP_TX] = prot[SOCKMAP_BASE]; + prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg; + prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage; +} + +static void update_sk_prot(struct sock *sk, struct smap_psock *psock) +{ + int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4; + int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE; + + sk->sk_prot = &bpf_tcp_prots[family][conf]; +} + static int bpf_tcp_init(struct sock *sk) { struct smap_psock *psock; @@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk) psock->save_close = sk->sk_prot->close; psock->sk_proto = sk->sk_prot; - if (psock->bpf_tx_msg) { - tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg; - tcp_bpf_proto.sendpage = bpf_tcp_sendpage; - tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg; - tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read; + /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */ + if (sk->sk_family == AF_INET6 && + unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) { + spin_lock_bh(&tcpv6_prot_lock); + if (likely(sk->sk_prot != saved_tcpv6_prot)) { + build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot); + smp_store_release(&saved_tcpv6_prot, sk->sk_prot); + } + spin_unlock_bh(&tcpv6_prot_lock); } - - sk->sk_prot = &tcp_bpf_proto; + update_sk_prot(sk, psock); rcu_read_unlock(); return 0; } @@ -219,16 +260,54 @@ static void bpf_tcp_release(struct sock *sk) rcu_read_unlock(); } +static struct htab_elem *lookup_elem_raw(struct hlist_head *head, + u32 hash, void *key, u32 key_size) +{ + struct htab_elem *l; + + hlist_for_each_entry_rcu(l, head, hash_node) { + if (l->hash == hash && !memcmp(&l->key, key, key_size)) + return l; + } + + return NULL; +} + +static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) +{ + return &htab->buckets[hash & (htab->n_buckets - 1)]; +} + +static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) +{ + return &__select_bucket(htab, hash)->head; +} + static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) { atomic_dec(&htab->count); kfree_rcu(l, rcu); } +static struct smap_psock_map_entry *psock_map_pop(struct sock *sk, + struct smap_psock *psock) +{ + struct smap_psock_map_entry *e; + + spin_lock_bh(&psock->maps_lock); + e = list_first_entry_or_null(&psock->maps, + struct smap_psock_map_entry, + list); + if (e) + list_del(&e->list); + spin_unlock_bh(&psock->maps_lock); + return e; +} + static void bpf_tcp_close(struct sock *sk, long timeout) { void (*close_fun)(struct sock *sk, long timeout); - struct smap_psock_map_entry *e, *tmp; + struct smap_psock_map_entry *e; struct sk_msg_buff *md, *mtmp; struct smap_psock *psock; struct sock *osk; @@ -247,7 +326,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout) */ close_fun = psock->save_close; - write_lock_bh(&sk->sk_callback_lock); if (psock->cork) { free_start_sg(psock->sock, psock->cork); kfree(psock->cork); @@ -260,20 +338,38 @@ static void bpf_tcp_close(struct sock *sk, long timeout) kfree(md); } - list_for_each_entry_safe(e, tmp, &psock->maps, list) { + e = 
psock_map_pop(sk, psock); + while (e) { if (e->entry) { osk = cmpxchg(e->entry, sk, NULL); if (osk == sk) { - list_del(&e->list); smap_release_sock(psock, sk); } } else { - hlist_del_rcu(&e->hash_link->hash_node); - smap_release_sock(psock, e->hash_link->sk); - free_htab_elem(e->htab, e->hash_link); + struct htab_elem *link = rcu_dereference(e->hash_link); + struct bpf_htab *htab = rcu_dereference(e->htab); + struct hlist_head *head; + struct htab_elem *l; + struct bucket *b; + + b = __select_bucket(htab, link->hash); + head = &b->head; + raw_spin_lock_bh(&b->lock); + l = lookup_elem_raw(head, + link->hash, link->key, + htab->map.key_size); + /* If another thread deleted this object skip deletion. + * The refcnt on psock may or may not be zero. + */ + if (l) { + hlist_del_rcu(&link->hash_node); + smap_release_sock(psock, link->sk); + free_htab_elem(htab, link); + } + raw_spin_unlock_bh(&b->lock); } + e = psock_map_pop(sk, psock); } - write_unlock_bh(&sk->sk_callback_lock); rcu_read_unlock(); close_fun(sk, timeout); } @@ -1111,8 +1207,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock, static int bpf_tcp_ulp_register(void) { - tcp_bpf_proto = tcp_prot; - tcp_bpf_proto.close = bpf_tcp_close; + build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot); /* Once BPF TX ULP is registered it is never unregistered. It * will be in the ULP list for the lifetime of the system. Doing * duplicate registers is not a problem. @@ -1357,7 +1452,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock) { if (refcount_dec_and_test(&psock->refcnt)) { tcp_cleanup_ulp(sock); + write_lock_bh(&sock->sk_callback_lock); smap_stop_sock(psock, sock); + write_unlock_bh(&sock->sk_callback_lock); clear_bit(SMAP_TX_RUNNING, &psock->state); rcu_assign_sk_user_data(sock, NULL); call_rcu_sched(&psock->rcu, smap_destroy_psock); @@ -1508,6 +1605,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node) INIT_LIST_HEAD(&psock->maps); INIT_LIST_HEAD(&psock->ingress); refcount_set(&psock->refcnt, 1); + spin_lock_init(&psock->maps_lock); rcu_assign_sk_user_data(sock, psock); sock_hold(sock); @@ -1564,18 +1662,32 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) return ERR_PTR(err); } -static void smap_list_remove(struct smap_psock *psock, - struct sock **entry, - struct htab_elem *hash_link) +static void smap_list_map_remove(struct smap_psock *psock, + struct sock **entry) { struct smap_psock_map_entry *e, *tmp; + spin_lock_bh(&psock->maps_lock); list_for_each_entry_safe(e, tmp, &psock->maps, list) { - if (e->entry == entry || e->hash_link == hash_link) { + if (e->entry == entry) list_del(&e->list); - break; - } } + spin_unlock_bh(&psock->maps_lock); +} + +static void smap_list_hash_remove(struct smap_psock *psock, + struct htab_elem *hash_link) +{ + struct smap_psock_map_entry *e, *tmp; + + spin_lock_bh(&psock->maps_lock); + list_for_each_entry_safe(e, tmp, &psock->maps, list) { + struct htab_elem *c = rcu_dereference(e->hash_link); + + if (c == hash_link) + list_del(&e->list); + } + spin_unlock_bh(&psock->maps_lock); } static void sock_map_free(struct bpf_map *map) @@ -1601,7 +1713,6 @@ static void sock_map_free(struct bpf_map *map) if (!sock) continue; - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); /* This check handles a racing sock event that can get the * sk_callback_lock before this case but after xchg happens @@ -1609,10 +1720,9 @@ static void sock_map_free(struct bpf_map *map) * to be null and queued for garbage collection. 
*/ if (likely(psock)) { - smap_list_remove(psock, &stab->sock_map[i], NULL); + smap_list_map_remove(psock, &stab->sock_map[i]); smap_release_sock(psock, sock); } - write_unlock_bh(&sock->sk_callback_lock); } rcu_read_unlock(); @@ -1661,17 +1771,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key) if (!sock) return -EINVAL; - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); if (!psock) goto out; if (psock->bpf_parse) smap_stop_sock(psock, sock); - smap_list_remove(psock, &stab->sock_map[k], NULL); + smap_list_map_remove(psock, &stab->sock_map[k]); smap_release_sock(psock, sock); out: - write_unlock_bh(&sock->sk_callback_lock); return 0; } @@ -1752,7 +1860,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, } } - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); /* 2. Do not allow inheriting programs if psock exists and has @@ -1809,7 +1916,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, if (err) goto out_free; smap_init_progs(psock, verdict, parse); + write_lock_bh(&sock->sk_callback_lock); smap_start_sock(psock, sock); + write_unlock_bh(&sock->sk_callback_lock); } /* 4. Place psock in sockmap for use and stop any programs on @@ -1819,9 +1928,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, */ if (map_link) { e->entry = map_link; + spin_lock_bh(&psock->maps_lock); list_add_tail(&e->list, &psock->maps); + spin_unlock_bh(&psock->maps_lock); } - write_unlock_bh(&sock->sk_callback_lock); return err; out_free: smap_release_sock(psock, sock); @@ -1832,7 +1942,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, } if (tx_msg) bpf_prog_put(tx_msg); - write_unlock_bh(&sock->sk_callback_lock); kfree(e); return err; } @@ -1869,10 +1978,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops, if (osock) { struct smap_psock *opsock = smap_psock_sk(osock); - write_lock_bh(&osock->sk_callback_lock); - smap_list_remove(opsock, &stab->sock_map[i], NULL); + smap_list_map_remove(opsock, &stab->sock_map[i]); smap_release_sock(opsock, osock); - write_unlock_bh(&osock->sk_callback_lock); } out: return err; @@ -1915,6 +2022,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type) return 0; } +int sockmap_get_from_fd(const union bpf_attr *attr, int type, + struct bpf_prog *prog) +{ + int ufd = attr->target_fd; + struct bpf_map *map; + struct fd f; + int err; + + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + err = sock_map_prog(map, prog, attr->attach_type); + fdput(f); + return err; +} + static void *sock_map_lookup(struct bpf_map *map, void *key) { return NULL; @@ -2043,14 +2168,13 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) return ERR_PTR(err); } -static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) +static void __bpf_htab_free(struct rcu_head *rcu) { - return &htab->buckets[hash & (htab->n_buckets - 1)]; -} + struct bpf_htab *htab; -static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) -{ - return &__select_bucket(htab, hash)->head; + htab = container_of(rcu, struct bpf_htab, rcu); + bpf_map_area_free(htab->buckets); + kfree(htab); } static void sock_hash_free(struct bpf_map *map) @@ -2069,16 +2193,18 @@ static void sock_hash_free(struct bpf_map *map) */ rcu_read_lock(); for (i = 0; i < htab->n_buckets; i++) { - struct hlist_head *head = select_bucket(htab, i); + struct bucket *b = __select_bucket(htab, i); + struct hlist_head *head; struct hlist_node 
*n; struct htab_elem *l; + raw_spin_lock_bh(&b->lock); + head = &b->head; hlist_for_each_entry_safe(l, n, head, hash_node) { struct sock *sock = l->sk; struct smap_psock *psock; hlist_del_rcu(&l->hash_node); - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); /* This check handles a racing sock event that can get * the sk_callback_lock before this case but after xchg @@ -2086,16 +2212,15 @@ static void sock_hash_free(struct bpf_map *map) * (psock) to be null and queued for garbage collection. */ if (likely(psock)) { - smap_list_remove(psock, NULL, l); + smap_list_hash_remove(psock, l); smap_release_sock(psock, sock); } - write_unlock_bh(&sock->sk_callback_lock); - kfree(l); + free_htab_elem(htab, l); } + raw_spin_unlock_bh(&b->lock); } rcu_read_unlock(); - bpf_map_area_free(htab->buckets); - kfree(htab); + call_rcu(&htab->rcu, __bpf_htab_free); } static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, @@ -2122,19 +2247,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, return l_new; } -static struct htab_elem *lookup_elem_raw(struct hlist_head *head, - u32 hash, void *key, u32 key_size) -{ - struct htab_elem *l; - - hlist_for_each_entry_rcu(l, head, hash_node) { - if (l->hash == hash && !memcmp(&l->key, key, key_size)) - return l; - } - - return NULL; -} - static inline u32 htab_map_hash(const void *key, u32 key_len) { return jhash(key, key_len, 0); @@ -2254,9 +2366,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, goto bucket_err; } - e->hash_link = l_new; - e->htab = container_of(map, struct bpf_htab, map); + rcu_assign_pointer(e->hash_link, l_new); + rcu_assign_pointer(e->htab, + container_of(map, struct bpf_htab, map)); + spin_lock_bh(&psock->maps_lock); list_add_tail(&e->list, &psock->maps); + spin_unlock_bh(&psock->maps_lock); /* add new element to the head of the list, so that * concurrent search will find it before old elem @@ -2266,7 +2381,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, psock = smap_psock_sk(l_old->sk); hlist_del_rcu(&l_old->hash_node); - smap_list_remove(psock, NULL, l_old); + smap_list_hash_remove(psock, l_old); smap_release_sock(psock, l_old->sk); free_htab_elem(htab, l_old); } @@ -2326,7 +2441,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) struct smap_psock *psock; hlist_del_rcu(&l->hash_node); - write_lock_bh(&sock->sk_callback_lock); psock = smap_psock_sk(sock); /* This check handles a racing sock event that can get the * sk_callback_lock before this case but after xchg happens @@ -2334,10 +2448,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) * to be null and queued for garbage collection. 
*/ if (likely(psock)) { - smap_list_remove(psock, NULL, l); + smap_list_hash_remove(psock, l); smap_release_sock(psock, sock); } - write_unlock_bh(&sock->sk_callback_lock); free_htab_elem(htab, l); ret = 0; } @@ -2383,6 +2496,7 @@ const struct bpf_map_ops sock_hash_ops = { .map_get_next_key = sock_hash_get_next_key, .map_update_elem = sock_hash_update_elem, .map_delete_elem = sock_hash_delete_elem, + .map_release_uref = sock_map_release, }; BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 0fa20624707f..d10ecd78105f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1034,14 +1034,9 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu) static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) { if (atomic_dec_and_test(&prog->aux->refcnt)) { - int i; - /* bpf_prog_free_id() must be called first */ bpf_prog_free_id(prog, do_idr_lock); - - for (i = 0; i < prog->aux->func_cnt; i++) - bpf_prog_kallsyms_del(prog->aux->func[i]); - bpf_prog_kallsyms_del(prog); + bpf_prog_kallsyms_del_all(prog); call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); } @@ -1358,9 +1353,7 @@ static int bpf_prog_load(union bpf_attr *attr) if (err < 0) goto free_used_maps; - /* eBPF program is ready to be JITed */ - if (!prog->bpf_func) - prog = bpf_prog_select_runtime(prog, &err); + prog = bpf_prog_select_runtime(prog, &err); if (err < 0) goto free_used_maps; @@ -1384,6 +1377,7 @@ static int bpf_prog_load(union bpf_attr *attr) return err; free_used_maps: + bpf_prog_kallsyms_del_subprogs(prog); free_used_maps(prog->aux); free_prog: bpf_prog_uncharge_memlock(prog); @@ -1489,8 +1483,6 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr) return err; } -#ifdef CONFIG_CGROUP_BPF - static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, enum bpf_attach_type attach_type) { @@ -1505,40 +1497,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, #define BPF_PROG_ATTACH_LAST_FIELD attach_flags -static int sockmap_get_from_fd(const union bpf_attr *attr, - int type, bool attach) -{ - struct bpf_prog *prog = NULL; - int ufd = attr->target_fd; - struct bpf_map *map; - struct fd f; - int err; - - f = fdget(ufd); - map = __bpf_map_get(f); - if (IS_ERR(map)) - return PTR_ERR(map); - - if (attach) { - prog = bpf_prog_get_type(attr->attach_bpf_fd, type); - if (IS_ERR(prog)) { - fdput(f); - return PTR_ERR(prog); - } - } - - err = sock_map_prog(map, prog, attr->attach_type); - if (err) { - fdput(f); - if (prog) - bpf_prog_put(prog); - return err; - } - - fdput(f); - return 0; -} - #define BPF_F_ATTACH_MASK \ (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) @@ -1546,7 +1504,6 @@ static int bpf_prog_attach(const union bpf_attr *attr) { enum bpf_prog_type ptype; struct bpf_prog *prog; - struct cgroup *cgrp; int ret; if (!capable(CAP_NET_ADMIN)) @@ -1583,12 +1540,15 @@ static int bpf_prog_attach(const union bpf_attr *attr) ptype = BPF_PROG_TYPE_CGROUP_DEVICE; break; case BPF_SK_MSG_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true); + ptype = BPF_PROG_TYPE_SK_MSG; + break; case BPF_SK_SKB_STREAM_PARSER: case BPF_SK_SKB_STREAM_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true); + ptype = BPF_PROG_TYPE_SK_SKB; + break; case BPF_LIRC_MODE2: - return lirc_prog_attach(attr); + ptype = BPF_PROG_TYPE_LIRC_MODE2; + break; default: return -EINVAL; } @@ -1602,18 +1562,20 @@ static int bpf_prog_attach(const union bpf_attr *attr) return -EINVAL; } - cgrp = 
cgroup_get_from_fd(attr->target_fd); - if (IS_ERR(cgrp)) { - bpf_prog_put(prog); - return PTR_ERR(cgrp); + switch (ptype) { + case BPF_PROG_TYPE_SK_SKB: + case BPF_PROG_TYPE_SK_MSG: + ret = sockmap_get_from_fd(attr, ptype, prog); + break; + case BPF_PROG_TYPE_LIRC_MODE2: + ret = lirc_prog_attach(attr, prog); + break; + default: + ret = cgroup_bpf_prog_attach(attr, ptype, prog); } - ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, - attr->attach_flags); if (ret) bpf_prog_put(prog); - cgroup_put(cgrp); - return ret; } @@ -1622,9 +1584,6 @@ static int bpf_prog_attach(const union bpf_attr *attr) static int bpf_prog_detach(const union bpf_attr *attr) { enum bpf_prog_type ptype; - struct bpf_prog *prog; - struct cgroup *cgrp; - int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -1657,29 +1616,17 @@ static int bpf_prog_detach(const union bpf_attr *attr) ptype = BPF_PROG_TYPE_CGROUP_DEVICE; break; case BPF_SK_MSG_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false); + return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL); case BPF_SK_SKB_STREAM_PARSER: case BPF_SK_SKB_STREAM_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false); + return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL); case BPF_LIRC_MODE2: return lirc_prog_detach(attr); default: return -EINVAL; } - cgrp = cgroup_get_from_fd(attr->target_fd); - if (IS_ERR(cgrp)) - return PTR_ERR(cgrp); - - prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); - if (IS_ERR(prog)) - prog = NULL; - - ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); - if (prog) - bpf_prog_put(prog); - cgroup_put(cgrp); - return ret; + return cgroup_bpf_prog_detach(attr, ptype); } #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt @@ -1687,9 +1634,6 @@ static int bpf_prog_detach(const union bpf_attr *attr) static int bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { - struct cgroup *cgrp; - int ret; - if (!capable(CAP_NET_ADMIN)) return -EPERM; if (CHECK_ATTR(BPF_PROG_QUERY)) @@ -1717,14 +1661,9 @@ static int bpf_prog_query(const union bpf_attr *attr, default: return -EINVAL; } - cgrp = cgroup_get_from_fd(attr->query.target_fd); - if (IS_ERR(cgrp)) - return PTR_ERR(cgrp); - ret = cgroup_bpf_query(cgrp, attr, uattr); - cgroup_put(cgrp); - return ret; + + return cgroup_bpf_prog_query(attr, uattr); } -#endif /* CONFIG_CGROUP_BPF */ #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration @@ -2371,7 +2310,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_OBJ_GET: err = bpf_obj_get(&attr); break; -#ifdef CONFIG_CGROUP_BPF case BPF_PROG_ATTACH: err = bpf_prog_attach(&attr); break; @@ -2381,7 +2319,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_PROG_QUERY: err = bpf_prog_query(&attr, uattr); break; -#endif case BPF_PROG_TEST_RUN: err = bpf_prog_test_run(&attr, uattr); break; diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig new file mode 100644 index 000000000000..9bd54304446f --- /dev/null +++ b/kernel/dma/Kconfig @@ -0,0 +1,50 @@ + +config HAS_DMA + bool + depends on !NO_DMA + default y + +config NEED_SG_DMA_LENGTH + bool + +config NEED_DMA_MAP_STATE + bool + +config ARCH_DMA_ADDR_T_64BIT + def_bool 64BIT || PHYS_ADDR_T_64BIT + +config HAVE_GENERIC_DMA_COHERENT + bool + +config ARCH_HAS_SYNC_DMA_FOR_DEVICE + bool + +config ARCH_HAS_SYNC_DMA_FOR_CPU + bool + select NEED_DMA_MAP_STATE + +config DMA_DIRECT_OPS + bool + depends on HAS_DMA + +config DMA_NONCOHERENT_OPS + bool + depends on HAS_DMA 
+ select DMA_DIRECT_OPS + +config DMA_NONCOHERENT_MMAP + bool + depends on DMA_NONCOHERENT_OPS + +config DMA_NONCOHERENT_CACHE_SYNC + bool + depends on DMA_NONCOHERENT_OPS + +config DMA_VIRT_OPS + bool + depends on HAS_DMA + +config SWIOTLB + bool + select DMA_DIRECT_OPS + select NEED_DMA_MAP_STATE diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile new file mode 100644 index 000000000000..6de44e4eb454 --- /dev/null +++ b/kernel/dma/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_HAS_DMA) += mapping.o +obj-$(CONFIG_DMA_CMA) += contiguous.o +obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o +obj-$(CONFIG_DMA_DIRECT_OPS) += direct.o +obj-$(CONFIG_DMA_NONCOHERENT_OPS) += noncoherent.o +obj-$(CONFIG_DMA_VIRT_OPS) += virt.o +obj-$(CONFIG_DMA_API_DEBUG) += debug.o +obj-$(CONFIG_SWIOTLB) += swiotlb.o + diff --git a/drivers/base/dma-coherent.c b/kernel/dma/coherent.c similarity index 100% rename from drivers/base/dma-coherent.c rename to kernel/dma/coherent.c diff --git a/drivers/base/dma-contiguous.c b/kernel/dma/contiguous.c similarity index 100% rename from drivers/base/dma-contiguous.c rename to kernel/dma/contiguous.c diff --git a/lib/dma-debug.c b/kernel/dma/debug.c similarity index 100% rename from lib/dma-debug.c rename to kernel/dma/debug.c diff --git a/lib/dma-direct.c b/kernel/dma/direct.c similarity index 100% rename from lib/dma-direct.c rename to kernel/dma/direct.c diff --git a/drivers/base/dma-mapping.c b/kernel/dma/mapping.c similarity index 99% rename from drivers/base/dma-mapping.c rename to kernel/dma/mapping.c index f831a582209c..d2a92ddaac4d 100644 --- a/drivers/base/dma-mapping.c +++ b/kernel/dma/mapping.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * drivers/base/dma-mapping.c - arch-independent dma-mapping routines + * arch-independent dma-mapping routines * * Copyright (c) 2006 SUSE Linux Products GmbH * Copyright (c) 2006 Tejun Heo diff --git a/lib/dma-noncoherent.c b/kernel/dma/noncoherent.c similarity index 100% rename from lib/dma-noncoherent.c rename to kernel/dma/noncoherent.c diff --git a/lib/swiotlb.c b/kernel/dma/swiotlb.c similarity index 99% rename from lib/swiotlb.c rename to kernel/dma/swiotlb.c index 04b68d9dffac..904541055792 100644 --- a/lib/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -1085,3 +1085,4 @@ const struct dma_map_ops swiotlb_dma_ops = { .unmap_page = swiotlb_unmap_page, .dma_supported = dma_direct_supported, }; +EXPORT_SYMBOL(swiotlb_dma_ops); diff --git a/lib/dma-virt.c b/kernel/dma/virt.c similarity index 98% rename from lib/dma-virt.c rename to kernel/dma/virt.c index 8e61a02ef9ca..631ddec4b60a 100644 --- a/lib/dma-virt.c +++ b/kernel/dma/virt.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 /* - * lib/dma-virt.c - * * DMA operations that map to virtual addresses without flushing memory. 
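These are plain moves: the coherent, contiguous, mapping, debug, noncoherent, swiotlb and virt implementations now live under kernel/dma/, while the dma-mapping API seen by drivers is untouched. A minimal consumer sketch, unchanged by the move (the device pointer and buffer length are placeholders):

#include <linux/dma-mapping.h>

static int example_dma_buffer(struct device *dev, size_t len)
{
        dma_addr_t dma_handle;
        void *cpu_addr;

        cpu_addr = dma_alloc_coherent(dev, len, &dma_handle, GFP_KERNEL);
        if (!cpu_addr)
                return -ENOMEM;

        /* ... program dma_handle into the device, do the transfer ... */

        dma_free_coherent(dev, len, cpu_addr, dma_handle);
        return 0;
}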
*/ #include diff --git a/kernel/events/core.c b/kernel/events/core.c index 80cca2b30c4f..8f0434a9951a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6482,7 +6482,7 @@ void perf_prepare_sample(struct perf_event_header *header, data->phys_addr = perf_virt_to_phys(data->addr); } -static void __always_inline +static __always_inline void __perf_event_output(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs, diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 045a37e9ddee..5d3cf407e374 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -103,7 +103,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle) preempt_enable(); } -static bool __always_inline +static __always_inline bool ring_buffer_has_space(unsigned long head, unsigned long tail, unsigned long data_size, unsigned int size, bool backward) @@ -114,7 +114,7 @@ ring_buffer_has_space(unsigned long head, unsigned long tail, return CIRC_SPACE(tail, head, data_size) >= size; } -static int __always_inline +static __always_inline int __perf_output_begin(struct perf_output_handle *handle, struct perf_event *event, unsigned int size, bool backward) @@ -414,7 +414,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle, } EXPORT_SYMBOL_GPL(perf_aux_output_begin); -static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb) +static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb) { if (rb->aux_overwrite) return false; diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index 4dadeb3d6666..6f636136cccc 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -55,6 +55,7 @@ static const struct irq_bit_descr irqchip_flags[] = { BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE), BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE), BIT_MASK_DESCR(IRQCHIP_EOI_THREADED), + BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI), }; static void diff --git a/kernel/kthread.c b/kernel/kthread.c index 481951bf091d..750cb8082694 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -177,9 +177,20 @@ void *kthread_probe_data(struct task_struct *task) static void __kthread_parkme(struct kthread *self) { for (;;) { - set_current_state(TASK_PARKED); + /* + * TASK_PARKED is a special state; we must serialize against + * possible pending wakeups to avoid store-store collisions on + * task->state. + * + * Such a collision might possibly result in the task state + * changin from TASK_PARKED and us failing the + * wait_task_inactive() in kthread_park(). + */ + set_special_state(TASK_PARKED); if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) break; + + complete_all(&self->parked); schedule(); } __set_current_state(TASK_RUNNING); @@ -191,11 +202,6 @@ void kthread_parkme(void) } EXPORT_SYMBOL_GPL(kthread_parkme); -void kthread_park_complete(struct task_struct *k) -{ - complete_all(&to_kthread(k)->parked); -} - static int kthread(void *_create) { /* Copy data: it's on kthread's stack */ @@ -461,6 +467,9 @@ void kthread_unpark(struct task_struct *k) reinit_completion(&kthread->parked); clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + /* + * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup. 
+ */ wake_up_state(k, TASK_PARKED); } EXPORT_SYMBOL_GPL(kthread_unpark); @@ -487,7 +496,16 @@ int kthread_park(struct task_struct *k) set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); if (k != current) { wake_up_process(k); + /* + * Wait for __kthread_parkme() to complete(), this means we + * _will_ have TASK_PARKED and are about to call schedule(). + */ wait_for_completion(&kthread->parked); + /* + * Now wait for that schedule() to complete and the task to + * get scheduled out. + */ + WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED)); } return 0; diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index edcac5de7ebc..5fa4d3138bf1 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) this.parent = NULL; this.class = class; - local_irq_save(flags); + raw_local_irq_save(flags); arch_spin_lock(&lockdep_lock); ret = __lockdep_count_forward_deps(&this); arch_spin_unlock(&lockdep_lock); - local_irq_restore(flags); + raw_local_irq_restore(flags); return ret; } @@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) this.parent = NULL; this.class = class; - local_irq_save(flags); + raw_local_irq_save(flags); arch_spin_lock(&lockdep_lock); ret = __lockdep_count_backward_deps(&this); arch_spin_unlock(&lockdep_lock); - local_irq_restore(flags); + raw_local_irq_restore(flags); return ret; } @@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) if (unlikely(!debug_locks)) return; - local_irq_save(flags); + raw_local_irq_save(flags); for (i = 0; i < curr->lockdep_depth; i++) { hlock = curr->held_locks + i; @@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); break; } - local_irq_restore(flags); + raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index bc1e507be9ff..776308d2fa9e 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem) might_sleep(); __down_read(sem); + rwsem_set_reader_owned(sem); } EXPORT_SYMBOL(down_read_non_owner); diff --git a/kernel/rseq.c b/kernel/rseq.c index ae306f90c514..c6242d8594dc 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t) { u32 cpu_id = raw_smp_processor_id(); - if (__put_user(cpu_id, &t->rseq->cpu_id_start)) + if (put_user(cpu_id, &t->rseq->cpu_id_start)) return -EFAULT; - if (__put_user(cpu_id, &t->rseq->cpu_id)) + if (put_user(cpu_id, &t->rseq->cpu_id)) return -EFAULT; trace_rseq_update(t); return 0; @@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t) /* * Reset cpu_id_start to its initial state (0). */ - if (__put_user(cpu_id_start, &t->rseq->cpu_id_start)) + if (put_user(cpu_id_start, &t->rseq->cpu_id_start)) return -EFAULT; /* * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming * in after unregistration can figure out that rseq needs to be * registered again. 
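For context on the kthread changes above, the tightened park handshake is driven through the regular kthread API; a sketch of a parkable worker (my_do_work() is a placeholder, not part of this patch):

#include <linux/delay.h>
#include <linux/kthread.h>

static void my_do_work(void *data)
{
        /* placeholder for the thread's real work */
}

/* kthread_park() on the controller side now returns only once this
 * thread is really scheduled out in TASK_PARKED (the wait_task_inactive()
 * added above), so the controller can safely poke at it before unparking.
 */
static int my_worker(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park()) {
                        kthread_parkme();       /* sleeps in TASK_PARKED */
                        continue;
                }
                my_do_work(data);
                msleep(10);
        }
        return 0;
}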
*/ - if (__put_user(cpu_id, &t->rseq->cpu_id)) + if (put_user(cpu_id, &t->rseq->cpu_id)) return -EFAULT; return 0; } @@ -115,29 +115,36 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t) static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) { struct rseq_cs __user *urseq_cs; - unsigned long ptr; + u64 ptr; u32 __user *usig; u32 sig; int ret; - ret = __get_user(ptr, &t->rseq->rseq_cs); - if (ret) - return ret; + if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr))) + return -EFAULT; if (!ptr) { memset(rseq_cs, 0, sizeof(*rseq_cs)); return 0; } - urseq_cs = (struct rseq_cs __user *)ptr; + if (ptr >= TASK_SIZE) + return -EINVAL; + urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr; if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs))) return -EFAULT; - if (rseq_cs->version > 0) - return -EINVAL; + if (rseq_cs->start_ip >= TASK_SIZE || + rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE || + rseq_cs->abort_ip >= TASK_SIZE || + rseq_cs->version > 0) + return -EINVAL; + /* Check for overflow. */ + if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip) + return -EINVAL; /* Ensure that abort_ip is not in the critical section. */ if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset) return -EINVAL; - usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32)); + usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32)); ret = get_user(sig, usig); if (ret) return ret; @@ -146,7 +153,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) printk_ratelimited(KERN_WARNING "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n", sig, current->rseq_sig, current->pid, usig); - return -EPERM; + return -EINVAL; } return 0; } @@ -157,7 +164,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags) int ret; /* Get thread flags. */ - ret = __get_user(flags, &t->rseq->flags); + ret = get_user(flags, &t->rseq->flags); if (ret) return ret; @@ -195,9 +202,11 @@ static int clear_rseq_cs(struct task_struct *t) * of code outside of the rseq assembly block. This performs * a lazy clear of the rseq_cs field. * - * Set rseq_cs to NULL with single-copy atomicity. + * Set rseq_cs to NULL. */ - return __put_user(0UL, &t->rseq->rseq_cs); + if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64))) + return -EFAULT; + return 0; } /* @@ -251,10 +260,10 @@ static int rseq_ip_fixup(struct pt_regs *regs) * respect to other threads scheduled on the same CPU, and with respect * to signal handlers. */ -void __rseq_handle_notify_resume(struct pt_regs *regs) +void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs) { struct task_struct *t = current; - int ret; + int ret, sig; if (unlikely(t->flags & PF_EXITING)) return; @@ -268,7 +277,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs) return; error: - force_sig(SIGSEGV, t); + sig = ksig ? 
ksig->sig : 0; + force_sigsegv(sig, t); } #ifdef CONFIG_DEBUG_RSEQ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 78d8facba456..fe365c9a08e9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7,7 +7,6 @@ */ #include "sched.h" -#include #include #include @@ -2724,28 +2723,20 @@ static struct rq *finish_task_switch(struct task_struct *prev) membarrier_mm_sync_core_before_usermode(mm); mmdrop(mm); } - if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) { - switch (prev_state) { - case TASK_DEAD: - if (prev->sched_class->task_dead) - prev->sched_class->task_dead(prev); + if (unlikely(prev_state == TASK_DEAD)) { + if (prev->sched_class->task_dead) + prev->sched_class->task_dead(prev); - /* - * Remove function-return probe instances associated with this - * task and put them back on the free list. - */ - kprobe_flush_task(prev); + /* + * Remove function-return probe instances associated with this + * task and put them back on the free list. + */ + kprobe_flush_task(prev); - /* Task is done with its stack. */ - put_task_stack(prev); + /* Task is done with its stack. */ + put_task_stack(prev); - put_task_struct(prev); - break; - - case TASK_PARKED: - kthread_park_complete(prev); - break; - } + put_task_struct(prev); } tick_nohz_task_switch(); @@ -3113,7 +3104,9 @@ static void sched_tick_remote(struct work_struct *work) struct tick_work *twork = container_of(dwork, struct tick_work, work); int cpu = twork->cpu; struct rq *rq = cpu_rq(cpu); + struct task_struct *curr; struct rq_flags rf; + u64 delta; /* * Handle the tick only if it appears the remote CPU is running in full @@ -3122,24 +3115,28 @@ static void sched_tick_remote(struct work_struct *work) * statistics and checks timeslices in a time-independent way, regardless * of when exactly it is running. */ - if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) { - struct task_struct *curr; - u64 delta; + if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu)) + goto out_requeue; - rq_lock_irq(rq, &rf); - update_rq_clock(rq); - curr = rq->curr; - delta = rq_clock_task(rq) - curr->se.exec_start; + rq_lock_irq(rq, &rf); + curr = rq->curr; + if (is_idle_task(curr)) + goto out_unlock; - /* - * Make sure the next tick runs within a reasonable - * amount of time. - */ - WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); - curr->sched_class->task_tick(rq, curr, 0); - rq_unlock_irq(rq, &rf); - } + update_rq_clock(rq); + delta = rq_clock_task(rq) - curr->se.exec_start; + /* + * Make sure the next tick runs within a reasonable + * amount of time. + */ + WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); + curr->sched_class->task_tick(rq, curr, 0); + +out_unlock: + rq_unlock_irq(rq, &rf); + +out_requeue: /* * Run the remote tick once per second (1Hz). 
This arbitrary * frequency is large enough to avoid overload but short enough diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 3cde46483f0a..c907fde01eaa 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu) { struct rq *rq = cpu_rq(sg_cpu->cpu); - if (rq->rt.rt_nr_running) + if (rt_rq_is_runnable(&rq->rt)) return sg_cpu->max; /* diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1866e64792a7..2f0a0be4d344 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3982,18 +3982,10 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) if (!sched_feat(UTIL_EST)) return; - /* - * Update root cfs_rq's estimated utilization - * - * If *p is the last task then the root cfs_rq's estimated utilization - * of a CPU is 0 by definition. - */ - ue.enqueued = 0; - if (cfs_rq->nr_running) { - ue.enqueued = cfs_rq->avg.util_est.enqueued; - ue.enqueued -= min_t(unsigned int, ue.enqueued, - (_task_util_est(p) | UTIL_AVG_UNCHANGED)); - } + /* Update root cfs_rq's estimated utilization */ + ue.enqueued = cfs_rq->avg.util_est.enqueued; + ue.enqueued -= min_t(unsigned int, ue.enqueued, + (_task_util_est(p) | UTIL_AVG_UNCHANGED)); WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); /* @@ -4590,6 +4582,7 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) now = sched_clock_cpu(smp_processor_id()); cfs_b->runtime = cfs_b->quota; cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); + cfs_b->expires_seq++; } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) @@ -4612,6 +4605,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) struct task_group *tg = cfs_rq->tg; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); u64 amount = 0, min_amount, expires; + int expires_seq; /* note: this is a positive sum as runtime_remaining <= 0 */ min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; @@ -4628,6 +4622,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) cfs_b->idle = 0; } } + expires_seq = cfs_b->expires_seq; expires = cfs_b->runtime_expires; raw_spin_unlock(&cfs_b->lock); @@ -4637,8 +4632,10 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) * spread between our sched_clock and the one on which runtime was * issued. */ - if ((s64)(expires - cfs_rq->runtime_expires) > 0) + if (cfs_rq->expires_seq != expires_seq) { + cfs_rq->expires_seq = expires_seq; cfs_rq->runtime_expires = expires; + } return cfs_rq->runtime_remaining > 0; } @@ -4664,12 +4661,9 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) * has not truly expired. * * Fortunately we can check determine whether this the case by checking - * whether the global deadline has advanced. It is valid to compare - * cfs_b->runtime_expires without any locks since we only care about - * exact equality, so a partial write will still work. + * whether the global deadline(cfs_b->expires_seq) has advanced. 
*/ - - if (cfs_rq->runtime_expires != cfs_b->runtime_expires) { + if (cfs_rq->expires_seq == cfs_b->expires_seq) { /* extend local deadline, drift is bounded above by 2 ticks */ cfs_rq->runtime_expires += TICK_NSEC; } else { @@ -5202,13 +5196,18 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { + u64 overrun; + lockdep_assert_held(&cfs_b->lock); - if (!cfs_b->period_active) { - cfs_b->period_active = 1; - hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); - hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); - } + if (cfs_b->period_active) + return; + + cfs_b->period_active = 1; + overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); + cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period); + cfs_b->expires_seq++; + hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); } static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 47556b0c9a95..572567078b60 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) rt_se = rt_rq->tg->rt_se[cpu]; - if (!rt_se) + if (!rt_se) { dequeue_top_rt_rq(rt_rq); + /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ + cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); + } else if (on_rt_rq(rt_se)) dequeue_rt_entity(rt_se, 0); } @@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq) sub_nr_running(rq, rt_rq->rt_nr_running); rt_rq->rt_queued = 0; - /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ - cpufreq_update_util(rq, 0); } static void @@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq) if (rt_rq->rt_queued) return; - if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running) + + if (rt_rq_throttled(rt_rq)) return; - add_nr_running(rq, rt_rq->rt_nr_running); - rt_rq->rt_queued = 1; + if (rt_rq->rt_nr_running) { + add_nr_running(rq, rt_rq->rt_nr_running); + rt_rq->rt_queued = 1; + } /* Kick cpufreq (see the comment in kernel/sched/sched.h). 
*/ cpufreq_update_util(rq, 0); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6601baf2361c..c7742dcc136c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -334,9 +334,10 @@ struct cfs_bandwidth { u64 runtime; s64 hierarchical_quota; u64 runtime_expires; + int expires_seq; - int idle; - int period_active; + short idle; + short period_active; struct hrtimer period_timer; struct hrtimer slack_timer; struct list_head throttled_cfs_rq; @@ -551,6 +552,7 @@ struct cfs_rq { #ifdef CONFIG_CFS_BANDWIDTH int runtime_enabled; + int expires_seq; u64 runtime_expires; s64 runtime_remaining; @@ -609,6 +611,11 @@ struct rt_rq { #endif }; +static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) +{ + return rt_rq->rt_queued && rt_rq->rt_nr_running; +} + /* Deadline class' related fields in a runqueue */ struct dl_rq { /* runqueue is an rbtree, ordered by deadline */ diff --git a/kernel/softirq.c b/kernel/softirq.c index de2f57fddc04..900dcfee542c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -139,9 +139,13 @@ static void __local_bh_enable(unsigned int cnt) { lockdep_assert_irqs_disabled(); + if (preempt_count() == cnt) + trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); + if (softirq_count() == (cnt & SOFTIRQ_MASK)) trace_softirqs_on(_RET_IP_); - preempt_count_sub(cnt); + + __preempt_count_sub(cnt); } /* diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 055a4a728c00..3e93c54bd3a1 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -1659,7 +1659,7 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) { switch(restart->nanosleep.type) { -#ifdef CONFIG_COMPAT +#ifdef CONFIG_COMPAT_32BIT_TIME case TT_COMPAT: if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp)) return -EFAULT; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 5a6251ac6f7a..9cdf54b04ca8 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -604,7 +604,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, /* * Disarm any old timer after extracting its expiry time. */ - lockdep_assert_irqs_disabled(); ret = 0; old_incr = timer->it.cpu.incr; @@ -1049,7 +1048,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer) /* * Now re-arm for the new expiry time. 
*/ - lockdep_assert_irqs_disabled(); arm_timer(timer); unlock: unlock_task_sighand(p, &flags); diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index b7005dd21ec1..14de3727b18e 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -277,8 +277,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev, */ return !curdev || newdev->rating > curdev->rating || - (!cpumask_equal(curdev->cpumask, newdev->cpumask) && - !tick_check_percpu(curdev, newdev, smp_processor_id())); + !cpumask_equal(curdev->cpumask, newdev->cpumask); } /* diff --git a/kernel/time/time.c b/kernel/time/time.c index 6fa99213fc72..2b41e8e2d31d 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -28,6 +28,7 @@ */ #include +#include #include #include #include @@ -314,9 +315,10 @@ unsigned int jiffies_to_msecs(const unsigned long j) return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); #else # if BITS_PER_LONG == 32 - return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32; + return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >> + HZ_TO_MSEC_SHR32; # else - return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN; + return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN); # endif #endif } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index efed9c1cfb7e..caf9cbf35816 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -192,17 +192,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, op->saved_func(ip, parent_ip, op, regs); } -/** - * clear_ftrace_function - reset the ftrace function - * - * This NULLs the ftrace function and in essence stops - * tracing. There may be lag - */ -void clear_ftrace_function(void) -{ - ftrace_trace_function = ftrace_stub; -} - static void ftrace_sync(struct work_struct *work) { /* @@ -6689,7 +6678,7 @@ void ftrace_kill(void) { ftrace_disabled = 1; ftrace_enabled = 0; - clear_ftrace_function(); + ftrace_trace_function = ftrace_stub; } /** diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c9336e98ac59..87cf25171fb8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1360,8 +1360,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { - struct ring_buffer *buf; - if (tr->stop_count) return; @@ -1375,9 +1373,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) arch_spin_lock(&tr->max_lock); - buf = tr->trace_buffer.buffer; - tr->trace_buffer.buffer = tr->max_buffer.buffer; - tr->max_buffer.buffer = buf; + swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); @@ -2957,6 +2953,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) } EXPORT_SYMBOL_GPL(trace_vbprintk); +__printf(3, 0) static int __trace_array_vprintk(struct ring_buffer *buffer, unsigned long ip, const char *fmt, va_list args) @@ -3011,12 +3008,14 @@ __trace_array_vprintk(struct ring_buffer *buffer, return len; } +__printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); } +__printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { @@ -3032,6 +3031,7 @@ int trace_array_printk(struct trace_array *tr, return ret; } +__printf(3, 4) int trace_array_printk_buf(struct ring_buffer *buffer, unsigned long ip, const char *fmt, ...) 
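The jiffies_to_msecs() change above switches both fallback branches from truncating division to rounding up, so a non-zero number of jiffies can no longer be reported as 0 ms (previously possible with HZ above 1000). A quick userspace-style check of the arithmetic, assuming HZ=300, for which HZ_TO_MSEC_NUM/HZ_TO_MSEC_DEN reduces to 10/3:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* One jiffy at HZ=300 is 3.33 ms: the old expression reported 3 ms,
 * the rounded-up version reports 4 ms.
 */
int main(void)
{
        unsigned long j = 1;

        printf("old: %lu ms\n", (j * 10) / 3);            /* 3 */
        printf("new: %lu ms\n", DIV_ROUND_UP(j * 10, 3)); /* 4 */
        return 0;
}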
{ @@ -3047,6 +3047,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer, return ret; } +__printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); @@ -3364,8 +3365,8 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, print_event_info(buf, m); - seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); - seq_printf(m, "# | | | %s | |\n", tgid ? " | " : ""); + seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); + seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, @@ -3385,9 +3386,9 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file tgid ? tgid_space : space); seq_printf(m, "# %s||| / delay\n", tgid ? tgid_space : space); - seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n", + seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : space); - seq_printf(m, "# | | | %s|||| | |\n", + seq_printf(m, "# | | %s | |||| | |\n", tgid ? " | " : space); } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 630c5a24b2b2..f8f86231ad90 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -583,9 +583,7 @@ static __always_inline void trace_clear_recursion(int bit) static inline struct ring_buffer_iter * trace_buffer_iter(struct trace_iterator *iter, int cpu) { - if (iter->buffer_iter && iter->buffer_iter[cpu]) - return iter->buffer_iter[cpu]; - return NULL; + return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL; } int tracer_init(struct tracer *t, struct trace_array *tr); diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index e1c818dbc0d7..893a206bcba4 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -78,7 +78,8 @@ static const char * ops[] = { OPS }; C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \ C(INVALID_FILTER, "Meaningless filter expression"), \ C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \ - C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), + C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \ + C(NO_FILTER, "No filter found"), #undef C #define C(a, b) FILT_ERR_##a @@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, goto out_free; } + if (!N) { + /* No program? */ + ret = -EINVAL; + parse_error(pe, FILT_ERR_NO_FILTER, ptr - str); + goto out_free; + } + prog[N].pred = NULL; /* #13 */ prog[N].target = 1; /* TRUE */ prog[N+1].pred = NULL; @@ -1693,6 +1701,7 @@ static void create_filter_finish(struct filter_parse_error *pe) * @filter_str: filter string * @set_str: remember @filter_str and enable detailed error in filter * @filterp: out param for created filter (always updated on return) + * Must be a pointer that references a NULL pointer. * * Creates a filter for @call with @filter_str. If @set_str is %true, * @filter_str is copied and recorded in the new filter. 
@@ -1710,6 +1719,10 @@ static int create_filter(struct trace_event_call *call, struct filter_parse_error *pe = NULL; int err; + /* filterp must point to NULL */ + if (WARN_ON(*filterp)) + *filterp = NULL; + err = create_filter_start(filter_string, set_str, &pe, filterp); if (err) return err; diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 046c716a6536..aae18af94c94 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -393,7 +393,7 @@ static void hist_err_event(char *str, char *system, char *event, char *var) else if (system) snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event); else - strncpy(err, var, MAX_FILTER_STR_VAL); + strscpy(err, var, MAX_FILTER_STR_VAL); hist_err(str, err); } diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 23c0b0cb5fb9..169b3c44ee97 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -831,6 +831,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, struct ftrace_graph_ret *graph_ret; struct ftrace_graph_ent *call; unsigned long long duration; + int cpu = iter->cpu; int i; graph_ret = &ret_entry->ret; @@ -839,7 +840,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, if (data) { struct fgraph_cpu_data *cpu_data; - int cpu = iter->cpu; cpu_data = per_cpu_ptr(data->cpu_data, cpu); @@ -869,6 +869,9 @@ print_graph_entry_leaf(struct trace_iterator *iter, trace_seq_printf(s, "%ps();\n", (void *)call->func); + print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET, + cpu, iter->ent->pid, flags); + return trace_handle_return(s); } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index daa81571b22a..21f718472942 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1480,8 +1480,10 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs, } ret = __register_trace_kprobe(tk); - if (ret < 0) + if (ret < 0) { + kfree(tk->tp.call.print_fmt); goto error; + } return &tk->tp.call; error: @@ -1501,6 +1503,8 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call) } __unregister_trace_kprobe(tk); + + kfree(tk->tp.call.print_fmt); free_trace_kprobe(tk); } #endif /* CONFIG_PERF_EVENTS */ diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 90db994ac900..1c8e30fda46a 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -594,8 +594,7 @@ int trace_print_context(struct trace_iterator *iter) trace_find_cmdline(entry->pid, comm); - trace_seq_printf(s, "%16s-%-5d [%03d] ", - comm, entry->pid, iter->cpu); + trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { unsigned int tgid = trace_find_tgid(entry->pid); @@ -606,6 +605,8 @@ int trace_print_context(struct trace_iterator *iter) trace_seq_printf(s, "(%5d) ", tgid); } + trace_seq_printf(s, "[%03d] ", iter->cpu); + if (tr->trace_flags & TRACE_ITER_IRQ_INFO) trace_print_lat_fmt(s, entry); diff --git a/lib/Kconfig b/lib/Kconfig index e34b04b56057..706836ec314d 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -420,60 +420,15 @@ config HAS_IOPORT_MAP depends on HAS_IOMEM && !NO_IOPORT_MAP default y -config HAS_DMA - bool - depends on !NO_DMA - default y +source "kernel/dma/Kconfig" config SGL_ALLOC bool default n -config NEED_SG_DMA_LENGTH - bool - -config NEED_DMA_MAP_STATE - bool - -config ARCH_DMA_ADDR_T_64BIT - def_bool 64BIT || PHYS_ADDR_T_64BIT - config IOMMU_HELPER bool -config 
ARCH_HAS_SYNC_DMA_FOR_DEVICE - bool - -config ARCH_HAS_SYNC_DMA_FOR_CPU - bool - select NEED_DMA_MAP_STATE - -config DMA_DIRECT_OPS - bool - depends on HAS_DMA - -config DMA_NONCOHERENT_OPS - bool - depends on HAS_DMA - select DMA_DIRECT_OPS - -config DMA_NONCOHERENT_MMAP - bool - depends on DMA_NONCOHERENT_OPS - -config DMA_NONCOHERENT_CACHE_SYNC - bool - depends on DMA_NONCOHERENT_OPS - -config DMA_VIRT_OPS - bool - depends on HAS_DMA - -config SWIOTLB - bool - select DMA_DIRECT_OPS - select NEED_DMA_MAP_STATE - config CHECK_SIGNATURE bool diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 3d35d062970d..c253c1b46c6b 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN config KASAN bool "KASan: runtime memory debugger" depends on SLUB || (SLAB && !DEBUG_SLAB) + select SLUB_DEBUG if SLUB select CONSTRUCTORS select STACKDEPOT help diff --git a/lib/Makefile b/lib/Makefile index 956b320292fe..90dc5520b784 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -23,15 +23,12 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ sha1.o chacha20.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ - earlycpio.o seq_buf.o siphash.o \ + earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ nmi_backtrace.o nodemask.o win_minmax.o lib-$(CONFIG_PRINTK) += dump_stack.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o -lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o -lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o -lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o lib-y += kobject.o klist.o obj-y += lockref.o @@ -98,10 +95,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o -ifneq ($(CONFIG_HAVE_DEC_LOCK),y) - lib-y += dec_and_lock.o -endif - obj-$(CONFIG_BITREVERSE) += bitrev.o obj-$(CONFIG_RATIONAL) += rational.o obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o @@ -148,7 +141,6 @@ obj-$(CONFIG_SMP) += percpu_counter.o obj-$(CONFIG_AUDIT_GENERIC) += audit.o obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o -obj-$(CONFIG_SWIOTLB) += swiotlb.o obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o @@ -169,8 +161,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o obj-$(CONFIG_LRU_CACHE) += lru_cache.o -obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o - obj-$(CONFIG_GENERIC_CSUM) += checksum.o obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c index 347fa7ac2e8a..9555b68bb774 100644 --- a/lib/dec_and_lock.c +++ b/lib/dec_and_lock.c @@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) } EXPORT_SYMBOL(_atomic_dec_and_lock); + +int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, + unsigned long *flags) +{ + /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */ + if (atomic_add_unless(atomic, -1, 1)) + return 0; + + /* Otherwise do it the slow way */ + spin_lock_irqsave(lock, *flags); + if (atomic_dec_and_test(atomic)) + return 1; + spin_unlock_irqrestore(lock, *flags); + return 0; +} +EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave); diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 9bbd9c5d375a..beb14839b41a 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c @@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) spin_lock_irqsave(&tags->lock, flags); /* Fastpath */ - if (likely(tags->nr_free >= 0)) { + if (likely(tags->nr_free)) { tag = tags->freelist[--tags->nr_free]; spin_unlock_irqrestore(&tags->lock, flags); return tag; diff --git a/lib/refcount.c b/lib/refcount.c index 0eb48353abe3..d3b81cefce91 100644 --- a/lib/refcount.c +++ b/lib/refcount.c @@ -350,3 +350,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) } EXPORT_SYMBOL(refcount_dec_and_lock); +/** + * refcount_dec_and_lock_irqsave - return holding spinlock with disabled + * interrupts if able to decrement refcount to 0 + * @r: the refcount + * @lock: the spinlock to be locked + * @flags: saved IRQ-flags if the is acquired + * + * Same as refcount_dec_and_lock() above except that the spinlock is acquired + * with disabled interupts. + * + * Return: true and hold spinlock if able to decrement refcount to 0, false + * otherwise + */ +bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, + unsigned long *flags) +{ + if (refcount_dec_not_one(r)) + return false; + + spin_lock_irqsave(lock, *flags); + if (!refcount_dec_and_test(r)) { + spin_unlock_irqrestore(lock, *flags); + return false; + } + + return true; +} +EXPORT_SYMBOL(refcount_dec_and_lock_irqsave); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 1642fd507a96..7c6096a71704 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -24,9 +24,6 @@ **/ struct scatterlist *sg_next(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif if (sg_is_last(sg)) return NULL; @@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) for_each_sg(sgl, sg, nents, i) ret = sg; -#ifdef CONFIG_DEBUG_SG - BUG_ON(sgl[0].sg_magic != SG_MAGIC); BUG_ON(!sg_is_last(ret)); -#endif return ret; } EXPORT_SYMBOL(sg_last); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 60aedc879361..08d3d59dca17 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = { { /* Mainly checking JIT here. */ "BPF_MAXINSNS: Ctx heavy transformations", { }, +#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) + CLASSIC | FLAG_EXPECTED_FAIL, +#else CLASSIC, +#endif { }, { { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } }, .fill_helper = bpf_fill_maxinsns6, + .expected_errcode = -ENOTSUPP, }, { /* Mainly checking JIT here. */ "BPF_MAXINSNS: Call heavy transformations", { }, +#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, +#else CLASSIC | FLAG_NO_DATA, +#endif { }, { { 1, 0 }, { 10, 0 } }, .fill_helper = bpf_fill_maxinsns7, + .expected_errcode = -ENOTSUPP, }, { /* Mainly checking JIT here. 
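Both new *_dec_and_lock_irqsave() helpers earlier in this diff follow the established dec-and-lock calling convention, with the caller's flags word filled in when the lock is taken. A minimal sketch of a user of the refcount variant (struct foo, foo_list_lock and foo_put() are made-up names for illustration):

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_list_lock);  /* protects a lookup list of foos */

struct foo {
        refcount_t ref;
        struct list_head node;          /* linked under foo_list_lock */
};

static void foo_put(struct foo *f)
{
        unsigned long flags;

        if (!refcount_dec_and_lock_irqsave(&f->ref, &foo_list_lock, &flags))
                return;

        /* Last reference: the lock is held with interrupts disabled,
         * so the object can be unlinked and freed safely.
         */
        list_del(&f->node);
        spin_unlock_irqrestore(&foo_list_lock, flags);
        kfree(f);
}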
*/ "BPF_MAXINSNS: Jump heavy test", @@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = { { "BPF_MAXINSNS: exec all MSH", { }, +#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) + CLASSIC | FLAG_EXPECTED_FAIL, +#else CLASSIC, +#endif { 0xfa, 0xfb, 0xfc, 0xfd, }, { { 4, 0xababab83 } }, .fill_helper = bpf_fill_maxinsns13, + .expected_errcode = -ENOTSUPP, }, { "BPF_MAXINSNS: ld_abs+get_processor_id", { }, +#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) + CLASSIC | FLAG_EXPECTED_FAIL, +#else CLASSIC, +#endif { }, { { 1, 0xbee } }, .fill_helper = bpf_fill_ld_abs_get_processor_id, + .expected_errcode = -ENOTSUPP, }, /* * LD_IND / LD_ABS on fragmented SKBs diff --git a/lib/test_printf.c b/lib/test_printf.c index b2aa8f514844..cea592f402ed 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -260,13 +260,6 @@ plain(void) { int err; - /* - * Make sure crng is ready. Otherwise we get "(ptrval)" instead - * of a hashed address when printing '%p' in plain_hash() and - * plain_format(). - */ - wait_for_random_bytes(); - err = plain_hash(); if (err) { pr_warn("plain 'p' does not appear to be hashed\n"); diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 347cc834c04a..2e5d3df0853d 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -359,15 +359,8 @@ static void wb_shutdown(struct bdi_writeback *wb) spin_lock_bh(&wb->work_lock); if (!test_and_clear_bit(WB_registered, &wb->state)) { spin_unlock_bh(&wb->work_lock); - /* - * Wait for wb shutdown to finish if someone else is just - * running wb_shutdown(). Otherwise we could proceed to wb / - * bdi destruction before wb_shutdown() is finished. - */ - wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE); return; } - set_bit(WB_shutting_down, &wb->state); spin_unlock_bh(&wb->work_lock); cgwb_remove_from_bdi_list(wb); @@ -379,12 +372,6 @@ static void wb_shutdown(struct bdi_writeback *wb) mod_delayed_work(bdi_wq, &wb->dwork, 0); flush_delayed_work(&wb->dwork); WARN_ON(!list_empty(&wb->work_list)); - /* - * Make sure bit gets cleared after shutdown is finished. Matches with - * the barrier provided by test_and_clear_bit() above. 
- */ - smp_wmb(); - clear_and_wake_up_bit(WB_shutting_down, &wb->state); } static void wb_exit(struct bdi_writeback *wb) @@ -508,10 +495,12 @@ static void cgwb_release_workfn(struct work_struct *work) struct bdi_writeback *wb = container_of(work, struct bdi_writeback, release_work); + mutex_lock(&wb->bdi->cgwb_release_mutex); wb_shutdown(wb); css_put(wb->memcg_css); css_put(wb->blkcg_css); + mutex_unlock(&wb->bdi->cgwb_release_mutex); fprop_local_destroy_percpu(&wb->memcg_completions); percpu_ref_exit(&wb->refcnt); @@ -697,6 +686,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); bdi->cgwb_congested_tree = RB_ROOT; + mutex_init(&bdi->cgwb_release_mutex); ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); if (!ret) { @@ -717,7 +707,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi) spin_lock_irq(&cgwb_lock); radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) cgwb_kill(*slot); + spin_unlock_irq(&cgwb_lock); + mutex_lock(&bdi->cgwb_release_mutex); + spin_lock_irq(&cgwb_lock); while (!list_empty(&bdi->wb_list)) { wb = list_first_entry(&bdi->wb_list, struct bdi_writeback, bdi_node); @@ -726,6 +719,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi) spin_lock_irq(&cgwb_lock); } spin_unlock_irq(&cgwb_lock); + mutex_unlock(&bdi->cgwb_release_mutex); } /** diff --git a/mm/debug.c b/mm/debug.c index 56e2d9125ea5..38c926520c97 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = { void __dump_page(struct page *page, const char *reason) { + bool page_poisoned = PagePoisoned(page); + int mapcount; + + /* + * If struct page is poisoned don't access Page*() functions as that + * leads to recursive loop. Page*() check for poisoned pages, and calls + * dump_page() when detected. + */ + if (page_poisoned) { + pr_emerg("page:%px is uninitialized and poisoned", page); + goto hex_only; + } + /* * Avoid VM_BUG_ON() in page_mapcount(). * page->_mapcount space in struct page is used by sl[aou]b pages to * encode own info. */ - int mapcount = PageSlab(page) ? 0 : page_mapcount(page); + mapcount = PageSlab(page) ? 
0 : page_mapcount(page); pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", page, page_ref_count(page), mapcount, @@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason) pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); +hex_only: print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, sizeof(struct page), false); @@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason) pr_alert("page dumped because: %s\n", reason); #ifdef CONFIG_MEMCG - if (page->mem_cgroup) + if (!page_poisoned && page->mem_cgroup) pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); #endif } diff --git a/mm/gup.c b/mm/gup.c index b70d7ba7cc13..fc5f98069f4e 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1238,8 +1238,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) int locked = 0; long ret = 0; - VM_BUG_ON(start & ~PAGE_MASK); - VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; for (nstart = start; nstart < end; nstart = nend) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3612fbb32e9d..039ddbc574e9 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2163,6 +2163,7 @@ static void __init gather_bootmem_prealloc(void) */ if (hstate_is_gigantic(h)) adjust_managed_page_count(page, 1 << h->order); + cond_resched(); } } diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index f185455b3406..c3bd5209da38 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -619,12 +619,13 @@ void kasan_kfree_large(void *ptr, unsigned long ip) int kasan_module_alloc(void *addr, size_t size) { void *ret; + size_t scaled_size; size_t shadow_size; unsigned long shadow_start; shadow_start = (unsigned long)kasan_mem_to_shadow(addr); - shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT, - PAGE_SIZE); + scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT; + shadow_size = round_up(scaled_size, PAGE_SIZE); if (WARN_ON(!PAGE_ALIGNED(shadow_start))) return -EINVAL; diff --git a/mm/memblock.c b/mm/memblock.c index cc16d70b8333..11e46f83e1ad 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -228,7 +227,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, * so we use WARN_ONCE() here to see the stack trace if * fail happens. */ - WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n"); + WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), + "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); } return __memblock_find_range_top_down(start, end, size, align, nid, diff --git a/mm/mmap.c b/mm/mmap.c index d1eb87ef4b1a..5801b5f0a634 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -186,8 +186,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) return next; } -static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf); - +static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, + struct list_head *uf); SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long retval; @@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) goto out; /* Ok, looks good - let it rip. */ - if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0) + if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) goto out; set_brk: @@ -2929,21 +2929,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm) * anonymous maps. eventually we may be able to do some * brk-specific accounting here. 
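The kasan_module_alloc() change above rounds the allocation size up to the shadow granularity before page-aligning it; the old expression dropped the sub-granule remainder and could leave the shadow mapping one page short. A small user-space illustration of the arithmetic, assuming 4 KiB pages and the usual 1:8 shadow scale:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define SCALE_SHIFT     3                       /* one shadow byte per 8 bytes */
#define SHADOW_MASK     ((1UL << SCALE_SHIFT) - 1)
#define ROUND_UP(x, a)  ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
        unsigned long size = 8 * PAGE_SIZE + 1; /* needs 4097 shadow bytes */
        unsigned long before = ROUND_UP(size >> SCALE_SHIFT, PAGE_SIZE);
        unsigned long after = ROUND_UP((size + SHADOW_MASK) >> SCALE_SHIFT,
                                       PAGE_SIZE);

        printf("before=%lu after=%lu\n", before, after); /* 4096 vs 8192 */
        return 0;
}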
*/ -static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf) +static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; - unsigned long len; struct rb_node **rb_link, *rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; - len = PAGE_ALIGN(request); - if (len < request) - return -ENOMEM; - if (!len) - return 0; - /* Until we need other flags, refuse anything except VM_EXEC. */ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; @@ -3015,18 +3008,20 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long return 0; } -static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf) -{ - return do_brk_flags(addr, len, 0, uf); -} - -int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags) +int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; + unsigned long len; int ret; bool populate; LIST_HEAD(uf); + len = PAGE_ALIGN(request); + if (len < request) + return -ENOMEM; + if (!len) + return 0; + if (down_write_killable(&mm->mmap_sem)) return -EINTR; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1521100f1e63..5d800d61ddb7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6847,6 +6847,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) /* Initialise every node */ mminit_verify_pageflags_layout(); setup_nr_node_ids(); + zero_resv_unavail(); for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); free_area_init_node(nid, NULL, @@ -6857,7 +6858,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) node_set_state(nid, N_MEMORY); check_for_memory(pgdat, nid); } - zero_resv_unavail(); } static int __init cmdline_parse_core(char *p, unsigned long *core, @@ -7033,9 +7033,9 @@ void __init set_dma_reserve(unsigned long new_dma_reserve) void __init free_area_init(unsigned long *zones_size) { + zero_resv_unavail(); free_area_init_node(0, zones_size, __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); - zero_resv_unavail(); } static int page_alloc_cpu_dead(unsigned int cpu) diff --git a/mm/rmap.c b/mm/rmap.c index 6db729dc4c50..eb477809a5c0 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -64,6 +64,7 @@ #include #include #include +#include #include @@ -1481,11 +1482,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, set_pte_at(mm, address, pvmw.pte, pteval); } - } else if (pte_unused(pteval)) { + } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. + * A future reference will then fault in a new zero + * page. When userfaultfd is active, we must not drop + * this page though, as its main user (postcopy + * migration) will not expect userfaults on already + * copied pages. 
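The vm_brk_flags() hunk above moves the length sanitization out of do_brk_flags(), and the "len < request" test it carries along is an overflow check: PAGE_ALIGN() wraps to zero when the request is within one page of ULONG_MAX. A user-space illustration, assuming 4 KiB pages:

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long request = ULONG_MAX - 100;        /* absurdly large brk request */
        unsigned long len = PAGE_ALIGN(request);        /* wraps around to 0 */

        printf("request=%#lx len=%#lx overflowed=%d\n",
               request, len, len < request);
        return 0;
}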
*/ dec_mm_counter(mm, mm_counter(page)); /* We have to invalidate as we cleared the pte */ diff --git a/mm/slab_common.c b/mm/slab_common.c index 890b1f04a03a..2296caf87bfb 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_cache *s) list_del(&s->list); if (s->flags & SLAB_TYPESAFE_BY_RCU) { +#ifdef SLAB_SUPPORTS_SYSFS + sysfs_slab_unlink(s); +#endif list_add_tail(&s->list, &slab_caches_to_rcu_destroy); schedule_work(&slab_caches_to_rcu_destroy_work); } else { #ifdef SLAB_SUPPORTS_SYSFS + sysfs_slab_unlink(s); sysfs_slab_release(s); #else slab_kmem_cache_release(s); diff --git a/mm/slub.c b/mm/slub.c index a3b8467c14af..51258eff4178 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work) kset_unregister(s->memcg_kset); #endif kobject_uevent(&s->kobj, KOBJ_REMOVE); - kobject_del(&s->kobj); out: kobject_put(&s->kobj); } @@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kmem_cache *s) schedule_work(&s->kobj_remove_work); } +void sysfs_slab_unlink(struct kmem_cache *s) +{ + if (slab_state >= FULL) + kobject_del(&s->kobj); +} + void sysfs_slab_release(struct kmem_cache *s) { if (slab_state >= FULL) diff --git a/mm/vmstat.c b/mm/vmstat.c index 75eda9c2b260..8ba0870ecddd 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1796,11 +1796,9 @@ static void vmstat_update(struct work_struct *w) * to occur in the future. Keep on running the * update worker thread. */ - preempt_disable(); queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, this_cpu_ptr(&vmstat_work), round_jiffies_relative(sysctl_stat_interval)); - preempt_enable(); } } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 73a65789271b..8ccee3d01822 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -693,7 +693,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/9p/client.c b/net/9p/client.c index 18c5271910dc..5c1343195292 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -225,7 +225,8 @@ static int parse_opts(char *opts, struct p9_client *clnt) } free_and_return: - v9fs_put_trans(clnt->trans_mod); + if (ret) + v9fs_put_trans(clnt->trans_mod); kfree(tmp_options); return ret; } diff --git a/net/Makefile b/net/Makefile index 13ec0d5415c7..bdaf53925acd 100644 --- a/net/Makefile +++ b/net/Makefile @@ -20,11 +20,7 @@ obj-$(CONFIG_TLS) += tls/ obj-$(CONFIG_XFRM) += xfrm/ obj-$(CONFIG_UNIX) += unix/ obj-$(CONFIG_NET) += ipv6/ -ifneq ($(CC_CAN_LINK),y) -$(warning CC cannot link executables. Skipping bpfilter.) 
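The GRO receive hunks in this patch (net/8021q/vlan.c above, plus the fou, gre and udp offload paths further down) stop touching NAPI_GRO_CB(skb) directly on the completion path and call skb_gro_flush_final() instead. The helper itself is not part of these hunks; roughly, it skips the flush update when a lower layer (ESP GRO offload) has already taken ownership of the skb and returned ERR_PTR(-EINPROGRESS), along the lines of this sketch:

static inline void skb_gro_flush_final(struct sk_buff *skb,
                                       struct sk_buff **pp, int flush)
{
        /* Sketch only: leave the skb alone if it was handed off
         * asynchronously by an earlier gro_receive callback. */
        if (PTR_ERR(pp) != -EINPROGRESS)
                NAPI_GRO_CB(skb)->flush |= flush;
}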
-else obj-$(CONFIG_BPFILTER) += bpfilter/ -endif obj-$(CONFIG_PACKET) += packet/ obj-$(CONFIG_NET_KEY) += key/ obj-$(CONFIG_BRIDGE) += bridge/ diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 55fdba05d7d9..9b6bc5abe946 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1869,7 +1869,7 @@ static const struct proto_ops atalk_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = atalk_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = atalk_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = atalk_compat_ioctl, diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 36b3adacc0dd..10462de734ea 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev, ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc; pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev); - refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc); - ATM_SKB(skb)->atm_options = atmvcc->atm_options; + atm_account_tx(atmvcc, skb); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; diff --git a/net/atm/clip.c b/net/atm/clip.c index 66caa48a27c2..d795b9c5aea4 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb, memcpy(here, llc_oui, sizeof(llc_oui)); ((__be16 *) here)[3] = skb->protocol; } - refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); - ATM_SKB(skb)->atm_options = vcc->atm_options; + atm_account_tx(vcc, skb); entry->vccs->last_use = jiffies; pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ diff --git a/net/atm/common.c b/net/atm/common.c index 1f2af59935db..a7a68e509628 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size) goto out; } pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); - refcount_add(skb->truesize, &sk->sk_wmem_alloc); + atm_account_tx(vcc, skb); skb->dev = NULL; /* for paths shared with net_device interfaces */ - ATM_SKB(skb)->atm_options = vcc->atm_options; if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) { kfree_skb(skb); error = -EFAULT; @@ -648,11 +647,16 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size) return error; } -__poll_t vcc_poll_mask(struct socket *sock, __poll_t events) +__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; - struct atm_vcc *vcc = ATM_SD(sock); - __poll_t mask = 0; + struct atm_vcc *vcc; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; + + vcc = ATM_SD(sock); /* exceptional events */ if (sk->sk_err) diff --git a/net/atm/common.h b/net/atm/common.h index 526796ad230f..5850649068bb 100644 --- a/net/atm/common.h +++ b/net/atm/common.h @@ -17,7 +17,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci); int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len); -__poll_t vcc_poll_mask(struct socket *sock, __poll_t events); +__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait); int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int vcc_setsockopt(struct socket *sock, int level, int optname, diff --git 
a/net/atm/lec.c b/net/atm/lec.c index 5a95fcf6f9b6..d7f5cf5b7594 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb) struct net_device *dev = skb->dev; ATM_SKB(skb)->vcc = vcc; - ATM_SKB(skb)->atm_options = vcc->atm_options; + atm_account_tx(vcc, skb); - refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); if (vcc->send(vcc, skb) < 0) { dev->stats.tx_dropped++; return; diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 75620c2f2617..24b53c4c39c6 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc) sizeof(struct llc_snap_hdr)); } - refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc); - ATM_SKB(skb)->atm_options = entry->shortcut->atm_options; + atm_account_tx(entry->shortcut, skb); entry->shortcut->send(entry->shortcut, skb); entry->packets_fwded++; mpc->in_ops->put(entry); diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 21d9d341a619..af8c4b38b746 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) return 1; } - refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc); - ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options; + atm_account_tx(vcc, skb); pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev); ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb) diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 9f75092fe778..2cb10af16afc 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c @@ -113,7 +113,7 @@ static const struct proto_ops pvc_proto_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pvc_getname, - .poll_mask = vcc_poll_mask, + .poll = vcc_poll, .ioctl = vcc_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = vcc_compat_ioctl, diff --git a/net/atm/raw.c b/net/atm/raw.c index ee10e8d46185..b3ba44aab0ee 100644 --- a/net/atm/raw.c +++ b/net/atm/raw.c @@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb) struct sock *sk = sk_atm(vcc); pr_debug("(%d) %d -= %d\n", - vcc->vci, sk_wmem_alloc_get(sk), skb->truesize); - WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)); + vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize); + WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc)); dev_kfree_skb_any(skb); sk->sk_write_space(sk); } diff --git a/net/atm/svc.c b/net/atm/svc.c index 53f4ad7087b1..2f91b766ac42 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -636,7 +636,7 @@ static const struct proto_ops svc_proto_ops = { .socketpair = sock_no_socketpair, .accept = svc_accept, .getname = svc_getname, - .poll_mask = vcc_poll_mask, + .poll = vcc_poll, .ioctl = svc_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = svc_compat_ioctl, diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index d1d2442ce573..c603d33d5410 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1941,7 +1941,7 @@ static const struct proto_ops ax25_proto_ops = { .socketpair = sock_no_socketpair, .accept = ax25_accept, .getname = ax25_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = ax25_ioctl, .listen = ax25_listen, .shutdown = ax25_shutdown, diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 510ab4f55df5..3264e1873219 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -437,13 +437,16 @@ static inline __poll_t 
bt_accept_poll(struct sock *parent) return 0; } -__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events) +__poll_t bt_sock_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; BT_DBG("sock %p, sk %p", sock, sk); + poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_state == BT_LISTEN) return bt_accept_poll(sk); @@ -475,7 +478,7 @@ __poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL(bt_sock_poll_mask); +EXPORT_SYMBOL(bt_sock_poll); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index d6c099861538..1506e1632394 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -1975,7 +1975,7 @@ static const struct proto_ops hci_sock_ops = { .sendmsg = hci_sock_sendmsg, .recvmsg = hci_sock_recvmsg, .ioctl = hci_sock_ioctl, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = hci_sock_setsockopt, diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 742a190034e6..686bdc6b35b0 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1653,7 +1653,7 @@ static const struct proto_ops l2cap_sock_ops = { .getname = l2cap_sock_getname, .sendmsg = l2cap_sock_sendmsg, .recvmsg = l2cap_sock_recvmsg, - .poll_mask = bt_sock_poll_mask, + .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 1cf57622473a..d606e9212291 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -1049,7 +1049,7 @@ static const struct proto_ops rfcomm_sock_ops = { .setsockopt = rfcomm_sock_setsockopt, .getsockopt = rfcomm_sock_getsockopt, .ioctl = rfcomm_sock_ioctl, - .poll_mask = bt_sock_poll_mask, + .poll = bt_sock_poll, .socketpair = sock_no_socketpair, .mmap = sock_no_mmap }; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index d60dbc61d170..413b8ee49fec 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -1197,7 +1197,7 @@ static const struct proto_ops sco_sock_ops = { .getname = sco_sock_getname, .sendmsg = sco_sock_sendmsg, .recvmsg = sco_sock_recvmsg, - .poll_mask = bt_sock_poll_mask, + .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, diff --git a/net/bpfilter/.gitignore b/net/bpfilter/.gitignore new file mode 100644 index 000000000000..e97084e3eea2 --- /dev/null +++ b/net/bpfilter/.gitignore @@ -0,0 +1 @@ +bpfilter_umh diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig index a948b072c28f..76deb6615883 100644 --- a/net/bpfilter/Kconfig +++ b/net/bpfilter/Kconfig @@ -1,6 +1,5 @@ menuconfig BPFILTER bool "BPF based packet filtering framework (BPFILTER)" - default n depends on NET && BPF && INET help This builds experimental bpfilter framework that is aiming to @@ -9,6 +8,7 @@ menuconfig BPFILTER if BPFILTER config BPFILTER_UMH tristate "bpfilter kernel module with user mode helper" + depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC)) default m help This builds bpfilter kernel module with embedded user mode helper diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile index e0bbe7583e58..39c6980b5d99 100644 --- a/net/bpfilter/Makefile +++ b/net/bpfilter/Makefile @@ -15,18 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y) HOSTLDFLAGS += -static endif -# a bit of elf magic to 
convert bpfilter_umh binary into a binary blob -# inside bpfilter_umh.o elf file referenced by -# _binary_net_bpfilter_bpfilter_umh_start symbol -# which bpfilter_kern.c passes further into umh blob loader at run-time -quiet_cmd_copy_umh = GEN $@ - cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \ - $(OBJCOPY) -I binary -O `$(OBJDUMP) -f $<|grep format|cut -d' ' -f8` \ - -B `$(OBJDUMP) -f $<|grep architecture|cut -d, -f1|cut -d' ' -f2` \ - --rename-section .data=.init.rodata $< $@ - -$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh - $(call cmd,copy_umh) +$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o -bpfilter-objs += bpfilter_kern.o bpfilter_umh.o +bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 09522573f611..f0fc182d3db7 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -10,11 +10,8 @@ #include #include "msgfmt.h" -#define UMH_start _binary_net_bpfilter_bpfilter_umh_start -#define UMH_end _binary_net_bpfilter_bpfilter_umh_end - -extern char UMH_start; -extern char UMH_end; +extern char bpfilter_umh_start; +extern char bpfilter_umh_end; static struct umh_info info; /* since ip_getsockopt() can run in parallel, serialize access to umh */ @@ -93,7 +90,9 @@ static int __init load_umh(void) int err; /* fork usermode process */ - err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info); + err = fork_usermode_blob(&bpfilter_umh_start, + &bpfilter_umh_end - &bpfilter_umh_start, + &info); if (err) return err; pr_info("Loaded bpfilter_umh pid %d\n", info.pid); diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S new file mode 100644 index 000000000000..40311d10d2f2 --- /dev/null +++ b/net/bpfilter/bpfilter_umh_blob.S @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + .section .init.rodata, "a" + .global bpfilter_umh_start +bpfilter_umh_start: + .incbin "net/bpfilter/bpfilter_umh" + .global bpfilter_umh_end +bpfilter_umh_end: diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index c7991867d622..a6fb1b3bcad9 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -934,11 +934,15 @@ static int caif_release(struct socket *sock) } /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */ -static __poll_t caif_poll_mask(struct socket *sock, __poll_t events) +static __poll_t caif_poll(struct file *file, + struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; + __poll_t mask; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); - __poll_t mask = 0; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* exceptional events? 
*/ if (sk->sk_err) @@ -972,7 +976,7 @@ static const struct proto_ops caif_seqpacket_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = caif_poll_mask, + .poll = caif_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -993,7 +997,7 @@ static const struct proto_ops caif_stream_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = caif_poll_mask, + .poll = caif_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/can/bcm.c b/net/can/bcm.c index 9393f25df08d..0af8f0db892a 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1660,7 +1660,7 @@ static const struct proto_ops bcm_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/can/raw.c b/net/can/raw.c index fd7e2f49ea6a..1051eee82581 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -843,7 +843,7 @@ static const struct proto_ops raw_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = raw_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/core/datagram.c b/net/core/datagram.c index f19bf3dc2bd6..9938952c5c78 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -819,8 +819,9 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg); /** * datagram_poll - generic datagram poll + * @file: file struct * @sock: socket - * @events to wait for + * @wait: poll table * * Datagram poll: Again totally generic. This also handles * sequenced packet sockets providing the socket receive queue @@ -830,10 +831,14 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg); * and you use a different write policy from sock_writeable() * then please supply your own write_space callback. */ -__poll_t datagram_poll_mask(struct socket *sock, __poll_t events) +__poll_t datagram_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; - __poll_t mask = 0; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* exceptional events? 
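Every ->poll() conversion in this series has the same shape: the handler registers itself on the socket's wait queue with sock_poll_wait() first, and only then computes the event mask, so a wakeup that races with the state check is not lost. Schematically (the function and the checks below are illustrative, not part of the patch):

static __poll_t example_poll(struct file *file, struct socket *sock,
                             poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        /* Register on the wait queue before looking at any state. */
        sock_poll_wait(file, sk_sleep(sk), wait);

        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;
        if (sock_writeable(sk))
                mask |= EPOLLOUT | EPOLLWRNORM;

        return mask;
}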
*/ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) @@ -866,4 +871,4 @@ __poll_t datagram_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL(datagram_poll_mask); +EXPORT_SYMBOL(datagram_poll); diff --git a/net/core/dev.c b/net/core/dev.c index 57b7bab5f70b..a5aa1c7444e6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8643,7 +8643,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char /* We get here if we can't use the current device name */ if (!pat) goto out; - if (dev_get_valid_name(net, dev, pat) < 0) + err = dev_get_valid_name(net, dev, pat); + if (err < 0) goto out; } @@ -8655,7 +8656,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char dev_close(dev); /* And unlink it from device chain */ - err = -ENODEV; unlist_netdevice(dev); synchronize_net(); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index a04e1e88bf3a..50537ff961a7 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) if (ifr->ifr_qlen < 0) return -EINVAL; if (dev->tx_queue_len ^ ifr->ifr_qlen) { - unsigned int orig_len = dev->tx_queue_len; - - dev->tx_queue_len = ifr->ifr_qlen; - err = call_netdevice_notifiers( - NETDEV_CHANGE_TX_QUEUE_LEN, dev); - err = notifier_to_errno(err); - if (err) { - dev->tx_queue_len = orig_len; + err = dev_change_tx_queue_len(dev, ifr->ifr_qlen); + if (err) return err; - } } return 0; diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 126ffc5bc630..f64aa13811ea 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, if (rule->mark && r->mark != rule->mark) continue; + if (rule->suppress_ifgroup != -1 && + r->suppress_ifgroup != rule->suppress_ifgroup) + continue; + + if (rule->suppress_prefixlen != -1 && + r->suppress_prefixlen != rule->suppress_prefixlen) + continue; + if (rule->mark_mask && r->mark_mask != rule->mark_mask) continue; @@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, if (rule->ip_proto && r->ip_proto != rule->ip_proto) continue; + if (rule->proto && r->proto != rule->proto) + continue; + if (fib_rule_port_range_set(&rule->sport_range) && !fib_rule_port_range_compare(&r->sport_range, &rule->sport_range)) @@ -645,6 +656,73 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } +static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, + struct nlattr **tb, struct fib_rule *rule) +{ + struct fib_rule *r; + + list_for_each_entry(r, &ops->rules_list, list) { + if (r->action != rule->action) + continue; + + if (r->table != rule->table) + continue; + + if (r->pref != rule->pref) + continue; + + if (memcmp(r->iifname, rule->iifname, IFNAMSIZ)) + continue; + + if (memcmp(r->oifname, rule->oifname, IFNAMSIZ)) + continue; + + if (r->mark != rule->mark) + continue; + + if (r->suppress_ifgroup != rule->suppress_ifgroup) + continue; + + if (r->suppress_prefixlen != rule->suppress_prefixlen) + continue; + + if (r->mark_mask != rule->mark_mask) + continue; + + if (r->tun_id != rule->tun_id) + continue; + + if (r->fr_net != rule->fr_net) + continue; + + if (r->l3mdev != rule->l3mdev) + continue; + + if (!uid_eq(r->uid_range.start, rule->uid_range.start) || + !uid_eq(r->uid_range.end, rule->uid_range.end)) + continue; + + if (r->ip_proto != rule->ip_proto) + continue; + + if (r->proto != rule->proto) + 
continue; + + if (!fib_rule_port_range_compare(&r->sport_range, + &rule->sport_range)) + continue; + + if (!fib_rule_port_range_compare(&r->dport_range, + &rule->dport_range)) + continue; + + if (!ops->compare(r, frh, tb)) + continue; + return 1; + } + return 0; +} + int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { @@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, goto errout; if ((nlh->nlmsg_flags & NLM_F_EXCL) && - rule_find(ops, frh, tb, rule, user_priority)) { + rule_exists(ops, frh, tb, rule)) { err = -EEXIST; goto errout_free; } diff --git a/net/core/filter.c b/net/core/filter.c index 3d9ba7e5965a..0ca6907d7efe 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3214,20 +3214,6 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, } EXPORT_SYMBOL_GPL(xdp_do_redirect); -static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd) -{ - unsigned int len; - - if (unlikely(!(fwd->flags & IFF_UP))) - return -ENETDOWN; - - len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; - if (skb->len > len) - return -EMSGSIZE; - - return 0; -} - static int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, @@ -3256,10 +3242,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, } if (map->map_type == BPF_MAP_TYPE_DEVMAP) { - if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd)))) + struct bpf_dtab_netdev *dst = fwd; + + err = dev_map_generic_redirect(dst, skb, xdp_prog); + if (unlikely(err)) goto err; - skb->dev = fwd; - generic_xdp_tx(skb, xdp_prog); } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { struct xdp_sock *xs = fwd; @@ -4086,8 +4073,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, memcpy(params->smac, dev->dev_addr, ETH_ALEN); params->h_vlan_TCI = 0; params->h_vlan_proto = 0; + params->ifindex = dev->ifindex; - return dev->ifindex; + return 0; } #endif @@ -4111,7 +4099,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, /* verify forwarding is enabled on this interface */ in_dev = __in_dev_get_rcu(dev); if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) - return 0; + return BPF_FIB_LKUP_RET_FWD_DISABLED; if (flags & BPF_FIB_LOOKUP_OUTPUT) { fl4.flowi4_iif = 1; @@ -4136,7 +4124,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, tb = fib_get_table(net, tbid); if (unlikely(!tb)) - return 0; + return BPF_FIB_LKUP_RET_NOT_FWDED; err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); } else { @@ -4148,8 +4136,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); } - if (err || res.type != RTN_UNICAST) - return 0; + if (err) { + /* map fib lookup errors to RTN_ type */ + if (err == -EINVAL) + return BPF_FIB_LKUP_RET_BLACKHOLE; + if (err == -EHOSTUNREACH) + return BPF_FIB_LKUP_RET_UNREACHABLE; + if (err == -EACCES) + return BPF_FIB_LKUP_RET_PROHIBIT; + + return BPF_FIB_LKUP_RET_NOT_FWDED; + } + + if (res.type != RTN_UNICAST) + return BPF_FIB_LKUP_RET_NOT_FWDED; if (res.fi->fib_nhs > 1) fib_select_path(net, &res, &fl4, NULL); @@ -4157,19 +4157,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, if (check_mtu) { mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); if (params->tot_len > mtu) - return 0; + return BPF_FIB_LKUP_RET_FRAG_NEEDED; } nh = &res.fi->fib_nh[res.nh_sel]; /* do not handle lwt encaps right now */ if 
(nh->nh_lwtstate) - return 0; + return BPF_FIB_LKUP_RET_UNSUPP_LWT; dev = nh->nh_dev; - if (unlikely(!dev)) - return 0; - if (nh->nh_gw) params->ipv4_dst = nh->nh_gw; @@ -4179,10 +4176,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, * rcu_read_lock_bh is not needed here */ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst); - if (neigh) - return bpf_fib_set_fwd_params(params, neigh, dev); + if (!neigh) + return BPF_FIB_LKUP_RET_NO_NEIGH; - return 0; + return bpf_fib_set_fwd_params(params, neigh, dev); } #endif @@ -4203,7 +4200,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, /* link local addresses are never forwarded */ if (rt6_need_strict(dst) || rt6_need_strict(src)) - return 0; + return BPF_FIB_LKUP_RET_NOT_FWDED; dev = dev_get_by_index_rcu(net, params->ifindex); if (unlikely(!dev)) @@ -4211,7 +4208,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, idev = __in6_dev_get_safely(dev); if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) - return 0; + return BPF_FIB_LKUP_RET_FWD_DISABLED; if (flags & BPF_FIB_LOOKUP_OUTPUT) { fl6.flowi6_iif = 1; @@ -4238,7 +4235,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, tb = ipv6_stub->fib6_get_table(net, tbid); if (unlikely(!tb)) - return 0; + return BPF_FIB_LKUP_RET_NOT_FWDED; f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict); } else { @@ -4251,11 +4248,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, } if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry)) - return 0; + return BPF_FIB_LKUP_RET_NOT_FWDED; - if (unlikely(f6i->fib6_flags & RTF_REJECT || - f6i->fib6_type != RTN_UNICAST)) - return 0; + if (unlikely(f6i->fib6_flags & RTF_REJECT)) { + switch (f6i->fib6_type) { + case RTN_BLACKHOLE: + return BPF_FIB_LKUP_RET_BLACKHOLE; + case RTN_UNREACHABLE: + return BPF_FIB_LKUP_RET_UNREACHABLE; + case RTN_PROHIBIT: + return BPF_FIB_LKUP_RET_PROHIBIT; + default: + return BPF_FIB_LKUP_RET_NOT_FWDED; + } + } + + if (f6i->fib6_type != RTN_UNICAST) + return BPF_FIB_LKUP_RET_NOT_FWDED; if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0) f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6, @@ -4265,11 +4274,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, if (check_mtu) { mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src); if (params->tot_len > mtu) - return 0; + return BPF_FIB_LKUP_RET_FRAG_NEEDED; } if (f6i->fib6_nh.nh_lwtstate) - return 0; + return BPF_FIB_LKUP_RET_UNSUPP_LWT; if (f6i->fib6_flags & RTF_GATEWAY) *dst = f6i->fib6_nh.nh_gw; @@ -4283,10 +4292,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, */ neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128, ndisc_hashfn, dst, dev); - if (neigh) - return bpf_fib_set_fwd_params(params, neigh, dev); + if (!neigh) + return BPF_FIB_LKUP_RET_NO_NEIGH; - return 0; + return bpf_fib_set_fwd_params(params, neigh, dev); } #endif @@ -4328,7 +4337,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, struct bpf_fib_lookup *, params, int, plen, u32, flags) { struct net *net = dev_net(skb->dev); - int index = -EAFNOSUPPORT; + int rc = -EAFNOSUPPORT; if (plen < sizeof(*params)) return -EINVAL; @@ -4339,25 +4348,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, switch (params->family) { #if IS_ENABLED(CONFIG_INET) case AF_INET: - index = bpf_ipv4_fib_lookup(net, params, flags, false); + rc = 
bpf_ipv4_fib_lookup(net, params, flags, false); break; #endif #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - index = bpf_ipv6_fib_lookup(net, params, flags, false); + rc = bpf_ipv6_fib_lookup(net, params, flags, false); break; #endif } - if (index > 0) { + if (!rc) { struct net_device *dev; - dev = dev_get_by_index_rcu(net, index); + dev = dev_get_by_index_rcu(net, params->ifindex); if (!is_skb_forwardable(dev, skb)) - index = 0; + rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; } - return index; + return rc; } static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c642304f178c..eba8dae22c25 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5276,8 +5276,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, if (npages >= 1 << order) { page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP | - __GFP_NOWARN | - __GFP_NORETRY, + __GFP_NOWARN, order); if (page) goto fill_page; diff --git a/net/core/sock.c b/net/core/sock.c index bcc41829a16d..9e8f65585b81 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3243,7 +3243,8 @@ static int req_prot_init(const struct proto *prot) rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, rsk_prot->obj_size, 0, - prot->slab_flags, NULL); + SLAB_ACCOUNT | prot->slab_flags, + NULL); if (!rsk_prot->slab) { pr_crit("%s: Can't create request sock SLAB cache!\n", @@ -3258,7 +3259,8 @@ int proto_register(struct proto *prot, int alloc_slab) if (alloc_slab) { prot->slab = kmem_cache_create_usercopy(prot->name, prot->obj_size, 0, - SLAB_HWCACHE_ALIGN | prot->slab_flags, + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | + prot->slab_flags, prot->useroffset, prot->usersize, NULL); @@ -3281,6 +3283,7 @@ int proto_register(struct proto *prot, int alloc_slab) kmem_cache_create(prot->twsk_prot->twsk_slab_name, prot->twsk_prot->twsk_obj_size, 0, + SLAB_ACCOUNT | prot->slab_flags, NULL); if (prot->twsk_prot->twsk_slab == NULL) diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 8b5ba6dffac7..12877a1514e7 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -600,7 +600,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); struct dccp_sock *dp = dccp_sk(sk); - ktime_t now = ktime_get_real(); + ktime_t now = ktime_get(); s64 delta = 0; switch (fbtype) { @@ -625,15 +625,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, case CCID3_FBACK_PERIODIC: delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback); if (delta <= 0) - DCCP_BUG("delta (%ld) <= 0", (long)delta); - else - hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta); + delta = 1; + hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta); break; default: return; } - ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, + ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta, hc->rx_x_recv, hc->rx_pinv); hc->rx_tstamp_last_feedback = now; @@ -680,7 +679,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) static u32 ccid3_first_li(struct sock *sk) { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); - u32 x_recv, p, delta; + u32 x_recv, p; + s64 delta; u64 fval; if (hc->rx_rtt == 0) { @@ -688,7 +688,9 @@ static u32 ccid3_first_li(struct sock *sk) hc->rx_rtt = DCCP_FALLBACK_RTT; } - delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback)); + delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback); + if (delta <= 0) + delta = 1; x_recv = scaled_div32(hc->rx_bytes_recv, delta); if 
(x_recv == 0) { /* would also trigger divide-by-zero */ DCCP_WARN("X_recv==0\n"); diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 0ea2ee56ac1b..f91e3816806b 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -316,7 +316,8 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); void dccp_shutdown(struct sock *sk, int how); int inet_dccp_listen(struct socket *sock, int backlog); -__poll_t dccp_poll_mask(struct socket *sock, __poll_t events); +__poll_t dccp_poll(struct file *file, struct socket *sock, + poll_table *wait); int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); void dccp_req_err(struct sock *sk, u64 seq); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index a9e478cd3787..b08feb219b44 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -984,7 +984,7 @@ static const struct proto_ops inet_dccp_ops = { .accept = inet_accept, .getname = inet_getname, /* FIXME: work on tcp_poll to rename it to inet_csk_poll */ - .poll_mask = dccp_poll_mask, + .poll = dccp_poll, .ioctl = inet_ioctl, /* FIXME: work on inet_listen to rename it to sock_common_listen */ .listen = inet_dccp_listen, diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 17fc4e0166ba..6344f1b18a6a 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -1070,7 +1070,7 @@ static const struct proto_ops inet6_dccp_ops = { .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet6_getname, - .poll_mask = dccp_poll_mask, + .poll = dccp_poll, .ioctl = inet6_ioctl, .listen = inet_dccp_listen, .shutdown = inet_shutdown, diff --git a/net/dccp/proto.c b/net/dccp/proto.c index ca21c1c76da0..0d56e36a6db7 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -312,11 +312,20 @@ int dccp_disconnect(struct sock *sk, int flags) EXPORT_SYMBOL_GPL(dccp_disconnect); -__poll_t dccp_poll_mask(struct socket *sock, __poll_t events) +/* + * Wait for a DCCP event. + * + * Note that we don't need to lock the socket, as the upper poll layers + * take care of normal races (between the test and the event) and we don't + * go look at any of the socket buffers directly. 
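Returning to the bpf_fib_lookup() changes a few hunks up: the helper no longer encodes the egress ifindex in its return value; it now returns a BPF_FIB_LKUP_RET_* status (0 on success) and writes the egress device into params->ifindex. A sketch of how an XDP forwarding program might consume the new contract (the program itself is illustrative and not part of this patch):

#include <linux/bpf.h>
#include "bpf_helpers.h"        /* as shipped with the kernel's BPF selftests */

SEC("xdp")
int xdp_fwd_sketch(struct xdp_md *ctx)
{
        struct bpf_fib_lookup params = {};
        int rc;

        /* ... parse the packet and fill params.family, the addresses,
         * the ports and params.ifindex = ctx->ingress_ifindex ... */

        rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
        if (rc == 0)                    /* lookup succeeded, forward it */
                return bpf_redirect(params.ifindex, 0);

        if (rc == BPF_FIB_LKUP_RET_BLACKHOLE ||
            rc == BPF_FIB_LKUP_RET_PROHIBIT ||
            rc == BPF_FIB_LKUP_RET_UNREACHABLE)
                return XDP_DROP;

        return XDP_PASS;                /* let the regular stack handle it */
}

char _license[] SEC("license") = "GPL";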
+ */ +__poll_t dccp_poll(struct file *file, struct socket *sock, + poll_table *wait) { __poll_t mask; struct sock *sk = sock->sk; + sock_poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == DCCP_LISTEN) return inet_csk_listen_poll(sk); @@ -358,7 +367,7 @@ __poll_t dccp_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL_GPL(dccp_poll_mask); +EXPORT_SYMBOL_GPL(dccp_poll); int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) { diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 9a686d890bfa..7d6ff983ba2c 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1207,11 +1207,11 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer) } -static __poll_t dn_poll_mask(struct socket *sock, __poll_t events) +static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); - __poll_t mask = datagram_poll_mask(sock, events); + __poll_t mask = datagram_poll(file, sock, wait); if (!skb_queue_empty(&scp->other_receive_queue)) mask |= EPOLLRDBAND; @@ -2331,7 +2331,7 @@ static const struct proto_ops dn_proto_ops = { .socketpair = sock_no_socketpair, .accept = dn_accept, .getname = dn_getname, - .poll_mask = dn_poll_mask, + .poll = dn_poll, .ioctl = dn_ioctl, .listen = dn_listen, .shutdown = dn_shutdown, diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index a0768d2759b8..a60658c85a9a 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -423,7 +423,7 @@ static const struct proto_ops ieee802154_raw_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = ieee802154_sock_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -969,7 +969,7 @@ static const struct proto_ops ieee802154_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = ieee802154_sock_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 15e125558c76..b403499fdabe 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -986,7 +986,7 @@ const struct proto_ops inet_stream_ops = { .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet_getname, - .poll_mask = tcp_poll_mask, + .poll = tcp_poll, .ioctl = inet_ioctl, .listen = inet_listen, .shutdown = inet_shutdown, @@ -1021,7 +1021,7 @@ const struct proto_ops inet_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = inet_getname, - .poll_mask = udp_poll_mask, + .poll = udp_poll, .ioctl = inet_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, @@ -1042,7 +1042,7 @@ EXPORT_SYMBOL(inet_dgram_ops); /* * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without - * udp_poll_mask + * udp_poll */ static const struct proto_ops inet_sockraw_ops = { .family = PF_INET, @@ -1053,7 +1053,7 @@ static const struct proto_ops inet_sockraw_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = inet_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = inet_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 1540db65241a..c9ec1603666b 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -448,9 +448,7 @@ static struct sk_buff 
**gue_gro_receive(struct sock *sk, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; - skb_gro_remcsum_cleanup(skb, &grc); - skb->remcsum_offload = 0; + skb_gro_flush_final_remcsum(skb, pp, flush, &grc); return pp; } diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 1859c473b21a..6a7d980105f6 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 31ff46daae97..3647167c8fa3 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -243,9 +243,9 @@ static inline int compute_score(struct sock *sk, struct net *net, bool dev_match = (sk->sk_bound_dev_if == dif || sk->sk_bound_dev_if == sdif); - if (exact_dif && !dev_match) + if (!dev_match) return -1; - if (sk->sk_bound_dev_if && dev_match) + if (sk->sk_bound_dev_if) score += 4; } if (sk->sk_incoming_cpu == raw_smp_processor_id()) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index af5a830ff6ad..b3308e9d9762 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1145,7 +1145,8 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, cork->fragsize = ip_sk_use_pmtu(sk) ? dst_mtu(&rt->dst) : rt->dst.dev->mtu; - cork->gso_size = sk->sk_type == SOCK_DGRAM ? ipc->gso_size : 0; + cork->gso_size = sk->sk_type == SOCK_DGRAM && + sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0; cork->dst = &rt->dst; cork->length = 0; cork->ttl = ipc->ttl; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d06247ba08b2..af0a857d8352 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -265,8 +265,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, ipv4.sysctl_tcp_fastopen); struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) }; struct tcp_fastopen_context *ctxt; - int ret; u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */ + __le32 key[4]; + int ret, i; tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); if (!tbl.data) @@ -275,11 +276,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, rcu_read_lock(); ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx); if (ctxt) - memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); + memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); else - memset(user_key, 0, sizeof(user_key)); + memset(key, 0, sizeof(key)); rcu_read_unlock(); + for (i = 0; i < ARRAY_SIZE(key); i++) + user_key[i] = le32_to_cpu(key[i]); + snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x", user_key[0], user_key[1], user_key[2], user_key[3]); ret = proc_dostring(&tbl, write, buffer, lenp, ppos); @@ -290,13 +294,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, ret = -EINVAL; goto bad_key; } - tcp_fastopen_reset_cipher(net, NULL, user_key, + + for (i = 0; i < ARRAY_SIZE(user_key); i++) + key[i] = cpu_to_le32(user_key[i]); + + tcp_fastopen_reset_cipher(net, NULL, key, TCP_FASTOPEN_KEY_LENGTH); } bad_key: pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n", - user_key[0], user_key[1], user_key[2], user_key[3], + user_key[0], user_key[1], user_key[2], user_key[3], (char *)tbl.data, ret); kfree(tbl.data); return ret; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 141acd92e58a..e7b53d2a971f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -494,21 
+494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp, } /* - * Socket is not locked. We are protected from async events by poll logic and - * correct handling of state changes made by other threads is impossible in - * any case. + * Wait for a TCP event. + * + * Note that we don't need to lock the socket, as the upper poll layers + * take care of normal races (between the test and the event) and we don't + * go look at any of the socket buffers directly. */ -__poll_t tcp_poll_mask(struct socket *sock, __poll_t events) +__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) { + __poll_t mask; struct sock *sk = sock->sk; const struct tcp_sock *tp = tcp_sk(sk); - __poll_t mask = 0; int state; + sock_poll_wait(file, sk_sleep(sk), wait); + state = inet_sk_state_load(sk); if (state == TCP_LISTEN) return inet_csk_listen_poll(sk); + /* Socket is not locked. We are protected from async events + * by poll logic and correct handling of state changes + * made by other threads is impossible in any case. + */ + + mask = 0; + /* * EPOLLHUP is certainly not done right. But poll() doesn't * have a notion of HUP in just one direction, and for a @@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events) return mask; } -EXPORT_SYMBOL(tcp_poll_mask); +EXPORT_SYMBOL(tcp_poll); int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) { diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 355d3dffd021..8e5522c6833a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -265,7 +265,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) * it is probably a retransmit. */ if (tp->ecn_flags & TCP_ECN_SEEN) - tcp_enter_quickack_mode(sk, 1); + tcp_enter_quickack_mode(sk, 2); break; case INET_ECN_CE: if (tcp_ca_needs_ecn(sk)) @@ -273,7 +273,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { /* Better not delay acks, sender can have a very low cwnd */ - tcp_enter_quickack_mode(sk, 1); + tcp_enter_quickack_mode(sk, 2); tp->ecn_flags |= TCP_ECN_DEMAND_CWR; } tp->ecn_flags |= TCP_ECN_SEEN; @@ -3181,6 +3181,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, if (tcp_is_reno(tp)) { tcp_remove_reno_sacks(sk, pkts_acked); + + /* If any of the cumulatively ACKed segments was + * retransmitted, non-SACK case cannot confirm that + * progress was due to original transmission due to + * lack of TCPCB_SACKED_ACKED bits even if some of + * the packets may have been never retransmitted. + */ + if (flag & FLAG_RETRANS_DATA_ACKED) + flag &= ~FLAG_ORIG_SACK_ACKED; } else { int delta; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9bb27df4dac5..24e116ddae79 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2591,7 +2591,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname, * udp_poll - wait for a UDP event. * @file - file struct * @sock - socket - * @events - events to wait for + * @wait - poll table * * This is same as datagram poll, except for the special case of * blocking sockets. If application is using a blocking fd @@ -2600,23 +2600,23 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname, * but then block when reading it. Add special case code * to work around these arguably broken applications. 
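The proc_tcp_fastopen_key() change above treats the stored 16-byte key as __le32 words and converts with le32_to_cpu()/cpu_to_le32(), so the hex string the sysctl reports (and accepts on write) is identical on little- and big-endian hosts. The formatting step in user-space form, with a made-up key:

#include <stdint.h>
#include <stdio.h>

/* Interpret 4 key bytes as a little-endian 32-bit word, as le32_to_cpu()
 * does on the kernel side. */
static uint32_t le32_word(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        uint8_t key[16] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
                            0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 };

        /* Prints 04030201-08070605-0c0b0a09-100f0e0d on any host. */
        printf("%08x-%08x-%08x-%08x\n",
               le32_word(key), le32_word(key + 4),
               le32_word(key + 8), le32_word(key + 12));
        return 0;
}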
*/ -__poll_t udp_poll_mask(struct socket *sock, __poll_t events) +__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait) { - __poll_t mask = datagram_poll_mask(sock, events); + __poll_t mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; if (!skb_queue_empty(&udp_sk(sk)->reader_queue)) mask |= EPOLLIN | EPOLLRDNORM; /* Check for false positives due to checksum errors */ - if ((mask & EPOLLRDNORM) && !(sock->file->f_flags & O_NONBLOCK) && + if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) && !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) mask &= ~(EPOLLIN | EPOLLRDNORM); return mask; } -EXPORT_SYMBOL(udp_poll_mask); +EXPORT_SYMBOL(udp_poll); int udp_abort(struct sock *sk, int err) { diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 92dc9e5a7ff3..69c54540d5b4 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -394,7 +394,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } EXPORT_SYMBOL(udp_gro_receive); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c134286d6a41..91580c62bb86 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4528,6 +4528,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, u32 flags) { struct fib6_info *f6i; + u32 prio; f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len, @@ -4536,13 +4537,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, if (!f6i) return -ENOENT; - if (f6i->fib6_metric != ifp->rt_priority) { + prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF; + if (f6i->fib6_metric != prio) { + /* delete old one */ + ip6_del_rt(dev_net(ifp->idev->dev), f6i); + /* add new one */ addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); - /* delete old one */ - ip6_del_rt(dev_net(ifp->idev->dev), f6i); } else { if (!expires) fib6_clean_expires(f6i); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 74f2a261e8df..9ed0eae91758 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -570,7 +570,7 @@ const struct proto_ops inet6_stream_ops = { .socketpair = sock_no_socketpair, /* a do nothing */ .accept = inet_accept, /* ok */ .getname = inet6_getname, - .poll_mask = tcp_poll_mask, /* ok */ + .poll = tcp_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = inet_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ @@ -603,7 +603,7 @@ const struct proto_ops inet6_dgram_ops = { .socketpair = sock_no_socketpair, /* a do nothing */ .accept = sock_no_accept, /* a do nothing */ .getname = inet6_getname, - .poll_mask = udp_poll_mask, /* ok */ + .poll = udp_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = sock_no_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 2febe26de6a1..595ad408dba0 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net, bool dev_match = (sk->sk_bound_dev_if == dif || sk->sk_bound_dev_if == sdif); - if (exact_dif && !dev_match) + if (!dev_match) return -1; - if (sk->sk_bound_dev_if && dev_match) + if (sk->sk_bound_dev_if) score++; } if (sk->sk_incoming_cpu == raw_smp_processor_id()) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 
39d1d487eca2..1fb2f3118d60 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -167,8 +167,9 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags) return f6i; } -void fib6_info_destroy(struct fib6_info *f6i) +void fib6_info_destroy_rcu(struct rcu_head *head) { + struct fib6_info *f6i = container_of(head, struct fib6_info, rcu); struct rt6_exception_bucket *bucket; struct dst_metrics *m; @@ -206,7 +207,7 @@ void fib6_info_destroy(struct fib6_info *f6i) kfree(f6i); } -EXPORT_SYMBOL_GPL(fib6_info_destroy); +EXPORT_SYMBOL_GPL(fib6_info_destroy_rcu); static struct fib6_node *node_alloc(struct net *net) { diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 021e5aef6ba3..a14fb4fcdf18 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1219,7 +1219,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, if (mtu < IPV6_MIN_MTU) return -EINVAL; cork->base.fragsize = mtu; - cork->base.gso_size = sk->sk_type == SOCK_DGRAM ? ipc6->gso_size : 0; + cork->base.gso_size = sk->sk_type == SOCK_DGRAM && + sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0; if (dst_allfrag(xfrm_dst_path(&rt->dst))) cork->base.flags |= IPCORK_ALLFRAG; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 975021df7c1c..c0c74088f2af 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2082,7 +2082,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev) mld_send_initial_cr(idev); idev->mc_dad_count--; if (idev->mc_dad_count) - mld_dad_start_timer(idev, idev->mc_maxdelay); + mld_dad_start_timer(idev, + unsolicited_report_interval(idev)); } } @@ -2094,7 +2095,8 @@ static void mld_dad_timer_expire(struct timer_list *t) if (idev->mc_dad_count) { idev->mc_dad_count--; if (idev->mc_dad_count) - mld_dad_start_timer(idev, idev->mc_maxdelay); + mld_dad_start_timer(idev, + unsolicited_report_interval(idev)); } in6_dev_put(idev); } @@ -2452,7 +2454,8 @@ static void mld_ifc_timer_expire(struct timer_list *t) if (idev->mc_ifc_count) { idev->mc_ifc_count--; if (idev->mc_ifc_count) - mld_ifc_start_timer(idev, idev->mc_maxdelay); + mld_ifc_start_timer(idev, + unsolicited_report_interval(idev)); } in6_dev_put(idev); } diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 5e0332014c17..a452d99c9f52 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net) if (hdr == NULL) goto err_reg; - net->nf_frag.sysctl.frags_hdr = hdr; + net->nf_frag_frags_hdr = hdr; return 0; err_reg: @@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) { struct ctl_table *table; - table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg; - unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr); + table = net->nf_frag_frags_hdr->ctl_table_arg; + unregister_net_sysctl_table(net->nf_frag_frags_hdr); if (!net_eq(net, &init_net)) kfree(table); } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ce6f0d15b5dd..afc307c89d1a 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1334,7 +1334,7 @@ void raw6_proc_exit(void) } #endif /* CONFIG_PROC_FS */ -/* Same as inet6_dgram_ops, sans udp_poll_mask. */ +/* Same as inet6_dgram_ops, sans udp_poll. 
*/ const struct proto_ops inet6_sockraw_ops = { .family = PF_INET6, .owner = THIS_MODULE, @@ -1344,7 +1344,7 @@ const struct proto_ops inet6_sockraw_ops = { .socketpair = sock_no_socketpair, /* a do nothing */ .accept = sock_no_accept, /* a do nothing */ .getname = inet6_getname, - .poll_mask = datagram_poll_mask, /* ok */ + .poll = datagram_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = sock_no_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 33fb35cbfac1..558fe8cc6d43 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void) return -ENOMEM; for_each_possible_cpu(cpu) { - tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL); + tfm = crypto_alloc_shash(algo->name, 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); p_tfm = per_cpu_ptr(algo->tfms, cpu); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 68e86257a549..893a022f9620 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1488,11 +1488,14 @@ static inline __poll_t iucv_accept_poll(struct sock *parent) return 0; } -static __poll_t iucv_sock_poll_mask(struct socket *sock, __poll_t events) +__poll_t iucv_sock_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; + sock_poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_state == IUCV_LISTEN) return iucv_accept_poll(sk); @@ -2385,7 +2388,7 @@ static const struct proto_ops iucv_sock_ops = { .getname = iucv_sock_getname, .sendmsg = iucv_sock_sendmsg, .recvmsg = iucv_sock_recvmsg, - .poll_mask = iucv_sock_poll_mask, + .poll = iucv_sock_poll, .ioctl = sock_no_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 84b7d5c6fec8..d3601d421571 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1336,9 +1336,9 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux) struct list_head *head; int index = 0; - /* For SOCK_SEQPACKET sock type, datagram_poll_mask checks the sk_state, - * so we set sk_state, otherwise epoll_wait always returns right away - * with EPOLLHUP + /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so + * we set sk_state, otherwise epoll_wait always returns right away with + * EPOLLHUP */ kcm->sk.sk_state = TCP_ESTABLISHED; @@ -1903,7 +1903,7 @@ static const struct proto_ops kcm_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = kcm_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -1924,7 +1924,7 @@ static const struct proto_ops kcm_seqpacket_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = kcm_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/key/af_key.c b/net/key/af_key.c index 8bdc1cbe490a..5e1d2946ffbf 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3751,7 +3751,7 @@ static const struct proto_ops pfkey_ops = { /* Now the operations that really occur. 
*/ .release = pfkey_release, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .sendmsg = pfkey_sendmsg, .recvmsg = pfkey_recvmsg, }; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 181073bf6925..a9c05b2bc1b0 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -613,7 +613,7 @@ static const struct proto_ops l2tp_ip_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = l2tp_ip_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = inet_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 336e4c00abbc..957369192ca1 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -754,7 +754,7 @@ static const struct proto_ops l2tp_ip6_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = l2tp_ip6_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = inet6_ioctl, .listen = sock_no_listen, .shutdown = inet_shutdown, diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 55188382845c..e398797878a9 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1818,7 +1818,7 @@ static const struct proto_ops pppol2tp_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pppol2tp_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = pppol2tp_setsockopt, diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 804de8490186..1beeea9549fa 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -1192,7 +1192,7 @@ static const struct proto_ops llc_ui_ops = { .socketpair = sock_no_socketpair, .accept = llc_ui_accept, .getname = llc_ui_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = llc_ui_ioctl, .listen = llc_ui_listen, .shutdown = llc_ui_shutdown, diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 44b5dfe8727d..fa1f1e63a264 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -4845,7 +4845,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, skb_reset_network_header(skb); skb_reset_mac_header(skb); + local_bh_disable(); __ieee80211_subif_start_xmit(skb, skb->dev, flags); + local_bh_enable(); return 0; } diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c index e7b05de1e6d1..25e483e8278b 100644 --- a/net/ncsi/ncsi-aen.c +++ b/net/ncsi/ncsi-aen.c @@ -73,8 +73,8 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp, ncm->data[2] = data; ncm->data[4] = ntohl(lsc->oem_status); - netdev_info(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n", - nc->id, data & 0x1 ? "up" : "down"); + netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n", + nc->id, data & 0x1 ? "up" : "down"); chained = !list_empty(&nc->link); state = nc->state; @@ -148,9 +148,9 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp, hncdsc = (struct ncsi_aen_hncdsc_pkt *)h; ncm->data[3] = ntohl(hncdsc->status); spin_unlock_irqrestore(&nc->lock, flags); - netdev_printk(KERN_DEBUG, ndp->ndev.dev, - "NCSI: host driver %srunning on channel %u\n", - ncm->data[3] & 0x1 ? "" : "not ", nc->id); + netdev_dbg(ndp->ndev.dev, + "NCSI: host driver %srunning on channel %u\n", + ncm->data[3] & 0x1 ? 
"" : "not ", nc->id); return 0; } diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index 5561e221b71f..091284760d21 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -788,8 +788,8 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) } break; case ncsi_dev_state_config_done: - netdev_printk(KERN_DEBUG, ndp->ndev.dev, - "NCSI: channel %u config done\n", nc->id); + netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n", + nc->id); spin_lock_irqsave(&nc->lock, flags); if (nc->reconfigure_needed) { /* This channel's configuration has been updated @@ -804,8 +804,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) list_add_tail_rcu(&nc->link, &ndp->channel_queue); spin_unlock_irqrestore(&ndp->lock, flags); - netdev_printk(KERN_DEBUG, dev, - "Dirty NCSI channel state reset\n"); + netdev_dbg(dev, "Dirty NCSI channel state reset\n"); ncsi_process_next_channel(ndp); break; } @@ -816,9 +815,9 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) } else { hot_nc = NULL; nc->state = NCSI_CHANNEL_INACTIVE; - netdev_warn(ndp->ndev.dev, - "NCSI: channel %u link down after config\n", - nc->id); + netdev_dbg(ndp->ndev.dev, + "NCSI: channel %u link down after config\n", + nc->id); } spin_unlock_irqrestore(&nc->lock, flags); @@ -908,9 +907,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp) } ncm = &found->modes[NCSI_MODE_LINK]; - netdev_printk(KERN_DEBUG, ndp->ndev.dev, - "NCSI: Channel %u added to queue (link %s)\n", - found->id, ncm->data[2] & 0x1 ? "up" : "down"); + netdev_dbg(ndp->ndev.dev, + "NCSI: Channel %u added to queue (link %s)\n", + found->id, ncm->data[2] & 0x1 ? "up" : "down"); out: spin_lock_irqsave(&ndp->lock, flags); @@ -1199,14 +1198,14 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp) switch (old_state) { case NCSI_CHANNEL_INACTIVE: ndp->ndev.state = ncsi_dev_state_config; - netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n", - nc->id); + netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n", + nc->id); ncsi_configure_channel(ndp); break; case NCSI_CHANNEL_ACTIVE: ndp->ndev.state = ncsi_dev_state_suspend; - netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n", - nc->id); + netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n", + nc->id); ncsi_suspend_channel(ndp); break; default: @@ -1226,8 +1225,6 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp) return ncsi_choose_active_channel(ndp); } - netdev_printk(KERN_DEBUG, ndp->ndev.dev, - "NCSI: No more channels to process\n"); ncsi_report_link(ndp, false); return -ENODEV; } @@ -1318,9 +1315,9 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp) if ((ndp->ndev.state & 0xff00) == ncsi_dev_state_config || !list_empty(&nc->link)) { - netdev_printk(KERN_DEBUG, nd->dev, - "NCSI: channel %p marked dirty\n", - nc); + netdev_dbg(nd->dev, + "NCSI: channel %p marked dirty\n", + nc); nc->reconfigure_needed = true; } spin_unlock_irqrestore(&nc->lock, flags); @@ -1338,8 +1335,7 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp) list_add_tail_rcu(&nc->link, &ndp->channel_queue); spin_unlock_irqrestore(&ndp->lock, flags); - netdev_printk(KERN_DEBUG, nd->dev, - "NCSI: kicked channel %p\n", nc); + netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc); n++; } } @@ -1370,8 +1366,8 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { n_vids++; if (vlan->vid == vid) { - netdev_printk(KERN_DEBUG, dev, - "NCSI: vid %u already registered\n", vid); 
+ netdev_dbg(dev, "NCSI: vid %u already registered\n", + vid); return 0; } } @@ -1390,7 +1386,7 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) vlan->vid = vid; list_add_rcu(&vlan->list, &ndp->vlan_vids); - netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid); + netdev_dbg(dev, "NCSI: Added new vid %u\n", vid); found = ncsi_kick_channels(ndp) != 0; @@ -1419,8 +1415,7 @@ int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) /* Remove the VLAN id from our internal list */ list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list) if (vlan->vid == vid) { - netdev_printk(KERN_DEBUG, dev, - "NCSI: vid %u found, removing\n", vid); + netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid); list_del_rcu(&vlan->list); found = true; kfree(vlan); @@ -1547,7 +1542,7 @@ void ncsi_stop_dev(struct ncsi_dev *nd) } } - netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n"); + netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n"); ncsi_report_link(ndp, true); } EXPORT_SYMBOL_GPL(ncsi_stop_dev); diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index d8383609fe28..510039862aa9 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -47,6 +47,8 @@ struct nf_conncount_tuple { struct hlist_node node; struct nf_conntrack_tuple tuple; struct nf_conntrack_zone zone; + int cpu; + u32 jiffies32; }; struct nf_conncount_rb { @@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head, return false; conn->tuple = *tuple; conn->zone = *zone; + conn->cpu = raw_smp_processor_id(); + conn->jiffies32 = (u32)jiffies; hlist_add_head(&conn->node, head); return true; } EXPORT_SYMBOL_GPL(nf_conncount_add); +static const struct nf_conntrack_tuple_hash * +find_or_evict(struct net *net, struct nf_conncount_tuple *conn) +{ + const struct nf_conntrack_tuple_hash *found; + unsigned long a, b; + int cpu = raw_smp_processor_id(); + __s32 age; + + found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); + if (found) + return found; + b = conn->jiffies32; + a = (u32)jiffies; + + /* conn might have been added just before by another cpu and + * might still be unconfirmed. In this case, nf_conntrack_find() + * returns no result. Thus only evict if this cpu added the + * stale entry or if the entry is older than two jiffies. + */ + age = a - b; + if (conn->cpu == cpu || age >= 2) { + hlist_del(&conn->node); + kmem_cache_free(conncount_conn_cachep, conn); + return ERR_PTR(-ENOENT); + } + + return ERR_PTR(-EAGAIN); +} + unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone, @@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, { const struct nf_conntrack_tuple_hash *found; struct nf_conncount_tuple *conn; - struct hlist_node *n; struct nf_conn *found_ct; + struct hlist_node *n; unsigned int length = 0; *addit = tuple ? 
true : false; /* check the saved connections */ hlist_for_each_entry_safe(conn, n, head, node) { - found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); - if (found == NULL) { - hlist_del(&conn->node); - kmem_cache_free(conncount_conn_cachep, conn); + found = find_or_evict(net, conn); + if (IS_ERR(found)) { + /* Not found, but might be about to be confirmed */ + if (PTR_ERR(found) == -EAGAIN) { + length++; + if (!tuple) + continue; + + if (nf_ct_tuple_equal(&conn->tuple, tuple) && + nf_ct_zone_id(&conn->zone, conn->zone.dir) == + nf_ct_zone_id(zone, zone->dir)) + *addit = false; + } continue; } diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 551a1eddf0fa..a75b11c39312 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) nf_ct_expect_iterate_destroy(expect_iter_me, NULL); nf_ct_iterate_destroy(unhelp, me); + + /* Maybe someone has gotten the helper already when unhelp above. + * So need to wait it. + */ + synchronize_rcu(); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 426457047578..a61d6df6e5f6 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, if (write) { struct ctl_table tmp = *table; + /* proc_dostring() can append to existing strings, so we need to + * initialize it as an empty string. + */ + buf[0] = '\0'; tmp.data = buf; r = proc_dostring(&tmp, write, buffer, lenp, ppos); if (r) @@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write, rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); mutex_unlock(&nf_log_mutex); } else { + struct ctl_table tmp = *table; + + tmp.data = buf; mutex_lock(&nf_log_mutex); logger = nft_log_dereference(net->nf.nf_loggers[tindex]); if (!logger) - table->data = "NONE"; + strlcpy(buf, "NONE", sizeof(buf)); else - table->data = logger->name; - r = proc_dostring(table, write, buffer, lenp, ppos); + strlcpy(buf, logger->name, sizeof(buf)); mutex_unlock(&nf_log_mutex); + r = proc_dostring(&tmp, write, buffer, lenp, ppos); } return r; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 4ccd2988f9db..ea4ba551abb2 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl, static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, + [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 }, + [NFQA_CFG_MASK] = { .type = NLA_U32 }, + [NFQA_CFG_FLAGS] = { .type = NLA_U32 }, }; static const struct nf_queue_handler nfqh = { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 1189b84413d5..393573a99a5a 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2658,7 +2658,7 @@ static const struct proto_ops netlink_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = netlink_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = netlink_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 93fbcafbf388..03f37c4e64fe 100644 --- a/net/netrom/af_netrom.c +++ 
b/net/netrom/af_netrom.c @@ -1355,7 +1355,7 @@ static const struct proto_ops nr_proto_ops = { .socketpair = sock_no_socketpair, .accept = nr_accept, .getname = nr_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = nr_ioctl, .listen = nr_listen, .shutdown = sock_no_shutdown, diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index ab5bb14b49af..ea0c0c6f1874 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -548,13 +548,16 @@ static inline __poll_t llcp_accept_poll(struct sock *parent) return 0; } -static __poll_t llcp_sock_poll_mask(struct socket *sock, __poll_t events) +static __poll_t llcp_sock_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; pr_debug("%p\n", sk); + sock_poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_state == LLCP_LISTEN) return llcp_accept_poll(sk); @@ -896,7 +899,7 @@ static const struct proto_ops llcp_sock_ops = { .socketpair = sock_no_socketpair, .accept = llcp_sock_accept, .getname = llcp_sock_getname, - .poll_mask = llcp_sock_poll_mask, + .poll = llcp_sock_poll, .ioctl = sock_no_ioctl, .listen = llcp_sock_listen, .shutdown = sock_no_shutdown, @@ -916,7 +919,7 @@ static const struct proto_ops llcp_rawsock_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = llcp_sock_getname, - .poll_mask = llcp_sock_poll_mask, + .poll = llcp_sock_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 60c322531c49..e2188deb08dc 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -284,7 +284,7 @@ static const struct proto_ops rawsock_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -304,7 +304,7 @@ static const struct proto_ops rawsock_raw_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 50809748c127..57634bc3da74 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2262,6 +2262,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (po->stats.stats1.tp_drops) status |= TP_STATUS_LOSING; } + + if (do_vnet && + virtio_net_hdr_from_skb(skb, h.raw + macoff - + sizeof(struct virtio_net_hdr), + vio_le(), true, 0)) + goto drop_n_account; + po->stats.stats1.tp_packets++; if (copy_skb) { status |= TP_STATUS_COPY; @@ -2269,15 +2276,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, } spin_unlock(&sk->sk_receive_queue.lock); - if (do_vnet) { - if (virtio_net_hdr_from_skb(skb, h.raw + macoff - - sizeof(struct virtio_net_hdr), - vio_le(), true, 0)) { - spin_lock(&sk->sk_receive_queue.lock); - goto drop_n_account; - } - } - skb_copy_bits(skb, 0, h.raw + macoff, snaplen); if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) @@ -4078,11 +4076,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, return 0; } -static __poll_t packet_poll_mask(struct socket *sock, __poll_t events) +static __poll_t packet_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); - __poll_t mask = 
datagram_poll_mask(sock, events); + __poll_t mask = datagram_poll(file, sock, wait); spin_lock_bh(&sk->sk_receive_queue.lock); if (po->rx_ring.pg_vec) { @@ -4424,7 +4423,7 @@ static const struct proto_ops packet_ops_spkt = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname_spkt, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -4445,7 +4444,7 @@ static const struct proto_ops packet_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = packet_getname, - .poll_mask = packet_poll_mask, + .poll = packet_poll, .ioctl = packet_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/net/phonet/socket.c b/net/phonet/socket.c index c295c4e20f01..30187990257f 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -340,12 +340,15 @@ static int pn_socket_getname(struct socket *sock, struct sockaddr *addr, return sizeof(struct sockaddr_pn); } -static __poll_t pn_socket_poll_mask(struct socket *sock, __poll_t events) +static __poll_t pn_socket_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct pep_sock *pn = pep_sk(sk); __poll_t mask = 0; + poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_state == TCP_CLOSE) return EPOLLERR; if (!skb_queue_empty(&sk->sk_receive_queue)) @@ -445,7 +448,7 @@ const struct proto_ops phonet_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pn_socket_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = pn_socket_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -470,7 +473,7 @@ const struct proto_ops phonet_stream_ops = { .socketpair = sock_no_socketpair, .accept = pn_socket_accept, .getname = pn_socket_getname, - .poll_mask = pn_socket_poll_mask, + .poll = pn_socket_poll, .ioctl = pn_socket_ioctl, .listen = pn_socket_listen, .shutdown = sock_no_shutdown, diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 1b5025ea5b04..2aa07b547b16 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -1023,7 +1023,7 @@ static const struct proto_ops qrtr_proto_ops = { .recvmsg = qrtr_recvmsg, .getname = qrtr_getname, .ioctl = qrtr_ioctl, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, diff --git a/net/rds/connection.c b/net/rds/connection.c index abef75da89a7..cfb05953b0e5 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len, int rds_conn_init(void) { + int ret; + + ret = rds_loop_net_init(); /* register pernet callback */ + if (ret) + return ret; + rds_conn_slab = kmem_cache_create("rds_connection", sizeof(struct rds_connection), 0, 0, NULL); - if (!rds_conn_slab) + if (!rds_conn_slab) { + rds_loop_net_exit(); return -ENOMEM; + } rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info); rds_info_register_func(RDS_INFO_SEND_MESSAGES, @@ -676,6 +684,7 @@ int rds_conn_init(void) void rds_conn_exit(void) { + rds_loop_net_exit(); /* unregister pernet callback */ rds_loop_exit(); WARN_ON(!hlist_empty(rds_conn_hash)); diff --git a/net/rds/loop.c b/net/rds/loop.c index dac6218a460e..feea1f96ee2a 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c @@ -33,6 +33,8 @@ #include #include #include +#include +#include #include "rds_single_path.h" #include "rds.h" @@ -40,6 +42,17 @@ static 
DEFINE_SPINLOCK(loop_conns_lock); static LIST_HEAD(loop_conns); +static atomic_t rds_loop_unloading = ATOMIC_INIT(0); + +static void rds_loop_set_unloading(void) +{ + atomic_set(&rds_loop_unloading, 1); +} + +static bool rds_loop_is_unloading(struct rds_connection *conn) +{ + return atomic_read(&rds_loop_unloading) != 0; +} /* * This 'loopback' transport is a special case for flows that originate @@ -165,6 +178,8 @@ void rds_loop_exit(void) struct rds_loop_connection *lc, *_lc; LIST_HEAD(tmp_list); + rds_loop_set_unloading(); + synchronize_rcu(); /* avoid calling conn_destroy with irqs off */ spin_lock_irq(&loop_conns_lock); list_splice(&loop_conns, &tmp_list); @@ -177,6 +192,46 @@ void rds_loop_exit(void) } } +static void rds_loop_kill_conns(struct net *net) +{ + struct rds_loop_connection *lc, *_lc; + LIST_HEAD(tmp_list); + + spin_lock_irq(&loop_conns_lock); + list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node) { + struct net *c_net = read_pnet(&lc->conn->c_net); + + if (net != c_net) + continue; + list_move_tail(&lc->loop_node, &tmp_list); + } + spin_unlock_irq(&loop_conns_lock); + + list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) { + WARN_ON(lc->conn->c_passive); + rds_conn_destroy(lc->conn); + } +} + +static void __net_exit rds_loop_exit_net(struct net *net) +{ + rds_loop_kill_conns(net); +} + +static struct pernet_operations rds_loop_net_ops = { + .exit = rds_loop_exit_net, +}; + +int rds_loop_net_init(void) +{ + return register_pernet_device(&rds_loop_net_ops); +} + +void rds_loop_net_exit(void) +{ + unregister_pernet_device(&rds_loop_net_ops); +} + /* * This is missing .xmit_* because loop doesn't go through generic * rds_send_xmit() and doesn't call rds_recv_incoming(). .listen_stop and @@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = { .inc_free = rds_loop_inc_free, .t_name = "loopback", .t_type = RDS_TRANS_LOOP, + .t_unloading = rds_loop_is_unloading, }; diff --git a/net/rds/loop.h b/net/rds/loop.h index 469fa4b2da4f..bbc8cdd030df 100644 --- a/net/rds/loop.h +++ b/net/rds/loop.h @@ -5,6 +5,8 @@ /* loop.c */ extern struct rds_transport rds_loop_transport; +int rds_loop_net_init(void); +void rds_loop_net_exit(void); void rds_loop_exit(void); #endif diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index ebe42e7eb456..d00a0ef39a56 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1470,7 +1470,7 @@ static const struct proto_ops rose_proto_ops = { .socketpair = sock_no_socketpair, .accept = rose_accept, .getname = rose_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = rose_ioctl, .listen = rose_listen, .shutdown = sock_no_shutdown, diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 3b1ac93efee2..2b463047dd7b 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -734,11 +734,15 @@ static int rxrpc_getsockopt(struct socket *sock, int level, int optname, /* * permit an RxRPC socket to be polled */ -static __poll_t rxrpc_poll_mask(struct socket *sock, __poll_t events) +static __poll_t rxrpc_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct rxrpc_sock *rx = rxrpc_sk(sk); - __poll_t mask = 0; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* the socket is readable if there are any messages waiting on the Rx * queue */ @@ -945,7 +949,7 @@ static const struct proto_ops rxrpc_rpc_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = rxrpc_poll_mask, + .poll = 
rxrpc_poll, .ioctl = sock_no_ioctl, .listen = rxrpc_listen, .shutdown = rxrpc_shutdown, diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 8527cfdc446d..20d7d36b2fc9 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -415,7 +415,8 @@ static void tcf_ife_cleanup(struct tc_action *a) spin_unlock_bh(&ife->tcf_lock); p = rcu_dereference_protected(ife->params, 1); - kfree_rcu(p, rcu); + if (p) + kfree_rcu(p, rcu); } /* under ife->tcf_lock for existing action */ @@ -516,8 +517,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, saddr = nla_data(tb[TCA_IFE_SMAC]); } - ife->tcf_action = parm->action; - if (parm->flags & IFE_ENCODE) { if (daddr) ether_addr_copy(p->eth_dst, daddr); @@ -543,10 +542,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, NULL, NULL); if (err) { metadata_parse_err: - if (exists) - tcf_idr_release(*a, bind); if (ret == ACT_P_CREATED) - _tcf_ife_cleanup(*a); + tcf_idr_release(*a, bind); if (exists) spin_unlock_bh(&ife->tcf_lock); @@ -567,7 +564,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, err = use_all_metadata(ife); if (err) { if (ret == ACT_P_CREATED) - _tcf_ife_cleanup(*a); + tcf_idr_release(*a, bind); if (exists) spin_unlock_bh(&ife->tcf_lock); @@ -576,6 +573,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, } } + ife->tcf_action = parm->action; if (exists) spin_unlock_bh(&ife->tcf_lock); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 2b5be42a9f1c..9e8b26a80fb3 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -66,7 +66,7 @@ struct fl_flow_mask { struct rhashtable_params filter_ht_params; struct flow_dissector dissector; struct list_head filters; - struct rcu_head rcu; + struct rcu_work rwork; struct list_head list; }; @@ -203,6 +203,20 @@ static int fl_init(struct tcf_proto *tp) return rhashtable_init(&head->ht, &mask_ht_params); } +static void fl_mask_free(struct fl_flow_mask *mask) +{ + rhashtable_destroy(&mask->ht); + kfree(mask); +} + +static void fl_mask_free_work(struct work_struct *work) +{ + struct fl_flow_mask *mask = container_of(to_rcu_work(work), + struct fl_flow_mask, rwork); + + fl_mask_free(mask); +} + static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask, bool async) { @@ -210,12 +224,11 @@ static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask, return false; rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params); - rhashtable_destroy(&mask->ht); list_del_rcu(&mask->list); if (async) - kfree_rcu(mask, rcu); + tcf_queue_work(&mask->rwork, fl_mask_free_work); else - kfree(mask); + fl_mask_free(mask); return true; } diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c index c98a61e980ba..9c4c2bb547d7 100644 --- a/net/sched/sch_blackhole.c +++ b/net/sched/sch_blackhole.c @@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { qdisc_drop(skb, sch, to_free); - return NET_XMIT_SUCCESS; + return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } static struct sk_buff *blackhole_dequeue(struct Qdisc *sch) diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 3ae9877ea205..3278a76f6861 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1385,8 +1385,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch) if (next_time == 0 || next_time > q->root.cl_cfmin) next_time = q->root.cl_cfmin; } - WARN_ON(next_time == 0); - qdisc_watchdog_schedule(&q->watchdog, next_time); + if (next_time) + 
qdisc_watchdog_schedule(&q->watchdog, next_time); } static int diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 79daa98208c3..bfb9f812e2ef 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -237,7 +237,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, /* Account for a different sized first fragment */ if (msg_len >= first_len) { msg->can_delay = 0; - SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS); + if (msg_len > first_len) + SCTP_INC_STATS(sock_net(asoc->base.sk), + SCTP_MIB_FRAGUSRMSGS); } else { /* Which may be the only one... */ first_len = msg_len; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 7339918a805d..0cd2e764f47f 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -1010,7 +1010,7 @@ static const struct proto_ops inet6_seqpacket_ops = { .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = sctp_getname, - .poll_mask = sctp_poll_mask, + .poll = sctp_poll, .ioctl = inet6_ioctl, .listen = sctp_inet_listen, .shutdown = inet_shutdown, diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 5dffbc493008..67f73d3a1356 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1016,7 +1016,7 @@ static const struct proto_ops inet_seqpacket_ops = { .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet_getname, /* Semantics are different. */ - .poll_mask = sctp_poll_mask, + .poll = sctp_poll, .ioctl = inet_ioctl, .listen = sctp_inet_listen, .shutdown = inet_shutdown, /* Looks harmless. */ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d20f7addee19..ce620e878538 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -7717,12 +7717,14 @@ int sctp_inet_listen(struct socket *sock, int backlog) * here, again, by modeling the current TCP/UDP code. We don't have * a good way to test with it yet. 
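The cls_flower hunk above replaces kfree_rcu() on fl_flow_mask with a deferred work item, presumably because the mask's rhashtable must not be torn down before concurrent RCU readers are done and rhashtable_destroy() can sleep, so it cannot run from an RCU callback. A condensed sketch of that pattern using the generic queue_rcu_work() instead of the TC-specific tcf_queue_work() wrapper; the example_* names are hypothetical:

#include <linux/kernel.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_mask {
	struct rhashtable ht;
	struct rcu_work rwork;
};

static void example_mask_free_work(struct work_struct *work)
{
	struct example_mask *mask = container_of(to_rcu_work(work),
						 struct example_mask, rwork);

	/* Process context: sleeping calls such as rhashtable_destroy()
	 * are allowed here, unlike in a call_rcu()/kfree_rcu() callback.
	 */
	rhashtable_destroy(&mask->ht);
	kfree(mask);
}

static void example_mask_put(struct example_mask *mask)
{
	INIT_RCU_WORK(&mask->rwork, example_mask_free_work);
	/* The work item only runs after an RCU grace period has elapsed. */
	queue_rcu_work(system_wq, &mask->rwork);
}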
*/ -__poll_t sctp_poll_mask(struct socket *sock, __poll_t events) +__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct sctp_sock *sp = sctp_sk(sk); __poll_t mask; + poll_wait(file, sk_sleep(sk), wait); + sock_rps_record_flow(sk); /* A TCP-style listening socket becomes readable when the accept queue diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index da7f02edcd37..3c1405df936c 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -45,6 +45,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group */ static void smc_tcp_listen_work(struct work_struct *); +static void smc_connect_work(struct work_struct *); static void smc_set_keepalive(struct sock *sk, int val) { @@ -122,6 +123,12 @@ static int smc_release(struct socket *sock) goto out; smc = smc_sk(sk); + + /* cleanup for a dangling non-blocking connect */ + flush_work(&smc->connect_work); + kfree(smc->connect_info); + smc->connect_info = NULL; + if (sk->sk_state == SMC_LISTEN) /* smc_close_non_accepted() is called and acquires * sock lock for child sockets again @@ -186,6 +193,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, sk->sk_protocol = protocol; smc = smc_sk(sk); INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); + INIT_WORK(&smc->connect_work, smc_connect_work); INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); INIT_LIST_HEAD(&smc->accept_q); spin_lock_init(&smc->accept_q_lock); @@ -576,6 +584,35 @@ static int __smc_connect(struct smc_sock *smc) return 0; } +static void smc_connect_work(struct work_struct *work) +{ + struct smc_sock *smc = container_of(work, struct smc_sock, + connect_work); + int rc; + + lock_sock(&smc->sk); + rc = kernel_connect(smc->clcsock, &smc->connect_info->addr, + smc->connect_info->alen, smc->connect_info->flags); + if (smc->clcsock->sk->sk_err) { + smc->sk.sk_err = smc->clcsock->sk->sk_err; + goto out; + } + if (rc < 0) { + smc->sk.sk_err = -rc; + goto out; + } + + rc = __smc_connect(smc); + if (rc < 0) + smc->sk.sk_err = -rc; + +out: + smc->sk.sk_state_change(&smc->sk); + kfree(smc->connect_info); + smc->connect_info = NULL; + release_sock(&smc->sk); +} + static int smc_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { @@ -605,15 +642,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr, smc_copy_sock_settings_to_clc(smc); tcp_sk(smc->clcsock->sk)->syn_smc = 1; - rc = kernel_connect(smc->clcsock, addr, alen, flags); - if (rc) - goto out; + if (flags & O_NONBLOCK) { + if (smc->connect_info) { + rc = -EALREADY; + goto out; + } + smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL); + if (!smc->connect_info) { + rc = -ENOMEM; + goto out; + } + smc->connect_info->alen = alen; + smc->connect_info->flags = flags ^ O_NONBLOCK; + memcpy(&smc->connect_info->addr, addr, alen); + schedule_work(&smc->connect_work); + rc = -EINPROGRESS; + } else { + rc = kernel_connect(smc->clcsock, addr, alen, flags); + if (rc) + goto out; - rc = __smc_connect(smc); - if (rc < 0) - goto out; - else - rc = 0; /* success cases including fallback */ + rc = __smc_connect(smc); + if (rc < 0) + goto out; + else + rc = 0; /* success cases including fallback */ + } out: release_sock(sk); @@ -1273,40 +1327,26 @@ static __poll_t smc_accept_poll(struct sock *parent) return mask; } -static __poll_t smc_poll_mask(struct socket *sock, __poll_t events) +static __poll_t smc_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; __poll_t 
mask = 0; struct smc_sock *smc; - int rc; if (!sk) return EPOLLNVAL; smc = smc_sk(sock->sk); - sock_hold(sk); - lock_sock(sk); if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { /* delegate to CLC child sock */ - release_sock(sk); - mask = smc->clcsock->ops->poll_mask(smc->clcsock, events); - lock_sock(sk); + mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); sk->sk_err = smc->clcsock->sk->sk_err; - if (sk->sk_err) { + if (sk->sk_err) mask |= EPOLLERR; - } else { - /* if non-blocking connect finished ... */ - if (sk->sk_state == SMC_INIT && - mask & EPOLLOUT && - smc->clcsock->sk->sk_state != TCP_CLOSE) { - rc = __smc_connect(smc); - if (rc < 0) - mask |= EPOLLERR; - /* success cases including fallback */ - mask |= EPOLLOUT | EPOLLWRNORM; - } - } } else { + if (sk->sk_state != SMC_CLOSED) + sock_poll_wait(file, sk_sleep(sk), wait); if (sk->sk_err) mask |= EPOLLERR; if ((sk->sk_shutdown == SHUTDOWN_MASK) || @@ -1332,10 +1372,7 @@ static __poll_t smc_poll_mask(struct socket *sock, __poll_t events) } if (smc->conn.urg_state == SMC_URG_VALID) mask |= EPOLLPRI; - } - release_sock(sk); - sock_put(sk); return mask; } @@ -1619,7 +1656,7 @@ static const struct proto_ops smc_sock_ops = { .socketpair = sock_no_socketpair, .accept = smc_accept, .getname = smc_getname, - .poll_mask = smc_poll_mask, + .poll = smc_poll, .ioctl = smc_ioctl, .listen = smc_listen, .shutdown = smc_shutdown, diff --git a/net/smc/smc.h b/net/smc/smc.h index 51ae1f10d81a..d7ca26570482 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -187,11 +187,19 @@ struct smc_connection { struct work_struct close_work; /* peer sent some closing */ }; +struct smc_connect_info { + int flags; + int alen; + struct sockaddr addr; +}; + struct smc_sock { /* smc sock container */ struct sock sk; struct socket *clcsock; /* internal tcp socket */ struct smc_connection conn; /* smc connection */ struct smc_sock *listen_smc; /* listen parent */ + struct smc_connect_info *connect_info; /* connect address & flags */ + struct work_struct connect_work; /* handle non-blocking connect*/ struct work_struct tcp_listen_work;/* handle tcp socket accepts */ struct work_struct smc_listen_work;/* prepare new accept socket */ struct list_head accept_q; /* sockets to be accepted */ diff --git a/net/socket.c b/net/socket.c index 8a109012608a..85633622c94d 100644 --- a/net/socket.c +++ b/net/socket.c @@ -117,10 +117,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); -static struct wait_queue_head *sock_get_poll_head(struct file *file, - __poll_t events); -static __poll_t sock_poll_mask(struct file *file, __poll_t); -static __poll_t sock_poll(struct file *file, struct poll_table_struct *wait); +static __poll_t sock_poll(struct file *file, + struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, @@ -143,8 +141,6 @@ static const struct file_operations socket_file_ops = { .llseek = no_llseek, .read_iter = sock_read_iter, .write_iter = sock_write_iter, - .get_poll_head = sock_get_poll_head, - .poll_mask = sock_poll_mask, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT @@ -1130,48 +1126,16 @@ int sock_create_lite(int family, int type, int protocol, struct socket **res) } EXPORT_SYMBOL(sock_create_lite); -static struct wait_queue_head 
*sock_get_poll_head(struct file *file, - __poll_t events) -{ - struct socket *sock = file->private_data; - - if (!sock->ops->poll_mask) - return NULL; - sock_poll_busy_loop(sock, events); - return sk_sleep(sock->sk); -} - -static __poll_t sock_poll_mask(struct file *file, __poll_t events) -{ - struct socket *sock = file->private_data; - - /* - * We need to be sure we are in sync with the socket flags modification. - * - * This memory barrier is paired in the wq_has_sleeper. - */ - smp_mb(); - - /* this socket can poll_ll so tell the system call */ - return sock->ops->poll_mask(sock, events) | - (sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0); -} - /* No kernel lock held - perfect */ static __poll_t sock_poll(struct file *file, poll_table *wait) { struct socket *sock = file->private_data; - __poll_t events = poll_requested_events(wait), mask = 0; + __poll_t events = poll_requested_events(wait); - if (sock->ops->poll) { - sock_poll_busy_loop(sock, events); - mask = sock->ops->poll(file, sock, wait); - } else if (sock->ops->poll_mask) { - sock_poll_wait(file, sock_get_poll_head(file, events), wait); - mask = sock->ops->poll_mask(sock, events); - } - - return mask | sock_poll_busy_flag(sock); + sock_poll_busy_loop(sock, events); + if (!sock->ops->poll) + return 0; + return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 1a9695183599..625acb27efcc 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -35,7 +35,6 @@ struct _strp_msg { */ struct strp_msg strp; int accum_len; - int early_eaten; }; static inline struct _strp_msg *_strp_msg(struct sk_buff *skb) @@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, head = strp->skb_head; if (head) { /* Message already in progress */ - - stm = _strp_msg(head); - if (unlikely(stm->early_eaten)) { - /* Already some number of bytes on the receive sock - * data saved in skb_head, just indicate they - * are consumed. - */ - eaten = orig_len <= stm->early_eaten ? - orig_len : stm->early_eaten; - stm->early_eaten -= eaten; - - return eaten; - } - if (unlikely(orig_offset)) { /* Getting data with a non-zero offset when a message is * in progress is not expected. If it does happen, we @@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, } stm->accum_len += cand_len; + eaten += cand_len; strp->need_bytes = stm->strp.full_len - stm->accum_len; - stm->early_eaten = cand_len; STRP_STATS_ADD(strp->stats.bytes, cand_len); desc->count = 0; /* Stop reading socket */ break; @@ -392,7 +377,7 @@ static int strp_read_sock(struct strparser *strp) /* Lower sock lock held */ void strp_data_ready(struct strparser *strp) { - if (unlikely(strp->stopped)) + if (unlikely(strp->stopped) || strp->paused) return; /* This check is needed to synchronize with do_strp_work. 
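The af_smc.c hunks above stop calling kernel_connect() inline for O_NONBLOCK connects; instead the address is stashed, a work item is queued, and the caller gets -EINPROGRESS while the blocking connect runs later in process context and completion is reported through the poll handler. A compressed sketch of that deferral, with hypothetical example_* names and the real error and locking paths omitted:

#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct example_conn {
	struct work_struct connect_work;
	struct sockaddr_storage addr;
	int alen;
};

static void example_connect_work(struct work_struct *work)
{
	struct example_conn *ec = container_of(work, struct example_conn,
					       connect_work);

	/* Process context: a blocking connect on the underlying socket is
	 * acceptable here; the real code propagates failures via sk->sk_err
	 * and wakes pollers through sk_state_change().
	 */
	(void)ec;
}

static int example_connect(struct example_conn *ec,
			   struct sockaddr *addr, int alen, int flags)
{
	if (!(flags & O_NONBLOCK))
		return 0;	/* blocking path elided in this sketch */

	/* Assumes alen was already validated against sizeof(ec->addr). */
	memcpy(&ec->addr, addr, alen);
	ec->alen = alen;
	INIT_WORK(&ec->connect_work, example_connect_work);
	schedule_work(&ec->connect_work);
	return -EINPROGRESS;	/* caller learns the outcome via poll */
}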
@@ -407,9 +392,6 @@ void strp_data_ready(struct strparser *strp) return; } - if (strp->paused) - return; - if (strp->need_bytes) { if (strp_peek_len(strp) < strp->need_bytes) return; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 3c85af058227..3fabf9f6a0f9 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -987,8 +987,6 @@ bool xprt_prepare_transmit(struct rpc_task *task) task->tk_status = -EAGAIN; goto out_unlock; } - if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent) - req->rq_xid = xprt_alloc_xid(xprt); ret = true; out_unlock: spin_unlock_bh(&xprt->transport_lock); @@ -1298,7 +1296,12 @@ void xprt_retry_reserve(struct rpc_task *task) static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) { - return (__force __be32)xprt->xid++; + __be32 xid; + + spin_lock(&xprt->reserve_lock); + xid = (__force __be32)xprt->xid++; + spin_unlock(&xprt->reserve_lock); + return xid; } static inline void xprt_init_xid(struct rpc_xprt *xprt) @@ -1316,6 +1319,7 @@ void xprt_request_init(struct rpc_task *task) req->rq_task = task; req->rq_xprt = xprt; req->rq_buffer = NULL; + req->rq_xid = xprt_alloc_xid(xprt); req->rq_connect_cookie = xprt->connect_cookie - 1; req->rq_bytes_sent = 0; req->rq_snd_buf.len = 0; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 14a5d055717d..930852c54d7a 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -692,9 +692,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, } /** - * tipc_poll - read pollmask + * tipc_poll - read and possibly block on pollmask * @file: file structure associated with the socket * @sock: socket for which to calculate the poll bits + * @wait: ??? * * Returns pollmask value * @@ -708,12 +709,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, * imply that the operation will succeed, merely that it should be performed * and will not block. 
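In the sunrpc/xprt.c hunk above, XID allocation moves from xprt_prepare_transmit() into xprt_request_init() and is serialized with reserve_lock, so two requests set up concurrently cannot both read and increment xprt->xid and end up with the same XID. The core of that idiom, with hypothetical example_* names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_xid_source {
	spinlock_t	lock;	/* protects @next */
	u32		next;
};

static __be32 example_alloc_xid(struct example_xid_source *src)
{
	__be32 xid;

	spin_lock(&src->lock);
	xid = (__force __be32)src->next++;
	spin_unlock(&src->lock);

	return xid;
}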
*/ -static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events) +static __poll_t tipc_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); __poll_t revents = 0; + sock_poll_wait(file, sk_sleep(sk), wait); + if (sk->sk_shutdown & RCV_SHUTDOWN) revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) @@ -3033,7 +3037,7 @@ static const struct proto_ops msg_ops = { .socketpair = tipc_socketpair, .accept = sock_no_accept, .getname = tipc_getname, - .poll_mask = tipc_poll_mask, + .poll = tipc_poll, .ioctl = tipc_ioctl, .listen = sock_no_listen, .shutdown = tipc_shutdown, @@ -3054,7 +3058,7 @@ static const struct proto_ops packet_ops = { .socketpair = tipc_socketpair, .accept = tipc_accept, .getname = tipc_getname, - .poll_mask = tipc_poll_mask, + .poll = tipc_poll, .ioctl = tipc_ioctl, .listen = tipc_listen, .shutdown = tipc_shutdown, @@ -3075,7 +3079,7 @@ static const struct proto_ops stream_ops = { .socketpair = tipc_socketpair, .accept = tipc_accept, .getname = tipc_getname, - .poll_mask = tipc_poll_mask, + .poll = tipc_poll, .ioctl = tipc_ioctl, .listen = tipc_listen, .shutdown = tipc_shutdown, diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index a127d61e8af9..301f22430469 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -712,7 +712,7 @@ static int __init tls_register(void) build_protos(tls_prots[TLSV4], &tcp_prot); tls_sw_proto_ops = inet_stream_ops; - tls_sw_proto_ops.poll_mask = tls_sw_poll_mask; + tls_sw_proto_ops.poll = tls_sw_poll; tls_sw_proto_ops.splice_read = tls_sw_splice_read; #ifdef CONFIG_TLS_DEVICE diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index f127fac88acf..d2380548f8f6 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -919,22 +919,23 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, return copied ? 
: err; } -__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events) +unsigned int tls_sw_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait) { + unsigned int ret; struct sock *sk = sock->sk; struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); - __poll_t mask; - /* Grab EPOLLOUT and EPOLLHUP from the underlying socket */ - mask = ctx->sk_poll_mask(sock, events); + /* Grab POLLOUT and POLLHUP from the underlying socket */ + ret = ctx->sk_poll(file, sock, wait); - /* Clear EPOLLIN bits, and set based on recv_pkt */ - mask &= ~(EPOLLIN | EPOLLRDNORM); + /* Clear POLLIN bits, and set based on recv_pkt */ + ret &= ~(POLLIN | POLLRDNORM); if (ctx->recv_pkt) - mask |= EPOLLIN | EPOLLRDNORM; + ret |= POLLIN | POLLRDNORM; - return mask; + return ret; } static int tls_read_size(struct strparser *strp, struct sk_buff *skb) @@ -1191,7 +1192,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) sk->sk_data_ready = tls_data_ready; write_unlock_bh(&sk->sk_callback_lock); - sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask; + sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll; strp_check_rcv(&sw_ctx_rx->strp); } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 95b02a71fd47..e5473c03d667 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -638,8 +638,9 @@ static int unix_stream_connect(struct socket *, struct sockaddr *, static int unix_socketpair(struct socket *, struct socket *); static int unix_accept(struct socket *, struct socket *, int, bool); static int unix_getname(struct socket *, struct sockaddr *, int); -static __poll_t unix_poll_mask(struct socket *, __poll_t); -static __poll_t unix_dgram_poll_mask(struct socket *, __poll_t); +static __poll_t unix_poll(struct file *, struct socket *, poll_table *); +static __poll_t unix_dgram_poll(struct file *, struct socket *, + poll_table *); static int unix_ioctl(struct socket *, unsigned int, unsigned long); static int unix_shutdown(struct socket *, int); static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); @@ -680,7 +681,7 @@ static const struct proto_ops unix_stream_ops = { .socketpair = unix_socketpair, .accept = unix_accept, .getname = unix_getname, - .poll_mask = unix_poll_mask, + .poll = unix_poll, .ioctl = unix_ioctl, .listen = unix_listen, .shutdown = unix_shutdown, @@ -703,7 +704,7 @@ static const struct proto_ops unix_dgram_ops = { .socketpair = unix_socketpair, .accept = sock_no_accept, .getname = unix_getname, - .poll_mask = unix_dgram_poll_mask, + .poll = unix_dgram_poll, .ioctl = unix_ioctl, .listen = sock_no_listen, .shutdown = unix_shutdown, @@ -725,7 +726,7 @@ static const struct proto_ops unix_seqpacket_ops = { .socketpair = unix_socketpair, .accept = unix_accept, .getname = unix_getname, - .poll_mask = unix_dgram_poll_mask, + .poll = unix_dgram_poll, .ioctl = unix_ioctl, .listen = unix_listen, .shutdown = unix_shutdown, @@ -2629,10 +2630,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return err; } -static __poll_t unix_poll_mask(struct socket *sock, __poll_t events) +static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; - __poll_t mask = 0; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* exceptional events? 
*/ if (sk->sk_err) @@ -2661,11 +2665,15 @@ static __poll_t unix_poll_mask(struct socket *sock, __poll_t events) return mask; } -static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events) +static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk, *other; - int writable; - __poll_t mask = 0; + unsigned int writable; + __poll_t mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; /* exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) @@ -2691,7 +2699,7 @@ static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events) } /* No write status requested, avoid expensive OUT tests. */ - if (!(events & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) + if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) return mask; writable = unix_writable(sk); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index bb5d5fa68c35..c1076c19b858 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -850,11 +850,18 @@ static int vsock_shutdown(struct socket *sock, int mode) return err; } -static __poll_t vsock_poll_mask(struct socket *sock, __poll_t events) +static __poll_t vsock_poll(struct file *file, struct socket *sock, + poll_table *wait) { - struct sock *sk = sock->sk; - struct vsock_sock *vsk = vsock_sk(sk); - __poll_t mask = 0; + struct sock *sk; + __poll_t mask; + struct vsock_sock *vsk; + + sk = sock->sk; + vsk = vsock_sk(sk); + + poll_wait(file, sk_sleep(sk), wait); + mask = 0; if (sk->sk_err) /* Signify that there has been an error on this socket. */ @@ -1084,7 +1091,7 @@ static const struct proto_ops vsock_dgram_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = vsock_getname, - .poll_mask = vsock_poll_mask, + .poll = vsock_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = vsock_shutdown, @@ -1842,7 +1849,7 @@ static const struct proto_ops vsock_stream_ops = { .socketpair = sock_no_socketpair, .accept = vsock_accept, .getname = vsock_getname, - .poll_mask = vsock_poll_mask, + .poll = vsock_poll, .ioctl = sock_no_ioctl, .listen = vsock_listen, .shutdown = vsock_shutdown, diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 8e03bd3f3668..5d3cce9e8744 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt) return -ENODEV; } - if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) + if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) return virtio_transport_send_pkt_loopback(vsock, pkt); if (pkt->reply) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c7bbe5f0aae8..4eece06be1e7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6231,7 +6231,7 @@ do { \ nl80211_check_s32); /* * Check HT operation mode based on - * IEEE 802.11 2012 8.4.2.59 HT Operation element. + * IEEE 802.11-2016 9.4.2.57 HT Operation element. 
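One detail of the unix_dgram_poll() conversion above: with ->poll_mask() the requested events arrived as an argument, whereas a ->poll() handler recovers them from the poll table via poll_requested_events(), and the function still skips its more expensive writability checks when the caller did not ask for write events. A trimmed sketch of that guard; example_dgram_poll is a hypothetical name and not part of the patch:

#include <linux/net.h>
#include <linux/poll.h>
#include <net/sock.h>

static __poll_t example_dgram_poll(struct file *file, struct socket *sock,
				   poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* No write status requested: avoid the costly writability probe. */
	if (!(poll_requested_events(wait) &
	      (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT)))
		return mask;

	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}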
*/ if (tb[NL80211_MESHCONF_HT_OPMODE]) { ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); @@ -6241,22 +6241,9 @@ do { \ IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) return -EINVAL; - if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) && - (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) - return -EINVAL; + /* NON_HT_STA bit is reserved, but some programs set it */ + ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; - switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) { - case IEEE80211_HT_OP_MODE_PROTECTION_NONE: - case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: - if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) - return -EINVAL; - break; - case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: - case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: - if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) - return -EINVAL; - break; - } cfg->ht_opmode = ht_opmode; mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); } @@ -10962,9 +10949,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) rem) { u8 *mask_pat; - nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, - nl80211_packet_pattern_policy, - info->extack); + err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, + nl80211_packet_pattern_policy, + info->extack); + if (err) + goto error; + err = -EINVAL; if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) @@ -11213,8 +11203,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, rem) { u8 *mask_pat; - nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, - nl80211_packet_pattern_policy, NULL); + err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, + nl80211_packet_pattern_policy, NULL); + if (err) + return err; + if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) return -EINVAL; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index f93365ae0fdd..d49aa79b7997 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1750,7 +1750,7 @@ static const struct proto_ops x25_proto_ops = { .socketpair = sock_no_socketpair, .accept = x25_accept, .getname = x25_getname, - .poll_mask = datagram_poll_mask, + .poll = datagram_poll, .ioctl = x25_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_x25_ioctl, diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 36919a254ba3..59fb7d3c36a3 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -118,6 +118,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) u64 addr; int err; + if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) + return -EINVAL; + if (!xskq_peek_addr(xs->umem->fq, &addr) || len > xs->umem->chunk_size_nohr) { xs->rx_dropped++; @@ -300,9 +303,10 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) return (xs->zc) ? 
xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len); } -static __poll_t xsk_poll_mask(struct socket *sock, __poll_t events) +static unsigned int xsk_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait) { - __poll_t mask = datagram_poll_mask(sock, events); + unsigned int mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); @@ -693,7 +697,7 @@ static const struct proto_ops xsk_proto_ops = { .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, - .poll_mask = xsk_poll_mask, + .poll = xsk_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c index 6673cdb9f55c..a7e94e7ff87d 100644 --- a/samples/bpf/xdp_fwd_kern.c +++ b/samples/bpf/xdp_fwd_kern.c @@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) struct ethhdr *eth = data; struct ipv6hdr *ip6h; struct iphdr *iph; - int out_index; u16 h_proto; u64 nh_off; + int rc; nh_off = sizeof(*eth); if (data + nh_off > data_end) @@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) fib_params.ifindex = ctx->ingress_ifindex; - out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags); + rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags); /* verify egress index has xdp support * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with @@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) * NOTE: without verification that egress index supports XDP * forwarding packets are dropped. */ - if (out_index > 0) { + if (rc == 0) { if (h_proto == htons(ETH_P_IP)) ip_decrease_ttl(iph); else if (h_proto == htons(ETH_P_IPV6)) @@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags) memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN); memcpy(eth->h_source, fib_params.smac, ETH_ALEN); - return bpf_redirect_map(&tx_port, out_index, 0); + return bpf_redirect_map(&tx_port, fib_params.ifindex, 0); } return XDP_PASS; diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index 2960e26c6ea4..2535c3677c7b 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -178,6 +178,8 @@ static const char *vbe_name(u32 index) return "(invalid)"; } +static struct page *__mbochs_get_page(struct mdev_state *mdev_state, + pgoff_t pgoff); static struct page *mbochs_get_page(struct mdev_state *mdev_state, pgoff_t pgoff); @@ -394,7 +396,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count, MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) { pos -= MBOCHS_MMIO_BAR_OFFSET; poff = pos & ~PAGE_MASK; - pg = mbochs_get_page(mdev_state, pos >> PAGE_SHIFT); + pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT); map = kmap(pg); if (is_write) memcpy(map + poff, buf, count); @@ -657,7 +659,7 @@ static void mbochs_put_pages(struct mdev_state *mdev_state) dev_dbg(dev, "%s: %d pages released\n", __func__, count); } -static int mbochs_region_vm_fault(struct vm_fault *vmf) +static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct mdev_state *mdev_state = vma->vm_private_data; @@ -695,7 +697,7 @@ static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) return 0; } -static int mbochs_dmabuf_vm_fault(struct vm_fault *vmf) +static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf) { struct vm_area_struct 
*vma = vmf->vma; struct mbochs_dmabuf *dmabuf = vma->vm_private_data; @@ -803,15 +805,6 @@ static void mbochs_release_dmabuf(struct dma_buf *buf) mutex_unlock(&mdev_state->ops_lock); } -static void *mbochs_kmap_atomic_dmabuf(struct dma_buf *buf, - unsigned long page_num) -{ - struct mbochs_dmabuf *dmabuf = buf->priv; - struct page *page = dmabuf->pages[page_num]; - - return kmap_atomic(page); -} - static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num) { struct mbochs_dmabuf *dmabuf = buf->priv; @@ -820,12 +813,18 @@ static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num) return kmap(page); } +static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num, + void *vaddr) +{ + kunmap(vaddr); +} + static struct dma_buf_ops mbochs_dmabuf_ops = { .map_dma_buf = mbochs_map_dmabuf, .unmap_dma_buf = mbochs_unmap_dmabuf, .release = mbochs_release_dmabuf, - .map_atomic = mbochs_kmap_atomic_dmabuf, .map = mbochs_kmap_dmabuf, + .unmap = mbochs_kunmap_dmabuf, .mmap = mbochs_mmap_dmabuf, }; diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index c8156d61678c..86321f06461e 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -214,7 +214,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj # Prefix -I with $(srctree) if it is not an absolute path. # skip if -I has no parameter addtree = $(if $(patsubst -I%,%,$(1)), \ -$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1))) +$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)),$(1)) # Find all -I options and call addtree flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o))) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 34d9e9ce97c2..514ed63ff571 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -239,6 +239,7 @@ cmd_record_mcount = \ "$(CC_FLAGS_FTRACE)" ]; then \ $(sub_cmd_record_mcount) \ fi; +endif # -record-mcount endif # CONFIG_FTRACE_MCOUNT_RECORD ifdef CONFIG_STACK_VALIDATION @@ -263,7 +264,6 @@ ifneq ($(RETPOLINE_CFLAGS),) objtool_args += --retpoline endif endif -endif ifdef CONFIG_MODVERSIONS @@ -590,7 +590,4 @@ endif # We never want them to be removed automatically. .SECONDARY: $(targets) -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean index 808d09f27ad4..17ef94c635cd 100644 --- a/scripts/Makefile.clean +++ b/scripts/Makefile.clean @@ -88,7 +88,4 @@ PHONY += $(subdir-ymn) $(subdir-ymn): $(Q)$(MAKE) $(clean)=$@ -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/Makefile.modbuiltin b/scripts/Makefile.modbuiltin index a763b4775d06..40867a41615b 100644 --- a/scripts/Makefile.modbuiltin +++ b/scripts/Makefile.modbuiltin @@ -54,8 +54,4 @@ PHONY += $(subdir-ym) $(subdir-ym): $(Q)$(MAKE) $(modbuiltin)=$@ - -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. 
- .PHONY: $(PHONY) diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst index 51ca0244fc8a..ff5ca9817a85 100644 --- a/scripts/Makefile.modinst +++ b/scripts/Makefile.modinst @@ -35,8 +35,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D)) $(modules): $(call cmd,modules_install,$(MODLIB)/$(modinst_dir)) - -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable so we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index df4174405feb..dd92dbbbaa68 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -149,8 +149,4 @@ ifneq ($(cmd_files),) include $(cmd_files) endif - -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/Makefile.modsign b/scripts/Makefile.modsign index 171483bc0538..da56aa78d245 100644 --- a/scripts/Makefile.modsign +++ b/scripts/Makefile.modsign @@ -27,7 +27,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D)) $(modules): $(call cmd,sign_ko,$(MODLIB)/$(modinst_dir)) -# Declare the contents of the .PHONY variable as phony. We keep that -# information in a variable se we can use it in if_changed and friends. - .PHONY: $(PHONY) diff --git a/scripts/cc-can-link.sh b/scripts/cc-can-link.sh index 208eb2825dab..6efcead31989 100755 --- a/scripts/cc-can-link.sh +++ b/scripts/cc-can-link.sh @@ -1,7 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 -cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y" +cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 #include int main(void) { diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index e3b7362b0ee4..447857ffaf6b 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2606,12 +2606,6 @@ sub process { "A patch subject line should describe the change not the tool that found it\n" . $herecurr); } -# Check for old stable address - if ($line =~ /^\s*cc:\s*.*?.*$/i) { - ERROR("STABLE_ADDRESS", - "The 'stable' address should be 'stable\@vger.kernel.org'\n" . $herecurr); - } - # Check for unwanted Gerrit info if ($in_commit_log && $line =~ /^\s*change-id:/i) { ERROR("GERRIT_CHANGE_ID", @@ -5819,14 +5813,14 @@ sub process { defined $stat && $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s && $1 !~ /^_*volatile_*$/) { - my $specifier; - my $extension; - my $bad_specifier = ""; my $stat_real; my $lc = $stat =~ tr@\n@@; $lc = $lc + $linenr; for (my $count = $linenr; $count <= $lc; $count++) { + my $specifier; + my $extension; + my $bad_specifier = ""; my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0)); $fmt =~ s/%%//g; diff --git a/scripts/extract-vmlinux b/scripts/extract-vmlinux index 5061abcc2540..e6239f39abad 100755 --- a/scripts/extract-vmlinux +++ b/scripts/extract-vmlinux @@ -57,6 +57,8 @@ try_decompress '\3757zXZ\000' abcde unxz try_decompress 'BZh' xy bunzip2 try_decompress '\135\0\0\0' xxx unlzma try_decompress '\211\114\132' xy 'lzop -d' +try_decompress '\002!L\030' xxx 'lz4 -d' +try_decompress '(\265/\375' xxx unzstd # Bail out: echo "$me: Cannot find vmlinux." 
>&2 diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh index 3755af0cd9f7..75e4e22b986a 100755 --- a/scripts/gcc-x86_64-has-stack-protector.sh +++ b/scripts/gcc-x86_64-has-stack-protector.sh @@ -1,4 +1,4 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 -echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h index 94a383b21df6..f63b41b0dd49 100644 --- a/scripts/kconfig/expr.h +++ b/scripts/kconfig/expr.h @@ -171,6 +171,9 @@ struct symbol { * config BAZ * int "BAZ Value" * range 1..255 + * + * Please, also check zconf.y:print_symbol() when modifying the + * list of property types! */ enum prop_type { P_UNKNOWN, diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c index 65da87fce907..5ca2df790d3c 100644 --- a/scripts/kconfig/preprocess.c +++ b/scripts/kconfig/preprocess.c @@ -156,7 +156,7 @@ static char *do_shell(int argc, char *argv[]) nread--; /* remove trailing new lines */ - while (buf[nread - 1] == '\n') + while (nread > 0 && buf[nread - 1] == '\n') nread--; buf[nread] = 0; diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y index 6f9b0aa32a82..4b68272ebdb9 100644 --- a/scripts/kconfig/zconf.y +++ b/scripts/kconfig/zconf.y @@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE]; static struct menu *current_menu, *current_entry; %} -%expect 32 +%expect 31 %union { @@ -337,7 +337,7 @@ choice_block: /* if entry */ -if_entry: T_IF expr nl +if_entry: T_IF expr T_EOL { printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno()); menu_add_entry(NULL); @@ -717,6 +717,10 @@ static void print_symbol(FILE *out, struct menu *menu) print_quoted_string(out, prop->text); fputc('\n', out); break; + case P_SYMBOL: + fputs( " symbol ", out); + fprintf(out, "%s\n", prop->sym->name); + break; default: fprintf(out, " unknown prop %d!\n", prop->type); break; diff --git a/scripts/tags.sh b/scripts/tags.sh index 66f08bb1cce9..412a70cce558 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh @@ -245,7 +245,7 @@ exuberant() { setup_regex exuberant asm c all_target_sources | xargs $1 -a \ - -I __initdata,__exitdata,__initconst, \ + -I __initdata,__exitdata,__initconst,__ro_after_init \ -I __initdata_memblock \ -I __refdata,__attribute,__maybe_unused,__always_unused \ -I __acquires,__releases,__deprecated \ diff --git a/security/keys/dh.c b/security/keys/dh.c index f7403821db7f..b203f7758f97 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c @@ -142,6 +142,8 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc) * The src pointer is defined as Z || other info where Z is the shared secret * from DH and other info is an arbitrary string (see SP800-56A section * 5.8.1.2). + * + * 'dlen' must be a multiple of the digest size. 
*/ static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen, unsigned int zlen) @@ -205,8 +207,8 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc, { uint8_t *outbuf = NULL; int ret; - size_t outbuf_len = round_up(buflen, - crypto_shash_digestsize(sdesc->shash.tfm)); + size_t outbuf_len = roundup(buflen, + crypto_shash_digestsize(sdesc->shash.tfm)); outbuf = kmalloc(outbuf_len, GFP_KERNEL); if (!outbuf) { diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index f3d374d2ca04..79d3709b0671 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -441,22 +441,16 @@ static int sel_release_policy(struct inode *inode, struct file *filp) static ssize_t sel_read_policy(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { - struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info; struct policy_load_memory *plm = filp->private_data; int ret; - mutex_lock(&fsi->mutex); - ret = avc_has_perm(&selinux_state, current_sid(), SECINITSID_SECURITY, SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL); if (ret) - goto out; + return ret; - ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len); -out: - mutex_unlock(&fsi->mutex); - return ret; + return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len); } static vm_fault_t sel_mmap_policy_fault(struct vm_fault *vmf) @@ -1188,25 +1182,29 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf, ret = -EINVAL; if (index >= fsi->bool_num || strcmp(name, fsi->bool_pending_names[index])) - goto out; + goto out_unlock; ret = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) - goto out; + goto out_unlock; cur_enforcing = security_get_bool_value(fsi->state, index); if (cur_enforcing < 0) { ret = cur_enforcing; - goto out; + goto out_unlock; } length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing, fsi->bool_pending_values[index]); - ret = simple_read_from_buffer(buf, count, ppos, page, length); -out: mutex_unlock(&fsi->mutex); + ret = simple_read_from_buffer(buf, count, ppos, page, length); +out_free: free_page((unsigned long)page); return ret; + +out_unlock: + mutex_unlock(&fsi->mutex); + goto out_free; } static ssize_t sel_write_bool(struct file *filep, const char __user *buf, @@ -1219,6 +1217,17 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf, unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK; const char *name = filep->f_path.dentry->d_name.name; + if (count >= PAGE_SIZE) + return -ENOMEM; + + /* No partial writes. */ + if (*ppos != 0) + return -EINVAL; + + page = memdup_user_nul(buf, count); + if (IS_ERR(page)) + return PTR_ERR(page); + mutex_lock(&fsi->mutex); length = avc_has_perm(&selinux_state, @@ -1233,22 +1242,6 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf, fsi->bool_pending_names[index])) goto out; - length = -ENOMEM; - if (count >= PAGE_SIZE) - goto out; - - /* No partial writes. */ - length = -EINVAL; - if (*ppos != 0) - goto out; - - page = memdup_user_nul(buf, count); - if (IS_ERR(page)) { - length = PTR_ERR(page); - page = NULL; - goto out; - } - length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; @@ -1280,6 +1273,17 @@ static ssize_t sel_commit_bools_write(struct file *filep, ssize_t length; int new_value; + if (count >= PAGE_SIZE) + return -ENOMEM; + + /* No partial writes. 
*/ + if (*ppos != 0) + return -EINVAL; + + page = memdup_user_nul(buf, count); + if (IS_ERR(page)) + return PTR_ERR(page); + mutex_lock(&fsi->mutex); length = avc_has_perm(&selinux_state, @@ -1289,22 +1293,6 @@ static ssize_t sel_commit_bools_write(struct file *filep, if (length) goto out; - length = -ENOMEM; - if (count >= PAGE_SIZE) - goto out; - - /* No partial writes. */ - length = -EINVAL; - if (*ppos != 0) - goto out; - - page = memdup_user_nul(buf, count); - if (IS_ERR(page)) { - length = PTR_ERR(page); - page = NULL; - goto out; - } - length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 7ad226018f51..19de675d4504 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2296,6 +2296,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode) struct smack_known *skp = smk_of_task_struct(p); isp->smk_inode = skp; + isp->smk_flags |= SMK_INODE_INSTANT; } /* diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 61a07fe34cd2..56ca78423040 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -2004,7 +2004,8 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client, struct snd_seq_client *cptr = NULL; /* search for next client */ - info->client++; + if (info->client < INT_MAX) + info->client++; if (info->client < 0) info->client = 0; for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) { diff --git a/sound/core/timer.c b/sound/core/timer.c index 665089c45560..b6f076bbc72d 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1520,7 +1520,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) } else { if (id.subdevice < 0) id.subdevice = 0; - else + else if (id.subdevice < INT_MAX) id.subdevice++; } } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index d91c87e41756..20a171ac4bb2 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2899,8 +2899,9 @@ static int hda_codec_runtime_suspend(struct device *dev) list_for_each_entry(pcm, &codec->pcm_list_head, list) snd_pcm_suspend_all(pcm->pcm); state = hda_call_codec_suspend(codec); - if (codec_has_clkstop(codec) && codec_has_epss(codec) && - (state & AC_PWRST_CLK_STOP_OK)) + if (codec->link_down_at_suspend || + (codec_has_clkstop(codec) && codec_has_epss(codec) && + (state & AC_PWRST_CLK_STOP_OK))) snd_hdac_codec_link_down(&codec->core); snd_hdac_link_power(&codec->core, false); return 0; diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 681c360f29f9..a8b1b31f161c 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h @@ -258,6 +258,7 @@ struct hda_codec { unsigned int power_save_node:1; /* advanced PM for each widget */ unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */ unsigned int force_pin_prefix:1; /* Add location prefix */ + unsigned int link_down_at_suspend:1; /* link down at runtime suspend */ #ifdef CONFIG_PM unsigned long power_on_acct; unsigned long power_off_acct; diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 04e949aa01ad..321e95c409c1 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -991,6 +991,7 @@ struct ca0132_spec { enum { QUIRK_NONE, QUIRK_ALIENWARE, + QUIRK_ALIENWARE_M17XR4, QUIRK_SBZ, QUIRK_R3DI, }; @@ -1040,13 +1041,15 @@ static const struct hda_pintbl r3di_pincfgs[] = { }; static const struct snd_pci_quirk 
ca0132_quirks[] = { + SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4), SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE), SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE), SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), - SND_PCI_QUIRK(0x1458, 0xA036, "Recon3Di", QUIRK_R3DI), + SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), + SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), {} }; @@ -5663,7 +5666,7 @@ static const char * const ca0132_alt_slave_pfxs[] = { * I think this has to do with the pin for rear surround being 0x11, * and the center/lfe being 0x10. Usually the pin order is the opposite. */ -const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = { +static const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = { { .channels = 2, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, { .channels = 4, @@ -5966,7 +5969,7 @@ static int ca0132_build_pcms(struct hda_codec *codec) info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0]; /* With the DSP enabled, desktops don't use this ADC. */ - if (spec->use_alt_functions) { + if (!spec->use_alt_functions) { info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2"); if (!info) return -ENOMEM; @@ -6130,7 +6133,10 @@ static void ca0132_init_dmic(struct hda_codec *codec) * Bit 6: set to select Data2, clear for Data1 * Bit 7: set to enable DMic, clear for AMic */ - val = 0x23; + if (spec->quirk == QUIRK_ALIENWARE_M17XR4) + val = 0x33; + else + val = 0x23; /* keep a copy of dmic ctl val for enable/disable dmic purpuse */ spec->dmic_ctl = val; snd_hda_codec_write(codec, spec->input_pins[0], 0, @@ -7223,7 +7229,7 @@ static int ca0132_init(struct hda_codec *codec) snd_hda_sequence_write(codec, spec->base_init_verbs); - if (spec->quirk != QUIRK_NONE) + if (spec->use_alt_functions) ca0132_alt_init(codec); ca0132_download_dsp(codec); @@ -7237,8 +7243,9 @@ static int ca0132_init(struct hda_codec *codec) case QUIRK_R3DI: r3di_setup_defaults(codec); break; - case QUIRK_NONE: - case QUIRK_ALIENWARE: + case QUIRK_SBZ: + break; + default: ca0132_setup_defaults(codec); ca0132_init_analog_mic2(codec); ca0132_init_dmic(codec); @@ -7343,7 +7350,6 @@ static const struct hda_codec_ops ca0132_patch_ops = { static void ca0132_config(struct hda_codec *codec) { struct ca0132_spec *spec = codec->spec; - struct auto_pin_cfg *cfg = &spec->autocfg; spec->dacs[0] = 0x2; spec->dacs[1] = 0x3; @@ -7405,12 +7411,7 @@ static void ca0132_config(struct hda_codec *codec) /* SPDIF I/O */ spec->dig_out = 0x05; spec->multiout.dig_out_nid = spec->dig_out; - cfg->dig_out_pins[0] = 0x0c; - cfg->dig_outs = 1; - cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF; spec->dig_in = 0x09; - cfg->dig_in_pin = 0x0e; - cfg->dig_in_type = HDA_PCM_TYPE_SPDIF; break; case QUIRK_R3DI: codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__); @@ -7438,9 +7439,6 @@ static void ca0132_config(struct hda_codec *codec) /* SPDIF I/O */ spec->dig_out = 0x05; spec->multiout.dig_out_nid = spec->dig_out; - cfg->dig_out_pins[0] = 0x0c; - cfg->dig_outs = 1; - cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF; break; default: spec->num_outputs = 2; @@ -7463,12 +7461,7 @@ static void ca0132_config(struct hda_codec *codec) /* SPDIF I/O */ spec->dig_out = 0x05; spec->multiout.dig_out_nid = spec->dig_out; - cfg->dig_out_pins[0] = 0x0c; - 
cfg->dig_outs = 1; - cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF; spec->dig_in = 0x09; - cfg->dig_in_pin = 0x0e; - cfg->dig_in_type = HDA_PCM_TYPE_SPDIF; break; } } @@ -7476,7 +7469,7 @@ static void ca0132_config(struct hda_codec *codec) static int ca0132_prepare_verbs(struct hda_codec *codec) { /* Verbs + terminator (an empty element) */ -#define NUM_SPEC_VERBS 4 +#define NUM_SPEC_VERBS 2 struct ca0132_spec *spec = codec->spec; spec->chip_init_verbs = ca0132_init_verbs0; @@ -7488,34 +7481,24 @@ static int ca0132_prepare_verbs(struct hda_codec *codec) if (!spec->spec_init_verbs) return -ENOMEM; - /* HP jack autodetection */ - spec->spec_init_verbs[0].nid = spec->unsol_tag_hp; - spec->spec_init_verbs[0].param = AC_VERB_SET_UNSOLICITED_ENABLE; - spec->spec_init_verbs[0].verb = AC_USRSP_EN | spec->unsol_tag_hp; - - /* MIC1 jack autodetection */ - spec->spec_init_verbs[1].nid = spec->unsol_tag_amic1; - spec->spec_init_verbs[1].param = AC_VERB_SET_UNSOLICITED_ENABLE; - spec->spec_init_verbs[1].verb = AC_USRSP_EN | spec->unsol_tag_amic1; - /* config EAPD */ - spec->spec_init_verbs[2].nid = 0x0b; - spec->spec_init_verbs[2].param = 0x78D; - spec->spec_init_verbs[2].verb = 0x00; + spec->spec_init_verbs[0].nid = 0x0b; + spec->spec_init_verbs[0].param = 0x78D; + spec->spec_init_verbs[0].verb = 0x00; /* Previously commented configuration */ /* - spec->spec_init_verbs[3].nid = 0x0b; - spec->spec_init_verbs[3].param = AC_VERB_SET_EAPD_BTLENABLE; + spec->spec_init_verbs[2].nid = 0x0b; + spec->spec_init_verbs[2].param = AC_VERB_SET_EAPD_BTLENABLE; + spec->spec_init_verbs[2].verb = 0x02; + + spec->spec_init_verbs[3].nid = 0x10; + spec->spec_init_verbs[3].param = 0x78D; spec->spec_init_verbs[3].verb = 0x02; spec->spec_init_verbs[4].nid = 0x10; - spec->spec_init_verbs[4].param = 0x78D; + spec->spec_init_verbs[4].param = AC_VERB_SET_EAPD_BTLENABLE; spec->spec_init_verbs[4].verb = 0x02; - - spec->spec_init_verbs[5].nid = 0x10; - spec->spec_init_verbs[5].param = AC_VERB_SET_EAPD_BTLENABLE; - spec->spec_init_verbs[5].verb = 0x02; */ /* Terminator: spec->spec_init_verbs[NUM_SPEC_VERBS-1] */ diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 8840daf9c6a3..8a49415aebac 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -764,8 +765,10 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid, if (pin_idx < 0) return; + mutex_lock(&spec->pcm_lock); if (hdmi_present_sense(get_pin(spec, pin_idx), 1)) snd_hda_jack_report_sync(codec); + mutex_unlock(&spec->pcm_lock); } static void jack_callback(struct hda_codec *codec, @@ -1628,21 +1631,23 @@ static void sync_eld_via_acomp(struct hda_codec *codec, static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) { struct hda_codec *codec = per_pin->codec; - struct hdmi_spec *spec = codec->spec; int ret; /* no temporary power up/down needed for component notifier */ - if (!codec_has_acomp(codec)) - snd_hda_power_up_pm(codec); + if (!codec_has_acomp(codec)) { + ret = snd_hda_power_up_pm(codec); + if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) { + snd_hda_power_down_pm(codec); + return false; + } + } - mutex_lock(&spec->pcm_lock); if (codec_has_acomp(codec)) { sync_eld_via_acomp(codec, per_pin); ret = false; /* don't call snd_hda_jack_report_sync() */ } else { ret = hdmi_present_sense_via_verbs(per_pin, repoll); } - mutex_unlock(&spec->pcm_lock); if (!codec_has_acomp(codec)) 
snd_hda_power_down_pm(codec); @@ -1654,12 +1659,16 @@ static void hdmi_repoll_eld(struct work_struct *work) { struct hdmi_spec_per_pin *per_pin = container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work); + struct hda_codec *codec = per_pin->codec; + struct hdmi_spec *spec = codec->spec; if (per_pin->repoll_count++ > 6) per_pin->repoll_count = 0; + mutex_lock(&spec->pcm_lock); if (hdmi_present_sense(per_pin, per_pin->repoll_count)) snd_hda_jack_report_sync(per_pin->codec); + mutex_unlock(&spec->pcm_lock); } static void intel_haswell_fixup_connect_list(struct hda_codec *codec, @@ -3741,6 +3750,11 @@ static int patch_atihdmi(struct hda_codec *codec) spec->chmap.channels_max = max(spec->chmap.channels_max, 8u); + /* AMD GPUs have neither EPSS nor CLKSTOP bits, hence preventing + * the link-down as is. Tell the core to allow it. + */ + codec->link_down_at_suspend = 1; + return 0; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index e9bd33ea538f..7496be4491b1 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2545,6 +2545,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110), SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ), SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN), + SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270), SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270), SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000), SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ), @@ -4995,7 +4996,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec, struct alc_spec *spec = codec->spec; if (action == HDA_FIXUP_ACT_PRE_PROBE) { - spec->shutup = alc_no_shutup; /* reduce click noise */ spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; codec->power_save_node = 0; /* avoid click noises */ @@ -5394,6 +5394,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec, /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" +static void alc_fixup_thinkpad_acpi(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */ + hda_fixup_thinkpad_acpi(codec, fix, action); +} + /* for dell wmi mic mute led */ #include "dell_wmi_helper.c" @@ -5946,7 +5953,7 @@ static const struct hda_fixup alc269_fixups[] = { }, [ALC269_FIXUP_THINKPAD_ACPI] = { .type = HDA_FIXUP_FUNC, - .v.func = hda_fixup_thinkpad_acpi, + .v.func = alc_fixup_thinkpad_acpi, .chained = true, .chain_id = ALC269_FIXUP_SKU_IGNORE, }, @@ -6603,8 +6610,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), - SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), @@ -6782,6 +6789,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x14, 0x90170110}, {0x19, 0x02a11030}, {0x21, 0x02211020}), + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION, + {0x14, 0x90170110}, + {0x19, 0x02a11030}, + {0x1a, 0x02a11040}, + {0x1b, 0x01014020}, + {0x21, 0x0221101f}), + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION, + {0x14, 0x90170110}, + {0x19, 0x02a11020}, + {0x1a, 0x02a11030}, + {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60140}, {0x14, 0x90170110}, diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c index 6c85f13ab23f..54f6252faca6 100644 --- a/sound/pci/lx6464es/lx6464es.c +++ b/sound/pci/lx6464es/lx6464es.c @@ -1018,6 +1018,7 @@ static int snd_lx6464es_create(struct snd_card *card, chip->port_dsp_bar = pci_ioremap_bar(pci, 2); if (!chip->port_dsp_bar) { dev_err(card->dev, "cannot remap PCI memory region\n"); + err = -ENOMEM; goto remap_pci_failed; } diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index caae4843cb70..16e006f708ca 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -91,6 +91,7 @@ struct kvm_regs { #define KVM_VGIC_V3_ADDR_TYPE_DIST 2 #define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 #define KVM_VGIC_ITS_ADDR_TYPE 4 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 #define KVM_VGIC_V3_DIST_SIZE SZ_64K #define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 04b3256f8e6d..4e76630dd655 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -91,6 +91,7 @@ struct kvm_regs { #define KVM_VGIC_V3_ADDR_TYPE_DIST 2 #define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 #define KVM_VGIC_ITS_ADDR_TYPE 4 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 #define KVM_VGIC_V3_DIST_SIZE SZ_64K #define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index 833ed9a16adf..1b32b56a03d3 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h @@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd) #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) +#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h index 389c36fd8299..ac5ba55066dd 100644 --- a/tools/arch/powerpc/include/uapi/asm/unistd.h +++ b/tools/arch/powerpc/include/uapi/asm/unistd.h @@ -398,5 +398,6 @@ #define __NR_pkey_alloc 384 #define __NR_pkey_free 385 #define __NR_pkey_mprotect 386 +#define __NR_rseq 387 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index fb00a2fca990..5701f5cecd31 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -282,7 +282,9 @@ #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted 
Speculation */ #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ +#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c index ac6b1a12c9b7..b76b77dcfd1f 100644 --- a/tools/bpf/bpftool/perf.c +++ b/tools/bpf/bpftool/perf.c @@ -29,9 +29,10 @@ static bool has_perf_query_support(void) if (perf_query_supported) goto out; - fd = open(bin_name, O_RDONLY); + fd = open("/", O_RDONLY); if (fd < 0) { - p_err("perf_query_support: %s", strerror(errno)); + p_err("perf_query_support: cannot open directory \"/\" (%s)", + strerror(errno)); goto out; } diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index a4f435203fef..959aa53ab678 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -90,7 +90,9 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size) } wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) + - nsecs / 1000000000; + (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) / + 1000000000; + if (!localtime_r(&wallclock_secs, &load_tm)) { snprintf(buf, size, "%llu", nsecs / 1000000000); @@ -692,15 +694,19 @@ static int do_load(int argc, char **argv) return -1; } - if (do_pin_fd(prog_fd, argv[1])) { - p_err("failed to pin program"); - return -1; - } + if (do_pin_fd(prog_fd, argv[1])) + goto err_close_obj; if (json_output) jsonw_null(json_wtr); + bpf_object__close(obj); + return 0; + +err_close_obj: + bpf_object__close(obj); + return -1; } static int do_help(int argc, char **argv) diff --git a/tools/build/Build.include b/tools/build/Build.include index a4bbb984941d..950c1504ca37 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -63,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)), $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \ rm -f $(depfile); \ mv -f $(dot-target).tmp $(dot-target).cmd, \ - printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \ - printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \ + printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \ + printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \ cat $(depfile) >> $(dot-target).cmd; \ printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd) @@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX ### ## HOSTCC C flags -host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj)) +host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj)) diff --git a/tools/build/Makefile b/tools/build/Makefile index 5eb4b5ad79cb..5edf65e684ab 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE $(Q)$(MAKE) $(build)=fixdep $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o - $(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $< + $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $< FORCE: diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h index 6fdff5945c8a..9c660e1688ab 100644 --- 
a/tools/include/uapi/drm/drm.h +++ b/tools/include/uapi/drm/drm.h @@ -680,6 +680,13 @@ struct drm_get_cap { */ #define DRM_CLIENT_CAP_ATOMIC 3 +/** + * DRM_CLIENT_CAP_ASPECT_RATIO + * + * If set to 1, the DRM core will provide aspect ratio information in modes. + */ +#define DRM_CLIENT_CAP_ASPECT_RATIO 4 + /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ struct drm_set_client_cap { __u64 capability; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e0b06784f227..59b19b6a40d7 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2630,7 +2630,7 @@ struct bpf_fib_lookup { union { /* inputs to lookup */ __u8 tos; /* AF_INET */ - __be32 flowlabel; /* AF_INET6 */ + __be32 flowinfo; /* AF_INET6, flow_label + priority */ /* output: metric of fib result (IPv4/IPv6 only) */ __u32 rt_metric; diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 68699f654118..cf01b6824244 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -333,6 +333,7 @@ enum { IFLA_BRPORT_BCAST_FLOOD, IFLA_BRPORT_GROUP_FWD_MASK, IFLA_BRPORT_NEIGH_SUPPRESS, + IFLA_BRPORT_ISOLATED, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -516,6 +517,7 @@ enum { IFLA_VXLAN_COLLECT_METADATA, IFLA_VXLAN_LABEL, IFLA_VXLAN_GPE, + IFLA_VXLAN_TTL_INHERIT, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 39e364c70caf..b6270a3b38e9 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -948,6 +948,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_BPB 152 #define KVM_CAP_GET_MSR_FEATURES 153 #define KVM_CAP_HYPERV_EVENTFD 154 +#define KVM_CAP_HYPERV_TLBFLUSH 155 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 38047c6aa575..f4a25bd1871f 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -164,6 +164,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, "lbug_with_loc", "fortify_panic", "usercopy_abort", + "machine_real_restart", }; if (func->bind == STB_WEAK) diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 4e60e105583e..0d1acb704f64 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -302,19 +302,34 @@ static int read_symbols(struct elf *elf) continue; sym->pfunc = sym->cfunc = sym; coldstr = strstr(sym->name, ".cold."); - if (coldstr) { - coldstr[0] = '\0'; - pfunc = find_symbol_by_name(elf, sym->name); - coldstr[0] = '.'; + if (!coldstr) + continue; - if (!pfunc) { - WARN("%s(): can't find parent function", - sym->name); - goto err; - } + coldstr[0] = '\0'; + pfunc = find_symbol_by_name(elf, sym->name); + coldstr[0] = '.'; - sym->pfunc = pfunc; - pfunc->cfunc = sym; + if (!pfunc) { + WARN("%s(): can't find parent function", + sym->name); + goto err; + } + + sym->pfunc = pfunc; + pfunc->cfunc = sym; + + /* + * Unfortunately, -fnoreorder-functions puts the child + * inside the parent. Remove the overlap so we can + * have sane assumptions. + * + * Note that pfunc->len now no longer matches + * pfunc->sym.st_size. 
+ */ + if (sym->sec == pfunc->sec && + sym->offset >= pfunc->offset && + sym->offset + sym->len == pfunc->offset + pfunc->len) { + pfunc->len -= sym->len; } } } diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 5dfe102fb5b5..b10a90b6a718 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -178,6 +178,9 @@ Print count deltas for fixed number of times. This option should be used together with "-I" option. example: 'perf stat -I 1000 --interval-count 2 -e cycles -a' +--interval-clear:: +Clear the screen before next interval. + --timeout msecs:: Stop the 'perf stat' session and print count deltas after N milliseconds (minimum: 10 ms). This option is not supported with the "-I" option. diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index b5ac356ba323..f5a3b402589e 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -207,8 +207,7 @@ ifdef PYTHON_CONFIG PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil - PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) - PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS)) + PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null) FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) endif diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index 3598b8b75d27..ef5d59a5742e 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -243,7 +243,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) u64 ip; u64 skip_slot = -1; - if (chain->nr < 3) + if (!chain || chain->nr < 3) return skip_slot; ip = chain->ips[2]; diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 4dfe42666d0c..f0b1709a5ffb 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -341,6 +341,8 @@ 330 common pkey_alloc __x64_sys_pkey_alloc 331 common pkey_free __x64_sys_pkey_free 332 common statx __x64_sys_statx +333 common io_pgetevents __x64_sys_io_pgetevents +334 common rseq __x64_sys_rseq # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c index 4b2caf6d48e7..fead6b3b4206 100644 --- a/tools/perf/arch/x86/util/perf_regs.c +++ b/tools/perf/arch/x86/util/perf_regs.c @@ -226,7 +226,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op) else if (rm[2].rm_so != rm[2].rm_eo) prefix[0] = '+'; else - strncpy(prefix, "+0", 2); + scnprintf(prefix, sizeof(prefix), "+0"); } /* Rename register */ diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index 63eb49082774..44195514b19e 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -1098,7 +1098,7 @@ static void *worker_thread(void *__tdata) u8 *global_data; u8 *process_data; u8 *thread_data; - u64 bytes_done; + u64 bytes_done, secs; long work_done; u32 l; struct rusage rusage; @@ -1254,7 +1254,8 @@ static void *worker_thread(void *__tdata) timersub(&stop, &start0, &diff); td->runtime_ns = diff.tv_sec * NSEC_PER_SEC; td->runtime_ns += diff.tv_usec * NSEC_PER_USEC; - 
td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9; + secs = td->runtime_ns / NSEC_PER_SEC; + td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0; getrusage(RUSAGE_THREAD, &rusage); td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC; diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 5eb22cc56363..8180319285af 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -283,6 +283,15 @@ static int process_sample_event(struct perf_tool *tool, return ret; } +static int process_feature_event(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session) +{ + if (event->feat.feat_id < HEADER_LAST_FEATURE) + return perf_event__process_feature(tool, event, session); + return 0; +} + static int hist_entry__tty_annotate(struct hist_entry *he, struct perf_evsel *evsel, struct perf_annotate *ann) @@ -471,7 +480,7 @@ int cmd_annotate(int argc, const char **argv) .attr = perf_event__process_attr, .build_id = perf_event__process_build_id, .tracing_data = perf_event__process_tracing_data, - .feature = perf_event__process_feature, + .feature = process_feature_event, .ordered_events = true, .ordering_requires_timestamps = true, }, diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index 307b3594525f..6a8738f7ead3 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -56,16 +56,16 @@ struct c2c_hist_entry { struct compute_stats cstats; + unsigned long paddr; + unsigned long paddr_cnt; + bool paddr_zero; + char *nodestr; + /* * must be at the end, * because of its callchain dynamic entry */ struct hist_entry he; - - unsigned long paddr; - unsigned long paddr_cnt; - bool paddr_zero; - char *nodestr; }; static char const *coalesce_default = "pid,iaddr"; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index cdb5b6949832..c04dc7b53797 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -217,7 +217,8 @@ static int process_feature_event(struct perf_tool *tool, } /* - * All features are received, we can force the + * (feat_id = HEADER_LAST_FEATURE) is the end marker which + * means all features are received, now we can force the * group if needed. 
*/ setup_forced_leader(rep, session->evlist); diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index b3bf35512d21..568ddfac3213 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -180,6 +180,18 @@ static struct { PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE }, + [PERF_TYPE_HW_CACHE] = { + .user_set = false, + + .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | + PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP | + PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET | + PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD, + + .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT, + }, + [PERF_TYPE_RAW] = { .user_set = false, @@ -1822,6 +1834,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event, struct perf_evlist *evlist; struct perf_evsel *evsel, *pos; int err; + static struct perf_evsel_script *es; err = perf_event__process_attr(tool, event, pevlist); if (err) @@ -1830,6 +1843,19 @@ static int process_attr(struct perf_tool *tool, union perf_event *event, evlist = *pevlist; evsel = perf_evlist__last(*pevlist); + if (!evsel->priv) { + if (scr->per_event_dump) { + evsel->priv = perf_evsel_script__new(evsel, + scr->session->data); + } else { + es = zalloc(sizeof(*es)); + if (!es) + return -ENOMEM; + es->fp = stdout; + evsel->priv = es; + } + } + if (evsel->attr.type >= PERF_TYPE_MAX && evsel->attr.type != PERF_TYPE_SYNTH) return 0; @@ -3018,6 +3044,15 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused, return set_maps(script); } +static int process_feature_event(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session) +{ + if (event->feat.feat_id < HEADER_LAST_FEATURE) + return perf_event__process_feature(tool, event, session); + return 0; +} + #ifdef HAVE_AUXTRACE_SUPPORT static int perf_script__process_auxtrace_info(struct perf_tool *tool, union perf_event *event, @@ -3062,7 +3097,7 @@ int cmd_script(int argc, const char **argv) .attr = process_attr, .event_update = perf_event__process_event_update, .tracing_data = perf_event__process_tracing_data, - .feature = perf_event__process_feature, + .feature = process_feature_event, .build_id = perf_event__process_build_id, .id_index = perf_event__process_id_index, .auxtrace_info = perf_script__process_auxtrace_info, @@ -3113,8 +3148,9 @@ int cmd_script(int argc, const char **argv) "+field to add and -field to remove." "Valid types: hw,sw,trace,raw,synth. 
" "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," - "addr,symoff,period,iregs,uregs,brstack,brstacksym,flags," - "bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr", + "addr,symoff,srcline,period,iregs,uregs,brstack," + "brstacksym,flags,bpf-output,brstackinsn,brstackoff," + "callindent,insn,insnlen,synth,phys_addr,metric,misc", parse_output_fields), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 096ccb25c11f..05be023c3f0e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -65,6 +65,7 @@ #include "util/tool.h" #include "util/string2.h" #include "util/metricgroup.h" +#include "util/top.h" #include "asm/bug.h" #include @@ -144,6 +145,8 @@ static struct target target = { typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu); +#define METRIC_ONLY_LEN 20 + static int run_count = 1; static bool no_inherit = false; static volatile pid_t child_pid = -1; @@ -173,6 +176,7 @@ static struct cpu_map *aggr_map; static aggr_get_id_t aggr_get_id; static bool append_file; static bool interval_count; +static bool interval_clear; static const char *output_name; static int output_fd; static int print_free_counters_hint; @@ -180,6 +184,7 @@ static int print_mixed_hw_group_error; static u64 *walltime_run; static bool ru_display = false; static struct rusage ru_data; +static unsigned int metric_only_len = METRIC_ONLY_LEN; struct perf_stat { bool record; @@ -967,8 +972,6 @@ static void print_metric_csv(void *ctx, fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit); } -#define METRIC_ONLY_LEN 20 - /* Filter out some columns that don't work well in metrics only mode */ static bool valid_only_metric(const char *unit) @@ -999,22 +1002,20 @@ static void print_metric_only(void *ctx, const char *color, const char *fmt, { struct outstate *os = ctx; FILE *out = os->fh; - int n; - char buf[1024]; - unsigned mlen = METRIC_ONLY_LEN; + char buf[1024], str[1024]; + unsigned mlen = metric_only_len; if (!valid_only_metric(unit)) return; unit = fixunit(buf, os->evsel, unit); - if (color) - n = color_fprintf(out, color, fmt, val); - else - n = fprintf(out, fmt, val); - if (n > METRIC_ONLY_LEN) - n = METRIC_ONLY_LEN; if (mlen < strlen(unit)) mlen = strlen(unit) + 1; - fprintf(out, "%*s", mlen - n, ""); + + if (color) + mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1; + + color_snprintf(str, sizeof(str), color ?: "", fmt, val); + fprintf(out, "%*s ", mlen, str); } static void print_metric_only_csv(void *ctx, const char *color __maybe_unused, @@ -1054,7 +1055,7 @@ static void print_metric_header(void *ctx, const char *color __maybe_unused, if (csv_output) fprintf(os->fh, "%s%s", unit, csv_sep); else - fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit); + fprintf(os->fh, "%*s ", metric_only_len, unit); } static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg) @@ -1704,9 +1705,12 @@ static void print_interval(char *prefix, struct timespec *ts) FILE *output = stat_config.output; static int num_print_interval; + if (interval_clear) + puts(CONSOLE_CLEAR); + sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep); - if (num_print_interval == 0 && !csv_output) { + if ((num_print_interval == 0 && !csv_output) || interval_clear) { switch (stat_config.aggr_mode) { case AGGR_SOCKET: fprintf(output, "# time socket cpus"); @@ -1719,7 +1723,7 @@ static void print_interval(char *prefix, struct timespec *ts) fprintf(output, " counts %*s events\n", 
unit_width, "unit"); break; case AGGR_NONE: - fprintf(output, "# time CPU"); + fprintf(output, "# time CPU "); if (!metric_only) fprintf(output, " counts %*s events\n", unit_width, "unit"); break; @@ -1738,7 +1742,7 @@ static void print_interval(char *prefix, struct timespec *ts) } } - if (num_print_interval == 0 && metric_only) + if ((num_print_interval == 0 || interval_clear) && metric_only) print_metric_headers(" ", true); if (++num_print_interval == 25) num_print_interval = 0; @@ -2057,6 +2061,8 @@ static const struct option stat_options[] = { "(overhead is possible for values <= 100ms)"), OPT_INTEGER(0, "interval-count", &stat_config.times, "print counts for fixed number of times"), + OPT_BOOLEAN(0, "interval-clear", &interval_clear, + "clear screen in between new interval"), OPT_UINTEGER(0, "timeout", &stat_config.timeout, "stop workload and print counts after a timeout period in ms (>= 10ms)"), OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode, @@ -2436,14 +2442,13 @@ static int add_default_attributes(void) (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, }; + struct parse_events_error errinfo; /* Set attrs if no event is selected and !null_run: */ if (null_run) return 0; if (transaction_run) { - struct parse_events_error errinfo; - if (pmu_have_event("cpu", "cycles-ct") && pmu_have_event("cpu", "el-start")) err = parse_events(evsel_list, transaction_attrs, @@ -2454,6 +2459,7 @@ static int add_default_attributes(void) &errinfo); if (err) { fprintf(stderr, "Cannot set up transaction events\n"); + parse_events_print_error(&errinfo, transaction_attrs); return -1; } return 0; @@ -2479,10 +2485,11 @@ static int add_default_attributes(void) pmu_have_event("msr", "smi")) { if (!force_metric_only) metric_only = true; - err = parse_events(evsel_list, smi_cost_attrs, NULL); + err = parse_events(evsel_list, smi_cost_attrs, &errinfo); } else { fprintf(stderr, "To measure SMI cost, it needs " "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); + parse_events_print_error(&errinfo, smi_cost_attrs); return -1; } if (err) { @@ -2517,12 +2524,13 @@ static int add_default_attributes(void) if (topdown_attrs[0] && str) { if (warn) arch_topdown_group_warn(); - err = parse_events(evsel_list, str, NULL); + err = parse_events(evsel_list, str, &errinfo); if (err) { fprintf(stderr, "Cannot set up top down events %s: %d\n", str, err); free(str); + parse_events_print_error(&errinfo, str); return -1; } } else { diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c index 0c6d1002b524..ac1bcdc17dae 100644 --- a/tools/perf/jvmti/jvmti_agent.c +++ b/tools/perf/jvmti/jvmti_agent.c @@ -35,6 +35,7 @@ #include #include /* for gettid() */ #include +#include #include "jvmti_agent.h" #include "../util/jitdump.h" @@ -249,7 +250,7 @@ void *jvmti_open(void) /* * jitdump file name */ - snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid()); + scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid()); fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666); if (fd == -1) diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build index 17783913d330..215ba30b8534 100644 --- a/tools/perf/pmu-events/Build +++ b/tools/perf/pmu-events/Build @@ -1,7 +1,7 @@ hostprogs := jevents jevents-y += json.o jsmn.o jevents.o -CHOSTFLAGS_jevents.o = -I$(srctree)/tools/include +HOSTCFLAGS_jevents.o = -I$(srctree)/tools/include pmu-events-y += pmu-events.o JDIR = pmu-events/arch/$(SRCARCH) JSON = $(shell [ -d $(JDIR) ] && \ diff --git 
a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py index 38dfb720fb6f..54ace2f6bc36 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py @@ -31,10 +31,8 @@ def flag_str(event_name, field_name, value): string = "" if flag_fields[event_name][field_name]: - print_delim = 0 - keys = flag_fields[event_name][field_name]['values'].keys() - keys.sort() - for idx in keys: + print_delim = 0 + for idx in sorted(flag_fields[event_name][field_name]['values']): if not value and not idx: string += flag_fields[event_name][field_name]['values'][idx] break @@ -51,14 +49,12 @@ def symbol_str(event_name, field_name, value): string = "" if symbolic_fields[event_name][field_name]: - keys = symbolic_fields[event_name][field_name]['values'].keys() - keys.sort() - for idx in keys: + for idx in sorted(symbolic_fields[event_name][field_name]['values']): if not value and not idx: - string = symbolic_fields[event_name][field_name]['values'][idx] + string = symbolic_fields[event_name][field_name]['values'][idx] break - if (value == idx): - string = symbolic_fields[event_name][field_name]['values'][idx] + if (value == idx): + string = symbolic_fields[event_name][field_name]['values'][idx] break return string @@ -74,19 +70,17 @@ def trace_flag_str(value): string = "" print_delim = 0 - keys = trace_flags.keys() + for idx in trace_flags: + if not value and not idx: + string += "NONE" + break - for idx in keys: - if not value and not idx: - string += "NONE" - break - - if idx and (value & idx) == idx: - if print_delim: - string += " | "; - string += trace_flags[idx] - print_delim = 1 - value &= ~idx + if idx and (value & idx) == idx: + if print_delim: + string += " | "; + string += trace_flags[idx] + print_delim = 1 + value &= ~idx return string diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py index 81a56cd2b3c1..21a7a1298094 100755 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py @@ -8,6 +8,7 @@ # PerfEvent is the base class for all perf event sample, PebsEvent # is a HW base Intel x86 PEBS event, and user could add more SW/HW # event classes based on requirements. 
+from __future__ import print_function import struct @@ -44,7 +45,8 @@ class PerfEvent(object): PerfEvent.event_num += 1 def show(self): - print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso) + print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % + (self.name, self.symbol, self.comm, self.dso)) # # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py index fdd92f699055..cac7b2542ee8 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py @@ -11,7 +11,7 @@ try: import wx except ImportError: - raise ImportError, "You need to install the wxpython lib for this script" + raise ImportError("You need to install the wxpython lib for this script") class RootFrame(wx.Frame): diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py index f6c84966e4f8..7384dcb628c4 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py @@ -5,6 +5,7 @@ # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. +from __future__ import print_function import errno, os @@ -33,7 +34,7 @@ def nsecs_str(nsecs): return str def add_stats(dict, key, value): - if not dict.has_key(key): + if key not in dict: dict[key] = (value, value, value, 1) else: min, max, avg, count = dict[key] @@ -72,10 +73,10 @@ try: except: if not audit_package_warned: audit_package_warned = True - print "Install the audit-libs-python package to get syscall names.\n" \ - "For example:\n # apt-get install python-audit (Ubuntu)" \ - "\n # yum install audit-libs-python (Fedora)" \ - "\n etc.\n" + print("Install the audit-libs-python package to get syscall names.\n" + "For example:\n # apt-get install python-audit (Ubuntu)" + "\n # yum install audit-libs-python (Fedora)" + "\n etc.\n") def syscall_name(id): try: diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py index de66cb3b72c9..3473e7f66081 100644 --- a/tools/perf/scripts/python/sched-migration.py +++ b/tools/perf/scripts/python/sched-migration.py @@ -9,13 +9,17 @@ # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. 
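sched-migration.py, below, needs two further compatibility fixes on top of the print() conversion: the UserList base class moved into the collections package in Python 3, and xrange() no longer exists. A small sketch of that pattern, with an illustrative class name and assuming nothing beyond the standard library:

    from __future__ import print_function

    try:
        from UserList import UserList        # Python 2 location
    except ImportError:
        from collections import UserList     # Python 3 location

    class TimeSlices(UserList):
        def dump(self):
            # xrange() is Python 2 only; range() works on both versions.
            for i in range(len(self.data)):
                print("slice %d: %s" % (i, self.data[i]))

    TimeSlices(["idle", "run", "idle"]).dump()
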
- +from __future__ import print_function import os import sys from collections import defaultdict -from UserList import UserList +try: + from UserList import UserList +except ImportError: + # Python 3: UserList moved to the collections package + from collections import UserList sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') @@ -300,7 +304,7 @@ class TimeSliceList(UserList): if i == -1: return - for i in xrange(i, len(self.data)): + for i in range(i, len(self.data)): timeslice = self.data[i] if timeslice.start > end: return @@ -336,8 +340,8 @@ class SchedEventProxy: on_cpu_task = self.current_tsk[headers.cpu] if on_cpu_task != -1 and on_cpu_task != prev_pid: - print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ - (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) + print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ + headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) threads[prev_pid] = prev_comm threads[next_pid] = next_comm diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 2bde505e2e7e..dd850a26d579 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -422,7 +422,7 @@ static const char *shell_test__description(char *description, size_t size, #define for_each_shell_test(dir, base, ent) \ while ((ent = readdir(dir)) != NULL) \ - if (!is_directory(base, ent)) + if (!is_directory(base, ent) && ent->d_name[0] != '.') static const char *shell_tests__dir(char *path, size_t size) { diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 7d4077068454..61211918bfba 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1309,6 +1309,11 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist) return 0; } +static bool test__intel_pt_valid(void) +{ + return !!perf_pmu__find("intel_pt"); +} + static int test__intel_pt(struct perf_evlist *evlist) { struct perf_evsel *evsel = perf_evlist__first(evlist); @@ -1375,6 +1380,7 @@ struct evlist_test { const char *name; __u32 type; const int id; + bool (*valid)(void); int (*check)(struct perf_evlist *evlist); }; @@ -1648,6 +1654,7 @@ static struct evlist_test test__events[] = { }, { .name = "intel_pt//u", + .valid = test__intel_pt_valid, .check = test__intel_pt, .id = 52, }, @@ -1686,17 +1693,24 @@ static struct terms_test test__terms[] = { static int test_event(struct evlist_test *e) { + struct parse_events_error err = { .idx = 0, }; struct perf_evlist *evlist; int ret; + if (e->valid && !e->valid()) { + pr_debug("... 
SKIP"); + return 0; + } + evlist = perf_evlist__new(); if (evlist == NULL) return -ENOMEM; - ret = parse_events(evlist, e->name, NULL); + ret = parse_events(evlist, e->name, &err); if (ret) { - pr_debug("failed to parse event '%s', err %d\n", - e->name, ret); + pr_debug("failed to parse event '%s', err %d, str '%s'\n", + e->name, ret, err.str); + parse_events_print_error(&err, e->name); } else { ret = e->check(evlist); } @@ -1714,10 +1728,11 @@ static int test_events(struct evlist_test *events, unsigned cnt) for (i = 0; i < cnt; i++) { struct evlist_test *e = &events[i]; - pr_debug("running test %d '%s'\n", e->id, e->name); + pr_debug("running test %d '%s'", e->id, e->name); ret1 = test_event(e); if (ret1) ret2 = ret1; + pr_debug("\n"); } return ret2; @@ -1799,7 +1814,7 @@ static int test_pmu_events(void) } while (!ret && (ent = readdir(dir))) { - struct evlist_test e; + struct evlist_test e = { .id = 0, }; char name[2 * NAME_MAX + 1 + 12 + 3]; /* Names containing . are special and cannot be used directly */ diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 263057039693..94e513e62b34 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -14,35 +14,40 @@ libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1 nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254 trace_libc_inet_pton_backtrace() { - idx=0 - expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" - expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" + + expected=`mktemp -u /tmp/expected.XXX` + + echo "ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" > $expected + echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected case "$(uname -m)" in s390x) eventattr='call-graph=dwarf,max-stack=4' - expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" - expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" - expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" + echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected + echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected + echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; *) eventattr='max-stack=3' - expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" - expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" + echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; esac - file=`mktemp -u /tmp/perf.data.XXX` + perf_data=`mktemp -u /tmp/perf.data.XXX` + perf_script=`mktemp -u /tmp/perf.script.XXX` + perf record -e probe_libc:inet_pton/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1 + perf script -i $perf_data > $perf_script - perf record -e probe_libc:inet_pton/$eventattr/ -o $file ping -6 -c 1 ::1 > /dev/null 2>&1 - perf script -i $file | while read line ; do + exec 3<$perf_script + exec 4<$expected + while read line <&3 && read -r pattern <&4; do + [ -z "$pattern" ] && break echo $line - echo "$line" | egrep -q "${expected[$idx]}" + echo "$line" | egrep -q "$pattern" if [ $? 
-ne 0 ] ; then - printf "FAIL: expected backtrace entry %d \"%s\" got \"%s\"\n" $idx "${expected[$idx]}" "$line" + printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line" exit 1 fi - let idx+=1 - [ -z "${expected[$idx]}" ] && break done # If any statements are executed from this point onwards, @@ -58,6 +63,6 @@ skip_if_no_perf_probe && \ perf probe -q $libc inet_pton && \ trace_libc_inet_pton_backtrace err=$? -rm -f ${file} +rm -f ${perf_data} ${perf_script} ${expected} perf probe -q -d probe_libc:inet_pton exit $err diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh index 55ad9793d544..4ce276efe6b4 100755 --- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh +++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh @@ -17,7 +17,7 @@ skip_if_no_perf_probe || exit 2 file=$(mktemp /tmp/temporary_file.XXXXX) trace_open_vfs_getname() { - evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/') + evts=$(echo $(perf list syscalls:sys_enter_open* 2>&1 | egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/') perf trace -e $evts touch $file 2>&1 | \ egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$" } diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 40e30a26b23c..9497d02f69e6 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -45,6 +45,7 @@ static int session_write_header(char *path) perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY); perf_header__set_feat(&session->header, HEADER_NRCPUS); + perf_header__set_feat(&session->header, HEADER_ARCH); session->header.data_size += DATA_SIZE; diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c index b085f1b3e34d..4ab663ec3e5e 100644 --- a/tools/perf/ui/gtk/hists.c +++ b/tools/perf/ui/gtk/hists.c @@ -382,7 +382,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists, gtk_tree_store_set(store, &iter, col_idx++, s, -1); } - if (hists__has_callchains(hists) && + if (hist_entry__has_callchains(h) && symbol_conf.use_callchain && hists__has(hists, sym)) { if (callchain_param.mode == CHAIN_GRAPH_REL) total = symbol_conf.cumulate_callchain ? 
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp index bf31ceab33bd..89512504551b 100644 --- a/tools/perf/util/c++/clang.cpp +++ b/tools/perf/util/c++/clang.cpp @@ -146,8 +146,15 @@ getBPFObjectFromModule(llvm::Module *Module) raw_svector_ostream ostream(*Buffer); legacy::PassManager PM; - if (TargetMachine->addPassesToEmitFile(PM, ostream, - TargetMachine::CGFT_ObjectFile)) { + bool NotAdded; +#if CLANG_VERSION_MAJOR < 7 + NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, + TargetMachine::CGFT_ObjectFile); +#else + NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, nullptr, + TargetMachine::CGFT_ObjectFile); +#endif + if (NotAdded) { llvm::errs() << "TargetMachine can't emit a file of this type\n"; return std::unique_ptr>(nullptr);; } diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 540cd2dcd3e7..653ff65aa2c3 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2129,6 +2129,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) int cpu_nr = ff->ph->env.nr_cpus_avail; u64 size = 0; struct perf_header *ph = ff->ph; + bool do_core_id_test = true; ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); if (!ph->env.cpu) @@ -2183,6 +2184,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) return 0; } + /* On s390 the socket_id number is not related to the numbers of cpus. + * The socket_id number might be higher than the numbers of cpus. + * This depends on the configuration. + */ + if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4)) + do_core_id_test = false; + for (i = 0; i < (u32)cpu_nr; i++) { if (do_read_u32(ff, &nr)) goto free_cpu; @@ -2192,7 +2200,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) if (do_read_u32(ff, &nr)) goto free_cpu; - if (nr != (u32)-1 && nr > (u32)cpu_nr) { + if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) { pr_debug("socket_id number is too big." 
"You may need to upgrade the perf tool.\n"); goto free_cpu; @@ -3456,7 +3464,7 @@ int perf_event__process_feature(struct perf_tool *tool, pr_warning("invalid record type %d in pipe-mode\n", type); return 0; } - if (feat == HEADER_RESERVED || feat > HEADER_LAST_FEATURE) { + if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) { pr_warning("invalid record type %d in pipe-mode\n", type); return -1; } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 52e8fda93a47..828cb9794c76 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -370,9 +370,11 @@ void hists__delete_entries(struct hists *hists) static int hist_entry__init(struct hist_entry *he, struct hist_entry *template, - bool sample_self) + bool sample_self, + size_t callchain_size) { *he = *template; + he->callchain_size = callchain_size; if (symbol_conf.cumulate_callchain) { he->stat_acc = malloc(sizeof(he->stat)); @@ -473,7 +475,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template, he = ops->new(callchain_size); if (he) { - err = hist_entry__init(he, template, sample_self); + err = hist_entry__init(he, template, sample_self, callchain_size); if (err) { ops->free(he); he = NULL; @@ -619,9 +621,11 @@ __hists__add_entry(struct hists *hists, .raw_data = sample->raw_data, .raw_size = sample->raw_size, .ops = ops, - }; + }, *he = hists__findnew_entry(hists, &entry, al, sample_self); - return hists__findnew_entry(hists, &entry, al, sample_self); + if (!hists->has_callchains && he && he->callchain_size != 0) + hists->has_callchains = true; + return he; } struct hist_entry *hists__add_entry(struct hists *hists, diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 06607c434949..73049f7f0f60 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -85,6 +85,7 @@ struct hists { struct events_stats stats; u64 event_stream; u16 col_len[HISTC_NR_COLS]; + bool has_callchains; int socket_filter; struct perf_hpp_list *hpp_list; struct list_head hpp_formats; @@ -222,8 +223,7 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel) static __pure inline bool hists__has_callchains(struct hists *hists) { - const struct perf_evsel *evsel = hists_to_evsel(hists); - return evsel__has_callchain(evsel); + return hists->has_callchains; } int hists__init(void); diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c index ba4c9dd18643..d426761a549d 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c @@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf, if (len < offs) return INTEL_PT_NEED_MORE_BYTES; byte = buf[offs++]; - payload |= (byte >> 1) << shift; + payload |= ((uint64_t)byte >> 1) << shift; } packet->type = INTEL_PT_CYC; diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c index 976e658e38dc..5e94857dfca2 100644 --- a/tools/perf/util/llvm-utils.c +++ b/tools/perf/util/llvm-utils.c @@ -266,16 +266,16 @@ static const char *kinc_fetch_script = "#!/usr/bin/env sh\n" "if ! test -d \"$KBUILD_DIR\"\n" "then\n" -" exit -1\n" +" exit 1\n" "fi\n" "if ! 
test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n" "then\n" -" exit -1\n" +" exit 1\n" "fi\n" "TMPDIR=`mktemp -d`\n" "if test -z \"$TMPDIR\"\n" "then\n" -" exit -1\n" +" exit 1\n" "fi\n" "cat << EOF > $TMPDIR/Makefile\n" "obj-y := dummy.o\n" diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 155d2570274f..da8fe57691b8 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -227,11 +227,16 @@ event_def: event_pmu | event_pmu: PE_NAME opt_pmu_config { + struct parse_events_state *parse_state = _parse_state; + struct parse_events_error *error = parse_state->error; struct list_head *list, *orig_terms, *terms; if (parse_events_copy_term_list($2, &orig_terms)) YYABORT; + if (error) + error->idx = @1.first_column; + ALLOC_LIST(list); if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) { struct perf_pmu *pmu = NULL; diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index d2fb597c9a8c..3ba6a1742f91 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -234,6 +234,74 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias, return 0; } +static void perf_pmu_assign_str(char *name, const char *field, char **old_str, + char **new_str) +{ + if (!*old_str) + goto set_new; + + if (*new_str) { /* Have new string, check with old */ + if (strcasecmp(*old_str, *new_str)) + pr_debug("alias %s differs in field '%s'\n", + name, field); + zfree(old_str); + } else /* Nothing new --> keep old string */ + return; +set_new: + *old_str = *new_str; + *new_str = NULL; +} + +static void perf_pmu_update_alias(struct perf_pmu_alias *old, + struct perf_pmu_alias *newalias) +{ + perf_pmu_assign_str(old->name, "desc", &old->desc, &newalias->desc); + perf_pmu_assign_str(old->name, "long_desc", &old->long_desc, + &newalias->long_desc); + perf_pmu_assign_str(old->name, "topic", &old->topic, &newalias->topic); + perf_pmu_assign_str(old->name, "metric_expr", &old->metric_expr, + &newalias->metric_expr); + perf_pmu_assign_str(old->name, "metric_name", &old->metric_name, + &newalias->metric_name); + perf_pmu_assign_str(old->name, "value", &old->str, &newalias->str); + old->scale = newalias->scale; + old->per_pkg = newalias->per_pkg; + old->snapshot = newalias->snapshot; + memcpy(old->unit, newalias->unit, sizeof(old->unit)); +} + +/* Delete an alias entry. */ +static void perf_pmu_free_alias(struct perf_pmu_alias *newalias) +{ + zfree(&newalias->name); + zfree(&newalias->desc); + zfree(&newalias->long_desc); + zfree(&newalias->topic); + zfree(&newalias->str); + zfree(&newalias->metric_expr); + zfree(&newalias->metric_name); + parse_events_terms__purge(&newalias->terms); + free(newalias); +} + +/* Merge an alias, search in alias list. If this name is already + * present merge both of them to combine all information. 
+ */ +static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias, + struct list_head *alist) +{ + struct perf_pmu_alias *a; + + list_for_each_entry(a, alist, list) { + if (!strcasecmp(newalias->name, a->name)) { + perf_pmu_update_alias(a, newalias); + perf_pmu_free_alias(newalias); + return true; + } + } + return false; +} + static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, char *desc, char *val, char *long_desc, char *topic, @@ -241,9 +309,11 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, char *metric_expr, char *metric_name) { + struct parse_events_term *term; struct perf_pmu_alias *alias; int ret; int num; + char newval[256]; alias = malloc(sizeof(*alias)); if (!alias) @@ -262,6 +332,27 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, return ret; } + /* Scan event and remove leading zeroes, spaces, newlines, some + * platforms have terms specified as + * event=0x0091 (read from files ..//events/ + * and terms specified as event=0x91 (read from JSON files). + * + * Rebuild string to make alias->str member comparable. + */ + memset(newval, 0, sizeof(newval)); + ret = 0; + list_for_each_entry(term, &alias->terms, list) { + if (ret) + ret += scnprintf(newval + ret, sizeof(newval) - ret, + ","); + if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) + ret += scnprintf(newval + ret, sizeof(newval) - ret, + "%s=%#x", term->config, term->val.num); + else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) + ret += scnprintf(newval + ret, sizeof(newval) - ret, + "%s=%s", term->config, term->val.str); + } + alias->name = strdup(name); if (dir) { /* @@ -285,9 +376,10 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, snprintf(alias->unit, sizeof(alias->unit), "%s", unit); } alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1; - alias->str = strdup(val); + alias->str = strdup(newval); - list_add_tail(&alias->list, list); + if (!perf_pmu_merge_alias(alias, list)) + list_add_tail(&alias->list, list); return 0; } @@ -303,6 +395,9 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI buf[ret] = 0; + /* Remove trailing newline from sysfs file */ + rtrim(buf); + return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL, NULL, NULL, NULL); } diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 46e9e19ab1ac..bc32e57d17be 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -908,14 +908,11 @@ static void python_process_tracepoint(struct perf_sample *sample, if (_PyTuple_Resize(&t, n) == -1) Py_FatalError("error resizing Python tuple"); - if (!dict) { + if (!dict) call_object(handler, t, handler_name); - } else { + else call_object(handler, t, default_handler_name); - Py_DECREF(dict); - } - Py_XDECREF(all_entries_dict); Py_DECREF(t); } @@ -1235,7 +1232,6 @@ static void python_process_general_event(struct perf_sample *sample, call_object(handler, t, handler_name); - Py_DECREF(dict); Py_DECREF(t); } @@ -1627,6 +1623,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "# See the perf-script-python Documentation for the list " "of available functions.\n\n"); + fprintf(ofp, "from __future__ import print_function\n\n"); fprintf(ofp, "import os\n"); fprintf(ofp, "import sys\n\n"); @@ -1636,10 +1633,10 @@ static int 
python_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "from Core import *\n\n\n"); fprintf(ofp, "def trace_begin():\n"); - fprintf(ofp, "\tprint \"in trace_begin\"\n\n"); + fprintf(ofp, "\tprint(\"in trace_begin\")\n\n"); fprintf(ofp, "def trace_end():\n"); - fprintf(ofp, "\tprint \"in trace_end\"\n\n"); + fprintf(ofp, "\tprint(\"in trace_end\")\n\n"); while ((event = trace_find_next_event(pevent, event))) { fprintf(ofp, "def %s__%s(", event->system, event->name); @@ -1675,7 +1672,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) "common_secs, common_nsecs,\n\t\t\t" "common_pid, common_comm)\n\n"); - fprintf(ofp, "\t\tprint \""); + fprintf(ofp, "\t\tprint(\""); not_first = 0; count = 0; @@ -1736,31 +1733,31 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "%s", f->name); } - fprintf(ofp, ")\n\n"); + fprintf(ofp, "))\n\n"); - fprintf(ofp, "\t\tprint 'Sample: {'+" - "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n"); + fprintf(ofp, "\t\tprint('Sample: {'+" + "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n"); fprintf(ofp, "\t\tfor node in common_callchain:"); fprintf(ofp, "\n\t\t\tif 'sym' in node:"); - fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])"); + fprintf(ofp, "\n\t\t\t\tprint(\"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name']))"); fprintf(ofp, "\n\t\t\telse:"); - fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n"); - fprintf(ofp, "\t\tprint \"\\n\"\n\n"); + fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x]\" %% (node['ip']))\n\n"); + fprintf(ofp, "\t\tprint()\n\n"); } fprintf(ofp, "def trace_unhandled(event_name, context, " "event_fields_dict, perf_sample_dict):\n"); - fprintf(ofp, "\t\tprint get_dict_as_string(event_fields_dict)\n"); - fprintf(ofp, "\t\tprint 'Sample: {'+" - "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n"); + fprintf(ofp, "\t\tprint(get_dict_as_string(event_fields_dict))\n"); + fprintf(ofp, "\t\tprint('Sample: {'+" + "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n"); fprintf(ofp, "def print_header(" "event_name, cpu, secs, nsecs, pid, comm):\n" - "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t" - "(event_name, cpu, secs, nsecs, pid, comm),\n\n"); + "\tprint(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t" + "(event_name, cpu, secs, nsecs, pid, comm), end=\"\")\n\n"); fprintf(ofp, "def get_dict_as_string(a_dict, delimiter=' '):\n" "\treturn delimiter.join" diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 7cf2d5cc038e..8bf302cafcec 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -112,6 +112,8 @@ struct hist_entry { char level; u8 filtered; + + u16 callchain_size; union { /* * Since perf diff only supports the stdio output, TUI @@ -153,7 +155,7 @@ struct hist_entry { static __pure inline bool hist_entry__has_callchains(struct hist_entry *he) { - return hists__has_callchains(he->hists); + return he->callchain_size != 0; } static inline bool hist_entry__has_pairs(struct hist_entry *he) diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index ca9ef7017624..d39e4ff7d0bf 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -56,7 +56,7 @@ name as necessary to disambiguate it from others is necessary. Note that option .PP \fB--hide column\fP do not show the specified built-in columns. 
May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group. .PP -\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default. Currently the only built-in counters disabled by default are "usec" and "Time_Of_Day_Seconds". +\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default. Currently the only built-in counters disabled by default are "usec", "Time_Of_Day_Seconds", "APIC" and "X2APIC". The column name "all" can be used to enable all disabled-by-default built-in counters. .PP \fB--show column\fP show only the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group. diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index d6cff3070ebd..4d14bbbf9b63 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -109,6 +109,7 @@ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ unsigned int has_misc_feature_control; +unsigned int first_counter_read = 1; #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ @@ -170,6 +171,8 @@ struct thread_data { unsigned long long irq_count; unsigned int smi_count; unsigned int cpu_id; + unsigned int apic_id; + unsigned int x2apic_id; unsigned int flags; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 @@ -381,19 +384,23 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr) } /* - * Each string in this array is compared in --show and --hide cmdline. - * Thus, strings that are proper sub-sets must follow their more specific peers. + * This list matches the column headers, except + * 1. built-in only, the sysfs counters are not here -- we learn of those at run-time + * 2. Core and CPU are moved to the end, we can't have strings that contain them + * matching on them for --show and --hide. 
*/ struct msr_counter bic[] = { { 0x0, "usec" }, { 0x0, "Time_Of_Day_Seconds" }, { 0x0, "Package" }, + { 0x0, "Node" }, { 0x0, "Avg_MHz" }, + { 0x0, "Busy%" }, { 0x0, "Bzy_MHz" }, { 0x0, "TSC_MHz" }, { 0x0, "IRQ" }, { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL}, - { 0x0, "Busy%" }, + { 0x0, "sysfs" }, { 0x0, "CPU%c1" }, { 0x0, "CPU%c3" }, { 0x0, "CPU%c6" }, @@ -424,73 +431,73 @@ struct msr_counter bic[] = { { 0x0, "Cor_J" }, { 0x0, "GFX_J" }, { 0x0, "RAM_J" }, - { 0x0, "Core" }, - { 0x0, "CPU" }, { 0x0, "Mod%c6" }, - { 0x0, "sysfs" }, { 0x0, "Totl%C0" }, { 0x0, "Any%C0" }, { 0x0, "GFX%C0" }, { 0x0, "CPUGFX%" }, - { 0x0, "Node%" }, + { 0x0, "Core" }, + { 0x0, "CPU" }, + { 0x0, "APIC" }, + { 0x0, "X2APIC" }, }; - - #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) #define BIC_USEC (1ULL << 0) #define BIC_TOD (1ULL << 1) #define BIC_Package (1ULL << 2) -#define BIC_Avg_MHz (1ULL << 3) -#define BIC_Bzy_MHz (1ULL << 4) -#define BIC_TSC_MHz (1ULL << 5) -#define BIC_IRQ (1ULL << 6) -#define BIC_SMI (1ULL << 7) -#define BIC_Busy (1ULL << 8) -#define BIC_CPU_c1 (1ULL << 9) -#define BIC_CPU_c3 (1ULL << 10) -#define BIC_CPU_c6 (1ULL << 11) -#define BIC_CPU_c7 (1ULL << 12) -#define BIC_ThreadC (1ULL << 13) -#define BIC_CoreTmp (1ULL << 14) -#define BIC_CoreCnt (1ULL << 15) -#define BIC_PkgTmp (1ULL << 16) -#define BIC_GFX_rc6 (1ULL << 17) -#define BIC_GFXMHz (1ULL << 18) -#define BIC_Pkgpc2 (1ULL << 19) -#define BIC_Pkgpc3 (1ULL << 20) -#define BIC_Pkgpc6 (1ULL << 21) -#define BIC_Pkgpc7 (1ULL << 22) -#define BIC_Pkgpc8 (1ULL << 23) -#define BIC_Pkgpc9 (1ULL << 24) -#define BIC_Pkgpc10 (1ULL << 25) -#define BIC_CPU_LPI (1ULL << 26) -#define BIC_SYS_LPI (1ULL << 27) -#define BIC_PkgWatt (1ULL << 26) -#define BIC_CorWatt (1ULL << 27) -#define BIC_GFXWatt (1ULL << 28) -#define BIC_PkgCnt (1ULL << 29) -#define BIC_RAMWatt (1ULL << 30) -#define BIC_PKG__ (1ULL << 31) -#define BIC_RAM__ (1ULL << 32) -#define BIC_Pkg_J (1ULL << 33) -#define BIC_Cor_J (1ULL << 34) -#define BIC_GFX_J (1ULL << 35) -#define BIC_RAM_J (1ULL << 36) -#define BIC_Core (1ULL << 37) -#define BIC_CPU (1ULL << 38) -#define BIC_Mod_c6 (1ULL << 39) -#define BIC_sysfs (1ULL << 40) -#define BIC_Totl_c0 (1ULL << 41) -#define BIC_Any_c0 (1ULL << 42) -#define BIC_GFX_c0 (1ULL << 43) -#define BIC_CPUGFX (1ULL << 44) -#define BIC_Node (1ULL << 45) +#define BIC_Node (1ULL << 3) +#define BIC_Avg_MHz (1ULL << 4) +#define BIC_Busy (1ULL << 5) +#define BIC_Bzy_MHz (1ULL << 6) +#define BIC_TSC_MHz (1ULL << 7) +#define BIC_IRQ (1ULL << 8) +#define BIC_SMI (1ULL << 9) +#define BIC_sysfs (1ULL << 10) +#define BIC_CPU_c1 (1ULL << 11) +#define BIC_CPU_c3 (1ULL << 12) +#define BIC_CPU_c6 (1ULL << 13) +#define BIC_CPU_c7 (1ULL << 14) +#define BIC_ThreadC (1ULL << 15) +#define BIC_CoreTmp (1ULL << 16) +#define BIC_CoreCnt (1ULL << 17) +#define BIC_PkgTmp (1ULL << 18) +#define BIC_GFX_rc6 (1ULL << 19) +#define BIC_GFXMHz (1ULL << 20) +#define BIC_Pkgpc2 (1ULL << 21) +#define BIC_Pkgpc3 (1ULL << 22) +#define BIC_Pkgpc6 (1ULL << 23) +#define BIC_Pkgpc7 (1ULL << 24) +#define BIC_Pkgpc8 (1ULL << 25) +#define BIC_Pkgpc9 (1ULL << 26) +#define BIC_Pkgpc10 (1ULL << 27) +#define BIC_CPU_LPI (1ULL << 28) +#define BIC_SYS_LPI (1ULL << 29) +#define BIC_PkgWatt (1ULL << 30) +#define BIC_CorWatt (1ULL << 31) +#define BIC_GFXWatt (1ULL << 32) +#define BIC_PkgCnt (1ULL << 33) +#define BIC_RAMWatt (1ULL << 34) +#define BIC_PKG__ (1ULL << 35) +#define BIC_RAM__ (1ULL << 36) +#define BIC_Pkg_J (1ULL << 37) +#define BIC_Cor_J (1ULL << 38) +#define BIC_GFX_J (1ULL << 39) 
+#define BIC_RAM_J (1ULL << 40) +#define BIC_Mod_c6 (1ULL << 41) +#define BIC_Totl_c0 (1ULL << 42) +#define BIC_Any_c0 (1ULL << 43) +#define BIC_GFX_c0 (1ULL << 44) +#define BIC_CPUGFX (1ULL << 45) +#define BIC_Core (1ULL << 46) +#define BIC_CPU (1ULL << 47) +#define BIC_APIC (1ULL << 48) +#define BIC_X2APIC (1ULL << 49) -#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD) +#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT); -unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs; +unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) @@ -517,17 +524,34 @@ void help(void) "when COMMAND completes.\n" "If no COMMAND is specified, turbostat wakes every 5-seconds\n" "to print statistics, until interrupted.\n" - "--add add a counter\n" - " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n" - "--cpu cpu-set limit output to summary plus cpu-set:\n" - " {core | package | j,k,l..m,n-p }\n" - "--quiet skip decoding system configuration header\n" - "--interval sec.subsec Override default 5-second measurement interval\n" - "--help print this help message\n" - "--list list column headers only\n" - "--num_iterations num number of the measurement iterations\n" - "--out file create or truncate \"file\" for all output\n" - "--version print version information\n" + " -a, --add add a counter\n" + " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n" + " -c, --cpu cpu-set limit output to summary plus cpu-set:\n" + " {core | package | j,k,l..m,n-p }\n" + " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n" + " -D, --Dump displays the raw counter values\n" + " -e, --enable [all | column]\n" + " shows all or the specified disabled column\n" + " -H, --hide [column|column,column,...]\n" + " hide the specified column(s)\n" + " -i, --interval sec.subsec\n" + " Override default 5-second measurement interval\n" + " -J, --Joules displays energy in Joules instead of Watts\n" + " -l, --list list column headers only\n" + " -n, --num_iterations num\n" + " number of the measurement iterations\n" + " -o, --out file\n" + " create or truncate \"file\" for all output\n" + " -q, --quiet skip decoding system configuration header\n" + " -s, --show [column|column,column,...]\n" + " show only the specified column(s)\n" + " -S, --Summary\n" + " limits output to 1-line system summary per interval\n" + " -T, --TCC temperature\n" + " sets the Thermal Control Circuit temperature in\n" + " degrees Celsius\n" + " -h, --help print this help message\n" + " -v, --version print version information\n" "\n" "For more help, run \"man turbostat\"\n"); } @@ -601,6 +625,10 @@ void print_header(char *delim) outp += sprintf(outp, "%sCore", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU)) outp += sprintf(outp, "%sCPU", (printed++ ? delim : "")); + if (DO_BIC(BIC_APIC)) + outp += sprintf(outp, "%sAPIC", (printed++ ? delim : "")); + if (DO_BIC(BIC_X2APIC)) + outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : "")); if (DO_BIC(BIC_Avg_MHz)) outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : "")); if (DO_BIC(BIC_Busy)) @@ -880,6 +908,10 @@ int format_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "%s-", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU)) outp += sprintf(outp, "%s-", (printed++ ? 
delim : "")); + if (DO_BIC(BIC_APIC)) + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); + if (DO_BIC(BIC_X2APIC)) + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } else { if (DO_BIC(BIC_Package)) { if (p) @@ -904,6 +936,10 @@ int format_counters(struct thread_data *t, struct core_data *c, } if (DO_BIC(BIC_CPU)) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id); + if (DO_BIC(BIC_APIC)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id); + if (DO_BIC(BIC_X2APIC)) + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id); } if (DO_BIC(BIC_Avg_MHz)) @@ -1231,6 +1267,12 @@ delta_thread(struct thread_data *new, struct thread_data *old, int i; struct msr_counter *mp; + /* we run cpuid just the 1st time, copy the results */ + if (DO_BIC(BIC_APIC)) + new->apic_id = old->apic_id; + if (DO_BIC(BIC_X2APIC)) + new->x2apic_id = old->x2apic_id; + /* * the timestamps from start of measurement interval are in "old" * the timestamp from end of measurement interval are in "new" @@ -1393,6 +1435,12 @@ int sum_counters(struct thread_data *t, struct core_data *c, int i; struct msr_counter *mp; + /* copy un-changing apic_id's */ + if (DO_BIC(BIC_APIC)) + average.threads.apic_id = t->apic_id; + if (DO_BIC(BIC_X2APIC)) + average.threads.x2apic_id = t->x2apic_id; + /* remember first tv_begin */ if (average.threads.tv_begin.tv_sec == 0) average.threads.tv_begin = t->tv_begin; @@ -1619,6 +1667,34 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp) return 0; } +void get_apic_id(struct thread_data *t) +{ + unsigned int eax, ebx, ecx, edx, max_level; + + eax = ebx = ecx = edx = 0; + + if (!genuine_intel) + return; + + __cpuid(0, max_level, ebx, ecx, edx); + + __cpuid(1, eax, ebx, ecx, edx); + t->apic_id = (ebx >> 24) & 0xf; + + if (max_level < 0xb) + return; + + if (!DO_BIC(BIC_X2APIC)) + return; + + ecx = 0; + __cpuid(0xb, eax, ebx, ecx, edx); + t->x2apic_id = edx; + + if (debug && (t->apic_id != t->x2apic_id)) + fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); +} + /* * get_counters(...) 
* migrate to cpu @@ -1632,7 +1708,6 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) struct msr_counter *mp; int i; - gettimeofday(&t->tv_begin, (struct timezone *)NULL); if (cpu_migrate(cpu)) { @@ -1640,6 +1715,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -1; } + if (first_counter_read) + get_apic_id(t); retry: t->tsc = rdtsc(); /* we are running on local CPU of interest */ @@ -2432,6 +2509,12 @@ void set_node_data(void) if (pni[pkg].count > topo.nodes_per_pkg) topo.nodes_per_pkg = pni[0].count; + /* Fake 1 node per pkg for machines that don't + * expose nodes and thus avoid -nan results + */ + if (topo.nodes_per_pkg == 0) + topo.nodes_per_pkg = 1; + for (cpu = 0; cpu < topo.num_cpus; cpu++) { pkg = cpus[cpu].physical_package_id; node = cpus[cpu].physical_node_id; @@ -2879,6 +2962,7 @@ void do_sleep(void) } } + void turbostat_loop() { int retval; @@ -2892,6 +2976,7 @@ void turbostat_loop() snapshot_proc_sysfs_files(); retval = for_all_cpus(get_counters, EVEN_COUNTERS); + first_counter_read = 0; if (retval < -1) { exit(retval); } else if (retval == -1) { @@ -4392,7 +4477,7 @@ void process_cpuid() if (!quiet) { fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", max_level, family, model, stepping, family, model, stepping); - fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n", + fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n", ecx & (1 << 0) ? "SSE3" : "-", ecx & (1 << 3) ? "MONITOR" : "-", ecx & (1 << 6) ? "SMX" : "-", @@ -4401,6 +4486,7 @@ void process_cpuid() edx & (1 << 4) ? "TSC" : "-", edx & (1 << 5) ? "MSR" : "-", edx & (1 << 22) ? "ACPI-TM" : "-", + edx & (1 << 28) ? "HT" : "-", edx & (1 << 29) ? "TM" : "-"); } @@ -4652,7 +4738,6 @@ void process_cpuid() return; } - /* * in /dev/cpu/ return success for names that are numbers * ie. filter out ".", "..", "microcode". 
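The trailing comment in the hunk above describes turbostat's /dev/cpu directory filter: an entry is accepted only when its name is purely numeric (a CPU number), which weeds out ".", ".." and "microcode". Sketched in Python for brevity (turbostat itself implements this in C, and /dev/cpu is only present on systems exposing the cpuid/msr interfaces), the rule is simply:

    import os

    def is_cpu_entry(name):
        # Accept "0", "1", "17", ...; reject ".", ".." and "microcode".
        return name.isdigit()

    cpus = [name for name in os.listdir("/dev/cpu") if is_cpu_entry(name)]
    print(sorted(cpus, key=int))
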
@@ -4842,6 +4927,13 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base, struct core_data *c; struct pkg_data *p; + + /* Workaround for systems where physical_node_id==-1 + * and logical_node_id==(-1 - topo.num_cpus) + */ + if (node_id < 0) + node_id = 0; + t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id); c = GET_CORE(core_base, core_id, node_id, pkg_id); p = GET_PKG(pkg_base, pkg_id); @@ -4946,6 +5038,7 @@ int fork_it(char **argv) snapshot_proc_sysfs_files(); status = for_all_cpus(get_counters, EVEN_COUNTERS); + first_counter_read = 0; if (status) exit(status); /* clear affinity side-effect of get_counters() */ @@ -5009,7 +5102,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(outf, "turbostat version 18.06.01" + fprintf(outf, "turbostat version 18.06.20" " - Len Brown \n"); } @@ -5381,7 +5474,7 @@ void cmdline(int argc, char **argv) break; case 'e': /* --enable specified counter */ - bic_enabled |= bic_lookup(optarg, SHOW_LIST); + bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST); break; case 'd': debug++; @@ -5465,7 +5558,6 @@ void cmdline(int argc, char **argv) int main(int argc, char **argv) { outf = stderr; - cmdline(argc, argv); if (!quiet) diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index a8fb63edcf89..e2926f72a821 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -1991,8 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t) pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES; pcap->header.length = sizeof(*pcap); pcap->highest_capability = 1; - pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH | - ACPI_NFIT_CAPABILITY_MEM_FLUSH; + pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH; offset += pcap->header.length; if (t->setup_hotplug) { diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 1eefe211a4a8..b4994a94968b 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -6,4 +6,15 @@ CONFIG_TEST_BPF=m CONFIG_CGROUP_BPF=y CONFIG_NETDEVSIM=m CONFIG_NET_CLS_ACT=y +CONFIG_NET_SCHED=y CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_IPIP=y +CONFIG_IPV6=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_NET_IPGRE=y +CONFIG_IPV6_GRE=y +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_HMAC=m +CONFIG_CRYPTO_SHA256=m +CONFIG_VXLAN=y +CONFIG_GENEVE=y diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh index 35669ccd4d23..9df0d2ac45f8 100755 --- a/tools/testing/selftests/bpf/test_kmod.sh +++ b/tools/testing/selftests/bpf/test_kmod.sh @@ -1,6 +1,15 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +msg="skip all tests:" +if [ "$(id -u)" != "0" ]; then + echo $msg please run this as root >&2 + exit $ksft_skip +fi + SRC_TREE=../../../../ test_run() diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh index ce2e15e4f976..677686198df3 100755 --- a/tools/testing/selftests/bpf/test_lirc_mode2.sh +++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh @@ -1,6 +1,15 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +# Kselftest framework requirement - SKIP code is 4. 
+ksft_skip=4 + +msg="skip all tests:" +if [ $UID != 0 ]; then + echo $msg please run this as root >&2 + exit $ksft_skip +fi + GREEN='\033[0;92m' RED='\033[0;31m' NC='\033[0m' # No Color diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh index 1c77994b5e71..270fa8f49573 100755 --- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh +++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh @@ -21,6 +21,15 @@ # An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this # datagram can be read on NS6 when binding to fb00::6. +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +msg="skip all tests:" +if [ $UID != 0 ]; then + echo $msg please run this as root >&2 + exit $ksft_skip +fi + TMP_FILE="/tmp/selftest_lwt_seg6local.txt" cleanup() diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index e78aad0a68bb..be800d0e7a84 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -163,6 +163,10 @@ def bpftool(args, JSON=True, ns="", fail=True): def bpftool_prog_list(expected=None, ns=""): _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True) + # Remove the base progs + for p in base_progs: + if p in progs: + progs.remove(p) if expected is not None: if len(progs) != expected: fail(True, "%d BPF programs loaded, expected %d" % @@ -171,6 +175,10 @@ def bpftool_prog_list(expected=None, ns=""): def bpftool_map_list(expected=None, ns=""): _, maps = bpftool("map show", JSON=True, ns=ns, fail=True) + # Remove the base maps + for m in base_maps: + if m in maps: + maps.remove(m) if expected is not None: if len(maps) != expected: fail(True, "%d BPF maps loaded, expected %d" % @@ -585,8 +593,8 @@ skip(os.getuid() != 0, "test must be run as root") # Check tools ret, progs = bpftool("prog", fail=False) skip(ret != 0, "bpftool not installed") -# Check no BPF programs are loaded -skip(len(progs) != 0, "BPF programs already loaded on the system") +base_progs = progs +_, base_maps = bpftool("map") # Check netdevsim ret, out = cmd("modprobe netdevsim", fail=False) diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 05c8cb71724a..9e78df207919 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -1413,18 +1413,12 @@ static int test_suite(void) int main(int argc, char **argv) { - struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY}; int iov_count = 1, length = 1024, rate = 1; struct sockmap_options options = {0}; int opt, longindex, err, cg_fd = 0; char *bpf_file = BPF_SOCKMAP_FILENAME; int test = PING_PONG; - if (setrlimit(RLIMIT_MEMLOCK, &r)) { - perror("setrlimit(RLIMIT_MEMLOCK)"); - return 1; - } - if (argc < 2) return test_suite(); diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh index aeb2901f21f4..546aee3e9fb4 100755 --- a/tools/testing/selftests/bpf/test_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tunnel.sh @@ -608,28 +608,26 @@ setup_xfrm_tunnel() test_xfrm_tunnel() { config_device - #tcpdump -nei veth1 ip & - output=$(mktemp) - cat /sys/kernel/debug/tracing/trace_pipe | tee $output & - setup_xfrm_tunnel + > /sys/kernel/debug/tracing/trace + setup_xfrm_tunnel tc qdisc add dev veth1 clsact tc filter add dev veth1 proto ip ingress bpf da obj test_tunnel_kern.o \ sec xfrm_get_state ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 sleep 1 - grep "reqid 
1" $output + grep "reqid 1" /sys/kernel/debug/tracing/trace check_err $? - grep "spi 0x1" $output + grep "spi 0x1" /sys/kernel/debug/tracing/trace check_err $? - grep "remote ip 0xac100164" $output + grep "remote ip 0xac100164" /sys/kernel/debug/tracing/trace check_err $? cleanup if [ $ret -ne 0 ]; then - echo -e ${RED}"FAIL: xfrm tunnel"${NC} - return 1 - fi - echo -e ${GREEN}"PASS: xfrm tunnel"${NC} + echo -e ${RED}"FAIL: xfrm tunnel"${NC} + return 1 + fi + echo -e ${GREEN}"PASS: xfrm tunnel"${NC} } attach_bpf() @@ -657,6 +655,10 @@ cleanup() ip link del ip6geneve11 2> /dev/null ip link del erspan11 2> /dev/null ip link del ip6erspan11 2> /dev/null + ip xfrm policy delete dir out src 10.1.1.200/32 dst 10.1.1.100/32 2> /dev/null + ip xfrm policy delete dir in src 10.1.1.100/32 dst 10.1.1.200/32 2> /dev/null + ip xfrm state delete src 172.16.1.100 dst 172.16.1.200 proto esp spi 0x1 2> /dev/null + ip xfrm state delete src 172.16.1.200 dst 172.16.1.100 proto esp spi 0x2 2> /dev/null } cleanup_exit() @@ -668,7 +670,7 @@ cleanup_exit() check() { - ip link help $1 2>&1 | grep -q "^Usage:" + ip link help 2>&1 | grep -q "\s$1\s" if [ $? -ne 0 ];then echo "SKIP $1: iproute2 not support" cleanup diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index 128e548aa377..1a0ac3a29ec5 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -12,3 +12,4 @@ tcp_mmap udpgso udpgso_bench_rx udpgso_bench_tx +tcp_inq diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config index 7ba089b33e8b..cd3a2f1545b5 100644 --- a/tools/testing/selftests/net/config +++ b/tools/testing/selftests/net/config @@ -12,3 +12,5 @@ CONFIG_NET_IPVTI=y CONFIG_INET6_XFRM_MODE_TUNNEL=y CONFIG_IPV6_VTI=y CONFIG_DUMMY=y +CONFIG_BRIDGE=y +CONFIG_VLAN_8021Q=y diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh old mode 100644 new mode 100755 diff --git a/tools/testing/selftests/pstore/pstore_post_reboot_tests b/tools/testing/selftests/pstore/pstore_post_reboot_tests index 6ccb154cb4aa..22f8df1ad7d4 100755 --- a/tools/testing/selftests/pstore/pstore_post_reboot_tests +++ b/tools/testing/selftests/pstore/pstore_post_reboot_tests @@ -7,13 +7,16 @@ # # Released under the terms of the GPL v2. +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + . ./common_tests if [ -e $REBOOT_FLAG ]; then rm $REBOOT_FLAG else prlog "pstore_crash_test has not been executed yet. we skip further tests." - exit 0 + exit $ksft_skip fi prlog -n "Mounting pstore filesystem ... " diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c index 6a9f602a8718..615252331813 100644 --- a/tools/testing/selftests/rseq/param_test.c +++ b/tools/testing/selftests/rseq/param_test.c @@ -137,6 +137,30 @@ unsigned int yield_mod_cnt, nr_abort; "subic. 
%%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \ "bne 222b\n\t" \ "333:\n\t" + +#elif defined(__mips__) + +#define RSEQ_INJECT_INPUT \ + , [loop_cnt_1]"m"(loop_cnt[1]) \ + , [loop_cnt_2]"m"(loop_cnt[2]) \ + , [loop_cnt_3]"m"(loop_cnt[3]) \ + , [loop_cnt_4]"m"(loop_cnt[4]) \ + , [loop_cnt_5]"m"(loop_cnt[5]) \ + , [loop_cnt_6]"m"(loop_cnt[6]) + +#define INJECT_ASM_REG "$5" + +#define RSEQ_INJECT_CLOBBER \ + , INJECT_ASM_REG + +#define RSEQ_INJECT_ASM(n) \ + "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ + "beqz " INJECT_ASM_REG ", 333f\n\t" \ + "222:\n\t" \ + "addiu " INJECT_ASM_REG ", -1\n\t" \ + "bnez " INJECT_ASM_REG ", 222b\n\t" \ + "333:\n\t" + #else #error unsupported target #endif diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h index 3b055f9aeaab..3cea19877227 100644 --- a/tools/testing/selftests/rseq/rseq-arm.h +++ b/tools/testing/selftests/rseq/rseq-arm.h @@ -57,6 +57,7 @@ do { \ #define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \ abort_label, version, flags, \ start_ip, post_commit_offset, abort_ip) \ + ".balign 32\n\t" \ __rseq_str(table_label) ":\n\t" \ ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \ diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h new file mode 100644 index 000000000000..7f48ecf46994 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-mips.h @@ -0,0 +1,725 @@ +/* SPDX-License-Identifier: LGPL-2.1 OR MIT */ +/* + * Author: Paul Burton + * (C) Copyright 2018 MIPS Tech LLC + * + * Based on rseq-arm.h: + * (C) Copyright 2016-2018 - Mathieu Desnoyers + */ + +#define RSEQ_SIG 0x53053053 + +#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory") +#define rseq_smp_rmb() rseq_smp_mb() +#define rseq_smp_wmb() rseq_smp_mb() + +#define rseq_smp_load_acquire(p) \ +__extension__ ({ \ + __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ + rseq_smp_mb(); \ + ____p1; \ +}) + +#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb() + +#define rseq_smp_store_release(p, v) \ +do { \ + rseq_smp_mb(); \ + RSEQ_WRITE_ONCE(*p, v); \ +} while (0) + +#ifdef RSEQ_SKIP_FASTPATH +#include "rseq-skip.h" +#else /* !RSEQ_SKIP_FASTPATH */ + +#if _MIPS_SZLONG == 64 +# define LONG ".dword" +# define LONG_LA "dla" +# define LONG_L "ld" +# define LONG_S "sd" +# define LONG_ADDI "daddiu" +# define U32_U64_PAD(x) x +#elif _MIPS_SZLONG == 32 +# define LONG ".word" +# define LONG_LA "la" +# define LONG_L "lw" +# define LONG_S "sw" +# define LONG_ADDI "addiu" +# ifdef __BIG_ENDIAN +# define U32_U64_PAD(x) "0x0, " x +# else +# define U32_U64_PAD(x) x ", 0x0" +# endif +#else +# error unsupported _MIPS_SZLONG +#endif + +#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \ + post_commit_offset, abort_ip) \ + ".pushsection __rseq_table, \"aw\"\n\t" \ + ".balign 32\n\t" \ + ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ + RSEQ_INJECT_ASM(1) \ + LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \ + LONG_S " $4, %[" __rseq_str(rseq_cs) "]\n\t" \ 
+ __rseq_str(label) ":\n\t" + +#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ + RSEQ_INJECT_ASM(2) \ + "lw $4, %[" __rseq_str(current_cpu_id) "]\n\t" \ + "bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t" + +#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \ + abort_label, version, flags, \ + start_ip, post_commit_offset, abort_ip) \ + ".balign 32\n\t" \ + __rseq_str(table_label) ":\n\t" \ + ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \ + ".word " __rseq_str(RSEQ_SIG) "\n\t" \ + __rseq_str(label) ":\n\t" \ + teardown \ + "b %l[" __rseq_str(abort_label) "]\n\t" + +#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \ + start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \ + abort_label, 0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \ + __rseq_str(label) ":\n\t" \ + teardown \ + "b %l[" __rseq_str(cmpfail_label) "]\n\t" + +#define rseq_workaround_gcc_asm_size_guess() __asm__ __volatile__("") + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + rseq_workaround_gcc_asm_size_guess(); + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[error2]\n\t" +#endif + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (__rseq_abi.cpu_id), + [rseq_cs] "m" (__rseq_abi.rseq_cs), + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_workaround_gcc_asm_size_guess(); + return 0; +abort: + rseq_workaround_gcc_asm_size_guess(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_workaround_gcc_asm_size_guess(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, + off_t voffp, intptr_t *load, int cpu) +{ + RSEQ_INJECT_C(9) + + rseq_workaround_gcc_asm_size_guess(); + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "beq $4, %[expectnot], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "beq $4, %[expectnot], %l[error2]\n\t" +#endif + LONG_S " $4, %[load]\n\t" + LONG_ADDI " $4, %[voffp]\n\t" + LONG_L " $4, 0($4)\n\t" + /* final store */ + LONG_S " $4, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (__rseq_abi.cpu_id), + [rseq_cs] "m" (__rseq_abi.rseq_cs), + /* final store input */ + [v] "m" (*v), + [expectnot] "r" (expectnot), + [voffp] "Ir" (voffp), + [load] "m" (*load) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_workaround_gcc_asm_size_guess(); + return 0; +abort: + rseq_workaround_gcc_asm_size_guess(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_workaround_gcc_asm_size_guess(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_addv(intptr_t *v, intptr_t count, int cpu) +{ + RSEQ_INJECT_C(9) + + rseq_workaround_gcc_asm_size_guess(); + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) +#endif + LONG_L " $4, %[v]\n\t" + LONG_ADDI " $4, %[count]\n\t" + /* final store */ + LONG_S " $4, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(4) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (__rseq_abi.cpu_id), + [rseq_cs] "m" (__rseq_abi.rseq_cs), + [v] "m" (*v), + [count] "Ir" (count) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort +#ifdef RSEQ_COMPARE_TWICE + , error1 +#endif + ); + rseq_workaround_gcc_asm_size_guess(); + return 0; +abort: + rseq_workaround_gcc_asm_size_guess(); + RSEQ_INJECT_FAILED + return -1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + rseq_workaround_gcc_asm_size_guess(); + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[error2]\n\t" +#endif + /* try store */ + LONG_S " %[newv2], %[v2]\n\t" + RSEQ_INJECT_ASM(5) + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (__rseq_abi.cpu_id), + [rseq_cs] "m" (__rseq_abi.rseq_cs), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_workaround_gcc_asm_size_guess(); + return 0; +abort: + rseq_workaround_gcc_asm_size_guess(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_workaround_gcc_asm_size_guess(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + rseq_workaround_gcc_asm_size_guess(); + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[error2]\n\t" +#endif + /* try store */ + LONG_S " %[newv2], %[v2]\n\t" + RSEQ_INJECT_ASM(5) + "sync\n\t" /* full sync provides store-release */ + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (__rseq_abi.cpu_id), + [rseq_cs] "m" (__rseq_abi.rseq_cs), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_workaround_gcc_asm_size_guess(); + return 0; +abort: + rseq_workaround_gcc_asm_size_guess(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_workaround_gcc_asm_size_guess(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t expect2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + rseq_workaround_gcc_asm_size_guess(); + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ + /* Start rseq by storing table entry 
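Another common pattern (again a hypothetical sketch, not from the patch) is a per-CPU intrusive list push built on rseq_cmpeqv_trystorev_storev(): the speculative store links the new node to the old head and the final store publishes the node, both happening on the same CPU or not at all:

#include <stdint.h>

#include "rseq.h"

struct node {
	struct node *next;
};

#define NR_CPUS_GUESS 1024	/* hypothetical upper bound */

static intptr_t list_head[NR_CPUS_GUESS];	/* struct node * stored as intptr_t */

static void percpu_list_push(struct node *node)
{
	for (;;) {
		int cpu = rseq_current_cpu();
		intptr_t expect = list_head[cpu];
		int ret;

		ret = rseq_cmpeqv_trystorev_storev(&list_head[cpu], expect,
						   (intptr_t *)&node->next, expect,
						   (intptr_t)node, cpu);
		if (ret == 0)
			return;		/* node is now the head for that CPU */
		/* ret > 0: head changed under us; ret < 0: aborted. Retry. */
	}
}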
pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) + LONG_L " $4, %[v2]\n\t" + "bne $4, %[expect2], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(5) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[error2]\n\t" + LONG_L " $4, %[v2]\n\t" + "bne $4, %[expect2], %l[error3]\n\t" +#endif + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (__rseq_abi.cpu_id), + [rseq_cs] "m" (__rseq_abi.rseq_cs), + /* cmp2 input */ + [v2] "m" (*v2), + [expect2] "r" (expect2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2, error3 +#endif + ); + rseq_workaround_gcc_asm_size_guess(); + return 0; +abort: + rseq_workaround_gcc_asm_size_guess(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_workaround_gcc_asm_size_guess(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("1st expected value comparison failed"); +error3: + rseq_bug("2nd expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uintptr_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + rseq_workaround_gcc_asm_size_guess(); + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ + LONG_S " %[src], %[rseq_scratch0]\n\t" + LONG_S " %[dst], %[rseq_scratch1]\n\t" + LONG_S " %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], 7f\n\t" +#endif + /* try memcpy */ + "beqz %[len], 333f\n\t" \ + "222:\n\t" \ + "lb $4, 0(%[src])\n\t" \ + "sb $4, 0(%[dst])\n\t" \ + LONG_ADDI " %[src], 1\n\t" \ + LONG_ADDI " %[dst], 1\n\t" \ + LONG_ADDI " %[len], -1\n\t" \ + "bnez %[len], 222b\n\t" \ + "333:\n\t" \ + RSEQ_INJECT_ASM(5) + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t" + "b 8f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + abort, 1b, 2b, 4f) + RSEQ_ASM_DEFINE_CMPFAIL(5, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error2) +#endif + "8:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (__rseq_abi.cpu_id), + [rseq_cs] "m" (__rseq_abi.rseq_cs), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_workaround_gcc_asm_size_guess(); + return 0; +abort: + rseq_workaround_gcc_asm_size_guess(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_workaround_gcc_asm_size_guess(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_workaround_gcc_asm_size_guess(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_workaround_gcc_asm_size_guess(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uintptr_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + rseq_workaround_gcc_asm_size_guess(); + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */ + LONG_S " %[src], %[rseq_scratch0]\n\t" + LONG_S " %[dst], %[rseq_scratch1]\n\t" + LONG_S " %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
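The memcpy variant just shown is typically used to move whole items into a per-CPU array: copy the payload, then commit the new count, with the whole sequence tied to one CPU. A hypothetical sketch (the buffer layout and sizes are mine; the 0/1/-1 return convention is taken from the code above):

#include <stddef.h>
#include <stdint.h>

#include "rseq.h"

#define ITEM_SIZE	64	/* hypothetical fixed item size */
#define ITEMS_PER_CPU	128
#define NR_CPUS_GUESS	1024

struct percpu_buf {
	intptr_t count;				/* items currently stored */
	char items[ITEMS_PER_CPU][ITEM_SIZE];
};

static struct percpu_buf bufs[NR_CPUS_GUESS];

/* Returns 0 on success, -1 when the current CPU's buffer is full. */
static int percpu_buf_put(const void *item)
{
	for (;;) {
		int cpu = rseq_current_cpu();
		intptr_t count = bufs[cpu].count;
		int ret;

		if (count >= ITEMS_PER_CPU)
			return -1;
		ret = rseq_cmpeqv_trymemcpy_storev(&bufs[cpu].count, count,
						   bufs[cpu].items[count],
						   (void *)item, ITEM_SIZE,
						   count + 1, cpu);
		if (ret == 0)
			return 0;	/* item copied and count published */
		/* ret > 0: count moved under us; ret < 0: aborted. Retry. */
	}
}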
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], 7f\n\t" +#endif + /* try memcpy */ + "beqz %[len], 333f\n\t" \ + "222:\n\t" \ + "lb $4, 0(%[src])\n\t" \ + "sb $4, 0(%[dst])\n\t" \ + LONG_ADDI " %[src], 1\n\t" \ + LONG_ADDI " %[dst], 1\n\t" \ + LONG_ADDI " %[len], -1\n\t" \ + "bnez %[len], 222b\n\t" \ + "333:\n\t" \ + RSEQ_INJECT_ASM(5) + "sync\n\t" /* full sync provides store-release */ + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t" + "b 8f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + abort, 1b, 2b, 4f) + RSEQ_ASM_DEFINE_CMPFAIL(5, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error2) +#endif + "8:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (__rseq_abi.cpu_id), + [rseq_cs] "m" (__rseq_abi.rseq_cs), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_workaround_gcc_asm_size_guess(); + return 0; +abort: + rseq_workaround_gcc_asm_size_guess(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_workaround_gcc_asm_size_guess(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_workaround_gcc_asm_size_guess(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_workaround_gcc_asm_size_guess(); + rseq_bug("expected value comparison failed"); +#endif +} + +#endif /* !RSEQ_SKIP_FASTPATH */ diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h index 0a808575cbc4..86ce22417e0d 100644 --- a/tools/testing/selftests/rseq/rseq.h +++ b/tools/testing/selftests/rseq/rseq.h @@ -73,6 +73,8 @@ extern __thread volatile struct rseq __rseq_abi; #include #elif defined(__PPC__) #include +#elif defined(__mips__) +#include #else #error unsupported target #endif @@ -131,17 +133,27 @@ static inline uint32_t rseq_current_cpu(void) return cpu; } +static inline void rseq_clear_rseq_cs(void) +{ +#ifdef __LP64__ + __rseq_abi.rseq_cs.ptr = 0; +#else + __rseq_abi.rseq_cs.ptr.ptr32 = 0; +#endif +} + /* - * rseq_prepare_unload() should be invoked by each thread using rseq_finish*() - * at least once between their last rseq_finish*() and library unload of the - * library defining the rseq critical section (struct 
rseq_cs). This also - * applies to use of rseq in code generated by JIT: rseq_prepare_unload() - * should be invoked at least once by each thread using rseq_finish*() before - * reclaim of the memory holding the struct rseq_cs. + * rseq_prepare_unload() should be invoked by each thread executing a rseq + * critical section at least once between their last critical section and + * library unload of the library defining the rseq critical section + * (struct rseq_cs). This also applies to use of rseq in code generated by + * JIT: rseq_prepare_unload() should be invoked at least once by each + * thread executing a rseq critical section before reclaim of the memory + * holding the struct rseq_cs. */ static inline void rseq_prepare_unload(void) { - __rseq_abi.rseq_cs = 0; + rseq_clear_rseq_cs(); } #endif /* RSEQ_H_ */ diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh old mode 100644 new mode 100755 diff --git a/tools/testing/selftests/sparc64/Makefile b/tools/testing/selftests/sparc64/Makefile index 2082eeffd779..a19531dba4dc 100644 --- a/tools/testing/selftests/sparc64/Makefile +++ b/tools/testing/selftests/sparc64/Makefile @@ -1,7 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +uname_M := $(shell uname -m 2>/dev/null || echo not) +ARCH ?= $(shell echo $(uname_M) | sed -e s/x86_64/x86/) + +ifneq ($(ARCH),sparc64) +nothing: +.PHONY: all clean run_tests install +.SILENT: +else + SUBDIRS := drivers TEST_PROGS := run.sh + .PHONY: all clean include ../lib.mk @@ -18,10 +29,6 @@ all: fi \ done -override define RUN_TESTS - @cd $(OUTPUT); ./run.sh -endef - override define INSTALL_RULE mkdir -p $(INSTALL_PATH) install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) @@ -33,10 +40,6 @@ override define INSTALL_RULE done; endef -override define EMIT_TESTS - echo "./run.sh" -endef - override define CLEAN @for DIR in $(SUBDIRS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ @@ -44,3 +47,4 @@ override define CLEAN make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ done endef +endif diff --git a/tools/testing/selftests/sparc64/drivers/Makefile b/tools/testing/selftests/sparc64/drivers/Makefile index 6264f40bbdbc..deb0df415565 100644 --- a/tools/testing/selftests/sparc64/drivers/Makefile +++ b/tools/testing/selftests/sparc64/drivers/Makefile @@ -1,4 +1,4 @@ - +# SPDX-License-Identifier: GPL-2.0 INCLUDEDIR := -I. CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh index 24cff498b31a..fc9f8cde7d42 100755 --- a/tools/testing/selftests/static_keys/test_static_keys.sh +++ b/tools/testing/selftests/static_keys/test_static_keys.sh @@ -2,6 +2,19 @@ # SPDX-License-Identifier: GPL-2.0 # Runs static keys kernel module tests +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +if ! /sbin/modprobe -q -n test_static_key_base; then + echo "static_key: module test_static_key_base is not found [SKIP]" + exit $ksft_skip +fi + +if ! 
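To illustrate the rseq_prepare_unload() rewording above (hypothetical caller, not part of the patch): every thread that executed rseq critical sections whose struct rseq_cs descriptors live in a shared object should clear its rseq_cs pointer before that object is unmapped, for instance before dlclose():

#include <dlfcn.h>

#include "rseq.h"

/*
 * Hypothetical: "handle" is a dlopen()ed library whose code ran rseq
 * critical sections on this thread.  Clearing __rseq_abi.rseq_cs first
 * ensures the kernel never follows a pointer into memory that is about
 * to disappear.
 */
static void close_rseq_library(void *handle)
{
	rseq_prepare_unload();
	dlclose(handle);
}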
/sbin/modprobe -q -n test_static_keys; then + echo "static_key: module test_static_keys is not found [SKIP]" + exit $ksft_skip +fi + if /sbin/modprobe -q test_static_key_base; then if /sbin/modprobe -q test_static_keys; then echo "static_key: ok" diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config new file mode 100644 index 000000000000..1ab7e8130db2 --- /dev/null +++ b/tools/testing/selftests/sync/config @@ -0,0 +1,4 @@ +CONFIG_STAGING=y +CONFIG_ANDROID=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh index ec232c3cfcaa..584eb8ea780a 100755 --- a/tools/testing/selftests/sysctl/sysctl.sh +++ b/tools/testing/selftests/sysctl/sysctl.sh @@ -14,6 +14,9 @@ # This performs a series tests against the proc sysctl interface. +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + TEST_NAME="sysctl" TEST_DRIVER="test_${TEST_NAME}" TEST_DIR=$(dirname $0) @@ -41,7 +44,7 @@ test_modprobe() echo "$0: $DIR not present" >&2 echo "You must have the following enabled in your kernel:" >&2 cat $TEST_DIR/config >&2 - exit 1 + exit $ksft_skip fi } @@ -98,28 +101,30 @@ test_reqs() uid=$(id -u) if [ $uid -ne 0 ]; then echo $msg must be run as root >&2 - exit 0 + exit $ksft_skip fi if ! which perl 2> /dev/null > /dev/null; then echo "$0: You need perl installed" - exit 1 + exit $ksft_skip fi if ! which getconf 2> /dev/null > /dev/null; then echo "$0: You need getconf installed" - exit 1 + exit $ksft_skip fi if ! which diff 2> /dev/null > /dev/null; then echo "$0: You need diff installed" - exit 1 + exit $ksft_skip fi } function load_req_mod() { - trap "test_modprobe" EXIT - if [ ! -d $DIR ]; then + if ! modprobe -q -n $TEST_DRIVER; then + echo "$0: module $TEST_DRIVER not found [SKIP]" + exit $ksft_skip + fi modprobe $TEST_DRIVER if [ $? -ne 0 ]; then exit @@ -765,6 +770,7 @@ function parse_args() test_reqs allow_user_defaults check_production_sysctl_writes_strict +test_modprobe load_req_mod trap "test_finish" EXIT diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh index d60506fc77f8..f9b31a57439b 100755 --- a/tools/testing/selftests/user/test_user_copy.sh +++ b/tools/testing/selftests/user/test_user_copy.sh @@ -2,6 +2,13 @@ # SPDX-License-Identifier: GPL-2.0 # Runs copy_to/from_user infrastructure using test_user_copy kernel module +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +if ! 
/sbin/modprobe -q -n test_user_copy; then + echo "user: module test_user_copy is not found [SKIP]" + exit $ksft_skip +fi if /sbin/modprobe -q test_user_copy; then /sbin/modprobe -q -r test_user_copy echo "user_copy: ok" diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c index 1097f04e4d80..bcec71250873 100644 --- a/tools/testing/selftests/vm/compaction_test.c +++ b/tools/testing/selftests/vm/compaction_test.c @@ -16,6 +16,8 @@ #include #include +#include "../kselftest.h" + #define MAP_SIZE 1048576 struct map_list { @@ -169,7 +171,7 @@ int main(int argc, char **argv) printf("Either the sysctl compact_unevictable_allowed is not\n" "set to 1 or couldn't read the proc file.\n" "Skipping the test\n"); - return 0; + return KSFT_SKIP; } lim.rlim_cur = RLIM_INFINITY; diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c index 4997b9222cfa..637b6d0ac0d0 100644 --- a/tools/testing/selftests/vm/mlock2-tests.c +++ b/tools/testing/selftests/vm/mlock2-tests.c @@ -9,6 +9,8 @@ #include #include "mlock2.h" +#include "../kselftest.h" + struct vm_boundaries { unsigned long start; unsigned long end; @@ -303,7 +305,7 @@ static int test_mlock_lock() if (mlock2_(map, 2 * page_size, 0)) { if (errno == ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("mlock2(0)"); goto unmap; @@ -412,7 +414,7 @@ static int test_mlock_onfault() if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { if (errno == ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("mlock2(MLOCK_ONFAULT)"); goto unmap; @@ -425,7 +427,7 @@ static int test_mlock_onfault() if (munlock(map, 2 * page_size)) { if (errno == ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("munlock()"); goto unmap; @@ -457,7 +459,7 @@ static int test_lock_onfault_of_present() if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { if (errno == ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("mlock2(MLOCK_ONFAULT)"); goto unmap; @@ -583,7 +585,7 @@ static int test_vma_management(bool call_mlock) if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) { if (errno == ENOSYS) { printf("Cannot call new mlock family, skipping test\n"); - _exit(0); + _exit(KSFT_SKIP); } perror("mlock(ONFAULT)\n"); goto out; diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index 22d564673830..88cbe5575f0c 100755 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests @@ -2,6 +2,9 @@ # SPDX-License-Identifier: GPL-2.0 #please run as root +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + mnt=./huge exitcode=0 @@ -36,7 +39,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages if [ $? 
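The KSFT_SKIP conversions above (and the ksft_skip=4 additions in the shell tests) follow the kselftest convention that exit status 4 means skipped rather than passed or failed. A minimal sketch of the pattern in a C selftest; the specific prerequisite probed here is only an example:

#include <stdio.h>
#include <unistd.h>

#include "../kselftest.h"	/* defines KSFT_SKIP (exit code 4) */

int main(void)
{
	/* Example prerequisite check; any unmet requirement works the same way. */
	if (access("/proc/sys/vm/compact_unevictable_allowed", R_OK)) {
		printf("skip: prerequisite not available\n");
		return KSFT_SKIP;	/* harness reports SKIP, not PASS or FAIL */
	}

	printf("ok\n");
	return 0;
}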
-ne 0 ]; then echo "Please run this test as root" - exit 1 + exit $ksft_skip fi while read name size unit; do if [ "$name" = "HugePages_Free:" ]; then diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index de2f9ec8a87f..7b8171e3128a 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -69,6 +69,8 @@ #include #include +#include "../kselftest.h" + #ifdef __NR_userfaultfd static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; @@ -1322,7 +1324,7 @@ int main(int argc, char **argv) int main(void) { printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); - return 0; + return KSFT_SKIP; } #endif /* __NR_userfaultfd */ diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c index 246145b84a12..4d9dc3f2fd70 100644 --- a/tools/testing/selftests/x86/sigreturn.c +++ b/tools/testing/selftests/x86/sigreturn.c @@ -610,21 +610,41 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss) */ for (int i = 0; i < NGREG; i++) { greg_t req = requested_regs[i], res = resulting_regs[i]; + if (i == REG_TRAPNO || i == REG_IP) continue; /* don't care */ - if (i == REG_SP) { - printf("\tSP: %llx -> %llx\n", (unsigned long long)req, - (unsigned long long)res); + if (i == REG_SP) { /* - * In many circumstances, the high 32 bits of rsp - * are zeroed. For example, we could be a real - * 32-bit program, or we could hit any of a number - * of poorly-documented IRET or segmented ESP - * oddities. If this happens, it's okay. + * If we were using a 16-bit stack segment, then + * the kernel is a bit stuck: IRET only restores + * the low 16 bits of ESP/RSP if SS is 16-bit. + * The kernel uses a hack to restore bits 31:16, + * but that hack doesn't help with bits 63:32. + * On Intel CPUs, bits 63:32 end up zeroed, and, on + * AMD CPUs, they leak the high bits of the kernel + * espfix64 stack pointer. There's very little that + * the kernel can do about it. + * + * Similarly, if we are returning to a 32-bit context, + * the CPU will often lose the high 32 bits of RSP. */ - if (res == (req & 0xFFFFFFFF)) - continue; /* OK; not expected to work */ + + if (res == req) + continue; + + if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) { + printf("[NOTE]\tSP: %llx -> %llx\n", + (unsigned long long)req, + (unsigned long long)res); + continue; + } + + printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n", + (unsigned long long)requested_regs[i], + (unsigned long long)resulting_regs[i]); + nerrs++; + continue; } bool ignore_reg = false; @@ -654,25 +674,18 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss) #endif /* Sanity check on the kernel */ - if (i == REG_CX && requested_regs[i] != resulting_regs[i]) { + if (i == REG_CX && req != res) { printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n", - (unsigned long long)requested_regs[i], - (unsigned long long)resulting_regs[i]); + (unsigned long long)req, + (unsigned long long)res); nerrs++; continue; } - if (requested_regs[i] != resulting_regs[i] && !ignore_reg) { - /* - * SP is particularly interesting here. The - * usual cause of failures is that we hit the - * nasty IRET case of returning to a 16-bit SS, - * in which case bits 16:31 of the *kernel* - * stack pointer persist in ESP. 
- */ + if (req != res && !ignore_reg) { printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n", - i, (unsigned long long)requested_regs[i], - (unsigned long long)resulting_regs[i]); + i, (unsigned long long)req, + (unsigned long long)res); nerrs++; } } diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh index 754de7da426a..232e958ec454 100755 --- a/tools/testing/selftests/zram/zram.sh +++ b/tools/testing/selftests/zram/zram.sh @@ -2,6 +2,9 @@ # SPDX-License-Identifier: GPL-2.0 TCID="zram.sh" +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + . ./zram_lib.sh run_zram () { @@ -24,5 +27,5 @@ elif [ -b /dev/zram0 ]; then else echo "$TCID : No zram.ko module or /dev/zram0 device file not found" echo "$TCID : CONFIG_ZRAM is not set" - exit 1 + exit $ksft_skip fi diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh index f6a9c73e7a44..9e73a4fb9b0a 100755 --- a/tools/testing/selftests/zram/zram_lib.sh +++ b/tools/testing/selftests/zram/zram_lib.sh @@ -18,6 +18,9 @@ MODULE=0 dev_makeswap=-1 dev_mounted=-1 +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + trap INT check_prereqs() @@ -27,7 +30,7 @@ check_prereqs() if [ $uid -ne 0 ]; then echo $msg must be run as root >&2 - exit 0 + exit $ksft_skip fi } diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h index 9a45f90e2d08..369ee308b668 100644 --- a/tools/virtio/linux/scatterlist.h +++ b/tools/virtio/linux/scatterlist.h @@ -36,7 +36,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page) */ BUG_ON((unsigned long) page & 0x03); #ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif sg->page_link = page_link | (unsigned long) page; @@ -67,7 +66,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page, static inline struct page *sg_page(struct scatterlist *sg) { #ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif return (struct page *)((sg)->page_link & ~0x3); @@ -116,9 +114,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, **/ static inline void sg_mark_end(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif /* * Set termination bit, clear potential chain bit */ @@ -136,17 +131,11 @@ static inline void sg_mark_end(struct scatterlist *sg) **/ static inline void sg_unmark_end(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif sg->page_link &= ~0x02; } static inline struct scatterlist *sg_next(struct scatterlist *sg) { -#ifdef CONFIG_DEBUG_SG - BUG_ON(sg->sg_magic != SG_MAGIC); -#endif if (sg_is_last(sg)) return NULL; @@ -160,13 +149,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg) static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) { memset(sgl, 0, sizeof(*sgl) * nents); -#ifdef CONFIG_DEBUG_SG - { - unsigned int i; - for (i = 0; i < nents; i++) - sgl[i].sg_magic = SG_MAGIC; - } -#endif sg_mark_end(&sgl[nents - 1]); } diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 72143cfaf6ec..ea434ddc8499 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT config KVM_COMPAT def_bool y - depends on KVM && COMPAT && !S390 + depends on KVM && COMPAT && !(S390 || ARM64) config HAVE_KVM_IRQ_BYPASS bool diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 
8d90de213ce9..1d90d79706bd 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) phys_addr_t next; assert_spin_locked(&kvm->mmu_lock); + WARN_ON(size & ~PAGE_MASK); + pgd = kvm->arch.pgd + stage2_pgd_index(addr); do { /* diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index ff7dc890941a..cdce653e3c47 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info) pr_warn("GICV physical address 0x%llx not page aligned\n", (unsigned long long)info->vcpu.start); kvm_vgic_global_state.vcpu_base = 0; - } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) { - pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n", - (unsigned long long)resource_size(&info->vcpu), - PAGE_SIZE); - kvm_vgic_global_state.vcpu_base = 0; } else { kvm_vgic_global_state.vcpu_base = info->vcpu.start; kvm_vgic_global_state.can_emulate_gicv2 = true; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ada21f47f22b..8b47507faab5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, #ifdef CONFIG_KVM_COMPAT static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, unsigned long arg); +#define KVM_COMPAT(c) .compat_ioctl = (c) +#else +static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl, + unsigned long arg) { return -EINVAL; } +#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl #endif static int hardware_enable_all(void); static void hardware_disable_all(void); @@ -2396,11 +2401,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp) static struct file_operations kvm_vcpu_fops = { .release = kvm_vcpu_release, .unlocked_ioctl = kvm_vcpu_ioctl, -#ifdef CONFIG_KVM_COMPAT - .compat_ioctl = kvm_vcpu_compat_ioctl, -#endif .mmap = kvm_vcpu_mmap, .llseek = noop_llseek, + KVM_COMPAT(kvm_vcpu_compat_ioctl), }; /* @@ -2824,10 +2827,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp) static const struct file_operations kvm_device_fops = { .unlocked_ioctl = kvm_device_ioctl, -#ifdef CONFIG_KVM_COMPAT - .compat_ioctl = kvm_device_ioctl, -#endif .release = kvm_device_release, + KVM_COMPAT(kvm_device_ioctl), }; struct kvm_device *kvm_device_from_filp(struct file *filp) @@ -3165,10 +3166,8 @@ static long kvm_vm_compat_ioctl(struct file *filp, static struct file_operations kvm_vm_fops = { .release = kvm_vm_release, .unlocked_ioctl = kvm_vm_ioctl, -#ifdef CONFIG_KVM_COMPAT - .compat_ioctl = kvm_vm_compat_ioctl, -#endif .llseek = noop_llseek, + KVM_COMPAT(kvm_vm_compat_ioctl), }; static int kvm_dev_ioctl_create_vm(unsigned long type) @@ -3259,8 +3258,8 @@ static long kvm_dev_ioctl(struct file *filp, static struct file_operations kvm_chardev_ops = { .unlocked_ioctl = kvm_dev_ioctl, - .compat_ioctl = kvm_dev_ioctl, .llseek = noop_llseek, + KVM_COMPAT(kvm_dev_ioctl), }; static struct miscdevice kvm_dev = {
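The KVM_COMPAT() helper above keeps the various kvm file_operations initializers free of #ifdef CONFIG_KVM_COMPAT blocks: with compat support it plugs in the real handler, and without it every instance shares one stub that rejects compat ioctls with -EINVAL. A condensed, standalone sketch of the pattern (simplified types, illustrative names):

/* Standalone sketch of the KVM_COMPAT() pattern; not the kernel code itself. */
struct file;

struct fops_sketch {
	long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);
	long (*compat_ioctl)(struct file *, unsigned int, unsigned long);
};

#ifdef CONFIG_KVM_COMPAT
#define KVM_COMPAT(c)	.compat_ioctl = (c)
#else
/* One shared stub instead of a NULL pointer when compat is disabled. */
static long no_compat_ioctl(struct file *filp, unsigned int ioctl,
			    unsigned long arg)
{
	return -22;	/* -EINVAL, spelled out to keep the sketch standalone */
}
#define KVM_COMPAT(c)	.compat_ioctl = no_compat_ioctl
#endif

static long vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	return 0;	/* placeholder */
}

static struct fops_sketch vcpu_fops = {
	.unlocked_ioctl	= vcpu_ioctl,
	KVM_COMPAT(vcpu_ioctl),	/* same handler reused, as kvm_chardev_ops does */
};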