mirror of https://gitee.com/openkylin/linux.git
Linux 5.6-rc1
-----BEGIN PGP SIGNATURE----- iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl5AnxkeHHRvcnZhbGRz QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGca0H+QHA5sm2Mxlg/4hk E3M+vDPwn69KeHAVL6OUl3LT03J1+KuBdtAcbHlTm6ikBBHVghKySSVVixguZnOO rExO9o7vVIto/HpzLWgWPHoFj9Z5ij/Xn0O+QD1qY1pbwuKPV4w6U/NpyN48nv6L gsVmhS9Z8FzNqvyyqXmiuxoVN3tYdhcoRtNMpgG0jkvgGMy0nGzhjCJGqnRiXKGo Xu1zNRJ7SUsp3/hzDAKm0WNSa+gEVAJEGjb/JYAu01XoXxX3Z8OxeOdygmmV3a7t dGW1yTsWOSZlRMPRu4TI+ktTNvYldXn8qRPmjilVaDtdvzayL/4JM2ZnAg5SlnBB Qb+qBc8= =6xDw -----END PGP SIGNATURE----- Merge tag 'v5.6-rc1' into arm/fixes Linux 5.6-rc1 Merging in to avoid fixes pull request diffstats being noisy due to being based on -rc1. Signed-off-by: Olof Johansson <olof@lixom.net>
This commit is contained in:
commit
1b32b72b27
|
@ -0,0 +1,404 @@
|
|||
ZoneFS - Zone filesystem for Zoned block devices
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
zonefs is a very simple file system exposing each zone of a zoned block device
|
||||
as a file. Unlike a regular POSIX-compliant file system with native zoned block
|
||||
device support (e.g. f2fs), zonefs does not hide the sequential write
|
||||
constraint of zoned block devices to the user. Files representing sequential
|
||||
write zones of the device must be written sequentially starting from the end
|
||||
of the file (append only writes).
|
||||
|
||||
As such, zonefs is in essence closer to a raw block device access interface
|
||||
than to a full-featured POSIX file system. The goal of zonefs is to simplify
|
||||
the implementation of zoned block device support in applications by replacing
|
||||
raw block device file accesses with a richer file API, avoiding relying on
|
||||
direct block device file ioctls which may be more obscure to developers. One
|
||||
example of this approach is the implementation of LSM (log-structured merge)
|
||||
tree structures (such as used in RocksDB and LevelDB) on zoned block devices
|
||||
by allowing SSTables to be stored in a zone file similarly to a regular file
|
||||
system rather than as a range of sectors of the entire disk. The introduction
|
||||
of the higher level construct "one file is one zone" can help reducing the
|
||||
amount of changes needed in the application as well as introducing support for
|
||||
different application programming languages.
|
||||
|
||||
Zoned block devices
|
||||
-------------------
|
||||
|
||||
Zoned storage devices belong to a class of storage devices with an address
|
||||
space that is divided into zones. A zone is a group of consecutive LBAs and all
|
||||
zones are contiguous (there are no LBA gaps). Zones may have different types.
|
||||
* Conventional zones: there are no access constraints to LBAs belonging to
|
||||
conventional zones. Any read or write access can be executed, similarly to a
|
||||
regular block device.
|
||||
* Sequential zones: these zones accept random reads but must be written
|
||||
sequentially. Each sequential zone has a write pointer maintained by the
|
||||
device that keeps track of the mandatory start LBA position of the next write
|
||||
to the device. As a result of this write constraint, LBAs in a sequential zone
|
||||
cannot be overwritten. Sequential zones must first be erased using a special
|
||||
command (zone reset) before rewriting.
|
||||
|
||||
Zoned storage devices can be implemented using various recording and media
|
||||
technologies. The most common form of zoned storage today uses the SCSI Zoned
|
||||
Block Commands (ZBC) and Zoned ATA Commands (ZAC) interfaces on Shingled
|
||||
Magnetic Recording (SMR) HDDs.
|
||||
|
||||
Solid State Disks (SSD) storage devices can also implement a zoned interface
|
||||
to, for instance, reduce internal write amplification due to garbage collection.
|
||||
The NVMe Zoned NameSpace (ZNS) is a technical proposal of the NVMe standard
|
||||
committee aiming at adding a zoned storage interface to the NVMe protocol.
|
||||
|
||||
Zonefs Overview
|
||||
===============
|
||||
|
||||
Zonefs exposes the zones of a zoned block device as files. The files
|
||||
representing zones are grouped by zone type, which are themselves represented
|
||||
by sub-directories. This file structure is built entirely using zone information
|
||||
provided by the device and so does not require any complex on-disk metadata
|
||||
structure.
|
||||
|
||||
On-disk metadata
|
||||
----------------
|
||||
|
||||
zonefs on-disk metadata is reduced to an immutable super block which
|
||||
persistently stores a magic number and optional feature flags and values. On
|
||||
mount, zonefs uses blkdev_report_zones() to obtain the device zone configuration
|
||||
and populates the mount point with a static file tree solely based on this
|
||||
information. File sizes come from the device zone type and write pointer
|
||||
position managed by the device itself.
|
||||
|
||||
The super block is always written on disk at sector 0. The first zone of the
|
||||
device storing the super block is never exposed as a zone file by zonefs. If
|
||||
the zone containing the super block is a sequential zone, the mkzonefs format
|
||||
tool always "finishes" the zone, that is, it transitions the zone to a full
|
||||
state to make it read-only, preventing any data write.
|
||||
|
||||
Zone type sub-directories
|
||||
-------------------------
|
||||
|
||||
Files representing zones of the same type are grouped together under the same
|
||||
sub-directory automatically created on mount.
|
||||
|
||||
For conventional zones, the sub-directory "cnv" is used. This directory is
|
||||
however created if and only if the device has usable conventional zones. If
|
||||
the device only has a single conventional zone at sector 0, the zone will not
|
||||
be exposed as a file as it will be used to store the zonefs super block. For
|
||||
such devices, the "cnv" sub-directory will not be created.
|
||||
|
||||
For sequential write zones, the sub-directory "seq" is used.
|
||||
|
||||
These two directories are the only directories that exist in zonefs. Users
|
||||
cannot create other directories and cannot rename nor delete the "cnv" and
|
||||
"seq" sub-directories.
|
||||
|
||||
The size of the directories indicated by the st_size field of struct stat,
|
||||
obtained with the stat() or fstat() system calls, indicates the number of files
|
||||
existing under the directory.
|
||||
|
||||
Zone files
|
||||
----------
|
||||
|
||||
Zone files are named using the number of the zone they represent within the set
|
||||
of zones of a particular type. That is, both the "cnv" and "seq" directories
|
||||
contain files named "0", "1", "2", ... The file numbers also represent
|
||||
increasing zone start sector on the device.
|
||||
|
||||
All read and write operations to zone files are not allowed beyond the file
|
||||
maximum size, that is, beyond the zone size. Any access exceeding the zone
|
||||
size is failed with the -EFBIG error.
|
||||
|
||||
Creating, deleting, renaming or modifying any attribute of files and
|
||||
sub-directories is not allowed.
|
||||
|
||||
The number of blocks of a file as reported by stat() and fstat() indicates the
|
||||
size of the file zone, or in other words, the maximum file size.
|
||||
|
||||
Conventional zone files
|
||||
-----------------------
|
||||
|
||||
The size of conventional zone files is fixed to the size of the zone they
|
||||
represent. Conventional zone files cannot be truncated.
|
||||
|
||||
These files can be randomly read and written using any type of I/O operation:
|
||||
buffered I/Os, direct I/Os, memory mapped I/Os (mmap), etc. There are no I/O
|
||||
constraints for these files beyond the file size limit mentioned above.
|
||||
|
||||
Sequential zone files
|
||||
---------------------
|
||||
|
||||
The size of sequential zone files grouped in the "seq" sub-directory represents
|
||||
the file's zone write pointer position relative to the zone start sector.
|
||||
|
||||
Sequential zone files can only be written sequentially, starting from the file
|
||||
end, that is, write operations can only be append writes. Zonefs makes no
|
||||
attempt at accepting random writes and will fail any write request that has a
|
||||
start offset not corresponding to the end of the file, or to the end of the last
|
||||
write issued and still in-flight (for asynchronous I/O operations).
|
||||
|
||||
Since dirty page writeback by the page cache does not guarantee a sequential
|
||||
write pattern, zonefs prevents buffered writes and writeable shared mappings
|
||||
on sequential files. Only direct I/O writes are accepted for these files.
|
||||
zonefs relies on the sequential delivery of write I/O requests to the device
|
||||
implemented by the block layer elevator. An elevator implementing the sequential
|
||||
write feature for zoned block device (ELEVATOR_F_ZBD_SEQ_WRITE elevator feature)
|
||||
must be used. This type of elevator (e.g. mq-deadline) is set by default
|
||||
for zoned block devices on device initialization.
|
||||
|
||||
There are no restrictions on the type of I/O used for read operations in
|
||||
sequential zone files. Buffered I/Os, direct I/Os and shared read mappings are
|
||||
all accepted.
|
||||
|
||||
Truncating sequential zone files is allowed only down to 0, in which case, the
|
||||
zone is reset to rewind the file zone write pointer position to the start of
|
||||
the zone, or up to the zone size, in which case the file's zone is transitioned
|
||||
to the FULL state (finish zone operation).
|
||||
|
||||
Format options
|
||||
--------------
|
||||
|
||||
Several optional features of zonefs can be enabled at format time.
|
||||
* Conventional zone aggregation: ranges of contiguous conventional zones can be
|
||||
aggregated into a single larger file instead of the default one file per zone.
|
||||
* File ownership: The owner UID and GID of zone files is by default 0 (root)
|
||||
but can be changed to any valid UID/GID.
|
||||
* File access permissions: the default 640 access permissions can be changed.
|
||||
|
||||
IO error handling
|
||||
-----------------
|
||||
|
||||
Zoned block devices may fail I/O requests for reasons similar to regular block
|
||||
devices, e.g. due to bad sectors. However, in addition to such known I/O
|
||||
failure pattern, the standards governing zoned block devices behavior define
|
||||
additional conditions that result in I/O errors.
|
||||
|
||||
* A zone may transition to the read-only condition (BLK_ZONE_COND_READONLY):
|
||||
While the data already written in the zone is still readable, the zone can
|
||||
no longer be written. No user action on the zone (zone management command or
|
||||
read/write access) can change the zone condition back to a normal read/write
|
||||
state. While the reasons for the device to transition a zone to read-only
|
||||
state are not defined by the standards, a typical cause for such transition
|
||||
would be a defective write head on an HDD (all zones under this head are
|
||||
changed to read-only).
|
||||
|
||||
* A zone may transition to the offline condition (BLK_ZONE_COND_OFFLINE):
|
||||
An offline zone cannot be read nor written. No user action can transition an
|
||||
offline zone back to an operational good state. Similarly to zone read-only
|
||||
transitions, the reasons for a drive to transition a zone to the offline
|
||||
condition are undefined. A typical cause would be a defective read-write head
|
||||
on an HDD causing all zones on the platter under the broken head to be
|
||||
inaccessible.
|
||||
|
||||
* Unaligned write errors: These errors result from the host issuing write
|
||||
requests with a start sector that does not correspond to a zone write pointer
|
||||
position when the write request is executed by the device. Even though zonefs
|
||||
enforces sequential file write for sequential zones, unaligned write errors
|
||||
may still happen in the case of a partial failure of a very large direct I/O
|
||||
operation split into multiple BIOs/requests or asynchronous I/O operations.
|
||||
If one of the write requests within the set of sequential write requests
|
||||
issued to the device fails, all write requests queued after it will
|
||||
become unaligned and fail.
|
||||
|
||||
* Delayed write errors: similarly to regular block devices, if the device side
|
||||
write cache is enabled, write errors may occur in ranges of previously
|
||||
completed writes when the device write cache is flushed, e.g. on fsync().
|
||||
Similarly to the previous immediate unaligned write error case, delayed write
|
||||
errors can propagate through a stream of cached sequential data for a zone
|
||||
causing all data to be dropped after the sector that caused the error.
|
||||
|
||||
All I/O errors detected by zonefs are notified to the user with an error code
|
||||
returned for the system call that triggered or detected the error. The recovery
|
||||
actions taken by zonefs in response to I/O errors depend on the I/O type (read
|
||||
vs write) and on the reason for the error (bad sector, unaligned writes or zone
|
||||
condition change).
|
||||
|
||||
* For read I/O errors, zonefs does not execute any particular recovery action,
|
||||
but only if the file zone is still in a good condition and there is no
|
||||
inconsistency between the file inode size and its zone write pointer position.
|
||||
If a problem is detected, I/O error recovery is executed (see below table).
|
||||
|
||||
* For write I/O errors, zonefs I/O error recovery is always executed.
|
||||
|
||||
* A zone condition change to read-only or offline also always triggers zonefs
|
||||
I/O error recovery.
|
||||
|
||||
Zonefs minimal I/O error recovery may change a file size and file access
|
||||
permissions.
|
||||
|
||||
* File size changes:
|
||||
Immediate or delayed write errors in a sequential zone file may cause the file
|
||||
inode size to be inconsistent with the amount of data successfully written in
|
||||
the file zone. For instance, the partial failure of a multi-BIO large write
|
||||
operation will cause the zone write pointer to advance partially, even though
|
||||
the entire write operation will be reported as failed to the user. In such
|
||||
case, the file inode size must be advanced to reflect the zone write pointer
|
||||
change and eventually allow the user to restart writing at the end of the
|
||||
file.
|
||||
A file size may also be reduced to reflect a delayed write error detected on
|
||||
fsync(): in this case, the amount of data effectively written in the zone may
|
||||
be less than originally indicated by the file inode size. After such I/O
|
||||
error, zonefs always fixes a file inode size to reflect the amount of data
|
||||
persistently stored in the file zone.
|
||||
|
||||
* Access permission changes:
|
||||
A zone condition change to read-only is indicated with a change in the file
|
||||
access permissions to render the file read-only. This disables changes to the
|
||||
file attributes and data modification. For offline zones, all permissions
|
||||
(read and write) to the file are disabled.
|
||||
|
||||
Further action taken by zonefs I/O error recovery can be controlled by the user
|
||||
with the "errors=xxx" mount option. The table below summarizes the result of
|
||||
zonefs I/O error processing depending on the mount option and on the zone
|
||||
conditions.
|
||||
|
||||
+--------------+-----------+-----------------------------------------+
|
||||
| | | Post error state |
|
||||
| "errors=xxx" | device | access permissions |
|
||||
| mount | zone | file file device zone |
|
||||
| option | condition | size read write read write |
|
||||
+--------------+-----------+-----------------------------------------+
|
||||
| | good | fixed yes no yes yes |
|
||||
| remount-ro | read-only | fixed yes no yes no |
|
||||
| (default) | offline | 0 no no no no |
|
||||
+--------------+-----------+-----------------------------------------+
|
||||
| | good | fixed yes no yes yes |
|
||||
| zone-ro | read-only | fixed yes no yes no |
|
||||
| | offline | 0 no no no no |
|
||||
+--------------+-----------+-----------------------------------------+
|
||||
| | good | 0 no no yes yes |
|
||||
| zone-offline | read-only | 0 no no yes no |
|
||||
| | offline | 0 no no no no |
|
||||
+--------------+-----------+-----------------------------------------+
|
||||
| | good | fixed yes yes yes yes |
|
||||
| repair | read-only | fixed yes no yes no |
|
||||
| | offline | 0 no no no no |
|
||||
+--------------+-----------+-----------------------------------------+
|
||||
|
||||
Further notes:
|
||||
* The "errors=remount-ro" mount option is the default behavior of zonefs I/O
|
||||
error processing if no errors mount option is specified.
|
||||
* With the "errors=remount-ro" mount option, the change of the file access
|
||||
permissions to read-only applies to all files. The file system is remounted
|
||||
read-only.
|
||||
* Access permission and file size changes due to the device transitioning zones
|
||||
to the offline condition are permanent. Remounting or reformatting the device
|
||||
with mkfs.zonefs (mkzonefs) will not change back offline zone files to a good
|
||||
state.
|
||||
* File access permission changes to read-only due to the device transitioning
|
||||
zones to the read-only condition are permanent. Remounting or reformatting
|
||||
the device will not re-enable file write access.
|
||||
* File access permission changes implied by the remount-ro, zone-ro and
|
||||
zone-offline mount options are temporary for zones in a good condition.
|
||||
Unmounting and remounting the file system will restore the previous default
|
||||
(format time values) access rights to the files affected.
|
||||
* The repair mount option triggers only the minimal set of I/O error recovery
|
||||
actions, that is, file size fixes for zones in a good condition. Zones
|
||||
indicated as being read-only or offline by the device still imply changes to
|
||||
the zone file access permissions as noted in the table above.
|
||||
|
||||
Mount options
|
||||
-------------
|
||||
|
||||
zonefs defines the "errors=<behavior>" mount option to allow the user to specify
|
||||
zonefs behavior in response to I/O errors, inode size inconsistencies or zone
|
||||
condition changes. The defined behaviors are as follows:
|
||||
* remount-ro (default)
|
||||
* zone-ro
|
||||
* zone-offline
|
||||
* repair
|
||||
|
||||
The I/O error actions defined for each behavior is detailed in the previous
|
||||
section.
|
||||
|
||||
Zonefs User Space Tools
|
||||
=======================
|
||||
|
||||
The mkzonefs tool is used to format zoned block devices for use with zonefs.
|
||||
This tool is available on Github at:
|
||||
|
||||
https://github.com/damien-lemoal/zonefs-tools
|
||||
|
||||
zonefs-tools also includes a test suite which can be run against any zoned
|
||||
block device, including null_blk block device created with zoned mode.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
The following formats a 15TB host-managed SMR HDD with 256 MB zones
|
||||
with the conventional zones aggregation feature enabled.
|
||||
|
||||
# mkzonefs -o aggr_cnv /dev/sdX
|
||||
# mount -t zonefs /dev/sdX /mnt
|
||||
# ls -l /mnt/
|
||||
total 0
|
||||
dr-xr-xr-x 2 root root 1 Nov 25 13:23 cnv
|
||||
dr-xr-xr-x 2 root root 55356 Nov 25 13:23 seq
|
||||
|
||||
The size of the zone files sub-directories indicate the number of files
|
||||
existing for each type of zones. In this example, there is only one
|
||||
conventional zone file (all conventional zones are aggregated under a single
|
||||
file).
|
||||
|
||||
# ls -l /mnt/cnv
|
||||
total 137101312
|
||||
-rw-r----- 1 root root 140391743488 Nov 25 13:23 0
|
||||
|
||||
This aggregated conventional zone file can be used as a regular file.
|
||||
|
||||
# mkfs.ext4 /mnt/cnv/0
|
||||
# mount -o loop /mnt/cnv/0 /data
|
||||
|
||||
The "seq" sub-directory grouping files for sequential write zones has in this
|
||||
example 55356 zones.
|
||||
|
||||
# ls -lv /mnt/seq
|
||||
total 14511243264
|
||||
-rw-r----- 1 root root 0 Nov 25 13:23 0
|
||||
-rw-r----- 1 root root 0 Nov 25 13:23 1
|
||||
-rw-r----- 1 root root 0 Nov 25 13:23 2
|
||||
...
|
||||
-rw-r----- 1 root root 0 Nov 25 13:23 55354
|
||||
-rw-r----- 1 root root 0 Nov 25 13:23 55355
|
||||
|
||||
For sequential write zone files, the file size changes as data is appended at
|
||||
the end of the file, similarly to any regular file system.
|
||||
|
||||
# dd if=/dev/zero of=/mnt/seq/0 bs=4096 count=1 conv=notrunc oflag=direct
|
||||
1+0 records in
|
||||
1+0 records out
|
||||
4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00044121 s, 9.3 MB/s
|
||||
|
||||
# ls -l /mnt/seq/0
|
||||
-rw-r----- 1 root root 4096 Nov 25 13:23 /mnt/seq/0
|
||||
|
||||
The written file can be truncated to the zone size, preventing any further
|
||||
write operation.
|
||||
|
||||
# truncate -s 268435456 /mnt/seq/0
|
||||
# ls -l /mnt/seq/0
|
||||
-rw-r----- 1 root root 268435456 Nov 25 13:49 /mnt/seq/0
|
||||
|
||||
Truncation to 0 size allows freeing the file zone storage space and restart
|
||||
append-writes to the file.
|
||||
|
||||
# truncate -s 0 /mnt/seq/0
|
||||
# ls -l /mnt/seq/0
|
||||
-rw-r----- 1 root root 0 Nov 25 13:49 /mnt/seq/0
|
||||
|
||||
Since files are statically mapped to zones on the disk, the number of blocks of
|
||||
a file as reported by stat() and fstat() indicates the size of the file zone.
|
||||
|
||||
# stat /mnt/seq/0
|
||||
File: /mnt/seq/0
|
||||
Size: 0 Blocks: 524288 IO Block: 4096 regular empty file
|
||||
Device: 870h/2160d Inode: 50431 Links: 1
|
||||
Access: (0640/-rw-r-----) Uid: ( 0/ root) Gid: ( 0/ root)
|
||||
Access: 2019-11-25 13:23:57.048971997 +0900
|
||||
Modify: 2019-11-25 13:52:25.553805765 +0900
|
||||
Change: 2019-11-25 13:52:25.553805765 +0900
|
||||
Birth: -
|
||||
|
||||
The number of blocks of the file ("Blocks") in units of 512B blocks gives the
|
||||
maximum file size of 524288 * 512 B = 256 MB, corresponding to the device zone
|
||||
size in this example. Of note is that the "IO block" field always indicates the
|
||||
minimum I/O size for writes and corresponds to the device physical sector size.
|
|
@ -28,7 +28,6 @@ This document describes the Linux kernel Makefiles.
|
|||
--- 4.3 Using C++ for host programs
|
||||
--- 4.4 Controlling compiler options for host programs
|
||||
--- 4.5 When host programs are actually built
|
||||
--- 4.6 Using hostprogs-$(CONFIG_FOO)
|
||||
|
||||
=== 5 Kbuild clean infrastructure
|
||||
|
||||
|
@ -595,11 +594,11 @@ compilation stage.
|
|||
Two steps are required in order to use a host executable.
|
||||
|
||||
The first step is to tell kbuild that a host program exists. This is
|
||||
done utilising the variable hostprogs-y.
|
||||
done utilising the variable "hostprogs".
|
||||
|
||||
The second step is to add an explicit dependency to the executable.
|
||||
This can be done in two ways. Either add the dependency in a rule,
|
||||
or utilise the variable $(always).
|
||||
or utilise the variable "always-y".
|
||||
Both possibilities are described in the following.
|
||||
|
||||
4.1 Simple Host Program
|
||||
|
@ -612,7 +611,7 @@ Both possibilities are described in the following.
|
|||
|
||||
Example::
|
||||
|
||||
hostprogs-y := bin2hex
|
||||
hostprogs := bin2hex
|
||||
|
||||
Kbuild assumes in the above example that bin2hex is made from a single
|
||||
c-source file named bin2hex.c located in the same directory as
|
||||
|
@ -630,7 +629,7 @@ Both possibilities are described in the following.
|
|||
Example::
|
||||
|
||||
#scripts/lxdialog/Makefile
|
||||
hostprogs-y := lxdialog
|
||||
hostprogs := lxdialog
|
||||
lxdialog-objs := checklist.o lxdialog.o
|
||||
|
||||
Objects with extension .o are compiled from the corresponding .c
|
||||
|
@ -650,7 +649,7 @@ Both possibilities are described in the following.
|
|||
Example::
|
||||
|
||||
#scripts/kconfig/Makefile
|
||||
hostprogs-y := qconf
|
||||
hostprogs := qconf
|
||||
qconf-cxxobjs := qconf.o
|
||||
|
||||
In the example above the executable is composed of the C++ file
|
||||
|
@ -662,7 +661,7 @@ Both possibilities are described in the following.
|
|||
Example::
|
||||
|
||||
#scripts/kconfig/Makefile
|
||||
hostprogs-y := qconf
|
||||
hostprogs := qconf
|
||||
qconf-cxxobjs := qconf.o
|
||||
qconf-objs := check.o
|
||||
|
||||
|
@ -710,7 +709,7 @@ Both possibilities are described in the following.
|
|||
Example::
|
||||
|
||||
#drivers/pci/Makefile
|
||||
hostprogs-y := gen-devlist
|
||||
hostprogs := gen-devlist
|
||||
$(obj)/devlist.h: $(src)/pci.ids $(obj)/gen-devlist
|
||||
( cd $(obj); ./gen-devlist ) < $<
|
||||
|
||||
|
@ -718,47 +717,31 @@ Both possibilities are described in the following.
|
|||
$(obj)/gen-devlist is updated. Note that references to
|
||||
the host programs in special rules must be prefixed with $(obj).
|
||||
|
||||
(2) Use $(always)
|
||||
(2) Use always-y
|
||||
|
||||
When there is no suitable special rule, and the host program
|
||||
shall be built when a makefile is entered, the $(always)
|
||||
shall be built when a makefile is entered, the always-y
|
||||
variable shall be used.
|
||||
|
||||
Example::
|
||||
|
||||
#scripts/lxdialog/Makefile
|
||||
hostprogs-y := lxdialog
|
||||
always := $(hostprogs-y)
|
||||
hostprogs := lxdialog
|
||||
always-y := $(hostprogs)
|
||||
|
||||
This will tell kbuild to build lxdialog even if not referenced in
|
||||
any rule.
|
||||
|
||||
4.6 Using hostprogs-$(CONFIG_FOO)
|
||||
---------------------------------
|
||||
|
||||
A typical pattern in a Kbuild file looks like this:
|
||||
|
||||
Example::
|
||||
|
||||
#scripts/Makefile
|
||||
hostprogs-$(CONFIG_KALLSYMS) += kallsyms
|
||||
|
||||
Kbuild knows about both 'y' for built-in and 'm' for module.
|
||||
So if a config symbol evaluates to 'm', kbuild will still build
|
||||
the binary. In other words, Kbuild handles hostprogs-m exactly
|
||||
like hostprogs-y. But only hostprogs-y is recommended to be used
|
||||
when no CONFIG symbols are involved.
|
||||
|
||||
5 Kbuild clean infrastructure
|
||||
=============================
|
||||
|
||||
"make clean" deletes most generated files in the obj tree where the kernel
|
||||
is compiled. This includes generated files such as host programs.
|
||||
Kbuild knows targets listed in $(hostprogs-y), $(hostprogs-m), $(always),
|
||||
$(extra-y) and $(targets). They are all deleted during "make clean".
|
||||
Files matching the patterns "*.[oas]", "*.ko", plus some additional files
|
||||
generated by kbuild are deleted all over the kernel src tree when
|
||||
"make clean" is executed.
|
||||
Kbuild knows targets listed in $(hostprogs), $(always-y), $(always-m),
|
||||
$(always-), $(extra-y), $(extra-) and $(targets). They are all deleted
|
||||
during "make clean". Files matching the patterns "*.[oas]", "*.ko", plus
|
||||
some additional files generated by kbuild are deleted all over the kernel
|
||||
source tree when "make clean" is executed.
|
||||
|
||||
Additional files or directories can be specified in kbuild makefiles by use of
|
||||
$(clean-files).
|
||||
|
@ -1269,12 +1252,12 @@ When kbuild executes, the following steps are followed (roughly):
|
|||
Example::
|
||||
|
||||
#arch/x86/kernel/Makefile
|
||||
always := vmlinux.lds
|
||||
extra-y := vmlinux.lds
|
||||
|
||||
#Makefile
|
||||
export CPPFLAGS_vmlinux.lds += -P -C -U$(ARCH)
|
||||
|
||||
The assignment to $(always) is used to tell kbuild to build the
|
||||
The assignment to extra-y is used to tell kbuild to build the
|
||||
target vmlinux.lds.
|
||||
The assignment to $(CPPFLAGS_vmlinux.lds) tells kbuild to use the
|
||||
specified options when building the target vmlinux.lds.
|
||||
|
|
8
Kbuild
8
Kbuild
|
@ -7,7 +7,7 @@
|
|||
|
||||
bounds-file := include/generated/bounds.h
|
||||
|
||||
always := $(bounds-file)
|
||||
always-y := $(bounds-file)
|
||||
targets := kernel/bounds.s
|
||||
|
||||
$(bounds-file): kernel/bounds.s FORCE
|
||||
|
@ -28,7 +28,7 @@ $(timeconst-file): kernel/time/timeconst.bc FORCE
|
|||
|
||||
offsets-file := include/generated/asm-offsets.h
|
||||
|
||||
always += $(offsets-file)
|
||||
always-y += $(offsets-file)
|
||||
targets += arch/$(SRCARCH)/kernel/asm-offsets.s
|
||||
|
||||
arch/$(SRCARCH)/kernel/asm-offsets.s: $(timeconst-file) $(bounds-file)
|
||||
|
@ -39,7 +39,7 @@ $(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
|
|||
#####
|
||||
# Check for missing system calls
|
||||
|
||||
always += missing-syscalls
|
||||
always-y += missing-syscalls
|
||||
|
||||
quiet_cmd_syscalls = CALL $<
|
||||
cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
|
||||
|
@ -50,7 +50,7 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
|
|||
#####
|
||||
# Check atomic headers are up-to-date
|
||||
|
||||
always += old-atomics
|
||||
always-y += old-atomics
|
||||
|
||||
quiet_cmd_atomics = CALL $<
|
||||
cmd_atomics = $(CONFIG_SHELL) $<
|
||||
|
|
19
MAINTAINERS
19
MAINTAINERS
|
@ -13360,7 +13360,7 @@ S: Maintained
|
|||
F: fs/timerfd.c
|
||||
F: include/linux/timer*
|
||||
F: include/linux/time_namespace.h
|
||||
F: kernel/time_namespace.c
|
||||
F: kernel/time/namespace.c
|
||||
F: kernel/time/*timer*
|
||||
|
||||
POWER MANAGEMENT CORE
|
||||
|
@ -17138,7 +17138,6 @@ F: drivers/staging/unisys/
|
|||
UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
|
||||
R: Alim Akhtar <alim.akhtar@samsung.com>
|
||||
R: Avri Altman <avri.altman@wdc.com>
|
||||
R: Pedro Sousa <pedrom.sousa@synopsys.com>
|
||||
L: linux-scsi@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/scsi/ufs.txt
|
||||
|
@ -17792,6 +17791,12 @@ F: include/linux/vbox_utils.h
|
|||
F: include/uapi/linux/vbox*.h
|
||||
F: drivers/virt/vboxguest/
|
||||
|
||||
VIRTUAL BOX SHARED FOLDER VFS DRIVER:
|
||||
M: Hans de Goede <hdegoede@redhat.com>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
S: Maintained
|
||||
F: fs/vboxsf/*
|
||||
|
||||
VIRTUAL SERIO DEVICE DRIVER
|
||||
M: Stephen Chandler Paul <thatslyude@gmail.com>
|
||||
S: Maintained
|
||||
|
@ -18491,6 +18496,16 @@ L: linux-kernel@vger.kernel.org
|
|||
S: Maintained
|
||||
F: arch/x86/kernel/cpu/zhaoxin.c
|
||||
|
||||
ZONEFS FILESYSTEM
|
||||
M: Damien Le Moal <damien.lemoal@wdc.com>
|
||||
M: Naohiro Aota <naohiro.aota@wdc.com>
|
||||
R: Johannes Thumshirn <jth@kernel.org>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs.git
|
||||
S: Maintained
|
||||
F: fs/zonefs/
|
||||
F: Documentation/filesystems/zonefs.txt
|
||||
|
||||
ZPOOL COMPRESSED PAGE STORAGE API
|
||||
M: Dan Streetman <ddstreet@ieee.org>
|
||||
L: linux-mm@kvack.org
|
||||
|
|
6
Makefile
6
Makefile
|
@ -1,8 +1,8 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
VERSION = 5
|
||||
PATCHLEVEL = 5
|
||||
PATCHLEVEL = 6
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION =
|
||||
EXTRAVERSION = -rc1
|
||||
NAME = Kleptomaniac Octopus
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
@ -1679,7 +1679,7 @@ PHONY += descend $(build-dirs)
|
|||
descend: $(build-dirs)
|
||||
$(build-dirs): prepare
|
||||
$(Q)$(MAKE) $(build)=$@ \
|
||||
single-build=$(if $(filter-out $@/, $(single-no-ko)),1) \
|
||||
single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \
|
||||
need-builtin=1 need-modorder=1
|
||||
|
||||
clean-dirs := $(addprefix _clean_, $(clean-dirs))
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
# Copyright (C) 1994 by Linus Torvalds
|
||||
#
|
||||
|
||||
hostprogs-y := tools/mkbb tools/objstrip
|
||||
hostprogs := tools/mkbb tools/objstrip
|
||||
targets := vmlinux.gz vmlinux \
|
||||
vmlinux.nh tools/lxboot tools/bootlx tools/bootph \
|
||||
tools/bootpzh bootloader bootpheader bootpzheader
|
||||
|
|
|
@ -326,16 +326,16 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
|
|||
#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
|
||||
|
||||
/*
|
||||
* GITS_VPROPBASER - hi and lo bits may be accessed independently.
|
||||
* GICR_VPROPBASER - hi and lo bits may be accessed independently.
|
||||
*/
|
||||
#define gits_read_vpropbaser(c) __gic_readq_nonatomic(c)
|
||||
#define gits_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
|
||||
#define gicr_read_vpropbaser(c) __gic_readq_nonatomic(c)
|
||||
#define gicr_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
|
||||
|
||||
/*
|
||||
* GITS_VPENDBASER - the Valid bit must be cleared before changing
|
||||
* GICR_VPENDBASER - the Valid bit must be cleared before changing
|
||||
* anything else.
|
||||
*/
|
||||
static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
|
||||
static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
|
@ -352,7 +352,7 @@ static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
|
|||
__gic_writeq_nonatomic(val, addr);
|
||||
}
|
||||
|
||||
#define gits_read_vpendbaser(c) __gic_readq_nonatomic(c)
|
||||
#define gicr_read_vpendbaser(c) __gic_readq_nonatomic(c)
|
||||
|
||||
static inline bool gic_prio_masking_enabled(void)
|
||||
{
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32
|
||||
include $(srctree)/lib/vdso/Makefile
|
||||
|
||||
hostprogs-y := vdsomunge
|
||||
hostprogs := vdsomunge
|
||||
|
||||
obj-vdso := vgettimeofday.o datapage.o note.o
|
||||
|
||||
|
|
|
@ -140,11 +140,11 @@ static inline u32 gic_read_rpr(void)
|
|||
#define gicr_write_pendbaser(v, c) writeq_relaxed(v, c)
|
||||
#define gicr_read_pendbaser(c) readq_relaxed(c)
|
||||
|
||||
#define gits_write_vpropbaser(v, c) writeq_relaxed(v, c)
|
||||
#define gits_read_vpropbaser(c) readq_relaxed(c)
|
||||
#define gicr_write_vpropbaser(v, c) writeq_relaxed(v, c)
|
||||
#define gicr_read_vpropbaser(c) readq_relaxed(c)
|
||||
|
||||
#define gits_write_vpendbaser(v, c) writeq_relaxed(v, c)
|
||||
#define gits_read_vpendbaser(c) readq_relaxed(c)
|
||||
#define gicr_write_vpendbaser(v, c) writeq_relaxed(v, c)
|
||||
#define gicr_read_vpendbaser(c) readq_relaxed(c)
|
||||
|
||||
static inline bool gic_prio_masking_enabled(void)
|
||||
{
|
||||
|
|
|
@ -115,9 +115,9 @@ VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd)
|
|||
|
||||
# Borrow vdsomunge.c from the arm vDSO
|
||||
# We have to use a relative path because scripts/Makefile.host prefixes
|
||||
# $(hostprogs-y) with $(obj)
|
||||
# $(hostprogs) with $(obj)
|
||||
munge := ../../../arm/vdso/vdsomunge
|
||||
hostprogs-y := $(munge)
|
||||
hostprogs := $(munge)
|
||||
|
||||
c-obj-vdso := note.o
|
||||
c-obj-vdso-gettimeofday := vgettimeofday.o
|
||||
|
|
|
@ -21,7 +21,7 @@ endif
|
|||
drop-sections := .reginfo .mdebug .comment .note .pdr .options .MIPS.options
|
||||
strip-flags := $(addprefix --remove-section=,$(drop-sections))
|
||||
|
||||
hostprogs-y := elf2ecoff
|
||||
hostprogs := elf2ecoff
|
||||
|
||||
suffix-y := bin
|
||||
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
|
||||
|
|
|
@ -84,7 +84,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
|
|||
HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
|
||||
|
||||
# Calculate the load address of the compressed kernel image
|
||||
hostprogs-y := calc_vmlinuz_load_addr
|
||||
hostprogs := calc_vmlinuz_load_addr
|
||||
|
||||
ifneq ($(zload-y),)
|
||||
VMLINUZ_LOAD_ADDRESS := $(zload-y)
|
||||
|
@ -112,7 +112,7 @@ ifdef CONFIG_MACH_DECSTATION
|
|||
endif
|
||||
|
||||
# elf2ecoff can only handle 32bit image
|
||||
hostprogs-y += ../elf2ecoff
|
||||
hostprogs += ../elf2ecoff
|
||||
|
||||
ifdef CONFIG_32BIT
|
||||
VMLINUZ = vmlinuz
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
hostprogs-y += relocs
|
||||
hostprogs += relocs
|
||||
relocs-objs += relocs_32.o
|
||||
relocs-objs += relocs_64.o
|
||||
relocs-objs += relocs_main.o
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
hostprogs-y := elf-entry
|
||||
hostprogs := elf-entry
|
||||
PHONY += elf-entry
|
||||
elf-entry: $(obj)/elf-entry
|
||||
@:
|
||||
|
||||
hostprogs-$(CONFIG_CPU_LOONGSON3_WORKAROUNDS) += loongson3-llsc-check
|
||||
hostprogs += loongson3-llsc-check
|
||||
PHONY += loongson3-llsc-check
|
||||
loongson3-llsc-check: $(obj)/loongson3-llsc-check
|
||||
@:
|
||||
|
|
|
@ -100,7 +100,7 @@ $(obj)/%.so.raw: OBJCOPYFLAGS := -S
|
|||
$(obj)/%.so.raw: $(obj)/%.so.dbg.raw FORCE
|
||||
$(call if_changed,objcopy)
|
||||
|
||||
hostprogs-y := genvdso
|
||||
hostprogs := genvdso
|
||||
|
||||
quiet_cmd_genvdso = GENVDSO $@
|
||||
define cmd_genvdso
|
||||
|
|
|
@ -224,7 +224,7 @@ $(patsubst %.S,%.o, $(filter %.S, $(src-boot))): %.o: %.S FORCE
|
|||
$(obj)/wrapper.a: $(obj-wlib) FORCE
|
||||
$(call if_changed,bootar)
|
||||
|
||||
hostprogs-y := addnote hack-coff mktree
|
||||
hostprogs := addnote hack-coff mktree
|
||||
|
||||
targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
|
||||
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
|
||||
|
@ -464,7 +464,7 @@ WRAPPER_BINDIR := /usr/sbin
|
|||
INSTALL := install
|
||||
|
||||
extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y))
|
||||
hostprogs-installed := $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs-y))
|
||||
hostprogs-installed := $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs))
|
||||
wrapper-installed := $(DESTDIR)$(WRAPPER_BINDIR)/wrapper
|
||||
dts-installed := $(patsubst $(dtstree)/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(dtstree)/*.dts))
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
|
|||
{
|
||||
int oldval = 0, ret;
|
||||
|
||||
allow_write_to_user(uaddr, sizeof(*uaddr));
|
||||
allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
|
||||
pagefault_disable();
|
||||
|
||||
switch (op) {
|
||||
|
@ -62,7 +62,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
|
|||
|
||||
*oval = oldval;
|
||||
|
||||
prevent_write_to_user(uaddr, sizeof(*uaddr));
|
||||
prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -76,7 +76,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|||
if (!access_ok(uaddr, sizeof(u32)))
|
||||
return -EFAULT;
|
||||
|
||||
allow_write_to_user(uaddr, sizeof(*uaddr));
|
||||
allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
|
||||
|
||||
__asm__ __volatile__ (
|
||||
PPC_ATOMIC_ENTRY_BARRIER
|
||||
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
|
||||
|
@ -97,7 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|||
: "cc", "memory");
|
||||
|
||||
*uval = prev;
|
||||
prevent_write_to_user(uaddr, sizeof(*uaddr));
|
||||
prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -214,7 +214,7 @@ transfer_to_handler_cont:
|
|||
* To speed up the syscall path where interrupts stay on, let's check
|
||||
* first if we are changing the MSR value at all.
|
||||
*/
|
||||
tophys(r12, r1)
|
||||
tophys_novmstack r12, r1
|
||||
lwz r12,_MSR(r12)
|
||||
andi. r12,r12,MSR_EE
|
||||
bne 1f
|
||||
|
|
|
@ -10,8 +10,8 @@ PHONY += kapi
|
|||
|
||||
kapi: $(kapi-hdrs-y)
|
||||
|
||||
hostprogs-y += gen_facilities
|
||||
hostprogs-y += gen_opcode_table
|
||||
hostprogs += gen_facilities
|
||||
hostprogs += gen_opcode_table
|
||||
|
||||
HOSTCFLAGS_gen_facilities.o += $(LINUXINCLUDE)
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
ROOT_IMG := /usr/src/root.img
|
||||
ELFTOAOUT := elftoaout
|
||||
|
||||
hostprogs-y := piggyback
|
||||
hostprogs := piggyback
|
||||
targets := tftpboot.img image zImage vmlinux.aout
|
||||
clean-files := System.map
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
|
|||
$(call if_changed,vdso)
|
||||
|
||||
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
|
||||
hostprogs-y += vdso2c
|
||||
hostprogs += vdso2c
|
||||
|
||||
quiet_cmd_vdso2c = VDSO2C $@
|
||||
cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
|
||||
|
|
|
@ -45,8 +45,8 @@ setup-y += video-vesa.o
|
|||
setup-y += video-bios.o
|
||||
|
||||
targets += $(setup-y)
|
||||
hostprogs-y := tools/build
|
||||
hostprogs-$(CONFIG_X86_FEATURE_NAMES) += mkcpustr
|
||||
hostprogs := tools/build
|
||||
hostprogs += mkcpustr
|
||||
|
||||
HOST_EXTRACFLAGS += -I$(srctree)/tools/include \
|
||||
-include include/generated/autoconf.h \
|
||||
|
|
|
@ -58,7 +58,7 @@ KBUILD_LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
|
|||
endif
|
||||
LDFLAGS_vmlinux := -T
|
||||
|
||||
hostprogs-y := mkpiggy
|
||||
hostprogs := mkpiggy
|
||||
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
|
||||
|
||||
sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
|
||||
|
|
|
@ -393,7 +393,13 @@ int count_immovable_mem_regions(void)
|
|||
table = table_addr + sizeof(struct acpi_table_srat);
|
||||
|
||||
while (table + sizeof(struct acpi_subtable_header) < table_end) {
|
||||
|
||||
sub_table = (struct acpi_subtable_header *)table;
|
||||
if (!sub_table->length) {
|
||||
debug_putstr("Invalid zero length SRAT subtable.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (sub_table->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) {
|
||||
struct acpi_srat_mem_affinity *ma;
|
||||
|
||||
|
|
|
@ -59,7 +59,7 @@ $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
|
|||
$(call if_changed,vdso_and_check)
|
||||
|
||||
HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/$(SUBARCH)/include/uapi
|
||||
hostprogs-y += vdso2c
|
||||
hostprogs += vdso2c
|
||||
|
||||
quiet_cmd_vdso2c = VDSO2C $@
|
||||
cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
|
||||
|
|
|
@ -21,11 +21,15 @@
|
|||
#include <linux/hyperv.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/cpuhotplug.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
#include <clocksource/hyperv_timer.h>
|
||||
|
||||
void *hv_hypercall_pg;
|
||||
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
|
||||
|
||||
/* Storage to save the hypercall page temporarily for hibernation */
|
||||
static void *hv_hypercall_pg_saved;
|
||||
|
||||
u32 *hv_vp_index;
|
||||
EXPORT_SYMBOL_GPL(hv_vp_index);
|
||||
|
||||
|
@ -246,6 +250,48 @@ static int __init hv_pci_init(void)
|
|||
return 1;
|
||||
}
|
||||
|
||||
static int hv_suspend(void)
|
||||
{
|
||||
union hv_x64_msr_hypercall_contents hypercall_msr;
|
||||
|
||||
/*
|
||||
* Reset the hypercall page as it is going to be invalidated
|
||||
* accross hibernation. Setting hv_hypercall_pg to NULL ensures
|
||||
* that any subsequent hypercall operation fails safely instead of
|
||||
* crashing due to an access of an invalid page. The hypercall page
|
||||
* pointer is restored on resume.
|
||||
*/
|
||||
hv_hypercall_pg_saved = hv_hypercall_pg;
|
||||
hv_hypercall_pg = NULL;
|
||||
|
||||
/* Disable the hypercall page in the hypervisor */
|
||||
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
||||
hypercall_msr.enable = 0;
|
||||
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hv_resume(void)
|
||||
{
|
||||
union hv_x64_msr_hypercall_contents hypercall_msr;
|
||||
|
||||
/* Re-enable the hypercall page */
|
||||
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
||||
hypercall_msr.enable = 1;
|
||||
hypercall_msr.guest_physical_address =
|
||||
vmalloc_to_pfn(hv_hypercall_pg_saved);
|
||||
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
||||
|
||||
hv_hypercall_pg = hv_hypercall_pg_saved;
|
||||
hv_hypercall_pg_saved = NULL;
|
||||
}
|
||||
|
||||
static struct syscore_ops hv_syscore_ops = {
|
||||
.suspend = hv_suspend,
|
||||
.resume = hv_resume,
|
||||
};
|
||||
|
||||
/*
|
||||
* This function is to be invoked early in the boot sequence after the
|
||||
* hypervisor has been detected.
|
||||
|
@ -330,6 +376,8 @@ void __init hyperv_init(void)
|
|||
|
||||
x86_init.pci.arch_init = hv_pci_init;
|
||||
|
||||
register_syscore_ops(&hv_syscore_ops);
|
||||
|
||||
return;
|
||||
|
||||
remove_cpuhp_state:
|
||||
|
@ -349,6 +397,8 @@ void hyperv_cleanup(void)
|
|||
{
|
||||
union hv_x64_msr_hypercall_contents hypercall_msr;
|
||||
|
||||
unregister_syscore_ops(&hv_syscore_ops);
|
||||
|
||||
/* Reset our OS id */
|
||||
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
|
||||
|
||||
|
|
|
@ -140,6 +140,7 @@ extern void apic_soft_disable(void);
|
|||
extern void lapic_shutdown(void);
|
||||
extern void sync_Arb_IDs(void);
|
||||
extern void init_bsp_APIC(void);
|
||||
extern void apic_intr_mode_select(void);
|
||||
extern void apic_intr_mode_init(void);
|
||||
extern void init_apic_mappings(void);
|
||||
void register_lapic_address(unsigned long address);
|
||||
|
@ -188,6 +189,7 @@ static inline void disable_local_APIC(void) { }
|
|||
# define setup_secondary_APIC_clock x86_init_noop
|
||||
static inline void lapic_update_tsc_freq(void) { }
|
||||
static inline void init_bsp_APIC(void) { }
|
||||
static inline void apic_intr_mode_select(void) { }
|
||||
static inline void apic_intr_mode_init(void) { }
|
||||
static inline void lapic_assign_system_vectors(void) { }
|
||||
static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { }
|
||||
|
@ -452,6 +454,14 @@ static inline void ack_APIC_irq(void)
|
|||
apic_eoi();
|
||||
}
|
||||
|
||||
|
||||
static inline bool lapic_vector_set_in_irr(unsigned int vector)
|
||||
{
|
||||
u32 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
|
||||
|
||||
return !!(irr & (1U << (vector % 32)));
|
||||
}
|
||||
|
||||
static inline unsigned default_get_apic_id(unsigned long x)
|
||||
{
|
||||
unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
|
||||
|
|
|
@ -51,12 +51,14 @@ struct x86_init_resources {
|
|||
* are set up.
|
||||
* @intr_init: interrupt init code
|
||||
* @trap_init: platform specific trap setup
|
||||
* @intr_mode_select: interrupt delivery mode selection
|
||||
* @intr_mode_init: interrupt delivery mode setup
|
||||
*/
|
||||
struct x86_init_irqs {
|
||||
void (*pre_vector_init)(void);
|
||||
void (*intr_init)(void);
|
||||
void (*trap_init)(void);
|
||||
void (*intr_mode_select)(void);
|
||||
void (*intr_mode_init)(void);
|
||||
};
|
||||
|
||||
|
|
|
@ -830,8 +830,17 @@ bool __init apic_needs_pit(void)
|
|||
if (!tsc_khz || !cpu_khz)
|
||||
return true;
|
||||
|
||||
/* Is there an APIC at all? */
|
||||
if (!boot_cpu_has(X86_FEATURE_APIC))
|
||||
/* Is there an APIC at all or is it disabled? */
|
||||
if (!boot_cpu_has(X86_FEATURE_APIC) || disable_apic)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* If interrupt delivery mode is legacy PIC or virtual wire without
|
||||
* configuration, the local APIC timer wont be set up. Make sure
|
||||
* that the PIT is initialized.
|
||||
*/
|
||||
if (apic_intr_mode == APIC_PIC ||
|
||||
apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG)
|
||||
return true;
|
||||
|
||||
/* Virt guests may lack ARAT, but still have DEADLINE */
|
||||
|
@ -1322,7 +1331,7 @@ void __init sync_Arb_IDs(void)
|
|||
|
||||
enum apic_intr_mode_id apic_intr_mode __ro_after_init;
|
||||
|
||||
static int __init apic_intr_mode_select(void)
|
||||
static int __init __apic_intr_mode_select(void)
|
||||
{
|
||||
/* Check kernel option */
|
||||
if (disable_apic) {
|
||||
|
@ -1384,6 +1393,12 @@ static int __init apic_intr_mode_select(void)
|
|||
return APIC_SYMMETRIC_IO;
|
||||
}
|
||||
|
||||
/* Select the interrupt delivery mode for the BSP */
|
||||
void __init apic_intr_mode_select(void)
|
||||
{
|
||||
apic_intr_mode = __apic_intr_mode_select();
|
||||
}
|
||||
|
||||
/*
|
||||
* An initial setup of the virtual wire mode.
|
||||
*/
|
||||
|
@ -1440,8 +1455,6 @@ void __init apic_intr_mode_init(void)
|
|||
{
|
||||
bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);
|
||||
|
||||
apic_intr_mode = apic_intr_mode_select();
|
||||
|
||||
switch (apic_intr_mode) {
|
||||
case APIC_PIC:
|
||||
pr_info("APIC: Keep in PIC mode(8259)\n");
|
||||
|
@ -2626,6 +2639,13 @@ static int lapic_suspend(void)
|
|||
#endif
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
/*
|
||||
* Mask IOAPIC before disabling the local APIC to prevent stale IRR
|
||||
* entries on some implementations.
|
||||
*/
|
||||
mask_ioapic_entries();
|
||||
|
||||
disable_local_APIC();
|
||||
|
||||
irq_remapping_disable();
|
||||
|
|
|
@ -23,10 +23,8 @@
|
|||
|
||||
static struct irq_domain *msi_default_domain;
|
||||
|
||||
static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg)
|
||||
{
|
||||
struct irq_cfg *cfg = irqd_cfg(data);
|
||||
|
||||
msg->address_hi = MSI_ADDR_BASE_HI;
|
||||
|
||||
if (x2apic_enabled())
|
||||
|
@ -47,6 +45,127 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
|
|||
MSI_DATA_VECTOR(cfg->vector);
|
||||
}
|
||||
|
||||
static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
{
|
||||
__irq_msi_compose_msg(irqd_cfg(data), msg);
|
||||
}
|
||||
|
||||
static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg)
|
||||
{
|
||||
struct msi_msg msg[2] = { [1] = { }, };
|
||||
|
||||
__irq_msi_compose_msg(cfg, msg);
|
||||
irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg);
|
||||
}
|
||||
|
||||
static int
|
||||
msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
|
||||
{
|
||||
struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd);
|
||||
struct irq_data *parent = irqd->parent_data;
|
||||
unsigned int cpu;
|
||||
int ret;
|
||||
|
||||
/* Save the current configuration */
|
||||
cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
|
||||
old_cfg = *cfg;
|
||||
|
||||
/* Allocate a new target vector */
|
||||
ret = parent->chip->irq_set_affinity(parent, mask, force);
|
||||
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* For non-maskable and non-remapped MSI interrupts the migration
|
||||
* to a different destination CPU and a different vector has to be
|
||||
* done careful to handle the possible stray interrupt which can be
|
||||
* caused by the non-atomic update of the address/data pair.
|
||||
*
|
||||
* Direct update is possible when:
|
||||
* - The MSI is maskable (remapped MSI does not use this code path)).
|
||||
* The quirk bit is not set in this case.
|
||||
* - The new vector is the same as the old vector
|
||||
* - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
|
||||
* - The new destination CPU is the same as the old destination CPU
|
||||
*/
|
||||
if (!irqd_msi_nomask_quirk(irqd) ||
|
||||
cfg->vector == old_cfg.vector ||
|
||||
old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
|
||||
cfg->dest_apicid == old_cfg.dest_apicid) {
|
||||
irq_msi_update_msg(irqd, cfg);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Paranoia: Validate that the interrupt target is the local
|
||||
* CPU.
|
||||
*/
|
||||
if (WARN_ON_ONCE(cpu != smp_processor_id())) {
|
||||
irq_msi_update_msg(irqd, cfg);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Redirect the interrupt to the new vector on the current CPU
|
||||
* first. This might cause a spurious interrupt on this vector if
|
||||
* the device raises an interrupt right between this update and the
|
||||
* update to the final destination CPU.
|
||||
*
|
||||
* If the vector is in use then the installed device handler will
|
||||
* denote it as spurious which is no harm as this is a rare event
|
||||
* and interrupt handlers have to cope with spurious interrupts
|
||||
* anyway. If the vector is unused, then it is marked so it won't
|
||||
* trigger the 'No irq handler for vector' warning in do_IRQ().
|
||||
*
|
||||
* This requires to hold vector lock to prevent concurrent updates to
|
||||
* the affected vector.
|
||||
*/
|
||||
lock_vector_lock();
|
||||
|
||||
/*
|
||||
* Mark the new target vector on the local CPU if it is currently
|
||||
* unused. Reuse the VECTOR_RETRIGGERED state which is also used in
|
||||
* the CPU hotplug path for a similar purpose. This cannot be
|
||||
* undone here as the current CPU has interrupts disabled and
|
||||
* cannot handle the interrupt before the whole set_affinity()
|
||||
* section is done. In the CPU unplug case, the current CPU is
|
||||
* about to vanish and will not handle any interrupts anymore. The
|
||||
* vector is cleaned up when the CPU comes online again.
|
||||
*/
|
||||
if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector])))
|
||||
this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED);
|
||||
|
||||
/* Redirect it to the new vector on the local CPU temporarily */
|
||||
old_cfg.vector = cfg->vector;
|
||||
irq_msi_update_msg(irqd, &old_cfg);
|
||||
|
||||
/* Now transition it to the target CPU */
|
||||
irq_msi_update_msg(irqd, cfg);
|
||||
|
||||
/*
|
||||
* All interrupts after this point are now targeted at the new
|
||||
* vector/CPU.
|
||||
*
|
||||
* Drop vector lock before testing whether the temporary assignment
|
||||
* to the local CPU was hit by an interrupt raised in the device,
|
||||
* because the retrigger function acquires vector lock again.
|
||||
*/
|
||||
unlock_vector_lock();
|
||||
|
||||
/*
|
||||
* Check whether the transition raced with a device interrupt and
|
||||
* is pending in the local APICs IRR. It is safe to do this outside
|
||||
* of vector lock as the irq_desc::lock of this interrupt is still
|
||||
* held and interrupts are disabled: The check is not accessing the
|
||||
* underlying vector store. It's just checking the local APIC's
|
||||
* IRR.
|
||||
*/
|
||||
if (lapic_vector_set_in_irr(cfg->vector))
|
||||
irq_data_get_irq_chip(irqd)->irq_retrigger(irqd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
|
||||
* which implement the MSI or MSI-X Capability Structure.
|
||||
|
@ -58,6 +177,7 @@ static struct irq_chip pci_msi_controller = {
|
|||
.irq_ack = irq_chip_ack_parent,
|
||||
.irq_retrigger = irq_chip_retrigger_hierarchy,
|
||||
.irq_compose_msi_msg = irq_msi_compose_msg,
|
||||
.irq_set_affinity = msi_set_affinity,
|
||||
.flags = IRQCHIP_SKIP_SET_WAKE,
|
||||
};
|
||||
|
||||
|
@ -146,6 +266,8 @@ void __init arch_init_msi_domain(struct irq_domain *parent)
|
|||
}
|
||||
if (!msi_default_domain)
|
||||
pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
|
||||
else
|
||||
msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IRQ_REMAP
|
||||
|
|
|
@ -91,10 +91,18 @@ void __init hpet_time_init(void)
|
|||
|
||||
static __init void x86_late_time_init(void)
|
||||
{
|
||||
x86_init.timers.timer_init();
|
||||
/*
|
||||
* After PIT/HPET timers init, select and setup
|
||||
* the final interrupt mode for delivering IRQs.
|
||||
* Before PIT/HPET init, select the interrupt mode. This is required
|
||||
* to make the decision whether PIT should be initialized correct.
|
||||
*/
|
||||
x86_init.irqs.intr_mode_select();
|
||||
|
||||
/* Setup the legacy timers */
|
||||
x86_init.timers.timer_init();
|
||||
|
||||
/*
|
||||
* After PIT/HPET timers init, set up the final interrupt mode for
|
||||
* delivering IRQs.
|
||||
*/
|
||||
x86_init.irqs.intr_mode_init();
|
||||
tsc_init();
|
||||
|
|
|
@ -80,6 +80,7 @@ struct x86_init_ops x86_init __initdata = {
|
|||
.pre_vector_init = init_ISA_irqs,
|
||||
.intr_init = native_init_IRQ,
|
||||
.trap_init = x86_init_noop,
|
||||
.intr_mode_select = apic_intr_mode_select,
|
||||
.intr_mode_init = apic_intr_mode_init
|
||||
},
|
||||
|
||||
|
|
|
@ -308,7 +308,7 @@ static void __init efi_clean_memmap(void)
|
|||
.phys_map = efi.memmap.phys_map,
|
||||
.desc_version = efi.memmap.desc_version,
|
||||
.desc_size = efi.memmap.desc_size,
|
||||
.size = data.desc_size * (efi.memmap.nr_map - n_removal),
|
||||
.size = efi.memmap.desc_size * (efi.memmap.nr_map - n_removal),
|
||||
.flags = 0,
|
||||
};
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ OBJECT_FILES_NON_STANDARD := y
|
|||
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
|
||||
KCOV_INSTRUMENT := n
|
||||
|
||||
always := realmode.bin realmode.relocs
|
||||
always-y := realmode.bin realmode.relocs
|
||||
|
||||
wakeup-objs := wakeup_asm.o wakemain.o video-mode.o
|
||||
wakeup-objs += copy.o bioscall.o regs.o
|
||||
|
|
|
@ -26,7 +26,7 @@ posttest: $(obj)/insn_decoder_test vmlinux $(obj)/insn_sanity
|
|||
$(call cmd,posttest)
|
||||
$(call cmd,sanitytest)
|
||||
|
||||
hostprogs-y += insn_decoder_test insn_sanity
|
||||
hostprogs += insn_decoder_test insn_sanity
|
||||
|
||||
# -I needed for generated C source and C source which in the kernel tree.
|
||||
HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
|
||||
|
@ -39,7 +39,7 @@ $(obj)/insn_decoder_test.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/l
|
|||
$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
|
||||
|
||||
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
|
||||
hostprogs-y += relocs
|
||||
hostprogs += relocs
|
||||
relocs-objs := relocs_32.o relocs_64.o relocs_common.o
|
||||
PHONY += relocs
|
||||
relocs: $(obj)/relocs
|
||||
|
|
|
@ -1205,6 +1205,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
|||
x86_platform.get_nmi_reason = xen_get_nmi_reason;
|
||||
|
||||
x86_init.resources.memory_setup = xen_memory_setup;
|
||||
x86_init.irqs.intr_mode_select = x86_init_noop;
|
||||
x86_init.irqs.intr_mode_init = x86_init_noop;
|
||||
x86_init.oem.arch_setup = xen_arch_setup;
|
||||
x86_init.oem.banner = xen_banner;
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
ccflags-y := -Idrivers/gpu/drm/amd/include
|
||||
|
||||
hostprogs-y := mkregtable
|
||||
hostprogs := mkregtable
|
||||
clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
|
||||
|
||||
quiet_cmd_mkregtable = MKREGTABLE $@
|
||||
|
|
|
@ -438,7 +438,7 @@ config CSKY_MPINTC
|
|||
help
|
||||
Say yes here to enable C-SKY SMP interrupt controller driver used
|
||||
for C-SKY SMP system.
|
||||
In fact it's not mmio map in hw and it use ld/st to visit the
|
||||
In fact it's not mmio map in hardware and it uses ld/st to visit the
|
||||
controller's register inside CPU.
|
||||
|
||||
config CSKY_APB_INTC
|
||||
|
@ -446,7 +446,7 @@ config CSKY_APB_INTC
|
|||
depends on CSKY
|
||||
help
|
||||
Say yes here to enable C-SKY APB interrupt controller driver used
|
||||
by C-SKY single core SOC system. It use mmio map apb-bus to visit
|
||||
by C-SKY single core SOC system. It uses mmio map apb-bus to visit
|
||||
the controller's register.
|
||||
|
||||
config IMX_IRQSTEER
|
||||
|
|
|
@ -661,7 +661,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
|
|||
struct its_cmd_desc *desc)
|
||||
{
|
||||
its_encode_cmd(cmd, GITS_CMD_INVALL);
|
||||
its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
|
||||
its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
|
||||
|
||||
its_fixup_cmd(cmd);
|
||||
|
||||
|
@ -2376,6 +2376,8 @@ static u64 inherit_vpe_l1_table_from_its(void)
|
|||
continue;
|
||||
|
||||
/* We have a winner! */
|
||||
gic_data_rdist()->vpe_l1_base = its->tables[2].base;
|
||||
|
||||
val = GICR_VPROPBASER_4_1_VALID;
|
||||
if (baser & GITS_BASER_INDIRECT)
|
||||
val |= GICR_VPROPBASER_4_1_INDIRECT;
|
||||
|
@ -2413,14 +2415,12 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
|
|||
|
||||
for_each_possible_cpu(cpu) {
|
||||
void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
|
||||
u32 tmp;
|
||||
|
||||
if (!base || cpu == smp_processor_id())
|
||||
continue;
|
||||
|
||||
val = gic_read_typer(base + GICR_TYPER);
|
||||
tmp = compute_common_aff(val);
|
||||
if (tmp != aff)
|
||||
if (aff != compute_common_aff(val))
|
||||
continue;
|
||||
|
||||
/*
|
||||
|
@ -2429,9 +2429,10 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
|
|||
* ours wrt CommonLPIAff. Let's use its own VPROPBASER.
|
||||
* Make sure we don't write the Z bit in that case.
|
||||
*/
|
||||
val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
|
||||
val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
|
||||
val &= ~GICR_VPROPBASER_4_1_Z;
|
||||
|
||||
gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
|
||||
*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
|
||||
|
||||
return val;
|
||||
|
@ -2440,6 +2441,72 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool allocate_vpe_l2_table(int cpu, u32 id)
|
||||
{
|
||||
void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
|
||||
unsigned int psz, esz, idx, npg, gpsz;
|
||||
u64 val;
|
||||
struct page *page;
|
||||
__le64 *table;
|
||||
|
||||
if (!gic_rdists->has_rvpeid)
|
||||
return true;
|
||||
|
||||
val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
|
||||
|
||||
esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
|
||||
gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
|
||||
npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
|
||||
|
||||
switch (gpsz) {
|
||||
default:
|
||||
WARN_ON(1);
|
||||
/* fall through */
|
||||
case GIC_PAGE_SIZE_4K:
|
||||
psz = SZ_4K;
|
||||
break;
|
||||
case GIC_PAGE_SIZE_16K:
|
||||
psz = SZ_16K;
|
||||
break;
|
||||
case GIC_PAGE_SIZE_64K:
|
||||
psz = SZ_64K;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Don't allow vpe_id that exceeds single, flat table limit */
|
||||
if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
|
||||
return (id < (npg * psz / (esz * SZ_8)));
|
||||
|
||||
/* Compute 1st level table index & check if that exceeds table limit */
|
||||
idx = id >> ilog2(psz / (esz * SZ_8));
|
||||
if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
|
||||
return false;
|
||||
|
||||
table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
|
||||
|
||||
/* Allocate memory for 2nd level table */
|
||||
if (!table[idx]) {
|
||||
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
|
||||
if (!page)
|
||||
return false;
|
||||
|
||||
/* Flush Lvl2 table to PoC if hw doesn't support coherency */
|
||||
if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
|
||||
gic_flush_dcache_to_poc(page_address(page), psz);
|
||||
|
||||
table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
|
||||
|
||||
/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
|
||||
if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
|
||||
gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
|
||||
|
||||
/* Ensure updated table contents are visible to RD hardware */
|
||||
dsb(sy);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int allocate_vpe_l1_table(void)
|
||||
{
|
||||
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
|
||||
|
@ -2457,8 +2524,8 @@ static int allocate_vpe_l1_table(void)
|
|||
* effect of making sure no doorbell will be generated and we can
|
||||
* then safely clear VPROPBASER.Valid.
|
||||
*/
|
||||
if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
|
||||
gits_write_vpendbaser(GICR_VPENDBASER_PendingLast,
|
||||
if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
|
||||
gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
|
||||
vlpi_base + GICR_VPENDBASER);
|
||||
|
||||
/*
|
||||
|
@ -2481,8 +2548,8 @@ static int allocate_vpe_l1_table(void)
|
|||
|
||||
/* First probe the page size */
|
||||
val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
|
||||
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
|
||||
val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
|
||||
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
|
||||
val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
|
||||
gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
|
||||
esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
|
||||
|
||||
|
@ -2531,7 +2598,7 @@ static int allocate_vpe_l1_table(void)
|
|||
npg = 1;
|
||||
}
|
||||
|
||||
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg);
|
||||
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
|
||||
|
||||
/* Right, that's the number of CPU pages we need for L1 */
|
||||
np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
|
||||
|
@ -2542,7 +2609,7 @@ static int allocate_vpe_l1_table(void)
|
|||
if (!page)
|
||||
return -ENOMEM;
|
||||
|
||||
gic_data_rdist()->vpe_l1_page = page;
|
||||
gic_data_rdist()->vpe_l1_base = page_address(page);
|
||||
pa = virt_to_phys(page_address(page));
|
||||
WARN_ON(!IS_ALIGNED(pa, psz));
|
||||
|
||||
|
@ -2553,7 +2620,7 @@ static int allocate_vpe_l1_table(void)
|
|||
val |= GICR_VPROPBASER_4_1_VALID;
|
||||
|
||||
out:
|
||||
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
|
||||
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
|
||||
cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
|
||||
|
||||
pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
|
||||
|
@ -2660,14 +2727,14 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
|
|||
bool clean;
|
||||
u64 val;
|
||||
|
||||
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
|
||||
val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
|
||||
val &= ~GICR_VPENDBASER_Valid;
|
||||
val &= ~clr;
|
||||
val |= set;
|
||||
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
|
||||
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
|
||||
|
||||
do {
|
||||
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
|
||||
val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
|
||||
clean = !(val & GICR_VPENDBASER_Dirty);
|
||||
if (!clean) {
|
||||
count--;
|
||||
|
@ -2782,7 +2849,7 @@ static void its_cpu_init_lpis(void)
|
|||
val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
|
||||
pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
|
||||
smp_processor_id(), val);
|
||||
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
|
||||
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
|
||||
|
||||
/*
|
||||
* Also clear Valid bit of GICR_VPENDBASER, in case some
|
||||
|
@ -2790,7 +2857,6 @@ static void its_cpu_init_lpis(void)
|
|||
* corrupting memory.
|
||||
*/
|
||||
val = its_clear_vpend_valid(vlpi_base, 0, 0);
|
||||
WARN_ON(val & GICR_VPENDBASER_Dirty);
|
||||
}
|
||||
|
||||
if (allocate_vpe_l1_table()) {
|
||||
|
@ -2954,6 +3020,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
|
|||
static bool its_alloc_vpe_table(u32 vpe_id)
|
||||
{
|
||||
struct its_node *its;
|
||||
int cpu;
|
||||
|
||||
/*
|
||||
* Make sure the L2 tables are allocated on *all* v4 ITSs. We
|
||||
|
@ -2976,6 +3043,19 @@ static bool its_alloc_vpe_table(u32 vpe_id)
|
|||
return false;
|
||||
}
|
||||
|
||||
/* Non v4.1? No need to iterate RDs and go back early. */
|
||||
if (!gic_rdists->has_rvpeid)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* Make sure the L2 tables are allocated for all copies of
|
||||
* the L1 table on *all* v4.1 RDs.
|
||||
*/
|
||||
for_each_possible_cpu(cpu) {
|
||||
if (!allocate_vpe_l2_table(cpu, vpe_id))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -3443,7 +3523,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
|
|||
val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
|
||||
val |= GICR_VPROPBASER_RaWb;
|
||||
val |= GICR_VPROPBASER_InnerShareable;
|
||||
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
|
||||
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
|
||||
|
||||
val = virt_to_phys(page_address(vpe->vpt_page)) &
|
||||
GENMASK_ULL(51, 16);
|
||||
|
@ -3461,7 +3541,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
|
|||
val |= GICR_VPENDBASER_PendingLast;
|
||||
val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
|
||||
val |= GICR_VPENDBASER_Valid;
|
||||
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
|
||||
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
|
||||
}
|
||||
|
||||
static void its_vpe_deschedule(struct its_vpe *vpe)
|
||||
|
@ -3661,7 +3741,7 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
|
|||
val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
|
||||
val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
|
||||
|
||||
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
|
||||
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
|
||||
}
|
||||
|
||||
static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
|
||||
|
|
|
@ -1839,6 +1839,7 @@ static struct
|
|||
struct redist_region *redist_regs;
|
||||
u32 nr_redist_regions;
|
||||
bool single_redist;
|
||||
int enabled_rdists;
|
||||
u32 maint_irq;
|
||||
int maint_irq_mode;
|
||||
phys_addr_t vcpu_base;
|
||||
|
@ -1933,8 +1934,10 @@ static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
|
|||
* If GICC is enabled and has valid gicr base address, then it means
|
||||
* GICR base is presented via GICC
|
||||
*/
|
||||
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
|
||||
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
|
||||
acpi_data.enabled_rdists++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* It's perfectly valid firmware can pass disabled GICC entry, driver
|
||||
|
@ -1964,8 +1967,10 @@ static int __init gic_acpi_count_gicr_regions(void)
|
|||
|
||||
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
|
||||
gic_acpi_match_gicc, 0);
|
||||
if (count > 0)
|
||||
if (count > 0) {
|
||||
acpi_data.single_redist = true;
|
||||
count = acpi_data.enabled_rdists;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
|
|
@ -1383,26 +1383,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
|
|||
bool do_tx_balance = true;
|
||||
u32 hash_index = 0;
|
||||
const u8 *hash_start = NULL;
|
||||
struct ipv6hdr *ip6hdr;
|
||||
|
||||
skb_reset_mac_header(skb);
|
||||
eth_data = eth_hdr(skb);
|
||||
|
||||
switch (ntohs(skb->protocol)) {
|
||||
case ETH_P_IP: {
|
||||
const struct iphdr *iph = ip_hdr(skb);
|
||||
const struct iphdr *iph;
|
||||
|
||||
if (is_broadcast_ether_addr(eth_data->h_dest) ||
|
||||
iph->daddr == ip_bcast ||
|
||||
iph->protocol == IPPROTO_IGMP) {
|
||||
!pskb_network_may_pull(skb, sizeof(*iph))) {
|
||||
do_tx_balance = false;
|
||||
break;
|
||||
}
|
||||
iph = ip_hdr(skb);
|
||||
if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
|
||||
do_tx_balance = false;
|
||||
break;
|
||||
}
|
||||
hash_start = (char *)&(iph->daddr);
|
||||
hash_size = sizeof(iph->daddr);
|
||||
}
|
||||
break;
|
||||
case ETH_P_IPV6:
|
||||
}
|
||||
case ETH_P_IPV6: {
|
||||
const struct ipv6hdr *ip6hdr;
|
||||
|
||||
/* IPv6 doesn't really use broadcast mac address, but leave
|
||||
* that here just in case.
|
||||
*/
|
||||
|
@ -1419,7 +1424,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
|
|||
break;
|
||||
}
|
||||
|
||||
/* Additianally, DAD probes should not be tx-balanced as that
|
||||
if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
|
||||
do_tx_balance = false;
|
||||
break;
|
||||
}
|
||||
/* Additionally, DAD probes should not be tx-balanced as that
|
||||
* will lead to false positives for duplicate addresses and
|
||||
* prevent address configuration from working.
|
||||
*/
|
||||
|
@ -1429,17 +1438,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
|
|||
break;
|
||||
}
|
||||
|
||||
hash_start = (char *)&(ipv6_hdr(skb)->daddr);
|
||||
hash_size = sizeof(ipv6_hdr(skb)->daddr);
|
||||
hash_start = (char *)&ip6hdr->daddr;
|
||||
hash_size = sizeof(ip6hdr->daddr);
|
||||
break;
|
||||
case ETH_P_IPX:
|
||||
if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
|
||||
}
|
||||
case ETH_P_IPX: {
|
||||
const struct ipxhdr *ipxhdr;
|
||||
|
||||
if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
|
||||
do_tx_balance = false;
|
||||
break;
|
||||
}
|
||||
ipxhdr = (struct ipxhdr *)skb_network_header(skb);
|
||||
|
||||
if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
|
||||
/* something is wrong with this packet */
|
||||
do_tx_balance = false;
|
||||
break;
|
||||
}
|
||||
|
||||
if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
|
||||
if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
|
||||
/* The only protocol worth balancing in
|
||||
* this family since it has an "ARP" like
|
||||
* mechanism
|
||||
|
@ -1448,9 +1466,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
|
|||
break;
|
||||
}
|
||||
|
||||
eth_data = eth_hdr(skb);
|
||||
hash_start = (char *)eth_data->h_dest;
|
||||
hash_size = ETH_ALEN;
|
||||
break;
|
||||
}
|
||||
case ETH_P_ARP:
|
||||
do_tx_balance = false;
|
||||
if (bond_info->rlb_enabled)
|
||||
|
|
|
@ -693,7 +693,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
|
|||
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
|
||||
}
|
||||
|
||||
b53_enable_vlan(dev, false, ds->vlan_filtering);
|
||||
b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
|
||||
|
||||
b53_for_each_port(dev, i)
|
||||
b53_write16(dev, B53_VLAN_PAGE,
|
||||
|
|
|
@ -68,7 +68,9 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
|
|||
|
||||
/* Force link status for IMP port */
|
||||
reg = core_readl(priv, offset);
|
||||
reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
|
||||
reg |= (MII_SW_OR | LINK_STS);
|
||||
if (priv->type == BCM7278_DEVICE_ID)
|
||||
reg |= GMII_SPEED_UP_2G;
|
||||
core_writel(priv, reg, offset);
|
||||
|
||||
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
|
||||
|
|
|
@ -101,6 +101,12 @@ static struct spi_driver ksz9477_spi_driver = {
|
|||
|
||||
module_spi_driver(ksz9477_spi_driver);
|
||||
|
||||
MODULE_ALIAS("spi:ksz9477");
|
||||
MODULE_ALIAS("spi:ksz9897");
|
||||
MODULE_ALIAS("spi:ksz9893");
|
||||
MODULE_ALIAS("spi:ksz9563");
|
||||
MODULE_ALIAS("spi:ksz8563");
|
||||
MODULE_ALIAS("spi:ksz9567");
|
||||
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
|
||||
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -2736,6 +2736,9 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
|
|||
|
||||
umac_reset(priv);
|
||||
|
||||
/* Disable the UniMAC RX/TX */
|
||||
umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
|
||||
|
||||
/* We may have been suspended and never received a WOL event that
|
||||
* would turn off MPD detection, take care of that now
|
||||
*/
|
||||
|
|
|
@ -73,7 +73,11 @@ struct sifive_fu540_macb_mgmt {
|
|||
/* Max length of transmit frame must be a multiple of 8 bytes */
|
||||
#define MACB_TX_LEN_ALIGN 8
|
||||
#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
|
||||
#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
|
||||
/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
|
||||
* false amba_error in TX path from the DMA assuming there is not enough
|
||||
* space in the SRAM (16KB) even when there is.
|
||||
*/
|
||||
#define GEM_MAX_TX_LEN (unsigned int)(0x3FC0)
|
||||
|
||||
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
|
||||
#define MACB_NETIF_LSO NETIF_F_TSO
|
||||
|
@ -1791,16 +1795,14 @@ static netdev_features_t macb_features_check(struct sk_buff *skb,
|
|||
|
||||
/* Validate LSO compatibility */
|
||||
|
||||
/* there is only one buffer */
|
||||
if (!skb_is_nonlinear(skb))
|
||||
/* there is only one buffer or protocol is not UDP */
|
||||
if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
|
||||
return features;
|
||||
|
||||
/* length of header */
|
||||
hdrlen = skb_transport_offset(skb);
|
||||
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
|
||||
hdrlen += tcp_hdrlen(skb);
|
||||
|
||||
/* For LSO:
|
||||
/* For UFO only:
|
||||
* When software supplies two or more payload buffers all payload buffers
|
||||
* apart from the last must be a multiple of 8 bytes in size.
|
||||
*/
|
||||
|
|
|
@ -1039,7 +1039,7 @@ static int phy_interface_mode(u8 lmac_type)
|
|||
if (lmac_type == BGX_MODE_QSGMII)
|
||||
return PHY_INTERFACE_MODE_QSGMII;
|
||||
if (lmac_type == BGX_MODE_RGMII)
|
||||
return PHY_INTERFACE_MODE_RGMII;
|
||||
return PHY_INTERFACE_MODE_RGMII_RXID;
|
||||
|
||||
return PHY_INTERFACE_MODE_SGMII;
|
||||
}
|
||||
|
|
|
@ -3403,6 +3403,13 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
|
|||
atomic_read(&adap->chcr_stats.fallback));
|
||||
seq_printf(seq, "IPSec PDU: %10u\n",
|
||||
atomic_read(&adap->chcr_stats.ipsec_cnt));
|
||||
seq_printf(seq, "TLS PDU Tx: %10u\n",
|
||||
atomic_read(&adap->chcr_stats.tls_pdu_tx));
|
||||
seq_printf(seq, "TLS PDU Rx: %10u\n",
|
||||
atomic_read(&adap->chcr_stats.tls_pdu_rx));
|
||||
seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
|
||||
atomic_read(&adap->chcr_stats.tls_key));
|
||||
|
||||
return 0;
|
||||
}
|
||||
DEFINE_SHOW_ATTRIBUTE(chcr_stats);
|
||||
|
|
|
@ -417,7 +417,10 @@ static void de_rx (struct de_private *de)
|
|||
if (status & DescOwn)
|
||||
break;
|
||||
|
||||
len = ((status >> 16) & 0x7ff) - 4;
|
||||
/* the length is actually a 15 bit value here according
|
||||
* to Table 4-1 in the DE2104x spec so mask is 0x7fff
|
||||
*/
|
||||
len = ((status >> 16) & 0x7fff) - 4;
|
||||
mapping = de->rx_skb[rx_tail].mapping;
|
||||
|
||||
if (unlikely(drop)) {
|
||||
|
|
|
@ -2453,6 +2453,9 @@ static void dpaa_adjust_link(struct net_device *net_dev)
|
|||
mac_dev->adjust_link(mac_dev);
|
||||
}
|
||||
|
||||
/* The Aquantia PHYs are capable of performing rate adaptation */
|
||||
#define PHY_VEND_AQUANTIA 0x03a1b400
|
||||
|
||||
static int dpaa_phy_init(struct net_device *net_dev)
|
||||
{
|
||||
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
|
||||
|
@ -2471,9 +2474,14 @@ static int dpaa_phy_init(struct net_device *net_dev)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Remove any features not supported by the controller */
|
||||
ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
|
||||
linkmode_and(phy_dev->supported, phy_dev->supported, mask);
|
||||
/* Unless the PHY is capable of rate adaptation */
|
||||
if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
|
||||
((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
|
||||
/* remove any features not supported by the controller */
|
||||
ethtool_convert_legacy_u32_to_link_mode(mask,
|
||||
mac_dev->if_support);
|
||||
linkmode_and(phy_dev->supported, phy_dev->supported, mask);
|
||||
}
|
||||
|
||||
phy_support_asym_pause(phy_dev);
|
||||
|
||||
|
|
|
@ -791,7 +791,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
|
|||
struct i40e_ring *ring;
|
||||
|
||||
if (test_bit(__I40E_CONFIG_BUSY, pf->state))
|
||||
return -ENETDOWN;
|
||||
return -EAGAIN;
|
||||
|
||||
if (test_bit(__I40E_VSI_DOWN, vsi->state))
|
||||
return -ENETDOWN;
|
||||
|
|
|
@ -401,6 +401,8 @@ struct mvneta_pcpu_stats {
|
|||
struct u64_stats_sync syncp;
|
||||
u64 rx_packets;
|
||||
u64 rx_bytes;
|
||||
u64 rx_dropped;
|
||||
u64 rx_errors;
|
||||
u64 tx_packets;
|
||||
u64 tx_bytes;
|
||||
};
|
||||
|
@ -738,6 +740,8 @@ mvneta_get_stats64(struct net_device *dev,
|
|||
struct mvneta_pcpu_stats *cpu_stats;
|
||||
u64 rx_packets;
|
||||
u64 rx_bytes;
|
||||
u64 rx_dropped;
|
||||
u64 rx_errors;
|
||||
u64 tx_packets;
|
||||
u64 tx_bytes;
|
||||
|
||||
|
@ -746,19 +750,20 @@ mvneta_get_stats64(struct net_device *dev,
|
|||
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
|
||||
rx_packets = cpu_stats->rx_packets;
|
||||
rx_bytes = cpu_stats->rx_bytes;
|
||||
rx_dropped = cpu_stats->rx_dropped;
|
||||
rx_errors = cpu_stats->rx_errors;
|
||||
tx_packets = cpu_stats->tx_packets;
|
||||
tx_bytes = cpu_stats->tx_bytes;
|
||||
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
|
||||
|
||||
stats->rx_packets += rx_packets;
|
||||
stats->rx_bytes += rx_bytes;
|
||||
stats->rx_dropped += rx_dropped;
|
||||
stats->rx_errors += rx_errors;
|
||||
stats->tx_packets += tx_packets;
|
||||
stats->tx_bytes += tx_bytes;
|
||||
}
|
||||
|
||||
stats->rx_errors = dev->stats.rx_errors;
|
||||
stats->rx_dropped = dev->stats.rx_dropped;
|
||||
|
||||
stats->tx_dropped = dev->stats.tx_dropped;
|
||||
}
|
||||
|
||||
|
@ -1736,8 +1741,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
|
|||
static void mvneta_rx_error(struct mvneta_port *pp,
|
||||
struct mvneta_rx_desc *rx_desc)
|
||||
{
|
||||
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
|
||||
u32 status = rx_desc->status;
|
||||
|
||||
/* update per-cpu counter */
|
||||
u64_stats_update_begin(&stats->syncp);
|
||||
stats->rx_errors++;
|
||||
u64_stats_update_end(&stats->syncp);
|
||||
|
||||
switch (status & MVNETA_RXD_ERR_CODE_MASK) {
|
||||
case MVNETA_RXD_ERR_CRC:
|
||||
netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
|
||||
|
@ -2179,11 +2190,15 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
|
|||
|
||||
rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
|
||||
if (unlikely(!rxq->skb)) {
|
||||
netdev_err(dev,
|
||||
"Can't allocate skb on queue %d\n",
|
||||
rxq->id);
|
||||
dev->stats.rx_dropped++;
|
||||
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
|
||||
|
||||
netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
|
||||
rxq->skb_alloc_err++;
|
||||
|
||||
u64_stats_update_begin(&stats->syncp);
|
||||
stats->rx_dropped++;
|
||||
u64_stats_update_end(&stats->syncp);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
page_pool_release_page(rxq->page_pool, page);
|
||||
|
@ -2270,7 +2285,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
|
|||
/* Check errors only for FIRST descriptor */
|
||||
if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
|
||||
mvneta_rx_error(pp, rx_desc);
|
||||
dev->stats.rx_errors++;
|
||||
/* leave the descriptor untouched */
|
||||
continue;
|
||||
}
|
||||
|
@ -2372,7 +2386,6 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
|
|||
mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
|
||||
rx_desc->buf_phys_addr);
|
||||
err_drop_frame:
|
||||
dev->stats.rx_errors++;
|
||||
mvneta_rx_error(pp, rx_desc);
|
||||
/* leave the descriptor untouched */
|
||||
continue;
|
||||
|
|
|
@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
|
|||
|
||||
static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
|
||||
{
|
||||
if (!MLX5_CAP_GEN(mdev, tls))
|
||||
if (!MLX5_CAP_GEN(mdev, tls_tx))
|
||||
return false;
|
||||
|
||||
if (!MLX5_CAP_GEN(mdev, log_max_dek))
|
||||
|
|
|
@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
|
|||
int datalen;
|
||||
u32 skb_seq;
|
||||
|
||||
if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
|
||||
if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
|
||||
skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
|
||||
goto out;
|
||||
}
|
||||
|
|
|
@ -613,13 +613,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
|
|||
|
||||
wqe_counter = be16_to_cpu(cqe->wqe_counter);
|
||||
|
||||
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
|
||||
netdev_WARN_ONCE(cq->channel->netdev,
|
||||
"Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
|
||||
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
|
||||
queue_work(cq->channel->priv->wq, &sq->recover_work);
|
||||
break;
|
||||
}
|
||||
do {
|
||||
struct mlx5e_sq_wqe_info *wi;
|
||||
u16 ci;
|
||||
|
@ -629,6 +622,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
|
|||
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
|
||||
wi = &sq->db.ico_wqe[ci];
|
||||
|
||||
if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
|
||||
netdev_WARN_ONCE(cq->channel->netdev,
|
||||
"Bad OP in ICOSQ CQE: 0x%x\n",
|
||||
get_cqe_opcode(cqe));
|
||||
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
|
||||
queue_work(cq->channel->priv->wq, &sq->recover_work);
|
||||
break;
|
||||
}
|
||||
|
||||
if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
|
||||
sqcc += MLX5E_UMR_WQEBBS;
|
||||
wi->umr.rq->mpwqe.umr_completed++;
|
||||
|
|
|
@ -451,34 +451,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
|
|||
|
||||
i = 0;
|
||||
do {
|
||||
struct mlx5e_tx_wqe_info *wi;
|
||||
u16 wqe_counter;
|
||||
bool last_wqe;
|
||||
u16 ci;
|
||||
|
||||
mlx5_cqwq_pop(&cq->wq);
|
||||
|
||||
wqe_counter = be16_to_cpu(cqe->wqe_counter);
|
||||
|
||||
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
|
||||
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
|
||||
&sq->state)) {
|
||||
struct mlx5e_tx_wqe_info *wi;
|
||||
u16 ci;
|
||||
|
||||
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
|
||||
wi = &sq->db.wqe_info[ci];
|
||||
mlx5e_dump_error_cqe(sq,
|
||||
(struct mlx5_err_cqe *)cqe);
|
||||
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
|
||||
queue_work(cq->channel->priv->wq,
|
||||
&sq->recover_work);
|
||||
}
|
||||
stats->cqe_err++;
|
||||
}
|
||||
|
||||
do {
|
||||
struct mlx5e_tx_wqe_info *wi;
|
||||
struct sk_buff *skb;
|
||||
u16 ci;
|
||||
int j;
|
||||
|
||||
last_wqe = (sqcc == wqe_counter);
|
||||
|
@ -516,6 +499,18 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
|
|||
napi_consume_skb(skb, napi_budget);
|
||||
} while (!last_wqe);
|
||||
|
||||
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
|
||||
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
|
||||
&sq->state)) {
|
||||
mlx5e_dump_error_cqe(sq,
|
||||
(struct mlx5_err_cqe *)cqe);
|
||||
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
|
||||
queue_work(cq->channel->priv->wq,
|
||||
&sq->recover_work);
|
||||
}
|
||||
stats->cqe_err++;
|
||||
}
|
||||
|
||||
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
|
||||
|
||||
stats->cqes += i;
|
||||
|
|
|
@ -850,6 +850,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
|
|||
mutex_lock(&fpga_xfrm->lock);
|
||||
if (!--fpga_xfrm->num_rules) {
|
||||
mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
|
||||
kfree(fpga_xfrm->sa_ctx);
|
||||
fpga_xfrm->sa_ctx = NULL;
|
||||
}
|
||||
mutex_unlock(&fpga_xfrm->lock);
|
||||
|
@ -1478,7 +1479,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
|
|||
if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
|
||||
return 0;
|
||||
|
||||
if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
|
||||
if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
|
||||
mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
|
|
@ -1582,16 +1582,16 @@ struct match_list_head {
|
|||
struct match_list first;
|
||||
};
|
||||
|
||||
static void free_match_list(struct match_list_head *head)
|
||||
static void free_match_list(struct match_list_head *head, bool ft_locked)
|
||||
{
|
||||
if (!list_empty(&head->list)) {
|
||||
struct match_list *iter, *match_tmp;
|
||||
|
||||
list_del(&head->first.list);
|
||||
tree_put_node(&head->first.g->node, false);
|
||||
tree_put_node(&head->first.g->node, ft_locked);
|
||||
list_for_each_entry_safe(iter, match_tmp, &head->list,
|
||||
list) {
|
||||
tree_put_node(&iter->g->node, false);
|
||||
tree_put_node(&iter->g->node, ft_locked);
|
||||
list_del(&iter->list);
|
||||
kfree(iter);
|
||||
}
|
||||
|
@ -1600,7 +1600,8 @@ static void free_match_list(struct match_list_head *head)
|
|||
|
||||
static int build_match_list(struct match_list_head *match_head,
|
||||
struct mlx5_flow_table *ft,
|
||||
const struct mlx5_flow_spec *spec)
|
||||
const struct mlx5_flow_spec *spec,
|
||||
bool ft_locked)
|
||||
{
|
||||
struct rhlist_head *tmp, *list;
|
||||
struct mlx5_flow_group *g;
|
||||
|
@ -1625,7 +1626,7 @@ static int build_match_list(struct match_list_head *match_head,
|
|||
|
||||
curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
|
||||
if (!curr_match) {
|
||||
free_match_list(match_head);
|
||||
free_match_list(match_head, ft_locked);
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
@ -1805,7 +1806,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
|
|||
version = atomic_read(&ft->node.version);
|
||||
|
||||
/* Collect all fgs which has a matching match_criteria */
|
||||
err = build_match_list(&match_head, ft, spec);
|
||||
err = build_match_list(&match_head, ft, spec, take_write);
|
||||
if (err) {
|
||||
if (take_write)
|
||||
up_write_ref_node(&ft->node, false);
|
||||
|
@ -1819,7 +1820,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
|
|||
|
||||
rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
|
||||
dest_num, version);
|
||||
free_match_list(&match_head);
|
||||
free_match_list(&match_head, take_write);
|
||||
if (!IS_ERR(rule) ||
|
||||
(PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
|
||||
if (take_write)
|
||||
|
|
|
@ -242,7 +242,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
|
|||
return err;
|
||||
}
|
||||
|
||||
if (MLX5_CAP_GEN(dev, tls)) {
|
||||
if (MLX5_CAP_GEN(dev, tls_tx)) {
|
||||
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
|
||||
if (err)
|
||||
return err;
|
||||
|
|
|
@ -573,6 +573,7 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
|
|||
|
||||
static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
|
||||
{
|
||||
enum mlxsw_reg_mgpir_device_type device_type;
|
||||
int index, max_index, sensor_index;
|
||||
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
|
||||
char mtmp_pl[MLXSW_REG_MTMP_LEN];
|
||||
|
@ -584,8 +585,9 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
|
||||
if (!gbox_num)
|
||||
mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
|
||||
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
|
||||
!gbox_num)
|
||||
return 0;
|
||||
|
||||
index = mlxsw_hwmon->module_sensor_max;
|
||||
|
|
|
@ -895,8 +895,10 @@ static int
|
|||
mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
|
||||
struct mlxsw_thermal *thermal)
|
||||
{
|
||||
enum mlxsw_reg_mgpir_device_type device_type;
|
||||
struct mlxsw_thermal_module *gearbox_tz;
|
||||
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
|
||||
u8 gbox_num;
|
||||
int i;
|
||||
int err;
|
||||
|
||||
|
@ -908,11 +910,13 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
|
||||
mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
|
||||
NULL);
|
||||
if (!thermal->tz_gearbox_num)
|
||||
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
|
||||
!gbox_num)
|
||||
return 0;
|
||||
|
||||
thermal->tz_gearbox_num = gbox_num;
|
||||
thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
|
||||
sizeof(*thermal->tz_gearbox_arr),
|
||||
GFP_KERNEL);
|
||||
|
|
|
@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
|
|||
start_again:
|
||||
err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
|
||||
if (err)
|
||||
return err;
|
||||
goto err_ctx_prepare;
|
||||
j = 0;
|
||||
for (; i < rif_count; i++) {
|
||||
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
|
||||
|
@ -247,6 +247,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
|
|||
return 0;
|
||||
err_entry_append:
|
||||
err_entry_get:
|
||||
err_ctx_prepare:
|
||||
rtnl_unlock();
|
||||
devlink_dpipe_entry_clear(&entry);
|
||||
return err;
|
||||
|
|
|
@ -4844,6 +4844,23 @@ mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
|
|||
fib_node->fib_entry = NULL;
|
||||
}
|
||||
|
||||
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
|
||||
{
|
||||
struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
|
||||
struct mlxsw_sp_fib4_entry *fib4_replaced;
|
||||
|
||||
if (!fib_node->fib_entry)
|
||||
return true;
|
||||
|
||||
fib4_replaced = container_of(fib_node->fib_entry,
|
||||
struct mlxsw_sp_fib4_entry, common);
|
||||
if (fib4_entry->tb_id == RT_TABLE_MAIN &&
|
||||
fib4_replaced->tb_id == RT_TABLE_LOCAL)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int
|
||||
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
|
||||
const struct fib_entry_notifier_info *fen_info)
|
||||
|
@ -4872,6 +4889,12 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
|
|||
goto err_fib4_entry_create;
|
||||
}
|
||||
|
||||
if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
|
||||
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
|
||||
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
replaced = fib_node->fib_entry;
|
||||
err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
|
||||
if (err) {
|
||||
|
@ -4908,7 +4931,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
|
|||
return;
|
||||
|
||||
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
|
||||
if (WARN_ON(!fib4_entry))
|
||||
if (!fib4_entry)
|
||||
return;
|
||||
fib_node = fib4_entry->common.fib_node;
|
||||
|
||||
|
@ -4970,6 +4993,9 @@ static void mlxsw_sp_rt6_release(struct fib6_info *rt)
|
|||
|
||||
static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
|
||||
{
|
||||
struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
|
||||
|
||||
fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
|
||||
mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
|
||||
kfree(mlxsw_sp_rt6);
|
||||
}
|
||||
|
@ -5408,6 +5434,27 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
|
||||
{
|
||||
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
|
||||
struct mlxsw_sp_fib6_entry *fib6_replaced;
|
||||
struct fib6_info *rt, *rt_replaced;
|
||||
|
||||
if (!fib_node->fib_entry)
|
||||
return true;
|
||||
|
||||
fib6_replaced = container_of(fib_node->fib_entry,
|
||||
struct mlxsw_sp_fib6_entry,
|
||||
common);
|
||||
rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
|
||||
rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
|
||||
if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
|
||||
rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
|
||||
struct fib6_info **rt_arr,
|
||||
unsigned int nrt6)
|
||||
|
@ -5442,6 +5489,12 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
|
|||
goto err_fib6_entry_create;
|
||||
}
|
||||
|
||||
if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
|
||||
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
|
||||
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
replaced = fib_node->fib_entry;
|
||||
err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
|
||||
if (err)
|
||||
|
|
|
@ -44,8 +44,8 @@
|
|||
/* Add/subtract the Adjustment_Value when making a Drift adjustment */
|
||||
#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
|
||||
#define QED_TIMESTAMP_MASK BIT(16)
|
||||
/* Param mask for Hardware to detect/timestamp the unicast PTP packets */
|
||||
#define QED_PTP_UCAST_PARAM_MASK 0xF
|
||||
/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
|
||||
#define QED_PTP_UCAST_PARAM_MASK 0x70F
|
||||
|
||||
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
|
||||
{
|
||||
|
|
|
@ -2477,15 +2477,18 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
|
|||
switch (tp->mac_version) {
|
||||
case RTL_GIGA_MAC_VER_12:
|
||||
case RTL_GIGA_MAC_VER_17:
|
||||
pcie_set_readrq(tp->pci_dev, 512);
|
||||
r8168b_1_hw_jumbo_enable(tp);
|
||||
break;
|
||||
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
|
||||
pcie_set_readrq(tp->pci_dev, 512);
|
||||
r8168c_hw_jumbo_enable(tp);
|
||||
break;
|
||||
case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
|
||||
r8168dp_hw_jumbo_enable(tp);
|
||||
break;
|
||||
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
|
||||
pcie_set_readrq(tp->pci_dev, 512);
|
||||
r8168e_hw_jumbo_enable(tp);
|
||||
break;
|
||||
default:
|
||||
|
@ -2515,6 +2518,9 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
|
|||
break;
|
||||
}
|
||||
rtl_lock_config_regs(tp);
|
||||
|
||||
if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
|
||||
pcie_set_readrq(tp->pci_dev, 4096);
|
||||
}
|
||||
|
||||
static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
|
||||
|
|
|
@ -823,7 +823,6 @@ static int ioc3_close(struct net_device *dev)
|
|||
netif_stop_queue(dev);
|
||||
|
||||
ioc3_stop(ip);
|
||||
free_irq(dev->irq, dev);
|
||||
|
||||
ioc3_free_rx_bufs(ip);
|
||||
ioc3_clean_tx_ring(ip);
|
||||
|
|
|
@ -413,6 +413,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
|
|||
dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
|
||||
if (dll_lock & SDC4_STATUS_DLL_LOCK)
|
||||
break;
|
||||
retry--;
|
||||
} while (retry > 0);
|
||||
if (!retry)
|
||||
dev_err(ðqos->pdev->dev,
|
||||
|
|
|
@ -420,7 +420,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
|
|||
value |= GMAC_PACKET_FILTER_PM;
|
||||
/* Set all the bits of the HASH tab */
|
||||
memset(mc_filter, 0xff, sizeof(mc_filter));
|
||||
} else if (!netdev_mc_empty(dev)) {
|
||||
} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
|
||||
struct netdev_hw_addr *ha;
|
||||
|
||||
/* Hash filter for multicast */
|
||||
|
@ -736,11 +736,14 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
|
|||
__le16 perfect_match, bool is_double)
|
||||
{
|
||||
void __iomem *ioaddr = hw->pcsr;
|
||||
u32 value;
|
||||
|
||||
writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
|
||||
|
||||
value = readl(ioaddr + GMAC_VLAN_TAG);
|
||||
|
||||
if (hash) {
|
||||
u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
|
||||
value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
|
||||
if (is_double) {
|
||||
value |= GMAC_VLAN_EDVLP;
|
||||
value |= GMAC_VLAN_ESVL;
|
||||
|
@ -759,8 +762,6 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
|
|||
|
||||
writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
|
||||
} else {
|
||||
u32 value = readl(ioaddr + GMAC_VLAN_TAG);
|
||||
|
||||
value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
|
||||
value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
|
||||
value &= ~GMAC_VLAN_DOVLTC;
|
||||
|
|
|
@ -458,7 +458,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
|
|||
|
||||
for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
|
||||
writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
|
||||
} else if (!netdev_mc_empty(dev)) {
|
||||
} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
|
||||
struct netdev_hw_addr *ha;
|
||||
|
||||
value |= XGMAC_FILTER_HMC;
|
||||
|
@ -569,7 +569,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
|
|||
|
||||
writel(value, ioaddr + XGMAC_PACKET_FILTER);
|
||||
|
||||
value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
|
||||
value = readl(ioaddr + XGMAC_VLAN_TAG);
|
||||
|
||||
value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
|
||||
if (is_double) {
|
||||
value |= XGMAC_VLAN_EDVLP;
|
||||
value |= XGMAC_VLAN_ESVL;
|
||||
|
@ -584,7 +586,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
|
|||
|
||||
writel(value, ioaddr + XGMAC_PACKET_FILTER);
|
||||
|
||||
value = XGMAC_VLAN_ETV;
|
||||
value = readl(ioaddr + XGMAC_VLAN_TAG);
|
||||
|
||||
value |= XGMAC_VLAN_ETV;
|
||||
if (is_double) {
|
||||
value |= XGMAC_VLAN_EDVLP;
|
||||
value |= XGMAC_VLAN_ESVL;
|
||||
|
|
|
@ -95,7 +95,7 @@ static int stmmac_default_data(struct pci_dev *pdev,
|
|||
|
||||
plat->bus_id = 1;
|
||||
plat->phy_addr = 0;
|
||||
plat->interface = PHY_INTERFACE_MODE_GMII;
|
||||
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
|
||||
|
||||
plat->dma_cfg->pbl = 32;
|
||||
plat->dma_cfg->pblx8 = true;
|
||||
|
@ -217,7 +217,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
|
|||
{
|
||||
plat->bus_id = 1;
|
||||
plat->phy_addr = 0;
|
||||
plat->interface = PHY_INTERFACE_MODE_SGMII;
|
||||
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
|
||||
|
||||
return ehl_common_data(pdev, plat);
|
||||
}
|
||||
|
||||
|
@ -230,7 +231,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
|
|||
{
|
||||
plat->bus_id = 1;
|
||||
plat->phy_addr = 0;
|
||||
plat->interface = PHY_INTERFACE_MODE_RGMII;
|
||||
plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
|
||||
|
||||
return ehl_common_data(pdev, plat);
|
||||
}
|
||||
|
||||
|
@ -258,7 +260,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
|
|||
{
|
||||
plat->bus_id = 1;
|
||||
plat->phy_addr = 0;
|
||||
plat->interface = PHY_INTERFACE_MODE_SGMII;
|
||||
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
|
||||
return tgl_common_data(pdev, plat);
|
||||
}
|
||||
|
||||
|
@ -358,7 +360,7 @@ static int quark_default_data(struct pci_dev *pdev,
|
|||
|
||||
plat->bus_id = pci_dev_id(pdev);
|
||||
plat->phy_addr = ret;
|
||||
plat->interface = PHY_INTERFACE_MODE_RMII;
|
||||
plat->phy_interface = PHY_INTERFACE_MODE_RMII;
|
||||
|
||||
plat->dma_cfg->pbl = 16;
|
||||
plat->dma_cfg->pblx8 = true;
|
||||
|
@ -415,7 +417,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
|
|||
|
||||
plat->bus_id = 1;
|
||||
plat->phy_addr = -1;
|
||||
plat->interface = PHY_INTERFACE_MODE_GMII;
|
||||
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
|
||||
|
||||
plat->dma_cfg->pbl = 32;
|
||||
plat->dma_cfg->pblx8 = true;
|
||||
|
|
|
@ -120,7 +120,7 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
|
|||
}
|
||||
|
||||
if (prog)
|
||||
bpf_prog_add(prog, nvdev->num_chn);
|
||||
bpf_prog_add(prog, nvdev->num_chn - 1);
|
||||
|
||||
for (i = 0; i < nvdev->num_chn; i++)
|
||||
rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
|
||||
|
@ -136,6 +136,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
|
|||
{
|
||||
struct netdev_bpf xdp;
|
||||
bpf_op_t ndo_bpf;
|
||||
int ret;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
|
@ -148,10 +149,18 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
|
|||
|
||||
memset(&xdp, 0, sizeof(xdp));
|
||||
|
||||
if (prog)
|
||||
bpf_prog_inc(prog);
|
||||
|
||||
xdp.command = XDP_SETUP_PROG;
|
||||
xdp.prog = prog;
|
||||
|
||||
return ndo_bpf(vf_netdev, &xdp);
|
||||
ret = ndo_bpf(vf_netdev, &xdp);
|
||||
|
||||
if (ret && prog)
|
||||
bpf_prog_put(prog);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
|
||||
|
|
|
@ -1059,9 +1059,12 @@ static int netvsc_attach(struct net_device *ndev,
|
|||
|
||||
prog = dev_info->bprog;
|
||||
if (prog) {
|
||||
bpf_prog_inc(prog);
|
||||
ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
bpf_prog_put(prog);
|
||||
goto err1;
|
||||
}
|
||||
}
|
||||
|
||||
/* In any case device is now ready */
|
||||
|
|
|
@ -934,9 +934,7 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
|
|||
int nsim_dev_init(void)
|
||||
{
|
||||
nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
|
||||
if (IS_ERR(nsim_dev_ddir))
|
||||
return PTR_ERR(nsim_dev_ddir);
|
||||
return 0;
|
||||
return PTR_ERR_OR_ZERO(nsim_dev_ddir);
|
||||
}
|
||||
|
||||
void nsim_dev_exit(void)
|
||||
|
|
|
@ -263,6 +263,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
|
|||
} else {
|
||||
node = kzalloc(sizeof(*node), GFP_KERNEL);
|
||||
if (unlikely(!node)) {
|
||||
list_del(&newnode->peer_list);
|
||||
kfree(newnode);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
|
|
@ -569,10 +569,8 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
|
|||
private_key);
|
||||
list_for_each_entry_safe(peer, temp, &wg->peer_list,
|
||||
peer_list) {
|
||||
if (wg_noise_precompute_static_static(peer))
|
||||
wg_noise_expire_current_peer_keypairs(peer);
|
||||
else
|
||||
wg_peer_remove(peer);
|
||||
BUG_ON(!wg_noise_precompute_static_static(peer));
|
||||
wg_noise_expire_current_peer_keypairs(peer);
|
||||
}
|
||||
wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
|
||||
up_write(&wg->static_identity.lock);
|
||||
|
|
|
@ -46,17 +46,21 @@ void __init wg_noise_init(void)
|
|||
/* Must hold peer->handshake.static_identity->lock */
|
||||
bool wg_noise_precompute_static_static(struct wg_peer *peer)
|
||||
{
|
||||
bool ret = true;
|
||||
bool ret;
|
||||
|
||||
down_write(&peer->handshake.lock);
|
||||
if (peer->handshake.static_identity->has_identity)
|
||||
if (peer->handshake.static_identity->has_identity) {
|
||||
ret = curve25519(
|
||||
peer->handshake.precomputed_static_static,
|
||||
peer->handshake.static_identity->static_private,
|
||||
peer->handshake.remote_static);
|
||||
else
|
||||
} else {
|
||||
u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };
|
||||
|
||||
ret = curve25519(empty, empty, peer->handshake.remote_static);
|
||||
memset(peer->handshake.precomputed_static_static, 0,
|
||||
NOISE_PUBLIC_KEY_LEN);
|
||||
}
|
||||
up_write(&peer->handshake.lock);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -1897,27 +1897,55 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
|
|||
ieee80211_resume_disconnect(vif);
|
||||
}
|
||||
|
||||
static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif)
|
||||
static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
|
||||
{
|
||||
u32 base = mvm->trans->dbg.lmac_error_event_table[0];
|
||||
struct error_table_start {
|
||||
/* cf. struct iwl_error_event_table */
|
||||
u32 valid;
|
||||
u32 error_id;
|
||||
__le32 err_id;
|
||||
} err_info;
|
||||
|
||||
iwl_trans_read_mem_bytes(mvm->trans, base,
|
||||
&err_info, sizeof(err_info));
|
||||
if (!base)
|
||||
return false;
|
||||
|
||||
if (err_info.valid &&
|
||||
err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
|
||||
struct cfg80211_wowlan_wakeup wakeup = {
|
||||
.rfkill_release = true,
|
||||
};
|
||||
ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
|
||||
iwl_trans_read_mem_bytes(trans, base,
|
||||
&err_info, sizeof(err_info));
|
||||
if (err_info.valid && err_id)
|
||||
*err_id = le32_to_cpu(err_info.err_id);
|
||||
|
||||
return !!err_info.valid;
|
||||
}
|
||||
|
||||
static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
u32 err_id;
|
||||
|
||||
/* check for lmac1 error */
|
||||
if (iwl_mvm_rt_status(mvm->trans,
|
||||
mvm->trans->dbg.lmac_error_event_table[0],
|
||||
&err_id)) {
|
||||
if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
|
||||
struct cfg80211_wowlan_wakeup wakeup = {
|
||||
.rfkill_release = true,
|
||||
};
|
||||
ieee80211_report_wowlan_wakeup(vif, &wakeup,
|
||||
GFP_KERNEL);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return err_info.valid;
|
||||
|
||||
/* check if we have lmac2 set and check for error */
|
||||
if (iwl_mvm_rt_status(mvm->trans,
|
||||
mvm->trans->dbg.lmac_error_event_table[1], NULL))
|
||||
return true;
|
||||
|
||||
/* check for umac error */
|
||||
if (iwl_mvm_rt_status(mvm->trans,
|
||||
mvm->trans->dbg.umac_error_event_table, NULL))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
* Copyright (C) 2019 Intel Corporation
|
||||
* Copyright (C) 2020 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
|
@ -30,6 +31,7 @@
|
|||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
* Copyright (C) 2019 Intel Corporation
|
||||
* Copyright (C) 2020 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -528,6 +530,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
|
|||
if (req != mvm->ftm_initiator.req)
|
||||
return;
|
||||
|
||||
iwl_mvm_ftm_reset(mvm);
|
||||
|
||||
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
|
||||
LOCATION_GROUP, 0),
|
||||
0, sizeof(cmd), &cmd))
|
||||
|
@ -641,7 +645,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
|
|||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (!mvm->ftm_initiator.req) {
|
||||
IWL_ERR(mvm, "Got FTM response but have no request?\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -5,10 +5,9 @@
|
|||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
|
@ -28,10 +27,9 @@
|
|||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -2037,7 +2035,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
|
|||
rcu_read_lock();
|
||||
|
||||
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
|
||||
if (IS_ERR(sta)) {
|
||||
if (IS_ERR_OR_NULL(sta)) {
|
||||
rcu_read_unlock();
|
||||
WARN(1, "Can't find STA to configure HE\n");
|
||||
return;
|
||||
|
@ -3293,7 +3291,7 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
|
|||
if (fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
|
||||
iwl_mvm_schedule_session_protection(mvm, vif, 900,
|
||||
min_duration);
|
||||
min_duration, false);
|
||||
else
|
||||
iwl_mvm_protect_session(mvm, vif, duration,
|
||||
min_duration, 500, false);
|
||||
|
|
|
@ -3320,6 +3320,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
|
|||
igtk_cmd.sta_id = cpu_to_le32(sta_id);
|
||||
|
||||
if (remove_key) {
|
||||
/* This is a valid situation for IGTK */
|
||||
if (sta_id == IWL_MVM_INVALID_STA)
|
||||
return 0;
|
||||
|
||||
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
|
||||
} else {
|
||||
struct ieee80211_key_seq seq;
|
||||
|
@ -3574,9 +3578,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
|
|||
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
|
||||
keyconf->keyidx, sta_id);
|
||||
|
||||
if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
|
||||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
|
||||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
|
||||
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
|
||||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
|
||||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
|
||||
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
|
||||
|
||||
if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
|
||||
|
|
|
@ -205,9 +205,15 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
|
|||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
/* Protect the session to hear the TDLS setup response on the channel */
|
||||
iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
|
||||
mutex_lock(&mvm->mutex);
|
||||
if (fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
|
||||
iwl_mvm_schedule_session_protection(mvm, vif, duration,
|
||||
duration, true);
|
||||
else
|
||||
iwl_mvm_protect_session(mvm, vif, duration,
|
||||
duration, 100, true);
|
||||
mutex_unlock(&mvm->mutex);
|
||||
}
|
||||
|
||||
|
|
|
@ -1056,13 +1056,42 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
|
|||
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
|
||||
}
|
||||
|
||||
static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
|
||||
struct iwl_rx_packet *pkt, void *data)
|
||||
{
|
||||
struct iwl_mvm *mvm =
|
||||
container_of(notif_wait, struct iwl_mvm, notif_wait);
|
||||
struct iwl_mvm_session_prot_notif *resp;
|
||||
int resp_len = iwl_rx_packet_payload_len(pkt);
|
||||
|
||||
if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
|
||||
pkt->hdr.group_id != MAC_CONF_GROUP))
|
||||
return true;
|
||||
|
||||
if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
|
||||
IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
|
||||
return true;
|
||||
}
|
||||
|
||||
resp = (void *)pkt->data;
|
||||
|
||||
if (!resp->status)
|
||||
IWL_ERR(mvm,
|
||||
"TIME_EVENT_NOTIFICATION received but not executed\n");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
u32 duration, u32 min_duration)
|
||||
u32 duration, u32 min_duration,
|
||||
bool wait_for_notif)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
|
||||
|
||||
const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
|
||||
MAC_CONF_GROUP, 0) };
|
||||
struct iwl_notification_wait wait_notif;
|
||||
struct iwl_mvm_session_prot_cmd cmd = {
|
||||
.id_and_color =
|
||||
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
|
||||
|
@ -1071,7 +1100,6 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
|
|||
.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
|
||||
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
|
||||
};
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
|
@ -1092,14 +1120,35 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
|
|||
IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
|
||||
le32_to_cpu(cmd.duration_tu));
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
|
||||
MAC_CONF_GROUP, 0),
|
||||
0, sizeof(cmd), &cmd);
|
||||
if (ret) {
|
||||
if (!wait_for_notif) {
|
||||
if (iwl_mvm_send_cmd_pdu(mvm,
|
||||
iwl_cmd_id(SESSION_PROTECTION_CMD,
|
||||
MAC_CONF_GROUP, 0),
|
||||
0, sizeof(cmd), &cmd)) {
|
||||
IWL_ERR(mvm,
|
||||
"Couldn't send the SESSION_PROTECTION_CMD\n");
|
||||
spin_lock_bh(&mvm->time_event_lock);
|
||||
iwl_mvm_te_clear_data(mvm, te_data);
|
||||
spin_unlock_bh(&mvm->time_event_lock);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
|
||||
notif, ARRAY_SIZE(notif),
|
||||
iwl_mvm_session_prot_notif, NULL);
|
||||
|
||||
if (iwl_mvm_send_cmd_pdu(mvm,
|
||||
iwl_cmd_id(SESSION_PROTECTION_CMD,
|
||||
MAC_CONF_GROUP, 0),
|
||||
0, sizeof(cmd), &cmd)) {
|
||||
IWL_ERR(mvm,
|
||||
"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
|
||||
spin_lock_bh(&mvm->time_event_lock);
|
||||
iwl_mvm_te_clear_data(mvm, te_data);
|
||||
spin_unlock_bh(&mvm->time_event_lock);
|
||||
"Couldn't send the SESSION_PROTECTION_CMD\n");
|
||||
iwl_remove_notification(&mvm->notif_wait, &wait_notif);
|
||||
} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
|
||||
TU_TO_JIFFIES(100))) {
|
||||
IWL_ERR(mvm,
|
||||
"Failed to protect session until session protection\n");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -250,10 +250,12 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
|
|||
* @mvm: the mvm component
|
||||
* @vif: the virtual interface for which the protection issued
|
||||
* @duration: the duration of the protection
|
||||
* @wait_for_notif: if true, will block until the start of the protection
|
||||
*/
|
||||
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
u32 duration, u32 min_duration);
|
||||
u32 duration, u32 min_duration,
|
||||
bool wait_for_notif);
|
||||
|
||||
/**
|
||||
* iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
* Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2019 Intel Corporation
|
||||
* Copyright(c) 2019 - 2020 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
|
@ -31,7 +31,7 @@
|
|||
* Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2019 Intel Corporation
|
||||
* Copyright(c) 2019 - 2020 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
|
@ -234,7 +234,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
|
|||
.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
|
||||
};
|
||||
struct iwl_ext_dts_measurement_cmd extcmd = {
|
||||
.control_mode = cpu_to_le32(DTS_AUTOMATIC),
|
||||
.control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE),
|
||||
};
|
||||
u32 cmdid;
|
||||
|
||||
|
@ -734,7 +734,8 @@ static struct thermal_zone_device_ops tzone_ops = {
|
|||
static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
|
||||
{
|
||||
int i;
|
||||
char name[] = "iwlwifi";
|
||||
char name[16];
|
||||
static atomic_t counter = ATOMIC_INIT(0);
|
||||
|
||||
if (!iwl_mvm_is_tt_in_fw(mvm)) {
|
||||
mvm->tz_device.tzone = NULL;
|
||||
|
@ -744,6 +745,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
|
|||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
|
||||
|
||||
sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
|
||||
mvm->tz_device.tzone = thermal_zone_device_register(name,
|
||||
IWL_MAX_DTS_TRIPS,
|
||||
IWL_WRITABLE_TRIPS_MSK,
|
||||
|
|
|
@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
|
|||
rates_max = rates_eid[1];
|
||||
if (rates_max > MAX_RATES) {
|
||||
lbs_deb_join("invalid rates");
|
||||
rcu_read_unlock();
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
rates = cmd.bss.rates;
|
||||
|
|
|
@ -2884,6 +2884,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
|
|||
vs_param_set->header.len =
|
||||
cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
|
||||
& 0x00FF) + 2);
|
||||
if (le16_to_cpu(vs_param_set->header.len) >
|
||||
MWIFIEX_MAX_VSIE_LEN) {
|
||||
mwifiex_dbg(priv->adapter, ERROR,
|
||||
"Invalid param length!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
|
||||
le16_to_cpu(vs_param_set->header.len));
|
||||
*buffer += le16_to_cpu(vs_param_set->header.len) +
|
||||
|
|
|
@ -232,6 +232,7 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
|
|||
|
||||
if (country_ie_len >
|
||||
(IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
|
||||
rcu_read_unlock();
|
||||
mwifiex_dbg(priv->adapter, ERROR,
|
||||
"11D: country_ie_len overflow!, deauth AP\n");
|
||||
return -EINVAL;
|
||||
|
|
|
@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
|
|||
"WMM Parameter Set Count: %d\n",
|
||||
wmm_param_ie->qos_info_bitmap & mask);
|
||||
|
||||
if (wmm_param_ie->vend_hdr.len + 2 >
|
||||
sizeof(struct ieee_types_wmm_parameter))
|
||||
break;
|
||||
|
||||
memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
|
||||
wmm_ie, wmm_param_ie,
|
||||
wmm_param_ie->vend_hdr.len + 2);
|
||||
|
|
|
@ -92,8 +92,9 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
|
|||
|
||||
static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
|
||||
{
|
||||
u8 val, *eeprom = dev->mt76.eeprom.data;
|
||||
u8 *eeprom = dev->mt76.eeprom.data;
|
||||
u8 tx_mask, rx_mask, max_nss;
|
||||
u32 val;
|
||||
|
||||
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
|
||||
eeprom[MT_EE_WIFI_CONF]);
|
||||
|
|
|
@ -281,27 +281,26 @@ static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
|
|||
rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
|
||||
}
|
||||
|
||||
static bool rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
|
||||
static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
/* wait 100ms for wow firmware to finish work */
|
||||
msleep(100);
|
||||
|
||||
if (wow_enable) {
|
||||
if (!rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
|
||||
ret = 0;
|
||||
if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
|
||||
goto wow_fail;
|
||||
} else {
|
||||
if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) == 0 &&
|
||||
rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE) == 0)
|
||||
ret = 0;
|
||||
if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
|
||||
rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
|
||||
goto wow_fail;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
rtw_err(rtwdev, "failed to check wow status %s\n",
|
||||
wow_enable ? "enabled" : "disabled");
|
||||
return 0;
|
||||
|
||||
return ret;
|
||||
wow_fail:
|
||||
rtw_err(rtwdev, "failed to check wow status %s\n",
|
||||
wow_enable ? "enabled" : "disabled");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,
|
||||
|
|
|
@ -7604,7 +7604,6 @@ megasas_resume(struct pci_dev *pdev)
|
|||
int rval;
|
||||
struct Scsi_Host *host;
|
||||
struct megasas_instance *instance;
|
||||
int irq_flags = PCI_IRQ_LEGACY;
|
||||
u32 status_reg;
|
||||
|
||||
instance = pci_get_drvdata(pdev);
|
||||
|
@ -7673,16 +7672,15 @@ megasas_resume(struct pci_dev *pdev)
|
|||
atomic_set(&instance->ldio_outstanding, 0);
|
||||
|
||||
/* Now re-enable MSI-X */
|
||||
if (instance->msix_vectors) {
|
||||
irq_flags = PCI_IRQ_MSIX;
|
||||
if (instance->smp_affinity_enable)
|
||||
irq_flags |= PCI_IRQ_AFFINITY;
|
||||
if (instance->msix_vectors)
|
||||
megasas_alloc_irq_vectors(instance);
|
||||
|
||||
if (!instance->msix_vectors) {
|
||||
rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
|
||||
PCI_IRQ_LEGACY);
|
||||
if (rval < 0)
|
||||
goto fail_reenable_msix;
|
||||
}
|
||||
rval = pci_alloc_irq_vectors(instance->pdev, 1,
|
||||
instance->msix_vectors ?
|
||||
instance->msix_vectors : 1, irq_flags);
|
||||
if (rval < 0)
|
||||
goto fail_reenable_msix;
|
||||
|
||||
megasas_setup_reply_map(instance);
|
||||
|
||||
|
|
|
@ -2377,7 +2377,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
|||
ts->buf_valid_size = sizeof(*resp);
|
||||
} else
|
||||
PM8001_IO_DBG(pm8001_ha,
|
||||
pm8001_printk("response to large\n"));
|
||||
pm8001_printk("response too large\n"));
|
||||
}
|
||||
if (pm8001_dev)
|
||||
pm8001_dev->running_req--;
|
||||
|
|
|
@ -2519,12 +2519,6 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
|||
/* Driver Debug Functions. */
|
||||
/****************************************************************************/
|
||||
|
||||
static inline int
|
||||
ql_mask_match(uint level)
|
||||
{
|
||||
return (level & ql2xextended_error_logging) == level;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is for formatting and logging debug information.
|
||||
* It is to be used when vha is available. It formats the message
|
||||
|
|
|
@ -374,3 +374,9 @@ extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
|
|||
extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
|
||||
struct qla_hw_data *);
|
||||
extern int qla24xx_soft_reset(struct qla_hw_data *);
|
||||
|
||||
static inline int
|
||||
ql_mask_match(uint level)
|
||||
{
|
||||
return (level & ql2xextended_error_logging) == level;
|
||||
}
|
||||
|
|
|
@ -1939,6 +1939,18 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
|
|||
inbuf = (uint32_t *)&sts->nvme_ersp_data;
|
||||
outbuf = (uint32_t *)fd->rspaddr;
|
||||
iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
|
||||
if (unlikely(iocb->u.nvme.rsp_pyld_len >
|
||||
sizeof(struct nvme_fc_ersp_iu))) {
|
||||
if (ql_mask_match(ql_dbg_io)) {
|
||||
WARN_ONCE(1, "Unexpected response payload length %u.\n",
|
||||
iocb->u.nvme.rsp_pyld_len);
|
||||
ql_log(ql_log_warn, fcport->vha, 0x5100,
|
||||
"Unexpected response payload length %u.\n",
|
||||
iocb->u.nvme.rsp_pyld_len);
|
||||
}
|
||||
iocb->u.nvme.rsp_pyld_len =
|
||||
sizeof(struct nvme_fc_ersp_iu);
|
||||
}
|
||||
iter = iocb->u.nvme.rsp_pyld_len >> 2;
|
||||
for (; iter; iter--)
|
||||
*outbuf++ = swab32(*inbuf++);
|
||||
|
|
|
@ -546,7 +546,7 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
|
|||
u8 lun)
|
||||
{
|
||||
if (!dev_info || !dev_info->max_lu_supported) {
|
||||
pr_err("Max General LU supported by UFS isn't initilized\n");
|
||||
pr_err("Max General LU supported by UFS isn't initialized\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ static int serial8250_ioc3_probe(struct platform_device *pdev)
|
|||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
membase = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
|
||||
membase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
|
||||
if (!membase)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
|
|||
# Files generated that shall be removed upon make clean
|
||||
clean-files := consolemap_deftbl.c defkeymap.c
|
||||
|
||||
hostprogs-y += conmakehash
|
||||
hostprogs += conmakehash
|
||||
|
||||
quiet_cmd_conmk = CONMK $@
|
||||
cmd_conmk = $(obj)/conmakehash $< > $@
|
||||
|
|
|
@ -18,7 +18,7 @@ obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
|
|||
|
||||
# How to generate logo's
|
||||
|
||||
hostprogs-y := pnmtologo
|
||||
hostprogs := pnmtologo
|
||||
|
||||
# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
|
||||
quiet_cmd_logo = LOGO $@
|
||||
|
|
|
@ -7,7 +7,7 @@ obj-$(CONFIG_ZORRO) += zorro.o zorro-driver.o zorro-sysfs.o
|
|||
obj-$(CONFIG_PROC_FS) += proc.o
|
||||
obj-$(CONFIG_ZORRO_NAMES) += names.o
|
||||
|
||||
hostprogs-y := gen-devlist
|
||||
hostprogs := gen-devlist
|
||||
|
||||
# Files generated that shall be removed upon make clean
|
||||
clean-files := devlist.h
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue