Merge branch 'for-davem' into for-next

Al Viro 2015-04-11 22:27:19 -04:00
commit 39c853ebfe
1537 changed files with 47887 additions and 28928 deletions

View File

@ -188,6 +188,14 @@ Description:
Indicates the interface unique physical port identifier within
the NIC, as a string.
What: /sys/class/net/<iface>/phys_port_name
Date: March 2015
KernelVersion: 4.0
Contact: netdev@vger.kernel.org
Description:
Indicates the interface physical port name within the NIC,
as a string.
What: /sys/class/net/<iface>/speed
Date: October 2009
KernelVersion: 2.6.33

View File

@ -24,6 +24,14 @@ Description:
Indicates the number of transmit timeout events seen by this
network interface transmit queue.
What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
Date: March 2015
KernelVersion: 4.1
Contact: netdev@vger.kernel.org
Description:
A max-rate in Mbps set for the queue. A value of zero means the
limit is disabled, which is the default.
What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
Date: November 2010
KernelVersion: 2.6.38

View File

@ -14,7 +14,11 @@ Required properties for all the ethernet interfaces:
- "enet_csr": Ethernet control and status register address space
- "ring_csr": Descriptor ring control and status register address space
- "ring_cmd": Descriptor ring command register address space
- interrupts: Ethernet main interrupt
- interrupts: Two interrupt specifiers can be specified.
- First is the Rx interrupt. This irq is mandatory.
- Second is the Tx completion interrupt.
This is supported only on SGMII-based 1GbE and 10GbE interfaces.
- port-id: Port number (0 or 1)
- clocks: Reference to the clock entry.
- local-mac-address: MAC address assigned to this device
- phy-connection-type: Interface type between ethernet device and PHY device
@ -49,6 +53,7 @@ Example:
<0x0 0X10000000 0x0 0X200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0x3c 0x4>;
port-id = <0>;
clocks = <&menetclk 0>;
local-mac-address = [00 01 73 00 00 01];
phy-connection-type = "rgmii";

View File

@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
(DSA_MAX_SWITCHES).
Each of these switch child nodes should have the following required properties:
- reg : Describes the switch address on the MII bus
- reg : Contains two fields. The first one describes the
address on the MII bus. The second is the switch
number that must be unique in cascaded configurations
- #address-cells : Must be 1
- #size-cells : Must be 0

View File

@ -6,11 +6,14 @@ Required properties:
- spi-max-frequency: maximal bus speed; should be set to 7500000 depending
on the sync or async operation mode
- reg: the chipselect index
- interrupts: the interrupt generated by the device
- interrupts: the interrupt generated by the device. Trigger types other
than high-level can cause deadlocks while handling the ISR.
Optional properties:
- reset-gpio: GPIO spec for the rstn pin
- sleep-gpio: GPIO spec for the slp_tr pin
- xtal-trim: u8 value for fine tuning the internal capacitance
arrays of xtal pins: 0 = +0 pF, 0xf = +4.5 pF
Example:
@ -18,6 +21,7 @@ Example:
compatible = "atmel,at86rf231";
spi-max-frequency = <7500000>;
reg = <0>;
interrupts = <19 1>;
interrupts = <19 4>;
interrupt-parent = <&gpio3>;
xtal-trim = /bits/ 8 <0x06>;
};

View File

@ -13,11 +13,15 @@ Required properties:
- cca-gpio: GPIO spec for the CCA pin
- vreg-gpio: GPIO spec for the VREG pin
- reset-gpio: GPIO spec for the RESET pin
Optional properties:
- amplified: include if the CC2520 is connected to a CC2591 amplifier
Example:
cc2520@0 {
compatible = "ti,cc2520";
reg = <0>;
spi-max-frequency = <4000000>;
amplified;
pinctrl-names = "default";
pinctrl-0 = <&cc2520_cape_pins>;
fifo-gpio = <&gpio1 18 0>;

View File

@ -49,6 +49,7 @@ Required properties:
- compatible: Should be "ti,netcp-1.0"
- clocks: phandle to the reference clocks for the subsystem.
- dma-id: Navigator packet dma instance id.
- ranges: address range of NetCP (includes Ethernet SS, PA and SA)
Optional properties:
- reg: register location and the size for the following register
@ -64,10 +65,30 @@ NetCP device properties: Device specification for NetCP sub-modules.
1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications.
Required properties:
- label: Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb.
- compatible: Must be one of the below:
"ti,netcp-gbe" for 1GbE on NetCP 1.4
"ti,netcp-gbe-5" for 1GbE on NetCP 1.5 (N=5 ports)
"ti,netcp-gbe-9" for 1GbE on NetCP 1.5 (N=9 ports)
"ti,netcp-gbe-2" for 1GbE on NetCP 1.5 (N=2 ports)
"ti,netcp-xgbe" for 10GbE
- reg: register location and the size for the following register
regions in the specified order.
- subsystem registers
- serdes registers
- switch subsystem registers
- sgmii port3/4 module registers (only for NetCP 1.4)
- switch module registers
- serdes registers (only for 10G)
For NetCP 1.4 ethss, the order is:
index #0 - switch subsystem registers
index #1 - sgmii port3/4 module registers
index #2 - switch module registers
For NetCP 1.5 ethss (9-port, 5-port and 2-port), the order is:
index #0 - switch subsystem registers
index #1 - switch module registers
index #2 - serdes registers
- tx-channel: the navigator packet dma channel name for tx.
- tx-queue: the navigator queue number associated with the tx dma channel.
- interfaces: specification for each of the switch ports to be registered as a
@ -120,14 +141,13 @@ Optional properties:
Example binding:
netcp: netcp@2090000 {
netcp: netcp@2000000 {
reg = <0x2620110 0x8>;
reg-names = "efuse";
compatible = "ti,netcp-1.0";
#address-cells = <1>;
#size-cells = <1>;
ranges;
ranges = <0 0x2000000 0xfffff>;
clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>;
dma-coherent;
/* big-endian; */
@ -137,9 +157,9 @@ netcp: netcp@2090000 {
#address-cells = <1>;
#size-cells = <1>;
ranges;
gbe@0x2090000 {
gbe@90000 {
label = "netcp-gbe";
reg = <0x2090000 0xf00>;
reg = <0x90000 0x300>, <0x90400 0x400>, <0x90800 0x700>;
/* enable-ale; */
tx-queue = <648>;
tx-channel = <8>;

View File

@ -2,10 +2,13 @@
Required properties:
- compatible: Should be "cdns,[<chip>-]{macb|gem}"
Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs.
Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs or the 10/100Mbit IP
available on sama5d3 SoCs.
Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
the Cadence GEM, or the generic form: "cdns,gem".
Use "cdns,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
Use "cdns,sama5d4-gem" for the Gigabit IP available on Atmel sama5d4 SoCs.
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
- phy-mode: See ethernet.txt file in the same directory.

View File

@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
byte 4: 0 y6 y5 y4 y3 y2 y1 y0
byte 5: 0 z6 z5 z4 z3 z2 z1 z0
Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
the DualPoint Stick.
Dualpoint device -- interleaved packet format
---------------------------------------------
@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
byte 7: 0 y6 y5 y4 y3 y2 y1 y0
byte 8: 0 z6 z5 z4 z3 z2 z1 z0
Devices which use the interleaving format normally send standard PS/2 mouse
packets for the DualPoint Stick + ALPS Absolute Mode packets for the
touchpad, switching to the interleaved packet format when both the stick and
the touchpad are used at the same time.
ALPS Absolute Mode - Protocol Version 3
---------------------------------------

View File

@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
The kernel does not provide button emulation for such devices but treats
them as any other INPUT_PROP_BUTTONPAD device.
INPUT_PROP_ACCELEROMETER
-------------------------
Directional axes on this device (absolute and/or relative x, y, z) represent
accelerometer data. All other axes retain their meaning. A device must not mix
regular directional axes and accelerometer axes on the same event node.
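For illustration (a sketch, not part of this document), a hypothetical
driver for such a device could declare the property and its axes like
this, where 'input' is its struct input_dev:

    __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
    input_set_abs_params(input, ABS_X, -512, 512, 4, 0);
    input_set_abs_params(input, ABS_Y, -512, 512, 4, 0);
    input_set_abs_params(input, ABS_Z, -512, 512, 4, 0);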
Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.

View File

@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
The type of approaching tool. A lot of kernel drivers cannot distinguish
between different tool types, such as a finger or a pen. In such cases, the
event should be omitted. The protocol currently supports MT_TOOL_FINGER and
MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
drivers should instead use input_mt_report_slot_state().
event should be omitted. The protocol currently supports MT_TOOL_FINGER,
MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
by input core; drivers should instead use input_mt_report_slot_state().
A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
device, because the firmware may not be able to determine which tool is being
used when it first appears.
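As a brief sketch (illustrative, not part of this commit), a type B
driver might report a palm contact through the input core as follows,
assuming 'dev' was set up with input_mt_init_slots() and 'slot', 'x',
'y' and 'is_palm' come from the hardware report:

    input_mt_slot(dev, slot);
    /* the input core emits ABS_MT_TOOL_TYPE on the driver's behalf */
    input_mt_report_slot_state(dev,
                               is_palm ? MT_TOOL_PALM : MT_TOOL_FINGER,
                               true);
    input_report_abs(dev, ABS_MT_POSITION_X, x);
    input_report_abs(dev, ABS_MT_POSITION_Y, y);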
ABS_MT_BLOB_ID

View File

@ -22,7 +22,8 @@ This file contains
4.1.3 RAW socket option CAN_RAW_LOOPBACK
4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
4.1.5 RAW socket option CAN_RAW_FD_FRAMES
4.1.6 RAW socket returned message flags
4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
4.1.7 RAW socket returned message flags
4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
4.2.1 Broadcast Manager operations
4.2.2 Broadcast Manager message flags
@ -601,7 +602,22 @@ solution for a couple of reasons:
CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU.
The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
4.1.6 RAW socket returned message flags
4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
The CAN_RAW socket can set multiple CAN identifier specific filters that
lead to multiple filters in the af_can.c filter processing. These filters
are independent from each other, which leads to logically OR'ed filters
when applied (see 4.1.1).
This socket option joins the given CAN filters so that only CAN frames
that match *all* given CAN filters are passed to user space. The
semantic of the applied filters is therefore changed to a logical AND.
This is useful especially when the filterset is a combination of filters
where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
CAN ID ranges from the incoming traffic.
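As an illustration (a sketch, not part of this document), joined filters
can notch a single CAN ID out of an otherwise passed ID range by
combining a range filter with a CAN_INV_FILTER filter. Here 's' is
assumed to be a bound CAN_RAW socket:

    struct can_filter rfilter[2];
    int join_filters = 1;

    rfilter[0].can_id   = 0x100;                  /* pass IDs 0x100..0x10F */
    rfilter[0].can_mask = 0x7F0;
    rfilter[1].can_id   = 0x103 | CAN_INV_FILTER; /* ... but not ID 0x103 */
    rfilter[1].can_mask = CAN_SFF_MASK;

    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
    setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS,
               &join_filters, sizeof(join_filters));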
4.1.7 RAW socket returned message flags
When using the recvmsg() call, msg->msg_flags may contain the following flags:

View File

@ -280,7 +280,8 @@ Possible BPF extensions are shown in the following table:
rxhash skb->hash
cpu raw_smp_processor_id()
vlan_tci skb_vlan_tag_get(skb)
vlan_pr skb_vlan_tag_present(skb)
vlan_avail skb_vlan_tag_present(skb)
vlan_tpid skb->vlan_proto
rand prandom_u32()
These extensions can also be prefixed with '#'.
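For illustration (an assumption-flagged sketch, not from this document),
"ld #vlan_avail" in bpf_asm corresponds to a classic BPF ancillary load,
which can also be built by hand in C using <linux/filter.h>:

    struct sock_filter vlan_avail_insn =
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT);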

View File

@ -388,6 +388,16 @@ tcp_mtu_probing - INTEGER
1 - Disabled by default, enabled when an ICMP black hole is detected
2 - Always enabled, use initial MSS of tcp_base_mss.
tcp_probe_interval - INTEGER
Controls how often to start TCP Packetization-Layer Path MTU
Discovery reprobe. The default is reprobing every 10 minutes as
per RFC4821.
tcp_probe_threshold - INTEGER
Controls when TCP Packetization-Layer Path MTU Discovery probing
will stop with respect to the width of the search range in bytes.
Default is 8 bytes.
tcp_no_metrics_save - BOOLEAN
By default, TCP saves various connection metrics in the route cache
when the connection closes, so that connections established in the
@ -1116,11 +1126,23 @@ arp_accept - BOOLEAN
gratuitous arp frame, the arp table will be updated regardless
if this setting is on or off.
mcast_solicit - INTEGER
The maximum number of multicast probes in INCOMPLETE state,
when the associated hardware address is unknown. Defaults
to 3.
ucast_solicit - INTEGER
The maximum number of unicast probes in PROBE state, when
the hardware address is being reconfirmed. Defaults to 3.
app_solicit - INTEGER
The maximum number of probes to send to the user space ARP daemon
via netlink before dropping back to multicast probes (see
mcast_solicit). Defaults to 0.
mcast_resolicit). Defaults to 0.
mcast_resolicit - INTEGER
The maximum number of multicast probes after unicast and
app probes in PROBE state. Defaults to 0.
disable_policy - BOOLEAN
Disable IPSEC policy (SPD) for this interface
@ -1198,6 +1220,17 @@ anycast_src_echo_reply - BOOLEAN
FALSE: disabled
Default: FALSE
idgen_delay - INTEGER
Controls the delay in seconds after which to retry stable
privacy address generation if a DAD conflict is detected.
Default: 1 (as specified in RFC7217)
idgen_retries - INTEGER
Controls the number of retries to generate a stable privacy
address if a DAD conflict is detected.
Default: 3 (as specified in RFC7217)
mld_qrv - INTEGER
Controls the MLD query robustness variable (see RFC3810 9.1).
Default: 2 (as specified by RFC3810 9.1)
@ -1518,6 +1551,20 @@ use_optimistic - BOOLEAN
0: disabled (default)
1: enabled
stable_secret - IPv6 address
This IPv6 address will be used as a secret to generate IPv6
addresses for link-local addresses and autoconfigured
ones. All addresses generated after setting this secret will
be stable privacy ones by default. This can be changed via the
addrgenmode option of ip link. conf/default/stable_secret is used
as the secret for the namespace; the interface-specific ones can
override that. Writes to conf/all/stable_secret are refused.
It is recommended to generate this secret during installation
of a system and keep it stable after that.
By default the stable secret is unset.
icmp/*:
ratelimit - INTEGER
Limit the maximal rates for sending ICMPv6 packets.

View File

@ -22,6 +22,27 @@ backup_only - BOOLEAN
If set, disable the director function while the server is
in backup mode to avoid packet loops for DR/TUN methods.
conn_reuse_mode - INTEGER
1 - default
Controls how ipvs will deal with connections that are detected
to reuse a port. It is a bitmap, with the values being:
0: disable any special handling on port reuse. The new
connection will be delivered to the same real server that was
servicing the previous connection. This will effectively
disable expire_nodest_conn.
bit 1: enable rescheduling of new connections when it is safe.
That is, whenever expire_nodest_conn is set and, for TCP sockets,
when the connection is in TIME_WAIT state (which is only possible
if you use NAT mode).
bit 2: it is bit 1 plus, for TCP connections, when connections
are in FIN_WAIT state, as this is the last state seen by the load
balancer in Direct Routing mode. This bit helps when adding new
real servers to a very busy cluster.
conntrack - BOOLEAN
0 - disabled (default)
not 0 - enabled

View File

@ -0,0 +1,20 @@
/proc/sys/net/mpls/* Variables:
platform_labels - INTEGER
Number of entries in the platform label table. It is not
possible to configure forwarding for label values equal to or
greater than the number of platform labels.
A dense utilization of the entries in the platform label table
is possible and expected as the platform labels are locally
allocated.
If the number of platform label table entries is set to 0 no
label will be recognized by the kernel and mpls forwarding
will be disabled.
Reducing this value will remove all label routing entries that
no longer fit in the table.
Possible values: 0 - 1048575
Default: 0

View File

@ -440,9 +440,10 @@ and the following flags apply:
+++ Capture process:
from include/linux/if_packet.h
#define TP_STATUS_COPY 2
#define TP_STATUS_LOSING 4
#define TP_STATUS_CSUMNOTREADY 8
#define TP_STATUS_COPY (1 << 1)
#define TP_STATUS_LOSING (1 << 2)
#define TP_STATUS_CSUMNOTREADY (1 << 3)
#define TP_STATUS_CSUM_VALID (1 << 7)
TP_STATUS_COPY : This flag indicates that the frame (and associated
meta information) has been truncated because it's
@ -466,6 +467,12 @@ TP_STATUS_CSUMNOTREADY: currently it's used for outgoing IP packets which
reading the packet we should not try to check the
checksum.
TP_STATUS_CSUM_VALID : This flag indicates that at least the transport
header checksum of the packet has already been
validated on the kernel side. If the flag is not set
then we are free to check the checksum ourselves,
provided that TP_STATUS_CSUMNOTREADY is also not set.
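A receive-path sketch using these flags (illustrative, not part of this
document), where 'hdr' points at a frame's tpacket2_hdr in the mapped ring:

    if (hdr->tp_status & TP_STATUS_CSUM_VALID) {
            /* kernel already validated the transport checksum */
    } else if (!(hdr->tp_status & TP_STATUS_CSUMNOTREADY)) {
            /* neither flag set: we may verify the checksum ourselves */
    } else {
            /* checksum not filled in yet (outgoing): don't check it */
    }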
for convenience there are also the following defines:
#define TP_STATUS_KERNEL 0

View File

@ -3,13 +3,11 @@
HOWTO for the linux packet generator
------------------------------------
Date: 041221
Enable CONFIG_NET_PKTGEN to compile and build pktgen.o either in kernel
or as module. Module is preferred. insmod pktgen if needed. Once running
pktgen creates a thread on each CPU where each thread has affinity to its CPU.
Monitoring and controlling is done via /proc. Easiest to select a suitable
a sample script and configure.
Enable CONFIG_NET_PKTGEN to compile and build pktgen either in-kernel
or as a module. A module is preferred; modprobe pktgen if needed. Once
running, pktgen creates a thread for each CPU with affinity to that CPU.
Monitoring and controlling is done via /proc. It is easiest to select a
suitable sample script and configure that.
On a dual CPU:
@ -27,7 +25,7 @@ For monitoring and control pktgen creates:
Tuning NIC for max performance
==============================
The default NIC setting are (likely) not tuned for pktgen's artificial
The default NIC settings are (likely) not tuned for pktgen's artificial
overload type of benchmarking, as this could hurt the normal use-case.
Specifically increasing the TX ring buffer in the NIC:
@ -35,20 +33,20 @@ Specifically increasing the TX ring buffer in the NIC:
A larger TX ring can improve pktgen's performance, while it can hurt
in the general case, 1) because the TX ring buffer might get larger
than the CPUs L1/L2 cache, 2) because it allow more queueing in the
than the CPU's L1/L2 cache, 2) because it allows more queueing in the
NIC HW layer (which is bad for bufferbloat).
One should be careful to conclude, that packets/descriptors in the HW
One should hesitate to conclude that packets/descriptors in the HW
TX ring cause delay. Drivers usually delay cleaning up the
ring-buffers (for various performance reasons), thus packets stalling
the TX ring, might just be waiting for cleanup.
ring-buffers for various performance reasons, and packets stalling
the TX ring might just be waiting for cleanup.
This cleanup issues is specifically the case, for the driver ixgbe
(Intel 82599 chip). This driver (ixgbe) combine TX+RX ring cleanups,
This cleanup issue is specifically the case for the driver ixgbe
(Intel 82599 chip). This driver (ixgbe) combines TX+RX ring cleanups,
and the cleanup interval is affected by the ethtool --coalesce setting
of parameter "rx-usecs".
For ixgbe use e.g "30" resulting in approx 33K interrupts/sec (1/30*10^6):
For ixgbe use e.g. "30" resulting in approx 33K interrupts/sec (1/30*10^6):
# ethtool -C ethX rx-usecs 30
@ -60,15 +58,16 @@ Running:
Stopped: eth1
Result: OK: max_before_softirq=10000
Most important the devices assigned to thread. Note! A device can only belong
to one thread.
Most important are the devices assigned to the thread. Note that a
device can only belong to one thread.
Viewing devices
===============
Parm section holds configured info. Current hold running stats.
Result is printed after run or after interruption. Example:
The Params section holds configured information. The Current section
holds running statistics. The Result is printed after a run or after
interruption. Example:
/proc/net/pktgen/eth1
@ -93,7 +92,8 @@ Result: OK: 13101142(c12220741+d880401) usec, 10000000 (60byte,0frags)
Configuring threads and devices
================================
This is done via the /proc interface easiest done via pgset in the scripts
This is done via the /proc interface, and most easily done via pgset
as defined in the sample scripts.
Examples:
@ -192,10 +192,11 @@ Examples:
pgset "rate 300M" set rate to 300 Mb/s
pgset "ratep 1000000" set rate to 1Mpps
Example scripts
===============
Sample scripts
==============
A collection of small tutorial scripts for pktgen is in examples dir.
A collection of small tutorial scripts for pktgen is in the
samples/pktgen directory:
pktgen.conf-1-1 # 1 CPU 1 dev
pktgen.conf-1-2 # 1 CPU 2 dev
@ -206,25 +207,26 @@ pktgen.conf-1-1-ip6 # 1 CPU 1 dev ipv6
pktgen.conf-1-1-ip6-rdos # 1 CPU 1 dev ipv6 w. route DoS
pktgen.conf-1-1-flows # 1 CPU 1 dev multiple flows.
Run in shell: ./pktgen.conf-X-Y It does all the setup including sending.
Run in shell: ./pktgen.conf-X-Y
This does all the setup including sending.
Interrupt affinity
===================
Note when adding devices to a specific CPU there good idea to also assign
/proc/irq/XX/smp_affinity so the TX-interrupts gets bound to the same CPU.
as this reduces cache bouncing when freeing skb's.
Note that when adding devices to a specific CPU it is a good idea to
also assign /proc/irq/XX/smp_affinity so that the TX interrupts are bound
to the same CPU. This reduces cache bouncing when freeing skbs.
Enable IPsec
============
Default IPsec transformation with ESP encapsulation plus Transport mode
could be enabled by simply setting:
Default IPsec transformation with ESP encapsulation plus transport mode
can be enabled by simply setting:
pgset "flag IPSEC"
pgset "flows 1"
To avoid breaking existing testbed scripts for using AH type and tunnel mode,
user could use "pgset spi SPI_VALUE" to specify which formal of transformation
you can use "pgset spi SPI_VALUE" to specify which transformation mode
to employ.

View File

@ -38,7 +38,7 @@ The corresponding adapter's LED will blink multiple times.
3. Features supported:
a. Jumbo frames. Xframe I/II supports MTU up to 9600 bytes,
modifiable using ifconfig command.
modifiable using ip command.
b. Offloads. Supports checksum offload(TCP/UDP/IP) on transmit
and receive, TSO.

View File

@ -421,6 +421,15 @@ best CPUs to share a given queue are probably those that share the cache
with the CPU that processes transmit completions for that queue
(transmit interrupts).
Per TX Queue rate limitation:
=============================
These are rate-limitation mechanisms implemented by HW, where currently
only a max-rate attribute is supported, set by writing a Mbps value to
/sys/class/net/<dev>/queues/tx-<n>/tx_maxrate
A value of zero means the limit is disabled, and this is the default.
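For illustration (not part of this document), capping queue 0 of a
hypothetical eth0 at 100 Mbps from C (using <fcntl.h> and <unistd.h>),
equivalent to: echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate

    int fd = open("/sys/class/net/eth0/queues/tx-0/tx_maxrate", O_WRONLY);

    if (fd >= 0) {
            write(fd, "100", 3);    /* writing "0" disables the limit */
            close(fd);
    }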
Further Information
===================

View File

@ -39,7 +39,7 @@ iii) PCI-SIG's I/O Virtualization
iv) Jumbo frames
X3100 Series supports MTU up to 9600 bytes, modifiable using
ifconfig command.
ip command.
v) Offloads supported: (Enabled by default)
Checksum offload (TCP/UDP/IP) on transmit and receive paths

View File

@ -637,8 +637,7 @@ F: drivers/gpu/drm/radeon/radeon_kfd.h
F: include/uapi/linux/kfd_ioctl.h
AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <herrmann.der.user@googlemail.com>
L: amd64-microcode@amd64.org
M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/amd*
@ -1186,7 +1185,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-mvebu/
F: drivers/rtc/armada38x-rtc
F: drivers/rtc/rtc-armada38x.c
ARM/Marvell Berlin SoC support
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@ -1362,6 +1361,7 @@ F: drivers/i2c/busses/i2c-rk3x.c
F: drivers/*/*rockchip*
F: drivers/*/*/*rockchip*
F: sound/soc/rockchip/
N: rockchip
ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene@kernel.org>
@ -1675,8 +1675,8 @@ F: drivers/misc/eeprom/at24.c
F: include/linux/platform_data/at24.h
ATA OVER ETHERNET (AOE) DRIVER
M: "Ed L. Cashin" <ecashin@coraid.com>
W: http://support.coraid.com/support/linux
M: "Ed L. Cashin" <ed.cashin@acm.org>
W: http://www.openaoe.org/
S: Supported
F: Documentation/aoe/
F: drivers/block/aoe/
@ -3252,6 +3252,13 @@ S: Maintained
F: Documentation/hwmon/dme1737
F: drivers/hwmon/dme1737.c
DMI/SMBIOS SUPPORT
M: Jean Delvare <jdelvare@suse.de>
S: Maintained
F: drivers/firmware/dmi-id.c
F: drivers/firmware/dmi_scan.c
F: include/linux/dmi.h
DOCKING STATION DRIVER
M: Shaohua Li <shaohua.li@intel.com>
L: linux-acpi@vger.kernel.org
@ -5087,7 +5094,7 @@ S: Supported
F: drivers/platform/x86/intel_menlow.c
INTEL IA32 MICROCODE UPDATE SUPPORT
M: Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/core*
F: arch/x86/kernel/cpu/microcode/intel*
@ -5128,22 +5135,21 @@ M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
F: drivers/char/hw_random/ixp4xx-rng.c
INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
INTEL ETHERNET DRIVERS
M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
M: Jesse Brandeburg <jesse.brandeburg@intel.com>
M: Bruce Allan <bruce.w.allan@intel.com>
M: Carolyn Wyborny <carolyn.wyborny@intel.com>
M: Don Skidmore <donald.c.skidmore@intel.com>
M: Greg Rose <gregory.v.rose@intel.com>
M: Matthew Vick <matthew.vick@intel.com>
M: John Ronciak <john.ronciak@intel.com>
M: Mitch Williams <mitch.a.williams@intel.com>
M: Linux NICS <linux.nics@intel.com>
L: e1000-devel@lists.sourceforge.net
R: Jesse Brandeburg <jesse.brandeburg@intel.com>
R: Shannon Nelson <shannon.nelson@intel.com>
R: Carolyn Wyborny <carolyn.wyborny@intel.com>
R: Don Skidmore <donald.c.skidmore@intel.com>
R: Matthew Vick <matthew.vick@intel.com>
R: John Ronciak <john.ronciak@intel.com>
R: Mitch Williams <mitch.a.williams@intel.com>
L: intel-wired-lan@lists.osuosl.org
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
S: Supported
F: Documentation/networking/e100.txt
F: Documentation/networking/e1000.txt
@ -6316,6 +6322,7 @@ F: drivers/scsi/megaraid/
MELLANOX ETHERNET DRIVER (mlx4_en)
M: Amir Vadai <amirv@mellanox.com>
M: Ido Shamay <idos@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@ -8345,7 +8352,6 @@ F: block/partitions/ibm.c
S390 NETWORK DRIVERS
M: Ursula Braun <ursula.braun@de.ibm.com>
M: Frank Blaschka <blaschka@linux.vnet.ibm.com>
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@ -9820,7 +9826,7 @@ F: include/linux/wl12xx.h
TIPC NETWORK LAYER
M: Jon Maloy <jon.maloy@ericsson.com>
M: Allan Stephens <allan.stephens@windriver.com>
M: Ying Xue <ying.xue@windriver.com>
L: netdev@vger.kernel.org (core kernel code)
L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)
W: http://tipc.sourceforge.net/

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*

View File

@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
{
int err;
err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
sizeof(sf->uc.uc_mcontext.regs.scratch));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
if (!err)
set_current_blocked(&set);
err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
return err;
@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
/* Don't restart from sigreturn */
syscall_wont_restart(regs);
/*
* Ensure that sigreturn always returns to user mode (in case the
* regs saved on the user stack got fudged between save and sigreturn).
* Otherwise it is easy to panic the kernel with a custom
* signal handler and/or restorer which clobbers the status32/ret
* to return to a bogus location in kernel mode.
*/
regs->status32 |= STATUS_U_MASK;
return regs->r0;
badframe:
@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
/*
* handler returns using sigreturn stub provided already by userspace
* If not, nuke the process right away
*/
BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
return 1;
regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
/* User Stack for signal handler will be above the frame just carved */
@ -296,12 +308,12 @@ static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
int failed;
/* Set up the stack frame */
ret = setup_rt_frame(ksig, oldset, regs);
failed = setup_rt_frame(ksig, oldset, regs);
signal_setup_done(ret, ksig, 0);
signal_setup_done(failed, ksig, 0);
}
void do_signal(struct pt_regs *regs)

View File

@ -619,6 +619,7 @@ config ARCH_PXA
select GENERIC_CLOCKEVENTS
select GPIO_PXA
select HAVE_IDE
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
select PLAT_PXA
select SPARSE_IRQ

View File

@ -842,7 +842,7 @@ uart1: serial@fffd8000 {
};
macb0: ethernet@fffc4000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xfffc4000 0x100>;
interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";

View File

@ -845,7 +845,7 @@ ac97: sound@fffa0000 {
};
macb0: ethernet@fffbc000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xfffbc000 0x100>;
interrupts = <21 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";

View File

@ -956,7 +956,7 @@ usart3: serial@fff98000 {
};
macb0: ethernet@fffbc000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xfffbc000 0x100>;
interrupts = <25 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";

View File

@ -53,7 +53,7 @@ macb0_clk: macb0_clk {
};
macb0: ethernet@f802c000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <24 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";

View File

@ -41,7 +41,7 @@ macb1_clk: macb1_clk {
};
macb1: ethernet@f8030000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf8030000 0x100>;
interrupts = <27 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";

View File

@ -36,6 +36,20 @@ DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */
>;
};
mmc_pins: pinmux_mmc_pins {
pinctrl-single,pins = <
DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */
DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */
DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */
DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */
DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */
DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */
DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT3 */
DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */
DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */
>;
};
usb0_pins: pinmux_usb0_pins {
pinctrl-single,pins = <
DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */
@ -137,7 +151,12 @@ m25p80@0 {
};
&mmc1 {
pinctrl-names = "default";
pinctrl-0 = <&mmc_pins>;
vmmc-supply = <&vmmcsd_fixed>;
bus-width = <4>;
cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
};
/* At least dm8168-evm rev c won't support multipoint, later may */

View File

@ -150,17 +150,27 @@ elm: elm@48080000 {
};
gpio1: gpio@48032000 {
compatible = "ti,omap3-gpio";
compatible = "ti,omap4-gpio";
ti,hwmods = "gpio1";
ti,gpio-always-on;
reg = <0x48032000 0x1000>;
interrupts = <97>;
interrupts = <96>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
gpio2: gpio@4804c000 {
compatible = "ti,omap3-gpio";
compatible = "ti,omap4-gpio";
ti,hwmods = "gpio2";
ti,gpio-always-on;
reg = <0x4804c000 0x1000>;
interrupts = <99>;
interrupts = <98>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
gpmc: gpmc@50000000 {

View File

@ -1111,7 +1111,6 @@ pcie1_phy: pciephy@4a094000 {
"wkupclk", "refclk",
"div-clk", "phy-div";
#phy-cells = <0>;
ti,hwmods = "pcie1-phy";
};
pcie2_phy: pciephy@4a095000 {
@ -1130,7 +1129,6 @@ pcie2_phy: pciephy@4a095000 {
"wkupclk", "refclk",
"div-clk", "phy-div";
#phy-cells = <0>;
ti,hwmods = "pcie2-phy";
status = "disabled";
};
};

View File

@ -92,6 +92,8 @@ aes: aes@480c5000 {
ti,hwmods = "aes";
reg = <0x480c5000 0x50>;
interrupts = <0>;
dmas = <&sdma 65 &sdma 66>;
dma-names = "tx", "rx";
};
prm: prm@48306000 {
@ -550,6 +552,8 @@ sham: sham@480c3000 {
ti,hwmods = "sham";
reg = <0x480c3000 0x64>;
interrupts = <49>;
dmas = <&sdma 69>;
dma-names = "rx";
};
smartreflex_core: smartreflex@480cb000 {

View File

@ -411,6 +411,7 @@ gmac: ethernet@ff290000 {
"mac_clk_rx", "mac_clk_tx",
"clk_mac_ref", "clk_mac_refout",
"aclk_mac", "pclk_mac";
status = "disabled";
};
usb_host0_ehci: usb@ff500000 {

View File

@ -41,7 +41,7 @@ macb1_clk: macb1_clk {
};
macb1: ethernet@f802c000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
compatible = "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";

View File

@ -660,7 +660,7 @@ spi1: spi@fff01000 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0xfff01000 0x1000>;
interrupts = <0 156 4>;
interrupts = <0 155 4>;
num-cs = <4>;
clocks = <&spi_m_clk>;
status = "disabled";

View File

@ -56,6 +56,22 @@ / {
model = "Olimex A10-OLinuXino-LIME";
compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
cpus {
cpu0: cpu@0 {
/*
* The A10-Lime is known to be unstable
* when running at 1008 MHz
*/
operating-points = <
/* kHz uV */
912000 1350000
864000 1300000
624000 1250000
>;
cooling-max-level = <2>;
};
};
soc@01c00000 {
emac: ethernet@01c0b000 {
pinctrl-names = "default";

View File

@ -75,7 +75,6 @@ cpu0: cpu@0 {
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
1056000 1500000
1008000 1400000
912000 1350000
864000 1300000
@ -83,7 +82,7 @@ cpu0: cpu@0 {
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
cooling-max-level = <4>;
cooling-max-level = <3>;
};
};

View File

@ -47,7 +47,6 @@ cpu0: cpu@0 {
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
1104000 1500000
1008000 1400000
912000 1350000
864000 1300000
@ -57,7 +56,7 @@ cpu0: cpu@0 {
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
cooling-max-level = <6>;
cooling-max-level = <5>;
};
};

View File

@ -105,7 +105,6 @@ cpu0: cpu@0 {
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
1008000 1450000
960000 1400000
912000 1400000
864000 1300000
@ -116,7 +115,7 @@ cpu0: cpu@0 {
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
cooling-max-level = <7>;
cooling-max-level = <6>;
};
cpu@1 {

View File

@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
return kasprintf(GFP_KERNEL, "OMAP4");
else if (soc_is_omap54xx())
return kasprintf(GFP_KERNEL, "OMAP5");
else if (soc_is_am33xx() || soc_is_am335x())
return kasprintf(GFP_KERNEL, "AM33xx");
else if (soc_is_am43xx())
return kasprintf(GFP_KERNEL, "AM43xx");
else if (soc_is_dra7xx())

View File

@ -11,6 +11,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@ -40,7 +41,6 @@
#define ICHP_VAL_IRQ (1 << 31)
#define ICHP_IRQ(i) (((i) >> 16) & 0x7fff)
#define IPR_VALID (1 << 31)
#define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f)
#define MAX_INTERNAL_IRQS 128
@ -51,6 +51,7 @@
static void __iomem *pxa_irq_base;
static int pxa_internal_irq_nr;
static bool cpu_has_ipr;
static struct irq_domain *pxa_irq_domain;
static inline void __iomem *irq_base(int i)
{
@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
void pxa_mask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
irq_hw_number_t irq = irqd_to_hwirq(d);
uint32_t icmr = __raw_readl(base + ICMR);
icmr &= ~(1 << IRQ_BIT(d->irq));
icmr &= ~BIT(irq & 0x1f);
__raw_writel(icmr, base + ICMR);
}
void pxa_unmask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
irq_hw_number_t irq = irqd_to_hwirq(d);
uint32_t icmr = __raw_readl(base + ICMR);
icmr |= 1 << IRQ_BIT(d->irq);
icmr |= BIT(irq & 0x1f);
__raw_writel(icmr, base + ICMR);
}
@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
} while (1);
}
void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
int irq, i, n;
void __iomem *base = irq_base(hw / 32);
BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
/* initialize interrupt priority */
if (cpu_has_ipr)
__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
handle_level_irq);
irq_set_chip_data(virq, base);
set_irq_flags(virq, IRQF_VALID);
return 0;
}
static struct irq_domain_ops pxa_irq_ops = {
.map = pxa_irq_map,
.xlate = irq_domain_xlate_onecell,
};
static __init void
pxa_init_irq_common(struct device_node *node, int irq_nr,
int (*fn)(struct irq_data *, unsigned int))
{
int n;
pxa_internal_irq_nr = irq_nr;
cpu_has_ipr = !cpu_is_pxa25x();
pxa_irq_base = io_p2v(0x40d00000);
pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
PXA_IRQ(0), 0,
&pxa_irq_ops, NULL);
if (!pxa_irq_domain)
panic("Unable to add PXA IRQ domain\n");
irq_set_default_host(pxa_irq_domain);
for (n = 0; n < irq_nr; n += 32) {
void __iomem *base = irq_base(n >> 5);
__raw_writel(0, base + ICMR); /* disable all IRQs */
__raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
/* initialize interrupt priority */
if (cpu_has_ipr)
__raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
irq = PXA_IRQ(i);
irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, base);
set_irq_flags(irq, IRQF_VALID);
}
}
/* only unmasked interrupts kick us out of idle */
__raw_writel(1, irq_base(0) + ICCR);
pxa_internal_irq_chip.irq_set_wake = fn;
}
void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
{
BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
pxa_irq_base = io_p2v(0x40d00000);
cpu_has_ipr = !cpu_is_pxa25x();
pxa_init_irq_common(NULL, irq_nr, fn);
}
#ifdef CONFIG_PM
static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
};
#ifdef CONFIG_OF
static struct irq_domain *pxa_irq_domain;
static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
void __iomem *base = irq_base(hw / 32);
/* initialize interrupt priority */
if (cpu_has_ipr)
__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
handle_level_irq);
irq_set_chip_data(hw, base);
set_irq_flags(hw, IRQF_VALID);
return 0;
}
static struct irq_domain_ops pxa_irq_ops = {
.map = pxa_irq_map,
.xlate = irq_domain_xlate_onecell,
};
static const struct of_device_id intc_ids[] __initconst = {
{ .compatible = "marvell,pxa-intc", },
{}
@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
{
struct device_node *node;
struct resource res;
int n, ret;
int ret;
node = of_find_matching_node(NULL, intc_ids);
if (!node) {
@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
return;
}
pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
&pxa_irq_ops, NULL);
if (!pxa_irq_domain)
panic("Unable to add PXA IRQ domain\n");
irq_set_default_host(pxa_irq_domain);
for (n = 0; n < pxa_internal_irq_nr; n += 32) {
void __iomem *base = irq_base(n >> 5);
__raw_writel(0, base + ICMR); /* disable all IRQs */
__raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
}
/* only unmasked interrupts kick us out of idle */
__raw_writel(1, irq_base(0) + ICCR);
pxa_internal_irq_chip.irq_set_wake = fn;
pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
}
#endif /* CONFIG_OF */

View File

@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
};
static struct platform_device can_regulator_device = {
.name = "reg-fixed-volage",
.name = "reg-fixed-voltage",
.id = 0,
.dev = {
.platform_data = &can_regulator_pdata,

View File

@ -1,10 +1,12 @@
menuconfig ARCH_SUNXI
bool "Allwinner SoCs" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_RESET_CONTROLLER
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select PINCTRL
select SUN4I_TIMER
select RESET_CONTROLLER
if ARCH_SUNXI
@ -20,10 +22,8 @@ config MACH_SUN5I
config MACH_SUN6I
bool "Allwinner A31 (sun6i) SoCs support"
default ARCH_SUNXI
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
select RESET_CONTROLLER
select SUN5I_HSTIMER
config MACH_SUN7I
@ -37,16 +37,12 @@ config MACH_SUN7I
config MACH_SUN8I
bool "Allwinner A23 (sun8i) SoCs support"
default ARCH_SUNXI
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
select RESET_CONTROLLER
config MACH_SUN9I
bool "Allwinner (sun9i) SoCs support"
default ARCH_SUNXI
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select RESET_CONTROLLER
endif

View File

@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct of_device_id *match;
const struct dmtimer_platform_data *pdata;
int ret;
match = of_match_device(of_match_ptr(omap_timer_match), dev);
pdata = match ? match->data : dev->platform_data;
@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
}
if (!timer->reserved) {
pm_runtime_get_sync(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
__func__);
goto err_get_sync;
}
__omap_dm_timer_init_regs(timer);
pm_runtime_put(dev);
}
@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
dev_dbg(dev, "Device Probed.\n");
return 0;
err_get_sync:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
/**
@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
pm_runtime_disable(&pdev->dev);
return ret;
}

View File

@ -45,6 +45,10 @@ &sgenet0 {
status = "ok";
};
&sgenet1 {
status = "ok";
};
&xgenet {
status = "ok";
};

View File

@ -186,6 +186,16 @@ sge0clk: sge0clk@1f21c000 {
clock-output-names = "sge0clk";
};
sge1clk: sge1clk@1f21c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
clocks = <&socplldiv2 0>;
reg = <0x0 0x1f21c000 0x0 0x1000>;
reg-names = "csr-reg";
csr-mask = <0xc>;
clock-output-names = "sge1clk";
};
xge0clk: xge0clk@1f61c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
@ -628,13 +638,30 @@ sgenet0: ethernet@1f210000 {
<0x0 0x1f200000 0x0 0Xc300>,
<0x0 0x1B000000 0x0 0X200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0xA0 0x4>;
interrupts = <0x0 0xA0 0x4>,
<0x0 0xA1 0x4>;
dma-coherent;
clocks = <&sge0clk 0>;
local-mac-address = [00 00 00 00 00 00];
phy-connection-type = "sgmii";
};
sgenet1: ethernet@1f210030 {
compatible = "apm,xgene1-sgenet";
status = "disabled";
reg = <0x0 0x1f210030 0x0 0xd100>,
<0x0 0x1f200000 0x0 0Xc300>,
<0x0 0x1B000000 0x0 0X8000>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0xAC 0x4>,
<0x0 0xAD 0x4>;
port-id = <1>;
dma-coherent;
clocks = <&sge1clk 0>;
local-mac-address = [00 00 00 00 00 00];
phy-connection-type = "sgmii";
};
xgenet: ethernet@1f610000 {
compatible = "apm,xgene1-xgenet";
status = "disabled";
@ -642,7 +669,8 @@ xgenet: ethernet@1f610000 {
<0x0 0x1f600000 0x0 0Xc300>,
<0x0 0x18000000 0x0 0X200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0x60 0x4>;
interrupts = <0x0 0x60 0x4>,
<0x0 0x61 0x4>;
dma-coherent;
clocks = <&xge0clk 0>;
/* mac address will be overwritten by the bootloader */

View File

@ -8,7 +8,7 @@
*/
/* SoC fixed clocks */
soc_uartclk: refclk72738khz {
soc_uartclk: refclk7273800hz {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <7273800>;

View File

@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
__ret; \
})
#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
typeof(*raw_cpu_ptr(&(pcp))) __ret; \
preempt_disable(); \
__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
preempt_enable(); \
__ret; \
})
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2)
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
preempt_disable(); \
__ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2); \
preempt_enable(); \
__ret; \
})
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
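/*
 * Illustrative sketch (not part of this commit): the preemption
 * protection added above is what keeps a lockless per-CPU update like
 * the following correct when called from preemptible context. The
 * variable and function names are hypothetical; requires <linux/percpu.h>.
 */
DEFINE_PER_CPU(unsigned long, example_max);

static void note_value(unsigned long val)
{
	unsigned long old;

	/* retry until val is installed or a larger value is observed */
	do {
		old = this_cpu_read(example_max);
		if (val <= old)
			return;
	} while (this_cpu_cmpxchg(example_max, old, val) != old);
}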

View File

@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned int cpu = smp_processor_id();
/*
* init_mm.pgd does not contain any user mappings and it is always
* active for kernel addresses in TTBR1. Just set the reserved TTBR0.
*/
if (next == &init_mm) {
cpu_set_reserved_ttbr0();
return;
}
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
check_and_switch_context(next, tsk);
}

View File

@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return ret;
}
#define _percpu_add(pcp, val) \
__percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
#define _percpu_read(pcp) \
({ \
typeof(pcp) __retval; \
preempt_disable(); \
__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
sizeof(pcp)); \
preempt_enable(); \
__retval; \
})
#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
#define _percpu_write(pcp, val) \
do { \
preempt_disable(); \
__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
sizeof(pcp)); \
preempt_enable(); \
} while(0) \
#define _pcp_protect(operation, pcp, val) \
({ \
typeof(pcp) __retval; \
preempt_disable(); \
__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
(val), sizeof(pcp)); \
preempt_enable(); \
__retval; \
})
#define _percpu_add(pcp, val) \
_pcp_protect(__percpu_add, pcp, val)
#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
#define _percpu_and(pcp, val) \
__percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
_pcp_protect(__percpu_and, pcp, val)
#define _percpu_or(pcp, val) \
__percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
#define _percpu_read(pcp) (typeof(pcp)) \
(__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
#define _percpu_write(pcp, val) \
__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
_pcp_protect(__percpu_or, pcp, val)
#define _percpu_xchg(pcp, val) (typeof(pcp)) \
(__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)

View File

@ -2,6 +2,7 @@
#define _ASM_METAG_IO_H
#include <linux/types.h>
#include <asm/pgtable-bits.h>
#define IO_SPACE_LIMIT 0

View File

@ -0,0 +1,104 @@
/*
* Meta page table definitions.
*/
#ifndef _METAG_PGTABLE_BITS_H
#define _METAG_PGTABLE_BITS_H
#include <asm/metag_mem.h>
/*
* Definitions for MMU descriptors
*
* These are the hardware bits in the MMCU pte entries.
* Derived from the Meta toolkit headers.
*/
#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
/* Write combine bit - this can cause writes to occur out of order */
#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
/* Sys coherent bit - this bit is never used by Linux */
#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
#define _PAGE_ALWAYS_ZERO_1 0x020
#define _PAGE_CACHE_CTRL0 0x040
#define _PAGE_CACHE_CTRL1 0x080
#define _PAGE_ALWAYS_ZERO_2 0x100
#define _PAGE_ALWAYS_ZERO_3 0x200
#define _PAGE_ALWAYS_ZERO_4 0x400
#define _PAGE_ALWAYS_ZERO_5 0x800
/* These are software bits that we stuff into the gaps in the hardware
* pte entries that are not used. Note, these DO get stored in the actual
* hardware, but the hardware just does not use them.
*/
#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
/* Pages owned, and protected by, the kernel. */
#define _PAGE_KERNEL _PAGE_PRIV
/* No caching of this page */
#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
/* burst caching - good for data streaming */
#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
/* One cache way per thread */
#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
/* Full on caching */
#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
/* which bits are used for cache control ... */
#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
_PAGE_WR_COMBINE)
/* This is a mask of the bits that pte_modify is allowed to change. */
#define _PAGE_CHG_MASK (PAGE_MASK)
#define _PAGE_SZ_SHIFT 1
#define _PAGE_SZ_4K (0x0)
#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
#if defined(CONFIG_PAGE_SIZE_4K)
#define _PAGE_SZ (_PAGE_SZ_4K)
#elif defined(CONFIG_PAGE_SIZE_8K)
#define _PAGE_SZ (_PAGE_SZ_8K)
#elif defined(CONFIG_PAGE_SIZE_16K)
#define _PAGE_SZ (_PAGE_SZ_16K)
#endif
#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
# define _PAGE_SZHUGE (_PAGE_SZ_8K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
# define _PAGE_SZHUGE (_PAGE_SZ_16K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
# define _PAGE_SZHUGE (_PAGE_SZ_32K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
# define _PAGE_SZHUGE (_PAGE_SZ_64K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
# define _PAGE_SZHUGE (_PAGE_SZ_128K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
# define _PAGE_SZHUGE (_PAGE_SZ_256K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
# define _PAGE_SZHUGE (_PAGE_SZ_512K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
# define _PAGE_SZHUGE (_PAGE_SZ_1M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
# define _PAGE_SZHUGE (_PAGE_SZ_2M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
# define _PAGE_SZHUGE (_PAGE_SZ_4M)
#endif
#endif /* _METAG_PGTABLE_BITS_H */

View File

@ -5,6 +5,7 @@
#ifndef _METAG_PGTABLE_H
#define _METAG_PGTABLE_H
#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@ -20,100 +21,6 @@
#define VMALLOC_END 0x7FFFFFFF
#endif
/*
* Definitions for MMU descriptors
*
* These are the hardware bits in the MMCU pte entries.
* Derived from the Meta toolkit headers.
*/
#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
/* Write combine bit - this can cause writes to occur out of order */
#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
/* Sys coherent bit - this bit is never used by Linux */
#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
#define _PAGE_ALWAYS_ZERO_1 0x020
#define _PAGE_CACHE_CTRL0 0x040
#define _PAGE_CACHE_CTRL1 0x080
#define _PAGE_ALWAYS_ZERO_2 0x100
#define _PAGE_ALWAYS_ZERO_3 0x200
#define _PAGE_ALWAYS_ZERO_4 0x400
#define _PAGE_ALWAYS_ZERO_5 0x800
/* These are software bits that we stuff into the gaps in the hardware
* pte entries that are not used. Note, these DO get stored in the actual
* hardware, but the hardware just does not use them.
*/
#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
/* Pages owned, and protected by, the kernel. */
#define _PAGE_KERNEL _PAGE_PRIV
/* No caching of this page */
#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
/* burst caching - good for data streaming */
#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
/* One cache way per thread */
#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
/* Full on caching */
#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
/* which bits are used for cache control ... */
#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
_PAGE_WR_COMBINE)
/* This is a mask of the bits that pte_modify is allowed to change. */
#define _PAGE_CHG_MASK (PAGE_MASK)
#define _PAGE_SZ_SHIFT 1
#define _PAGE_SZ_4K (0x0)
#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
#if defined(CONFIG_PAGE_SIZE_4K)
#define _PAGE_SZ (_PAGE_SZ_4K)
#elif defined(CONFIG_PAGE_SIZE_8K)
#define _PAGE_SZ (_PAGE_SZ_8K)
#elif defined(CONFIG_PAGE_SIZE_16K)
#define _PAGE_SZ (_PAGE_SZ_16K)
#endif
#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
# define _PAGE_SZHUGE (_PAGE_SZ_8K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
# define _PAGE_SZHUGE (_PAGE_SZ_16K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
# define _PAGE_SZHUGE (_PAGE_SZ_32K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
# define _PAGE_SZHUGE (_PAGE_SZ_64K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
# define _PAGE_SZHUGE (_PAGE_SZ_128K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
# define _PAGE_SZHUGE (_PAGE_SZ_256K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
# define _PAGE_SZHUGE (_PAGE_SZ_512K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
# define _PAGE_SZHUGE (_PAGE_SZ_1M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
# define _PAGE_SZHUGE (_PAGE_SZ_2M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
# define _PAGE_SZHUGE (_PAGE_SZ_4M)
#endif
/*
* The Linux memory management assumes a three-level page table setup. On
* Meta, we use that, but "fold" the mid level into the top-level page

View File

@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) {
memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
pgd -= PTRS_PER_PGD;
#endif
free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
#ifdef CONFIG_64BIT
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd;
* cannot free it */
/*
* This is the permanent pmd attached to the pgd;
* cannot free it.
* Increment the counter to compensate for the decrement
* done by generic mm code.
*/
mm_inc_nr_pmds(mm);
return;
#endif
free_pages((unsigned long)pmd, PMD_ORDER);
}
@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)

View File

@ -55,8 +55,8 @@
#define ENTRY_COMP(_name_) .word sys_##_name_
#endif
ENTRY_SAME(restart_syscall) /* 0 */
ENTRY_SAME(exit)
90: ENTRY_SAME(restart_syscall) /* 0 */
91: ENTRY_SAME(exit)
ENTRY_SAME(fork_wrapper)
ENTRY_SAME(read)
ENTRY_SAME(write)
@ -439,7 +439,10 @@
ENTRY_SAME(bpf)
ENTRY_COMP(execveat)
/* Nothing yet */
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
.error "size of syscall table does not fit value of __NR_Linux_syscalls"
.endif
#undef ENTRY_SAME
#undef ENTRY_DIFF
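
The two new local labels give the assembler the table start (90:) and the size of one entry (91b - 90b), so the .ifne/.error block turns a mismatched __NR_Linux_syscalls into a build failure instead of a silently mis-sized table. A C analogue of the same build-time check, with illustrative names:

#define NR_SYSCALLS 3

static long sys_a(void) { return 0; }
static long sys_b(void) { return 0; }
static long sys_c(void) { return 0; }

static long (*const syscall_table[])(void) = { sys_a, sys_b, sys_c };

/* Fails the build if the table and the declared count disagree. */
_Static_assert(sizeof(syscall_table) / sizeof(syscall_table[0]) == NR_SYSCALLS,
	       "size of syscall table does not fit value of NR_SYSCALLS");

int main(void)
{
	return (int)syscall_table[0]();
}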

View File

@ -126,7 +126,7 @@ config PPC
select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_BPF_JIT if PPC64
select HAVE_BPF_JIT
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAS_GCOV_PROFILE_ALL

View File

@ -23,6 +23,8 @@
#define PPC_STL stringify_in_c(std)
#define PPC_STLU stringify_in_c(stdu)
#define PPC_LCMPI stringify_in_c(cmpdi)
#define PPC_LCMPLI stringify_in_c(cmpldi)
#define PPC_LCMP stringify_in_c(cmpd)
#define PPC_LONG stringify_in_c(.llong)
#define PPC_LONG_ALIGN stringify_in_c(.balign 8)
#define PPC_TLNEI stringify_in_c(tdnei)
@ -52,6 +54,8 @@
#define PPC_STL stringify_in_c(stw)
#define PPC_STLU stringify_in_c(stwu)
#define PPC_LCMPI stringify_in_c(cmpwi)
#define PPC_LCMPLI stringify_in_c(cmplwi)
#define PPC_LCMP stringify_in_c(cmpw)
#define PPC_LONG stringify_in_c(.long)
#define PPC_LONG_ALIGN stringify_in_c(.balign 4)
#define PPC_TLNEI stringify_in_c(twnei)
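
These paired definitions let the same assembly source build for ppc32 and ppc64: under CONFIG_PPC64 the L-prefixed macros expand to doubleword mnemonics (cmpdi, cmpldi, cmpd), otherwise to their word-sized counterparts. A small host-runnable sketch of the stringify_in_c() mechanism, assuming the kernel's usual two-level stringification:

#include <stdio.h>

/* In C files the kernel macro expands to a string literal plus a
 * trailing space, ready for pasting into inline asm; in .S files it
 * expands to the bare tokens. */
#define __stringify_in_c(...) #__VA_ARGS__
#define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "

#ifdef CONFIG_PPC64
#define PPC_LCMPI stringify_in_c(cmpdi)
#else
#define PPC_LCMPI stringify_in_c(cmpwi)
#endif

int main(void)
{
	printf("selected compare: %s\n", PPC_LCMPI); /* "cmpwi " by default */
	return 0;
}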

View File

@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
static inline int cpu_nr_cores(void)
{
return NR_CPUS >> threads_shift;
return nr_cpu_ids >> threads_shift;
}
static inline cpumask_t cpu_online_cores_map(void)

View File

@ -153,6 +153,7 @@
#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
#define PPC_INST_MFTMR 0x7c0002dc
#define PPC_INST_MSGSND 0x7c00019c
#define PPC_INST_MSGCLR 0x7c0001dc
#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
@ -212,6 +213,8 @@
#define PPC_INST_LWZ 0x80000000
#define PPC_INST_STD 0xf8000000
#define PPC_INST_STDU 0xf8000001
#define PPC_INST_STW 0x90000000
#define PPC_INST_STWU 0x94000000
#define PPC_INST_MFLR 0x7c0802a6
#define PPC_INST_MTLR 0x7c0803a6
#define PPC_INST_CMPWI 0x2c000000
@ -309,6 +312,8 @@
___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
___PPC_RB(b))
#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \
___PPC_RB(b))
#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \

View File

@ -608,13 +608,16 @@
#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */
#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
#define SRR1_WAKERESET 0x00100000 /* System reset */
#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */
#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
* may not be recoverable */

View File

@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8NVL */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004c0000,
.cpu_name = "POWER8NVL (raw)",
.cpu_features = CPU_FTRS_POWER8,
.cpu_user_features = COMMON_USER_POWER8,
.cpu_user_features2 = COMMON_USER2_POWER8,
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.flush_tlb = __flush_tlb_power8,
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 DD1: Does not support doorbell IPIs */
.pvr_mask = 0xffffff00,
.pvr_value = 0x004d0100,

View File

@ -17,6 +17,7 @@
#include <asm/dbell.h>
#include <asm/irq_regs.h>
#include <asm/kvm_ppc.h>
#ifdef CONFIG_SMP
void doorbell_setup_this_cpu(void)
@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
may_hard_irq_enable();
kvmppc_set_host_ipi(smp_processor_id(), 0);
__this_cpu_inc(irq_stat.doorbell_irqs);
smp_ipi_demux();

View File

@ -1408,7 +1408,7 @@ machine_check_handle_early:
bne 9f /* continue in V mode if we are. */
5:
#ifdef CONFIG_KVM_BOOK3S_64_HV
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* We are coming from kernel context. Check if we are coming from
* guest. if yes, then we can continue. We will fall through

View File

@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
spin_lock(&vcpu->arch.vpa_update_lock);
lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
if (lppaca)
yield_count = lppaca->yield_count;
yield_count = be32_to_cpu(lppaca->yield_count);
spin_unlock(&vcpu->arch.vpa_update_lock);
return yield_count;
}
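
The lppaca lives in big-endian guest-shared memory, so a raw load returned a byte-swapped yield count on little-endian hosts; be32_to_cpu() fixes that. A host-side sketch of the failure mode, with ntohl() standing in for be32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h> /* ntohl() stands in for be32_to_cpu() */

int main(void)
{
	uint32_t yield_count = htonl(5); /* stored big-endian, as in the vpa */

	printf("raw load : %u\n", yield_count);        /* 83886080 on LE */
	printf("converted: %u\n", ntohl(yield_count)); /* 5 everywhere */
	return 0;
}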
@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
bool preserve_top32)
{
struct kvm *kvm = vcpu->kvm;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
mutex_lock(&kvm->lock);
spin_lock(&vc->lock);
/*
* If ILE (interrupt little-endian) has changed, update the
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
*/
if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *vcpu;
int i;
mutex_lock(&kvm->lock);
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.vcore != vc)
continue;
@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
else
vcpu->arch.intr_msr &= ~MSR_LE;
}
mutex_unlock(&kvm->lock);
}
/*
@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask &= 0xFFFFFFFF;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
mutex_unlock(&kvm->lock);
}
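
The reordering matters because kvm->lock is a mutex and may sleep, which must never happen inside the vc->lock spinlock; the fix takes the mutex first, unconditionally, and drops it last. A pthread sketch of the resulting lock hierarchy (names are stand-ins, not the KVM API):

#include <pthread.h>

static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ kvm->lock */
static pthread_spinlock_t vc_lock;                           /* ~ vc->lock */

/* Sleeping lock first, spinlock second; never the other way around. */
static void set_lpcr_pattern(void)
{
	pthread_mutex_lock(&kvm_lock);
	pthread_spin_lock(&vc_lock);
	/* ... update lpcr and per-vcpu intr_msr ... */
	pthread_spin_unlock(&vc_lock);
	pthread_mutex_unlock(&kvm_lock);
}

int main(void)
{
	pthread_spin_init(&vc_lock, PTHREAD_PROCESS_PRIVATE);
	set_lpcr_pattern();
	pthread_spin_destroy(&vc_lock);
	return 0;
}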
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,

View File

@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Save HEIR (HV emulation assist reg) in emul_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
stw r3,VCPU_LAST_INST(r9)
cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
bne 11f
mfspr r3,SPRN_HEIR

View File

@ -1,4 +1,4 @@
#
# Arch-specific network modules
#
obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o
obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o

View File

@ -10,12 +10,25 @@
#ifndef _BPF_JIT_H
#define _BPF_JIT_H
#ifdef CONFIG_PPC64
#define BPF_PPC_STACK_R3_OFF 48
#define BPF_PPC_STACK_LOCALS 32
#define BPF_PPC_STACK_BASIC (48+64)
#define BPF_PPC_STACK_SAVE (18*8)
#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
BPF_PPC_STACK_SAVE)
#define BPF_PPC_SLOWPATH_FRAME (48+64)
#else
#define BPF_PPC_STACK_R3_OFF 24
#define BPF_PPC_STACK_LOCALS 16
#define BPF_PPC_STACK_BASIC (24+32)
#define BPF_PPC_STACK_SAVE (18*4)
#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
BPF_PPC_STACK_SAVE)
#define BPF_PPC_SLOWPATH_FRAME (24+32)
#endif
#define REG_SZ (BITS_PER_LONG/8)
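
The #else branch gives the 32-bit JIT its own stack geometry, and REG_SZ lets shared code scale save-area offsets by the native word size. Evaluated standalone (a sketch; the arithmetic mirrors the #defines above):

#include <stdio.h>

static unsigned int frame_size(int is64)
{
	unsigned int basic  = is64 ? 48 + 64 : 24 + 32; /* BPF_PPC_STACK_BASIC */
	unsigned int locals = is64 ? 32 : 16;           /* BPF_PPC_STACK_LOCALS */
	unsigned int save   = 18 * (is64 ? 8 : 4);      /* BPF_PPC_STACK_SAVE */

	return basic + locals + save;                   /* BPF_PPC_STACKFRAME */
}

int main(void)
{
	printf("ppc64 frame: %u bytes\n", frame_size(1)); /* 288 */
	printf("ppc32 frame: %u bytes\n", frame_size(0)); /* 144 */
	return 0;
}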
/*
* Generated code register usage:
@ -57,7 +70,11 @@ DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);
DECLARE_LOAD_FUNC(sk_load_byte_msh);
#ifdef CONFIG_PPC64
#define FUNCTION_DESCR_SIZE 24
#else
#define FUNCTION_DESCR_SIZE 0
#endif
/*
* 16-bit immediate helper macros: HA() is for use with sign-extending instrs
@ -86,7 +103,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STWU(r, base, i) EMIT(PPC_INST_STWU | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
@ -98,6 +120,17 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RA(base) | IMM_L(i))
#define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \
___PPC_RA(base) | ___PPC_RB(b))
#ifdef CONFIG_PPC64
#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
#else
#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
#endif
/* Convenience helpers for the above with 'far' offsets: */
#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
else { PPC_ADDIS(r, base, IMM_HA(i)); \
@ -115,6 +148,29 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
else { PPC_ADDIS(r, base, IMM_HA(i)); \
PPC_LHZ(r, r, IMM_L(i)); } } while(0)
#ifdef CONFIG_PPC64
#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
#else
#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
#endif
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC64
#define PPC_BPF_LOAD_CPU(r) \
do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \
PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
} while (0)
#else
#define PPC_BPF_LOAD_CPU(r) \
do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \
PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \
offsetof(struct thread_info, cpu)); \
} while(0)
#endif
#else
#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
#endif
#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
@ -196,6 +252,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
} } while (0);
#ifdef CONFIG_PPC64
#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0)
#else
#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
#endif
#define PPC_LHBRX_OFFS(r, base, i) \
do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
#ifdef __LITTLE_ENDIAN__

View File

@ -34,13 +34,13 @@
*/
.globl sk_load_word
sk_load_word:
cmpdi r_addr, 0
PPC_LCMPI r_addr, 0
blt bpf_slow_path_word_neg
.globl sk_load_word_positive_offset
sk_load_word_positive_offset:
/* Are we accessing past headlen? */
subi r_scratch1, r_HL, 4
cmpd r_scratch1, r_addr
PPC_LCMP r_scratch1, r_addr
blt bpf_slow_path_word
/* Nope, just hitting the header. cr0 here is eq or gt! */
#ifdef __LITTLE_ENDIAN__
@ -52,12 +52,12 @@ sk_load_word_positive_offset:
.globl sk_load_half
sk_load_half:
cmpdi r_addr, 0
PPC_LCMPI r_addr, 0
blt bpf_slow_path_half_neg
.globl sk_load_half_positive_offset
sk_load_half_positive_offset:
subi r_scratch1, r_HL, 2
cmpd r_scratch1, r_addr
PPC_LCMP r_scratch1, r_addr
blt bpf_slow_path_half
#ifdef __LITTLE_ENDIAN__
lhbrx r_A, r_D, r_addr
@ -68,11 +68,11 @@ sk_load_half_positive_offset:
.globl sk_load_byte
sk_load_byte:
cmpdi r_addr, 0
PPC_LCMPI r_addr, 0
blt bpf_slow_path_byte_neg
.globl sk_load_byte_positive_offset
sk_load_byte_positive_offset:
cmpd r_HL, r_addr
PPC_LCMP r_HL, r_addr
ble bpf_slow_path_byte
lbzx r_A, r_D, r_addr
blr
@ -83,11 +83,11 @@ sk_load_byte_positive_offset:
*/
.globl sk_load_byte_msh
sk_load_byte_msh:
cmpdi r_addr, 0
PPC_LCMPI r_addr, 0
blt bpf_slow_path_byte_msh_neg
.globl sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
cmpd r_HL, r_addr
PPC_LCMP r_HL, r_addr
ble bpf_slow_path_byte_msh
lbzx r_X, r_D, r_addr
rlwinm r_X, r_X, 2, 32-4-2, 31-2
@ -101,13 +101,13 @@ sk_load_byte_msh_positive_offset:
*/
#define bpf_slow_path_common(SIZE) \
mflr r0; \
std r0, 16(r1); \
PPC_STL r0, PPC_LR_STKOFF(r1); \
/* R3 goes in parameter space of caller's frame */ \
std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \
stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
addi r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ); \
PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
/* R3 = r_skb, as passed */ \
mr r4, r_addr; \
li r6, SIZE; \
@ -115,19 +115,19 @@ sk_load_byte_msh_positive_offset:
nop; \
/* R3 = 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
ld r0, 16(r1); \
ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
PPC_LL r0, PPC_LR_STKOFF(r1); \
PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
mtlr r0; \
cmpdi r3, 0; \
PPC_LCMPI r3, 0; \
blt bpf_error; /* cr0 = LT */ \
ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
/* Great success! */
bpf_slow_path_word:
bpf_slow_path_common(4)
/* Data value is on stack, and cr0 != LT */
lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
lwz r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
blr
bpf_slow_path_half:
@ -154,12 +154,12 @@ bpf_slow_path_byte_msh:
*/
#define sk_negative_common(SIZE) \
mflr r0; \
std r0, 16(r1); \
PPC_STL r0, PPC_LR_STKOFF(r1); \
/* R3 goes in parameter space of caller's frame */ \
std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
/* R3 = r_skb, as passed */ \
mr r4, r_addr; \
li r5, SIZE; \
@ -167,19 +167,19 @@ bpf_slow_path_byte_msh:
nop; \
/* R3 != 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
ld r0, 16(r1); \
ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
PPC_LL r0, PPC_LR_STKOFF(r1); \
PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
mtlr r0; \
cmpldi r3, 0; \
PPC_LCMPLI r3, 0; \
beq bpf_error_slow; /* cr0 = EQ */ \
mr r_addr, r3; \
ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
/* Great success! */
bpf_slow_path_word_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_word_negative_offset
sk_load_word_negative_offset:
@ -189,7 +189,7 @@ sk_load_word_negative_offset:
bpf_slow_path_half_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_half_negative_offset
sk_load_half_negative_offset:
@ -199,7 +199,7 @@ sk_load_half_negative_offset:
bpf_slow_path_byte_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_negative_offset
sk_load_byte_negative_offset:
@ -209,7 +209,7 @@ sk_load_byte_negative_offset:
bpf_slow_path_byte_msh_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
@ -221,7 +221,7 @@ sk_load_byte_msh_negative_offset:
bpf_error_slow:
/* fabricate a cr0 = lt */
li r_scratch1, -1
cmpdi r_scratch1, 0
PPC_LCMPI r_scratch1, 0
bpf_error:
/* Entered with cr0 = lt */
li r3, 0

View File

@ -1,8 +1,9 @@
/* bpf_jit_comp.c: BPF JIT compiler for PPC64
/* bpf_jit_comp.c: BPF JIT compiler
*
* Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
*
* Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
* Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -36,11 +37,11 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
if (ctx->seen & SEEN_DATAREF) {
/* If we call any helpers (for loads), save LR */
EMIT(PPC_INST_MFLR | __PPC_RT(R0));
PPC_STD(0, 1, 16);
PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
/* Back up non-volatile regs. */
PPC_STD(r_D, 1, -(8*(32-r_D)));
PPC_STD(r_HL, 1, -(8*(32-r_HL)));
PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
}
if (ctx->seen & SEEN_MEM) {
/*
@ -49,11 +50,10 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
*/
for (i = r_M; i < (r_M+16); i++) {
if (ctx->seen & (1 << (i-r_M)))
PPC_STD(i, 1, -(8*(32-i)));
PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
}
}
EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
(-BPF_PPC_STACKFRAME & 0xfffc));
PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
}
if (ctx->seen & SEEN_DATAREF) {
@ -67,7 +67,7 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
data_len));
PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
PPC_SUB(r_HL, r_HL, r_scratch1);
PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
}
if (ctx->seen & SEEN_XREG) {
@ -99,16 +99,16 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
if (ctx->seen & SEEN_DATAREF) {
PPC_LD(0, 1, 16);
PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
PPC_MTLR(0);
PPC_LD(r_D, 1, -(8*(32-r_D)));
PPC_LD(r_HL, 1, -(8*(32-r_HL)));
PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
}
if (ctx->seen & SEEN_MEM) {
/* Restore any saved non-vol registers */
for (i = r_M; i < (r_M+16); i++) {
if (ctx->seen & (1 << (i-r_M)))
PPC_LD(i, 1, -(8*(32-i)));
PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
}
}
}
@ -355,7 +355,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
ifindex) != 4);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
type) != 2);
PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
dev));
PPC_CMPDI(r_scratch1, 0);
if (ctx->pc_ret0 != -1) {
@ -411,20 +411,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
PPC_SRWI(r_A, r_A, 5);
break;
case BPF_ANC | SKF_AD_CPU:
#ifdef CONFIG_SMP
/*
* PACA ptr is r13:
* raw_smp_processor_id() = local_paca->paca_index
*/
BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
paca_index) != 2);
PPC_LHZ_OFFS(r_A, 13,
offsetof(struct paca_struct, paca_index));
#else
PPC_LI(r_A, 0);
#endif
PPC_BPF_LOAD_CPU(r_A);
break;
/*** Absolute loads from packet header/data ***/
case BPF_LD | BPF_W | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, sk_load_word);
@ -437,7 +425,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
common_load:
/* Load from [K]. */
ctx->seen |= SEEN_DATAREF;
PPC_LI64(r_scratch1, func);
PPC_FUNC_ADDR(r_scratch1, func);
PPC_MTLR(r_scratch1);
PPC_LI32(r_addr, K);
PPC_BLRL();
@ -463,7 +451,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
* in the helper functions.
*/
ctx->seen |= SEEN_DATAREF | SEEN_XREG;
PPC_LI64(r_scratch1, func);
PPC_FUNC_ADDR(r_scratch1, func);
PPC_MTLR(r_scratch1);
PPC_ADDI(r_addr, r_X, IMM_L(K));
if (K >= 32768)
@ -685,9 +673,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
if (image) {
bpf_flush_icache(code_base, code_base + (proglen/4));
#ifdef CONFIG_PPC64
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
((u64 *)image)[1] = local_paca->kernel_toc;
#endif
fp->bpf_func = (void *)image;
fp->jited = true;
}
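
On ELFv1 ppc64 a C function pointer addresses a descriptor rather than code, so the JIT fabricates one at the head of the image: the entry address, then the kernel TOC the callee expects in r2. In struct form (a sketch; the full ABI descriptor also carries a third, environment doubleword that the JIT does not need):

#include <stdint.h>

/* Sketch of an ELFv1 ppc64 function descriptor; the two stores in
 * bpf_jit_compile() fill exactly these slots, with the generated
 * code placed after them in the same allocation. */
struct ppc64_func_desc {
	uint64_t entry; /* address of the first instruction */
	uint64_t toc;   /* TOC base for the callee */
};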

View File

@ -33,6 +33,8 @@
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
#include <asm/kvm_ppc.h>
#include <asm/ppc-opcode.h>
#include "powernv.h"
@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
unsigned long srr1;
unsigned long srr1, wmask;
u32 idle_states;
/* Standard hot unplug procedure */
@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
generic_set_cpu_dead(cpu);
smp_wmb();
wmask = SRR1_WAKEMASK;
if (cpu_has_feature(CPU_FTR_ARCH_207S))
wmask = SRR1_WAKEMASK_P8;
idle_states = pnv_get_supported_cpuidle_states();
/* We don't want to take decrementer interrupts while we are offline,
* so clear LPCR:PECE1. We keep PECE2 enabled.
@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
* having finished executing in a KVM guest, then srr1
* contains 0.
*/
if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
if ((srr1 & wmask) == SRR1_WAKEEE) {
icp_native_flush_interrupt();
local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
smp_mb();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
kvmppc_set_host_ipi(cpu, 0);
}
if (cpu_core_split_required())

View File

@ -25,10 +25,10 @@
static struct kobject *mobility_kobj;
struct update_props_workarea {
u32 phandle;
u32 state;
u64 reserved;
u32 nprops;
__be32 phandle;
__be32 state;
__be64 reserved;
__be32 nprops;
} __packed;
#define NODE_ACTION_MASK 0xff000000
@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
return rc;
}
static int delete_dt_node(u32 phandle)
static int delete_dt_node(__be32 phandle)
{
struct device_node *dn;
dn = of_find_node_by_phandle(phandle);
dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn)
return -ENOENT;
@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
return 0;
}
static int update_dt_node(u32 phandle, s32 scope)
static int update_dt_node(__be32 phandle, s32 scope)
{
struct update_props_workarea *upwa;
struct device_node *dn;
@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
char *prop_data;
char *rtas_buf;
int update_properties_token;
u32 nprops;
u32 vd;
update_properties_token = rtas_token("ibm,update-properties");
@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
if (!rtas_buf)
return -ENOMEM;
dn = of_find_node_by_phandle(phandle);
dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn) {
kfree(rtas_buf);
return -ENOENT;
@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
break;
prop_data = rtas_buf + sizeof(*upwa);
nprops = be32_to_cpu(upwa->nprops);
/* On the first call to ibm,update-properties for a node,
* the first property value descriptor contains an empty
@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
*/
if (*prop_data == 0) {
prop_data++;
vd = *(u32 *)prop_data;
vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += vd + sizeof(vd);
upwa->nprops--;
nprops--;
}
for (i = 0; i < upwa->nprops; i++) {
for (i = 0; i < nprops; i++) {
char *prop_name;
prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
vd = *(u32 *)prop_data;
vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += sizeof(vd);
switch (vd) {
@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
return 0;
}
static int add_dt_node(u32 parent_phandle, u32 drc_index)
static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
{
struct device_node *dn;
struct device_node *parent_dn;
int rc;
parent_dn = of_find_node_by_phandle(parent_phandle);
parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
if (!parent_dn)
return -ENOENT;
@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
u32 *data;
__be32 *data;
int update_nodes_token;
int rc;
@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
if (rc && rc != 1)
break;
data = (u32 *)rtas_buf + 4;
while (*data & NODE_ACTION_MASK) {
data = (__be32 *)rtas_buf + 4;
while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
int i;
u32 action = *data & NODE_ACTION_MASK;
int node_count = *data & NODE_COUNT_MASK;
u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
data++;
for (i = 0; i < node_count; i++) {
u32 phandle = *data++;
u32 drc_index;
__be32 phandle = *data++;
__be32 drc_index;
switch (action) {
case DELETE_DT_NODE:
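
Each status word from ibm,update-nodes arrives big-endian; the fixed loop converts once with be32_to_cpu() and then splits the action byte from the 24-bit node count. A host-side sketch of that split (ntohl() standing in; NODE_COUNT_MASK is the complement of the action mask, as in the source):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h> /* ntohl() stands in for be32_to_cpu() */

#define NODE_ACTION_MASK 0xff000000
#define NODE_COUNT_MASK  0x00ffffff

int main(void)
{
	uint32_t raw = htonl(0x01000002); /* action 0x01, node_count 2 */
	uint32_t v = ntohl(raw);

	printf("action=0x%x node_count=%u\n",
	       (v & NODE_ACTION_MASK) >> 24, v & NODE_COUNT_MASK);
	return 0;
}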

View File

@ -211,7 +211,7 @@ do { \
extern unsigned long mmap_rnd_mask;
#define STACK_RND_MASK (mmap_rnd_mask)
#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
#define ARCH_DLINFO \
do { \

View File

@ -57,7 +57,6 @@ enum interruption_class {
IRQIO_TAP,
IRQIO_VMR,
IRQIO_LCS,
IRQIO_CLW,
IRQIO_CTC,
IRQIO_APB,
IRQIO_ADM,

View File

@ -57,6 +57,44 @@
unsigned long ftrace_plt;
static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
#ifdef CC_USING_HOTPATCH
/* brcl 0,0 */
insn->opc = 0xc004;
insn->disp = 0;
#else
/* stg r14,8(r15) */
insn->opc = 0xe3e0;
insn->disp = 0xf0080024;
#endif
}
static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
if (insn->opc == BREAKPOINT_INSTRUCTION)
return 1;
#endif
return 0;
}
static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
insn->opc = BREAKPOINT_INSTRUCTION;
insn->disp = KPROBE_ON_FTRACE_NOP;
#endif
}
static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
insn->opc = BREAKPOINT_INSTRUCTION;
insn->disp = KPROBE_ON_FTRACE_CALL;
#endif
}
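
Pulling the instruction bytes into helpers keeps the CC_USING_HOTPATCH and CONFIG_KPROBES #ifdefs out of ftrace_make_nop()/ftrace_make_call(), which now read symmetrically. The 6-byte slot the helpers fill looks like this (a standalone sketch mirroring s390's struct ftrace_insn):

#include <stdint.h>
#include <stdio.h>

/* A 2-byte opcode followed by a 4-byte displacement/operand field,
 * 6 bytes total when packed. */
struct ftrace_insn_demo {
	uint16_t opc;
	int32_t disp;
} __attribute__((packed));

int main(void)
{
	/* brcl 0,0 -- the hotpatch-style "original" instruction. */
	struct ftrace_insn_demo insn = { .opc = 0xc004, .disp = 0 };

	printf("slot size: %zu bytes\n", sizeof(insn)); /* 6 */
	return insn.disp;
}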
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return -EFAULT;
if (addr == MCOUNT_ADDR) {
/* Initial code replacement */
#ifdef CC_USING_HOTPATCH
/* We expect to see brcl 0,0 */
ftrace_generate_nop_insn(&orig);
#else
/* We expect to see stg r14,8(r15) */
orig.opc = 0xe3e0;
orig.disp = 0xf0080024;
#endif
ftrace_generate_orig_insn(&orig);
ftrace_generate_nop_insn(&new);
} else if (old.opc == BREAKPOINT_INSTRUCTION) {
} else if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
* bytes of the original instruction so that the kprobes
* handler can execute a nop, if it reaches this breakpoint.
*/
new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
orig.disp = KPROBE_ON_FTRACE_CALL;
new.disp = KPROBE_ON_FTRACE_NOP;
ftrace_generate_kprobe_call_insn(&orig);
ftrace_generate_kprobe_nop_insn(&new);
} else {
/* Replace ftrace call with a nop. */
ftrace_generate_call_insn(&orig, rec->ip);
@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
if (old.opc == BREAKPOINT_INSTRUCTION) {
if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* bytes of the original instruction so that the kprobes
* handler can execute a brasl if it reaches this breakpoint.
*/
new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
orig.disp = KPROBE_ON_FTRACE_NOP;
new.disp = KPROBE_ON_FTRACE_CALL;
ftrace_generate_kprobe_nop_insn(&orig);
ftrace_generate_kprobe_call_insn(&new);
} else {
/* Replace nop with an ftrace call. */
ftrace_generate_nop_insn(&orig);

View File

@ -79,7 +79,6 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
{.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"},
{.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
{.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
{.irq = IRQIO_CLW, .name = "CLW", .desc = "[I/O] CLAW"},
{.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
{.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
{.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},

View File

@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
static struct attribute *cpumsf_pmu_events_attr[] = {
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
NULL,
NULL,
};
@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
return -EINVAL;
}
if (si.ad)
if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
cpumsf_pmu_events_attr[1] =
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
}
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
if (!sfdbg)

View File

@ -177,6 +177,17 @@ restart_entry:
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
#ifdef CONFIG_SMP
larl %r1,smp_cpu_mt_shift
icm %r1,15,0(%r1)
jz smt_done
llgfr %r1,%r1
smt_loop:
sigp %r1,%r0,SIGP_SET_MULTI_THREADING
brc 8,smt_done /* accepted */
brc 2,smt_loop /* busy, try again */
smt_done:
#endif
larl %r1,.Lnew_pgm_check_psw
lpswe 0(%r1)
pgm_check_entry:

View File

@ -456,7 +456,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
const struct timespec *ts)
const struct timespec64 *ts)
{
cycles_t cycles = get_cycles();
return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
@ -466,7 +466,7 @@ int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
struct timespec *ts)
struct timespec64 *ts)
{
int ret;
cycles_t cycles_prev, cycles_now, clock_rate;

View File

@ -1830,7 +1830,7 @@ extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
* code.
*/
extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
struct timespec *ts);
struct timespec64 *ts);
/* Set the timestamp of mPIPE.
*
@ -1840,7 +1840,7 @@ extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
* code.
*/
extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
const struct timespec *ts);
const struct timespec64 *ts);
/* Adjust the timestamp of mPIPE.
*

View File

@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
EVENT_CONSTRAINT_END
};
@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
if (c)
return c;
c = intel_pebs_constraints(event);
c = intel_shared_regs_constraints(cpuc, event);
if (c)
return c;
c = intel_shared_regs_constraints(cpuc, event);
c = intel_pebs_constraints(event);
if (c)
return c;

View File

@ -364,12 +364,21 @@ system_call_fastpath:
* Has incomplete stack frame and undefined top of stack.
*/
ret_from_sys_call:
testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz int_ret_from_sys_call_fixup /* Go to the slow path */
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
/*
* We must check ti flags with interrupts (or at least preemption)
* off because we must *never* return to userspace without
* processing exit work that is enqueued if we're preempted here.
* In particular, returning to userspace with any of the one-shot
* flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
* very bad.
*/
testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz int_ret_from_sys_call_fixup /* Go to the slow path */
CFI_REMEMBER_STATE
/*
* sysretq will re-enable interrupts:
@ -386,7 +395,7 @@ ret_from_sys_call:
int_ret_from_sys_call_fixup:
FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
jmp int_ret_from_sys_call
jmp int_ret_from_sys_call_irqs_off
/* Do syscall tracing */
tracesys:
@ -432,6 +441,7 @@ tracesys_phase2:
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
int_ret_from_sys_call_irqs_off:
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
GLOBAL(int_with_check)
@ -789,7 +799,21 @@ retint_swapgs: /* return to user-space */
cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */
jne opportunistic_sysret_failed
testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */
/*
* SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET,
* restoring TF results in a trap from userspace immediately after
* SYSRET. This would cause an infinite loop whenever #DB happens
* with register state that satisfies the opportunistic SYSRET
* conditions. For example, single-stepping this user code:
*
* movq $stuck_here,%rcx
* pushfq
* popq %r11
* stuck_here:
*
* would never get past 'stuck_here'.
*/
testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
jnz opportunistic_sysret_failed
/* nothing to check for RSP */

View File

@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{ "bx", 8, offsetof(struct pt_regs, bx) },
{ "cx", 8, offsetof(struct pt_regs, cx) },
{ "dx", 8, offsetof(struct pt_regs, dx) },
{ "si", 8, offsetof(struct pt_regs, dx) },
{ "si", 8, offsetof(struct pt_regs, si) },
{ "di", 8, offsetof(struct pt_regs, di) },
{ "bp", 8, offsetof(struct pt_regs, bp) },
{ "sp", 8, offsetof(struct pt_regs, sp) },

View File

@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
/* ASRock */
{ /* Handle problems with rebooting on ASRock Q1900DC-ITX */
.callback = set_pci_reboot,
.ident = "ASRock Q1900DC-ITX",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
},
},
/* ASUS */
{ /* Handle problems with rebooting on ASUS P4S800 */
.callback = set_bios_reboot,

View File

@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
int i;
struct kvm_lapic *apic = vcpu->arch.apic;
for (i = 0; i < IOAPIC_NUM_PINS; i++) {
union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
spin_lock(&ioapic->lock);
if (trigger_mode != IOAPIC_LEVEL_TRIG)
if (trigger_mode != IOAPIC_LEVEL_TRIG ||
kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
continue;
ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);

View File

@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
int trigger_mode;
if (apic_test_vector(vector, apic->regs + APIC_TMR))
trigger_mode = IOAPIC_LEVEL_TRIG;

View File

@ -2479,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST;
SECONDARY_EXEC_ENABLE_EPT;
vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
@ -2494,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
} else
vmx->nested.nested_vmx_ept_caps = 0;
if (enable_unrestricted_guest)
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_UNRESTRICTED_GUEST;
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
vmx->nested.nested_vmx_misc_low,

View File

@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif
static DEFINE_SPINLOCK(p2m_update_lock);
static unsigned long *p2m_mid_missing_mfn;
@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
void __init xen_vmalloc_p2m_tree(void)
{
static struct vm_struct vm;
unsigned long p2m_limit;
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
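
P2M_LIMIT is a size in GiB taken from the balloon hotplug config, so the conversion scales it to bytes and divides by PAGE_SIZE to get an entry count, and the vmalloc area is then sized for whichever is larger, that ceiling or the boot-time xen_max_p2m_pfn. The arithmetic standalone (a sketch with a hypothetical 512 GiB limit; note the kernel widens to phys_addr_t first to avoid 32-bit overflow):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	uint64_t gib = 512; /* hypothetical CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT */
	uint64_t p2m_limit = gib * 1024 * 1024 * 1024 / PAGE_SIZE;

	printf("%llu GiB -> %llu p2m entries\n",
	       (unsigned long long)gib,
	       (unsigned long long)p2m_limit); /* 134217728 */
	return 0;
}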

View File

@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
struct bio_vec *bprev;
bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
return false;
}

View File

@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
/*
* We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for
* some to complete.
* some to complete. Note that hctx can be NULL here for
* reserved tag allocation.
*/
blk_mq_run_hw_queue(hctx, false);
if (hctx)
blk_mq_run_hw_queue(hctx, false);
/*
* Retry tag allocation after running the hardware queue,

View File

@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
*/
if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto err_map;
goto err_mq_usage;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
blk_queue_rq_timeout(q, 30000);
@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
if (blk_mq_init_hw_queues(q, set))
goto err_hw;
goto err_mq_usage;
mutex_lock(&all_q_mutex);
list_add_tail(&q->all_q_node, &all_q_list);
@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
return q;
err_hw:
err_mq_usage:
blk_cleanup_queue(q);
err_hctxs:
kfree(map);

View File

@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->physical_block_size);
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm(t->io_opt, b->io_opt);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
t->cluster &= b->cluster;
t->discard_zeroes_data &= b->discard_zeroes_data;
@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->raid_partial_stripes_expensive);
/* Find lowest common alignment_offset */
t->alignment_offset = lcm(t->alignment_offset, alignment)
t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
% max(t->physical_block_size, t->io_min);
/* Verify that new alignment_offset is on a logical block boundary */
@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_discard_sectors);
t->discard_granularity = max(t->discard_granularity,
b->discard_granularity);
t->discard_alignment = lcm(t->discard_alignment, alignment) %
t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
t->discard_granularity;
}
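
Plain lcm() treats a zero operand as making the result zero, so stacking a device with an unset io_opt or alignment wiped out the other side's value; lcm_not_zero() keeps the nonzero operand instead. A standalone sketch assuming the same semantics as the kernel helper in lib/lcm.c:

#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) { unsigned int t = a % b; a = b; b = t; }
	return a;
}

static unsigned int lcm(unsigned int a, unsigned int b)
{
	return (a && b) ? a / gcd(a, b) * b : 0;
}

/* Treat 0 as "unset" rather than letting it zero the combined limit. */
static unsigned int lcm_not_zero(unsigned int a, unsigned int b)
{
	if (!a) return b;
	if (!b) return a;
	return lcm(a, b);
}

int main(void)
{
	printf("lcm(0, 512)          = %u\n", lcm(0, 512));          /* 0 */
	printf("lcm_not_zero(0, 512) = %u\n", lcm_not_zero(0, 512)); /* 512 */
	return 0;
}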

View File

@ -358,8 +358,8 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (WARN_ON(npages == 0))
return -EINVAL;
sg_init_table(sgl->sg, npages);
/* Add one extra for linking */
sg_init_table(sgl->sg, npages + 1);
for (i = 0, len = n; i < npages; i++) {
int plen = min_t(int, len, PAGE_SIZE - off);
@ -369,18 +369,26 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
off = 0;
len -= plen;
}
sg_mark_end(sgl->sg + npages - 1);
sgl->npages = npages;
return n;
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);
void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
{
sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
}
EXPORT_SYMBOL_GPL(af_alg_link_sg);
void af_alg_free_sg(struct af_alg_sgl *sgl)
{
int i;
i = 0;
do {
for (i = 0; i < sgl->npages; i++)
put_page(sgl->pages[i]);
} while (!sg_is_last(sgl->sg + (i++)));
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);

View File

@ -34,8 +34,8 @@ struct hash_ctx {
struct ahash_request req;
};
static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored)
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
int limit = ALG_MAX_PAGES * PAGE_SIZE;
struct sock *sk = sock->sk;
@ -56,8 +56,8 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
ctx->more = 0;
while (iov_iter_count(&msg->msg_iter)) {
int len = iov_iter_count(&msg->msg_iter);
while (msg_data_left(msg)) {
int len = msg_data_left(msg);
if (len > limit)
len = limit;
@ -139,8 +139,8 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
return err ?: size;
}
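
Alongside dropping the unused kiocb argument, the loop conditions move from open-coded iov_iter_count(&msg->msg_iter) to msg_data_left(). The helper is a thin accessor; roughly (a kernel-context sketch of the wrapper this series converts callers to):

/* Sketch: bytes of payload still to be copied for this message. */
static inline size_t msg_data_left(struct msghdr *msg)
{
	return iov_iter_count(&msg->msg_iter);
}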
static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);

View File

@ -55,8 +55,8 @@ struct rng_ctx {
struct crypto_rng *drng;
};
static int rng_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);

View File

@ -39,6 +39,7 @@ struct skcipher_ctx {
struct af_alg_completion completion;
atomic_t inflight;
unsigned used;
unsigned int len;
@ -49,9 +50,65 @@ struct skcipher_ctx {
struct ablkcipher_request req;
};
struct skcipher_async_rsgl {
struct af_alg_sgl sgl;
struct list_head list;
};
struct skcipher_async_req {
struct kiocb *iocb;
struct skcipher_async_rsgl first_sgl;
struct list_head list;
struct scatterlist *tsg;
char iv[];
};
#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
#define GET_REQ_SIZE(ctx) \
crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
#define GET_IV_SIZE(ctx) \
crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
struct skcipher_async_rsgl *rsgl, *tmp;
struct scatterlist *sgl;
struct scatterlist *sg;
int i, n;
list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
af_alg_free_sg(&rsgl->sgl);
if (rsgl != &sreq->first_sgl)
kfree(rsgl);
}
sgl = sreq->tsg;
n = sg_nents(sgl);
for_each_sg(sgl, sg, n, i)
put_page(sg_page(sg));
kfree(sreq->tsg);
}
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
struct sock *sk = req->data;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
struct kiocb *iocb = sreq->iocb;
atomic_dec(&ctx->inflight);
skcipher_free_async_sgls(sreq);
kfree(req);
iocb->ki_complete(iocb, err, err);
}
static inline int skcipher_sndbuf(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
@ -96,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
return 0;
}
static void skcipher_pull_sgl(struct sock *sk, int used)
static void skcipher_pull_sgl(struct sock *sk, int used, int put)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
@ -123,8 +180,8 @@ static void skcipher_pull_sgl(struct sock *sk, int used)
if (sg[i].length)
return;
put_page(sg_page(sg + i));
if (put)
put_page(sg_page(sg + i));
sg_assign_page(sg + i, NULL);
}
@ -143,7 +200,7 @@ static void skcipher_free_sgl(struct sock *sk)
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
skcipher_pull_sgl(sk, ctx->used);
skcipher_pull_sgl(sk, ctx->used, 1);
}
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
@ -239,8 +296,8 @@ static void skcipher_data_wakeup(struct sock *sk)
rcu_read_unlock();
}
static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t size)
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@ -424,8 +481,153 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
return err ?: size;
}
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t ignored, int flags)
static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
int nents = 0;
list_for_each_entry(sgl, &ctx->tsgl, list) {
sg = sgl->sg;
while (!sg->length)
sg++;
nents += sg_nents(sg);
}
return nents;
}
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
struct ablkcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
unsigned int reqlen = sizeof(struct skcipher_async_req) +
GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
int err = -ENOMEM;
bool mark = false;
lock_sock(sk);
req = kmalloc(reqlen, GFP_KERNEL);
if (unlikely(!req))
goto unlock;
sreq = GET_SREQ(req, ctx);
sreq->iocb = msg->msg_iocb;
memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
INIT_LIST_HEAD(&sreq->list);
sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
if (unlikely(!sreq->tsg)) {
kfree(req);
goto unlock;
}
sg_init_table(sreq->tsg, tx_nents);
memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
skcipher_async_cb, sk);
while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
int used;
if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto free;
}
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
while (!sg->length)
sg++;
used = min_t(unsigned long, ctx->used,
iov_iter_count(&msg->msg_iter));
used = min_t(unsigned long, used, sg->length);
if (txbufs == tx_nents) {
struct scatterlist *tmp;
int x;
/* Ran out of tx slots in async request
* need to expand */
tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
GFP_KERNEL);
if (!tmp)
goto free;
sg_init_table(tmp, tx_nents * 2);
for (x = 0; x < tx_nents; x++)
sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
sreq->tsg[x].length,
sreq->tsg[x].offset);
kfree(sreq->tsg);
sreq->tsg = tmp;
tx_nents *= 2;
mark = true;
}
/* Need to take over the tx sgl from ctx
* to the asynch req - these sgls will be freed later */
sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
sg->offset);
if (list_empty(&sreq->list)) {
rsgl = &sreq->first_sgl;
list_add_tail(&rsgl->list, &sreq->list);
} else {
rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
if (!rsgl) {
err = -ENOMEM;
goto free;
}
list_add_tail(&rsgl->list, &sreq->list);
}
used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
err = used;
if (used < 0)
goto free;
if (last_rsgl)
af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
last_rsgl = rsgl;
len += used;
skcipher_pull_sgl(sk, used, 0);
iov_iter_advance(&msg->msg_iter, used);
}
if (mark)
sg_mark_end(sreq->tsg + txbufs - 1);
ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
len, sreq->iv);
err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
crypto_ablkcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;
goto unlock;
}
free:
skcipher_free_async_sgls(sreq);
kfree(req);
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
return err;
}
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@ -439,7 +641,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
long copied = 0;
lock_sock(sk);
while (iov_iter_count(&msg->msg_iter)) {
while (msg_data_left(msg)) {
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
@ -453,7 +655,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
goto unlock;
}
used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter));
used = min_t(unsigned long, ctx->used, msg_data_left(msg));
used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
err = used;
@ -484,7 +686,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
goto unlock;
copied += used;
skcipher_pull_sgl(sk, used);
skcipher_pull_sgl(sk, used, 1);
iov_iter_advance(&msg->msg_iter, used);
}
@ -497,6 +699,13 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
return copied ?: err;
}
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
skcipher_recvmsg_async(sock, msg, flags) :
skcipher_recvmsg_sync(sock, msg, flags);
}
static unsigned int skcipher_poll(struct file *file, struct socket *sock,
poll_table *wait)
@ -555,12 +764,25 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
return crypto_ablkcipher_setkey(private, key, keylen);
}
static void skcipher_wait(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
int ctr = 0;
while (atomic_read(&ctx->inflight) && ctr++ < 100)
msleep(100);
}
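
Teardown must not free the context while async requests still reference it, so the destructor polls the inflight counter, bounded to roughly ten seconds. A userspace sketch of the same drain pattern (C11 atomics standing in for atomic_t, usleep() for msleep()):

#include <stdatomic.h>
#include <unistd.h>

static atomic_int inflight;

/* Bounded drain: wait while completions are outstanding, but give
 * up after 100 x 100ms so teardown cannot hang forever. */
static void wait_for_inflight(void)
{
	int ctr = 0;

	while (atomic_load(&inflight) && ctr++ < 100)
		usleep(100 * 1000);
}

int main(void)
{
	wait_for_inflight();
	return 0;
}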
static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
if (atomic_read(&ctx->inflight))
skcipher_wait(sk);
skcipher_free_sgl(sk);
sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
@ -592,6 +814,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
atomic_set(&ctx->inflight, 0);
af_alg_init_completion(&ctx->completion);
ask->private = ctx;

View File

@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "INTEL*SSDSC2MH*", NULL, 0, },
{ "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
return NULL;
/* libsas case */
if (!ap->scsi_host) {
if (ap->flags & ATA_FLAG_SAS_HOST) {
tag = ata_sas_allocate_tag(ap);
if (tag < 0)
return NULL;
@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
tag = qc->tag;
if (likely(ata_tag_valid(tag))) {
qc->tag = ATA_TAG_POISON;
if (!ap->scsi_host)
if (ap->flags & ATA_FLAG_SAS_HOST)
ata_sas_free_tag(tag, ap);
}
}

View File

@ -73,9 +73,6 @@
#undef GENERAL_DEBUG
#undef EXTRA_DEBUG
#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
you're going to use only raw ATM */
/* Do not touch these */
#ifdef TX_DEBUG
@ -138,11 +135,6 @@ static void process_tsq(ns_dev * card);
static void drain_scq(ns_dev * card, scq_info * scq, int pos);
static void process_rsq(ns_dev * card);
static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
#ifdef NS_USE_DESTRUCTORS
static void ns_sb_destructor(struct sk_buff *sb);
static void ns_lb_destructor(struct sk_buff *lb);
static void ns_hb_destructor(struct sk_buff *hb);
#endif /* NS_USE_DESTRUCTORS */
static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
@ -2169,9 +2161,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
} else {
skb_put(skb, len);
dequeue_sm_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
skb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
ATM_SKB(skb)->vcc = vcc;
__net_timestamp(skb);
vcc->push(vcc, skb);
@ -2190,9 +2179,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
} else {
skb_put(sb, len);
dequeue_sm_buf(card, sb);
#ifdef NS_USE_DESTRUCTORS
sb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
ATM_SKB(sb)->vcc = vcc;
__net_timestamp(sb);
vcc->push(vcc, sb);
@ -2208,9 +2194,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
atomic_inc(&vcc->stats->rx_drop);
} else {
dequeue_lg_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
skb->destructor = ns_lb_destructor;
#endif /* NS_USE_DESTRUCTORS */
skb_push(skb, NS_SMBUFSIZE);
skb_copy_from_linear_data(sb, skb->data,
NS_SMBUFSIZE);
@ -2322,9 +2305,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
card->index);
#endif /* EXTRA_DEBUG */
ATM_SKB(hb)->vcc = vcc;
#ifdef NS_USE_DESTRUCTORS
hb->destructor = ns_hb_destructor;
#endif /* NS_USE_DESTRUCTORS */
__net_timestamp(hb);
vcc->push(vcc, hb);
atomic_inc(&vcc->stats->rx);
@ -2337,68 +2317,6 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
}
#ifdef NS_USE_DESTRUCTORS
static void ns_sb_destructor(struct sk_buff *sb)
{
ns_dev *card;
u32 stat;
card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
stat = readl(card->membase + STAT);
card->sbfqc = ns_stat_sfbqc_get(stat);
card->lbfqc = ns_stat_lfbqc_get(stat);
do {
sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
if (sb == NULL)
break;
NS_PRV_BUFTYPE(sb) = BUF_SM;
skb_queue_tail(&card->sbpool.queue, sb);
skb_reserve(sb, NS_AAL0_HEADER);
push_rxbufs(card, sb);
} while (card->sbfqc < card->sbnr.min);
}
static void ns_lb_destructor(struct sk_buff *lb)
{
ns_dev *card;
u32 stat;
card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
stat = readl(card->membase + STAT);
card->sbfqc = ns_stat_sfbqc_get(stat);
card->lbfqc = ns_stat_lfbqc_get(stat);
do {
lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
if (lb == NULL)
break;
NS_PRV_BUFTYPE(lb) = BUF_LG;
skb_queue_tail(&card->lbpool.queue, lb);
skb_reserve(lb, NS_SMBUFSIZE);
push_rxbufs(card, lb);
} while (card->lbfqc < card->lbnr.min);
}
static void ns_hb_destructor(struct sk_buff *hb)
{
ns_dev *card;
card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
while (card->hbpool.count < card->hbnr.init) {
hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
if (hb == NULL)
break;
NS_PRV_BUFTYPE(hb) = BUF_NONE;
skb_queue_tail(&card->hbpool.queue, hb);
card->hbpool.count++;
}
}
#endif /* NS_USE_DESTRUCTORS */
static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
{
if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
@ -2427,9 +2345,6 @@ static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
{
skb_unlink(sb, &card->sbpool.queue);
#ifdef NS_USE_DESTRUCTORS
if (card->sbfqc < card->sbnr.min)
#else
if (card->sbfqc < card->sbnr.init) {
struct sk_buff *new_sb;
if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
@ -2440,7 +2355,6 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
}
}
if (card->sbfqc < card->sbnr.init)
#endif /* NS_USE_DESTRUCTORS */
{
struct sk_buff *new_sb;
if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
@ -2455,9 +2369,6 @@ static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
{
skb_unlink(lb, &card->lbpool.queue);
#ifdef NS_USE_DESTRUCTORS
if (card->lbfqc < card->lbnr.min)
#else
if (card->lbfqc < card->lbnr.init) {
struct sk_buff *new_lb;
if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
@ -2468,7 +2379,6 @@ static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
}
}
if (card->lbfqc < card->lbnr.init)
#endif /* NS_USE_DESTRUCTORS */
{
struct sk_buff *new_lb;
if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {

View File

@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
extern struct regcache_ops regcache_flat_ops;
static inline const char *regmap_name(const struct regmap *map)
{
if (map->dev)
return dev_name(map->dev);
return map->name;
}
#endif

Some files were not shown because too many files have changed in this diff.