Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net

commit 823dcd2506
Author: David S. Miller
Date:   2011-08-20 10:39:12 -07:00

265 changed files with 9171 additions and 8040 deletions

View File

@ -2680,6 +2680,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
vmpoff= [KNL,S390] Perform z/VM CP command after power off.
Format: <command>
vsyscall= [X86-64]
Controls the behavior of vsyscalls (i.e. calls to
fixed addresses of 0xffffffffff600x00 from legacy
code). Most statically-linked binaries and older
versions of glibc use these calls. Because these
functions are at fixed addresses, they make nice
targets for exploits that can control RIP.
emulate [default] Vsyscalls turn into traps and are
emulated reasonably safely.
native Vsyscalls are native syscall instructions.
This is a little bit faster than trapping
and makes a few dynamic recompilers work
better than they would in emulation mode.
It also makes exploits much easier to write.
none Vsyscalls don't work at all. This makes
them quite hard to use for exploits but
might break your system.
vt.cur_default= [VT] Default cursor shape.
Format: 0xCCBBAA, where AA, BB, and CC are the same as
the parameters of the <Esc>[?A;B;Cc escape sequence;

View File

@ -1,13 +1,21 @@
00-INDEX
- this file
3c359.txt
- information on the 3Com TokenLink Velocity XL (3c359) driver.
3c505.txt
- information on the 3Com EtherLink Plus (3c505) driver.
3c509.txt
- information on the 3Com Etherlink III Series Ethernet cards.
6pack.txt
- info on the 6pack protocol, an alternative to KISS for AX.25
DLINK.txt
- info on the D-Link DE-600/DE-620 parallel port pocket adapters
PLIP.txt
- PLIP: The Parallel Line Internet Protocol device driver
README.ipw2100
- README for the Intel PRO/Wireless 2100 driver.
README.ipw2200
- README for the Intel PRO/Wireless 2915ABG and 2200BG driver.
README.sb1000
- info on General Instrument/NextLevel SURFboard1000 cable modem.
alias.txt
@ -20,8 +28,12 @@ atm.txt
- info on where to get ATM programs and support for Linux.
ax25.txt
- info on using AX.25 and NET/ROM code for Linux
batman-adv.txt
- B.A.T.M.A.N. routing protocol on top of layer 2 Ethernet Frames.
baycom.txt
- info on the driver for Baycom style amateur radio modems
bonding.txt
- Linux Ethernet Bonding Driver HOWTO: link aggregation in Linux.
bridge.txt
- where to get user space programs for ethernet bridging with Linux.
can.txt
@ -34,32 +46,60 @@ cxacru.txt
- Conexant AccessRunner USB ADSL Modem
cxacru-cf.py
- Conexant AccessRunner USB ADSL Modem configuration file parser
cxgb.txt
- Release Notes for the Chelsio N210 Linux device driver.
dccp.txt
- the Datagram Congestion Control Protocol (DCCP) (RFC 4340..42).
de4x5.txt
- the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver
decnet.txt
- info on using the DECnet networking layer in Linux.
depca.txt
- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
dl2k.txt
- README for D-Link DL2000-based Gigabit Ethernet Adapters (dl2k.ko).
dm9000.txt
- README for the Simtec DM9000 Network driver.
dmfe.txt
- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
dns_resolver.txt
- The DNS resolver module allows kernel services to make DNS queries.
driver.txt
- Softnet driver issues.
e100.txt
- info on Intel's EtherExpress PRO/100 line of 10/100 boards
e1000.txt
- info on Intel's E1000 line of gigabit ethernet boards
e1000e.txt
- README for the Intel Gigabit Ethernet Driver (e1000e).
eql.txt
- serial IP load balancing
ewrk3.txt
- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
fib_trie.txt
- Level Compressed Trie (LC-trie) notes: a structure for routing.
filter.txt
- Linux Socket Filtering
fore200e.txt
- FORE Systems PCA-200E/SBA-200E ATM NIC driver info.
framerelay.txt
- info on using Frame Relay/Data Link Connection Identifier (DLCI).
gen_stats.txt
- Generic networking statistics for netlink users.
generic_hdlc.txt
- The generic High Level Data Link Control (HDLC) layer.
generic_netlink.txt
- info on Generic Netlink
gianfar.txt
- Gianfar Ethernet Driver.
ieee802154.txt
- Linux IEEE 802.15.4 implementation, API and drivers
ifenslave.c
- Configure network interfaces for parallel routing (bonding).
igb.txt
- README for the Intel Gigabit Ethernet Driver (igb).
igbvf.txt
- README for the Intel Gigabit Ethernet Driver (igbvf).
ip-sysctl.txt
- /proc/sys/net/ipv4/* variables
ip_dynaddr.txt
@ -68,41 +108,117 @@ ipddp.txt
- AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation
iphase.txt
- Interphase PCI ATM (i)Chip IA Linux driver info.
ipv6.txt
- Options to the ipv6 kernel module.
ipvs-sysctl.txt
- Per-inode explanation of the /proc/sys/net/ipv4/vs interface.
irda.txt
- where to get IrDA (infrared) utilities and info for Linux.
ixgb.txt
- README for the Intel 10 Gigabit Ethernet Driver (ixgb).
ixgbe.txt
- README for the Intel 10 Gigabit Ethernet Driver (ixgbe).
ixgbevf.txt
- README for the Intel Virtual Function (VF) Driver (ixgbevf).
l2tp.txt
- User guide to the L2TP tunnel protocol.
lapb-module.txt
- programming information of the LAPB module.
ltpc.txt
- the Apple or Farallon LocalTalk PC card driver
mac80211-injection.txt
- HOWTO use packet injection with mac80211
multicast.txt
- Behaviour of cards under Multicast
multiqueue.txt
- HOWTO for multiqueue network device support.
netconsole.txt
- The network console module netconsole.ko: configuration and notes.
netdev-features.txt
- Network interface features API description.
netdevices.txt
- info on network device driver functions exported to the kernel.
netif-msg.txt
- Design of the network interface message level setting (NETIF_MSG_*).
nfc.txt
- The Linux Near Field Communication (NFC) subsystem.
olympic.txt
- IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info.
operstates.txt
- Overview of network interface operational states.
packet_mmap.txt
- User guide to memory mapped packet socket rings (PACKET_[RT]X_RING).
phonet.txt
- The Phonet packet protocol used in Nokia cellular modems.
phy.txt
- The PHY abstraction layer.
pktgen.txt
- User guide to the kernel packet generator (pktgen.ko).
policy-routing.txt
- IP policy-based routing
ppp_generic.txt
- Information about the generic PPP driver.
proc_net_tcp.txt
- Per inode overview of the /proc/net/tcp and /proc/net/tcp6 interfaces.
radiotap-headers.txt
- Background on radiotap headers.
ray_cs.txt
- Raylink Wireless LAN card driver info.
rds.txt
- Background on the reliable, ordered datagram delivery method RDS.
regulatory.txt
- Overview of the Linux wireless regulatory infrastructure.
rxrpc.txt
- Guide to the RxRPC protocol.
s2io.txt
- Release notes for Neterion Xframe I/II 10GbE driver.
scaling.txt
- Explanation of network scaling techniques: RSS, RPS, RFS, aRFS, XPS.
sctp.txt
- Notes on the Linux kernel implementation of the SCTP protocol.
secid.txt
- Explanation of the secid member in flow structures.
skfp.txt
- SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info.
smc9.txt
- the driver for SMC's 9000 series of Ethernet cards
smctr.txt
- SMC TokenCard TokenRing Linux driver info.
spider-net.txt
- README for the Spidernet Driver (as found in PS3 / Cell BE).
stmmac.txt
- README for the STMicro Synopsys Ethernet driver.
tc-actions-env-rules.txt
- rules for traffic control (tc) actions.
timestamping.txt
- overview of network packet timestamping variants.
tcp.txt
- short blurb on how TCP output takes place.
tcp-thin.txt
- kernel tuning options for low rate 'thin' TCP streams.
tlan.txt
- ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info.
tms380tr.txt
- SysKonnect Token Ring ISA/PCI adapter driver info.
tproxy.txt
- Transparent proxy support user guide.
tuntap.txt
- TUN/TAP device driver, allowing user space Rx/Tx of packets.
udplite.txt
- UDP-Lite protocol (RFC 3828) introduction.
vortex.txt
- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
vxge.txt
- README for the Neterion X3100 PCIe Server Adapter.
x25.txt
- general info on X.25 development.
x25-iface.txt
- description of the X.25 Packet Layer to LAPB device interface.
xfrm_proc.txt
- description of the statistics package for XFRM.
xfrm_sync.txt
- sync patches for XFRM enable migration of an SA between hosts.
xfrm_sysctl.txt
- description of the XFRM configuration options.
z8530drv.txt
- info about Linux driver for Z8530 based HDLC cards for AX.25

View File

@ -0,0 +1,378 @@
Scaling in the Linux Networking Stack
Introduction
============
This document describes a set of complementary techniques in the Linux
networking stack to increase parallelism and improve performance for
multi-processor systems.
The following technologies are described:
RSS: Receive Side Scaling
RPS: Receive Packet Steering
RFS: Receive Flow Steering
Accelerated Receive Flow Steering
XPS: Transmit Packet Steering
RSS: Receive Side Scaling
=========================
Contemporary NICs support multiple receive and transmit descriptor queues
(multi-queue). On reception, a NIC can send different packets to different
queues to distribute processing among CPUs. The NIC distributes packets by
applying a filter to each packet that assigns it to one of a small number
of logical flows. Packets for each flow are steered to a separate receive
queue, which in turn can be processed by separate CPUs. This mechanism is
generally known as “Receive-side Scaling” (RSS). The goal of RSS and
the other scaling techniques is to increase performance uniformly.
Multi-queue distribution can also be used for traffic prioritization, but
that is not the focus of these techniques.
The filter used in RSS is typically a hash function over the network
and/or transport layer headers -- for example, a 4-tuple hash over
IP addresses and TCP ports of a packet. The most common hardware
implementation of RSS uses a 128-entry indirection table where each entry
stores a queue number. The receive queue for a packet is determined
by masking out the low order seven bits of the computed hash for the
packet (usually a Toeplitz hash), taking this number as a key into the
indirection table and reading the corresponding value.
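
As a minimal sketch of that lookup (hypothetical names; the real
computation happens in NIC hardware):

#include <stdint.h>

#define RSS_INDIR_SIZE 128  /* common hardware table size */

/* Mask the low-order seven bits of the computed flow hash and use
 * the result as an index into the indirection table, whose entries
 * hold receive queue numbers. */
static uint16_t rss_select_queue(uint32_t toeplitz_hash,
                                 const uint16_t indir[RSS_INDIR_SIZE])
{
    return indir[toeplitz_hash & (RSS_INDIR_SIZE - 1)];
}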
Some advanced NICs allow steering packets to queues based on
programmable filters. For example, webserver bound TCP port 80 packets
can be directed to their own receive queue. Such “n-tuple” filters can
be configured from ethtool (--config-ntuple).
==== RSS Configuration
The driver for a multi-queue capable NIC typically provides a kernel
module parameter for specifying the number of hardware queues to
configure. In the bnx2x driver, for instance, this parameter is called
num_queues. A typical RSS configuration would be to have one receive queue
for each CPU if the device supports enough queues, or otherwise at least
one for each memory domain, where a memory domain is a set of CPUs that
share a particular memory level (L1, L2, NUMA node, etc.).
The indirection table of an RSS device, which resolves a queue by masked
hash, is usually programmed by the driver at initialization. The
default mapping is to distribute the queues evenly in the table, but the
indirection table can be retrieved and modified at runtime using ethtool
commands (--show-rxfh-indir and --set-rxfh-indir). Modifying the
indirection table could be done to give different queues different
relative weights.
== RSS IRQ Configuration
Each receive queue has a separate IRQ associated with it. The NIC triggers
this to notify a CPU when new packets arrive on the given queue. The
signaling path for PCIe devices uses message signaled interrupts (MSI-X),
which can route each interrupt to a particular CPU. The active mapping
of queues to IRQs can be determined from /proc/interrupts. By default,
an IRQ may be handled on any CPU. Because a non-negligible part of packet
processing takes place in receive interrupt handling, it is advantageous
to spread receive interrupts between CPUs. To manually adjust the IRQ
affinity of each interrupt see Documentation/IRQ-affinity. Some systems
will be running irqbalance, a daemon that dynamically optimizes IRQ
assignments and as a result may override any manual settings.
== Suggested Configuration
RSS should be enabled when latency is a concern or whenever receive
interrupt processing forms a bottleneck. Spreading load between CPUs
decreases queue length. For low latency networking, the optimal setting
is to allocate as many queues as there are CPUs in the system (or the
NIC maximum, if lower). The most efficient high-rate configuration
is likely the one with the smallest number of receive queues where no
receive queue overflows due to a saturated CPU, because in default
mode with interrupt coalescing enabled, the aggregate number of
interrupts (and thus work) grows with each additional queue.
Per-cpu load can be observed using the mpstat utility, but note that on
processors with hyperthreading (HT), each hyperthread is represented as
a separate CPU. For interrupt handling, HT has shown no benefit in
initial tests, so limit the number of queues to the number of CPU cores
in the system.
RPS: Receive Packet Steering
============================
Receive Packet Steering (RPS) is logically a software implementation of
RSS. Being in software, it is necessarily called later in the datapath.
Whereas RSS selects the queue and hence CPU that will run the hardware
interrupt handler, RPS selects the CPU to perform protocol processing
above the interrupt handler. This is accomplished by placing the packet
on the desired CPU's backlog queue and waking up the CPU for processing.
RPS has some advantages over RSS: 1) it can be used with any NIC,
2) software filters can easily be added to hash over new protocols,
3) it does not increase hardware device interrupt rate (although it does
introduce inter-processor interrupts (IPIs)).
RPS is called during the bottom half of the receive interrupt handler, when
a driver sends a packet up the network stack with netif_rx() or
netif_receive_skb(). These call the get_rps_cpu() function, which
selects the queue that should process a packet.
The first step in determining the target CPU for RPS is to calculate a
flow hash over the packet's addresses or ports (2-tuple or 4-tuple hash
depending on the protocol). This serves as a consistent hash of the
associated flow of the packet. The hash is either provided by hardware
or will be computed in the stack. Capable hardware can pass the hash in
the receive descriptor for the packet; this would usually be the same
hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
skb->rx_hash and can be used elsewhere in the stack as a hash of the
packet's flow.
Each receive hardware queue has an associated list of CPUs to which
RPS may enqueue packets for processing. For each received packet,
an index into the list is computed from the flow hash modulo the size
of the list. The indexed CPU is the target for processing the packet,
and the packet is queued to the tail of that CPU's backlog queue. At
the end of the bottom half routine, IPIs are sent to any CPUs for which
packets have been queued to their backlog queue. The IPI wakes backlog
processing on the remote CPU, and any queued packets are then processed
up the networking stack.
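
A minimal sketch of this selection step, assuming a hypothetical
per-queue CPU list (the kernel's equivalent is struct rps_map):

#include <stdint.h>

struct rps_map_sketch {
    unsigned int len;     /* number of CPUs configured for this queue */
    unsigned int cpus[];  /* CPU ids taken from the rps_cpus bitmap */
};

/* Index the CPU list with the flow hash modulo the list size. */
static unsigned int rps_pick_cpu(uint32_t flow_hash,
                                 const struct rps_map_sketch *map)
{
    return map->cpus[flow_hash % map->len];
}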
==== RPS Configuration
RPS requires a kernel compiled with the CONFIG_RPS kconfig symbol (on
by default for SMP). Even when compiled in, RPS remains disabled until
explicitly configured. The list of CPUs to which RPS may forward traffic
can be configured for each receive queue using a sysfs file entry:
/sys/class/net/<dev>/queues/rx-<n>/rps_cpus
This file implements a bitmap of CPUs. RPS is disabled when it is zero
(the default), in which case packets are processed on the interrupting
CPU. Documentation/IRQ-affinity.txt explains how CPUs are assigned to
the bitmap.
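
For illustration, a small userspace sketch that enables RPS on queue
rx-0 of a hypothetical device eth0 (the mask value is only an example):

#include <stdio.h>

int main(void)
{
    /* Hex bitmap "f" selects CPUs 0-3; adjust for your topology. */
    FILE *f = fopen("/sys/class/net/eth0/queues/rx-0/rps_cpus", "w");

    if (!f) {
        perror("rps_cpus");
        return 1;
    }
    fputs("f\n", f);
    return fclose(f) ? 1 : 0;
}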
== Suggested Configuration
For a single queue device, a typical RPS configuration would be to set
the rps_cpus to the CPUs in the same memory domain of the interrupting
CPU. If NUMA locality is not an issue, this could also be all CPUs in
the system. At high interrupt rate, it might be wise to exclude the
interrupting CPU from the map since that already performs much work.
For a multi-queue system, if RSS is configured so that a hardware
receive queue is mapped to each CPU, then RPS is probably redundant
and unnecessary. If there are fewer hardware queues than CPUs, then
RPS might be beneficial if the rps_cpus for each queue are the ones that
share the same memory domain as the interrupting CPU for that queue.
RFS: Receive Flow Steering
==========================
While RPS steers packets solely based on hash, and thus generally
provides good load distribution, it does not take into account
application locality. Receive Flow Steering (RFS) addresses this.
The goal of RFS is to increase datacache hitrate by steering
kernel processing of packets to the CPU where the application thread
consuming the packet is running. RFS relies on the same RPS mechanisms
to enqueue packets onto the backlog of another CPU and to wake up that
CPU.
In RFS, packets are not forwarded directly by the value of their hash,
but the hash is used as index into a flow lookup table. This table maps
flows to the CPUs where those flows are being processed. The flow hash
(see RPS section above) is used to calculate the index into this table.
The CPU recorded in each entry is the one which last processed the flow.
If an entry does not hold a valid CPU, then packets mapped to that entry
are steered using plain RPS. Multiple table entries may point to the
same CPU. Indeed, with many flows and few CPUs, it is very likely that
a single application thread handles flows with many different flow hashes.
rps_sock_flow_table is a global flow table that contains the *desired* CPU for
flows: the CPU that is currently processing the flow in userspace. Each
table value is a CPU index that is updated during calls to recvmsg and
sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
and tcp_splice_read()).
When the scheduler moves a thread to a new CPU while it has outstanding
receive packets on the old CPU, packets may arrive out of order. To
avoid this, RFS uses a second flow table to track outstanding packets
for each flow: rps_dev_flow_table is a table specific to each hardware
receive queue of each device. Each table value stores a CPU index and a
counter. The CPU index represents the *current* CPU onto which packets
for this flow are enqueued for further kernel processing. Ideally, kernel
and userspace processing occur on the same CPU, and hence the CPU index
in both tables is identical. This is likely false if the scheduler has
recently migrated a userspace thread while the kernel still has packets
enqueued for kernel processing on the old CPU.
The counter in rps_dev_flow_table values records the length of the current
CPU's backlog when a packet in this flow was last enqueued. Each backlog
queue has a head counter that is incremented on dequeue. A tail counter
is computed as head counter + queue length. In other words, the counter
in rps_dev_flow_table[i] records the last element in flow i that has
been enqueued onto the currently designated CPU for flow i (of course,
entry i is actually selected by hash and multiple flows may hash to the
same entry i).
And now the trick for avoiding out of order packets: when selecting the
CPU for packet processing (from get_rps_cpu()) the rps_sock_flow table
and the rps_dev_flow table of the queue that the packet was received on
are compared. If the desired CPU for the flow (found in the
rps_sock_flow table) matches the current CPU (found in the rps_dev_flow
table), the packet is enqueued onto that CPU's backlog. If they differ,
the current CPU is updated to match the desired CPU if one of the
following is true:
- The current CPU's queue head counter >= the recorded tail counter
value in rps_dev_flow[i]
- The current CPU is unset (equal to NR_CPUS)
- The current CPU is offline
After this check, the packet is sent to the (possibly updated) current
CPU. These rules aim to ensure that a flow only moves to a new CPU when
there are no packets outstanding on the old CPU, as the outstanding
packets could arrive later than those about to be processed on the new
CPU.
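
A condensed sketch of that decision, with hypothetical names standing
in for the kernel's tables:

#include <stdbool.h>
#include <stdint.h>

#define CPU_UNSET UINT32_MAX  /* stand-in for the NR_CPUS sentinel */

struct dev_flow_sketch {
    uint32_t cpu;        /* current CPU for this flow */
    uint32_t last_qtail; /* tail counter recorded at last enqueue */
};

/* Move the flow to desired_cpu (from the sock flow table) only when
 * no packets can still be outstanding on the current CPU. */
static uint32_t rfs_choose_cpu(uint32_t desired_cpu,
                               struct dev_flow_sketch *ent,
                               uint32_t head_counter,
                               bool current_cpu_offline)
{
    if (ent->cpu != desired_cpu &&
        (ent->cpu == CPU_UNSET ||
         current_cpu_offline ||
         (int32_t)(head_counter - ent->last_qtail) >= 0))
        ent->cpu = desired_cpu;
    return ent->cpu;
}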
==== RFS Configuration
RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on
by default for SMP). The functionality remains disabled until explicitly
configured. The number of entries in the global flow table is set through:
/proc/sys/net/core/rps_sock_flow_entries
The number of entries in the per-queue flow table is set through:
/sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
== Suggested Configuration
Both of these need to be set before RFS is enabled for a receive queue.
Values for both are rounded up to the nearest power of two. The
suggested flow count depends on the expected number of active connections
at any given time, which may be significantly less than the number of open
connections. We have found that a value of 32768 for rps_sock_flow_entries
works fairly well on a moderately loaded server.
For a single queue device, the rps_flow_cnt value for the single queue
would normally be configured to the same value as rps_sock_flow_entries.
For a multi-queue device, the rps_flow_cnt for each queue might be
configured as rps_sock_flow_entries / N, where N is the number of
queues. So for instance, if rps_sock_flow_entries is set to 32768 and there
are 16 configured receive queues, rps_flow_cnt for each queue might be
configured as 2048.
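
A hedged userspace sketch of exactly that arithmetic (device name and
queue count are assumptions):

#include <stdio.h>

int main(void)
{
    const unsigned int entries = 32768, nqueues = 16;
    char path[96];
    FILE *f;
    unsigned int q;

    /* Global table: 32768 entries. */
    f = fopen("/proc/sys/net/core/rps_sock_flow_entries", "w");
    if (!f)
        return 1;
    fprintf(f, "%u\n", entries);
    fclose(f);

    /* Per-queue tables: 32768 / 16 = 2048 entries each. */
    for (q = 0; q < nqueues; q++) {
        snprintf(path, sizeof(path),
                 "/sys/class/net/eth0/queues/rx-%u/rps_flow_cnt", q);
        f = fopen(path, "w");
        if (!f)
            return 1;
        fprintf(f, "%u\n", entries / nqueues);
        fclose(f);
    }
    return 0;
}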
Accelerated RFS
===============
Accelerated RFS is to RFS what RSS is to RPS: a hardware-accelerated load
balancing mechanism that uses soft state to steer flows based on where
the application thread consuming the packets of each flow is running.
Accelerated RFS should perform better than RFS since packets are sent
directly to a CPU local to the thread consuming the data. The target CPU
will either be the same CPU where the application runs, or at least a CPU
which is local to the application thread's CPU in the cache hierarchy.
To enable accelerated RFS, the networking stack calls the
ndo_rx_flow_steer driver function to communicate the desired hardware
queue for packets matching a particular flow. The network stack
automatically calls this function every time a flow entry in
rps_dev_flow_table is updated. The driver in turn uses a device specific
method to program the NIC to steer the packets.
The hardware queue for a flow is derived from the CPU recorded in
rps_dev_flow_table. The stack consults a CPU to hardware queue map which
is maintained by the NIC driver. This is an auto-generated reverse map of
the IRQ affinity table shown by /proc/interrupts. Drivers can use
functions in the cpu_rmap (“CPU affinity reverse map”) kernel library
to populate the map. For each CPU, the corresponding queue in the map is
set to be one whose processing CPU is closest in cache locality.
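
A hedged kernel-side sketch of how a driver might build that map with
the cpu_rmap helpers (error unwinding trimmed; treat the exact calls as
assumptions against your kernel version):

#include <linux/cpu_rmap.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

static int sketch_setup_rx_cpu_rmap(struct net_device *dev,
                                    const int *rx_irqs, int nqueues)
{
    int i, err;

    /* One reverse-map slot per receive queue IRQ. */
    dev->rx_cpu_rmap = alloc_irq_cpu_rmap(nqueues);
    if (!dev->rx_cpu_rmap)
        return -ENOMEM;

    for (i = 0; i < nqueues; i++) {
        err = irq_cpu_rmap_add(dev->rx_cpu_rmap, rx_irqs[i]);
        if (err)
            return err;
    }
    return 0;
}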
==== Accelerated RFS Configuration
Accelerated RFS is only available if the kernel is compiled with
CONFIG_RFS_ACCEL and support is provided by the NIC device and driver.
It also requires that ntuple filtering is enabled via ethtool. The map
of CPU to queues is automatically deduced from the IRQ affinities
configured for each receive queue by the driver, so no additional
configuration should be necessary.
== Suggested Configuration
This technique should be enabled whenever one wants to use RFS and the
NIC supports hardware acceleration.
XPS: Transmit Packet Steering
=============================
Transmit Packet Steering is a mechanism for intelligently selecting
which transmit queue to use when transmitting a packet on a multi-queue
device. To accomplish this, a mapping from CPU to hardware queue(s) is
recorded. The goal of this mapping is usually to assign queues
exclusively to a subset of CPUs, where the transmit completions for
these queues are processed on a CPU within this set. This choice
provides two benefits. First, contention on the device queue lock is
significantly reduced since fewer CPUs contend for the same queue
(contention can be eliminated completely if each CPU has its own
transmit queue). Secondly, cache miss rate on transmit completion is
reduced, in particular for data cache lines that hold the sk_buff
structures.
XPS is configured per transmit queue by setting a bitmap of CPUs that
may use that queue to transmit. The reverse mapping, from CPUs to
transmit queues, is computed and maintained for each network device.
When transmitting the first packet in a flow, the function
get_xps_queue() is called to select a queue. This function uses the ID
of the running CPU as a key into the CPU-to-queue lookup table. If the
ID matches a single queue, that is used for transmission. If multiple
queues match, one is selected by using the flow hash to compute an index
into the set.
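
A minimal sketch of that selection (hypothetical structure; the
kernel's equivalent is struct xps_map):

#include <stdint.h>

struct xps_map_sketch {
    unsigned int len;  /* tx queues mapped to the running CPU */
    uint16_t queues[];
};

/* Return the queue for this CPU, hashing into the set when more than
 * one queue matches; -1 means fall back to other selection. */
static int xps_pick_queue(const struct xps_map_sketch *map,
                          uint32_t flow_hash)
{
    if (!map || map->len == 0)
        return -1;
    if (map->len == 1)
        return map->queues[0];
    return map->queues[flow_hash % map->len];
}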
The queue chosen for transmitting a particular flow is saved in the
corresponding socket structure for the flow (e.g. a TCP connection).
This transmit queue is used for subsequent packets sent on the flow to
prevent out of order (ooo) packets. The choice also amortizes the cost
of calling get_xps_queue() over all packets in the flow. To avoid
ooo packets, the queue for a flow can subsequently only be changed if
skb->ooo_okay is set for a packet in the flow. This flag indicates that
there are no outstanding packets in the flow, so the transmit queue can
change without the risk of generating out of order packets. The
transport layer is responsible for setting ooo_okay appropriately. TCP,
for instance, sets the flag when all data for a connection has been
acknowledged.
==== XPS Configuration
XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by
default for SMP). The functionality remains disabled until explicitly
configured. To enable XPS, the bitmap of CPUs that may use a transmit
queue is configured using the sysfs file entry:
/sys/class/net/<dev>/queues/tx-<n>/xps_cpus
== Suggested Configuration
For a network device with a single transmission queue, XPS configuration
has no effect, since there is no choice in this case. In a multi-queue
system, XPS is preferably configured so that each CPU maps onto one queue.
If there are as many queues as there are CPUs in the system, then each
queue can also map onto one CPU, resulting in exclusive pairings that
experience no contention. If there are fewer queues than CPUs, then the
best CPUs to share a given queue are probably those that share the cache
with the CPU that processes transmit completions for that queue
(transmit interrupts).
Further Information
===================
RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into
2.6.38. Original patches were submitted by Tom Herbert
(therbert@google.com)
Accelerated RFS was introduced in 2.6.35. Original patches were
submitted by Ben Hutchings (bhutchings@solarflare.com)
Authors:
Tom Herbert (therbert@google.com)
Willem de Bruijn (willemb@google.com)

View File

@ -3898,9 +3898,9 @@ F: arch/powerpc/platforms/powermac/
F: drivers/macintosh/
LINUX FOR POWERPC EMBEDDED MPC5XXX
M: Grant Likely <grant.likely@secretlab.ca>
M: Anatolij Gustschin <agust@denx.de>
L: linuxppc-dev@lists.ozlabs.org
T: git git://git.secretlab.ca/git/linux-2.6.git
T: git git://git.denx.de/linux-2.6-agust.git
S: Maintained
F: arch/powerpc/platforms/512x/
F: arch/powerpc/platforms/52xx/
@ -7351,7 +7351,7 @@ THE REST
M: Linus Torvalds <torvalds@linux-foundation.org>
L: linux-kernel@vger.kernel.org
Q: http://patchwork.kernel.org/project/LKML/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
S: Buried alive in reporters
F: *
F: */

View File

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 0
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION =
EXTRAVERSION = -rc1
NAME = Sneaky Weasel
# *DOCUMENTATION*

View File

@ -195,8 +195,7 @@ config VECTORS_BASE
The base address of exception vectors.
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
depends on EXPERIMENTAL
bool "Patch physical to virtual translations at runtime"
depends on !XIP_KERNEL && MMU
depends on !ARCH_REALVIEW || !SPARSEMEM
help

View File

@ -195,10 +195,10 @@ ENTRY(iwmmxt_task_disable)
@ enable access to CP0 and CP1
XSC(mrc p15, 0, r4, c15, c1, 0)
XSC(orr r4, r4, #0xf)
XSC(orr r4, r4, #0x3)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(mrc p15, 0, r4, c1, c0, 2)
PJ4(orr r4, r4, #0x3)
PJ4(orr r4, r4, #0xf)
PJ4(mcr p15, 0, r4, c1, c0, 2)
mov r0, #0 @ nothing to load
@ -313,7 +313,7 @@ ENTRY(iwmmxt_task_switch)
teq r2, r3 @ next task owns it?
movne pc, lr @ no: leave Concan disabled
1: @ flip Conan access
1: @ flip Concan access
XSC(eor r1, r1, #0x3)
XSC(mcr p15, 0, r1, c15, c1, 0)
PJ4(eor r1, r1, #0xf)

View File

@ -323,7 +323,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
#endif
s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
if (s && !is_smp())
#ifdef CONFIG_SMP_ON_UP
fixup_smp((void *)s->sh_addr, s->sh_size);
#else
return -EINVAL;
#endif
return 0;
}

View File

@ -331,6 +331,9 @@ int __init mx25_clocks_init(void)
__raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0),
CRM_BASE + 0x64);
/* Clock source for gpt is ahb_div */
__raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64);
mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
return 0;

View File

@ -30,6 +30,7 @@
#include <linux/input.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <sound/tlv320aic32x4.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
@ -196,6 +197,17 @@ static struct pca953x_platform_data visstrim_m10_pca9555_pdata = {
.invert = 0,
};
static struct aic32x4_pdata visstrim_m10_aic32x4_pdata = {
.power_cfg = AIC32X4_PWR_MICBIAS_2075_LDOIN |
AIC32X4_PWR_AVDD_DVDD_WEAK_DISABLE |
AIC32X4_PWR_AIC32X4_LDO_ENABLE |
AIC32X4_PWR_CMMODE_LDOIN_RANGE_18_36 |
AIC32X4_PWR_CMMODE_HP_LDOIN_POWERED,
.micpga_routing = AIC32X4_MICPGA_ROUTE_LMIC_IN2R_10K |
AIC32X4_MICPGA_ROUTE_RMIC_IN1L_10K,
.swapdacs = false,
};
static struct i2c_board_info visstrim_m10_i2c_devices[] = {
{
I2C_BOARD_INFO("pca9555", 0x20),
@ -203,6 +215,7 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = {
},
{
I2C_BOARD_INFO("tlv320aic32x4", 0x18),
.platform_data = &visstrim_m10_aic32x4_pdata,
}
};

View File

@ -468,7 +468,7 @@ static struct i2c_board_info __initdata mx31ads_i2c1_devices[] = {
#endif
};
static void mxc_init_i2c(void)
static void __init mxc_init_i2c(void)
{
i2c_register_board_info(1, mx31ads_i2c1_devices,
ARRAY_SIZE(mx31ads_i2c1_devices));
@ -486,7 +486,7 @@ static unsigned int ssi_pins[] = {
MX31_PIN_STXD5__STXD5,
};
static void mxc_init_audio(void)
static void __init mxc_init_audio(void)
{
imx31_add_imx_ssi(0, NULL);
mxc_iomux_setup_multiple_pins(ssi_pins, ARRAY_SIZE(ssi_pins), "ssi");

View File

@ -192,7 +192,7 @@ static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
};
static void lilly1131_usb_init(void)
static void __init lilly1131_usb_init(void)
{
imx31_add_mxc_ehci_hs(1, &usbh1_pdata);

View File

@ -16,16 +16,18 @@
#include <mach/gpio.h>
#include <mach/pxa168.h>
#include <mach/mfp-pxa168.h>
#include <mach/mfp-gplugd.h>
#include "common.h"
static unsigned long gplugd_pin_config[] __initdata = {
/* UART3 */
GPIO8_UART3_SOUT,
GPIO9_UART3_SIN,
GPI1O_UART3_CTS,
GPI11_UART3_RTS,
GPIO8_UART3_TXD,
GPIO9_UART3_RXD,
GPIO1O_UART3_CTS,
GPIO11_UART3_RTS,
/* USB OTG PEN */
GPIO18_GPIO,
/* MMC2 */
GPIO28_MMC2_CMD,
@ -109,6 +111,12 @@ static unsigned long gplugd_pin_config[] __initdata = {
GPIO105_CI2C_SDA,
GPIO106_CI2C_SCL,
/* SPI NOR Flash on SSP2 */
GPIO107_SSP2_RXD,
GPIO108_SSP2_TXD,
GPIO110_GPIO, /* SPI_CSn */
GPIO111_SSP2_CLK,
/* Select JTAG */
GPIO109_GPIO,
@ -154,7 +162,7 @@ static void __init select_disp_freq(void)
"frequency\n");
} else {
gpio_direction_output(35, 1);
gpio_free(104);
gpio_free(35);
}
if (unlikely(gpio_request(85, "DISP_FREQ_SEL_2"))) {
@ -162,7 +170,7 @@ static void __init select_disp_freq(void)
"frequency\n");
} else {
gpio_direction_output(85, 0);
gpio_free(104);
gpio_free(85);
}
}

View File

@ -1,52 +0,0 @@
/*
* linux/arch/arm/mach-mmp/include/mach/mfp-gplugd.h
*
* MFP definitions used in gplugD
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __MACH_MFP_GPLUGD_H
#define __MACH_MFP_GPLUGD_H
#include <plat/mfp.h>
#include <mach/mfp.h>
/* UART3 */
#define GPIO8_UART3_SOUT MFP_CFG(GPIO8, AF2)
#define GPIO9_UART3_SIN MFP_CFG(GPIO9, AF2)
#define GPI1O_UART3_CTS MFP_CFG(GPIO10, AF2)
#define GPI11_UART3_RTS MFP_CFG(GPIO11, AF2)
/* MMC2 */
#define GPIO28_MMC2_CMD MFP_CFG_DRV(GPIO28, AF6, FAST)
#define GPIO29_MMC2_CLK MFP_CFG_DRV(GPIO29, AF6, FAST)
#define GPIO30_MMC2_DAT0 MFP_CFG_DRV(GPIO30, AF6, FAST)
#define GPIO31_MMC2_DAT1 MFP_CFG_DRV(GPIO31, AF6, FAST)
#define GPIO32_MMC2_DAT2 MFP_CFG_DRV(GPIO32, AF6, FAST)
#define GPIO33_MMC2_DAT3 MFP_CFG_DRV(GPIO33, AF6, FAST)
/* I2S */
#undef GPIO114_I2S_FRM
#undef GPIO115_I2S_BCLK
#define GPIO114_I2S_FRM MFP_CFG_DRV(GPIO114, AF1, FAST)
#define GPIO115_I2S_BCLK MFP_CFG_DRV(GPIO115, AF1, FAST)
#define GPIO116_I2S_TXD MFP_CFG_DRV(GPIO116, AF1, FAST)
/* MMC4 */
#define GPIO125_MMC4_DAT3 MFP_CFG_DRV(GPIO125, AF7, FAST)
#define GPIO126_MMC4_DAT2 MFP_CFG_DRV(GPIO126, AF7, FAST)
#define GPIO127_MMC4_DAT1 MFP_CFG_DRV(GPIO127, AF7, FAST)
#define GPIO0_2_MMC4_DAT0 MFP_CFG_DRV(GPIO0_2, AF7, FAST)
#define GPIO1_2_MMC4_CMD MFP_CFG_DRV(GPIO1_2, AF7, FAST)
#define GPIO2_2_MMC4_CLK MFP_CFG_DRV(GPIO2_2, AF7, FAST)
/* OTG GPIO */
#define GPIO_USB_OTG_PEN 18
#define GPIO_USB_OIDIR 20
/* Other GPIOs are 35, 84, 85 */
#endif /* __MACH_MFP_GPLUGD_H */

View File

@ -203,6 +203,10 @@
#define GPIO33_CF_nCD2 MFP_CFG(GPIO33, AF3)
/* UART */
#define GPIO8_UART3_TXD MFP_CFG(GPIO8, AF2)
#define GPIO9_UART3_RXD MFP_CFG(GPIO9, AF2)
#define GPIO1O_UART3_CTS MFP_CFG(GPIO10, AF2)
#define GPIO11_UART3_RTS MFP_CFG(GPIO11, AF2)
#define GPIO88_UART2_TXD MFP_CFG(GPIO88, AF2)
#define GPIO89_UART2_RXD MFP_CFG(GPIO89, AF2)
#define GPIO107_UART1_TXD MFP_CFG_DRV(GPIO107, AF1, FAST)
@ -232,6 +236,22 @@
#define GPIO53_MMC1_CD MFP_CFG(GPIO53, AF1)
#define GPIO46_MMC1_WP MFP_CFG(GPIO46, AF1)
/* MMC2 */
#define GPIO28_MMC2_CMD MFP_CFG_DRV(GPIO28, AF6, FAST)
#define GPIO29_MMC2_CLK MFP_CFG_DRV(GPIO29, AF6, FAST)
#define GPIO30_MMC2_DAT0 MFP_CFG_DRV(GPIO30, AF6, FAST)
#define GPIO31_MMC2_DAT1 MFP_CFG_DRV(GPIO31, AF6, FAST)
#define GPIO32_MMC2_DAT2 MFP_CFG_DRV(GPIO32, AF6, FAST)
#define GPIO33_MMC2_DAT3 MFP_CFG_DRV(GPIO33, AF6, FAST)
/* MMC4 */
#define GPIO125_MMC4_DAT3 MFP_CFG_DRV(GPIO125, AF7, FAST)
#define GPIO126_MMC4_DAT2 MFP_CFG_DRV(GPIO126, AF7, FAST)
#define GPIO127_MMC4_DAT1 MFP_CFG_DRV(GPIO127, AF7, FAST)
#define GPIO0_2_MMC4_DAT0 MFP_CFG_DRV(GPIO0_2, AF7, FAST)
#define GPIO1_2_MMC4_CMD MFP_CFG_DRV(GPIO1_2, AF7, FAST)
#define GPIO2_2_MMC4_CLK MFP_CFG_DRV(GPIO2_2, AF7, FAST)
/* LCD */
#define GPIO84_LCD_CS MFP_CFG(GPIO84, AF1)
#define GPIO60_LCD_DD0 MFP_CFG(GPIO60, AF1)
@ -269,11 +289,12 @@
#define GPIO106_CI2C_SCL MFP_CFG(GPIO106, AF1)
/* I2S */
#define GPIO113_I2S_MCLK MFP_CFG(GPIO113,AF6)
#define GPIO114_I2S_FRM MFP_CFG(GPIO114,AF1)
#define GPIO115_I2S_BCLK MFP_CFG(GPIO115,AF1)
#define GPIO116_I2S_RXD MFP_CFG(GPIO116,AF2)
#define GPIO117_I2S_TXD MFP_CFG(GPIO117,AF2)
#define GPIO113_I2S_MCLK MFP_CFG(GPIO113, AF6)
#define GPIO114_I2S_FRM MFP_CFG(GPIO114, AF1)
#define GPIO115_I2S_BCLK MFP_CFG(GPIO115, AF1)
#define GPIO116_I2S_RXD MFP_CFG(GPIO116, AF2)
#define GPIO116_I2S_TXD MFP_CFG(GPIO116, AF1)
#define GPIO117_I2S_TXD MFP_CFG(GPIO117, AF2)
/* PWM */
#define GPIO96_PWM3_OUT MFP_CFG(GPIO96, AF1)
@ -324,4 +345,10 @@
#define GPIO101_MII_MDIO MFP_CFG(GPIO101, AF5)
#define GPIO103_RX_DV MFP_CFG(GPIO103, AF5)
/* SSP2 */
#define GPIO107_SSP2_RXD MFP_CFG(GPIO107, AF4)
#define GPIO108_SSP2_TXD MFP_CFG(GPIO108, AF4)
#define GPIO111_SSP2_CLK MFP_CFG(GPIO111, AF4)
#define GPIO112_SSP2_FRM MFP_CFG(GPIO112, AF4)
#endif /* __ASM_MACH_MFP_PXA168_H */

View File

@ -51,12 +51,12 @@ static inline uint32_t timer_read(void)
{
int delay = 100;
__raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(0));
__raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(1));
while (delay--)
cpu_relax();
return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(0));
return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(1));
}
unsigned long long notrace sched_clock(void)
@ -75,28 +75,51 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *c = dev_id;
/* disable and clear pending interrupt status */
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0));
__raw_writel(0x1, TIMERS_VIRT_BASE + TMR_ICR(0));
/*
* Clear pending interrupt status.
*/
__raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0));
/*
* Disable timer 0.
*/
__raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER);
c->event_handler(c);
return IRQ_HANDLED;
}
static int timer_set_next_event(unsigned long delta,
struct clock_event_device *dev)
{
unsigned long flags, next;
unsigned long flags;
local_irq_save(flags);
/* clear pending interrupt status and enable */
/*
* Disable timer 0.
*/
__raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER);
/*
* Clear and enable timer match 0 interrupt.
*/
__raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0));
__raw_writel(0x01, TIMERS_VIRT_BASE + TMR_IER(0));
next = timer_read() + delta;
__raw_writel(next, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0));
/*
* Setup new clockevent timer value.
*/
__raw_writel(delta - 1, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0));
/*
* Enable timer 0.
*/
__raw_writel(0x03, TIMERS_VIRT_BASE + TMR_CER);
local_irq_restore(flags);
return 0;
}
@ -145,23 +168,26 @@ static struct clocksource cksrc = {
static void __init timer_config(void)
{
uint32_t ccr = __raw_readl(TIMERS_VIRT_BASE + TMR_CCR);
uint32_t cer = __raw_readl(TIMERS_VIRT_BASE + TMR_CER);
uint32_t cmr = __raw_readl(TIMERS_VIRT_BASE + TMR_CMR);
__raw_writel(cer & ~0x1, TIMERS_VIRT_BASE + TMR_CER); /* disable */
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_CER); /* disable */
ccr &= (cpu_is_mmp2()) ? TMR_CCR_CS_0(0) : TMR_CCR_CS_0(3);
ccr &= (cpu_is_mmp2()) ? (TMR_CCR_CS_0(0) | TMR_CCR_CS_1(0)) :
(TMR_CCR_CS_0(3) | TMR_CCR_CS_1(3));
__raw_writel(ccr, TIMERS_VIRT_BASE + TMR_CCR);
/* free-running mode */
__raw_writel(cmr | 0x01, TIMERS_VIRT_BASE + TMR_CMR);
/* set timer 0 to periodic mode, and timer 1 to free-running mode */
__raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CMR);
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* free-running */
__raw_writel(0x1, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* periodic */
__raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(0)); /* clear status */
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0));
/* enable timer counter */
__raw_writel(cer | 0x01, TIMERS_VIRT_BASE + TMR_CER);
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(1)); /* free-running */
__raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(1)); /* clear status */
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(1));
/* enable timer 1 counter */
__raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CER);
}
static struct irqaction timer_irq = {

View File

@ -81,7 +81,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x2000000),
.irq = irq_to_gpio(CPUIMX51_QUARTD_GPIO),
.irq = gpio_to_irq(CPUIMX51_QUARTD_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,

View File

@ -369,7 +369,7 @@ static void __init mx51_babbage_init(void)
ARRAY_SIZE(mx51babbage_pads));
imx51_add_imx_uart(0, &uart_pdata);
imx51_add_imx_uart(1, &uart_pdata);
imx51_add_imx_uart(1, NULL);
imx51_add_imx_uart(2, &uart_pdata);
babbage_fec_reset();

View File

@ -108,9 +108,9 @@ static void __init mx51_efikamx_board_id(void)
gpio_request(EFIKAMX_PCBID2, "pcbid2");
gpio_direction_input(EFIKAMX_PCBID2);
id = gpio_get_value(EFIKAMX_PCBID0);
id |= gpio_get_value(EFIKAMX_PCBID1) << 1;
id |= gpio_get_value(EFIKAMX_PCBID2) << 2;
id = gpio_get_value(EFIKAMX_PCBID0) ? 1 : 0;
id |= (gpio_get_value(EFIKAMX_PCBID1) ? 1 : 0) << 1;
id |= (gpio_get_value(EFIKAMX_PCBID2) ? 1 : 0) << 2;
switch (id) {
case 7:

View File

@ -156,23 +156,24 @@ static struct gpio_keys_button mx51_efikasb_keys[] = {
{
.code = KEY_POWER,
.gpio = EFIKASB_PWRKEY,
.type = EV_PWR,
.type = EV_KEY,
.desc = "Power Button",
.wakeup = 1,
.debounce_interval = 10, /* ms */
.active_low = 1,
},
{
.code = SW_LID,
.gpio = EFIKASB_LID,
.type = EV_SW,
.desc = "Lid Switch",
.active_low = 1,
},
{
/* SW_RFKILLALL vs KEY_RFKILL ? */
.code = SW_RFKILL_ALL,
.code = KEY_RFKILL,
.gpio = EFIKASB_RFKILL,
.type = EV_SW,
.type = EV_KEY,
.desc = "rfkill",
.active_low = 1,
},
};
@ -224,8 +225,8 @@ static void __init mx51_efikasb_board_id(void)
gpio_request(EFIKASB_PCBID1, "pcb id1");
gpio_direction_input(EFIKASB_PCBID1);
id = gpio_get_value(EFIKASB_PCBID0);
id |= gpio_get_value(EFIKASB_PCBID1) << 1;
id = gpio_get_value(EFIKASB_PCBID0) ? 1 : 0;
id |= (gpio_get_value(EFIKASB_PCBID1) ? 1 : 0) << 1;
switch (id) {
default:

View File

@ -271,7 +271,11 @@ static int _clk_pll_enable(struct clk *clk)
int i = 0;
pllbase = _get_pll_base(clk);
reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) | MXC_PLL_DP_CTL_UPEN;
reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
if (reg & MXC_PLL_DP_CTL_UPEN)
return 0;
reg |= MXC_PLL_DP_CTL_UPEN;
__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
/* Wait for lock */

View File

@ -186,7 +186,7 @@ static int initialize_usbh1_port(struct platform_device *pdev)
mdelay(10);
return mx51_initialize_usb_hw(0, MXC_EHCI_ITC_NO_THRESHOLD);
return mx51_initialize_usb_hw(pdev->id, MXC_EHCI_ITC_NO_THRESHOLD);
}
static struct mxc_usbh_platform_data usbh1_config = {

View File

@ -7,7 +7,6 @@ config ARCH_OMAP2PLUS_TYPICAL
default y
select AEABI
select REGULATOR
select PM
select PM_RUNTIME
select VFP
select NEON if ARCH_OMAP3 || ARCH_OMAP4

View File

@ -45,8 +45,6 @@ static struct omap_board_config_kernel am3517_crane_config[] __initdata = {
static struct omap_board_mux board_mux[] __initdata = {
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#else
#define board_mux NULL
#endif
static void __init am3517_crane_init_early(void)

View File

@ -491,23 +491,22 @@ static void __init beagle_opp_init(void)
/* Custom OPP enabled for all xM versions */
if (cpu_is_omap3630()) {
struct omap_hwmod *mh = omap_hwmod_lookup("mpu");
struct omap_hwmod *dh = omap_hwmod_lookup("iva");
struct device *dev;
struct device *mpu_dev, *iva_dev;
if (!mh || !dh) {
mpu_dev = omap2_get_mpuss_device();
iva_dev = omap2_get_iva_device();
if (!mpu_dev || !iva_dev) {
pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n",
__func__, mh, dh);
__func__, mpu_dev, iva_dev);
return;
}
/* Enable MPU 1GHz and lower opps */
dev = &mh->od->pdev.dev;
r = opp_enable(dev, 800000000);
r = opp_enable(mpu_dev, 800000000);
/* TODO: MPU 1GHz needs SR and ABB */
/* Enable IVA 800MHz and lower opps */
dev = &dh->od->pdev.dev;
r |= opp_enable(dev, 660000000);
r |= opp_enable(iva_dev, 660000000);
/* TODO: DSP 800MHz needs SR and ABB */
if (r) {
pr_err("%s: failed to enable higher opp %d\n",
@ -516,10 +515,8 @@ static void __init beagle_opp_init(void)
* Cleanup - disable the higher freqs - we dont care
* about the results
*/
dev = &mh->od->pdev.dev;
opp_disable(dev, 800000000);
dev = &dh->od->pdev.dev;
opp_disable(dev, 660000000);
opp_disable(mpu_dev, 800000000);
opp_disable(iva_dev, 660000000);
}
}
return;

View File

@ -18,13 +18,36 @@ extern void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs);
extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs);
extern int omap4_cminst_wait_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs);
extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs);
# ifdef CONFIG_ARCH_OMAP4
extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
extern void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
extern void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
# else
static inline int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs)
{
return 0;
}
static inline void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst,
s16 cdoffs, u16 clkctrl_offs)
{
}
static inline void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs)
{
}
# endif
/*
* In an ideal world, we would not export these low-level functions,
* but this will probably take some time to fix properly

View File

@ -821,11 +821,10 @@ static void __init omap_mux_set_cmdline_signals(void)
if (!omap_mux_options)
return;
options = kmalloc(strlen(omap_mux_options) + 1, GFP_KERNEL);
options = kstrdup(omap_mux_options, GFP_KERNEL);
if (!options)
return;
strcpy(options, omap_mux_options);
next_opt = options;
while ((token = strsep(&next_opt, ",")) != NULL) {
@ -855,24 +854,19 @@ static int __init omap_mux_copy_names(struct omap_mux *src,
for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
if (src->muxnames[i]) {
dst->muxnames[i] =
kmalloc(strlen(src->muxnames[i]) + 1,
GFP_KERNEL);
dst->muxnames[i] = kstrdup(src->muxnames[i],
GFP_KERNEL);
if (!dst->muxnames[i])
goto free;
strcpy(dst->muxnames[i], src->muxnames[i]);
}
}
#ifdef CONFIG_DEBUG_FS
for (i = 0; i < OMAP_MUX_NR_SIDES; i++) {
if (src->balls[i]) {
dst->balls[i] =
kmalloc(strlen(src->balls[i]) + 1,
GFP_KERNEL);
dst->balls[i] = kstrdup(src->balls[i], GFP_KERNEL);
if (!dst->balls[i])
goto free;
strcpy(dst->balls[i], src->balls[i]);
}
}
#endif

View File

@ -621,7 +621,7 @@ void sr_disable(struct voltagedomain *voltdm)
sr_v2_disable(sr);
}
pm_runtime_put_sync(&sr->pdev->dev);
pm_runtime_put_sync_suspend(&sr->pdev->dev);
}
/**
@ -860,6 +860,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
pm_runtime_enable(&pdev->dev);
pm_runtime_irq_safe(&pdev->dev);
sr_info->pdev = pdev;
sr_info->srid = pdev->id;

View File

@ -293,7 +293,8 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
gptimer_id, clksrc.rate);
__omap_dm_timer_load_start(clksrc.io_base, OMAP_TIMER_CTRL_ST, 0, 1);
__omap_dm_timer_load_start(clksrc.io_base,
OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate);
if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))

View File

@ -48,14 +48,7 @@ void __init omap_pmic_init(int bus, u32 clkrate,
omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
}
static struct twl4030_usb_data omap4_usb_pdata = {
.phy_init = omap4430_phy_init,
.phy_exit = omap4430_phy_exit,
.phy_power = omap4430_phy_power,
.phy_set_clock = omap4430_phy_set_clk,
.phy_suspend = omap4430_phy_suspend,
};
#if defined(CONFIG_ARCH_OMAP3)
static struct twl4030_usb_data omap3_usb_pdata = {
.usb_mode = T2_USB_MODE_ULPI,
};
@ -122,6 +115,45 @@ static struct regulator_init_data omap3_vpll2_idata = {
.consumer_supplies = omap3_vpll2_supplies,
};
void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
u32 pdata_flags, u32 regulators_flags)
{
if (!pmic_data->irq_base)
pmic_data->irq_base = TWL4030_IRQ_BASE;
if (!pmic_data->irq_end)
pmic_data->irq_end = TWL4030_IRQ_END;
/* Common platform data configurations */
if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
pmic_data->usb = &omap3_usb_pdata;
if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci)
pmic_data->bci = &omap3_bci_pdata;
if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc)
pmic_data->madc = &omap3_madc_pdata;
if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio)
pmic_data->audio = &omap3_audio_pdata;
/* Common regulator configurations */
if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
pmic_data->vdac = &omap3_vdac_idata;
if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2)
pmic_data->vpll2 = &omap3_vpll2_idata;
}
#endif /* CONFIG_ARCH_OMAP3 */
#if defined(CONFIG_ARCH_OMAP4)
static struct twl4030_usb_data omap4_usb_pdata = {
.phy_init = omap4430_phy_init,
.phy_exit = omap4430_phy_exit,
.phy_power = omap4430_phy_power,
.phy_set_clock = omap4430_phy_set_clk,
.phy_suspend = omap4430_phy_suspend,
};
static struct regulator_init_data omap4_vdac_idata = {
.constraints = {
.min_uV = 1800000,
@ -273,32 +305,4 @@ void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
!pmic_data->clk32kg)
pmic_data->clk32kg = &omap4_clk32kg_idata;
}
void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
u32 pdata_flags, u32 regulators_flags)
{
if (!pmic_data->irq_base)
pmic_data->irq_base = TWL4030_IRQ_BASE;
if (!pmic_data->irq_end)
pmic_data->irq_end = TWL4030_IRQ_END;
/* Common platform data configurations */
if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
pmic_data->usb = &omap3_usb_pdata;
if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci)
pmic_data->bci = &omap3_bci_pdata;
if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc)
pmic_data->madc = &omap3_madc_pdata;
if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio)
pmic_data->audio = &omap3_audio_pdata;
/* Common regulator configurations */
if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
pmic_data->vdac = &omap3_vdac_idata;
if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2)
pmic_data->vpll2 = &omap3_vpll2_idata;
}
#endif /* CONFIG_ARCH_OMAP4 */

View File

@ -65,7 +65,7 @@
#include <plat/iic.h>
#include <plat/pm.h>
#include <sound/wm8915.h>
#include <sound/wm8996.h>
#include <sound/wm8962.h>
#include <sound/wm9081.h>
@ -614,7 +614,7 @@ static struct wm831x_pdata glenfarclas_pmic_pdata __initdata = {
.disable_touch = true,
};
static struct wm8915_retune_mobile_config wm8915_retune[] = {
static struct wm8996_retune_mobile_config wm8996_retune[] = {
{
.name = "Sub LPF",
.rate = 48000,
@ -635,12 +635,12 @@ static struct wm8915_retune_mobile_config wm8915_retune[] = {
},
};
static struct wm8915_pdata wm8915_pdata __initdata = {
static struct wm8996_pdata wm8996_pdata __initdata = {
.ldo_ena = S3C64XX_GPN(7),
.gpio_base = CODEC_GPIO_BASE,
.micdet_def = 1,
.inl_mode = WM8915_DIFFERRENTIAL_1,
.inr_mode = WM8915_DIFFERRENTIAL_1,
.inl_mode = WM8996_DIFFERRENTIAL_1,
.inr_mode = WM8996_DIFFERRENTIAL_1,
.irq_flags = IRQF_TRIGGER_RISING,
@ -652,8 +652,8 @@ static struct wm8915_pdata wm8915_pdata __initdata = {
0x020e, /* GPIO5 == CLKOUT */
},
.retune_mobile_cfgs = wm8915_retune,
.num_retune_mobile_cfgs = ARRAY_SIZE(wm8915_retune),
.retune_mobile_cfgs = wm8996_retune,
.num_retune_mobile_cfgs = ARRAY_SIZE(wm8996_retune),
};
static struct wm8962_pdata wm8962_pdata __initdata = {
@ -679,8 +679,8 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
.platform_data = &glenfarclas_pmic_pdata },
{ I2C_BOARD_INFO("wm1250-ev1", 0x27) },
{ I2C_BOARD_INFO("wm8915", 0x1a),
.platform_data = &wm8915_pdata,
{ I2C_BOARD_INFO("wm8996", 0x1a),
.platform_data = &wm8996_pdata,
.irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2,
},
{ I2C_BOARD_INFO("wm9081", 0x6c),

View File

@ -28,6 +28,7 @@
#include <asm/mach-types.h>
#include <mach/nanoengine.h>
#include <mach/hardware.h>
static DEFINE_SPINLOCK(nano_lock);

View File

@ -22,6 +22,7 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#include "fault.h"
@ -95,6 +96,33 @@ static const char *usermode_action[] = {
"signal+warn"
};
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
}
static int safe_usermode(int new_usermode, bool warn)
{
/*
* ARMv6 and later CPUs can perform unaligned accesses for
* most single load and store instructions up to word size.
* LDM, STM, LDRD and STRD still need to be handled.
*
* Ignoring the alignment fault is not an option on these
* CPUs since we spin re-faulting the instruction without
* making any progress.
*/
if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
new_usermode |= UM_FIXUP;
if (warn)
printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
}
return new_usermode;
}
static int alignment_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "User:\t\t%lu\n", ai_user);
@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
if (get_user(mode, buffer))
return -EFAULT;
if (mode >= '0' && mode <= '5')
ai_usermode = mode - '0';
ai_usermode = safe_usermode(mode - '0', true);
}
return count;
}
@ -886,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (ai_usermode & UM_FIXUP)
goto fixup;
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
else {
if (ai_usermode & UM_SIGNAL) {
siginfo_t si;
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRALN;
si.si_addr = (void __user *)addr;
force_sig_info(si.si_signo, &si, current);
} else {
/*
* We're about to disable the alignment trap and return to
* user space. But if an interrupt occurs before actually
@ -926,20 +961,11 @@ static int __init alignment_init(void)
return -ENOMEM;
#endif
/*
* ARMv6 and later CPUs can perform unaligned accesses for
* most single load and store instructions up to word size.
* LDM, STM, LDRD and STRD still need to be handled.
*
* Ignoring the alignment fault is not an option on these
* CPUs since we spin re-faulting the instruction without
* making any progress.
*/
if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
if (cpu_is_v6_unaligned()) {
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
ai_usermode = UM_FIXUP;
ai_usermode = safe_usermode(ai_usermode, false);
}
hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,

View File

@ -441,7 +441,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
static inline void poison_init_mem(void *s, size_t count)
{
u32 *p = (u32 *)s;
while ((count = count - 4))
for (; count != 0; count -= 4)
*p++ = 0xe7fddef0;
}

View File

@ -410,6 +410,7 @@ __arm946_proc_info:
.long 0x41009460
.long 0xff00fff0
.long 0
.long 0
b __arm946_setup
.long cpu_arch_name
.long cpu_elf_name
@ -418,6 +419,6 @@ __arm946_proc_info:
.long arm946_processor_functions
.long 0
.long 0
.long arm940_cache_fns
.long arm946_cache_fns
.size __arm946_proc_info, . - __arm946_proc_info

View File

@ -44,6 +44,14 @@
#define UART_PADDR MX51_UART1_BASE_ADDR
#endif
/* iMX50/53 have same addresses, but not iMX51 */
#if defined(CONFIG_SOC_IMX50) || defined(CONFIG_SOC_IMX53)
#ifdef UART_PADDR
#error "CONFIG_DEBUG_LL is incompatible with multiple archs"
#endif
#define UART_PADDR MX53_UART1_BASE_ADDR
#endif
#define UART_VADDR IMX_IO_ADDRESS(UART_PADDR)
.macro addruart, rp, rv

View File

@ -30,6 +30,9 @@
#define MX53_SDHC_PAD_CTRL (PAD_CTL_HYS | PAD_CTL_PKE | PAD_CTL_PUE | \
PAD_CTL_PUS_47K_UP | PAD_CTL_DSE_HIGH | \
PAD_CTL_SRE_FAST)
#define PAD_CTRL_I2C (PAD_CTL_SRE_FAST | PAD_CTL_ODE | PAD_CTL_PKE | \
PAD_CTL_PUE | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP \
| PAD_CTL_HYS)
#define _MX53_PAD_GPIO_19__KPP_COL_5 IOMUX_PAD(0x348, 0x20, 0, 0x840, 0, 0)
#define _MX53_PAD_GPIO_19__GPIO4_5 IOMUX_PAD(0x348, 0x20, 1, 0x0, 0, 0)
@ -1256,7 +1259,7 @@
#define MX53_PAD_KEY_COL3__GPIO4_12 (_MX53_PAD_KEY_COL3__GPIO4_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL3__USBOH3_H2_DP (_MX53_PAD_KEY_COL3__USBOH3_H2_DP | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL3__SPDIF_IN1 (_MX53_PAD_KEY_COL3__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL3__I2C2_SCL (_MX53_PAD_KEY_COL3__I2C2_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL3__I2C2_SCL (_MX53_PAD_KEY_COL3__I2C2_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_KEY_COL3__ECSPI1_SS3 (_MX53_PAD_KEY_COL3__ECSPI1_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL3__FEC_CRS (_MX53_PAD_KEY_COL3__FEC_CRS | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK (_MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -1264,7 +1267,7 @@
#define MX53_PAD_KEY_ROW3__GPIO4_13 (_MX53_PAD_KEY_ROW3__GPIO4_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW3__USBOH3_H2_DM (_MX53_PAD_KEY_ROW3__USBOH3_H2_DM | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK (_MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW3__I2C2_SDA (_MX53_PAD_KEY_ROW3__I2C2_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW3__I2C2_SDA (_MX53_PAD_KEY_ROW3__I2C2_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_KEY_ROW3__OSC32K_32K_OUT (_MX53_PAD_KEY_ROW3__OSC32K_32K_OUT | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW3__CCM_PLL4_BYP (_MX53_PAD_KEY_ROW3__CCM_PLL4_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 (_MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -1536,7 +1539,7 @@
#define MX53_PAD_CSI0_DAT8__KPP_COL_7 (_MX53_PAD_CSI0_DAT8__KPP_COL_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT8__ECSPI2_SCLK (_MX53_PAD_CSI0_DAT8__ECSPI2_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC (_MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT8__I2C1_SDA (_MX53_PAD_CSI0_DAT8__I2C1_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT8__I2C1_SDA (_MX53_PAD_CSI0_DAT8__I2C1_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 (_MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 (_MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 (_MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -1544,7 +1547,7 @@
#define MX53_PAD_CSI0_DAT9__KPP_ROW_7 (_MX53_PAD_CSI0_DAT9__KPP_ROW_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT9__ECSPI2_MOSI (_MX53_PAD_CSI0_DAT9__ECSPI2_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR (_MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT9__I2C1_SCL (_MX53_PAD_CSI0_DAT9__I2C1_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT9__I2C1_SCL (_MX53_PAD_CSI0_DAT9__I2C1_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 (_MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 (_MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 (_MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -1631,25 +1634,25 @@
#define MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK (_MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS (_MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_EB2__ECSPI1_SS0 (_MX53_PAD_EIM_EB2__ECSPI1_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_EB2__I2C2_SCL (_MX53_PAD_EIM_EB2__I2C2_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_EB2__I2C2_SCL (_MX53_PAD_EIM_EB2__I2C2_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_EIM_D16__EMI_WEIM_D_16 (_MX53_PAD_EIM_D16__EMI_WEIM_D_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D16__GPIO3_16 (_MX53_PAD_EIM_D16__GPIO3_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D16__IPU_DI0_PIN5 (_MX53_PAD_EIM_D16__IPU_DI0_PIN5 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK (_MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D16__ECSPI1_SCLK (_MX53_PAD_EIM_D16__ECSPI1_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D16__I2C2_SDA (_MX53_PAD_EIM_D16__I2C2_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D16__I2C2_SDA (_MX53_PAD_EIM_D16__I2C2_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_EIM_D17__EMI_WEIM_D_17 (_MX53_PAD_EIM_D17__EMI_WEIM_D_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D17__GPIO3_17 (_MX53_PAD_EIM_D17__GPIO3_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D17__IPU_DI0_PIN6 (_MX53_PAD_EIM_D17__IPU_DI0_PIN6 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN (_MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D17__ECSPI1_MISO (_MX53_PAD_EIM_D17__ECSPI1_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D17__I2C3_SCL (_MX53_PAD_EIM_D17__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D17__I2C3_SCL (_MX53_PAD_EIM_D17__I2C3_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_EIM_D18__EMI_WEIM_D_18 (_MX53_PAD_EIM_D18__EMI_WEIM_D_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D18__GPIO3_18 (_MX53_PAD_EIM_D18__GPIO3_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D18__IPU_DI0_PIN7 (_MX53_PAD_EIM_D18__IPU_DI0_PIN7 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO (_MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D18__ECSPI1_MOSI (_MX53_PAD_EIM_D18__ECSPI1_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D18__I2C3_SDA (_MX53_PAD_EIM_D18__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D18__I2C3_SDA (_MX53_PAD_EIM_D18__I2C3_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_EIM_D18__IPU_DI1_D0_CS (_MX53_PAD_EIM_D18__IPU_DI1_D0_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D19__EMI_WEIM_D_19 (_MX53_PAD_EIM_D19__EMI_WEIM_D_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D19__GPIO3_19 (_MX53_PAD_EIM_D19__GPIO3_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -1672,7 +1675,7 @@
#define MX53_PAD_EIM_D21__IPU_DI0_PIN17 (_MX53_PAD_EIM_D21__IPU_DI0_PIN17 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK (_MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D21__CSPI_SCLK (_MX53_PAD_EIM_D21__CSPI_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D21__I2C1_SCL (_MX53_PAD_EIM_D21__I2C1_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D21__I2C1_SCL (_MX53_PAD_EIM_D21__I2C1_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_EIM_D21__USBOH3_USBOTG_OC (_MX53_PAD_EIM_D21__USBOH3_USBOTG_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D22__EMI_WEIM_D_22 (_MX53_PAD_EIM_D22__EMI_WEIM_D_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D22__GPIO3_22 (_MX53_PAD_EIM_D22__GPIO3_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -1732,7 +1735,7 @@
#define MX53_PAD_EIM_D28__UART2_CTS (_MX53_PAD_EIM_D28__UART2_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO (_MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D28__CSPI_MOSI (_MX53_PAD_EIM_D28__CSPI_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D28__I2C1_SDA (_MX53_PAD_EIM_D28__I2C1_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D28__I2C1_SDA (_MX53_PAD_EIM_D28__I2C1_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_EIM_D28__IPU_EXT_TRIG (_MX53_PAD_EIM_D28__IPU_EXT_TRIG | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D28__IPU_DI0_PIN13 (_MX53_PAD_EIM_D28__IPU_DI0_PIN13 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D29__EMI_WEIM_D_29 (_MX53_PAD_EIM_D29__EMI_WEIM_D_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -2297,7 +2300,7 @@
#define MX53_PAD_GPIO_9__SCC_FAIL_STATE (_MX53_PAD_GPIO_9__SCC_FAIL_STATE | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_3__ESAI1_HCKR (_MX53_PAD_GPIO_3__ESAI1_HCKR | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_3__GPIO1_3 (_MX53_PAD_GPIO_3__GPIO1_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_3__I2C3_SCL (_MX53_PAD_GPIO_3__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_3__I2C3_SCL (_MX53_PAD_GPIO_3__I2C3_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_GPIO_3__DPLLIP1_TOG_EN (_MX53_PAD_GPIO_3__DPLLIP1_TOG_EN | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_3__CCM_CLKO2 (_MX53_PAD_GPIO_3__CCM_CLKO2 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 (_MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -2305,7 +2308,7 @@
#define MX53_PAD_GPIO_3__MLB_MLBCLK (_MX53_PAD_GPIO_3__MLB_MLBCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_6__ESAI1_SCKT (_MX53_PAD_GPIO_6__ESAI1_SCKT | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_6__GPIO1_6 (_MX53_PAD_GPIO_6__GPIO1_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_6__I2C3_SDA (_MX53_PAD_GPIO_6__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_6__I2C3_SDA (_MX53_PAD_GPIO_6__I2C3_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_GPIO_6__CCM_CCM_OUT_0 (_MX53_PAD_GPIO_6__CCM_CCM_OUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_6__CSU_CSU_INT_DEB (_MX53_PAD_GPIO_6__CSU_CSU_INT_DEB | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 (_MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -2333,7 +2336,7 @@
#define MX53_PAD_GPIO_5__CCM_CLKO (_MX53_PAD_GPIO_5__CCM_CLKO | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 (_MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 (_MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_5__I2C3_SCL (_MX53_PAD_GPIO_5__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_5__I2C3_SCL (_MX53_PAD_GPIO_5__I2C3_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_GPIO_5__CCM_PLL1_BYP (_MX53_PAD_GPIO_5__CCM_PLL1_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_7__ESAI1_TX4_RX1 (_MX53_PAD_GPIO_7__ESAI1_TX4_RX1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_7__GPIO1_7 (_MX53_PAD_GPIO_7__GPIO1_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
@ -2356,7 +2359,7 @@
#define MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT (_MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 (_MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_16__SPDIF_IN1 (_MX53_PAD_GPIO_16__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_16__I2C3_SDA (_MX53_PAD_GPIO_16__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_16__I2C3_SDA (_MX53_PAD_GPIO_16__I2C3_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C))
#define MX53_PAD_GPIO_16__SJC_DE_B (_MX53_PAD_GPIO_16__SJC_DE_B | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_17__ESAI1_TX0 (_MX53_PAD_GPIO_17__ESAI1_TX0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_17__GPIO7_12 (_MX53_PAD_GPIO_17__GPIO7_12 | MUX_PAD_CTRL(NO_PAD_CTRL))


@ -13,6 +13,7 @@ config ARCH_OMAP1
bool "TI OMAP1"
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
help
"Systems based on omap7xx, omap15xx or omap16xx"


@ -195,6 +195,11 @@
#define OMAP36XX_DMA_UART4_TX 81 /* S_DMA_80 */
#define OMAP36XX_DMA_UART4_RX 82 /* S_DMA_81 */
/* Only for AM35xx */
#define AM35XX_DMA_UART4_TX 54
#define AM35XX_DMA_UART4_RX 55
/*----------------------------------------------------------------------------*/
#define OMAP1_DMA_TOUT_IRQ (1 << 0)


@ -357,6 +357,7 @@
#define INT_35XX_EMAC_C0_TX_PULSE_IRQ 69
#define INT_35XX_EMAC_C0_MISC_PULSE_IRQ 70
#define INT_35XX_USBOTG_IRQ 71
#define INT_35XX_UART4 84
#define INT_35XX_CCDC_VD0_IRQ 88
#define INT_35XX_CCDC_VD1_IRQ 92
#define INT_35XX_CCDC_VD2_IRQ 93


@ -56,6 +56,9 @@
#define TI816X_UART2_BASE 0x48022000
#define TI816X_UART3_BASE 0x48024000
/* AM3505/3517 UART4 */
#define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */
/* External port on Zoom2/3 */
#define ZOOM_UART_BASE 0x10000000
#define ZOOM_UART_VIRT 0xfa400000


@ -423,9 +423,6 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
{
unsigned int i;
struct scatterlist *sg;
void *va;
va = phys_to_virt(pa);
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
unsigned bytes;


@ -910,7 +910,7 @@ omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280
uemd MACH_UEMD UEMD 3281
ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282
rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283
nookcolor MACH_NOOKCOLOR NOOKCOLOR 3284
encore MACH_ENCORE ENCORE 3284
hkdkc100 MACH_HKDKC100 HKDKC100 3285
ts42xx MACH_TS42XX TS42XX 3286
aebl MACH_AEBL AEBL 3287


@ -22,7 +22,6 @@ static __always_inline bool arch_static_branch(struct jump_label_key *key)
asm goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 4\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
".popsection \n\t"
: : "i" (key) : : l_yes);
@ -41,7 +40,6 @@ struct jump_entry {
jump_label_t code;
jump_label_t target;
jump_label_t key;
jump_label_t pad;
};
#endif /* _ASM_POWERPC_JUMP_LABEL_H */
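A small host-side check (illustrative, assuming jump_label_t is a u64 as on 64-bit powerpc) of why the .align directive and the pad member removed above were unnecessary: three 8-byte fields already give a naturally aligned 24-byte entry.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t jump_label_t;  /* assumption: 64-bit powerpc layout */

struct jump_entry {
        jump_label_t code;
        jump_label_t target;
        jump_label_t key;
};

int main(void)
{
        /* prints "size=24 align=8": no padding or .align needed */
        printf("size=%zu align=%zu\n", sizeof(struct jump_entry),
               _Alignof(struct jump_entry));
        return 0;
}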


@ -3,17 +3,7 @@
#include <asm/page.h>
/*
* If CONFIG_RELOCATABLE is enabled we can place the kdump kernel anywhere.
* To keep enough space in the RMO for the first stage kernel on 64bit, we
* place it at 64MB. If CONFIG_RELOCATABLE is not enabled we must place
* the second stage at 32MB.
*/
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC64)
#define KDUMP_KERNELBASE 0x4000000
#else
#define KDUMP_KERNELBASE 0x2000000
#endif
/* How many bytes to reserve at zero for kdump. The reserve limit should
* be greater than or equal to the trampoline's end address.
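Two quick sanity checks (illustrative, not part of the patch) that the bases chosen above are the 64MB and 32MB placements the removed comment names:

#include <stdio.h>

int main(void)
{
        /* KDUMP_KERNELBASE: relocatable 64-bit vs. fixed placement */
        printf("0x4000000 = %lu MB\n", 0x4000000UL >> 20);      /* 64 MB */
        printf("0x2000000 = %lu MB\n", 0x2000000UL >> 20);      /* 32 MB */
        return 0;
}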


@ -1003,7 +1003,6 @@
#define PV_970 0x0039
#define PV_POWER5 0x003A
#define PV_POWER5p 0x003B
#define PV_POWER7 0x003F
#define PV_970FX 0x003C
#define PV_POWER6 0x003E
#define PV_POWER7 0x003F
@ -1024,13 +1023,16 @@
#define mtmsrd(v) __mtmsrd((v), 0)
#define mtmsr(v) mtmsrd(v)
#else
#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory")
#define mtmsr(v) asm volatile("mtmsr %0" : \
: "r" ((unsigned long)(v)) \
: "memory")
#endif
#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
: "=r" (rval)); rval;})
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
: "r" ((unsigned long)(v)) \
: "memory")
#ifdef __powerpc64__


@ -2051,7 +2051,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
static struct cpu_spec the_cpu_spec;
static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
struct cpu_spec *s)
{
struct cpu_spec *t = &the_cpu_spec;
struct cpu_spec old;
@ -2114,6 +2115,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
t->cpu_setup(offset, t);
}
#endif /* CONFIG_PPC64 || CONFIG_BOOKE */
return t;
}
struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
@ -2124,10 +2127,8 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
s = PTRRELOC(s);
for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
if ((pvr & s->pvr_mask) == s->pvr_value) {
setup_cpu_spec(offset, s);
return s;
}
if ((pvr & s->pvr_mask) == s->pvr_value)
return setup_cpu_spec(offset, s);
}
BUG();


@ -117,6 +117,7 @@ void ioport_unmap(void __iomem *addr)
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
#ifdef CONFIG_PCI
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
{
resource_size_t start = pci_resource_start(dev, bar);
@ -146,3 +147,4 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
#endif /* CONFIG_PCI */


@ -136,12 +136,16 @@ void __init reserve_crashkernel(void)
crashk_res.start = KDUMP_KERNELBASE;
#else
if (!crashk_res.start) {
#ifdef CONFIG_PPC64
/*
* unspecified address, choose a region of specified size
* can overlap with initrd (ignoring corruption when retained)
* ppc64 requires kernel and some stacks to be in first segment
* On 64bit we split the RMO in half but cap it at half of
* a small SLB (128MB) since the crash kernel needs to place
* itself and some stacks to be in the first segment.
*/
crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2));
#else
crashk_res.start = KDUMP_KERNELBASE;
#endif
}
crash_base = PAGE_ALIGN(crashk_res.start);


@ -154,8 +154,12 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
((unsigned long)ptr & 7))
return -EFAULT;
if (!__get_user_inatomic(*ret, ptr))
pagefault_disable();
if (!__get_user_inatomic(*ret, ptr)) {
pagefault_enable();
return 0;
}
pagefault_enable();
return read_user_stack_slow(ptr, ret, 8);
}
@ -166,8 +170,12 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
((unsigned long)ptr & 3))
return -EFAULT;
if (!__get_user_inatomic(*ret, ptr))
pagefault_disable();
if (!__get_user_inatomic(*ret, ptr)) {
pagefault_enable();
return 0;
}
pagefault_enable();
return read_user_stack_slow(ptr, ret, 4);
}
@ -294,11 +302,17 @@ static inline int current_is_64bit(void)
*/
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
int rc;
if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
((unsigned long)ptr & 3))
return -EFAULT;
return __get_user_inatomic(*ret, ptr);
pagefault_disable();
rc = __get_user_inatomic(*ret, ptr);
pagefault_enable();
return rc;
}
static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
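Every hunk above converges on the same pattern. A kernel-style sketch of its general shape (illustrative, assuming the pagefault_disable()/pagefault_enable() and __get_user_inatomic() helpers used in this file):

/* fault-safe user read for contexts that must not sleep (e.g. NMI) */
static int read_user_word(unsigned long __user *ptr, unsigned long *ret)
{
        int rc;

        pagefault_disable();    /* a miss now fails fast instead of faulting in */
        rc = __get_user_inatomic(*ret, ptr);
        pagefault_enable();

        return rc;              /* 0 on success, -EFAULT on an inaccessible page */
}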


@ -1020,7 +1020,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
}
if (addr == 0)
return 0;
RELOC(alloc_bottom) = addr;
RELOC(alloc_bottom) = addr + size;
prom_debug(" -> %x\n", addr);
prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
@ -1830,11 +1830,13 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
if (room > DEVTREE_CHUNK_SIZE)
room = DEVTREE_CHUNK_SIZE;
if (room < PAGE_SIZE)
prom_panic("No memory for flatten_device_tree (no room)");
prom_panic("No memory for flatten_device_tree "
"(no room)\n");
chunk = alloc_up(room, 0);
if (chunk == 0)
prom_panic("No memory for flatten_device_tree (claim failed)");
*mem_end = RELOC(alloc_top);
prom_panic("No memory for flatten_device_tree "
"(claim failed)\n");
*mem_end = chunk + room;
}
ret = (void *)*mem_start;
@ -2042,7 +2044,7 @@ static void __init flatten_device_tree(void)
/*
* Check how much room we have between alloc top & bottom (+/- a
* few pages), crop to 4Mb, as this is our "chuck" size
* few pages), crop to 1MB, as this is our "chunk" size
*/
room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
if (room > DEVTREE_CHUNK_SIZE)
@ -2053,7 +2055,7 @@ static void __init flatten_device_tree(void)
mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
if (mem_start == 0)
prom_panic("Can't allocate initial device-tree chunk\n");
mem_end = RELOC(alloc_top);
mem_end = mem_start + room;
/* Get root of tree */
root = call_prom("peer", 1, 1, (phandle)0);


@ -1251,7 +1251,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r6,reg*16+VCPU_VSRS
stxvd2x reg,r6,r3
STXVD2X(reg,r6,r3)
reg = reg + 1
.endr
FTR_SECTION_ELSE
@ -1313,7 +1313,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r7,reg*16+VCPU_VSRS
lxvd2x reg,r7,r4
LXVD2X(reg,r7,r4)
reg = reg + 1
.endr
FTR_SECTION_ELSE


@ -24,7 +24,7 @@ source "arch/powerpc/platforms/wsp/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
default y
default n
---help---
This option enables various optimizations for running under the KVM
hypervisor. Overhead for the kernel when not running inside KVM should


@ -181,7 +181,7 @@ static void dtl_stop(struct dtl *dtl)
lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
unregister_dtl(hwcpu, __pa(dtl->buf));
unregister_dtl(hwcpu);
}
static u64 dtl_current_index(struct dtl *dtl)


@ -135,7 +135,7 @@ static void pseries_mach_cpu_die(void)
get_lppaca()->idle = 0;
if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
unregister_slb_shadow(hwcpu);
/*
* Call to start_secondary_resume() will not return.
@ -150,7 +150,7 @@ static void pseries_mach_cpu_die(void)
WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
unregister_slb_shadow(hwcpu);
rtas_stop_self();
/* Should never get here... */


@ -212,17 +212,15 @@ static int __init ioei_init(void)
struct device_node *np;
ioei_check_exception_token = rtas_token("check-exception");
if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) {
pr_warning("IO Event IRQ not supported on this system !\n");
if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE)
return -ENODEV;
}
np = of_find_node_by_path("/event-sources/ibm,io-events");
if (np) {
request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT");
pr_info("IBM I/O event interrupts enabled\n");
of_node_put(np);
} else {
pr_err("io_event_irq: No ibm,io-events on system! "
"IO Event interrupt disabled.\n");
return -ENODEV;
}
return 0;


@ -25,20 +25,30 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
/* Don't risk a hypervisor call if we're crashing */
if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
unsigned long addr;
int ret;
int cpu = smp_processor_id();
int hwcpu = hard_smp_processor_id();
addr = __pa(get_slb_shadow());
if (unregister_slb_shadow(hard_smp_processor_id(), addr))
printk("SLB shadow buffer deregistration of "
"cpu %u (hw_cpu_id %d) failed\n",
smp_processor_id(),
hard_smp_processor_id());
if (get_lppaca()->dtl_enable_mask) {
ret = unregister_dtl(hwcpu);
if (ret) {
pr_err("WARNING: DTL deregistration for cpu "
"%d (hw %d) failed with %d\n",
cpu, hwcpu, ret);
}
}
addr = __pa(get_lppaca());
if (unregister_vpa(hard_smp_processor_id(), addr)) {
printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
"failed\n", smp_processor_id(),
hard_smp_processor_id());
ret = unregister_slb_shadow(hwcpu);
if (ret) {
pr_err("WARNING: SLB shadow buffer deregistration "
"for cpu %d (hw %d) failed with %d\n",
cpu, hwcpu, ret);
}
ret = unregister_vpa(hwcpu);
if (ret) {
pr_err("WARNING: VPA deregistration for cpu %d "
"(hw %d) failed with %d\n", cpu, hwcpu, ret);
}
}
}


@ -67,9 +67,8 @@ void vpa_init(int cpu)
ret = register_vpa(hwcpu, addr);
if (ret) {
printk(KERN_ERR "WARNING: vpa_init: VPA registration for "
"cpu %d (hw %d) of area %lx returns %ld\n",
cpu, hwcpu, addr, ret);
pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
"%lx failed with %ld\n", cpu, hwcpu, addr, ret);
return;
}
/*
@ -80,10 +79,9 @@ void vpa_init(int cpu)
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
ret = register_slb_shadow(hwcpu, addr);
if (ret)
printk(KERN_ERR
"WARNING: vpa_init: SLB shadow buffer "
"registration for cpu %d (hw %d) of area %lx "
"returns %ld\n", cpu, hwcpu, addr, ret);
pr_err("WARNING: SLB shadow buffer registration for "
"cpu %d (hw %d) of area %lx failed with %ld\n",
cpu, hwcpu, addr, ret);
}
/*
@ -100,8 +98,9 @@ void vpa_init(int cpu)
dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
ret = register_dtl(hwcpu, __pa(dtl));
if (ret)
pr_warn("DTL registration failed for cpu %d (%ld)\n",
cpu, ret);
pr_err("WARNING: DTL registration of cpu %d (hw %d) "
"failed with %ld\n", smp_processor_id(),
hwcpu, ret);
lppaca_of(cpu).dtl_enable_mask = 2;
}
}
@ -204,7 +203,7 @@ static void pSeries_lpar_hptab_clear(void)
unsigned long ptel;
} ptes[4];
long lpar_rc;
int i, j;
unsigned long i, j;
/* Read in batches of 4,
* invalidate only valid entries not in the VRMA


@ -53,9 +53,9 @@ static inline long vpa_call(unsigned long flags, unsigned long cpu,
return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}
static inline long unregister_vpa(unsigned long cpu, unsigned long vpa)
static inline long unregister_vpa(unsigned long cpu)
{
return vpa_call(0x5, cpu, vpa);
return vpa_call(0x5, cpu, 0);
}
static inline long register_vpa(unsigned long cpu, unsigned long vpa)
@ -63,9 +63,9 @@ static inline long register_vpa(unsigned long cpu, unsigned long vpa)
return vpa_call(0x1, cpu, vpa);
}
static inline long unregister_slb_shadow(unsigned long cpu, unsigned long vpa)
static inline long unregister_slb_shadow(unsigned long cpu)
{
return vpa_call(0x7, cpu, vpa);
return vpa_call(0x7, cpu, 0);
}
static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
@ -73,9 +73,9 @@ static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
return vpa_call(0x3, cpu, vpa);
}
static inline long unregister_dtl(unsigned long cpu, unsigned long vpa)
static inline long unregister_dtl(unsigned long cpu)
{
return vpa_call(0x6, cpu, vpa);
return vpa_call(0x6, cpu, 0);
}
static inline long register_dtl(unsigned long cpu, unsigned long vpa)


@ -324,8 +324,9 @@ static int alloc_dispatch_logs(void)
dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
if (ret)
pr_warn("DTL registration failed for boot cpu %d (%d)\n",
smp_processor_id(), ret);
pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
"with %d\n", smp_processor_id(),
hard_smp_processor_id(), ret);
get_paca()->lppaca_ptr->dtl_enable_mask = 2;
return 0;


@ -655,8 +655,6 @@ struct ppc4xx_pciex_hwops
static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
#ifdef CONFIG_44x
static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
unsigned int sdr_offset,
unsigned int mask,
@ -688,6 +686,7 @@ static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
return 0;
}
static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
{
printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
@ -718,6 +717,8 @@ static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
}
#ifdef CONFIG_44x
/* Check various reset bits of the 440SPe PCIe core */
static int __init ppc440spe_pciex_check_reset(struct device_node *np)
{


@ -1256,13 +1256,14 @@ static int __init ds_init(void)
{
unsigned long hv_ret, major, minor;
hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
if (hv_ret == HV_EOK) {
pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
major, minor);
reboot_data_supported = 1;
if (tlb_type == hypervisor) {
hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
if (hv_ret == HV_EOK) {
pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
major, minor);
reboot_data_supported = 1;
}
}
kthread_run(ds_thread, NULL, "kldomd");
return vio_register_driver(&ds_driver);


@ -27,8 +27,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
desc->base2 = (info->base_addr & 0xff000000) >> 24;
/*
* Don't allow setting of the lm bit. It is useless anyway
* because 64bit system calls require __USER_CS:
* Don't allow setting of the lm bit. It would confuse
* user_64bit_mode and would get overridden by sysret anyway.
*/
desc->l = 0;
}


@ -17,7 +17,6 @@
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
* Vector 204 : legacy x86_64 vsyscall emulation
* Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts
* Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
*
@ -51,9 +50,6 @@
#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80
#endif
#ifdef CONFIG_X86_64
# define VSYSCALL_EMU_VECTOR 0xcc
#endif
/*
* Vectors 0x30-0x3f are used for ISA interrupts.


@ -41,6 +41,7 @@
#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>
struct page;
struct thread_struct;
@ -63,6 +64,11 @@ struct paravirt_callee_save {
struct pv_info {
unsigned int kernel_rpl;
int shared_kernel_pmd;
#ifdef CONFIG_X86_64
u16 extra_user_64bit_cs; /* __USER_CS if none */
#endif
int paravirt_enabled;
const char *name;
};


@ -131,6 +131,9 @@ struct pt_regs {
#ifdef __KERNEL__
#include <linux/init.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif
struct cpuinfo_x86;
struct task_struct;
@ -187,6 +190,22 @@ static inline int v8086_mode(struct pt_regs *regs)
#endif
}
#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifndef CONFIG_PARAVIRT
/*
* On non-paravirt systems, this is the only long mode CPL 3
* selector. We do not allow long mode selectors in the LDT.
*/
return regs->cs == __USER_CS;
#else
/* Headers are too twisted for this to go in paravirt.h. */
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
#endif
/*
* X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
* when it traps. The previous stack will be directly underneath the saved


@ -40,7 +40,6 @@ asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
#endif /* CONFIG_X86_MCE */
asmlinkage void simd_coprocessor_error(void);
asmlinkage void emulate_vsyscall(void);
dotraplinkage void do_divide_error(struct pt_regs *, long);
dotraplinkage void do_debug(struct pt_regs *, long);
@ -67,7 +66,6 @@ dotraplinkage void do_alignment_check(struct pt_regs *, long);
dotraplinkage void do_machine_check(struct pt_regs *, long);
#endif
dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
dotraplinkage void do_emulate_vsyscall(struct pt_regs *, long);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *, long);
#endif


@ -681,6 +681,8 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
__SYSCALL(__NR_sendmmsg, sys_sendmmsg)
#define __NR_setns 308
__SYSCALL(__NR_setns, sys_setns)
#define __NR_getcpu 309
__SYSCALL(__NR_getcpu, sys_getcpu)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR


@ -27,6 +27,12 @@ extern struct timezone sys_tz;
extern void map_vsyscall(void);
/*
* Called on instruction fetch fault in vsyscall page.
* Returns true if handled.
*/
extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
#endif /* __KERNEL__ */
#endif /* _ASM_X86_VSYSCALL_H */


@ -17,19 +17,6 @@ CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif
#
# vsyscalls (which work on the user stack) should have
# no stack-protector checks:
#
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
CFLAGS_hpet.o := $(nostackp)
CFLAGS_paravirt.o := $(nostackp)
GCOV_PROFILE_vsyscall_64.o := n
GCOV_PROFILE_hpet.o := n
GCOV_PROFILE_tsc.o := n
GCOV_PROFILE_paravirt.o := n
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o


@ -1590,6 +1590,7 @@ static __init int intel_pmu_init(void)
break;
case 42: /* SandyBridge */
case 45: /* SandyBridge, "Romely-EP" */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));


@ -1111,7 +1111,6 @@ zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error
zeroentry emulate_vsyscall do_emulate_vsyscall
/* Reload gs selector with exception handling */


@ -307,6 +307,10 @@ struct pv_info pv_info = {
.paravirt_enabled = 0,
.kernel_rpl = 0,
.shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
#ifdef CONFIG_X86_64
.extra_user_64bit_cs = __USER_CS,
#endif
};
struct pv_init_ops pv_init_ops = {


@ -74,7 +74,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
#ifdef CONFIG_X86_64
case 0x40 ... 0x4f:
if (regs->cs != __USER_CS)
if (!user_64bit_mode(regs))
/* 32-bit mode: register increment */
return 0;
/* 64-bit mode: REX prefix */


@ -872,12 +872,6 @@ void __init trap_init(void)
set_bit(SYSCALL_VECTOR, used_vectors);
#endif
#ifdef CONFIG_X86_64
BUG_ON(test_bit(VSYSCALL_EMU_VECTOR, used_vectors));
set_system_intr_gate(VSYSCALL_EMU_VECTOR, &emulate_vsyscall);
set_bit(VSYSCALL_EMU_VECTOR, used_vectors);
#endif
/*
* Should be a barrier for any external CPU state:
*/


@ -71,7 +71,6 @@ PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(6); /* RW_ */
#ifdef CONFIG_X86_64
user PT_LOAD FLAGS(5); /* R_E */
#ifdef CONFIG_SMP
percpu PT_LOAD FLAGS(6); /* RW_ */
#endif
@ -154,44 +153,16 @@ SECTIONS
#ifdef CONFIG_X86_64
#define VSYSCALL_ADDR (-10*1024*1024)
#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
. = ALIGN(4096);
__vsyscall_0 = .;
. = VSYSCALL_ADDR;
.vsyscall : AT(VLOAD(.vsyscall)) {
*(.vsyscall_0)
. = 1024;
*(.vsyscall_1)
. = 2048;
*(.vsyscall_2)
. = 4096; /* Pad the whole page. */
} :user =0xcc
. = ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE);
#undef VSYSCALL_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT
. = ALIGN(PAGE_SIZE);
__vvar_page = .;
.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
/* work around gold bug 13023 */
__vvar_beginning_hack = .;
/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset) \
. = offset; \
/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset) \
. = __vvar_beginning_hack + offset; \
*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>


@ -18,9 +18,6 @@
* use the vDSO.
*/
/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING
#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
@ -50,12 +47,36 @@
#include <asm/vgtod.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"
DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
{
.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};
static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
static int __init vsyscall_setup(char *str)
{
if (str) {
if (!strcmp("emulate", str))
vsyscall_mode = EMULATE;
else if (!strcmp("native", str))
vsyscall_mode = NATIVE;
else if (!strcmp("none", str))
vsyscall_mode = NONE;
else
return -EINVAL;
return 0;
}
return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
void update_vsyscall_tz(void)
{
unsigned long flags;
@ -100,7 +121,7 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
level, tsk->comm, task_pid_nr(tsk),
message, regs->ip - 2, regs->cs,
message, regs->ip, regs->cs,
regs->sp, regs->ax, regs->si, regs->di);
}
@ -118,46 +139,39 @@ static int addr_to_vsyscall_nr(unsigned long addr)
return nr;
}
void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code)
bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
struct task_struct *tsk;
unsigned long caller;
int vsyscall_nr;
long ret;
local_irq_enable();
/*
* Real 64-bit user mode code has cs == __USER_CS. Anything else
* is bogus.
* No point in checking CS -- the only way to get here is a user mode
* trap to a high address, which means that we're in 64-bit user code.
*/
if (regs->cs != __USER_CS) {
/*
* If we trapped from kernel mode, we might as well OOPS now
* instead of returning to some random address and OOPSing
* then.
*/
BUG_ON(!user_mode(regs));
/* Compat mode and non-compat 32-bit CS should both segfault. */
warn_bad_vsyscall(KERN_WARNING, regs,
"illegal int 0xcc from 32-bit mode");
goto sigsegv;
WARN_ON_ONCE(address != regs->ip);
if (vsyscall_mode == NONE) {
warn_bad_vsyscall(KERN_INFO, regs,
"vsyscall attempted with vsyscall=none");
return false;
}
/*
* x86-ism here: regs->ip points to the instruction after the int 0xcc,
* and int 0xcc is two bytes long.
*/
vsyscall_nr = addr_to_vsyscall_nr(regs->ip - 2);
vsyscall_nr = addr_to_vsyscall_nr(address);
trace_emulate_vsyscall(vsyscall_nr);
if (vsyscall_nr < 0) {
warn_bad_vsyscall(KERN_WARNING, regs,
"illegal int 0xcc (exploit attempt?)");
"misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
goto sigsegv;
}
if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
warn_bad_vsyscall(KERN_WARNING, regs, "int 0xcc with bad stack (exploit attempt?)");
warn_bad_vsyscall(KERN_WARNING, regs,
"vsyscall with bad stack (exploit attempt?)");
goto sigsegv;
}
@ -202,13 +216,11 @@ void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code)
regs->ip = caller;
regs->sp += 8;
local_irq_disable();
return;
return true;
sigsegv:
regs->ip -= 2; /* The faulting instruction should be the int 0xcc. */
force_sig(SIGSEGV, current);
local_irq_disable();
return true;
}
/*
@ -256,15 +268,21 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
void __init map_vsyscall(void)
{
extern char __vsyscall_0;
unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
extern char __vsyscall_page;
unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
extern char __vvar_page;
unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
vsyscall_mode == NATIVE
? PAGE_KERNEL_VSYSCALL
: PAGE_KERNEL_VVAR);
BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
(unsigned long)VSYSCALL_START);
__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) != (unsigned long)VVAR_ADDRESS);
BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
(unsigned long)VVAR_ADDRESS);
}
static int __init vsyscall_init(void)


@ -7,21 +7,31 @@
*/
#include <linux/linkage.h>
#include <asm/irq_vectors.h>
#include <asm/page_types.h>
#include <asm/unistd_64.h>
/* The unused parts of the page are filled with 0xcc by the linker script. */
__PAGE_ALIGNED_DATA
.globl __vsyscall_page
.balign PAGE_SIZE, 0xcc
.type __vsyscall_page, @object
__vsyscall_page:
.section .vsyscall_0, "a"
ENTRY(vsyscall_0)
int $VSYSCALL_EMU_VECTOR
END(vsyscall_0)
mov $__NR_gettimeofday, %rax
syscall
ret
.section .vsyscall_1, "a"
ENTRY(vsyscall_1)
int $VSYSCALL_EMU_VECTOR
END(vsyscall_1)
.balign 1024, 0xcc
mov $__NR_time, %rax
syscall
ret
.section .vsyscall_2, "a"
ENTRY(vsyscall_2)
int $VSYSCALL_EMU_VECTOR
END(vsyscall_2)
.balign 1024, 0xcc
mov $__NR_getcpu, %rax
syscall
ret
.balign 4096, 0xcc
.size __vsyscall_page, 4096
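The page laid out above is what legacy binaries jump into. A userspace sketch (illustrative, not part of the patch) of such a call: the base address is the architectural x86-64 vsyscall page, entries sit 1024 bytes apart (hence the .balign 1024 padding), so time() is at offset 0x400 and getcpu() at 0x800; booted with vsyscall=none this program would take a SIGSEGV.

#include <stdio.h>
#include <sys/time.h>

typedef int (*vgtod_fn)(struct timeval *, struct timezone *);

int main(void)
{
        /* slot 0 of the fixed vsyscall page: gettimeofday */
        vgtod_fn vgtod = (vgtod_fn)0xffffffffff600000UL;
        struct timeval tv;

        if (vgtod(&tv, NULL) == 0)
                printf("sec=%ld usec=%ld\n",
                       (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
}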


@ -0,0 +1,29 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM vsyscall
#if !defined(__VSYSCALL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __VSYSCALL_TRACE_H
#include <linux/tracepoint.h>
TRACE_EVENT(emulate_vsyscall,
TP_PROTO(int nr),
TP_ARGS(nr),
TP_STRUCT__entry(__field(int, nr)),
TP_fast_assign(
__entry->nr = nr;
),
TP_printk("nr = %d", __entry->nr)
);
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../arch/x86/kernel
#define TRACE_INCLUDE_FILE vsyscall_trace
#include <trace/define_trace.h>


@ -105,7 +105,7 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
* but for now it's good enough to assume that long
* mode only uses well known segments or kernel.
*/
return (!user_mode(regs)) || (regs->cs == __USER_CS);
return (!user_mode(regs) || user_64bit_mode(regs));
#endif
case 0x60:
/* 0x64 thru 0x67 are valid prefixes in all modes. */
@ -720,6 +720,18 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (is_errata100(regs, address))
return;
#ifdef CONFIG_X86_64
/*
* Instruction fetch faults in the vsyscall page might need
* emulation.
*/
if (unlikely((error_code & PF_INSTR) &&
((address & ~0xfff) == VSYSCALL_START))) {
if (emulate_vsyscall(regs, address))
return;
}
#endif
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);
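A compact demonstration (illustrative) of the address test used in the hunk above: masking off the low 12 bits compares page frames, so any instruction fetch inside the 4KB vsyscall page is routed to the emulation, whatever its offset.

#include <stdio.h>

#define VSYSCALL_PAGE   0xffffffffff600000UL    /* fixed on x86-64 */

static int in_vsyscall_page(unsigned long address)
{
        return (address & ~0xfffUL) == VSYSCALL_PAGE;
}

int main(void)
{
        printf("%d\n", in_vsyscall_page(0xffffffffff600400UL)); /* 1 */
        printf("%d\n", in_vsyscall_page(0xffffffffff601000UL)); /* 0 */
        return 0;
}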


@ -9,6 +9,7 @@ __PAGE_ALIGNED_DATA
vdso_start:
.incbin "arch/x86/vdso/vdso.so"
vdso_end:
.align PAGE_SIZE /* extra data here leaks to userspace. */
.previous


@ -951,6 +951,10 @@ static const struct pv_info xen_info __initconst = {
.paravirt_enabled = 1,
.shared_kernel_pmd = 0,
#ifdef CONFIG_X86_64
.extra_user_64bit_cs = FLAT_USER_CS64,
#endif
.name = "Xen",
};


@ -1916,6 +1916,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
# endif
#else
case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
case VVAR_PAGE:
#endif
case FIX_TEXT_POKE0:
case FIX_TEXT_POKE1:
@ -1956,7 +1957,8 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#ifdef CONFIG_X86_64
/* Replicate changes to map the vsyscall page into the user
pagetable vsyscall mapping. */
if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
idx == VVAR_PAGE) {
unsigned long vaddr = __fix_to_virt(idx);
set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
}


@ -80,6 +80,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>


@ -3419,9 +3419,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
int i;
bond->kill_timers = 0;
/* reset slave->backup and slave->inactive */
read_lock(&bond->lock);
if (bond->slave_cnt > 0) {
read_lock(&bond->curr_slave_lock);
bond_for_each_slave(bond, slave, i) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave);
} else {
bond_set_slave_active_flags(slave);
}
}
read_unlock(&bond->curr_slave_lock);
}
read_unlock(&bond->lock);
INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
if (bond_is_lb(bond)) {


@ -408,7 +408,7 @@ static void plx_pci_del_card(struct pci_dev *pdev)
struct sja1000_priv *priv;
int i = 0;
for (i = 0; i < card->channels; i++) {
for (i = 0; i < PLX_PCI_MAX_CHAN; i++) {
dev = card->net_dev[i];
if (!dev)
continue;
@ -536,7 +536,6 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "Registering device failed "
"(err=%d)\n", err);
free_sja1000dev(dev);
goto failure_cleanup;
}
@ -549,6 +548,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
dev_err(&pdev->dev, "Channel #%d not detected\n",
i + 1);
free_sja1000dev(dev);
card->net_dev[i] = NULL;
}
}

View File

@ -197,7 +197,7 @@ static void slc_bump(struct slcan *sl)
skb->ip_summed = CHECKSUM_UNNECESSARY;
memcpy(skb_put(skb, sizeof(struct can_frame)),
&cf, sizeof(struct can_frame));
netif_rx(skb);
netif_rx_ni(skb);
sl->dev->stats.rx_packets++;
sl->dev->stats.rx_bytes += cf.can_dlc;


@ -82,7 +82,7 @@ static int cards_found;
/*
* VLB I/O addresses
*/
static unsigned int pcnet32_portlist[] __initdata =
static unsigned int pcnet32_portlist[] =
{ 0x300, 0x320, 0x340, 0x360, 0 };
static int pcnet32_debug;


@ -65,8 +65,9 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
#ifdef BCM_CNIC
/* We don't want TPA on FCoE, FWD and OOO L2 rings */
bnx2x_fcoe(bp, disable_tpa) = 1;
/* We don't want TPA on an FCoE L2 ring */
if (IS_FCOE_FP(fp))
fp->disable_tpa = 1;
#endif
}
@ -1410,10 +1411,9 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct bnx2x *bp = netdev_priv(dev);
#ifdef BCM_CNIC
if (NO_FCOE(bp))
return skb_tx_hash(dev, skb);
else {
if (!NO_FCOE(bp)) {
struct ethhdr *hdr = (struct ethhdr *)skb->data;
u16 ether_type = ntohs(hdr->h_proto);
@ -1430,8 +1430,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
return bnx2x_fcoe_tx(bp, txq_index);
}
#endif
/* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
*/
/* select a non-FCoE queue */
return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
@ -1454,6 +1453,28 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues += NON_ETH_CONTEXT_USE;
}
/**
* bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
*
* @bp: Driver handle
*
* We currently support at most 16 Tx queues for each CoS, thus we will
* allocate a multiple of 16 for ETH L2 rings according to the value of the
* bp->max_cos.
*
* If there is an FCoE L2 queue the appropriate Tx queue will have the next
* index after all ETH L2 indices.
*
* If the actual number of Tx queues (for each CoS) is less than 16 then there
* will be holes at the end of each group of 16 ETH L2 indices (0..15,
* 16..31, ...) with indices that are not coupled with any real Tx queue.
*
* The proper configuration of skb->queue_mapping is handled by
* bnx2x_select_queue() and __skb_tx_hash().
*
* bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
* will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
*/
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
int rc, tx, rx;
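A toy model (illustrative; the helper name and numbers are hypothetical, only the layout comes from the comment above) of the documented index scheme: each CoS owns a group of 16 slots, real ETH rings fill the front of each group, and the rest of each group is a hole.

#include <stdio.h>

#define SLOTS_PER_COS   16      /* group of 16 Tx indices per CoS */

static int txq_index(int cos, int eth_ring)
{
        return cos * SLOTS_PER_COS + eth_ring;
}

int main(void)
{
        /* 3 CoS with 6 rings each: live indices 0..5, 16..21, 32..37 */
        printf("cos=2 ring=3 -> txq %d\n", txq_index(2, 3));    /* 35 */
        return 0;
}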


@ -923,7 +923,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
{
if (!CHIP_IS_E1x(bp)) {
if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
bp->dcb_state = dcb_on;
bp->dcbx_enabled = dcbx_enabled;
} else {


@ -5794,6 +5794,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
/*
* take the UNDI lock to protect undi_unload flow from accessing
* registers while we're resetting the chip
*/
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
bnx2x_reset_common(bp);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@ -5804,6 +5810,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
}
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
if (!CHIP_IS_E1x(bp)) {
@ -10245,10 +10253,17 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
/* Clean the following indirect addresses for all functions since they
* are not used by the driver.
*/
REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
/*
* Enable internal target-read (in case we are probed after PF FLR).


@ -3007,11 +3007,27 @@
/* [R 6] Debug only: Number of used entries in the data FIFO */
#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
/* [R 7] Debug only: Number of used entries in the header FIFO */
#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
#define PXP2_REG_PGL_ADDR_88_F0 0x120534
#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
#define PXP2_REG_PGL_ADDR_94_F0 0x120540
#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
#define PXP2_REG_PGL_ADDR_88_F0 0x120534
/* [R 32] GRC address for configuration access to PCIE config address 0x88.
* any write to this PCIE address will cause a GRC write access to the
* address that's in this register */
#define PXP2_REG_PGL_ADDR_88_F1 0x120544
#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
* any write to this PCIE address will cause a GRC write access to the
* address that's in this register */
#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
/* [R 32] GRC address for configuration access to PCIE config address 0x90.
* any write to this PCIE address will cause a GRC write access to the
* address that's in this register */
#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
#define PXP2_REG_PGL_ADDR_94_F0 0x120540
/* [R 32] GRC address for configuration access to PCIE config address 0x94.
* any write to this PCIE address will cause a GRC write access to the
* address that's in this register */
#define PXP2_REG_PGL_ADDR_94_F1 0x120550
#define PXP2_REG_PGL_CONTROL0 0x120490
#define PXP2_REG_PGL_CONTROL1 0x120514
#define PXP2_REG_PGL_DEBUG 0x120520


@ -2710,8 +2710,13 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev);
/* Set vlan tag */
if (fcb->flags & RXFCB_VLN)
/*
* There's a need to check for NETIF_F_HW_VLAN_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set.
*/
if (dev->features & NETIF_F_HW_VLAN_RX &&
fcb->flags & RXFCB_VLN)
__vlan_hwaccel_put_tag(skb, fcb->vlctl);
/* Send the packet up the stack */


@ -686,10 +686,21 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
{
unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
unsigned int local_rqfpr[MAX_FILER_IDX + 1];
unsigned int local_rqfcr[MAX_FILER_IDX + 1];
unsigned int *local_rqfpr;
unsigned int *local_rqfcr;
int i = 0x0, k = 0x0;
int j = MAX_FILER_IDX, l = 0x0;
int ret = 1;
local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
GFP_KERNEL);
local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
GFP_KERNEL);
if (!local_rqfpr || !local_rqfcr) {
pr_err("Out of memory\n");
ret = 0;
goto err;
}
switch (class) {
case TCP_V4_FLOW:
@ -706,7 +717,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
break;
default:
pr_err("Right now this class is not supported\n");
return 0;
ret = 0;
goto err;
}
for (i = 0; i < MAX_FILER_IDX + 1; i++) {
@ -721,7 +733,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
if (i == MAX_FILER_IDX + 1) {
pr_err("No parse rule found, can't create hash rules\n");
return 0;
ret = 0;
goto err;
}
/* If a match was found, then it begins the starting of a cluster rule
@ -765,7 +778,10 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
return 1;
err:
kfree(local_rqfcr);
kfree(local_rqfpr);
return ret;
}
static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
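The shape of the conversion above, as a kernel-style sketch (illustrative, reusing MAX_FILER_IDX from this file): the large scratch arrays move off the kernel stack to kmalloc(), every failure funnels through one label, and the frees run unconditionally because kfree(NULL) is a safe no-op.

static int build_rules(void)
{
        unsigned int *rqfpr, *rqfcr;
        int ret = 1;

        rqfpr = kmalloc(sizeof(*rqfpr) * (MAX_FILER_IDX + 1), GFP_KERNEL);
        rqfcr = kmalloc(sizeof(*rqfcr) * (MAX_FILER_IDX + 1), GFP_KERNEL);
        if (!rqfpr || !rqfcr) {
                ret = 0;
                goto err;
        }

        /* ... fill rqfpr[]/rqfcr[] and program the filer ... */

err:
        kfree(rqfcr);   /* safe even if the allocation failed */
        kfree(rqfpr);
        return ret;
}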


@ -2085,7 +2085,8 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_CHECK_PHY_HANG
| FLAG2_DISABLE_ASPM_L0S,
| FLAG2_DISABLE_ASPM_L0S
| FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@ -2104,7 +2105,8 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_AMT
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S,
.flags2 = FLAG2_DISABLE_ASPM_L0S
| FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,


@ -155,6 +155,9 @@ struct e1000_info;
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_LINK_UP 0x0040
#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
@ -453,6 +456,8 @@ struct e1000_info {
#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
#define FLAG2_DISABLE_AIM (1 << 8)
#define FLAG2_CHECK_PHY_HANG (1 << 9)
#define FLAG2_NO_DISABLE_RX (1 << 10)
#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))


@ -1206,7 +1206,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->next_to_clean = 0;
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
ew32(RCTL, rctl & ~E1000_RCTL_EN);
ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
ew32(RDBAH, ((u64) rx_ring->dma >> 32));
ew32(RDLEN, rx_ring->size);


@ -137,8 +137,9 @@
#define HV_PM_CTRL PHY_REG(770, 17)
/* PHY Low Power Idle Control */
#define I82579_LPI_CTRL PHY_REG(772, 20)
#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
#define I82579_LPI_CTRL PHY_REG(772, 20)
#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
/* EMI Registers */
#define I82579_EMI_ADDR 0x10
@ -163,6 +164,11 @@
#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
#define HV_KMRN_MDIO_SLOW 0x0400
/* KMRN FIFO Control and Status */
#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@ -657,6 +663,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
bool link;
u16 phy_reg;
/*
* We only want to go out to the PHY registers to see if Auto-Neg
@ -689,16 +696,35 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
mac->get_link_status = false;
if (hw->phy.type == e1000_phy_82578) {
ret_val = e1000_link_stall_workaround_hv(hw);
if (ret_val)
goto out;
}
if (hw->mac.type == e1000_pch2lan) {
switch (hw->mac.type) {
case e1000_pch2lan:
ret_val = e1000_k1_workaround_lv(hw);
if (ret_val)
goto out;
/* fall-thru */
case e1000_pchlan:
if (hw->phy.type == e1000_phy_82578) {
ret_val = e1000_link_stall_workaround_hv(hw);
if (ret_val)
goto out;
}
/*
* Workaround for PCHx parts in half-duplex:
* Set the number of preambles removed from the packet
* when it is passed from the PHY to the MAC to prevent
* the MAC from misinterpreting the packet type.
*/
e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
break;
default:
break;
}
/*
@ -788,6 +814,11 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
(adapter->hw.phy.type == e1000_phy_igp_3))
adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
/* Enable workaround for 82579 w/ ME enabled */
if ((adapter->hw.mac.type == e1000_pch2lan) &&
(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
/* Disable EEE by default until IEEE802.3az spec is finalized */
if (adapter->flags2 & FLAG2_HAS_EEE)
adapter->hw.dev_spec.ich8lan.eee_disable = true;
@ -1355,7 +1386,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
return ret_val;
/* Preamble tuning for SSC */
ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204);
ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
if (ret_val)
return ret_val;
}
@ -1645,6 +1676,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
s32 ret_val = 0;
u16 status_reg = 0;
u32 mac_reg;
u16 phy_reg;
if (hw->mac.type != e1000_pch2lan)
goto out;
@ -1659,12 +1691,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
mac_reg = er32(FEXTNVM4);
mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
if (status_reg & HV_M_STATUS_SPEED_1000)
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
else
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
if (ret_val)
goto out;
if (status_reg & HV_M_STATUS_SPEED_1000) {
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
} else {
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
}
ew32(FEXTNVM4, mac_reg);
ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
}
out:


@ -190,7 +190,8 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
/* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
if (!((nvm_data & NVM_COMPAT_LOM) ||
(hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
(hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
(hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
(hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
goto out;
ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
@ -200,10 +201,10 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
goto out;
}
if (nvm_alt_mac_addr_offset == 0xFFFF) {
if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
(nvm_alt_mac_addr_offset == 0x0000))
/* There is no Alternate MAC Address */
goto out;
}
if (hw->bus.func == E1000_FUNC_1)
nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;


@ -56,7 +56,7 @@
#define DRV_EXTRAVERSION "-k"
#define DRV_VERSION "1.3.16" DRV_EXTRAVERSION
#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@ -518,6 +518,63 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
adapter->hw_csum_good++;
}
/**
* e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
* @hw: pointer to the HW structure
* @tail: address of tail descriptor register
* @i: value to write to tail descriptor register
*
* When updating the tail register, the ME could be accessing Host CSR
* registers at the same time. Normally, this is handled in h/w by an
* arbiter but on some parts there is a bug that acknowledges Host accesses
* later than it should, which could result in the descriptor register
* having an incorrect value. Work around this by checking the FWSM register
* which has bit 24 set while ME is accessing Host CSR registers, wait
* if it is set and try again a number of times.
**/
static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
unsigned int i)
{
unsigned int j = 0;
while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
(er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
udelay(50);
writel(i, tail);
if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
return E1000_ERR_SWFW_SYNC;
return 0;
}
static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
{
u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
struct e1000_hw *hw = &adapter->hw;
if (e1000e_update_tail_wa(hw, tail, i)) {
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e_err("ME firmware caused invalid RDT - resetting\n");
schedule_work(&adapter->reset_task);
}
}
static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
{
u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
struct e1000_hw *hw = &adapter->hw;
if (e1000e_update_tail_wa(hw, tail, i)) {
u32 tctl = er32(TCTL);
ew32(TCTL, tctl & ~E1000_TCTL_EN);
e_err("ME firmware caused invalid TDT - resetting\n");
schedule_work(&adapter->reset_task);
}
}
/**
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
@ -573,7 +630,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
* such as IA-64).
*/
wmb();
writel(i, adapter->hw.hw_addr + rx_ring->tail);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(adapter, i);
else
writel(i, adapter->hw.hw_addr + rx_ring->tail);
}
i++;
if (i == rx_ring->count)
@ -673,7 +733,11 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
* such as IA-64).
*/
wmb();
writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(adapter, i << 1);
else
writel(i << 1,
adapter->hw.hw_addr + rx_ring->tail);
}
i++;
@ -756,7 +820,10 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
* applicable for weak-ordered memory model archs,
* such as IA-64). */
wmb();
writel(i, adapter->hw.hw_addr + rx_ring->tail);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(adapter, i);
else
writel(i, adapter->hw.hw_addr + rx_ring->tail);
}
}
@ -2915,7 +2982,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* disable receives while setting up the descriptors */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e1e_flush();
usleep_range(10000, 20000);
@ -3394,7 +3462,8 @@ void e1000e_down(struct e1000_adapter *adapter)
/* disable receives in the hardware */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
ew32(RCTL, rctl & ~E1000_RCTL_EN);
/* flush and sleep below */
netif_stop_queue(netdev);
@ -3403,6 +3472,7 @@ void e1000e_down(struct e1000_adapter *adapter)
tctl = er32(TCTL);
tctl &= ~E1000_TCTL_EN;
ew32(TCTL, tctl);
/* flush both disables and wait for them to finish */
e1e_flush();
usleep_range(10000, 20000);
@ -4686,7 +4756,12 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
wmb();
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(adapter, i);
else
writel(i, adapter->hw.hw_addr + tx_ring->tail);
/*
* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
