mirror of https://gitee.com/openkylin/linux.git
Linux 3.5-rc7
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.18 (GNU/Linux)

iQEcBAABAgAGBQJQAfWKAAoJEHm+PkMAQRiG/DwIAIullMhkDhD/GJcn24ZbUJoa
v6zRPK2hIavuKH/6bUoUiXT346PUYgVnRMhetuYKJFURz6KX/nmlxve/iXNn/WP1
9hnxhE+zcnp2qKI83c3Yok09eed1KnGY5hWQkqXM2gzji/OU0pCKchOcL01l//uz
iiWpNAXEVUnT92CafnHlZ55f/MWVqRFmDKi3Ty1YKSskhojQ6NOPsWCxrTxKVbim
2YPXc3D+xLHzF12ufVgla20AF4KnK8m+tFugniRAqArIagpzBUP1x1wk0RN5PyBD
FTP8lv7bSfBusp41/mPDB66WAe9EfQBoWQY6jloJjp0i8xnMyH5V05pImBV5NwU=
=O+gl
-----END PGP SIGNATURE-----

Merge tag 'v3.5-rc7' into regulator-drivers

Linux 3.5-rc7
commit d5b2e30bdc
@@ -1,26 +1,5 @@
What: /sys/block/rssd*/registers
Date: March 2012
KernelVersion: 3.3
Contact: Asai Thambi S P <asamymuthupa@micron.com>
Description: This is a read-only file. Dumps below driver information and
hardware registers.
- S ACTive
- Command Issue
- Completed
- PORT IRQ STAT
- HOST IRQ STAT
- Allocated
- Commands in Q

What: /sys/block/rssd*/status
Date: April 2012
KernelVersion: 3.4
Contact: Asai Thambi S P <asamymuthupa@micron.com>
Description: This is a read-only file. Indicates the status of the device.

What: /sys/block/rssd*/flags
Date: May 2012
KernelVersion: 3.5
Contact: Asai Thambi S P <asamymuthupa@micron.com>
Description: This is a read-only file. Dumps the flags in port and driver
data structure
@@ -142,13 +142,14 @@ KernelVersion: 3.4
Contact: linux-mtd@lists.infradead.org
Description:
This allows the user to examine and adjust the criteria by which
mtd returns -EUCLEAN from mtd_read(). If the maximum number of
bit errors that were corrected on any single region comprising
an ecc step (as reported by the driver) equals or exceeds this
value, -EUCLEAN is returned. Otherwise, absent an error, 0 is
returned. Higher layers (e.g., UBI) use this return code as an
indication that an erase block may be degrading and should be
scrutinized as a candidate for being marked as bad.
mtd returns -EUCLEAN from mtd_read() and mtd_read_oob(). If the
maximum number of bit errors that were corrected on any single
region comprising an ecc step (as reported by the driver) equals
or exceeds this value, -EUCLEAN is returned. Otherwise, absent
an error, 0 is returned. Higher layers (e.g., UBI) use this
return code as an indication that an erase block may be
degrading and should be scrutinized as a candidate for being
marked as bad.

The initial value may be specified by the flash device driver.
If not, then the default value is ecc_strength.

@@ -167,7 +168,7 @@ Description:
block degradation, but high enough to avoid the consequences of
a persistent return value of -EUCLEAN on devices where sticky
bitflips occur. Note that if bitflip_threshold exceeds
ecc_strength, -EUCLEAN is never returned by mtd_read().
ecc_strength, -EUCLEAN is never returned by the read operations.
Conversely, if bitflip_threshold is zero, -EUCLEAN is always
returned, absent a hard error.
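
The bitflip_threshold attribute described above is an ordinary sysfs file, so it can be inspected and tuned from user space. Below is a minimal C sketch (added here for illustration; it is not part of this change) which assumes the device of interest is registered as mtd0 and that the attribute sits at the usual per-device path /sys/class/mtd/mtd0/bitflip_threshold.

/*
 * Illustrative only: read and raise the bitflip_threshold attribute
 * described above.  The mtd0 path is an assumption; adjust it for the
 * device actually being tuned.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/class/mtd/mtd0/bitflip_threshold";
	unsigned int threshold;
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror("open bitflip_threshold");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%u", &threshold) != 1)
		threshold = 0;
	fclose(f);
	printf("current bitflip_threshold: %u\n", threshold);

	/* Raise the threshold by one: -EUCLEAN is then reported less eagerly. */
	f = fopen(path, "w");
	if (!f) {
		perror("open bitflip_threshold for writing");
		return EXIT_FAILURE;
	}
	fprintf(f, "%u\n", threshold + 1);
	fclose(f);
	return EXIT_SUCCESS;
}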
@@ -3988,7 +3988,7 @@ interface and may change in the future.</para>
from RGB to Y'CbCr color space.
</entry>
</row>
<row id = "v4l2-jpeg-chroma-subsampling">
<row>
<entrytbl spanname="descr" cols="2">
<tbody valign="top">
<row>

@@ -284,13 +284,6 @@ These controls are described in <xref
processing controls. These controls are described in <xref
linkend="image-process-controls" />.</entry>
</row>
<row>
<entry><constant>V4L2_CTRL_CLASS_JPEG</constant></entry>
<entry>0x9d0000</entry>
<entry>The class containing JPEG compression controls.
These controls are described in <xref
linkend="jpeg-controls" />.</entry>
</row>
</tbody>
</tgroup>
</table>
@@ -7,39 +7,39 @@ This target is read-only.

Construction Parameters
=======================
<version> <dev> <hash_dev> <hash_start>
<version> <dev> <hash_dev>
<data_block_size> <hash_block_size>
<num_data_blocks> <hash_start_block>
<algorithm> <digest> <salt>

<version>
This is the version number of the on-disk format.
This is the type of the on-disk hash format.

0 is the original format used in the Chromium OS.
The salt is appended when hashing, digests are stored continuously and
the rest of the block is padded with zeros.
The salt is appended when hashing, digests are stored continuously and
the rest of the block is padded with zeros.

1 is the current format that should be used for new devices.
The salt is prepended when hashing and each digest is
padded with zeros to the power of two.
The salt is prepended when hashing and each digest is
padded with zeros to the power of two.

<dev>
This is the device containing the data the integrity of which needs to be
This is the device containing data, the integrity of which needs to be
checked. It may be specified as a path, like /dev/sdaX, or a device number,
<major>:<minor>.

<hash_dev>
This is the device that that supplies the hash tree data. It may be
This is the device that supplies the hash tree data. It may be
specified similarly to the device path and may be the same device. If the
same device is used, the hash_start should be outside of the dm-verity
configured device size.
same device is used, the hash_start should be outside the configured
dm-verity device.

<data_block_size>
The block size on a data device. Each block corresponds to one digest on
the hash device.
The block size on a data device in bytes.
Each block corresponds to one digest on the hash device.

<hash_block_size>
The size of a hash block.
The size of a hash block in bytes.

<num_data_blocks>
The number of data blocks on the data device. Additional blocks are
@@ -65,7 +65,7 @@ Construction Parameters
Theory of operation
===================

dm-verity is meant to be setup as part of a verified boot path. This
dm-verity is meant to be set up as part of a verified boot path. This
may be anything ranging from a boot using tboot or trustedgrub to just
booting from a known-good device (like a USB drive or CD).
@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
has been authenticated in some way (cryptographic signatures, etc).
After instantiation, all hashes will be verified on-demand during
disk access. If they cannot be verified up to the root node of the
tree, the root hash, then the I/O will fail. This should identify
tree, the root hash, then the I/O will fail. This should detect
tampering with any data on the device and the hash data.

Cryptographic hashes are used to assert the integrity of the device on a
per-block basis. This allows for a lightweight hash computation on first read
into the page cache. Block hashes are stored linearly-aligned to the nearest
block the size of a page.
per-block basis. This allows for a lightweight hash computation on first read
into the page cache. Block hashes are stored linearly, aligned to the nearest
block size.

Hash Tree
---------

Each node in the tree is a cryptographic hash. If it is a leaf node, the hash
is of some block data on disk. If it is an intermediary node, then the hash is
of a number of child nodes.
of some data block on disk is calculated. If it is an intermediary node,
the hash of a number of child nodes is calculated.

Each entry in the tree is a collection of neighboring nodes that fit in one
block. The number is determined based on block_size and the size of the
@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096
On-disk format
==============

Below is the recommended on-disk format. The verity kernel code does not
read the on-disk header. It only reads the hash blocks which directly
follow the header. It is expected that a user-space tool will verify the
integrity of the verity_header and then call dmsetup with the correct
parameters. Alternatively, the header can be omitted and the dmsetup
parameters can be passed via the kernel command-line in a rooted chain
of trust where the command-line is verified.
The verity kernel code does not read the verity metadata on-disk header.
It only reads the hash blocks which directly follow the header.
It is expected that a user-space tool will verify the integrity of the
verity header.

The on-disk format is especially useful in cases where the hash blocks
are on a separate partition. The magic number allows easy identification
of the partition contents. Alternatively, the hash blocks can be stored
in the same partition as the data to be verified. In such a configuration
the filesystem on the partition would be sized a little smaller than
the full-partition, leaving room for the hash blocks.

struct superblock {
	uint8_t signature[8]
		"verity\0\0";

	uint8_t version;
		1 - current format

	uint8_t data_block_bits;
		log2(data block size)

	uint8_t hash_block_bits;
		log2(hash block size)

	uint8_t pad1[1];
		zero padding

	uint16_t salt_size;
		big-endian salt size

	uint8_t pad2[2];
		zero padding

	uint32_t data_blocks_hi;
		big-endian high 32 bits of the 64-bit number of data blocks

	uint32_t data_blocks_lo;
		big-endian low 32 bits of the 64-bit number of data blocks

	uint8_t algorithm[16];
		cryptographic algorithm

	uint8_t salt[384];
		salt (the salt size is specified above)

	uint8_t pad3[88];
		zero padding to 512-byte boundary
}
Alternatively, the header can be omitted and the dmsetup parameters can
be passed via the kernel command-line in a rooted chain of trust where
the command-line is verified.

Directly following the header (and with sector number padded to the next hash
block boundary) are the hash blocks which are stored a depth at a time
(starting from the root), sorted in order of increasing index.

The full specification of kernel parameters and on-disk metadata format
is available at the cryptsetup project's wiki page
http://code.google.com/p/cryptsetup/wiki/DMVerity

Status
======
V (for Valid) is returned if every check performed so far was valid.
@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.

Example
=======

Setup a device:
dmsetup create vroot --table \
"0 2097152 "\
"verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
Set up a device:
# dmsetup create vroot --readonly --table \
"0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
"4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
"1234000000000000000000000000000000000000000000000000000000000000"

A command line tool veritysetup is available to compute or verify
the hash tree or activate the kernel driver. This is available from
the LVM2 upstream repository and may be supplied as a package called
device-mapper-verity-tools:
git://sources.redhat.com/git/lvm2
http://sourceware.org/git/?p=lvm2.git
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
the hash tree or activate the kernel device. This is available from
the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
(as a libcryptsetup extension).

veritysetup -a vroot /dev/sda1 /dev/sda2 \
4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
Create hash on the device:
# veritysetup format /dev/sda1 /dev/sda2
...
Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076

Activate the device:
# veritysetup create vroot /dev/sda1 /dev/sda2 \
4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
@@ -2,6 +2,7 @@

Required properties:
- compatible : "fsl,mma8450".
- reg: the I2C address of MMA8450

Example:

@@ -46,8 +46,8 @@ Examples:

ecspi@70010000 { /* ECSPI1 */
	fsl,spi-num-chipselects = <2>;
	cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
		   <&gpio3 25 0>; /* GPIO4_25 */
	cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */
		   <&gpio4 25 0>; /* GPIO4_25 */
	status = "okay";

	pmic: mc13892@0 {

@@ -29,6 +29,6 @@ esdhc@70008000 {
	compatible = "fsl,imx51-esdhc";
	reg = <0x70008000 0x4000>;
	interrupts = <2>;
	cd-gpios = <&gpio0 6 0>; /* GPIO1_6 */
	wp-gpios = <&gpio0 5 0>; /* GPIO1_5 */
	cd-gpios = <&gpio1 6 0>; /* GPIO1_6 */
	wp-gpios = <&gpio1 5 0>; /* GPIO1_5 */
};

@@ -19,6 +19,6 @@ ethernet@83fec000 {
	reg = <0x83fec000 0x4000>;
	interrupts = <87>;
	phy-mode = "mii";
	phy-reset-gpios = <&gpio1 14 0>; /* GPIO2_14 */
	phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
	local-mac-address = [00 04 9F 01 1B B9];
};

@@ -17,6 +17,6 @@ ecspi@70010000 {
	reg = <0x70010000 0x4000>;
	interrupts = <36>;
	fsl,spi-num-chipselects = <2>;
	cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
		   <&gpio3 25 0>; /* GPIO4_25 */
	cs-gpios = <&gpio3 24 0>, /* GPIO3_24 */
		   <&gpio3 25 0>; /* GPIO3_25 */
};

@@ -3,6 +3,7 @@ Device tree binding vendor prefix registry. Keep list in alphabetical order.
This isn't an exhaustive list, but you should add new prefixes to it before
using them to avoid name-space collisions.

ad Avionic Design GmbH
adi Analog Devices, Inc.
amcc Applied Micro Circuits Corporation (APM, formally AMCC)
apm Applied Micro Circuits Corporation (APM)
@@ -0,0 +1,57 @@
The execve system call can grant a newly-started program privileges that
its parent did not have. The most obvious examples are setuid/setgid
programs and file capabilities. To prevent the parent program from
gaining these privileges as well, the kernel and user code must be
careful to prevent the parent from doing anything that could subvert the
child. For example:

- The dynamic loader handles LD_* environment variables differently if
a program is setuid.

- chroot is disallowed to unprivileged processes, since it would allow
/etc/passwd to be replaced from the point of view of a process that
inherited chroot.

- The exec code has special handling for ptrace.

These are all ad-hoc fixes. The no_new_privs bit (since Linux 3.5) is a
new, generic mechanism to make it safe for a process to modify its
execution environment in a manner that persists across execve. Any task
can set no_new_privs. Once the bit is set, it is inherited across fork,
clone, and execve and cannot be unset. With no_new_privs set, execve
promises not to grant the privilege to do anything that could not have
been done without the execve call. For example, the setuid and setgid
bits will no longer change the uid or gid; file capabilities will not
add to the permitted set, and LSMs will not relax constraints after
execve.

To set no_new_privs, use prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0).

Be careful, though: LSMs might also not tighten constraints on exec
in no_new_privs mode. (This means that setting up a general-purpose
service launcher to set no_new_privs before execing daemons may
interfere with LSM-based sandboxing.)

Note that no_new_privs does not prevent privilege changes that do not
involve execve. An appropriately privileged task can still call
setuid(2) and receive SCM_RIGHTS datagrams.

There are two main use cases for no_new_privs so far:

- Filters installed for the seccomp mode 2 sandbox persist across
execve and can change the behavior of newly-executed programs.
Unprivileged users are therefore only allowed to install such filters
if no_new_privs is set.

- By itself, no_new_privs can be used to reduce the attack surface
available to an unprivileged user. If everything running with a
given uid has no_new_privs set, then that uid will be unable to
escalate its privileges by directly attacking setuid, setgid, and
fcap-using binaries; it will need to compromise something without the
no_new_privs bit set first.

In the future, other potentially dangerous kernel features could become
available to unprivileged tasks if no_new_privs is set. In principle,
several options to unshare(2) and clone(2) would be safe when
no_new_privs is set, and no_new_privs + chroot is considerable less
dangerous than chroot by itself.
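
The prctl() call quoted above is the entire user-space interface for setting the bit. A minimal sketch follows (added here for illustration; it is not part of the original file); the program exec'ed afterwards is an arbitrary placeholder.

/*
 * Minimal illustration of the call described above: set no_new_privs,
 * then exec a helper.  The fallback define matches the value used by
 * Linux 3.5 in case the libc headers predate it.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#endif

int main(void)
{
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		perror("prctl(PR_SET_NO_NEW_PRIVS)");
		return 1;
	}
	/* From here on, execve() cannot grant new privileges. */
	execl("/bin/true", "true", (char *)NULL);
	perror("execl");
	return 1;
}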
@@ -1930,6 +1930,23 @@ The "pte_enc" field provides a value that can OR'ed into the hash
PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
into the hash PTE second double word).

4.75 KVM_IRQFD

Capability: KVM_CAP_IRQFD
Architectures: x86
Type: vm ioctl
Parameters: struct kvm_irqfd (in)
Returns: 0 on success, -1 on error

Allows setting an eventfd to directly trigger a guest interrupt.
kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
kvm_irqfd.gsi specifies the irqchip pin toggled by this event. When
an event is tiggered on the eventfd, an interrupt is injected into
the guest using the specified gsi pin. The irqfd is removed using
the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
and kvm_irqfd.gsi.


5. The kvm_run structure
------------------------
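
As a rough illustration of the ioctl described above (not part of the original document), the sketch below attaches an eventfd to a guest irqchip pin; vm_fd and gsi are assumed to come from an existing KVM setup (a KVM_CREATE_VM descriptor and a pin valid for the guest's interrupt routing).

/*
 * Illustrative sketch of KVM_IRQFD as described above.  vm_fd and gsi
 * are placeholders supplied by the caller.
 */
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int attach_irqfd(int vm_fd, unsigned int gsi)
{
	struct kvm_irqfd irqfd;
	int efd = eventfd(0, 0);

	if (efd < 0) {
		perror("eventfd");
		return -1;
	}

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd = efd;		/* eventfd that triggers the injection */
	irqfd.gsi = gsi;	/* irqchip pin toggled by the event */

	if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
		perror("KVM_IRQFD");
		return -1;
	}
	/*
	 * Writing to efd now injects an interrupt on the given gsi.  To
	 * remove it later, repeat the ioctl with irqfd.flags set to
	 * KVM_IRQFD_FLAG_DEASSIGN and the same fd/gsi pair.
	 */
	return efd;
}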
@@ -4654,8 +4654,8 @@ L: netfilter@vger.kernel.org
L: coreteam@netfilter.org
W: http://www.netfilter.org/
W: http://www.iptables.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
T: git git://1984.lsi.us.es/nf
T: git git://1984.lsi.us.es/nf-next
S: Supported
F: include/linux/netfilter*
F: include/linux/netfilter/

@@ -4857,6 +4857,7 @@ M: Kevin Hilman <khilman@ti.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/*omap*/*pm*
F: drivers/cpufreq/omap-cpufreq.c

OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
M: Rajendra Nayak <rnayak@ti.com>

@@ -5909,7 +5910,7 @@ M: Ingo Molnar <mingo@redhat.com>
M: Peter Zijlstra <peterz@infradead.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
F: kernel/sched*
F: kernel/sched/
F: include/linux/sched.h

SCORE ARCHITECTURE
Makefile

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 5
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Saber-toothed Squirrel

# *DOCUMENTATION*
@ -176,7 +176,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
|
|||
CONFIG_USB_DEVICEFS=y
|
||||
CONFIG_USB_SUSPEND=y
|
||||
CONFIG_USB_MON=y
|
||||
CONFIG_USB_EHCI_HCD=y
|
||||
CONFIG_USB_WDM=y
|
||||
CONFIG_USB_STORAGE=y
|
||||
CONFIG_USB_LIBUSUAL=y
|
||||
|
|
|
@ -243,7 +243,7 @@ typedef struct {
|
|||
|
||||
#define ATOMIC64_INIT(i) { (i) }
|
||||
|
||||
static inline u64 atomic64_read(atomic64_t *v)
|
||||
static inline u64 atomic64_read(const atomic64_t *v)
|
||||
{
|
||||
u64 result;
|
||||
|
||||
|
|
|
@ -60,13 +60,13 @@
|
|||
#ifndef __ASSEMBLY__
|
||||
|
||||
#ifdef CONFIG_CPU_USE_DOMAINS
|
||||
#define set_domain(x) \
|
||||
do { \
|
||||
__asm__ __volatile__( \
|
||||
"mcr p15, 0, %0, c3, c0 @ set domain" \
|
||||
: : "r" (x)); \
|
||||
isb(); \
|
||||
} while (0)
|
||||
static inline void set_domain(unsigned val)
|
||||
{
|
||||
asm volatile(
|
||||
"mcr p15, 0, %0, c3, c0 @ set domain"
|
||||
: : "r" (val));
|
||||
isb();
|
||||
}
|
||||
|
||||
#define modify_domain(dom,type) \
|
||||
do { \
|
||||
|
@ -78,8 +78,8 @@
|
|||
} while (0)
|
||||
|
||||
#else
|
||||
#define set_domain(x) do { } while (0)
|
||||
#define modify_domain(dom,type) do { } while (0)
|
||||
static inline void set_domain(unsigned val) { }
|
||||
static inline void modify_domain(unsigned dom, unsigned type) { }
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
|
@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
|
|||
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
|
||||
#define TIF_SYSCALL_TRACE 8
|
||||
#define TIF_SYSCALL_AUDIT 9
|
||||
#define TIF_SYSCALL_RESTARTSYS 10
|
||||
#define TIF_POLLING_NRFLAG 16
|
||||
#define TIF_USING_IWMMXT 17
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
|
@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
|
|||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
#define _TIF_SYSCALL_RESTARTSYS (1 << TIF_SYSCALL_RESTARTSYS)
|
||||
|
||||
/* Checks for any syscall work in entry-common.S */
|
||||
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
|
||||
_TIF_SYSCALL_RESTARTSYS)
|
||||
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
|
||||
|
||||
/*
|
||||
* Change these and you break ASM code in entry-common.S
|
||||
|
|
|
@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
|
|||
TEST_BF_R ("mov pc, r",0,2f,"")
|
||||
TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
|
||||
TEST_BB( "sub pc, pc, #1b-2b+8")
|
||||
#if __LINUX_ARM_ARCH__ >= 6
|
||||
TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
|
||||
#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
|
||||
TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
|
||||
#endif
|
||||
TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
|
||||
TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
|
||||
|
|
|
@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
|
|||
event_requires_mode_exclusion(&event->attr)) {
|
||||
pr_debug("ARM performance counters do not support "
|
||||
"mode exclusion\n");
|
||||
return -EPERM;
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -25,7 +25,6 @@
|
|||
#include <linux/regset.h>
|
||||
#include <linux/audit.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <linux/unistd.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/traps.h>
|
||||
|
@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
|
|||
audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
|
||||
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
|
||||
|
||||
if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
|
||||
scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
|
||||
if (!test_thread_flag(TIF_SYSCALL_TRACE))
|
||||
return scno;
|
||||
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
*/
|
||||
#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
|
||||
#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
|
||||
#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
|
||||
|
||||
/*
|
||||
* With EABI, the syscall number has to be loaded into r7.
|
||||
|
@ -46,6 +47,18 @@ const unsigned long sigreturn_codes[7] = {
|
|||
MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
|
||||
};
|
||||
|
||||
/*
|
||||
* Either we support OABI only, or we have EABI with the OABI
|
||||
* compat layer enabled. In the later case we don't know if
|
||||
* user space is EABI or not, and if not we must not clobber r7.
|
||||
* Always using the OABI syscall solves that issue and works for
|
||||
* all those cases.
|
||||
*/
|
||||
const unsigned long syscall_restart_code[2] = {
|
||||
SWI_SYS_RESTART, /* swi __NR_restart_syscall */
|
||||
0xe49df004, /* ldr pc, [sp], #4 */
|
||||
};
|
||||
|
||||
/*
|
||||
* atomically swap in the new signal mask, and wait for a signal.
|
||||
*/
|
||||
|
@ -592,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
|
|||
case -ERESTARTNOHAND:
|
||||
case -ERESTARTSYS:
|
||||
case -ERESTARTNOINTR:
|
||||
case -ERESTART_RESTARTBLOCK:
|
||||
regs->ARM_r0 = regs->ARM_ORIG_r0;
|
||||
regs->ARM_pc = restart_addr;
|
||||
break;
|
||||
case -ERESTART_RESTARTBLOCK:
|
||||
regs->ARM_r0 = -EINTR;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -611,14 +626,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
|
|||
* debugger has chosen to restart at a different PC.
|
||||
*/
|
||||
if (regs->ARM_pc == restart_addr) {
|
||||
if (retval == -ERESTARTNOHAND ||
|
||||
retval == -ERESTART_RESTARTBLOCK
|
||||
if (retval == -ERESTARTNOHAND
|
||||
|| (retval == -ERESTARTSYS
|
||||
&& !(ka.sa.sa_flags & SA_RESTART))) {
|
||||
regs->ARM_r0 = -EINTR;
|
||||
regs->ARM_pc = continue_addr;
|
||||
}
|
||||
clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
|
||||
}
|
||||
|
||||
handle_signal(signr, &ka, &info, regs);
|
||||
|
@ -632,8 +645,29 @@ static void do_signal(struct pt_regs *regs, int syscall)
|
|||
* ignore the restart.
|
||||
*/
|
||||
if (retval == -ERESTART_RESTARTBLOCK
|
||||
&& regs->ARM_pc == restart_addr)
|
||||
set_thread_flag(TIF_SYSCALL_RESTARTSYS);
|
||||
&& regs->ARM_pc == continue_addr) {
|
||||
if (thumb_mode(regs)) {
|
||||
regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
|
||||
regs->ARM_pc -= 2;
|
||||
} else {
|
||||
#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
|
||||
regs->ARM_r7 = __NR_restart_syscall;
|
||||
regs->ARM_pc -= 4;
|
||||
#else
|
||||
u32 __user *usp;
|
||||
|
||||
regs->ARM_sp -= 4;
|
||||
usp = (u32 __user *)regs->ARM_sp;
|
||||
|
||||
if (put_user(regs->ARM_pc, usp) == 0) {
|
||||
regs->ARM_pc = KERN_RESTART_CODE;
|
||||
} else {
|
||||
regs->ARM_sp += 4;
|
||||
force_sigsegv(0, current);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
restore_saved_sigmask();
|
||||
|
|
|
@ -8,5 +8,7 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
|
||||
#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
|
||||
|
||||
extern const unsigned long sigreturn_codes[7];
|
||||
extern const unsigned long syscall_restart_code[2];
|
||||
|
|
|
@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
|
|||
*/
|
||||
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
|
||||
sigreturn_codes, sizeof(sigreturn_codes));
|
||||
memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
|
||||
syscall_restart_code, sizeof(syscall_restart_code));
|
||||
|
||||
flush_icache_range(vectors, vectors + PAGE_SIZE);
|
||||
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
|
||||
|
|
|
@ -183,7 +183,9 @@ SECTIONS
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
PERCPU_SECTION(L1_CACHE_BYTES)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_XIP_KERNEL
|
||||
__data_loc = ALIGN(4); /* location in binary */
|
||||
|
|
|
@ -50,5 +50,6 @@
|
|||
#define POWER_MANAGEMENT (BRIDGE_VIRT_BASE | 0x011c)
|
||||
|
||||
#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
|
||||
#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
|
||||
|
||||
#endif
|
||||
|
|
|
@ -78,6 +78,7 @@
|
|||
|
||||
/* North-South Bridge */
|
||||
#define BRIDGE_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0x20000)
|
||||
#define BRIDGE_PHYS_BASE (DOVE_SB_REGS_PHYS_BASE | 0x20000)
|
||||
|
||||
/* Cryptographic Engine */
|
||||
#define DOVE_CRYPT_PHYS_BASE (DOVE_SB_REGS_PHYS_BASE | 0x30000)
|
||||
|
|
|
@ -119,7 +119,9 @@ static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
|
|||
struct exynos_pm_domain *pd)
|
||||
{
|
||||
if (pdev->dev.bus) {
|
||||
if (pm_genpd_add_device(&pd->pd, &pdev->dev))
|
||||
if (!pm_genpd_add_device(&pd->pd, &pdev->dev))
|
||||
pm_genpd_dev_need_restore(&pdev->dev, true);
|
||||
else
|
||||
pr_info("%s: error in adding %s device to %s power"
|
||||
"domain\n", __func__, dev_name(&pdev->dev),
|
||||
pd->name);
|
||||
|
@ -151,9 +153,12 @@ static __init int exynos4_pm_init_power_domain(void)
|
|||
if (of_have_populated_dt())
|
||||
return exynos_pm_dt_parse_domains();
|
||||
|
||||
for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
|
||||
pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
|
||||
exynos4_pm_domains[idx]->is_off);
|
||||
for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++) {
|
||||
struct exynos_pm_domain *pd = exynos4_pm_domains[idx];
|
||||
int on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
|
||||
|
||||
pm_genpd_init(&pd->pd, NULL, !on);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_S5P_DEV_FIMD0
|
||||
exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
|
||||
|
|
|
@ -201,7 +201,6 @@ int __init mx35_clocks_init()
|
|||
pr_err("i.MX35 clk %d: register failed with %ld\n",
|
||||
i, PTR_ERR(clk[i]));
|
||||
|
||||
|
||||
clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
|
||||
clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
|
||||
clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
|
||||
|
@ -264,6 +263,14 @@ int __init mx35_clocks_init()
|
|||
clk_prepare_enable(clk[iim_gate]);
|
||||
clk_prepare_enable(clk[emi_gate]);
|
||||
|
||||
/*
|
||||
* SCC is needed to boot via mmc after a watchdog reset. The clock code
|
||||
* before conversion to common clk also enabled UART1 (which isn't
|
||||
* handled here and not needed for mmc) and IIM (which is enabled
|
||||
* unconditionally above).
|
||||
*/
|
||||
clk_prepare_enable(clk[scc_gate]);
|
||||
|
||||
imx_print_silicon_rev("i.MX35", mx35_revision());
|
||||
|
||||
#ifdef CONFIG_MXC_USE_EPIT
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
#include <asm/mach-types.h>
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/time.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/system_info.h>
|
||||
#include <mach/common.h>
|
||||
#include <mach/iomux-mx27.h>
|
||||
|
||||
|
|
|
@ -1,29 +0,0 @@
|
|||
#ifndef __ASM_MACH_GPIO_PXA_H
|
||||
#define __ASM_MACH_GPIO_PXA_H
|
||||
|
||||
#include <mach/addr-map.h>
|
||||
#include <mach/cputype.h>
|
||||
#include <mach/irqs.h>
|
||||
|
||||
#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
|
||||
|
||||
#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
|
||||
#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
|
||||
|
||||
#define gpio_to_bank(gpio) ((gpio) >> 5)
|
||||
|
||||
/* NOTE: these macros are defined here to make optimization of
|
||||
* gpio_{get,set}_value() to work when 'gpio' is a constant.
|
||||
* Usage of these macros otherwise is no longer recommended,
|
||||
* use generic GPIO API whenever possible.
|
||||
*/
|
||||
#define GPIO_bit(gpio) (1 << ((gpio) & 0x1f))
|
||||
|
||||
#define GPLR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x00)
|
||||
#define GPDR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x0c)
|
||||
#define GPSR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x18)
|
||||
#define GPCR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x24)
|
||||
|
||||
#include <plat/gpio-pxa.h>
|
||||
|
||||
#endif /* __ASM_MACH_GPIO_PXA_H */
|
|
@ -31,5 +31,6 @@
|
|||
#define IRQ_MASK_HIGH_OFF 0x0014
|
||||
|
||||
#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
|
||||
#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
|
||||
|
||||
#endif
|
||||
|
|
|
@ -42,6 +42,7 @@
|
|||
#define MV78XX0_CORE0_REGS_PHYS_BASE 0xf1020000
|
||||
#define MV78XX0_CORE1_REGS_PHYS_BASE 0xf1024000
|
||||
#define MV78XX0_CORE_REGS_VIRT_BASE 0xfe400000
|
||||
#define MV78XX0_CORE_REGS_PHYS_BASE 0xfe400000
|
||||
#define MV78XX0_CORE_REGS_SIZE SZ_16K
|
||||
|
||||
#define MV78XX0_PCIE_IO_PHYS_BASE(i) (0xf0800000 + ((i) << 20))
|
||||
|
@ -59,6 +60,7 @@
|
|||
* Core-specific peripheral registers.
|
||||
*/
|
||||
#define BRIDGE_VIRT_BASE (MV78XX0_CORE_REGS_VIRT_BASE)
|
||||
#define BRIDGE_PHYS_BASE (MV78XX0_CORE_REGS_PHYS_BASE)
|
||||
|
||||
/*
|
||||
* Register Map
|
||||
|
|
|
@ -205,6 +205,16 @@ static int apx4devkit_phy_fixup(struct phy_device *phy)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void __init apx4devkit_fec_phy_clk_enable(void)
|
||||
{
|
||||
struct clk *clk;
|
||||
|
||||
/* Enable fec phy clock */
|
||||
clk = clk_get_sys("enet_out", NULL);
|
||||
if (!IS_ERR(clk))
|
||||
clk_prepare_enable(clk);
|
||||
}
|
||||
|
||||
static void __init apx4devkit_init(void)
|
||||
{
|
||||
mx28_soc_init();
|
||||
|
@ -225,6 +235,7 @@ static void __init apx4devkit_init(void)
|
|||
phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
|
||||
apx4devkit_phy_fixup);
|
||||
|
||||
apx4devkit_fec_phy_clk_enable();
|
||||
mx28_add_fec(0, &mx28_fec_pdata);
|
||||
|
||||
mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata);
|
||||
|
|
|
@ -494,8 +494,8 @@ static void __init overo_init(void)
|
|||
|
||||
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
|
||||
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
|
||||
omap_hsmmc_init(mmc);
|
||||
overo_i2c_init();
|
||||
omap_hsmmc_init(mmc);
|
||||
omap_display_init(&overo_dss_data);
|
||||
omap_serial_init();
|
||||
omap_sdrc_init(mt46h32m32lf6_sdrc_params,
|
||||
|
|
|
@ -31,12 +31,16 @@
|
|||
*
|
||||
* CLKDM_NO_AUTODEPS: Prevent "autodeps" from being added/removed from this
|
||||
* clockdomain. (Currently, this applies to OMAP3 clockdomains only.)
|
||||
* CLKDM_ACTIVE_WITH_MPU: The PRCM guarantees that this clockdomain is
|
||||
* active whenever the MPU is active. True for interconnects and
|
||||
* the WKUP clockdomains.
|
||||
*/
|
||||
#define CLKDM_CAN_FORCE_SLEEP (1 << 0)
|
||||
#define CLKDM_CAN_FORCE_WAKEUP (1 << 1)
|
||||
#define CLKDM_CAN_ENABLE_AUTO (1 << 2)
|
||||
#define CLKDM_CAN_DISABLE_AUTO (1 << 3)
|
||||
#define CLKDM_NO_AUTODEPS (1 << 4)
|
||||
#define CLKDM_ACTIVE_WITH_MPU (1 << 5)
|
||||
|
||||
#define CLKDM_CAN_HWSUP (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
|
||||
#define CLKDM_CAN_SWSUP (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
|
||||
|
|
|
@ -88,4 +88,5 @@ struct clockdomain wkup_common_clkdm = {
|
|||
.name = "wkup_clkdm",
|
||||
.pwrdm = { .name = "wkup_pwrdm" },
|
||||
.dep_bit = OMAP_EN_WKUP_SHIFT,
|
||||
.flags = CLKDM_ACTIVE_WITH_MPU,
|
||||
};
|
||||
|
|
|
@ -381,7 +381,7 @@ static struct clockdomain l4_wkup_44xx_clkdm = {
|
|||
.cm_inst = OMAP4430_PRM_WKUP_CM_INST,
|
||||
.clkdm_offs = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
|
||||
.dep_bit = OMAP4430_L4WKUP_STATDEP_SHIFT,
|
||||
.flags = CLKDM_CAN_HWSUP,
|
||||
.flags = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
|
||||
};
|
||||
|
||||
static struct clockdomain emu_sys_44xx_clkdm = {
|
||||
|
|
|
@ -1124,15 +1124,18 @@ static struct omap_hwmod_addr_space * __init _find_mpu_rt_addr_space(struct omap
|
|||
* _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
|
||||
* @oh: struct omap_hwmod *
|
||||
*
|
||||
* If module is marked as SWSUP_SIDLE, force the module out of slave
|
||||
* idle; otherwise, configure it for smart-idle. If module is marked
|
||||
* as SWSUP_MSUSPEND, force the module out of master standby;
|
||||
* otherwise, configure it for smart-standby. No return value.
|
||||
* Ensure that the OCP_SYSCONFIG register for the IP block represented
|
||||
* by @oh is set to indicate to the PRCM that the IP block is active.
|
||||
* Usually this means placing the module into smart-idle mode and
|
||||
* smart-standby, but if there is a bug in the automatic idle handling
|
||||
* for the IP block, it may need to be placed into the force-idle or
|
||||
* no-idle variants of these modes. No return value.
|
||||
*/
|
||||
static void _enable_sysc(struct omap_hwmod *oh)
|
||||
{
|
||||
u8 idlemode, sf;
|
||||
u32 v;
|
||||
bool clkdm_act;
|
||||
|
||||
if (!oh->class->sysc)
|
||||
return;
|
||||
|
@ -1141,8 +1144,16 @@ static void _enable_sysc(struct omap_hwmod *oh)
|
|||
sf = oh->class->sysc->sysc_flags;
|
||||
|
||||
if (sf & SYSC_HAS_SIDLEMODE) {
|
||||
idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
|
||||
HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
|
||||
clkdm_act = ((oh->clkdm &&
|
||||
oh->clkdm->flags & CLKDM_ACTIVE_WITH_MPU) ||
|
||||
(oh->_clk && oh->_clk->clkdm &&
|
||||
oh->_clk->clkdm->flags & CLKDM_ACTIVE_WITH_MPU));
|
||||
if (clkdm_act && !(oh->class->sysc->idlemodes &
|
||||
(SIDLE_SMART | SIDLE_SMART_WKUP)))
|
||||
idlemode = HWMOD_IDLEMODE_FORCE;
|
||||
else
|
||||
idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
|
||||
HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
|
||||
_set_slave_idlemode(oh, idlemode, &v);
|
||||
}
|
||||
|
||||
|
@ -1208,8 +1219,13 @@ static void _idle_sysc(struct omap_hwmod *oh)
|
|||
sf = oh->class->sysc->sysc_flags;
|
||||
|
||||
if (sf & SYSC_HAS_SIDLEMODE) {
|
||||
idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
|
||||
HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
|
||||
/* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
|
||||
if (oh->flags & HWMOD_SWSUP_SIDLE ||
|
||||
!(oh->class->sysc->idlemodes &
|
||||
(SIDLE_SMART | SIDLE_SMART_WKUP)))
|
||||
idlemode = HWMOD_IDLEMODE_FORCE;
|
||||
else
|
||||
idlemode = HWMOD_IDLEMODE_SMART;
|
||||
_set_slave_idlemode(oh, idlemode, &v);
|
||||
}
|
||||
|
||||
|
|
|
@ -1928,7 +1928,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
|
|||
|
||||
static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
|
||||
{ .role = "pad_fck", .clk = "pad_clks_ck" },
|
||||
{ .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
|
||||
{ .role = "prcm_fck", .clk = "mcbsp1_sync_mux_ck" },
|
||||
};
|
||||
|
||||
static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
|
||||
|
@ -1963,7 +1963,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
|
|||
|
||||
static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
|
||||
{ .role = "pad_fck", .clk = "pad_clks_ck" },
|
||||
{ .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
|
||||
{ .role = "prcm_fck", .clk = "mcbsp2_sync_mux_ck" },
|
||||
};
|
||||
|
||||
static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
|
||||
|
@ -1998,7 +1998,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
|
|||
|
||||
static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
|
||||
{ .role = "pad_fck", .clk = "pad_clks_ck" },
|
||||
{ .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
|
||||
{ .role = "prcm_fck", .clk = "mcbsp3_sync_mux_ck" },
|
||||
};
|
||||
|
||||
static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
|
||||
|
@ -2033,7 +2033,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
|
|||
|
||||
static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
|
||||
{ .role = "pad_fck", .clk = "pad_clks_ck" },
|
||||
{ .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
|
||||
{ .role = "prcm_fck", .clk = "mcbsp4_sync_mux_ck" },
|
||||
};
|
||||
|
||||
static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
|
||||
|
@ -3864,7 +3864,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
|
|||
};
|
||||
|
||||
/* usb_host_fs -> l3_main_2 */
|
||||
static struct omap_hwmod_ocp_if omap44xx_usb_host_fs__l3_main_2 = {
|
||||
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_usb_host_fs__l3_main_2 = {
|
||||
.master = &omap44xx_usb_host_fs_hwmod,
|
||||
.slave = &omap44xx_l3_main_2_hwmod,
|
||||
.clk = "l3_div_ck",
|
||||
|
@ -3922,7 +3922,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
|
|||
};
|
||||
|
||||
/* aess -> l4_abe */
|
||||
static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
|
||||
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
|
||||
.master = &omap44xx_aess_hwmod,
|
||||
.slave = &omap44xx_l4_abe_hwmod,
|
||||
.clk = "ocp_abe_iclk",
|
||||
|
@ -4013,7 +4013,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
|
|||
};
|
||||
|
||||
/* l4_abe -> aess */
|
||||
static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
|
||||
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
|
||||
.master = &omap44xx_l4_abe_hwmod,
|
||||
.slave = &omap44xx_aess_hwmod,
|
||||
.clk = "ocp_abe_iclk",
|
||||
|
@ -4031,7 +4031,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
|
|||
};
|
||||
|
||||
/* l4_abe -> aess (dma) */
|
||||
static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
|
||||
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
|
||||
.master = &omap44xx_l4_abe_hwmod,
|
||||
.slave = &omap44xx_aess_hwmod,
|
||||
.clk = "ocp_abe_iclk",
|
||||
|
@ -5857,7 +5857,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_host_fs_addrs[] = {
|
|||
};
|
||||
|
||||
/* l4_cfg -> usb_host_fs */
|
||||
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_fs = {
|
||||
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
|
||||
.master = &omap44xx_l4_cfg_hwmod,
|
||||
.slave = &omap44xx_usb_host_fs_hwmod,
|
||||
.clk = "l4_div_ck",
|
||||
|
@ -6014,13 +6014,13 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
|
|||
&omap44xx_iva__l3_main_2,
|
||||
&omap44xx_l3_main_1__l3_main_2,
|
||||
&omap44xx_l4_cfg__l3_main_2,
|
||||
&omap44xx_usb_host_fs__l3_main_2,
|
||||
/* &omap44xx_usb_host_fs__l3_main_2, */
|
||||
&omap44xx_usb_host_hs__l3_main_2,
|
||||
&omap44xx_usb_otg_hs__l3_main_2,
|
||||
&omap44xx_l3_main_1__l3_main_3,
|
||||
&omap44xx_l3_main_2__l3_main_3,
|
||||
&omap44xx_l4_cfg__l3_main_3,
|
||||
&omap44xx_aess__l4_abe,
|
||||
/* &omap44xx_aess__l4_abe, */
|
||||
&omap44xx_dsp__l4_abe,
|
||||
&omap44xx_l3_main_1__l4_abe,
|
||||
&omap44xx_mpu__l4_abe,
|
||||
|
@ -6029,8 +6029,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
|
|||
&omap44xx_l4_cfg__l4_wkup,
|
||||
&omap44xx_mpu__mpu_private,
|
||||
&omap44xx_l4_cfg__ocp_wp_noc,
|
||||
&omap44xx_l4_abe__aess,
|
||||
&omap44xx_l4_abe__aess_dma,
|
||||
/* &omap44xx_l4_abe__aess, */
|
||||
/* &omap44xx_l4_abe__aess_dma, */
|
||||
&omap44xx_l3_main_2__c2c,
|
||||
&omap44xx_l4_wkup__counter_32k,
|
||||
&omap44xx_l4_cfg__ctrl_module_core,
|
||||
|
@ -6136,7 +6136,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
|
|||
&omap44xx_l4_per__uart2,
|
||||
&omap44xx_l4_per__uart3,
|
||||
&omap44xx_l4_per__uart4,
|
||||
&omap44xx_l4_cfg__usb_host_fs,
|
||||
/* &omap44xx_l4_cfg__usb_host_fs, */
|
||||
&omap44xx_l4_cfg__usb_host_hs,
|
||||
&omap44xx_l4_cfg__usb_otg_hs,
|
||||
&omap44xx_l4_cfg__usb_tll_hs,
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
#include "twl-common.h"
|
||||
#include "pm.h"
|
||||
#include "voltage.h"
|
||||
#include "mux.h"
|
||||
|
||||
static struct i2c_board_info __initdata pmic_i2c_board_info = {
|
||||
.addr = 0x48,
|
||||
|
@ -77,6 +78,7 @@ void __init omap4_pmic_init(const char *pmic_type,
|
|||
struct twl6040_platform_data *twl6040_data, int twl6040_irq)
|
||||
{
|
||||
/* PMIC part*/
|
||||
omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
|
||||
strncpy(omap4_i2c1_board_info[0].type, pmic_type,
|
||||
sizeof(omap4_i2c1_board_info[0].type));
|
||||
omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
|
||||
|
|
|
@ -127,7 +127,11 @@ static unsigned long hx4700_pin_config[] __initdata = {
|
|||
GPIO19_SSP2_SCLK,
|
||||
GPIO86_SSP2_RXD,
|
||||
GPIO87_SSP2_TXD,
|
||||
GPIO88_GPIO,
|
||||
GPIO88_GPIO | MFP_LPM_DRIVE_HIGH, /* TSC2046_CS */
|
||||
|
||||
/* BQ24022 Regulator */
|
||||
GPIO72_GPIO | MFP_LPM_KEEP_OUTPUT, /* BQ24022_nCHARGE_EN */
|
||||
GPIO96_GPIO | MFP_LPM_KEEP_OUTPUT, /* BQ24022_ISET2 */
|
||||
|
||||
/* HX4700 specific input GPIOs */
|
||||
GPIO12_GPIO | WAKEUP_ON_EDGE_RISE, /* ASIC3_IRQ */
|
||||
|
@ -135,6 +139,10 @@ static unsigned long hx4700_pin_config[] __initdata = {
|
|||
GPIO14_GPIO, /* nWLAN_IRQ */
|
||||
|
||||
/* HX4700 specific output GPIOs */
|
||||
GPIO61_GPIO | MFP_LPM_DRIVE_HIGH, /* W3220_nRESET */
|
||||
GPIO71_GPIO | MFP_LPM_DRIVE_HIGH, /* ASIC3_nRESET */
|
||||
GPIO81_GPIO | MFP_LPM_DRIVE_HIGH, /* CPU_GP_nRESET */
|
||||
GPIO116_GPIO | MFP_LPM_DRIVE_HIGH, /* CPU_HW_nRESET */
|
||||
GPIO102_GPIO | MFP_LPM_DRIVE_LOW, /* SYNAPTICS_POWER_ON */
|
||||
|
||||
GPIO10_GPIO, /* GSM_IRQ */
|
||||
|
@ -872,14 +880,19 @@ static struct gpio global_gpios[] = {
|
|||
{ GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
|
||||
{ GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
|
||||
{ GPIO32_HX4700_RS232_ON, GPIOF_OUT_INIT_HIGH, "RS232_ON" },
|
||||
{ GPIO61_HX4700_W3220_nRESET, GPIOF_OUT_INIT_HIGH, "W3220_nRESET" },
|
||||
{ GPIO71_HX4700_ASIC3_nRESET, GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
|
||||
{ GPIO81_HX4700_CPU_GP_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_GP_nRESET" },
|
||||
{ GPIO82_HX4700_EUART_RESET, GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
|
||||
{ GPIO116_HX4700_CPU_HW_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_HW_nRESET" },
|
||||
};
|
||||
|
||||
static void __init hx4700_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
PCFR = PCFR_GPR_EN | PCFR_OPDE;
|
||||
|
||||
pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
|
||||
gpio_set_wake(GPIO12_HX4700_ASIC3_IRQ, 1);
|
||||
ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
|
||||
|
|
|
@ -106,7 +106,7 @@ static struct clk s3c2440_clk_cam_upll = {
|
|||
static struct clk s3c2440_clk_ac97 = {
|
||||
.name = "ac97",
|
||||
.enable = s3c2410_clkcon_enable,
|
||||
.ctrlbit = S3C2440_CLKCON_CAMERA,
|
||||
.ctrlbit = S3C2440_CLKCON_AC97,
|
||||
};
|
||||
|
||||
static unsigned long s3c2440_fclk_n_getrate(struct clk *clk)
|
||||
|
|
|
@ -22,8 +22,13 @@
|
|||
#include <mach/common.h>
|
||||
#include <mach/emev2.h>
|
||||
|
||||
#ifdef CONFIG_ARCH_SH73A0
|
||||
#define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2() || \
|
||||
of_machine_is_compatible("renesas,sh73a0"))
|
||||
#else
|
||||
#define is_sh73a0() (0)
|
||||
#endif
|
||||
|
||||
#define is_r8a7779() machine_is_marzen()
|
||||
|
||||
#ifdef CONFIG_ARCH_EMEV2
|
||||
|
|
|
@ -625,11 +625,6 @@ static struct platform_device *snowball_platform_devs[] __initdata = {
|
|||
&ab8500_device,
|
||||
};
|
||||
|
||||
static struct platform_device *snowball_of_platform_devs[] __initdata = {
|
||||
&snowball_led_dev,
|
||||
&snowball_key_dev,
|
||||
};
|
||||
|
||||
static void __init mop500_init_machine(void)
|
||||
{
|
||||
struct device *parent = NULL;
|
||||
|
@ -769,6 +764,11 @@ MACHINE_END
|
|||
|
||||
#ifdef CONFIG_MACH_UX500_DT
|
||||
|
||||
static struct platform_device *snowball_of_platform_devs[] __initdata = {
|
||||
&snowball_led_dev,
|
||||
&snowball_key_dev,
|
||||
};
|
||||
|
||||
struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
|
||||
/* Requires DMA and call-back bindings. */
|
||||
OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat),
|
||||
|
@ -786,6 +786,8 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
|
|||
OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
|
||||
OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
|
||||
OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
|
||||
/* Requires device name bindings. */
|
||||
OF_DEV_AUXDATA("stericsson,nmk_pinctrl", 0, "pinctrl-db8500", NULL),
|
||||
{},
|
||||
};
|
||||
|
||||
|
|
|
@ -63,8 +63,10 @@ static void __init ux500_timer_init(void)
|
|||
|
||||
/* TODO: Once MTU has been DT:ed place code above into else. */
|
||||
if (of_have_populated_dt()) {
|
||||
#ifdef CONFIG_OF
|
||||
np = of_find_matching_node(NULL, prcmu_timer_of_match);
|
||||
if (!np)
|
||||
#endif
|
||||
goto dt_fail;
|
||||
|
||||
tmp_base = of_iomap(np, 0);
|
||||
|
|
|
@ -339,7 +339,6 @@ void __init pci_versatile_preinit(void)
|
|||
static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
|
||||
{
|
||||
int irq;
|
||||
int devslot = PCI_SLOT(dev->devfn);
|
||||
|
||||
/* slot, pin, irq
|
||||
* 24 1 27
|
||||
|
|
|
@ -64,7 +64,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
|
|||
#ifdef CONFIG_ZONE_DMA
|
||||
extern phys_addr_t arm_dma_limit;
|
||||
#else
|
||||
#define arm_dma_limit ((u32)~0)
|
||||
#define arm_dma_limit ((phys_addr_t)~0)
|
||||
#endif
|
||||
|
||||
extern phys_addr_t arm_lowmem_limit;
|
||||
|
|
|
@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
|
|||
}
|
||||
}
|
||||
|
||||
#ifndef CONFIG_ARM_LPAE
|
||||
|
||||
/*
|
||||
* The Linux PMD is made of two consecutive section entries covering 2MB
|
||||
* (see definition in include/asm/pgtable-2level.h). However a call to
|
||||
* create_mapping() may optimize static mappings by using individual
|
||||
* 1MB section mappings. This leaves the actual PMD potentially half
|
||||
* initialized if the top or bottom section entry isn't used, leaving it
|
||||
* open to problems if a subsequent ioremap() or vmalloc() tries to use
|
||||
* the virtual space left free by that unused section entry.
|
||||
*
|
||||
* Let's avoid the issue by inserting dummy vm entries covering the unused
|
||||
* PMD halves once the static mappings are in place.
|
||||
*/
|
||||
|
||||
static void __init pmd_empty_section_gap(unsigned long addr)
|
||||
{
|
||||
struct vm_struct *vm;
|
||||
|
||||
vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
|
||||
vm->addr = (void *)addr;
|
||||
vm->size = SECTION_SIZE;
|
||||
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
|
||||
vm->caller = pmd_empty_section_gap;
|
||||
vm_area_add_early(vm);
|
||||
}
|
||||
|
||||
static void __init fill_pmd_gaps(void)
|
||||
{
|
||||
struct vm_struct *vm;
|
||||
unsigned long addr, next = 0;
|
||||
pmd_t *pmd;
|
||||
|
||||
/* we're still single threaded hence no lock needed here */
|
||||
for (vm = vmlist; vm; vm = vm->next) {
|
||||
if (!(vm->flags & VM_ARM_STATIC_MAPPING))
|
||||
continue;
|
||||
addr = (unsigned long)vm->addr;
|
||||
if (addr < next)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Check if this vm starts on an odd section boundary.
|
||||
* If so and the first section entry for this PMD is free
|
||||
* then we block the corresponding virtual address.
|
||||
*/
|
||||
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
|
||||
pmd = pmd_off_k(addr);
|
||||
if (pmd_none(*pmd))
|
||||
pmd_empty_section_gap(addr & PMD_MASK);
|
||||
}
|
||||
|
||||
/*
|
||||
* Then check if this vm ends on an odd section boundary.
|
||||
* If so and the second section entry for this PMD is empty
|
||||
* then we block the corresponding virtual address.
|
||||
*/
|
||||
addr += vm->size;
|
||||
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
|
||||
pmd = pmd_off_k(addr) + 1;
|
||||
if (pmd_none(*pmd))
|
||||
pmd_empty_section_gap(addr);
|
||||
}
|
||||
|
||||
/* no need to look at any vm entry until we hit the next PMD */
|
||||
next = (addr + PMD_SIZE - 1) & PMD_MASK;
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
#define fill_pmd_gaps() do { } while (0)
|
||||
#endif
|
||||
|
||||
static void * __initdata vmalloc_min =
|
||||
(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
|
||||
|
||||
|
@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
|
|||
*/
|
||||
if (mdesc->map_io)
|
||||
mdesc->map_io();
|
||||
fill_pmd_gaps();
|
||||
|
||||
/*
|
||||
* Finally flush the caches and tlb to ensure that we're in a
|
||||
|
|
|
@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (client->is_ts && adc->ts_pend)
|
||||
return -EAGAIN;
|
||||
|
||||
spin_lock_irqsave(&adc->lock, flags);
|
||||
|
||||
if (client->is_ts && adc->ts_pend) {
|
||||
spin_unlock_irqrestore(&adc->lock, flags);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
client->channel = channel;
|
||||
client->nr_samples = nr_samples;
|
||||
|
||||
|
|
|
@ -126,7 +126,8 @@ struct platform_device s3c_device_adc = {
|
|||
#ifdef CONFIG_CPU_S3C2440
|
||||
static struct resource s3c_camif_resource[] = {
|
||||
[0] = DEFINE_RES_MEM(S3C2440_PA_CAMIF, S3C2440_SZ_CAMIF),
|
||||
[1] = DEFINE_RES_IRQ(IRQ_CAM),
|
||||
[1] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_C),
|
||||
[2] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_P),
|
||||
};
|
||||
|
||||
struct platform_device s3c_device_camif = {
|
||||
|
|
|
@ -37,6 +37,7 @@ struct clk clk_ext_xtal_mux = {
|
|||
struct clk clk_xusbxti = {
|
||||
.name = "xusbxti",
|
||||
.id = -1,
|
||||
.rate = 24000000,
|
||||
};
|
||||
|
||||
struct clk s5p_clk_27m = {
|
||||
|
|
|
@ -70,4 +70,7 @@ extern int is_in_rom(unsigned long);
|
|||
#define VMALLOC_END 0xffffffff
|
||||
|
||||
#define arch_enter_lazy_cpu_mode() do {} while (0)
|
||||
|
||||
#include <asm-generic/pgtable.h>
|
||||
|
||||
#endif /* _H8300_PGTABLE_H */
|
||||
|
|
|
@ -100,7 +100,6 @@ extern int __put_user_bad(void);
|
|||
break; \
|
||||
default: \
|
||||
__gu_err = __get_user_bad(); \
|
||||
__gu_val = 0; \
|
||||
break; \
|
||||
} \
|
||||
(x) = __gu_val; \
|
||||
|
@ -159,4 +158,6 @@ clear_user(void *to, unsigned long n)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#define __clear_user clear_user
|
||||
|
||||
#endif /* _H8300_UACCESS_H */
|
||||
|
|
|
@ -447,7 +447,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
|
|||
* want to handle. Thus you cannot kill init even with a SIGKILL even by
|
||||
* mistake.
|
||||
*/
|
||||
statis void do_signal(struct pt_regs *regs)
|
||||
static void do_signal(struct pt_regs *regs)
|
||||
{
|
||||
siginfo_t info;
|
||||
int signr;
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include <linux/profile.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq_regs.h>
|
||||
#include <asm/timer.h>
|
||||
|
||||
#define TICK_SIZE (tick_nsec / 1000)
|
||||
|
|
|
@ -129,7 +129,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
|
|||
|
||||
/* setup reset gpio used by pci */
|
||||
reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
|
||||
if (reset_gpio > 0)
|
||||
if (gpio_is_valid(reset_gpio))
|
||||
devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
|
||||
|
||||
/* enable auto-switching between PCI and EBU */
|
||||
|
@ -192,7 +192,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
|
|||
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
|
||||
|
||||
/* toggle reset pin */
|
||||
if (reset_gpio > 0) {
|
||||
if (gpio_is_valid(reset_gpio)) {
|
||||
__gpio_set_value(reset_gpio, 0);
|
||||
wmb();
|
||||
mdelay(1);
|
||||
|
|
|
@ -81,9 +81,6 @@ struct pt_regs {
|
|||
#define PTRACE_GETFPREGS 14
|
||||
#define PTRACE_SETFPREGS 15
|
||||
|
||||
/* options set using PTRACE_SETOPTIONS */
|
||||
#define PTRACE_O_TRACESYSGOOD 0x00000001
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
|
||||
|
|
|
@ -123,7 +123,7 @@ static inline unsigned long current_stack_pointer(void)
|
|||
}
|
||||
|
||||
#ifndef CONFIG_KGDB
|
||||
void arch_release_thread_info(struct thread_info *ti)
|
||||
void arch_release_thread_info(struct thread_info *ti);
|
||||
#endif
|
||||
#define get_thread_info(ti) get_task_struct((ti)->task)
|
||||
#define put_thread_info(ti) put_task_struct((ti)->task)
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
#ifndef _ASM_TIMEX_H
|
||||
#define _ASM_TIMEX_H
|
||||
|
||||
#include <asm/hardirq.h>
|
||||
#include <unit/timex.h>
|
||||
|
||||
#define TICK_SIZE (tick_nsec / 1000)
|
||||
|
@ -30,16 +29,6 @@ static inline cycles_t get_cycles(void)
|
|||
extern int init_clockevents(void);
|
||||
extern int init_clocksource(void);
|
||||
|
||||
static inline void setup_jiffies_interrupt(int irq,
|
||||
struct irqaction *action)
|
||||
{
|
||||
u16 tmp;
|
||||
setup_irq(irq, action);
|
||||
set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
|
||||
GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
|
||||
tmp = GxICR(irq);
|
||||
}
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* _ASM_TIMEX_H */
|
||||
|
|
|
@ -70,6 +70,16 @@ static void event_handler(struct clock_event_device *dev)
|
|||
{
|
||||
}
|
||||
|
||||
static inline void setup_jiffies_interrupt(int irq,
|
||||
struct irqaction *action)
|
||||
{
|
||||
u16 tmp;
|
||||
setup_irq(irq, action);
|
||||
set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
|
||||
GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
|
||||
tmp = GxICR(irq);
|
||||
}
|
||||
|
||||
int __init init_clockevents(void)
|
||||
{
|
||||
struct clock_event_device *cd;
|
||||
|
|
|
@ -9,6 +9,8 @@
|
|||
* 2 of the Licence, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/irqreturn.h>
|
||||
|
||||
struct clocksource;
|
||||
struct clock_event_device;
|
||||
|
||||
|
|
|
@ -170,9 +170,9 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
|
|||
case SC1TXIRQ:
|
||||
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
|
||||
case TM12IRQ:
|
||||
#elif CONFIG_MN10300_TTYSM1_TIMER9
|
||||
#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
|
||||
case TM9IRQ:
|
||||
#elif CONFIG_MN10300_TTYSM1_TIMER3
|
||||
#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
|
||||
case TM3IRQ:
|
||||
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
|
||||
#endif /* CONFIG_MN10300_TTYSM1 */
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <linux/kdebug.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/export.h>
|
||||
#include <asm/processor.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/export.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
static unsigned long pci_sram_allocated = 0xbc000000;
|
||||
|
|
|
@ -11,10 +11,6 @@
|
|||
#ifndef _ASM_UNIT_TIMEX_H
|
||||
#define _ASM_UNIT_TIMEX_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/irq.h>
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#include <asm/timer-regs.h>
|
||||
#include <unit/clock.h>
|
||||
#include <asm/param.h>
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/platform_device.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/timex.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/intctl-regs.h>
|
||||
|
|
|
@ -11,10 +11,6 @@
|
|||
#ifndef _ASM_UNIT_TIMEX_H
|
||||
#define _ASM_UNIT_TIMEX_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/irq.h>
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#include <asm/timer-regs.h>
|
||||
#include <unit/clock.h>
|
||||
#include <asm/param.h>
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/pci.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/intctl-regs.h>
|
||||
|
|
|
@ -11,10 +11,6 @@
|
|||
#ifndef _ASM_UNIT_TIMEX_H
|
||||
#define _ASM_UNIT_TIMEX_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/irq.h>
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#include <asm/timer-regs.h>
|
||||
#include <unit/clock.h>
|
||||
#include <asm/param.h>
|
||||
|
|
|
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
}

#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)

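Aside, an illustration that is not from the patch: dropping the trailing semicolons matters because a statement-like macro that already ends in ';' breaks callers that also write one, most visibly around if/else. A minimal sketch with hypothetical names:

	void do_enable(void);
	void do_other(void);

	/* With a trailing ';' in the macro body, the call below would expand to
	 * "do_enable();;" -- the stray empty statement ends the 'if', and the
	 * following 'else' no longer has a matching 'if' and fails to compile. */
	#define irq_enable()	do_enable()	/* no trailing ';': the caller writes it */

	void example(int cond)
	{
		if (cond)
			irq_enable();
		else
			do_other();
	}
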
@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
|
|||
return !regs->softe;
|
||||
}
|
||||
|
||||
extern bool prep_irq_for_idle(void);
|
||||
|
||||
#else /* CONFIG_PPC64 */
|
||||
|
||||
#define SET_MSR_EE(x) mtmsr(x)
|
||||
|
|
|
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
	else {
		/*
		 * We should already be hard disabled here. We had bugs
@@ -286,6 +286,52 @@ void notrace restore_interrupts(void)
	__hard_irq_enable();
}

+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+	/*
+	 * First we need to hard disable to ensure no interrupt
+	 * occurs before we effectively enter the low power state
+	 */
+	hard_irq_disable();
+
+	/*
+	 * If anything happened while we were soft-disabled,
+	 * we return now and do not enter the low power state.
+	 */
+	if (lazy_irq_pending())
+		return false;
+
+	/* Tell lockdep we are about to re-enable */
+	trace_hardirqs_on();
+
+	/*
+	 * Mark interrupts as soft-enabled and clear the
+	 * PACA_IRQ_HARD_DIS from the pending mask since we
+	 * are about to hard enable as well as a side effect
+	 * of entering the low power state.
+	 */
+	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+	local_paca->soft_enabled = 1;
+
+	/* Tell the caller to enter the low power state */
+	return true;
+}
+
#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)

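For orientation, a sketch of my own rather than part of the merge: the later cell and pseries idle hunks in this diff let prep_irq_for_idle() decide whether the low-power entry may proceed, roughly as follows. enter_low_power_state() is a stand-in for cede_processor() or the CBE nap sequence; this is a kernel-context fragment, not a standalone program.

	/* Hypothetical platform idle routine following the pattern above. */
	static void example_power_save(void)
	{
		/* Called with interrupts soft-disabled (ppc_md.power_save). */
		if (!prep_irq_for_idle())
			return;		/* an interrupt is already pending: bail out */

		enter_low_power_state();	/* may hard re-enable interrupts as a side effect */
	}
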
@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
|
|||
lwz r3,VCORE_NAPPING_THREADS(r5)
|
||||
lwz r4,VCPU_PTID(r9)
|
||||
li r0,1
|
||||
sldi r0,r0,r4
|
||||
sld r0,r0,r4
|
||||
andc. r3,r3,r0 /* no sense IPI'ing ourselves */
|
||||
beq 43f
|
||||
mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
|
||||
|
|
|
@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
|
|||
case H_PUT_TCE:
|
||||
return kvmppc_h_pr_put_tce(vcpu);
|
||||
case H_CEDE:
|
||||
vcpu->arch.shared->msr |= MSR_EE;
|
||||
kvm_vcpu_block(vcpu);
|
||||
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
|
||||
vcpu->stat.halt_wakeup++;
|
||||
|
|
|
@ -639,7 +639,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
|
|||
unsigned int n, rc, ranges, is_kexec_kdump = 0;
|
||||
unsigned long lmb_size, base, size, sz;
|
||||
int nid;
|
||||
struct assoc_arrays aa;
|
||||
struct assoc_arrays aa = { .arrays = NULL };
|
||||
|
||||
n = of_get_drconf_memory(memory, &dm);
|
||||
if (!n)
|
||||
|
|
|
@ -42,11 +42,9 @@ static void cbe_power_save(void)
|
|||
{
|
||||
unsigned long ctrl, thread_switch_control;
|
||||
|
||||
/*
|
||||
* We need to hard disable interrupts, the local_irq_enable() done by
|
||||
* our caller upon return will hard re-enable.
|
||||
*/
|
||||
hard_irq_disable();
|
||||
/* Ensure our interrupt state is properly tracked */
|
||||
if (!prep_irq_for_idle())
|
||||
return;
|
||||
|
||||
ctrl = mfspr(SPRN_CTRLF);
|
||||
|
||||
|
@ -81,6 +79,9 @@ static void cbe_power_save(void)
|
|||
*/
|
||||
ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
|
||||
mtspr(SPRN_CTRLT, ctrl);
|
||||
|
||||
/* Re-enable interrupts in MSR */
|
||||
__hard_irq_enable();
|
||||
}
|
||||
|
||||
static int cbe_system_reset_exception(struct pt_regs *regs)
|
||||
|
|
|
@@ -99,15 +99,18 @@ static int snooze_loop(struct cpuidle_device *dev,
static void check_and_cede_processor(void)
{
	/*
-	 * Interrupts are soft-disabled at this point,
-	 * but not hard disabled. So an interrupt might have
-	 * occurred before entering NAP, and would be potentially
-	 * lost (edge events, decrementer events, etc...) unless
-	 * we first hard disable then check.
+	 * Ensure our interrupt state is properly tracked,
+	 * also checks if no interrupt has occurred while we
+	 * were soft-disabled
	 */
-	hard_irq_disable();
-	if (!lazy_irq_pending())
+	if (prep_irq_for_idle()) {
		cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+		/* Ensure that H_CEDE returns with IRQs on */
+		if (WARN_ON(!(mfmsr() & MSR_EE)))
+			__hard_irq_enable();
+#endif
+	}
}

static int dedicated_cede_loop(struct cpuidle_device *dev,

@ -971,7 +971,7 @@ static int cpu_cmd(void)
|
|||
/* print cpus waiting or in xmon */
|
||||
printf("cpus stopped:");
|
||||
count = 0;
|
||||
for (cpu = 0; cpu < NR_CPUS; ++cpu) {
|
||||
for_each_possible_cpu(cpu) {
|
||||
if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
|
||||
if (count == 0)
|
||||
printf(" %x", cpu);
|
||||
|
|
|
@ -19,9 +19,20 @@ static inline u32 inl(unsigned long addr)
|
|||
return -1;
|
||||
}
|
||||
|
||||
#define outb(x, y) BUG()
|
||||
#define outw(x, y) BUG()
|
||||
#define outl(x, y) BUG()
|
||||
static inline void outb(unsigned char x, unsigned long port)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
static inline void outw(unsigned short x, unsigned long port)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
static inline void outl(unsigned int x, unsigned long port)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
#define inb_p(addr) inb(addr)
|
||||
#define inw_p(addr) inw(addr)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
#include <linux/serial_core.h>
|
||||
#include <linux/io.h>
|
||||
#include <cpu/serial.h>
|
||||
#include <asm/gpio.h>
|
||||
#include <cpu/gpio.h>
|
||||
|
||||
static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
|
||||
{
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/backtrace.h>
|
||||
#include <asm/tile-desc.h>
|
||||
#include <arch/abi.h>
|
||||
|
@ -336,8 +337,12 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
|
|||
bytes_to_prefetch / sizeof(tile_bundle_bits);
|
||||
}
|
||||
|
||||
/* Decode the next bundle. */
|
||||
bundle.bits = prefetched_bundles[next_bundle++];
|
||||
/*
|
||||
* Decode the next bundle.
|
||||
* TILE always stores instruction bundles in little-endian
|
||||
* mode, even when the chip is running in big-endian mode.
|
||||
*/
|
||||
bundle.bits = le64_to_cpu(prefetched_bundles[next_bundle++]);
|
||||
bundle.num_insns =
|
||||
parse_insn_tile(bundle.bits, pc, bundle.insns);
|
||||
num_info_ops = bt_get_info_ops(&bundle, info_operands);
|
||||
|
|
|
@ -705,7 +705,6 @@ static void stack_proc(void *arg)
|
|||
struct task_struct *from = current, *to = arg;
|
||||
|
||||
to->thread.saved_task = from;
|
||||
rcu_switch_from(from);
|
||||
switch_to(from, to, from);
|
||||
}
|
||||
|
||||
|
|
|
@ -139,6 +139,19 @@ static int addr_to_vsyscall_nr(unsigned long addr)
|
|||
return nr;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SECCOMP
|
||||
static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
|
||||
{
|
||||
if (!seccomp_mode(&tsk->seccomp))
|
||||
return 0;
|
||||
task_pt_regs(tsk)->orig_ax = syscall_nr;
|
||||
task_pt_regs(tsk)->ax = syscall_nr;
|
||||
return __secure_computing(syscall_nr);
|
||||
}
|
||||
#else
|
||||
#define vsyscall_seccomp(_tsk, _nr) 0
|
||||
#endif
|
||||
|
||||
static bool write_ok_or_segv(unsigned long ptr, size_t size)
|
||||
{
|
||||
/*
|
||||
|
@ -174,6 +187,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
|
|||
int vsyscall_nr;
|
||||
int prev_sig_on_uaccess_error;
|
||||
long ret;
|
||||
int skip;
|
||||
|
||||
/*
|
||||
* No point in checking CS -- the only way to get here is a user mode
|
||||
|
@ -205,9 +219,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
|
|||
}
|
||||
|
||||
tsk = current;
|
||||
if (seccomp_mode(&tsk->seccomp))
|
||||
do_exit(SIGKILL);
|
||||
|
||||
/*
|
||||
* With a real vsyscall, page faults cause SIGSEGV. We want to
|
||||
* preserve that behavior to make writing exploits harder.
|
||||
|
@ -222,8 +233,13 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
|
|||
* address 0".
|
||||
*/
|
||||
ret = -EFAULT;
|
||||
skip = 0;
|
||||
switch (vsyscall_nr) {
|
||||
case 0:
|
||||
skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
|
||||
if (skip)
|
||||
break;
|
||||
|
||||
if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
|
||||
!write_ok_or_segv(regs->si, sizeof(struct timezone)))
|
||||
break;
|
||||
|
@ -234,6 +250,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
|
|||
break;
|
||||
|
||||
case 1:
|
||||
skip = vsyscall_seccomp(tsk, __NR_time);
|
||||
if (skip)
|
||||
break;
|
||||
|
||||
if (!write_ok_or_segv(regs->di, sizeof(time_t)))
|
||||
break;
|
||||
|
||||
|
@ -241,6 +261,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
|
|||
break;
|
||||
|
||||
case 2:
|
||||
skip = vsyscall_seccomp(tsk, __NR_getcpu);
|
||||
if (skip)
|
||||
break;
|
||||
|
||||
if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
|
||||
!write_ok_or_segv(regs->si, sizeof(unsigned)))
|
||||
break;
|
||||
|
@ -253,6 +277,12 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
|
|||
|
||||
current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
|
||||
|
||||
if (skip) {
|
||||
if ((long)regs->ax <= 0L) /* seccomp errno emulation */
|
||||
goto do_ret;
|
||||
goto done; /* seccomp trace/trap */
|
||||
}
|
||||
|
||||
if (ret == -EFAULT) {
|
||||
/* Bad news -- userspace fed a bad pointer to a vsyscall. */
|
||||
warn_bad_vsyscall(KERN_INFO, regs,
|
||||
|
@ -271,10 +301,11 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
|
|||
|
||||
regs->ax = ret;
|
||||
|
||||
do_ret:
|
||||
/* Emulate a ret instruction. */
|
||||
regs->ip = caller;
|
||||
regs->sp += 8;
|
||||
|
||||
done:
|
||||
return true;
|
||||
|
||||
sigsegv:
|
||||
|
|
|
@ -3934,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
|
|||
{
|
||||
struct kvm_mmu_page *page;
|
||||
|
||||
if (list_empty(&kvm->arch.active_mmu_pages))
|
||||
return;
|
||||
|
||||
page = container_of(kvm->arch.active_mmu_pages.prev,
|
||||
struct kvm_mmu_page, link);
|
||||
kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
|
||||
|
|
|
@@ -277,7 +277,7 @@ void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)

	/* Don't leak any random bits. */

-	memset(elfregs, 0, sizeof (elfregs));
+	memset(elfregs, 0, sizeof(*elfregs));

	/* Note: PS.EXCM is not set while user task is running; its
	 * being set in regs->ps is for exception handling convenience.

@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
|
|||
|
||||
blkg->pd[i] = pd;
|
||||
pd->blkg = blkg;
|
||||
}
|
||||
|
||||
/* invoke per-policy init */
|
||||
for (i = 0; i < BLKCG_MAX_POLS; i++) {
|
||||
struct blkcg_policy *pol = blkcg_policy[i];
|
||||
|
||||
/* invoke per-policy init */
|
||||
if (blkcg_policy_enabled(blkg->q, pol))
|
||||
pol->pd_init_fn(blkg);
|
||||
}
|
||||
|
@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
|
|||
|
||||
static void blkg_destroy(struct blkcg_gq *blkg)
|
||||
{
|
||||
struct request_queue *q = blkg->q;
|
||||
struct blkcg *blkcg = blkg->blkcg;
|
||||
|
||||
lockdep_assert_held(q->queue_lock);
|
||||
lockdep_assert_held(blkg->q->queue_lock);
|
||||
lockdep_assert_held(&blkcg->lock);
|
||||
|
||||
/* Something wrong if we are trying to remove same group twice */
|
||||
|
|
|
@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
|
|||
*/
|
||||
void blk_drain_queue(struct request_queue *q, bool drain_all)
|
||||
{
|
||||
int i;
|
||||
|
||||
while (true) {
|
||||
bool drain = false;
|
||||
int i;
|
||||
|
||||
spin_lock_irq(q->queue_lock);
|
||||
|
||||
|
@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
|
|||
break;
|
||||
msleep(10);
|
||||
}
|
||||
|
||||
/*
|
||||
* With queue marked dead, any woken up waiter will fail the
|
||||
* allocation path, so the wakeup chaining is lost and we're
|
||||
* left with hung waiters. We need to wake up those waiters.
|
||||
*/
|
||||
if (q->request_fn) {
|
||||
spin_lock_irq(q->queue_lock);
|
||||
for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
|
||||
wake_up_all(&q->rq.wait[i]);
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
|
|||
/* mark @q DEAD, no new request or merges will be allowed afterwards */
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
|
||||
|
||||
spin_lock_irq(lock);
|
||||
|
||||
/*
|
||||
|
@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
|
|||
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
|
||||
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
|
||||
queue_flag_set(QUEUE_FLAG_DEAD, q);
|
||||
|
||||
if (q->queue_lock != &q->__queue_lock)
|
||||
q->queue_lock = &q->__queue_lock;
|
||||
|
||||
spin_unlock_irq(lock);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
|
@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
|
|||
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
|
||||
blk_sync_queue(q);
|
||||
|
||||
spin_lock_irq(lock);
|
||||
if (q->queue_lock != &q->__queue_lock)
|
||||
q->queue_lock = &q->__queue_lock;
|
||||
spin_unlock_irq(lock);
|
||||
|
||||
/* @q is and will stay empty, shutdown and put */
|
||||
blk_put_queue(q);
|
||||
}
|
||||
|
|
|
@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
|
|||
mod_timer(&q->timeout, expiry);
|
||||
}
|
||||
|
||||
/**
|
||||
* blk_abort_queue -- Abort all request on given queue
|
||||
* @queue: pointer to queue
|
||||
*
|
||||
*/
|
||||
void blk_abort_queue(struct request_queue *q)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct request *rq, *tmp;
|
||||
LIST_HEAD(list);
|
||||
|
||||
/*
|
||||
* Not a request based block device, nothing to abort
|
||||
*/
|
||||
if (!q->request_fn)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
|
||||
elv_abort_queue(q);
|
||||
|
||||
/*
|
||||
* Splice entries to local list, to avoid deadlocking if entries
|
||||
* get readded to the timeout list by error handling
|
||||
*/
|
||||
list_splice_init(&q->timeout_list, &list);
|
||||
|
||||
list_for_each_entry_safe(rq, tmp, &list, timeout_list)
|
||||
blk_abort_request(rq);
|
||||
|
||||
/*
|
||||
* Occasionally, blk_abort_request() will return without
|
||||
* deleting the element from the list. Make sure we add those back
|
||||
* instead of leaving them on the local stack list.
|
||||
*/
|
||||
list_splice(&list, &q->timeout_list);
|
||||
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_abort_queue);
|
||||
|
|
|
@ -17,8 +17,6 @@
|
|||
#include "blk.h"
|
||||
#include "blk-cgroup.h"
|
||||
|
||||
static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
|
||||
|
||||
/*
|
||||
* tunables
|
||||
*/
|
||||
|
@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
|
|||
return pd ? container_of(pd, struct cfq_group, pd) : NULL;
|
||||
}
|
||||
|
||||
static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
|
||||
{
|
||||
return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
|
||||
}
|
||||
|
||||
static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
|
||||
{
|
||||
return pd_to_blkg(&cfqg->pd);
|
||||
|
@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
|
|||
|
||||
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
||||
|
||||
static struct blkcg_policy blkcg_policy_cfq;
|
||||
|
||||
static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
|
||||
{
|
||||
return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
|
||||
}
|
||||
|
||||
static inline void cfqg_get(struct cfq_group *cfqg)
|
||||
{
|
||||
return blkg_get(cfqg_to_blkg(cfqg));
|
||||
|
@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)
|
|||
|
||||
cfq_shutdown_timer_wq(cfqd);
|
||||
|
||||
#ifndef CONFIG_CFQ_GROUP_IOSCHED
|
||||
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
||||
blkcg_deactivate_policy(q, &blkcg_policy_cfq);
|
||||
#else
|
||||
kfree(cfqd->root_group);
|
||||
#endif
|
||||
blkcg_deactivate_policy(q, &blkcg_policy_cfq);
|
||||
kfree(cfqd);
|
||||
}
|
||||
|
||||
|
@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
|
|||
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
||||
if (!cfq_group_idle)
|
||||
cfq_group_idle = 1;
|
||||
#else
|
||||
cfq_group_idle = 0;
|
||||
#endif
|
||||
|
||||
ret = blkcg_policy_register(&blkcg_policy_cfq);
|
||||
if (ret)
|
||||
return ret;
|
||||
#else
|
||||
cfq_group_idle = 0;
|
||||
#endif
|
||||
|
||||
ret = -ENOMEM;
|
||||
cfq_pool = KMEM_CACHE(cfq_queue, 0);
|
||||
if (!cfq_pool)
|
||||
goto err_pol_unreg;
|
||||
|
@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
|
|||
err_free_pool:
|
||||
kmem_cache_destroy(cfq_pool);
|
||||
err_pol_unreg:
|
||||
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
||||
blkcg_policy_unregister(&blkcg_policy_cfq);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit cfq_exit(void)
|
||||
{
|
||||
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
||||
blkcg_policy_unregister(&blkcg_policy_cfq);
|
||||
#endif
|
||||
elv_unregister(&iosched_cfq);
|
||||
kmem_cache_destroy(cfq_pool);
|
||||
}
|
||||
|
|
|
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
		break;
	}

+	if (capable(CAP_SYS_RAWIO))
+		return 0;
+
	/* In particular, rule out all resets and host-specific ioctls. */
	printk_ratelimited(KERN_WARNING
			"%s: sending ioctl %x to a partition!\n", current->comm, cmd);

-	return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);

@ -95,18 +95,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
if (sleep_state != ACPI_STATE_S5) {
|
||||
/*
|
||||
* Disable BM arbitration. This feature is contained within an
|
||||
* optional register (PM2 Control), so ignore a BAD_ADDRESS
|
||||
* exception.
|
||||
*/
|
||||
status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
|
||||
if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* 1) Disable/Clear all GPEs
|
||||
* 2) Enable all wakeup GPEs
|
||||
|
@ -364,16 +352,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
|
|||
[ACPI_EVENT_POWER_BUTTON].
|
||||
status_register_id, ACPI_CLEAR_STATUS);
|
||||
|
||||
/*
|
||||
* Enable BM arbitration. This feature is contained within an
|
||||
* optional register (PM2 Control), so ignore a BAD_ADDRESS
|
||||
* exception.
|
||||
*/
|
||||
status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
|
||||
if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
|
|
@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
|
|||
/* Create the new outer package and populate it */
|
||||
|
||||
status =
|
||||
acpi_ns_wrap_with_package(data, *elements,
|
||||
acpi_ns_wrap_with_package(data, return_object,
|
||||
return_object_ptr);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return (status);
|
||||
|
|
|
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
	 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
	 * }
	 *
-	 * Ignores apic_id and always return 0 for CPU0's handle.
+	 * Ignores apic_id and always returns 0 for the processor
+	 * handle with acpi id 0 if nr_cpu_ids is 1.
+	 * This should be the case if SMP tables are not found.
	 * Return -1 for other CPU's handle.
	 */
-	if (acpi_id == 0)
+	if (nr_cpu_ids <= 1 && acpi_id == 0)
		return acpi_id;
	else
		return apic_id;

@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
|
|||
first_word = 0;
|
||||
spin_lock_irq(&b->bm_lock);
|
||||
}
|
||||
|
||||
/* last page (respectively only page, for first page == last page) */
|
||||
last_word = MLPP(el >> LN2_BPL);
|
||||
bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
|
||||
|
||||
/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
|
||||
* ==> e = 32767, el = 32768, last_page = 2,
|
||||
* and now last_word = 0.
|
||||
* We do not want to touch last_page in this case,
|
||||
* as we did not allocate it, it is not present in bitmap->bm_pages.
|
||||
*/
|
||||
if (last_word)
|
||||
bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
|
||||
|
||||
/* possibly trailing bits.
|
||||
* example: (e & 63) == 63, el will be e+1.
|
||||
|
|
|
@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
|
|||
req->rq_state |= RQ_LOCAL_COMPLETED;
|
||||
req->rq_state &= ~RQ_LOCAL_PENDING;
|
||||
|
||||
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
|
||||
if (req->rq_state & RQ_LOCAL_ABORTED) {
|
||||
_req_may_be_done(req, m);
|
||||
break;
|
||||
}
|
||||
|
||||
__drbd_chk_io_error(mdev, false);
|
||||
|
||||
goto_queue_for_net_read:
|
||||
|
||||
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
|
||||
|
||||
/* no point in retrying if there is no good remote data,
|
||||
* or we have no connection. */
|
||||
if (mdev->state.pdsk != D_UP_TO_DATE) {
|
||||
|
@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
|
|||
return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
|
||||
}
|
||||
|
||||
static void maybe_pull_ahead(struct drbd_conf *mdev)
|
||||
{
|
||||
int congested = 0;
|
||||
|
||||
/* If I don't even have good local storage, we can not reasonably try
|
||||
* to pull ahead of the peer. We also need the local reference to make
|
||||
* sure mdev->act_log is there.
|
||||
* Note: caller has to make sure that net_conf is there.
|
||||
*/
|
||||
if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
|
||||
return;
|
||||
|
||||
if (mdev->net_conf->cong_fill &&
|
||||
atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
|
||||
dev_info(DEV, "Congestion-fill threshold reached\n");
|
||||
congested = 1;
|
||||
}
|
||||
|
||||
if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
|
||||
dev_info(DEV, "Congestion-extents threshold reached\n");
|
||||
congested = 1;
|
||||
}
|
||||
|
||||
if (congested) {
|
||||
queue_barrier(mdev); /* last barrier, after mirrored writes */
|
||||
|
||||
if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
|
||||
_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
|
||||
else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
|
||||
_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
|
||||
}
|
||||
put_ldev(mdev);
|
||||
}
|
||||
|
||||
static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
|
||||
{
|
||||
const int rw = bio_rw(bio);
|
||||
|
@ -972,29 +1011,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
|
|||
_req_mod(req, queue_for_send_oos);
|
||||
|
||||
if (remote &&
|
||||
mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
|
||||
int congested = 0;
|
||||
|
||||
if (mdev->net_conf->cong_fill &&
|
||||
atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
|
||||
dev_info(DEV, "Congestion-fill threshold reached\n");
|
||||
congested = 1;
|
||||
}
|
||||
|
||||
if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
|
||||
dev_info(DEV, "Congestion-extents threshold reached\n");
|
||||
congested = 1;
|
||||
}
|
||||
|
||||
if (congested) {
|
||||
queue_barrier(mdev); /* last barrier, after mirrored writes */
|
||||
|
||||
if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
|
||||
_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
|
||||
else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
|
||||
_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
|
||||
}
|
||||
}
|
||||
mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
|
||||
maybe_pull_ahead(mdev);
|
||||
|
||||
spin_unlock_irq(&mdev->req_lock);
|
||||
kfree(b); /* if someone else has beaten us to it... */
|
||||
|
|
|
@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)
|
|||
|
||||
if (drive == current_reqD)
|
||||
drive = current_drive;
|
||||
__cancel_delayed_work(&fd_timeout);
|
||||
|
||||
if (drive < 0 || drive >= N_DRIVE) {
|
||||
delay = 20UL * HZ;
|
||||
|
|
|
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
	struct gendisk *disk;
	int err;

+	err = -ENOMEM;
	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
-	if (!lo) {
-		err = -ENOMEM;
+	if (!lo)
		goto out;
-	}

-	err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
-	if (err < 0)
+	if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
		goto out_free_dev;

	if (i >= 0) {

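A short gloss on the hunk above, not from the commit message: idr_pre_get() does not return a negative errno; it returns 0 when preallocation failed and non-zero on success, so the old "if (err < 0)" check could never fire. A minimal sketch of the corrected pattern, with hypothetical names:

	#include <linux/idr.h>

	static DEFINE_IDR(example_idr);

	/* Returns 0 on success, -ENOMEM if the idr layer could not preallocate. */
	static int example_reserve(void)
	{
		/* idr_pre_get() returns 1 on success and 0 on allocation failure,
		 * so the failure test is "== 0", not "< 0". */
		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;
		return 0;
	}
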
@ -37,6 +37,7 @@
|
|||
#include <linux/kthread.h>
|
||||
#include <../drivers/ata/ahci.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include "mtip32xx.h"
|
||||
|
||||
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
|
||||
|
@ -85,6 +86,7 @@ static int instance;
|
|||
* allocated in mtip_init().
|
||||
*/
|
||||
static int mtip_major;
|
||||
static struct dentry *dfs_parent;
|
||||
|
||||
static DEFINE_SPINLOCK(rssd_index_lock);
|
||||
static DEFINE_IDA(rssd_index_ida);
|
||||
|
@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
|
|||
}
|
||||
|
||||
/*
|
||||
* Sysfs register/status dump.
|
||||
* Sysfs status dump.
|
||||
*
|
||||
* @dev Pointer to the device structure, passed by the kernrel.
|
||||
* @attr Pointer to the device_attribute structure passed by the kernel.
|
||||
|
@ -2555,71 +2557,6 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
|
|||
* return value
|
||||
* The size, in bytes, of the data copied into buf.
|
||||
*/
|
||||
static ssize_t mtip_hw_show_registers(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
u32 group_allocated;
|
||||
struct driver_data *dd = dev_to_disk(dev)->private_data;
|
||||
int size = 0;
|
||||
int n;
|
||||
|
||||
size += sprintf(&buf[size], "Hardware\n--------\n");
|
||||
size += sprintf(&buf[size], "S ACTive : [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--)
|
||||
size += sprintf(&buf[size], "%08X ",
|
||||
readl(dd->port->s_active[n]));
|
||||
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
size += sprintf(&buf[size], "Command Issue : [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--)
|
||||
size += sprintf(&buf[size], "%08X ",
|
||||
readl(dd->port->cmd_issue[n]));
|
||||
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
size += sprintf(&buf[size], "Completed : [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--)
|
||||
size += sprintf(&buf[size], "%08X ",
|
||||
readl(dd->port->completed[n]));
|
||||
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
|
||||
readl(dd->port->mmio + PORT_IRQ_STAT));
|
||||
size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
|
||||
readl(dd->mmio + HOST_IRQ_STAT));
|
||||
size += sprintf(&buf[size], "\n");
|
||||
|
||||
size += sprintf(&buf[size], "Local\n-----\n");
|
||||
size += sprintf(&buf[size], "Allocated : [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--) {
|
||||
if (sizeof(long) > sizeof(u32))
|
||||
group_allocated =
|
||||
dd->port->allocated[n/2] >> (32*(n&1));
|
||||
else
|
||||
group_allocated = dd->port->allocated[n];
|
||||
size += sprintf(&buf[size], "%08X ", group_allocated);
|
||||
}
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
|
||||
size += sprintf(&buf[size], "Commands in Q: [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--) {
|
||||
if (sizeof(long) > sizeof(u32))
|
||||
group_allocated =
|
||||
dd->port->cmds_to_issue[n/2] >> (32*(n&1));
|
||||
else
|
||||
group_allocated = dd->port->cmds_to_issue[n];
|
||||
size += sprintf(&buf[size], "%08X ", group_allocated);
|
||||
}
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static ssize_t mtip_hw_show_status(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
|
@ -2637,24 +2574,121 @@ static ssize_t mtip_hw_show_status(struct device *dev,
|
|||
return size;
|
||||
}
|
||||
|
||||
static ssize_t mtip_hw_show_flags(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
|
||||
|
||||
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
|
||||
size_t len, loff_t *offset)
|
||||
{
|
||||
struct driver_data *dd = dev_to_disk(dev)->private_data;
|
||||
int size = 0;
|
||||
struct driver_data *dd = (struct driver_data *)f->private_data;
|
||||
char buf[MTIP_DFS_MAX_BUF_SIZE];
|
||||
u32 group_allocated;
|
||||
int size = *offset;
|
||||
int n;
|
||||
|
||||
size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
|
||||
dd->port->flags);
|
||||
size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n",
|
||||
dd->dd_flag);
|
||||
if (!len || size)
|
||||
return 0;
|
||||
|
||||
return size;
|
||||
if (size < 0)
|
||||
return -EINVAL;
|
||||
|
||||
size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--)
|
||||
size += sprintf(&buf[size], "%08X ",
|
||||
readl(dd->port->s_active[n]));
|
||||
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--)
|
||||
size += sprintf(&buf[size], "%08X ",
|
||||
readl(dd->port->cmd_issue[n]));
|
||||
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
size += sprintf(&buf[size], "H/ Completed : [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--)
|
||||
size += sprintf(&buf[size], "%08X ",
|
||||
readl(dd->port->completed[n]));
|
||||
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
|
||||
readl(dd->port->mmio + PORT_IRQ_STAT));
|
||||
size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
|
||||
readl(dd->mmio + HOST_IRQ_STAT));
|
||||
size += sprintf(&buf[size], "\n");
|
||||
|
||||
size += sprintf(&buf[size], "L/ Allocated : [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--) {
|
||||
if (sizeof(long) > sizeof(u32))
|
||||
group_allocated =
|
||||
dd->port->allocated[n/2] >> (32*(n&1));
|
||||
else
|
||||
group_allocated = dd->port->allocated[n];
|
||||
size += sprintf(&buf[size], "%08X ", group_allocated);
|
||||
}
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
|
||||
size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
|
||||
|
||||
for (n = dd->slot_groups-1; n >= 0; n--) {
|
||||
if (sizeof(long) > sizeof(u32))
|
||||
group_allocated =
|
||||
dd->port->cmds_to_issue[n/2] >> (32*(n&1));
|
||||
else
|
||||
group_allocated = dd->port->cmds_to_issue[n];
|
||||
size += sprintf(&buf[size], "%08X ", group_allocated);
|
||||
}
|
||||
size += sprintf(&buf[size], "]\n");
|
||||
|
||||
*offset = size <= len ? size : len;
|
||||
size = copy_to_user(ubuf, buf, *offset);
|
||||
if (size)
|
||||
return -EFAULT;
|
||||
|
||||
return *offset;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
|
||||
static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
|
||||
static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
|
||||
static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
|
||||
size_t len, loff_t *offset)
|
||||
{
|
||||
struct driver_data *dd = (struct driver_data *)f->private_data;
|
||||
char buf[MTIP_DFS_MAX_BUF_SIZE];
|
||||
int size = *offset;
|
||||
|
||||
if (!len || size)
|
||||
return 0;
|
||||
|
||||
if (size < 0)
|
||||
return -EINVAL;
|
||||
|
||||
size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
|
||||
dd->port->flags);
|
||||
size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
|
||||
dd->dd_flag);
|
||||
|
||||
*offset = size <= len ? size : len;
|
||||
size = copy_to_user(ubuf, buf, *offset);
|
||||
if (size)
|
||||
return -EFAULT;
|
||||
|
||||
return *offset;
|
||||
}
|
||||
|
||||
static const struct file_operations mtip_regs_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = simple_open,
|
||||
.read = mtip_hw_read_registers,
|
||||
.llseek = no_llseek,
|
||||
};
|
||||
|
||||
static const struct file_operations mtip_flags_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = simple_open,
|
||||
.read = mtip_hw_read_flags,
|
||||
.llseek = no_llseek,
|
||||
};
|
||||
|
||||
/*
|
||||
* Create the sysfs related attributes.
|
||||
|
@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
|
|||
if (!kobj || !dd)
|
||||
return -EINVAL;
|
||||
|
||||
if (sysfs_create_file(kobj, &dev_attr_registers.attr))
|
||||
dev_warn(&dd->pdev->dev,
|
||||
"Error creating 'registers' sysfs entry\n");
|
||||
if (sysfs_create_file(kobj, &dev_attr_status.attr))
|
||||
dev_warn(&dd->pdev->dev,
|
||||
"Error creating 'status' sysfs entry\n");
|
||||
if (sysfs_create_file(kobj, &dev_attr_flags.attr))
|
||||
dev_warn(&dd->pdev->dev,
|
||||
"Error creating 'flags' sysfs entry\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
|
|||
if (!kobj || !dd)
|
||||
return -EINVAL;
|
||||
|
||||
sysfs_remove_file(kobj, &dev_attr_registers.attr);
|
||||
sysfs_remove_file(kobj, &dev_attr_status.attr);
|
||||
sysfs_remove_file(kobj, &dev_attr_flags.attr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mtip_hw_debugfs_init(struct driver_data *dd)
|
||||
{
|
||||
if (!dfs_parent)
|
||||
return -1;
|
||||
|
||||
dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
|
||||
if (IS_ERR_OR_NULL(dd->dfs_node)) {
|
||||
dev_warn(&dd->pdev->dev,
|
||||
"Error creating node %s under debugfs\n",
|
||||
dd->disk->disk_name);
|
||||
dd->dfs_node = NULL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
|
||||
&mtip_flags_fops);
|
||||
debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
|
||||
&mtip_regs_fops);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mtip_hw_debugfs_exit(struct driver_data *dd)
|
||||
{
|
||||
debugfs_remove_recursive(dd->dfs_node);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Perform any init/resume time hardware setup
|
||||
*
|
||||
|
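Usage note, mine rather than part of the patch: with this change the per-device dumps move from sysfs attributes to debugfs files named "flags" and "registers" under an "rssd" directory keyed by disk name. A minimal user-space sketch for reading one of them, assuming debugfs is mounted at /sys/kernel/debug and a device named rssd0 exists:

	#include <stdio.h>

	int main(void)
	{
		/* Path assumed from the patch: dfs_parent "rssd" plus the disk_name node. */
		FILE *f = fopen("/sys/kernel/debug/rssd/rssd0/flags", "r");
		char line[256];

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
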
@ -3730,6 +3784,7 @@ static int mtip_block_initialize(struct driver_data *dd)
|
|||
mtip_hw_sysfs_init(dd, kobj);
|
||||
kobject_put(kobj);
|
||||
}
|
||||
mtip_hw_debugfs_init(dd);
|
||||
|
||||
if (dd->mtip_svc_handler) {
|
||||
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
|
||||
|
@ -3755,6 +3810,8 @@ static int mtip_block_initialize(struct driver_data *dd)
|
|||
return rv;
|
||||
|
||||
kthread_run_error:
|
||||
mtip_hw_debugfs_exit(dd);
|
||||
|
||||
/* Delete our gendisk. This also removes the device from /dev */
|
||||
del_gendisk(dd->disk);
|
||||
|
||||
|
@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
|
|||
kobject_put(kobj);
|
||||
}
|
||||
}
|
||||
mtip_hw_debugfs_exit(dd);
|
||||
|
||||
/*
|
||||
* Delete our gendisk structure. This also removes the device
|
||||
|
@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
|
|||
}
|
||||
mtip_major = error;
|
||||
|
||||
if (!dfs_parent) {
|
||||
dfs_parent = debugfs_create_dir("rssd", NULL);
|
||||
if (IS_ERR_OR_NULL(dfs_parent)) {
|
||||
printk(KERN_WARNING "Error creating debugfs parent\n");
|
||||
dfs_parent = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Register our PCI operations. */
|
||||
error = pci_register_driver(&mtip_pci_driver);
|
||||
if (error)
|
||||
if (error) {
|
||||
debugfs_remove(dfs_parent);
|
||||
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
|
|||
*/
|
||||
static void __exit mtip_exit(void)
|
||||
{
|
||||
debugfs_remove_recursive(dfs_parent);
|
||||
|
||||
/* Release the allocated major block device number. */
|
||||
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
|
||||
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
#include <linux/ata.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/genhd.h>
|
||||
#include <linux/version.h>
|
||||
|
||||
/* Offset of Subsystem Device ID in pci confoguration space */
|
||||
#define PCI_SUBSYSTEM_DEVICEID 0x2E
|
||||
|
@ -111,6 +110,8 @@
|
|||
#define dbg_printk(format, arg...)
|
||||
#endif
|
||||
|
||||
#define MTIP_DFS_MAX_BUF_SIZE 1024
|
||||
|
||||
#define __force_bit2int (unsigned int __force)
|
||||
|
||||
enum {
|
||||
|
@ -447,6 +448,8 @@ struct driver_data {
|
|||
unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
|
||||
|
||||
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
|
||||
|
||||
struct dentry *dfs_node;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
Some files were not shown because too many files have changed in this diff.