Merge branch 'akpm' (patches from Andrew)
Merge yet more updates from Andrew Morton:
 "This is the post-linux-next queue. Material which was based on or
  dependent upon material which was in -next.

  69 patches.

  Subsystems affected by this patch series: mm (migration and zsmalloc),
  sysctl, proc, and lib"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (69 commits)
  mm: hide the FRONTSWAP Kconfig symbol
  frontswap: remove support for multiple ops
  mm: mark swap_lock and swap_active_head static
  frontswap: simplify frontswap_register_ops
  frontswap: remove frontswap_test
  mm: simplify try_to_unuse
  frontswap: remove the frontswap exports
  frontswap: simplify frontswap_init
  frontswap: remove frontswap_curr_pages
  frontswap: remove frontswap_shrink
  frontswap: remove frontswap_tmem_exclusive_gets
  frontswap: remove frontswap_writethrough
  mm: remove cleancache
  lib/stackdepot: always do filter_irq_stacks() in stack_depot_save()
  lib/stackdepot: allow optional init and stack_table allocation by kvmalloc()
  proc: remove PDE_DATA() completely
  fs: proc: store PDE()->data into inode->i_private
  zsmalloc: replace get_cpu_var with local_lock
  zsmalloc: replace per zpage lock with pool->migrate_lock
  locking/rwlocks: introduce write_lock_nested
  ...
commit 1c52283265
@ -1,296 +0,0 @@
.. _cleancache:

==========
Cleancache
==========

Motivation
==========

Cleancache is a new optional feature provided by the VFS layer that
potentially dramatically increases page cache effectiveness for
many workloads in many environments at a negligible cost.

Cleancache can be thought of as a page-granularity victim cache for clean
pages that the kernel's pageframe replacement algorithm (PFRA) would like
to keep around, but can't since there isn't enough memory. So when the
PFRA "evicts" a page, it first attempts to use cleancache code to
put the data contained in that page into "transcendent memory", memory
that is not directly accessible or addressable by the kernel and is
of unknown and possibly time-varying size.

Later, when a cleancache-enabled filesystem wishes to access a page
in a file on disk, it first checks cleancache to see if it already
contains it; if it does, the page of data is copied into the kernel
and a disk access is avoided.

Transcendent memory "drivers" for cleancache are currently implemented
in Xen (using hypervisor memory) and zcache (using in-kernel compressed
memory) and other implementations are in development.

:ref:`FAQs <faq>` are included below.

Implementation Overview
=======================

A cleancache "backend" that provides transcendent memory registers itself
to the kernel's cleancache "frontend" by calling cleancache_register_ops,
passing a pointer to a cleancache_ops structure with funcs set appropriately.
The functions provided must conform to certain semantics as follows:

Most important, cleancache is "ephemeral". Pages which are copied into
cleancache have an indefinite lifetime which is completely unknowable
by the kernel and so may or may not still be in cleancache at any later time.
Thus, as its name implies, cleancache is not suitable for dirty pages.
Cleancache has complete discretion over what pages to preserve and what
pages to discard and when.

Mounting a cleancache-enabled filesystem should call "init_fs" to obtain a
pool id which, if positive, must be saved in the filesystem's superblock;
a negative return value indicates failure. A "put_page" will copy a
(presumably about-to-be-evicted) page into cleancache and associate it with
the pool id, a file key, and a page index into the file. (The combination
of a pool id, a file key, and an index is sometimes called a "handle".)
A "get_page" will copy the page, if found, from cleancache into kernel memory.
An "invalidate_page" will ensure the page no longer is present in cleancache;
an "invalidate_inode" will invalidate all pages associated with the specified
file; and, when a filesystem is unmounted, an "invalidate_fs" will invalidate
all pages in all files specified by the given pool id and also surrender
the pool id.

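For concreteness, here is a minimal sketch of what registering such a
backend could look like. It is illustrative only -- it is not the Xen or
zcache driver, the ``example_*`` names are made up, and the argument lists
are abbreviated relative to what include/linux/cleancache.h actually
declared::

    /*
     * Illustrative sketch only (not the removed Xen/zcache backends).
     * The callback names come from the text above; the exact prototypes
     * lived in include/linux/cleancache.h and are abbreviated here.
     */
    #include <linux/cleancache.h>
    #include <linux/module.h>

    static int example_init_fs(size_t pagesize)
    {
            /* Hand out a pool id; a negative id means "no pool". */
            return 0;
    }

    static int example_get_page(int pool_id, struct cleancache_filekey key,
                                pgoff_t index, struct page *page)
    {
            /* Copy cached data back into @page if the handle is present. */
            return -1;      /* not found: caller falls back to disk */
    }

    static void example_put_page(int pool_id, struct cleancache_filekey key,
                                 pgoff_t index, struct page *page)
    {
            /* Copy @page into the backend; it may be dropped at any time. */
    }

    static void example_invalidate_page(int pool_id,
                                        struct cleancache_filekey key,
                                        pgoff_t index)
    {
            /* Forget this handle, if present. */
    }

    static struct cleancache_ops example_ops = {
            .init_fs         = example_init_fs,
            .get_page        = example_get_page,
            .put_page        = example_put_page,
            .invalidate_page = example_invalidate_page,
            /* .init_shared_fs, .invalidate_inode, .invalidate_fs omitted */
    };

    static int __init example_backend_init(void)
    {
            return cleancache_register_ops(&example_ops);
    }
    module_init(example_backend_init);
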
An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache
|
||||
to treat the pool as shared using a 128-bit UUID as a key. On systems
|
||||
that may run multiple kernels (such as hard partitioned or virtualized
|
||||
systems) that may share a clustered filesystem, and where cleancache
|
||||
may be shared among those kernels, calls to init_shared_fs that specify the
|
||||
same UUID will receive the same pool id, thus allowing the pages to
|
||||
be shared. Note that any security requirements must be imposed outside
|
||||
of the kernel (e.g. by "tools" that control cleancache). Or a
|
||||
cleancache implementation can simply disable shared_init by always
|
||||
returning a negative value.
|
||||
|
||||
If a get_page is successful on a non-shared pool, the page is invalidated
|
||||
(thus making cleancache an "exclusive" cache). On a shared pool, the page
|
||||
is NOT invalidated on a successful get_page so that it remains accessible to
|
||||
other sharers. The kernel is responsible for ensuring coherency between
|
||||
cleancache (shared or not), the page cache, and the filesystem, using
|
||||
cleancache invalidate operations as required.
|
||||
|
||||
Note that cleancache must enforce put-put-get coherency and get-get
|
||||
coherency. For the former, if two puts are made to the same handle but
|
||||
with different data, say AAA by the first put and BBB by the second, a
|
||||
subsequent get can never return the stale data (AAA). For get-get coherency,
|
||||
if a get for a given handle fails, subsequent gets for that handle will
|
||||
never succeed unless preceded by a successful put with that handle.
|
||||
|
||||
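These two rules are easy to state but easy to misread; the following toy
userspace model (plain C, nothing kernel-specific, with a single made-up
handle and single-slot "cache") is one way to read them::

    /* Toy single-handle model of the coherency rules above; illustrative
     * only, not kernel code. */
    #include <assert.h>
    #include <string.h>

    static char slot[4];
    static int present;                 /* 0 = handle not cached */

    static void put(const char *data)   /* may silently be dropped later */
    {
            strcpy(slot, data);
            present = 1;
    }

    static int get(char *out)           /* < 0 means "not found" */
    {
            if (!present)
                    return -1;
            strcpy(out, slot);
            return 0;
    }

    int main(void)
    {
            char buf[4];

            put("AAA");
            put("BBB");                 /* duplicate put, new data */
            if (get(buf) == 0)          /* put-put-get: stale "AAA" never comes back */
                    assert(strcmp(buf, "AAA") != 0);

            present = 0;                /* the cache chose to discard the page */
            assert(get(buf) == -1);     /* get-get: once a get fails ... */
            assert(get(buf) == -1);     /* ... it keeps failing ... */
            put("CCC");
            assert(get(buf) == 0);      /* ... until a new put succeeds */
            return 0;
    }
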
Last, cleancache provides no SMP serialization guarantees; if two
different Linux threads are simultaneously putting and invalidating a page
with the same handle, the results are indeterminate. Callers must
lock the page to ensure serial behavior.

Cleancache Performance Metrics
==============================

If properly configured, monitoring of cleancache is done via debugfs in
the ``/sys/kernel/debug/cleancache`` directory. The effectiveness of cleancache
can be measured (across all filesystems) with:

``succ_gets``
    number of gets that were successful

``failed_gets``
    number of gets that failed

``puts``
    number of puts attempted (all "succeed")

``invalidates``
    number of invalidates attempted

A backend implementation may provide additional metrics.

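As one hypothetical example of such a backend-specific metric, a backend
could publish its own counter alongside the core statistics using the
ordinary debugfs helpers (the names below are made up)::

    #include <linux/debugfs.h>

    static u64 example_evictions;       /* bumped by the backend */

    static void example_backend_debugfs_init(void)
    {
            struct dentry *dir;

            dir = debugfs_create_dir("example_backend", NULL);
            debugfs_create_u64("evictions", 0444, dir, &example_evictions);
    }
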
.. _faq:

FAQ
===

* Where's the value? (Andrew Morton)

Cleancache provides a significant performance benefit to many workloads
in many environments with negligible overhead by improving the
effectiveness of the pagecache. Clean pagecache pages are
saved in transcendent memory (RAM that is otherwise not directly
addressable to the kernel); fetching those pages later avoids "refaults"
and thus disk reads.

Cleancache (and its sister code "frontswap") provide interfaces for
this transcendent memory (aka "tmem"), which conceptually lies between
fast kernel-directly-addressable RAM and slower DMA/asynchronous devices.
Disallowing direct kernel or userland reads/writes to tmem
is ideal when data is transformed to a different form and size (such
as with compression) or secretly moved (as might be useful for write-
balancing for some RAM-like devices). Evicted page-cache pages (and
swap pages) are a great use for this kind of slower-than-RAM-but-much-
faster-than-disk transcendent memory, and the cleancache (and frontswap)
"page-object-oriented" specification provides a nice way to read and
write -- and indirectly "name" -- the pages.

In the virtual case, the whole point of virtualization is to statistically
multiplex physical resources across the varying demands of multiple
virtual machines. This is really hard to do with RAM and efforts to
do it well with no kernel change have essentially failed (except in some
well-publicized special-case workloads). Cleancache -- and frontswap --
with a fairly small impact on the kernel, provide a huge amount
of flexibility for more dynamic, flexible RAM multiplexing.
Specifically, the Xen Transcendent Memory backend allows otherwise
"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
virtual machines, but the pages can be compressed and deduplicated to
optimize RAM utilization. And when guest OS's are induced to surrender
underutilized RAM (e.g. with "self-ballooning"), page cache pages
are the first to go, and cleancache allows those pages to be
saved and reclaimed if overall host system memory conditions allow.

And the identical interface used for cleancache can be used in
physical systems as well. The zcache driver acts as a memory-hungry
device that stores pages of data in a compressed state. And
the proposed "RAMster" driver shares RAM across multiple physical
systems.

* Why does cleancache have its sticky fingers so deep inside the
  filesystems and VFS? (Andrew Morton and Christoph Hellwig)

The core hooks for cleancache in VFS are in most cases a single line
and the minimum set are placed precisely where needed to maintain
coherency (via cleancache_invalidate operations) between cleancache,
the page cache, and disk. All hooks compile into nothingness if
cleancache is config'ed off and turn into a function-pointer-
compare-to-NULL if config'ed on but no backend claims the ops
functions, or to a compare-struct-element-to-negative if a
backend claims the ops functions but a filesystem doesn't enable
cleancache.

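The rough shape of such a hook wrapper (a simplified sketch, not the
removed include/linux/cleancache.h verbatim) is::

    #ifdef CONFIG_CLEANCACHE
    extern struct cleancache_ops *cleancache_ops;   /* NULL: no backend yet */
    void __cleancache_put_page(struct page *page);

    static inline void cleancache_put_page(struct page *page)
    {
            if (cleancache_ops)                 /* pointer-compare-to-NULL */
                    __cleancache_put_page(page);    /* checks the per-sb pool
                                                     * id; negative means the
                                                     * fs did not opt in */
    }
    #else
    static inline void cleancache_put_page(struct page *page)
    {
    }
    #endif  /* CONFIG_CLEANCACHE */
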
Some filesystems are built entirely on top of VFS and the hooks
in VFS are sufficient, so don't require an "init_fs" hook; the
initial implementation of cleancache didn't provide this hook.
But for some filesystems (such as btrfs), the VFS hooks are
incomplete and one or more hooks in fs-specific code are required.
And for some other filesystems, such as tmpfs, cleancache may
be counterproductive. So it seemed prudent to require a filesystem
to "opt in" to use cleancache, which requires adding a hook in
each filesystem. Not all filesystems are supported by cleancache
only because they haven't been tested. The existing set should
be sufficient to validate the concept, the opt-in approach means
that untested filesystems are not affected, and the hooks in the
existing filesystems should make it very easy to add more
filesystems in the future.

The total impact of the hooks to existing fs and mm files is only
about 40 lines added (not counting comments and blank lines).

* Why not make cleancache asynchronous and batched so it can more
  easily interface with real devices with DMA instead of copying each
  individual page? (Minchan Kim)

The one-page-at-a-time copy semantics simplifies the implementation
on both the frontend and backend and also allows the backend to
do fancy things on-the-fly like page compression and
page deduplication. And since the data is "gone" (copied into/out
of the pageframe) before the cleancache get/put call returns,
a great deal of race conditions and potential coherency issues
are avoided. While the interface seems odd for a "real device"
or for real kernel-addressable RAM, it makes perfect sense for
transcendent memory.

* Why is non-shared cleancache "exclusive"? And where is the
  page "invalidated" after a "get"? (Minchan Kim)

The main reason is to free up space in transcendent memory and
to avoid unnecessary cleancache_invalidate calls. If you want inclusive,
the page can be "put" immediately following the "get". If
put-after-get for inclusive becomes common, the interface could
be easily extended to add a "get_no_invalidate" call.

The invalidate is done by the cleancache backend implementation.

* What's the performance impact?

Performance analysis has been presented at OLS'09 and LCA'10.
Briefly, performance gains can be significant on most workloads,
especially when memory pressure is high (e.g. when RAM is
overcommitted in a virtual workload); and because the hooks are
invoked primarily in place of or in addition to a disk read/write,
overhead is negligible even in worst case workloads. Basically
cleancache replaces I/O with memory-copy-CPU-overhead; on older
single-core systems with slow memory-copy speeds, cleancache
has little value, but in newer multicore machines, especially
consolidated/virtualized machines, it has great value.

* How do I add cleancache support for filesystem X? (Boaz Harrash)

Filesystems that are well-behaved and conform to certain
restrictions can utilize cleancache simply by making a call to
cleancache_init_fs at mount time (see the sketch after the list of
points below). Unusual, misbehaving, or
poorly layered filesystems must either add additional hooks
and/or undergo extensive additional testing... or should just
not enable the optional cleancache.

Some points for a filesystem to consider:

  - The FS should be block-device-based (e.g. a ram-based FS such
    as tmpfs should not enable cleancache)
  - To ensure coherency/correctness, the FS must ensure that all
    file removal or truncation operations either go through VFS or
    add hooks to do the equivalent cleancache "invalidate" operations
  - To ensure coherency/correctness, either inode numbers must
    be unique across the lifetime of the on-disk file OR the
    FS must provide an "encode_fh" function.
  - The FS must call the VFS superblock alloc and deactivate routines
    or add hooks to do the equivalent cleancache calls done there.
  - To maximize performance, all pages fetched from the FS should
    go through the do_mpage_readpage routine or the FS should add
    hooks to do the equivalent (cf. btrfs)
  - Currently, the FS blocksize must be the same as PAGESIZE. This
    is not an architectural restriction, but no backends currently
    support anything different.
  - A clustered FS should invoke the "shared_init_fs" cleancache
    hook to get best performance for some backends.

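As promised above, a minimal mount-time sketch for a made-up "examplefs".
The cleancache_init_fs() prototype changed over the feature's lifetime;
the form used here (taking the superblock and recording the pool id in
``sb->cleancache_poolid``) follows the later in-tree one, so treat it as
an assumption rather than a reference::

    #include <linux/fs.h>
    #include <linux/cleancache.h>

    static int examplefs_fill_super(struct super_block *sb, void *data,
                                    int silent)
    {
            /* ... normal superblock setup elided ... */

            /*
             * Opt in: ask the backend (if any) for a pool id.  A negative
             * pool id simply means "no cleancache for this mount" and the
             * hooks stay out of the way.
             */
            cleancache_init_fs(sb);

            return 0;
    }
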
* Why not use the KVA of the inode as the key? (Christoph Hellwig)

If cleancache would use the inode virtual address instead of
inode/filehandle, the pool id could be eliminated. But, this
won't work because cleancache retains pagecache data pages
persistently even when the inode has been pruned from the
inode unused list, and only invalidates the data page if the file
gets removed/truncated. So if cleancache used the inode kva,
there would be potential coherency issues if/when the inode
kva is reused for a different file. Alternately, if cleancache
invalidated the pages when the inode kva was freed, much of the value
of cleancache would be lost because the cache of pages in cleancache
is potentially much larger than the kernel pagecache and is most
useful if the pages survive inode cache removal.

* Why is a global variable required?

The cleancache_enabled flag is checked in all of the frequently-used
cleancache hooks. The alternative is a function call to check a static
variable. Since cleancache is enabled dynamically at runtime, systems
that don't enable cleancache would suffer thousands (possibly
tens-of-thousands) of unnecessary function calls per second. So the
global variable allows cleancache to be enabled by default at compile
time, but have insignificant performance impact when cleancache remains
disabled at runtime.

* Does cleancache work with KVM?

The memory model of KVM is sufficiently different that a cleancache
backend may have less value for KVM. This remains to be tested,
especially in an overcommitted system.

* Does cleancache work in userspace? It sounds useful for
  memory hungry caches like web browsers. (Jamie Lokier)

No plans yet, though we agree it sounds useful, at least for
apps that bypass the page cache (e.g. O_DIRECT).

Last updated: Dan Magenheimer, April 13 2011

@ -8,12 +8,6 @@ Frontswap provides a "transcendent memory" interface for swap pages.
In some environments, dramatic performance savings may be obtained because
swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.

(Note, frontswap -- and :ref:`cleancache` (merged at 3.0) -- are the "frontends"
and the only necessary changes to the core kernel for transcendent memory;
all other supporting code -- the "backends" -- is implemented as drivers.
See the LWN.net article `Transcendent memory in a nutshell`_
for a detailed overview of frontswap and related kernel parts)

.. _Transcendent memory in a nutshell: https://lwn.net/Articles/454795/

Frontswap is so named because it can be thought of as the opposite of

@ -45,12 +39,6 @@ a disk write and, if the data is later read back, a disk read are avoided.
If a store returns failure, transcendent memory has rejected the data, and the
page can be written to swap as usual.

If a backend chooses, frontswap can be configured as a "writethrough
cache" by calling frontswap_writethrough(). In this mode, the reduction
in swap device writes is lost (and also a non-trivial performance advantage)
in order to allow the backend to arbitrarily "reclaim" space used to
store frontswap pages to more completely manage its memory usage.

Note that if a page is stored and the page already exists in transcendent memory
(a "duplicate" store), either the store succeeds and the data is overwritten,
or the store fails AND the page is invalidated. This ensures stale data may

@ -87,11 +75,9 @@ This interface is ideal when data is transformed to a different form
and size (such as with compression) or secretly moved (as might be
useful for write-balancing for some RAM-like devices). Swap pages (and
evicted page-cache pages) are a great use for this kind of slower-than-RAM-
but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and
cleancache) interface to transcendent memory provides a nice way to read
and write -- and indirectly "name" -- the pages.
but-much-faster-than-disk "pseudo-RAM device".

Frontswap -- and cleancache -- with a fairly small impact on the kernel,
Frontswap with a fairly small impact on the kernel,
provides a huge amount of flexibility for more dynamic, flexible RAM
utilization in various system configurations:

@ -269,19 +255,6 @@ the old data and ensure that it is no longer accessible. Since the
swap subsystem then writes the new data to the read swap device,
this is the correct course of action to ensure coherency.

* What is frontswap_shrink for?

When the (non-frontswap) swap subsystem swaps out a page to a real
swap device, that page is only taking up low-value pre-allocated disk
space. But if frontswap has placed a page in transcendent memory, that
page may be taking up valuable real estate. The frontswap_shrink
routine allows code outside of the swap subsystem to force pages out
of the memory managed by frontswap and back into kernel-addressable memory.
For example, in RAMster, a "suction driver" thread will attempt
to "repatriate" pages sent to a remote machine back to the local machine;
this is driven using the frontswap_shrink mechanism when memory pressure
subsides.

* Why does the frontswap patch create the new include file swapfile.h?

The frontswap code depends on some swap-subsystem-internal data

@ -15,7 +15,6 @@ algorithms. If you are looking for advice on simply allocating memory, see the
active_mm
arch_pgtable_helpers
balance
cleancache
damon/index
free_page_reporting
frontswap

@ -4705,13 +4705,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/cla
F:	include/linux/cfi.h
F:	kernel/cfi.c

CLEANCACHE API
M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L:	linux-kernel@vger.kernel.org
S:	Maintained
F:	include/linux/cleancache.h
F:	mm/cleancache.c

CLK API
M:	Russell King <linux@armlinux.org.uk>
L:	linux-clk@vger.kernel.org

@ -83,14 +83,14 @@ static int srm_env_proc_show(struct seq_file *m, void *v)
static int srm_env_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, srm_env_proc_show, PDE_DATA(inode));
	return single_open(file, srm_env_proc_show, pde_data(inode));
}

static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
				  size_t count, loff_t *pos)
{
	int res;
	unsigned long id = (unsigned long)PDE_DATA(file_inode(file));
	unsigned long id = (unsigned long)pde_data(file_inode(file));
	char *buf = (char *) __get_free_page(GFP_USER);
	unsigned long ret1, ret2;

@ -31,7 +31,6 @@ CONFIG_ARCH_BCM2835=y
|
|||
CONFIG_PREEMPT_VOLUNTARY=y
|
||||
CONFIG_AEABI=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_CMA=y
|
||||
CONFIG_SECCOMP=y
|
||||
CONFIG_KEXEC=y
|
||||
|
|
|
@ -27,7 +27,6 @@ CONFIG_PCIE_QCOM=y
|
|||
CONFIG_SMP=y
|
||||
CONFIG_PREEMPT=y
|
||||
CONFIG_HIGHMEM=y
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ARM_APPENDED_DTB=y
|
||||
CONFIG_ARM_ATAG_DTB_COMPAT=y
|
||||
CONFIG_CPU_IDLE=y
|
||||
|
|
|
@ -13,7 +13,7 @@ struct buffer {
|
|||
static ssize_t atags_read(struct file *file, char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct buffer *b = PDE_DATA(file_inode(file));
|
||||
struct buffer *b = pde_data(file_inode(file));
|
||||
return simple_read_from_buffer(buf, count, ppos, b->data, b->size);
|
||||
}
|
||||
|
||||
|
|
|
@ -1005,7 +1005,7 @@ static int __init noalign_setup(char *__unused)
|
|||
__setup("noalign", noalign_setup);
|
||||
|
||||
/*
|
||||
* This needs to be done after sysctl_init, otherwise sys/ will be
|
||||
* This needs to be done after sysctl_init_bases(), otherwise sys/ will be
|
||||
* overwritten. Actually, this shouldn't be in sys/ at all since
|
||||
* it isn't a sysctl, and it doesn't contain sysctl information.
|
||||
* We now locate it in /proc/cpu/alignment instead.
|
||||
|
|
|
@ -282,7 +282,7 @@ salinfo_event_open(struct inode *inode, struct file *file)
|
|||
static ssize_t
|
||||
salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct salinfo_data *data = PDE_DATA(file_inode(file));
|
||||
struct salinfo_data *data = pde_data(file_inode(file));
|
||||
char cmd[32];
|
||||
size_t size;
|
||||
int i, n, cpu = -1;
|
||||
|
@ -340,7 +340,7 @@ static const struct proc_ops salinfo_event_proc_ops = {
|
|||
static int
|
||||
salinfo_log_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct salinfo_data *data = PDE_DATA(inode);
|
||||
struct salinfo_data *data = pde_data(inode);
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
@ -365,7 +365,7 @@ salinfo_log_open(struct inode *inode, struct file *file)
|
|||
static int
|
||||
salinfo_log_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct salinfo_data *data = PDE_DATA(inode);
|
||||
struct salinfo_data *data = pde_data(inode);
|
||||
|
||||
if (data->state == STATE_NO_DATA) {
|
||||
vfree(data->log_buffer);
|
||||
|
@ -433,7 +433,7 @@ salinfo_log_new_read(int cpu, struct salinfo_data *data)
|
|||
static ssize_t
|
||||
salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct salinfo_data *data = PDE_DATA(file_inode(file));
|
||||
struct salinfo_data *data = pde_data(file_inode(file));
|
||||
u8 *buf;
|
||||
u64 bufsize;
|
||||
|
||||
|
@ -494,7 +494,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
|
|||
static ssize_t
|
||||
salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct salinfo_data *data = PDE_DATA(file_inode(file));
|
||||
struct salinfo_data *data = pde_data(file_inode(file));
|
||||
char cmd[32];
|
||||
size_t size;
|
||||
u32 offset;
|
||||
|
|
|
@ -45,7 +45,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -41,7 +41,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -48,7 +48,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -38,7 +38,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -40,7 +40,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -39,7 +39,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -59,7 +59,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -37,7 +37,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -38,7 +38,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -39,7 +39,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -35,7 +35,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -35,7 +35,6 @@ CONFIG_IOSCHED_BFQ=m
|
|||
CONFIG_BINFMT_AOUT=m
|
||||
CONFIG_BINFMT_MISC=m
|
||||
# CONFIG_COMPACTION is not set
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_ZPOOL=m
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
|
|
@ -25,7 +25,7 @@ static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes
|
|||
loff_t *ppos)
|
||||
{
|
||||
return simple_read_from_buffer(buf, nbytes, ppos,
|
||||
PDE_DATA(file_inode(file)), PAGE_SIZE);
|
||||
pde_data(file_inode(file)), PAGE_SIZE);
|
||||
}
|
||||
|
||||
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
|
||||
|
@ -34,7 +34,7 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
|
|||
return -EINVAL;
|
||||
|
||||
remap_pfn_range(vma, vma->vm_start,
|
||||
__pa(PDE_DATA(file_inode(file))) >> PAGE_SHIFT,
|
||||
__pa(pde_data(file_inode(file))) >> PAGE_SHIFT,
|
||||
PAGE_SIZE, vma->vm_page_prot);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -96,7 +96,6 @@ CONFIG_MEMORY_HOTPLUG=y
|
|||
CONFIG_MEMORY_HOTREMOVE=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_TRANSPARENT_HUGEPAGE=y
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_FRONTSWAP=y
|
||||
CONFIG_CMA_DEBUG=y
|
||||
CONFIG_CMA_DEBUGFS=y
|
||||
|
|
|
@ -91,7 +91,6 @@ CONFIG_MEMORY_HOTPLUG=y
|
|||
CONFIG_MEMORY_HOTREMOVE=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_TRANSPARENT_HUGEPAGE=y
|
||||
CONFIG_CLEANCACHE=y
|
||||
CONFIG_FRONTSWAP=y
|
||||
CONFIG_CMA_SYSFS=y
|
||||
CONFIG_CMA_AREAS=7
|
||||
|
|
|
@ -140,7 +140,7 @@ static int alignment_proc_open(struct inode *inode, struct file *file)
|
|||
static ssize_t alignment_proc_write(struct file *file,
|
||||
const char __user *buffer, size_t count, loff_t *pos)
|
||||
{
|
||||
int *data = PDE_DATA(file_inode(file));
|
||||
int *data = pde_data(file_inode(file));
|
||||
char mode;
|
||||
|
||||
if (count > 0) {
|
||||
|
@ -161,7 +161,7 @@ static const struct proc_ops alignment_proc_ops = {
|
|||
};
|
||||
|
||||
/*
|
||||
* This needs to be done after sysctl_init, otherwise sys/ will be
|
||||
* This needs to be done after sysctl_init_bases(), otherwise sys/ will be
|
||||
* overwritten. Actually, this shouldn't be in sys/ at all since
|
||||
* it isn't a sysctl, and it doesn't contain sysctl information.
|
||||
* We now locate it in /proc/cpu/alignment instead.
|
||||
|
|
|
@ -208,7 +208,7 @@ static int simdisk_detach(struct simdisk *dev)
|
|||
static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
|
||||
size_t size, loff_t *ppos)
|
||||
{
|
||||
struct simdisk *dev = PDE_DATA(file_inode(file));
|
||||
struct simdisk *dev = pde_data(file_inode(file));
|
||||
const char *s = dev->filename;
|
||||
if (s) {
|
||||
ssize_t n = simple_read_from_buffer(buf, size, ppos,
|
||||
|
@ -225,7 +225,7 @@ static ssize_t proc_write_simdisk(struct file *file, const char __user *buf,
|
|||
size_t count, loff_t *ppos)
|
||||
{
|
||||
char *tmp = memdup_user_nul(buf, count);
|
||||
struct simdisk *dev = PDE_DATA(file_inode(file));
|
||||
struct simdisk *dev = pde_data(file_inode(file));
|
||||
int err;
|
||||
|
||||
if (IS_ERR(tmp))
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
#include <linux/pseudo_fs.h>
|
||||
#include <linux/uio.h>
|
||||
#include <linux/namei.h>
|
||||
#include <linux/cleancache.h>
|
||||
#include <linux/part_stat.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include "../fs/internal.h"
|
||||
|
@ -88,10 +87,6 @@ void invalidate_bdev(struct block_device *bdev)
|
|||
lru_add_drain_all(); /* make sure all lru add caches are flushed */
|
||||
invalidate_mapping_pages(mapping, 0, -1);
|
||||
}
|
||||
/* 99% of the time, we don't need to flush the cleancache on the bdev.
|
||||
* But, for the strange corners, lets be cautious
|
||||
*/
|
||||
cleancache_invalidate_inode(mapping);
|
||||
}
|
||||
EXPORT_SYMBOL(invalidate_bdev);
|
||||
|
||||
|
|
|
@ -127,7 +127,7 @@ static int
|
|||
acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, acpi_system_wakeup_device_seq_show,
|
||||
PDE_DATA(inode));
|
||||
pde_data(inode));
|
||||
}
|
||||
|
||||
static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
|
||||
|
|
|
@ -199,11 +199,16 @@ static struct class firmware_class = {
|
|||
|
||||
int register_sysfs_loader(void)
|
||||
{
|
||||
return class_register(&firmware_class);
|
||||
int ret = class_register(&firmware_class);
|
||||
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
return register_firmware_config_sysctl();
|
||||
}
|
||||
|
||||
void unregister_sysfs_loader(void)
|
||||
{
|
||||
unregister_firmware_config_sysctl();
|
||||
class_unregister(&firmware_class);
|
||||
}
|
||||
|
||||
|
|
|
@ -42,6 +42,17 @@ void fw_fallback_set_default_timeout(void);
|
|||
|
||||
int register_sysfs_loader(void);
|
||||
void unregister_sysfs_loader(void);
|
||||
#ifdef CONFIG_SYSCTL
|
||||
extern int register_firmware_config_sysctl(void);
|
||||
extern void unregister_firmware_config_sysctl(void);
|
||||
#else
|
||||
static inline int register_firmware_config_sysctl(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void unregister_firmware_config_sysctl(void) { }
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
|
||||
#else /* CONFIG_FW_LOADER_USER_HELPER */
|
||||
static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
|
||||
struct device *device,
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
#include <linux/kconfig.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/security.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/umh.h>
|
||||
|
@ -24,7 +25,7 @@ struct firmware_fallback_config fw_fallback_config = {
|
|||
EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
struct ctl_table firmware_config_table[] = {
|
||||
static struct ctl_table firmware_config_table[] = {
|
||||
{
|
||||
.procname = "force_sysfs_fallback",
|
||||
.data = &fw_fallback_config.force_sysfs_fallback,
|
||||
|
@ -45,4 +46,24 @@ struct ctl_table firmware_config_table[] = {
|
|||
},
|
||||
{ }
|
||||
};
|
||||
#endif
|
||||
|
||||
static struct ctl_table_header *firmware_config_sysct_table_header;
|
||||
int register_firmware_config_sysctl(void)
|
||||
{
|
||||
firmware_config_sysct_table_header =
|
||||
register_sysctl("kernel/firmware_config",
|
||||
firmware_config_table);
|
||||
if (!firmware_config_sysct_table_header)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
|
||||
|
||||
void unregister_firmware_config_sysctl(void)
|
||||
{
|
||||
unregister_sysctl_table(firmware_config_sysct_table_header);
|
||||
firmware_config_sysct_table_header = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
|
||||
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
|
|
|
@ -3691,27 +3691,6 @@ static struct ctl_table cdrom_table[] = {
|
|||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
static struct ctl_table cdrom_cdrom_table[] = {
|
||||
{
|
||||
.procname = "cdrom",
|
||||
.maxlen = 0,
|
||||
.mode = 0555,
|
||||
.child = cdrom_table,
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
/* Make sure that /proc/sys/dev is there */
|
||||
static struct ctl_table cdrom_root_table[] = {
|
||||
{
|
||||
.procname = "dev",
|
||||
.maxlen = 0,
|
||||
.mode = 0555,
|
||||
.child = cdrom_cdrom_table,
|
||||
},
|
||||
{ }
|
||||
};
|
||||
static struct ctl_table_header *cdrom_sysctl_header;
|
||||
|
||||
static void cdrom_sysctl_register(void)
|
||||
|
@ -3721,7 +3700,7 @@ static void cdrom_sysctl_register(void)
|
|||
if (!atomic_add_unless(&initialized, 1, 1))
|
||||
return;
|
||||
|
||||
cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
|
||||
cdrom_sysctl_header = register_sysctl("dev/cdrom", cdrom_table);
|
||||
|
||||
/* set the defaults */
|
||||
cdrom_sysctl_settings.autoclose = autoclose;
|
||||
|
|
|
@ -746,26 +746,6 @@ static struct ctl_table hpet_table[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
static struct ctl_table hpet_root[] = {
|
||||
{
|
||||
.procname = "hpet",
|
||||
.maxlen = 0,
|
||||
.mode = 0555,
|
||||
.child = hpet_table,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
static struct ctl_table dev_root[] = {
|
||||
{
|
||||
.procname = "dev",
|
||||
.maxlen = 0,
|
||||
.mode = 0555,
|
||||
.child = hpet_root,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
static struct ctl_table_header *sysctl_header;
|
||||
|
||||
/*
|
||||
|
@ -1061,7 +1041,7 @@ static int __init hpet_init(void)
|
|||
if (result < 0)
|
||||
return -ENODEV;
|
||||
|
||||
sysctl_header = register_sysctl_table(dev_root);
|
||||
sysctl_header = register_sysctl("dev/hpet", hpet_table);
|
||||
|
||||
result = acpi_bus_register_driver(&hpet_acpi_driver);
|
||||
if (result < 0) {
|
||||
|
|
|
@ -1992,8 +1992,7 @@ static int proc_do_entropy(struct ctl_table *table, int write, void *buffer,
|
|||
}
|
||||
|
||||
static int sysctl_poolsize = POOL_BITS;
|
||||
extern struct ctl_table random_table[];
|
||||
struct ctl_table random_table[] = {
|
||||
static struct ctl_table random_table[] = {
|
||||
{
|
||||
.procname = "poolsize",
|
||||
.data = &sysctl_poolsize,
|
||||
|
@ -2055,6 +2054,17 @@ struct ctl_table random_table[] = {
|
|||
#endif
|
||||
{ }
|
||||
};
|
||||
|
||||
/*
|
||||
* rand_initialize() is called before sysctl_init(),
|
||||
* so we cannot call register_sysctl_init() in rand_initialize()
|
||||
*/
|
||||
static int __init random_sysctls_init(void)
|
||||
{
|
||||
register_sysctl_init("kernel/random", random_table);
|
||||
return 0;
|
||||
}
|
||||
device_initcall(random_sysctls_init);
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
|
||||
struct batched_entropy {
|
||||
|
|
|
@ -5511,6 +5511,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
|
|||
mutex_init(&mgr->probe_lock);
|
||||
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
|
||||
mutex_init(&mgr->topology_ref_history_lock);
|
||||
stack_depot_init();
|
||||
#endif
|
||||
INIT_LIST_HEAD(&mgr->tx_msg_downq);
|
||||
INIT_LIST_HEAD(&mgr->destroy_port_list);
|
||||
|
|
|
@ -980,6 +980,10 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
|
|||
add_hole(&mm->head_node);
|
||||
|
||||
mm->scan_active = 0;
|
||||
|
||||
#ifdef CONFIG_DRM_DEBUG_MM
|
||||
stack_depot_init();
|
||||
#endif
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_init);
|
||||
|
||||
|
|
|
@ -107,6 +107,11 @@ static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
|
|||
|
||||
kfree(buf);
|
||||
}
|
||||
|
||||
static void __drm_stack_depot_init(void)
|
||||
{
|
||||
stack_depot_init();
|
||||
}
|
||||
#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
|
||||
static depot_stack_handle_t __drm_stack_depot_save(void)
|
||||
{
|
||||
|
@ -115,6 +120,9 @@ static depot_stack_handle_t __drm_stack_depot_save(void)
|
|||
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
|
||||
{
|
||||
}
|
||||
static void __drm_stack_depot_init(void)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
|
||||
|
||||
/**
|
||||
|
@ -359,6 +367,7 @@ void drm_modeset_lock_init(struct drm_modeset_lock *lock)
|
|||
{
|
||||
ww_mutex_init(&lock->mutex, &crtc_ww_class);
|
||||
INIT_LIST_HEAD(&lock->head);
|
||||
__drm_stack_depot_init();
|
||||
}
|
||||
EXPORT_SYMBOL(drm_modeset_lock_init);
|
||||
|
||||
|
|
|
@ -4273,26 +4273,6 @@ static struct ctl_table oa_table[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
static struct ctl_table i915_root[] = {
|
||||
{
|
||||
.procname = "i915",
|
||||
.maxlen = 0,
|
||||
.mode = 0555,
|
||||
.child = oa_table,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
static struct ctl_table dev_root[] = {
|
||||
{
|
||||
.procname = "dev",
|
||||
.maxlen = 0,
|
||||
.mode = 0555,
|
||||
.child = i915_root,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
static void oa_init_supported_formats(struct i915_perf *perf)
|
||||
{
|
||||
struct drm_i915_private *i915 = perf->i915;
|
||||
|
@ -4488,7 +4468,7 @@ static int destroy_config(int id, void *p, void *data)
|
|||
|
||||
int i915_perf_sysctl_register(void)
|
||||
{
|
||||
sysctl_header = register_sysctl_table(dev_root);
|
||||
sysctl_header = register_sysctl("dev/i915", oa_table);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -68,6 +68,9 @@ static noinline depot_stack_handle_t __save_depot_stack(void)
|
|||
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
|
||||
{
|
||||
spin_lock_init(&rpm->debug.lock);
|
||||
|
||||
if (rpm->available)
|
||||
stack_depot_init();
|
||||
}
|
||||
|
||||
static noinline depot_stack_handle_t
|
||||
|
|
|
@ -451,7 +451,7 @@ static int i8k_get_power_status(void)
|
|||
|
||||
static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
struct dell_smm_data *data = PDE_DATA(file_inode(fp));
|
||||
struct dell_smm_data *data = pde_data(file_inode(fp));
|
||||
int __user *argp = (int __user *)arg;
|
||||
int speed, err;
|
||||
int val = 0;
|
||||
|
@ -585,7 +585,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
|
|||
|
||||
static int i8k_open_fs(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, i8k_proc_show, PDE_DATA(inode));
|
||||
return single_open(file, i8k_proc_show, pde_data(inode));
|
||||
}
|
||||
|
||||
static const struct proc_ops i8k_proc_ops = {
|
||||
|
|
|
@ -239,33 +239,11 @@ static struct ctl_table mac_hid_files[] = {
|
|||
{ }
|
||||
};
|
||||
|
||||
/* dir in /proc/sys/dev */
|
||||
static struct ctl_table mac_hid_dir[] = {
|
||||
{
|
||||
.procname = "mac_hid",
|
||||
.maxlen = 0,
|
||||
.mode = 0555,
|
||||
.child = mac_hid_files,
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
/* /proc/sys/dev itself, in case that is not there yet */
|
||||
static struct ctl_table mac_hid_root_dir[] = {
|
||||
{
|
||||
.procname = "dev",
|
||||
.maxlen = 0,
|
||||
.mode = 0555,
|
||||
.child = mac_hid_dir,
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
static struct ctl_table_header *mac_hid_sysctl_header;
|
||||
|
||||
static int __init mac_hid_init(void)
|
||||
{
|
||||
mac_hid_sysctl_header = register_sysctl_table(mac_hid_root_dir);
|
||||
mac_hid_sysctl_header = register_sysctl("dev/mac_hid", mac_hid_files);
|
||||
if (!mac_hid_sysctl_header)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
|
||||
__acquires(RCU)
|
||||
{
|
||||
struct bonding *bond = PDE_DATA(file_inode(seq->file));
|
||||
struct bonding *bond = pde_data(file_inode(seq->file));
|
||||
struct list_head *iter;
|
||||
struct slave *slave;
|
||||
loff_t off = 0;
|
||||
|
@ -30,7 +30,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
|
|||
|
||||
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
||||
{
|
||||
struct bonding *bond = PDE_DATA(file_inode(seq->file));
|
||||
struct bonding *bond = pde_data(file_inode(seq->file));
|
||||
struct list_head *iter;
|
||||
struct slave *slave;
|
||||
bool found = false;
|
||||
|
@ -57,7 +57,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
|
|||
|
||||
static void bond_info_show_master(struct seq_file *seq)
|
||||
{
|
||||
struct bonding *bond = PDE_DATA(file_inode(seq->file));
|
||||
struct bonding *bond = pde_data(file_inode(seq->file));
|
||||
const struct bond_opt_value *optval;
|
||||
struct slave *curr, *primary;
|
||||
int i;
|
||||
|
@ -175,7 +175,7 @@ static void bond_info_show_master(struct seq_file *seq)
|
|||
static void bond_info_show_slave(struct seq_file *seq,
|
||||
const struct slave *slave)
|
||||
{
|
||||
struct bonding *bond = PDE_DATA(file_inode(seq->file));
|
||||
struct bonding *bond = pde_data(file_inode(seq->file));
|
||||
|
||||
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
|
||||
seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
|
||||
|
|
|
@ -4672,7 +4672,7 @@ static ssize_t proc_write(struct file *file,
|
|||
static int proc_status_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *apriv = dev->ml_priv;
|
||||
CapabilityRid cap_rid;
|
||||
StatusRid status_rid;
|
||||
|
@ -4756,7 +4756,7 @@ static int proc_stats_rid_open(struct inode *inode,
|
|||
u16 rid)
|
||||
{
|
||||
struct proc_data *data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *apriv = dev->ml_priv;
|
||||
StatsRid stats;
|
||||
int i, j;
|
||||
|
@ -4819,7 +4819,7 @@ static inline int sniffing_mode(struct airo_info *ai)
|
|||
static void proc_config_on_close(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data = file->private_data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *ai = dev->ml_priv;
|
||||
char *line;
|
||||
|
||||
|
@ -5030,7 +5030,7 @@ static const char *get_rmode(__le16 mode)
|
|||
static int proc_config_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *ai = dev->ml_priv;
|
||||
int i;
|
||||
__le16 mode;
|
||||
|
@ -5120,7 +5120,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
|
|||
static void proc_SSID_on_close(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data = file->private_data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *ai = dev->ml_priv;
|
||||
SsidRid SSID_rid;
|
||||
int i;
|
||||
|
@ -5156,7 +5156,7 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
|
|||
static void proc_APList_on_close(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data = file->private_data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *ai = dev->ml_priv;
|
||||
APListRid *APList_rid = &ai->APList;
|
||||
int i;
|
||||
|
@ -5280,7 +5280,7 @@ static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
|
|||
static void proc_wepkey_on_close(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *ai = dev->ml_priv;
|
||||
int i, rc;
|
||||
char key[16];
|
||||
|
@ -5331,7 +5331,7 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file)
|
|||
static int proc_wepkey_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *ai = dev->ml_priv;
|
||||
char *ptr;
|
||||
WepKeyRid wkr;
|
||||
|
@ -5379,7 +5379,7 @@ static int proc_wepkey_open(struct inode *inode, struct file *file)
|
|||
static int proc_SSID_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *ai = dev->ml_priv;
|
||||
int i;
|
||||
char *ptr;
|
||||
|
@ -5423,7 +5423,7 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
|
|||
static int proc_APList_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *ai = dev->ml_priv;
|
||||
int i;
|
||||
char *ptr;
|
||||
|
@ -5462,7 +5462,7 @@ static int proc_APList_open(struct inode *inode, struct file *file)
|
|||
static int proc_BSSList_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct proc_data *data;
|
||||
struct net_device *dev = PDE_DATA(inode);
|
||||
struct net_device *dev = pde_data(inode);
|
||||
struct airo_info *ai = dev->ml_priv;
|
||||
char *ptr;
|
||||
BSSListRid BSSList_rid;
|
||||
|
|
|
@ -69,7 +69,7 @@ static void prism2_send_mgmt(struct net_device *dev,
|
|||
#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
|
||||
static int ap_debug_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct ap_data *ap = PDE_DATA(file_inode(m->file));
|
||||
struct ap_data *ap = pde_data(file_inode(m->file));
|
||||
|
||||
seq_printf(m, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
|
||||
seq_printf(m, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
|
||||
|
@ -320,7 +320,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
|
|||
|
||||
static int ap_control_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct ap_data *ap = PDE_DATA(file_inode(m->file));
|
||||
struct ap_data *ap = pde_data(file_inode(m->file));
|
||||
char *policy_txt;
|
||||
struct mac_entry *entry;
|
||||
|
||||
|
@ -352,20 +352,20 @@ static int ap_control_proc_show(struct seq_file *m, void *v)
|
|||
|
||||
static void *ap_control_proc_start(struct seq_file *m, loff_t *_pos)
|
||||
{
|
||||
struct ap_data *ap = PDE_DATA(file_inode(m->file));
|
||||
struct ap_data *ap = pde_data(file_inode(m->file));
|
||||
spin_lock_bh(&ap->mac_restrictions.lock);
|
||||
return seq_list_start_head(&ap->mac_restrictions.mac_list, *_pos);
|
||||
}
|
||||
|
||||
static void *ap_control_proc_next(struct seq_file *m, void *v, loff_t *_pos)
|
||||
{
|
||||
struct ap_data *ap = PDE_DATA(file_inode(m->file));
|
||||
struct ap_data *ap = pde_data(file_inode(m->file));
|
||||
return seq_list_next(v, &ap->mac_restrictions.mac_list, _pos);
|
||||
}
|
||||
|
||||
static void ap_control_proc_stop(struct seq_file *m, void *v)
|
||||
{
|
||||
struct ap_data *ap = PDE_DATA(file_inode(m->file));
|
||||
struct ap_data *ap = pde_data(file_inode(m->file));
|
||||
spin_unlock_bh(&ap->mac_restrictions.lock);
|
||||
}
|
||||
|
||||
|
@ -554,20 +554,20 @@ static int prism2_ap_proc_show(struct seq_file *m, void *v)
|
|||
|
||||
static void *prism2_ap_proc_start(struct seq_file *m, loff_t *_pos)
|
||||
{
|
||||
struct ap_data *ap = PDE_DATA(file_inode(m->file));
|
||||
struct ap_data *ap = pde_data(file_inode(m->file));
|
||||
spin_lock_bh(&ap->sta_table_lock);
|
||||
return seq_list_start_head(&ap->sta_list, *_pos);
|
||||
}
|
||||
|
||||
static void *prism2_ap_proc_next(struct seq_file *m, void *v, loff_t *_pos)
|
||||
{
|
||||
struct ap_data *ap = PDE_DATA(file_inode(m->file));
|
||||
struct ap_data *ap = pde_data(file_inode(m->file));
|
||||
return seq_list_next(v, &ap->sta_list, _pos);
|
||||
}
|
||||
|
||||
static void prism2_ap_proc_stop(struct seq_file *m, void *v)
|
||||
{
|
||||
struct ap_data *ap = PDE_DATA(file_inode(m->file));
|
||||
struct ap_data *ap = pde_data(file_inode(m->file));
|
||||
spin_unlock_bh(&ap->sta_table_lock);
|
||||
}
|
||||
|
||||
|
|
|
@ -227,7 +227,7 @@ static int prism2_download_aux_dump_proc_open(struct inode *inode, struct file *
|
|||
sizeof(struct prism2_download_aux_dump));
|
||||
if (ret == 0) {
|
||||
struct seq_file *m = file->private_data;
|
||||
m->private = PDE_DATA(inode);
|
||||
m->private = pde_data(inode);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -97,20 +97,20 @@ static int prism2_wds_proc_show(struct seq_file *m, void *v)
|
|||
|
||||
static void *prism2_wds_proc_start(struct seq_file *m, loff_t *_pos)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
read_lock_bh(&local->iface_lock);
|
||||
return seq_list_start(&local->hostap_interfaces, *_pos);
|
||||
}
|
||||
|
||||
static void *prism2_wds_proc_next(struct seq_file *m, void *v, loff_t *_pos)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
return seq_list_next(v, &local->hostap_interfaces, _pos);
|
||||
}
|
||||
|
||||
static void prism2_wds_proc_stop(struct seq_file *m, void *v)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
read_unlock_bh(&local->iface_lock);
|
||||
}
|
||||
|
||||
|
@ -123,7 +123,7 @@ static const struct seq_operations prism2_wds_proc_seqops = {
|
|||
|
||||
static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
struct list_head *ptr = v;
|
||||
struct hostap_bss_info *bss;
|
||||
|
||||
|
@ -151,21 +151,21 @@ static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
|
|||
static void *prism2_bss_list_proc_start(struct seq_file *m, loff_t *_pos)
|
||||
__acquires(&local->lock)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
spin_lock_bh(&local->lock);
|
||||
return seq_list_start_head(&local->bss_list, *_pos);
|
||||
}
|
||||
|
||||
static void *prism2_bss_list_proc_next(struct seq_file *m, void *v, loff_t *_pos)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
return seq_list_next(v, &local->bss_list, _pos);
|
||||
}
|
||||
|
||||
static void prism2_bss_list_proc_stop(struct seq_file *m, void *v)
|
||||
__releases(&local->lock)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
spin_unlock_bh(&local->lock);
|
||||
}
|
||||
|
||||
|
@ -198,7 +198,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
|
|||
static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
|
||||
size_t count, loff_t *_pos)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(file));
|
||||
local_info_t *local = pde_data(file_inode(file));
|
||||
size_t off;
|
||||
|
||||
if (local->pda == NULL || *_pos >= PRISM2_PDA_SIZE)
|
||||
|
@ -272,7 +272,7 @@ static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
|
|||
#ifndef PRISM2_NO_STATION_MODES
|
||||
static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
unsigned long entry;
|
||||
int i, len;
|
||||
struct hfa384x_hostscan_result *scanres;
|
||||
|
@ -322,7 +322,7 @@ static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
|
|||
|
||||
static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
spin_lock_bh(&local->lock);
|
||||
|
||||
/* We have a header (pos 0) + N results to show (pos 1...N) */
|
||||
|
@ -333,7 +333,7 @@ static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
|
|||
|
||||
static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *_pos)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
|
||||
++*_pos;
|
||||
if (*_pos > local->last_scan_results_count)
|
||||
|
@ -343,7 +343,7 @@ static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *
|
|||
|
||||
static void prism2_scan_results_proc_stop(struct seq_file *m, void *v)
|
||||
{
|
||||
local_info_t *local = PDE_DATA(file_inode(m->file));
|
||||
local_info_t *local = pde_data(file_inode(m->file));
|
||||
spin_unlock_bh(&local->lock);
|
||||
}
|
||||
|
||||
|
|
|
@ -2746,7 +2746,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
|
|||
nr = nr * 10 + c;
|
||||
p++;
|
||||
} while (--len);
|
||||
*(int *)PDE_DATA(file_inode(file)) = nr;
|
||||
*(int *)pde_data(file_inode(file)) = nr;
|
||||
return count;
|
||||
}
|
||||
|
||||
|
|
|
@@ -93,30 +93,30 @@ struct nubus_proc_pde_data {
 static struct nubus_proc_pde_data *
 nubus_proc_alloc_pde_data(unsigned char *ptr, unsigned int size)
 {
-	struct nubus_proc_pde_data *pde_data;
+	struct nubus_proc_pde_data *pded;

-	pde_data = kmalloc(sizeof(*pde_data), GFP_KERNEL);
-	if (!pde_data)
+	pded = kmalloc(sizeof(*pded), GFP_KERNEL);
+	if (!pded)
 		return NULL;

-	pde_data->res_ptr = ptr;
-	pde_data->res_size = size;
-	return pde_data;
+	pded->res_ptr = ptr;
+	pded->res_size = size;
+	return pded;
 }

 static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
 {
 	struct inode *inode = m->private;
-	struct nubus_proc_pde_data *pde_data;
+	struct nubus_proc_pde_data *pded;

-	pde_data = PDE_DATA(inode);
-	if (!pde_data)
+	pded = pde_data(inode);
+	if (!pded)
 		return 0;

-	if (pde_data->res_size > m->size)
+	if (pded->res_size > m->size)
 		return -EFBIG;

-	if (pde_data->res_size) {
+	if (pded->res_size) {
 		int lanes = (int)proc_get_parent_data(inode);
 		struct nubus_dirent ent;

@@ -124,11 +124,11 @@ static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
 			return 0;

 		ent.mask = lanes;
-		ent.base = pde_data->res_ptr;
+		ent.base = pded->res_ptr;
 		ent.data = 0;
-		nubus_seq_write_rsrc_mem(m, &ent, pde_data->res_size);
+		nubus_seq_write_rsrc_mem(m, &ent, pded->res_size);
 	} else {
-		unsigned int data = (unsigned int)pde_data->res_ptr;
+		unsigned int data = (unsigned int)pded->res_ptr;

 		seq_putc(m, data >> 16);
 		seq_putc(m, data >> 8);
@@ -142,18 +142,18 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
 			     unsigned int size)
 {
 	char name[9];
-	struct nubus_proc_pde_data *pde_data;
+	struct nubus_proc_pde_data *pded;

 	if (!procdir)
 		return;

 	snprintf(name, sizeof(name), "%x", ent->type);
 	if (size)
-		pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+		pded = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
 	else
-		pde_data = NULL;
+		pded = NULL;
 	proc_create_single_data(name, S_IFREG | 0444, procdir,
-				nubus_proc_rsrc_show, pde_data);
+				nubus_proc_rsrc_show, pded);
 }

 void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,

@@ -168,14 +168,14 @@ static int led_proc_show(struct seq_file *m, void *v)

 static int led_proc_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, led_proc_show, PDE_DATA(inode));
+	return single_open(file, led_proc_show, pde_data(inode));
 }


 static ssize_t led_proc_write(struct file *file, const char __user *buf,
 	size_t count, loff_t *pos)
 {
-	void *data = PDE_DATA(file_inode(file));
+	void *data = pde_data(file_inode(file));
 	char *cur, lbuf[32];
 	int d;

@@ -21,14 +21,14 @@ static int proc_initialized;	/* = 0 */

 static loff_t proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
 {
-	struct pci_dev *dev = PDE_DATA(file_inode(file));
+	struct pci_dev *dev = pde_data(file_inode(file));
 	return fixed_size_llseek(file, off, whence, dev->cfg_size);
 }

 static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
 				 size_t nbytes, loff_t *ppos)
 {
-	struct pci_dev *dev = PDE_DATA(file_inode(file));
+	struct pci_dev *dev = pde_data(file_inode(file));
 	unsigned int pos = *ppos;
 	unsigned int cnt, size;

@@ -114,7 +114,7 @@ static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
 				  size_t nbytes, loff_t *ppos)
 {
 	struct inode *ino = file_inode(file);
-	struct pci_dev *dev = PDE_DATA(ino);
+	struct pci_dev *dev = pde_data(ino);
 	int pos = *ppos;
 	int size = dev->cfg_size;
 	int cnt, ret;
@@ -196,7 +196,7 @@ struct pci_filp_private {
 static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
 			       unsigned long arg)
 {
-	struct pci_dev *dev = PDE_DATA(file_inode(file));
+	struct pci_dev *dev = pde_data(file_inode(file));
 #ifdef HAVE_PCI_MMAP
 	struct pci_filp_private *fpriv = file->private_data;
 #endif /* HAVE_PCI_MMAP */
@@ -244,7 +244,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
 #ifdef HAVE_PCI_MMAP
 static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	struct pci_dev *dev = PDE_DATA(file_inode(file));
+	struct pci_dev *dev = pde_data(file_inode(file));
 	struct pci_filp_private *fpriv = file->private_data;
 	int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;

@@ -880,14 +880,14 @@ static int dispatch_proc_show(struct seq_file *m, void *v)

 static int dispatch_proc_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, dispatch_proc_show, PDE_DATA(inode));
+	return single_open(file, dispatch_proc_show, pde_data(inode));
 }

 static ssize_t dispatch_proc_write(struct file *file,
 			const char __user *userbuf,
 			size_t count, loff_t *pos)
 {
-	struct ibm_struct *ibm = PDE_DATA(file_inode(file));
+	struct ibm_struct *ibm = pde_data(file_inode(file));
 	char *kernbuf;
 	int ret;

@@ -1368,7 +1368,7 @@ static int lcd_proc_show(struct seq_file *m, void *v)

 static int lcd_proc_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, lcd_proc_show, PDE_DATA(inode));
+	return single_open(file, lcd_proc_show, pde_data(inode));
 }

 static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
@@ -1404,7 +1404,7 @@ static int set_lcd_status(struct backlight_device *bd)
 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
 			      size_t count, loff_t *pos)
 {
-	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+	struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
 	char cmd[42];
 	size_t len;
 	int levels;
@@ -1469,13 +1469,13 @@ static int video_proc_show(struct seq_file *m, void *v)

 static int video_proc_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, video_proc_show, PDE_DATA(inode));
+	return single_open(file, video_proc_show, pde_data(inode));
 }

 static ssize_t video_proc_write(struct file *file, const char __user *buf,
 				size_t count, loff_t *pos)
 {
-	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+	struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
 	char *buffer;
 	char *cmd;
 	int lcd_out = -1, crt_out = -1, tv_out = -1;
@@ -1580,13 +1580,13 @@ static int fan_proc_show(struct seq_file *m, void *v)

 static int fan_proc_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, fan_proc_show, PDE_DATA(inode));
+	return single_open(file, fan_proc_show, pde_data(inode));
 }

 static ssize_t fan_proc_write(struct file *file, const char __user *buf,
 			      size_t count, loff_t *pos)
 {
-	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+	struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
 	char cmd[42];
 	size_t len;
 	int value;
@@ -1628,13 +1628,13 @@ static int keys_proc_show(struct seq_file *m, void *v)

 static int keys_proc_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, keys_proc_show, PDE_DATA(inode));
+	return single_open(file, keys_proc_show, pde_data(inode));
 }

 static ssize_t keys_proc_write(struct file *file, const char __user *buf,
 			       size_t count, loff_t *pos)
 {
-	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+	struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
 	char cmd[42];
 	size_t len;
 	int value;

@@ -22,7 +22,7 @@ static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
 static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
 				    size_t nbytes, loff_t * ppos)
 {
-	struct pnp_dev *dev = PDE_DATA(file_inode(file));
+	struct pnp_dev *dev = pde_data(file_inode(file));
 	int pos = *ppos;
 	int cnt, size = 256;

@@ -173,13 +173,13 @@ static int pnpbios_proc_show(struct seq_file *m, void *v)

 static int pnpbios_proc_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, pnpbios_proc_show, PDE_DATA(inode));
+	return single_open(file, pnpbios_proc_show, pde_data(inode));
 }

 static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
 				  size_t count, loff_t *pos)
 {
-	void *data = PDE_DATA(file_inode(file));
+	void *data = pde_data(file_inode(file));
 	struct pnp_bios_node *node;
 	int boot = (long)data >> 8;
 	u8 nodenum = (long)data;

@@ -49,7 +49,7 @@ static DEFINE_MUTEX(global_host_template_mutex);
 static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf,
 				    size_t count, loff_t *ppos)
 {
-	struct Scsi_Host *shost = PDE_DATA(file_inode(file));
+	struct Scsi_Host *shost = pde_data(file_inode(file));
 	ssize_t ret = -ENOMEM;
 	char *page;

@@ -79,7 +79,7 @@ static int proc_scsi_show(struct seq_file *m, void *v)

 static int proc_scsi_host_open(struct inode *inode, struct file *file)
 {
-	return single_open_size(file, proc_scsi_show, PDE_DATA(inode),
+	return single_open_size(file, proc_scsi_show, pde_data(inode),
 				4 * PAGE_SIZE);
 }

@@ -77,7 +77,7 @@ static int sg_proc_init(void);

 #define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)

-int sg_big_buff = SG_DEF_RESERVED_SIZE;
+static int sg_big_buff = SG_DEF_RESERVED_SIZE;
 /* N.B. This variable is readable and writeable via
    /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
    of this size (or less if there is not enough memory) will be reserved
@@ -1634,6 +1634,37 @@ MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");

+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+
+static struct ctl_table sg_sysctls[] = {
+	{
+		.procname = "sg-big-buff",
+		.data = &sg_big_buff,
+		.maxlen = sizeof(int),
+		.mode = 0444,
+		.proc_handler = proc_dointvec,
+	},
+	{}
+};
+
+static struct ctl_table_header *hdr;
+static void register_sg_sysctls(void)
+{
+	if (!hdr)
+		hdr = register_sysctl("kernel", sg_sysctls);
+}
+
+static void unregister_sg_sysctls(void)
+{
+	if (hdr)
+		unregister_sysctl_table(hdr);
+}
+#else
+#define register_sg_sysctls() do { } while (0)
+#define unregister_sg_sysctls() do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
 static int __init
 init_sg(void)
 {
@@ -1666,6 +1697,7 @@ init_sg(void)
 		return 0;
 	}
 	class_destroy(sg_sysfs_class);
+	register_sg_sysctls();
 err_out:
 	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
 	return rc;
@@ -1674,6 +1706,7 @@ init_sg(void)
 static void __exit
 exit_sg(void)
 {
+	unregister_sg_sysctls();
 #ifdef CONFIG_SCSI_PROC_FS
 	remove_proc_subtree("scsi/sg", NULL);
 #endif /* CONFIG_SCSI_PROC_FS */

@@ -1117,7 +1117,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
 static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
 				size_t count, loff_t *ppos)
 {
-	rndis_params *p = PDE_DATA(file_inode(file));
+	rndis_params *p = pde_data(file_inode(file));
 	u32 speed = 0;
 	int i, fl_speed = 0;

@@ -1161,7 +1161,7 @@ static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,

 static int rndis_proc_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, rndis_proc_show, PDE_DATA(inode));
+	return single_open(file, rndis_proc_show, pde_data(inode));
 }

 static const struct proc_ops rndis_proc_ops = {

@@ -30,7 +30,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
 static ssize_t
 proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
-	struct zorro_dev *z = PDE_DATA(file_inode(file));
+	struct zorro_dev *z = pde_data(file_inode(file));
 	struct ConfigDev cd;
 	loff_t pos = *ppos;

@@ -6,6 +6,8 @@
 # Rewritten to use lists instead of if-statements.
 #

+obj-$(CONFIG_SYSCTL)	+= sysctls.o
+
 obj-y :=	open.o read_write.o file_table.o super.o \
 		char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
 		ioctl.o readdir.o select.o dcache.o inode.o \

@@ -227,7 +227,7 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
 static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
 	__acquires(cell->proc_lock)
 {
-	struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+	struct afs_cell *cell = pde_data(file_inode(m->file));

 	rcu_read_lock();
 	return seq_hlist_start_head_rcu(&cell->proc_volumes, *_pos);
@@ -236,7 +236,7 @@ static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
 static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v,
 					loff_t *_pos)
 {
-	struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+	struct afs_cell *cell = pde_data(file_inode(m->file));

 	return seq_hlist_next_rcu(v, &cell->proc_volumes, _pos);
 }
@@ -322,7 +322,7 @@ static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
 {
 	struct afs_vl_seq_net_private *priv = m->private;
 	struct afs_vlserver_list *vllist;
-	struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+	struct afs_cell *cell = pde_data(file_inode(m->file));
 	loff_t pos = *_pos;

 	rcu_read_lock();

fs/aio.c
@@ -220,9 +220,35 @@ struct aio_kiocb {

 /*------ sysctl variables----*/
 static DEFINE_SPINLOCK(aio_nr_lock);
-unsigned long aio_nr;		/* current system wide number of aio requests */
-unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
+static unsigned long aio_nr;		/* current system wide number of aio requests */
+static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
 /*----end sysctl variables---*/
+#ifdef CONFIG_SYSCTL
+static struct ctl_table aio_sysctls[] = {
+	{
+		.procname = "aio-nr",
+		.data = &aio_nr,
+		.maxlen = sizeof(aio_nr),
+		.mode = 0444,
+		.proc_handler = proc_doulongvec_minmax,
+	},
+	{
+		.procname = "aio-max-nr",
+		.data = &aio_max_nr,
+		.maxlen = sizeof(aio_max_nr),
+		.mode = 0644,
+		.proc_handler = proc_doulongvec_minmax,
+	},
+	{}
+};
+
+static void __init aio_sysctl_init(void)
+{
+	register_sysctl_init("fs", aio_sysctls);
+}
+#else
+#define aio_sysctl_init() do { } while (0)
+#endif

 static struct kmem_cache *kiocb_cachep;
 static struct kmem_cache *kioctx_cachep;

@@ -275,6 +301,7 @@ static int __init aio_setup(void)

 	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+	aio_sysctl_init();
 	return 0;
 }
 __initcall(aio_setup);

@@ -822,7 +822,11 @@ static int __init init_misc_binfmt(void)
 	int err = register_filesystem(&bm_fs_type);
 	if (!err)
 		insert_binfmt(&misc_format);
-	return err;
+	if (!register_sysctl_mount_point("fs/binfmt_misc")) {
+		pr_warn("Failed to create fs/binfmt_misc sysctl mount point");
+		return -ENOMEM;
+	}
+	return 0;
 }

 static void __exit exit_misc_binfmt(void)

@@ -12,7 +12,6 @@
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
 #include <linux/prefetch.h>
-#include <linux/cleancache.h>
 #include <linux/fsverity.h>
 #include "misc.h"
 #include "extent_io.h"
@@ -3578,15 +3577,6 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 		goto out;
 	}

-	if (!PageUptodate(page)) {
-		if (cleancache_get_page(page) == 0) {
-			BUG_ON(blocksize != PAGE_SIZE);
-			unlock_extent(tree, start, end);
-			unlock_page(page);
-			goto out;
-		}
-	}
-
 
 	if (page->index == last_byte >> PAGE_SHIFT) {
 		size_t zero_offset = offset_in_page(last_byte);

@@ -23,7 +23,6 @@
 #include <linux/miscdevice.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
-#include <linux/cleancache.h>
 #include <linux/ratelimit.h>
 #include <linux/crc32c.h>
 #include <linux/btrfs.h>
@@ -1374,7 +1373,6 @@ static int btrfs_fill_super(struct super_block *sb,
 		goto fail_close;
 	}

-	cleancache_init_fs(sb);
 	sb->s_flags |= SB_ACTIVE;
 	return 0;

@@ -41,6 +41,7 @@
 #include <linux/fs.h>
 #include <linux/path.h>
 #include <linux/timekeeping.h>
+#include <linux/sysctl.h>

 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
@@ -52,9 +53,9 @@

 #include <trace/events/sched.h>

-int core_uses_pid;
-unsigned int core_pipe_limit;
-char core_pattern[CORENAME_MAX_SIZE] = "core";
+static int core_uses_pid;
+static unsigned int core_pipe_limit;
+static char core_pattern[CORENAME_MAX_SIZE] = "core";
 static int core_name_size = CORENAME_MAX_SIZE;

 struct core_name {
@@ -62,8 +63,6 @@ struct core_name {
 	int used, size;
 };

-/* The maximal length of core_pattern is also specified in sysctl.c */
-
 static int expand_corename(struct core_name *cn, int size)
 {
 	char *corename = krealloc(cn->corename, size, GFP_KERNEL);
@@ -893,6 +892,63 @@ int dump_align(struct coredump_params *cprm, int align)
 }
 EXPORT_SYMBOL(dump_align);

+#ifdef CONFIG_SYSCTL
+
+void validate_coredump_safety(void)
+{
+	if (suid_dumpable == SUID_DUMP_ROOT &&
+	    core_pattern[0] != '/' && core_pattern[0] != '|') {
+		pr_warn(
+"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
+"Pipe handler or fully qualified core dump path required.\n"
+"Set kernel.core_pattern before fs.suid_dumpable.\n"
+		);
+	}
+}
+
+static int proc_dostring_coredump(struct ctl_table *table, int write,
+		  void *buffer, size_t *lenp, loff_t *ppos)
+{
+	int error = proc_dostring(table, write, buffer, lenp, ppos);
+
+	if (!error)
+		validate_coredump_safety();
+	return error;
+}
+
+static struct ctl_table coredump_sysctls[] = {
+	{
+		.procname = "core_uses_pid",
+		.data = &core_uses_pid,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec,
+	},
+	{
+		.procname = "core_pattern",
+		.data = core_pattern,
+		.maxlen = CORENAME_MAX_SIZE,
+		.mode = 0644,
+		.proc_handler = proc_dostring_coredump,
+	},
+	{
+		.procname = "core_pipe_limit",
+		.data = &core_pipe_limit,
+		.maxlen = sizeof(unsigned int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec,
+	},
+	{ }
+};
+
+static int __init init_fs_coredump_sysctls(void)
+{
+	register_sysctl_init("kernel", coredump_sysctls);
+	return 0;
+}
+fs_initcall(init_fs_coredump_sysctls);
+#endif /* CONFIG_SYSCTL */
+
 /*
  * The purpose of always_dump_vma() is to make sure that special kernel mappings
  * that are useful for post-mortem analysis are included in every core dump.

fs/dcache.c
@@ -115,10 +115,13 @@ static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
 	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
 }

-
-/* Statistics gathering. */
-struct dentry_stat_t dentry_stat = {
-	.age_limit = 45,
+struct dentry_stat_t {
+	long nr_dentry;
+	long nr_unused;
+	long age_limit;		/* age in seconds */
+	long want_pages;	/* pages requested by system */
+	long nr_negative;	/* # of unused negative dentries */
+	long dummy;		/* Reserved for future use */
 };

 static DEFINE_PER_CPU(long, nr_dentry);
@@ -126,6 +129,10 @@ static DEFINE_PER_CPU(long, nr_dentry_unused);
 static DEFINE_PER_CPU(long, nr_dentry_negative);

 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+/* Statistics gathering. */
+static struct dentry_stat_t dentry_stat = {
+	.age_limit = 45,
+};

 /*
  * Here we resort to our own counters instead of using generic per-cpu counters
@@ -167,14 +174,32 @@ static long get_nr_dentry_negative(void)
 	return sum < 0 ? 0 : sum;
 }

-int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
-		   size_t *lenp, loff_t *ppos)
+static int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
+			  size_t *lenp, loff_t *ppos)
 {
 	dentry_stat.nr_dentry = get_nr_dentry();
 	dentry_stat.nr_unused = get_nr_dentry_unused();
 	dentry_stat.nr_negative = get_nr_dentry_negative();
 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }

+static struct ctl_table fs_dcache_sysctls[] = {
+	{
+		.procname = "dentry-state",
+		.data = &dentry_stat,
+		.maxlen = 6*sizeof(long),
+		.mode = 0444,
+		.proc_handler = proc_nr_dentry,
+	},
+	{ }
+};
+
+static int __init init_fs_dcache_sysctls(void)
+{
+	register_sysctl_init("fs", fs_dcache_sysctls);
+	return 0;
+}
+fs_initcall(init_fs_dcache_sysctls);
 #endif

 /*

@@ -307,7 +307,7 @@ static void unlist_file(struct epitems_head *head)
 static long long_zero;
 static long long_max = LONG_MAX;

-struct ctl_table epoll_table[] = {
+static struct ctl_table epoll_table[] = {
 	{
 		.procname = "max_user_watches",
 		.data = &max_user_watches,
@@ -319,6 +319,13 @@ struct ctl_table epoll_table[] = {
 	},
 	{ }
 };
+
+static void __init epoll_sysctls_init(void)
+{
+	register_sysctl("fs/epoll", epoll_table);
+}
+#else
+#define epoll_sysctls_init() do { } while (0)
 #endif /* CONFIG_SYSCTL */

 static const struct file_operations eventpoll_fops;
@@ -2378,6 +2385,7 @@ static int __init eventpoll_init(void)
 	/* Allocates slab cache used to allocate "struct eppoll_entry" */
 	pwq_cache = kmem_cache_create("eventpoll_pwq",
 		sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
+	epoll_sysctls_init();

 	ephead_cache = kmem_cache_create("ep_head",
 		sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);

fs/exec.c
@@ -65,6 +65,7 @@
 #include <linux/vmalloc.h>
 #include <linux/io_uring.h>
 #include <linux/syscall_user_dispatch.h>
+#include <linux/coredump.h>

 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
@@ -2099,3 +2100,37 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
 				  argv, envp, flags);
 }
 #endif
+
+#ifdef CONFIG_SYSCTL
+
+static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
+		void *buffer, size_t *lenp, loff_t *ppos)
+{
+	int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+	if (!error)
+		validate_coredump_safety();
+	return error;
+}
+
+static struct ctl_table fs_exec_sysctls[] = {
+	{
+		.procname = "suid_dumpable",
+		.data = &suid_dumpable,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax_coredump,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_TWO,
+	},
+	{ }
+};
+
+static int __init init_fs_exec_sysctls(void)
+{
+	register_sysctl_init("fs", fs_exec_sysctls);
+	return 0;
+}
+
+fs_initcall(init_fs_exec_sysctls);
+#endif /* CONFIG_SYSCTL */

@@ -2834,7 +2834,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)

 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
 {
-	struct super_block *sb = PDE_DATA(file_inode(seq->file));
+	struct super_block *sb = pde_data(file_inode(seq->file));
 	ext4_group_t group;

 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
@@ -2845,7 +2845,7 @@ static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)

 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct super_block *sb = PDE_DATA(file_inode(seq->file));
+	struct super_block *sb = pde_data(file_inode(seq->file));
 	ext4_group_t group;

 	++*pos;
@@ -2857,7 +2857,7 @@ static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)

 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 {
-	struct super_block *sb = PDE_DATA(file_inode(seq->file));
+	struct super_block *sb = pde_data(file_inode(seq->file));
 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
 	int i;
 	int err, buddy_loaded = 0;
@@ -2985,7 +2985,7 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
 __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
 {
-	struct super_block *sb = PDE_DATA(file_inode(seq->file));
+	struct super_block *sb = pde_data(file_inode(seq->file));
 	unsigned long position;

 	read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
@@ -2998,7 +2998,7 @@ __acquires(&EXT4_SB(sb)->s_mb_rb_lock)

 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct super_block *sb = PDE_DATA(file_inode(seq->file));
+	struct super_block *sb = pde_data(file_inode(seq->file));
 	unsigned long position;

 	++*pos;
@@ -3010,7 +3010,7 @@ static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, lof

 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
 {
-	struct super_block *sb = PDE_DATA(file_inode(seq->file));
+	struct super_block *sb = pde_data(file_inode(seq->file));
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	unsigned long position = ((unsigned long) v);
 	struct ext4_group_info *grp;
@@ -3058,7 +3058,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
 __releases(&EXT4_SB(sb)->s_mb_rb_lock)
 {
-	struct super_block *sb = PDE_DATA(file_inode(seq->file));
+	struct super_block *sb = pde_data(file_inode(seq->file));

 	read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
 }

@@ -43,7 +43,6 @@
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
-#include <linux/cleancache.h>

 #include "ext4.h"

@@ -350,11 +349,6 @@ int ext4_mpage_readpages(struct inode *inode,
 		} else if (fully_mapped) {
 			SetPageMappedToDisk(page);
 		}
-		if (fully_mapped && blocks_per_page == 1 &&
-		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
-			SetPageUptodate(page);
-			goto confused;
-		}

 		/*
 		 * This page will go to BIO. Do we need to send this

@@ -39,7 +39,6 @@
 #include <linux/log2.h>
 #include <linux/crc16.h>
 #include <linux/dax.h>
-#include <linux/cleancache.h>
 #include <linux/uaccess.h>
 #include <linux/iversion.h>
 #include <linux/unicode.h>
@@ -3149,8 +3148,6 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
 			EXT4_BLOCKS_PER_GROUP(sb),
 			EXT4_INODES_PER_GROUP(sb),
 			sbi->s_mount_opt, sbi->s_mount_opt2);
-
-	cleancache_init_fs(sb);
 	return err;
 }

@@ -18,7 +18,6 @@
 #include <linux/swap.h>
 #include <linux/prefetch.h>
 #include <linux/uio.h>
-#include <linux/cleancache.h>
 #include <linux/sched/signal.h>
 #include <linux/fiemap.h>
 #include <linux/iomap.h>
@@ -2035,12 +2034,6 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
 	block_nr = map->m_pblk + block_in_file - map->m_lblk;
 	SetPageMappedToDisk(page);

-	if (!PageUptodate(page) && (!PageSwapCache(page) &&
-				!cleancache_get_page(page))) {
-		SetPageUptodate(page);
-		goto confused;
-	}
-
 	if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
 						DATA_GENERIC_ENHANCE_READ)) {
 		ret = -EFSCORRUPTED;
@@ -2096,12 +2089,6 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
 	ClearPageError(page);
 	*last_block_in_bio = block_nr;
 	goto out;
-confused:
-	if (bio) {
-		__submit_bio(F2FS_I_SB(inode), bio, DATA);
-		bio = NULL;
-	}
-	unlock_page(page);
 out:
 	*bio_ret = bio;
 	return ret;

@@ -33,7 +33,7 @@
 #include "internal.h"

 /* sysctl tunables... */
-struct files_stat_struct files_stat = {
+static struct files_stat_struct files_stat = {
 	.max_files = NR_FILE
 };

@@ -75,22 +75,53 @@ unsigned long get_max_files(void)
 }
 EXPORT_SYMBOL_GPL(get_max_files);

+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+
 /*
  * Handle nr_files sysctl
  */
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
-int proc_nr_files(struct ctl_table *table, int write,
-                     void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_nr_files(struct ctl_table *table, int write, void *buffer,
+			 size_t *lenp, loff_t *ppos)
 {
 	files_stat.nr_files = get_nr_files();
 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
-#else
-int proc_nr_files(struct ctl_table *table, int write,
-                     void *buffer, size_t *lenp, loff_t *ppos)
+
+static struct ctl_table fs_stat_sysctls[] = {
+	{
+		.procname = "file-nr",
+		.data = &files_stat,
+		.maxlen = sizeof(files_stat),
+		.mode = 0444,
+		.proc_handler = proc_nr_files,
+	},
+	{
+		.procname = "file-max",
+		.data = &files_stat.max_files,
+		.maxlen = sizeof(files_stat.max_files),
+		.mode = 0644,
+		.proc_handler = proc_doulongvec_minmax,
+		.extra1 = SYSCTL_LONG_ZERO,
+		.extra2 = SYSCTL_LONG_MAX,
+	},
+	{
+		.procname = "nr_open",
+		.data = &sysctl_nr_open,
+		.maxlen = sizeof(unsigned int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = &sysctl_nr_open_min,
+		.extra2 = &sysctl_nr_open_max,
+	},
+	{ }
+};
+
+static int __init init_fs_stat_sysctls(void)
 {
-	return -ENOSYS;
+	register_sysctl_init("fs", fs_stat_sysctls);
+	return 0;
 }
+fs_initcall(init_fs_stat_sysctls);
 #endif

 static struct file *__alloc_file(int flags, const struct cred *cred)

fs/inode.c
@@ -67,11 +67,6 @@ const struct address_space_operations empty_aops = {
 };
 EXPORT_SYMBOL(empty_aops);

-/*
- * Statistics gathering..
- */
-struct inodes_stat_t inodes_stat;
-
 static DEFINE_PER_CPU(unsigned long, nr_inodes);
 static DEFINE_PER_CPU(unsigned long, nr_unused);

@@ -106,13 +101,43 @@ long get_nr_dirty_inodes(void)
  * Handle nr_inode sysctl
  */
 #ifdef CONFIG_SYSCTL
-int proc_nr_inodes(struct ctl_table *table, int write,
-		   void *buffer, size_t *lenp, loff_t *ppos)
+/*
+ * Statistics gathering..
+ */
+static struct inodes_stat_t inodes_stat;
+
+static int proc_nr_inodes(struct ctl_table *table, int write, void *buffer,
+			  size_t *lenp, loff_t *ppos)
 {
 	inodes_stat.nr_inodes = get_nr_inodes();
 	inodes_stat.nr_unused = get_nr_inodes_unused();
 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
+
+static struct ctl_table inodes_sysctls[] = {
+	{
+		.procname = "inode-nr",
+		.data = &inodes_stat,
+		.maxlen = 2*sizeof(long),
+		.mode = 0444,
+		.proc_handler = proc_nr_inodes,
+	},
+	{
+		.procname = "inode-state",
+		.data = &inodes_stat,
+		.maxlen = 7*sizeof(long),
+		.mode = 0444,
+		.proc_handler = proc_nr_inodes,
+	},
+	{ }
+};
+
+static int __init init_fs_inode_sysctls(void)
+{
+	register_sysctl_init("fs", inodes_sysctls);
+	return 0;
+}
+early_initcall(init_fs_inode_sysctls);
 #endif

 static int no_open(struct inode *inode, struct file *file)

@@ -1212,7 +1212,7 @@ static const struct seq_operations jbd2_seq_info_ops = {

 static int jbd2_seq_info_open(struct inode *inode, struct file *file)
 {
-	journal_t *journal = PDE_DATA(inode);
+	journal_t *journal = pde_data(inode);
 	struct jbd2_stats_proc_session *s;
 	int rc, size;

fs/locks.c
@@ -62,6 +62,7 @@
 #include <linux/pid_namespace.h>
 #include <linux/hashtable.h>
 #include <linux/percpu.h>
+#include <linux/sysctl.h>

 #define CREATE_TRACE_POINTS
 #include <trace/events/filelock.h>
@@ -88,8 +89,37 @@ static int target_leasetype(struct file_lock *fl)
 	return fl->fl_type;
 }

-int leases_enable = 1;
-int lease_break_time = 45;
+static int leases_enable = 1;
+static int lease_break_time = 45;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table locks_sysctls[] = {
+	{
+		.procname = "leases-enable",
+		.data = &leases_enable,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec,
+	},
+#ifdef CONFIG_MMU
+	{
+		.procname = "lease-break-time",
+		.data = &lease_break_time,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec,
+	},
+#endif /* CONFIG_MMU */
+	{}
+};
+
+static int __init init_fs_locks_sysctls(void)
+{
+	register_sysctl_init("fs", locks_sysctls);
+	return 0;
+}
+early_initcall(init_fs_locks_sysctls);
+#endif /* CONFIG_SYSCTL */

 /*
  * The global file_lock_list is only used for displaying /proc/locks, so we

@@ -29,7 +29,6 @@
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
-#include <linux/cleancache.h>
 #include "internal.h"

 /*
@@ -284,12 +283,6 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 		SetPageMappedToDisk(page);
 	}

-	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
-	    cleancache_get_page(page) == 0) {
-		SetPageUptodate(page);
-		goto confused;
-	}
-
 	/*
 	 * This page will go to BIO. Do we need to send this BIO off first?
 	 */

fs/namei.c
@@ -1020,10 +1020,60 @@ static inline void put_link(struct nameidata *nd)
 	path_put(&last->link);
 }

-int sysctl_protected_symlinks __read_mostly = 0;
-int sysctl_protected_hardlinks __read_mostly = 0;
-int sysctl_protected_fifos __read_mostly;
-int sysctl_protected_regular __read_mostly;
+static int sysctl_protected_symlinks __read_mostly;
+static int sysctl_protected_hardlinks __read_mostly;
+static int sysctl_protected_fifos __read_mostly;
+static int sysctl_protected_regular __read_mostly;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table namei_sysctls[] = {
+	{
+		.procname = "protected_symlinks",
+		.data = &sysctl_protected_symlinks,
+		.maxlen = sizeof(int),
+		.mode = 0600,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_ONE,
+	},
+	{
+		.procname = "protected_hardlinks",
+		.data = &sysctl_protected_hardlinks,
+		.maxlen = sizeof(int),
+		.mode = 0600,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_ONE,
+	},
+	{
+		.procname = "protected_fifos",
+		.data = &sysctl_protected_fifos,
+		.maxlen = sizeof(int),
+		.mode = 0600,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_TWO,
+	},
+	{
+		.procname = "protected_regular",
+		.data = &sysctl_protected_regular,
+		.maxlen = sizeof(int),
+		.mode = 0600,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_TWO,
+	},
+	{ }
+};
+
+static int __init init_fs_namei_sysctls(void)
+{
+	register_sysctl_init("fs", namei_sysctls);
+	return 0;
+}
+fs_initcall(init_fs_namei_sysctls);
+
+#endif /* CONFIG_SYSCTL */

 /**
  * may_follow_link - Check symlink following for unsafe situations

@@ -37,7 +37,7 @@
 #include "internal.h"

 /* Maximum number of mounts in a mount namespace */
-unsigned int sysctl_mount_max __read_mostly = 100000;
+static unsigned int sysctl_mount_max __read_mostly = 100000;

 static unsigned int m_hash_mask __read_mostly;
 static unsigned int m_hash_shift __read_mostly;
@@ -4620,3 +4620,25 @@ const struct proc_ns_operations mntns_operations = {
 	.install	= mntns_install,
 	.owner		= mntns_owner,
 };
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table fs_namespace_sysctls[] = {
+	{
+		.procname = "mount-max",
+		.data = &sysctl_mount_max,
+		.maxlen = sizeof(unsigned int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = SYSCTL_ONE,
+	},
+	{ }
+};
+
+static int __init init_fs_namespace_sysctls(void)
+{
+	register_sysctl_init("fs", fs_namespace_sysctls);
+	return 0;
+}
+fs_initcall(init_fs_namespace_sysctls);
+
+#endif /* CONFIG_SYSCTL */

@@ -19,7 +19,25 @@
 #include <linux/fdtable.h>
 #include <linux/fsnotify_backend.h>

-int dir_notify_enable __read_mostly = 1;
+static int dir_notify_enable __read_mostly = 1;
+#ifdef CONFIG_SYSCTL
+static struct ctl_table dnotify_sysctls[] = {
+	{
+		.procname = "dir-notify-enable",
+		.data = &dir_notify_enable,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec,
+	},
+	{}
+};
+static void __init dnotify_sysctl_init(void)
+{
+	register_sysctl_init("fs", dnotify_sysctls);
+}
+#else
+#define dnotify_sysctl_init() do { } while (0)
+#endif

 static struct kmem_cache *dnotify_struct_cache __read_mostly;
 static struct kmem_cache *dnotify_mark_cache __read_mostly;
@@ -386,6 +404,7 @@ static int __init dnotify_init(void)
 	dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
 	if (IS_ERR(dnotify_group))
 		panic("unable to allocate fsnotify group for dnotify\n");
+	dnotify_sysctl_init();
 	return 0;
 }

@@ -59,7 +59,7 @@ static int fanotify_max_queued_events __read_mostly;
 static long ft_zero = 0;
 static long ft_int_max = INT_MAX;

-struct ctl_table fanotify_table[] = {
+static struct ctl_table fanotify_table[] = {
 	{
 		.procname = "max_user_groups",
 		.data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS],
@@ -88,6 +88,13 @@ struct ctl_table fanotify_table[] = {
 	},
 	{ }
 };
+
+static void __init fanotify_sysctls_init(void)
+{
+	register_sysctl("fs/fanotify", fanotify_table);
+}
+#else
+#define fanotify_sysctls_init() do { } while (0)
 #endif /* CONFIG_SYSCTL */

 /*
@@ -1743,6 +1750,7 @@ static int __init fanotify_user_setup(void)
 	init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS] =
 		FANOTIFY_DEFAULT_MAX_GROUPS;
 	init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS] = max_marks;
+	fanotify_sysctls_init();

 	return 0;
 }

@@ -58,7 +58,7 @@ struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
 static long it_zero = 0;
 static long it_int_max = INT_MAX;

-struct ctl_table inotify_table[] = {
+static struct ctl_table inotify_table[] = {
 	{
 		.procname = "max_user_instances",
 		.data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
@@ -87,6 +87,14 @@ struct ctl_table inotify_table[] = {
 	},
 	{ }
 };
+
+static void __init inotify_sysctls_init(void)
+{
+	register_sysctl("fs/inotify", inotify_table);
+}
+
+#else
+#define inotify_sysctls_init() do { } while (0)
 #endif /* CONFIG_SYSCTL */

 static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg)
@@ -849,6 +857,7 @@ static int __init inotify_user_setup(void)
 	inotify_max_queued_events = 16384;
 	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
 	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = watches_max;
+	inotify_sysctls_init();

 	return 0;
 }

@@ -11,7 +11,6 @@

 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
-#include <linux/cleancache.h>
 #include <linux/fs.h>
 #include <linux/highmem.h>
 #include <linux/kernel.h>

@@ -672,31 +672,8 @@ static struct ctl_table ocfs2_mod_table[] = {
 	{ }
 };

-static struct ctl_table ocfs2_kern_table[] = {
-	{
-		.procname = "ocfs2",
-		.data = NULL,
-		.maxlen = 0,
-		.mode = 0555,
-		.child = ocfs2_mod_table
-	},
-	{ }
-};
-
-static struct ctl_table ocfs2_root_table[] = {
-	{
-		.procname = "fs",
-		.data = NULL,
-		.maxlen = 0,
-		.mode = 0555,
-		.child = ocfs2_kern_table
-	},
-	{ }
-};
-
 static struct ctl_table_header *ocfs2_table_header;
-

 /*
  * Initialization
  */
@@ -705,7 +682,7 @@ static int __init ocfs2_stack_glue_init(void)
 {
 	strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);

-	ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
+	ocfs2_table_header = register_sysctl("fs/ocfs2", ocfs2_mod_table);
 	if (!ocfs2_table_header) {
 		printk(KERN_ERR
 		       "ocfs2 stack glue: unable to register sysctl\n");

@@ -25,7 +25,6 @@
 #include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/quotaops.h>
-#include <linux/cleancache.h>
 #include <linux/signal.h>

 #define CREATE_TRACE_POINTS
@@ -2283,7 +2282,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
 		mlog_errno(status);
 		goto bail;
 	}
-	cleancache_init_shared_fs(sb);

 	osb->ocfs2_wq = alloc_ordered_workqueue("ocfs2_wq", WQ_MEM_RECLAIM);
 	if (!osb->ocfs2_wq) {

fs/pipe.c
@@ -25,6 +25,7 @@
 #include <linux/fcntl.h>
 #include <linux/memcontrol.h>
 #include <linux/watch_queue.h>
+#include <linux/sysctl.h>

 #include <linux/uaccess.h>
 #include <asm/ioctls.h>
@@ -50,13 +51,13 @@
  * The max size that a non-root user is allowed to grow the pipe. Can
  * be set by root in /proc/sys/fs/pipe-max-size
  */
-unsigned int pipe_max_size = 1048576;
+static unsigned int pipe_max_size = 1048576;

 /* Maximum allocatable pages per user. Hard limit is unset by default, soft
  * matches default values.
  */
-unsigned long pipe_user_pages_hard;
-unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
+static unsigned long pipe_user_pages_hard;
+static unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

 /*
  * We use head and tail indices that aren't masked off, except at the point of
@@ -1428,6 +1429,60 @@ static struct file_system_type pipe_fs_type = {
 	.kill_sb	= kill_anon_super,
 };

+#ifdef CONFIG_SYSCTL
+static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
+					unsigned int *valp,
+					int write, void *data)
+{
+	if (write) {
+		unsigned int val;
+
+		val = round_pipe_size(*lvalp);
+		if (val == 0)
+			return -EINVAL;
+
+		*valp = val;
+	} else {
+		unsigned int val = *valp;
+		*lvalp = (unsigned long) val;
+	}
+
+	return 0;
+}
+
+static int proc_dopipe_max_size(struct ctl_table *table, int write,
+				void *buffer, size_t *lenp, loff_t *ppos)
+{
+	return do_proc_douintvec(table, write, buffer, lenp, ppos,
+				 do_proc_dopipe_max_size_conv, NULL);
+}
+
+static struct ctl_table fs_pipe_sysctls[] = {
+	{
+		.procname = "pipe-max-size",
+		.data = &pipe_max_size,
+		.maxlen = sizeof(pipe_max_size),
+		.mode = 0644,
+		.proc_handler = proc_dopipe_max_size,
+	},
+	{
+		.procname = "pipe-user-pages-hard",
+		.data = &pipe_user_pages_hard,
+		.maxlen = sizeof(pipe_user_pages_hard),
+		.mode = 0644,
+		.proc_handler = proc_doulongvec_minmax,
+	},
+	{
+		.procname = "pipe-user-pages-soft",
+		.data = &pipe_user_pages_soft,
+		.maxlen = sizeof(pipe_user_pages_soft),
+		.mode = 0644,
+		.proc_handler = proc_doulongvec_minmax,
+	},
+	{ }
+};
+#endif
+
 static int __init init_pipe_fs(void)
 {
 	int err = register_filesystem(&pipe_fs_type);
@@ -1439,6 +1494,9 @@ static int __init init_pipe_fs(void)
 			unregister_filesystem(&pipe_fs_type);
 		}
 	}
+#ifdef CONFIG_SYSCTL
+	register_sysctl_init("fs", fs_pipe_sysctls);
+#endif
 	return err;
 }

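Note (not part of the patch): the pipe-max-size knob kept above is the ceiling that an unprivileged fcntl(F_SETPIPE_SZ) request is checked against. A minimal userspace sketch, with the 8 MiB request chosen only for illustration, would look like this:

	/* illustrative only; requests above /proc/sys/fs/pipe-max-size fail with EPERM for non-root */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];

		if (pipe(fds))
			return 1;

		if (fcntl(fds[0], F_SETPIPE_SZ, 8 * 1024 * 1024) < 0)
			perror("F_SETPIPE_SZ");
		else
			printf("pipe buffer is now %d bytes\n", fcntl(fds[0], F_GETPIPE_SZ));

		close(fds[0]);
		close(fds[1]);
		return 0;
	}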
@@ -791,12 +791,6 @@ void proc_remove(struct proc_dir_entry *de)
 }
 EXPORT_SYMBOL(proc_remove);

-void *PDE_DATA(const struct inode *inode)
-{
-	return __PDE_DATA(inode);
-}
-EXPORT_SYMBOL(PDE_DATA);
-
 /*
  * Pull a user buffer into memory and pass it to the file's write handler if
  * one is supplied. The ->write() method is permitted to modify the

@@ -650,6 +650,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 		return NULL;
 	}

+	inode->i_private = de->data;
 	inode->i_ino = de->low_ino;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	PROC_I(inode)->pde = de;

@@ -115,11 +115,6 @@ static inline struct proc_dir_entry *PDE(const struct inode *inode)
 	return PROC_I(inode)->pde;
 }

-static inline void *__PDE_DATA(const struct inode *inode)
-{
-	return PDE(inode)->data;
-}
-
 static inline struct pid *proc_pid(const struct inode *inode)
 {
 	return PROC_I(inode)->pid;

@@ -138,7 +138,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_data);
 * @parent: The parent directory in which to create.
 * @ops: The seq_file ops with which to read the file.
 * @write: The write method with which to 'modify' the file.
- * @data: Data for retrieval by PDE_DATA().
+ * @data: Data for retrieval by pde_data().
 *
 * Create a network namespaced proc file in the @parent directory with the
 * specified @name and @mode that allows reading of a file that displays a
@@ -153,7 +153,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_data);
 * modified by the @write function.  @write should return 0 on success.
 *
 * The @data value is accessible from the @show and @write functions by calling
- * PDE_DATA() on the file inode.  The network namespace must be accessed by
+ * pde_data() on the file inode.  The network namespace must be accessed by
 * calling seq_file_net() on the seq_file struct.
 */
 struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode,
@@ -230,7 +230,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_single);
 * @parent: The parent directory in which to create.
 * @show: The seqfile show method with which to read the file.
 * @write: The write method with which to 'modify' the file.
- * @data: Data for retrieval by PDE_DATA().
+ * @data: Data for retrieval by pde_data().
 *
 * Create a network-namespaced proc file in the @parent directory with the
 * specified @name and @mode that allows reading of a file that displays a
@@ -245,7 +245,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_single);
 * modified by the @write function.  @write should return 0 on success.
 *
 * The @data value is accessible from the @show and @write functions by calling
- * PDE_DATA() on the file inode.  The network namespace must be accessed by
+ * pde_data() on the file inode.  The network namespace must be accessed by
 * calling seq_file_single_net() on the seq_file struct.
 */
 struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode,

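Note (not part of the patch): the doc comments above describe the @data round-trip that every converted call site in this series relies on. A minimal sketch of a caller, with the "example_show"/"example_stats" names invented for illustration, assuming the data pointer was passed at proc_create_single_data() time:

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	struct example_stats {
		unsigned long hits;
	};

	static int example_show(struct seq_file *m, void *v)
	{
		/* @data handed to proc_create_single_data() comes back via pde_data() */
		struct example_stats *stats = pde_data(file_inode(m->file));

		seq_printf(m, "hits: %lu\n", stats->hits);
		return 0;
	}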
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/bpf-cgroup.h>
 #include <linux/mount.h>
+#include <linux/kmemleak.h>
 #include "internal.h"

 static const struct dentry_operations proc_sys_dentry_operations;
@@ -25,15 +26,32 @@ static const struct file_operations proc_sys_dir_file_operations;
 static const struct inode_operations proc_sys_dir_operations;

 /* shared constants to be used in various sysctls */
-const int sysctl_vals[] = { 0, 1, INT_MAX };
+const int sysctl_vals[] = { -1, 0, 1, 2, 4, 100, 200, 1000, 3000, INT_MAX, 65535 };
 EXPORT_SYMBOL(sysctl_vals);

+const unsigned long sysctl_long_vals[] = { 0, 1, LONG_MAX };
+EXPORT_SYMBOL_GPL(sysctl_long_vals);
+
 /* Support for permanently empty directories */

 struct ctl_table sysctl_mount_point[] = {
 	{ }
 };

+/**
+ * register_sysctl_mount_point() - registers a sysctl mount point
+ * @path: path for the mount point
+ *
+ * Used to create a permanently empty directory to serve as mount point.
+ * There are some subtle but important permission checks this allows in the
+ * case of unprivileged mounts.
+ */
+struct ctl_table_header *register_sysctl_mount_point(const char *path)
+{
+	return register_sysctl(path, sysctl_mount_point);
+}
+EXPORT_SYMBOL(register_sysctl_mount_point);
+
 static bool is_empty_dir(struct ctl_table_header *head)
 {
 	return head->ctl_table[0].child == sysctl_mount_point;
@@ -1383,6 +1401,38 @@ struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *tab
 }
 EXPORT_SYMBOL(register_sysctl);

+/**
+ * __register_sysctl_init() - register sysctl table to path
+ * @path: path name for sysctl base
+ * @table: This is the sysctl table that needs to be registered to the path
+ * @table_name: The name of sysctl table, only used for log printing when
+ *              registration fails
+ *
+ * The sysctl interface is used by userspace to query or modify at runtime
+ * a predefined value set on a variable. These variables however have default
+ * values pre-set. Code which depends on these variables will always work even
+ * if register_sysctl() fails. If register_sysctl() fails you'd just loose the
+ * ability to query or modify the sysctls dynamically at run time. Chances of
+ * register_sysctl() failing on init are extremely low, and so for both reasons
+ * this function does not return any error as it is used by initialization code.
+ *
+ * Context: Can only be called after your respective sysctl base path has been
+ * registered. So for instance, most base directories are registered early on
+ * init before init levels are processed through proc_sys_init() and
+ * sysctl_init_bases().
+ */
+void __init __register_sysctl_init(const char *path, struct ctl_table *table,
+				   const char *table_name)
+{
+	struct ctl_table_header *hdr = register_sysctl(path, table);
+
+	if (unlikely(!hdr)) {
+		pr_err("failed when register_sysctl %s to %s\n", table_name, path);
+		return;
+	}
+	kmemleak_not_leak(hdr);
+}
+
 static char *append_path(const char *path, char *pos, const char *name)
 {
 	int namelen;
@@ -1596,6 +1646,15 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table *table)
 }
 EXPORT_SYMBOL(register_sysctl_table);

+int __register_sysctl_base(struct ctl_table *base_table)
+{
+	struct ctl_table_header *hdr;
+
+	hdr = register_sysctl_table(base_table);
+	kmemleak_not_leak(hdr);
+	return 0;
+}
+
 static void put_links(struct ctl_table_header *header)
 {
 	struct ctl_table_set *root_set = &sysctl_table_root.default_set;
@@ -1709,7 +1768,7 @@ int __init proc_sys_init(void)
 	proc_sys_root->proc_dir_ops = &proc_sys_dir_file_operations;
 	proc_sys_root->nlink = 0;

-	return sysctl_init();
+	return sysctl_init_bases();
 }

 struct sysctl_alias {

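Note (not part of the patch): the kernel-doc for __register_sysctl_init() above describes the fire-and-forget registration pattern that the fs/ call sites in this series follow. A minimal sketch, with "demo_sysctls" and the "demo_value" knob being invented names for illustration:

	#include <linux/init.h>
	#include <linux/sysctl.h>

	static int demo_value;

	static struct ctl_table demo_sysctls[] = {
		{
			.procname	= "demo_value",
			.data		= &demo_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static int __init init_demo_sysctls(void)
	{
		/* no error handling needed: __register_sysctl_init() logs and continues on failure */
		register_sysctl_init("fs", demo_sysctls);
		return 0;
	}
	fs_initcall(init_demo_sysctls);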
@@ -31,7 +31,6 @@
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
 #include <linux/rculist_bl.h>
-#include <linux/cleancache.h>
 #include <linux/fscrypt.h>
 #include <linux/fsnotify.h>
 #include <linux/lockdep.h>
@@ -260,7 +259,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 	s->s_time_gran = 1000000000;
 	s->s_time_min = TIME64_MIN;
 	s->s_time_max = TIME64_MAX;
-	s->cleancache_poolid = CLEANCACHE_NO_POOL;

 	s->s_shrink.seeks = DEFAULT_SEEKS;
 	s->s_shrink.scan_objects = super_cache_scan;
@@ -330,7 +328,6 @@ void deactivate_locked_super(struct super_block *s)
 {
 	struct file_system_type *fs = s->s_type;
 	if (atomic_dec_and_test(&s->s_active)) {
-		cleancache_invalidate_fs(s);
 		unregister_shrinker(&s->s_shrink);
 		fs->kill_sb(s);

|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* /proc/sys/fs shared sysctls
|
||||
*
|
||||
* These sysctls are shared between different filesystems.
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/sysctl.h>
|
||||
|
||||
static struct ctl_table fs_shared_sysctls[] = {
|
||||
{
|
||||
.procname = "overflowuid",
|
||||
.data = &fs_overflowuid,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_minmax,
|
||||
.extra1 = SYSCTL_ZERO,
|
||||
.extra2 = SYSCTL_MAXOLDUID,
|
||||
},
|
||||
{
|
||||
.procname = "overflowgid",
|
||||
.data = &fs_overflowgid,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_minmax,
|
||||
.extra1 = SYSCTL_ZERO,
|
||||
.extra2 = SYSCTL_MAXOLDUID,
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
DECLARE_SYSCTL_BASE(fs, fs_shared_sysctls);
|
||||
|
||||
static int __init init_fs_sysctls(void)
|
||||
{
|
||||
return register_sysctl_base(fs);
|
||||
}
|
||||
|
||||
early_initcall(init_fs_sysctls);
|
|
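The new file above shows the intended pattern for base directories: declare the base with DECLARE_SYSCTL_BASE() and register it from an early initcall. As a hedged illustration only (the "example" base and every symbol below are hypothetical, not part of this diff), another subsystem could claim its own base the same way:

/* Illustrative sketch -- "example" is a made-up /proc/sys base directory. */
#include <linux/init.h>
#include <linux/sysctl.h>

static int example_enabled;

static struct ctl_table example_sysctls[] = {
        {
                .procname       = "enabled",
                .data           = &example_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        { }
};

DECLARE_SYSCTL_BASE(example, example_sysctls);

static int __init init_example_sysctls(void)
{
        /* Would surface the table under /proc/sys/example/. */
        return register_sysctl_base(example);
}
early_initcall(init_example_sysctls);

The base only needs to be registered once, before anything is attached beneath it, which is why the file above uses early_initcall().
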
@@ -20,8 +20,4 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
                                       kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */

/* for sysctl: */
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;

#endif /* __LINUX__AIO_H */

@@ -1,124 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLEANCACHE_H
#define _LINUX_CLEANCACHE_H

#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>

#define CLEANCACHE_NO_POOL              -1
#define CLEANCACHE_NO_BACKEND           -2
#define CLEANCACHE_NO_BACKEND_SHARED    -3

#define CLEANCACHE_KEY_MAX 6

/*
 * cleancache requires every file with a page in cleancache to have a
 * unique key unless/until the file is removed/truncated. For some
 * filesystems, the inode number is unique, but for "modern" filesystems
 * an exportable filehandle is required (see exportfs.h)
 */
struct cleancache_filekey {
        union {
                ino_t ino;
                __u32 fh[CLEANCACHE_KEY_MAX];
                u32 key[CLEANCACHE_KEY_MAX];
        } u;
};

struct cleancache_ops {
        int (*init_fs)(size_t);
        int (*init_shared_fs)(uuid_t *uuid, size_t);
        int (*get_page)(int, struct cleancache_filekey,
                        pgoff_t, struct page *);
        void (*put_page)(int, struct cleancache_filekey,
                         pgoff_t, struct page *);
        void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
        void (*invalidate_inode)(int, struct cleancache_filekey);
        void (*invalidate_fs)(int);
};

extern int cleancache_register_ops(const struct cleancache_ops *ops);
extern void __cleancache_init_fs(struct super_block *);
extern void __cleancache_init_shared_fs(struct super_block *);
extern int __cleancache_get_page(struct page *);
extern void __cleancache_put_page(struct page *);
extern void __cleancache_invalidate_page(struct address_space *, struct page *);
extern void __cleancache_invalidate_inode(struct address_space *);
extern void __cleancache_invalidate_fs(struct super_block *);

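This entire header is deleted by the series. For readers unfamiliar with the interface being retired, here is a rough backend skeleton (hypothetical names; no such backend exists in the tree) showing how an implementation used to plug in through cleancache_register_ops():

/* Hypothetical backend skeleton for the removed API -- illustration only. */
static int example_init_fs(size_t pagesize)
{
        return 0;       /* return a pool id >= 0, or a CLEANCACHE_NO_* code */
}

static int example_get_page(int pool, struct cleancache_filekey key,
                            pgoff_t index, struct page *page)
{
        return -1;      /* -1 = miss; 0 = page data copied into @page */
}

static void example_put_page(int pool, struct cleancache_filekey key,
                             pgoff_t index, struct page *page)
{
        /* Copy the clean page into backend-owned memory, or silently drop it. */
}

static const struct cleancache_ops example_cleancache_ops = {
        .init_fs        = example_init_fs,
        .get_page       = example_get_page,
        .put_page       = example_put_page,
        /* the invalidate_* hooks would be wired up the same way */
};

static int __init example_cleancache_backend_init(void)
{
        return cleancache_register_ops(&example_cleancache_ops);
}

Since get_page can fail at any time, frontend callers treat a miss as the normal case and fall back to disk I/O.
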
#ifdef CONFIG_CLEANCACHE
#define cleancache_enabled (1)
static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
{
        return mapping->host->i_sb->cleancache_poolid >= 0;
}
static inline bool cleancache_fs_enabled(struct page *page)
{
        return cleancache_fs_enabled_mapping(page->mapping);
}
#else
#define cleancache_enabled (0)
#define cleancache_fs_enabled(_page) (0)
#define cleancache_fs_enabled_mapping(_page) (0)
#endif

/*
 * The shim layer provided by these inline functions allows the compiler
 * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
 * is disabled, to a single global variable check if CONFIG_CLEANCACHE
 * is enabled but no cleancache "backend" has dynamically enabled it,
 * and, for the most frequent cleancache ops, to a single global variable
 * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
 * and a cleancache backend has dynamically enabled cleancache, but the
 * filesystem referenced by that cleancache op has not enabled cleancache.
 * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
 * no measurable performance impact.
 */

static inline void cleancache_init_fs(struct super_block *sb)
{
        if (cleancache_enabled)
                __cleancache_init_fs(sb);
}

static inline void cleancache_init_shared_fs(struct super_block *sb)
{
        if (cleancache_enabled)
                __cleancache_init_shared_fs(sb);
}

static inline int cleancache_get_page(struct page *page)
{
        if (cleancache_enabled && cleancache_fs_enabled(page))
                return __cleancache_get_page(page);
        return -1;
}

static inline void cleancache_put_page(struct page *page)
{
        if (cleancache_enabled && cleancache_fs_enabled(page))
                __cleancache_put_page(page);
}

static inline void cleancache_invalidate_page(struct address_space *mapping,
                                              struct page *page)
{
        /* careful... page->mapping is NULL sometimes when this is called */
        if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
                __cleancache_invalidate_page(mapping, page);
}

static inline void cleancache_invalidate_inode(struct address_space *mapping)
{
        if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
                __cleancache_invalidate_inode(mapping);
}

static inline void cleancache_invalidate_fs(struct super_block *sb)
{
        if (cleancache_enabled)
                __cleancache_invalidate_fs(sb);
}

#endif /* _LINUX_CLEANCACHE_H */

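Before the removal, the wrappers above were called from generic filesystem read paths. A purely illustrative call-site sketch (example_readpage and example_block_read_full_page are made-up names) of how a read path consulted cleancache before issuing real I/O:

/* Illustration of the removed hooks in use -- not code from this series. */
static int example_readpage(struct file *file, struct page *page)
{
        if (cleancache_get_page(page) == 0) {
                /* Hit: the backend copied the data in, no disk access needed. */
                SetPageUptodate(page);
                unlock_page(page);
                return 0;
        }

        /* Miss: fall back to the filesystem's normal block I/O path. */
        return example_block_read_full_page(page);
}

With no backend registered, cleancache_fs_enabled() stays false and the branch above is nearly free, which is what the shim-layer comment in the removed header describes.
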
@@ -14,10 +14,6 @@ struct core_vma_metadata {
        unsigned long dump_size;
};

extern int core_uses_pid;
extern char core_pattern[];
extern unsigned int core_pipe_limit;

/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.

@@ -37,4 +33,10 @@ extern void do_coredump(const kernel_siginfo_t *siginfo);
static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
#endif

#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
extern void validate_coredump_safety(void);
#else
static inline void validate_coredump_safety(void) {}
#endif

#endif /* _LINUX_COREDUMP_H */

@@ -61,16 +61,6 @@ extern const struct qstr empty_name;
extern const struct qstr slash_name;
extern const struct qstr dotdot_name;

struct dentry_stat_t {
        long nr_dentry;
        long nr_unused;
        long age_limit;         /* age in seconds */
        long want_pages;        /* pages requested by system */
        long nr_negative;       /* # of unused negative dentries */
        long dummy;             /* Reserved for future use */
};
extern struct dentry_stat_t dentry_stat;

/*
 * Try to keep struct dentry aligned on 64 byte cachelines (this will
 * give reasonable cacheline footprint with larger lines without the

@@ -29,7 +29,6 @@ struct dnotify_struct {
                             FS_CREATE | FS_RENAME |\
                             FS_MOVED_FROM | FS_MOVED_TO)

extern int dir_notify_enable;
extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long);

@@ -5,8 +5,6 @@
#include <linux/sysctl.h>
#include <uapi/linux/fanotify.h>

extern struct ctl_table fanotify_table[]; /* for sysctl */

#define FAN_GROUP_FLAG(group, flag) \
        ((group)->fanotify_data.flags & (flag))