From 5b735eb1ce481b2f1674a47c0995944b1cb6f5d5 Mon Sep 17 00:00:00 2001 From: Andrea Parri Date: Mon, 3 Dec 2018 15:04:49 -0800 Subject: [PATCH 01/71] tools/memory-model: Model smp_mb__after_unlock_lock() The kernel documents smp_mb__after_unlock_lock() the following way: "Place this after a lock-acquisition primitive to guarantee that an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies if the UNLOCK and LOCK are executed by the same CPU or if the UNLOCK and LOCK operate on the same lock variable." Formalize in LKMM the above guarantee by defining (new) mb-links according to the law: ([M] ; po ; [UL] ; (co | po) ; [LKW] ; fencerel(After-unlock-lock) ; [M]) where the component ([UL] ; co ; [LKW]) identifies "UNLOCK+LOCK pairs on the same lock variable" and the component ([UL] ; po ; [LKW]) identifies "UNLOCK+LOCK pairs executed by the same CPU". In particular, the LKMM forbids the following two behaviors (the second litmus test below is based on: Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html c.f., Section "Tree RCU Grace Period Memory Ordering Building Blocks"): C after-unlock-lock-same-cpu (* * Result: Never *) {} P0(spinlock_t *s, spinlock_t *t, int *x, int *y) { int r0; spin_lock(s); WRITE_ONCE(*x, 1); spin_unlock(s); spin_lock(t); smp_mb__after_unlock_lock(); r0 = READ_ONCE(*y); spin_unlock(t); } P1(int *x, int *y) { int r0; WRITE_ONCE(*y, 1); smp_mb(); r0 = READ_ONCE(*x); } exists (0:r0=0 /\ 1:r0=0) C after-unlock-lock-same-lock-variable (* * Result: Never *) {} P0(spinlock_t *s, int *x, int *y) { int r0; spin_lock(s); WRITE_ONCE(*x, 1); r0 = READ_ONCE(*y); spin_unlock(s); } P1(spinlock_t *s, int *y, int *z) { int r0; spin_lock(s); smp_mb__after_unlock_lock(); WRITE_ONCE(*y, 1); r0 = READ_ONCE(*z); spin_unlock(s); } P2(int *z, int *x) { int r0; WRITE_ONCE(*z, 1); smp_mb(); r0 = READ_ONCE(*x); } exists (0:r0=0 /\ 1:r0=0 /\ 2:r0=0) Signed-off-by: Andrea Parri Signed-off-by: Paul E. McKenney Cc: Akira Yokosawa Cc: Alan Stern Cc: Boqun Feng Cc: Daniel Lustig Cc: David Howells Cc: Jade Alglave Cc: Linus Torvalds Cc: Luc Maranget Cc: Nicholas Piggin Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Cc: linux-arch@vger.kernel.org Cc: parri.andrea@gmail.com Link: http://lkml.kernel.org/r/20181203230451.28921-1-paulmck@linux.ibm.com Signed-off-by: Ingo Molnar --- tools/memory-model/linux-kernel.bell | 3 ++- tools/memory-model/linux-kernel.cat | 4 +++- tools/memory-model/linux-kernel.def | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell index b84fb2f67109..796513362c05 100644 --- a/tools/memory-model/linux-kernel.bell +++ b/tools/memory-model/linux-kernel.bell @@ -29,7 +29,8 @@ enum Barriers = 'wmb (*smp_wmb*) || 'sync-rcu (*synchronize_rcu*) || 'before-atomic (*smp_mb__before_atomic*) || 'after-atomic (*smp_mb__after_atomic*) || - 'after-spinlock (*smp_mb__after_spinlock*) + 'after-spinlock (*smp_mb__after_spinlock*) || + 'after-unlock-lock (*smp_mb__after_unlock_lock*) instructions F[Barriers] (* Compute matching pairs of nested Rcu-lock and Rcu-unlock *) diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat index 882fc33274ac..8f23c74a96fd 100644 --- a/tools/memory-model/linux-kernel.cat +++ b/tools/memory-model/linux-kernel.cat @@ -30,7 +30,9 @@ let wmb = [W] ; fencerel(Wmb) ; [W] let mb = ([M] ; fencerel(Mb) ; [M]) | ([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) | ([M] ; po? 
; [RMW] ; fencerel(After-atomic) ; [M]) | - ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) + ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) | + ([M] ; po ; [UL] ; (co | po) ; [LKW] ; + fencerel(After-unlock-lock) ; [M]) let gp = po ; [Sync-rcu] ; po? let strong-fence = mb | gp diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def index 6fa3eb28d40b..b27911cc087d 100644 --- a/tools/memory-model/linux-kernel.def +++ b/tools/memory-model/linux-kernel.def @@ -23,6 +23,7 @@ smp_wmb() { __fence{wmb}; } smp_mb__before_atomic() { __fence{before-atomic}; } smp_mb__after_atomic() { __fence{after-atomic}; } smp_mb__after_spinlock() { __fence{after-spinlock}; } +smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; } // Exchange xchg(X,V) __xchg{mb}(X,V) From b02eb5b0961a06561b89f5b7f0dd171b750e5789 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 3 Dec 2018 15:04:50 -0800 Subject: [PATCH 02/71] tools/memory-model: Add scripts to check github litmus tests The https://github.com/paulmckrcu/litmus repository contains a large number of C-language litmus tests that include "Result:" comments predicting the verification result. This commit adds a number of scripts that run tests on these litmus tests: checkghlitmus.sh: Runs all litmus tests in the https://github.com/paulmckrcu/litmus archive that are C-language and that have "Result:" comment lines documenting expected results, comparing the actual results to those expected. Clones the repository if it has not already been cloned into the "tools/memory-model/litmus" directory. initlitmushist.sh Run all litmus tests having no more than the specified number of processes given a specified timeout, recording the results in .litmus.out files. Clones the repository if it has not already been cloned into the "tools/memory-model/litmus" directory. newlitmushist.sh For all new or updated litmus tests having no more than the specified number of processes given a specified timeout, run and record the results in .litmus.out files. checklitmushist.sh Run all litmus tests having .litmus.out files from previous initlitmushist.sh or newlitmushist.sh runs, comparing the herd output to that of the original runs. The above scripts will run litmus tests concurrently, by default with one job per available CPU. Giving any of these scripts the --help argument will cause them to print usage information. This commit also adds a number of helper scripts that are not intended to be invoked from the command line: cmplitmushist.sh: Compare the output of two different runs of the same litmus test. judgelitmus.sh: Compare the output of a litmus test to its "Result:" comment line. parseargs.sh: Parse command-line arguments. runlitmushist.sh: Run the litmus tests whose pathnames are provided one per line on standard input. While in the area, this commit also makes the existing checklitmus.sh and checkalllitmus.sh scripts use parseargs.sh in order to provide a bit of uniformity. In addition, per-litmus-test status output is directed to stdout, while end-of-test summary information is directed to stderr. Finally, the error flag standardizes on "!!!" to assist those familiar with rcutorture output. The defaults for the parseargs.sh arguments may be overridden by using environment variables: LKMM_DESTDIR for --destdir, LKMM_HERD_OPTIONS for --herdoptions, LKMM_JOBS for --jobs, LKMM_PROCS for --procs, and LKMM_TIMEOUT for --timeout. [ paulmck: History-check summary-line changes per Alan Stern feedback. ] Signed-off-by: Paul E. 
McKenney Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akiyks@gmail.com Cc: boqun.feng@gmail.com Cc: dhowells@redhat.com Cc: j.alglave@ucl.ac.uk Cc: linux-arch@vger.kernel.org Cc: luc.maranget@inria.fr Cc: npiggin@gmail.com Cc: parri.andrea@gmail.com Cc: stern@rowland.harvard.edu Cc: will.deacon@arm.com Link: http://lkml.kernel.org/r/20181203230451.28921-2-paulmck@linux.ibm.com Signed-off-by: Ingo Molnar --- tools/memory-model/.gitignore | 1 + tools/memory-model/README | 2 + tools/memory-model/scripts/README | 70 ++++++++++ tools/memory-model/scripts/checkalllitmus.sh | 53 ++++---- tools/memory-model/scripts/checkghlitmus.sh | 65 +++++++++ tools/memory-model/scripts/checklitmus.sh | 74 ++-------- tools/memory-model/scripts/checklitmushist.sh | 60 +++++++++ tools/memory-model/scripts/cmplitmushist.sh | 87 ++++++++++++ tools/memory-model/scripts/initlitmushist.sh | 68 ++++++++++ tools/memory-model/scripts/judgelitmus.sh | 78 +++++++++++ tools/memory-model/scripts/newlitmushist.sh | 61 +++++++++ tools/memory-model/scripts/parseargs.sh | 126 ++++++++++++++++++ tools/memory-model/scripts/runlitmushist.sh | 87 ++++++++++++ 13 files changed, 739 insertions(+), 93 deletions(-) create mode 100644 tools/memory-model/.gitignore create mode 100644 tools/memory-model/scripts/README create mode 100644 tools/memory-model/scripts/checkghlitmus.sh create mode 100644 tools/memory-model/scripts/checklitmushist.sh create mode 100644 tools/memory-model/scripts/cmplitmushist.sh create mode 100644 tools/memory-model/scripts/initlitmushist.sh create mode 100644 tools/memory-model/scripts/judgelitmus.sh create mode 100644 tools/memory-model/scripts/newlitmushist.sh create mode 100644 tools/memory-model/scripts/parseargs.sh create mode 100644 tools/memory-model/scripts/runlitmushist.sh diff --git a/tools/memory-model/.gitignore b/tools/memory-model/.gitignore new file mode 100644 index 000000000000..b1d34c52f3c3 --- /dev/null +++ b/tools/memory-model/.gitignore @@ -0,0 +1 @@ +litmus diff --git a/tools/memory-model/README b/tools/memory-model/README index acf9077cffaa..0f2c366518c6 100644 --- a/tools/memory-model/README +++ b/tools/memory-model/README @@ -156,6 +156,8 @@ lock.cat README This file. +scripts Various scripts, see scripts/README. + =========== LIMITATIONS diff --git a/tools/memory-model/scripts/README b/tools/memory-model/scripts/README new file mode 100644 index 000000000000..29375a1fbbfa --- /dev/null +++ b/tools/memory-model/scripts/README @@ -0,0 +1,70 @@ + ============ + LKMM SCRIPTS + ============ + + +These scripts are run from the tools/memory-model directory. + +checkalllitmus.sh + + Run all litmus tests in the litmus-tests directory, checking + the results against the expected results recorded in the + "Result:" comment lines. + +checkghlitmus.sh + + Run all litmus tests in the https://github.com/paulmckrcu/litmus + archive that are C-language and that have "Result:" comment lines + documenting expected results, comparing the actual results to + those expected. + +checklitmushist.sh + + Run all litmus tests having .litmus.out files from previous + initlitmushist.sh or newlitmushist.sh runs, comparing the + herd output to that of the original runs. + +checklitmus.sh + + Check a single litmus test against its "Result:" expected result. + +cmplitmushist.sh + + Compare output from two different runs of the same litmus tests, + with the absolute pathnames of the tests to run provided one + name per line on standard input. 
Not normally run manually, + provided instead for use by other scripts. + +initlitmushist.sh + + Run all litmus tests having no more than the specified number + of processes given a specified timeout, recording the results + in .litmus.out files. + +judgelitmus.sh + + Given a .litmus file and its .litmus.out herd output, check the + .litmus.out file against the .litmus file's "Result:" comment to + judge whether the test ran correctly. Not normally run manually, + provided instead for use by other scripts. + +newlitmushist.sh + + For all new or updated litmus tests having no more than the + specified number of processes given a specified timeout, run + and record the results in .litmus.out files. + +parseargs.sh + + Parse command-line arguments. Not normally run manually, + provided instead for use by other scripts. + +runlitmushist.sh + + Run the litmus tests whose absolute pathnames are provided one + name per line on standard input. Not normally run manually, + provided instead for use by other scripts. + +README + + This file diff --git a/tools/memory-model/scripts/checkalllitmus.sh b/tools/memory-model/scripts/checkalllitmus.sh index ca528f9a24d4..b35fcd61ecf6 100755 --- a/tools/memory-model/scripts/checkalllitmus.sh +++ b/tools/memory-model/scripts/checkalllitmus.sh @@ -1,42 +1,27 @@ #!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ # -# Run herd tests on all .litmus files in the specified directory (which -# defaults to litmus-tests) and check each file's result against a "Result:" -# comment within that litmus test. If the verification result does not -# match that specified in the litmus test, this script prints an error -# message prefixed with "^^^". It also outputs verification results to -# a file whose name is that of the specified litmus test, but with ".out" -# appended. +# Run herd tests on all .litmus files in the litmus-tests directory +# and check each file's result against a "Result:" comment within that +# litmus test. If the verification result does not match that specified +# in the litmus test, this script prints an error message prefixed with +# "^^^". It also outputs verification results to a file whose name is +# that of the specified litmus test, but with ".out" appended. # # Usage: -# checkalllitmus.sh [ directory ] +# checkalllitmus.sh # -# The LINUX_HERD_OPTIONS environment variable may be used to specify -# arguments to herd, whose default is defined by the checklitmus.sh script. -# Thus, one would normally run this in the directory containing the memory -# model, specifying the pathname of the litmus test to check. +# Run this in the directory containing the memory model. # # This script makes no attempt to run the litmus tests concurrently. # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright IBM Corporation, 2018 # # Author: Paul E. McKenney -litmusdir=${1-litmus-tests} +. 
scripts/parseargs.sh + +litmusdir=litmus-tests if test -d "$litmusdir" -a -r "$litmusdir" -a -x "$litmusdir" then : @@ -45,6 +30,14 @@ else exit 255 fi +# Create any new directories that have appeared in the github litmus +# repo since the last run. +if test "$LKMM_DESTDIR" != "." +then + find $litmusdir -type d -print | + ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh ) +fi + # Find the checklitmus script. If it is not where we expect it, then # assume that the caller has the PATH environment variable set # appropriately. @@ -57,7 +50,7 @@ fi # Run the script on all the litmus tests in the specified directory ret=0 -for i in litmus-tests/*.litmus +for i in $litmusdir/*.litmus do if ! $clscript $i then @@ -66,8 +59,8 @@ do done if test "$ret" -ne 0 then - echo " ^^^ VERIFICATION MISMATCHES" + echo " ^^^ VERIFICATION MISMATCHES" 1>&2 else - echo All litmus tests verified as was expected. + echo All litmus tests verified as was expected. 1>&2 fi exit $ret diff --git a/tools/memory-model/scripts/checkghlitmus.sh b/tools/memory-model/scripts/checkghlitmus.sh new file mode 100644 index 000000000000..6589fbb6f653 --- /dev/null +++ b/tools/memory-model/scripts/checkghlitmus.sh @@ -0,0 +1,65 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Runs the C-language litmus tests having a maximum number of processes +# to run, defaults to 6. +# +# sh checkghlitmus.sh +# +# Run from the Linux kernel tools/memory-model directory. See the +# parseargs.sh scripts for arguments. + +. scripts/parseargs.sh + +T=/tmp/checkghlitmus.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +# Clone the repository if it is not already present. +if test -d litmus +then + : +else + git clone https://github.com/paulmckrcu/litmus + ( cd litmus; git checkout origin/master ) +fi + +# Create any new directories that have appeared in the github litmus +# repo since the last run. +if test "$LKMM_DESTDIR" != "." +then + find litmus -type d -print | + ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh ) +fi + +# Create a list of the C-language litmus tests previously run. +( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) | + sed -e 's/\.out$//' | + xargs -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' | + xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already + +# Create a list of C-language litmus tests with "Result:" commands and +# no more than the specified number of processes. +find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C +xargs < $T/list-C -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' > $T/list-C-result +xargs < $T/list-C-result -r grep -L "^P${LKMM_PROCS}" > $T/list-C-result-short + +# Form list of tests without corresponding .litmus.out files +sort $T/list-C-already $T/list-C-result-short | uniq -u > $T/list-C-needed + +# Run any needed tests. +if scripts/runlitmushist.sh < $T/list-C-needed > $T/run.stdout 2> $T/run.stderr +then + errs= +else + errs=1 +fi + +sed < $T/list-C-result-short -e 's,^,scripts/judgelitmus.sh ,' | + sh > $T/judge.stdout 2> $T/judge.stderr + +if test -n "$errs" +then + cat $T/run.stderr 1>&2 +fi +grep '!!!' $T/judge.stdout diff --git a/tools/memory-model/scripts/checklitmus.sh b/tools/memory-model/scripts/checklitmus.sh index bf12a75c0719..dd08801a30b0 100755 --- a/tools/memory-model/scripts/checklitmus.sh +++ b/tools/memory-model/scripts/checklitmus.sh @@ -1,40 +1,24 @@ #!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ # -# Run a herd test and check the result against a "Result:" comment within -# the litmus test. 
If the verification result does not match that specified -# in the litmus test, this script prints an error message prefixed with -# "^^^" and exits with a non-zero status. It also outputs verification +# Run a herd test and invokes judgelitmus.sh to check the result against +# a "Result:" comment within the litmus test. It also outputs verification # results to a file whose name is that of the specified litmus test, but # with ".out" appended. # # Usage: # checklitmus.sh file.litmus # -# The LINUX_HERD_OPTIONS environment variable may be used to specify -# arguments to herd, which default to "-conf linux-kernel.cfg". Thus, -# one would normally run this in the directory containing the memory model, -# specifying the pathname of the litmus test to check. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. +# Run this in the directory containing the memory model, specifying the +# pathname of the litmus test to check. The caller is expected to have +# properly set up the LKMM environment variables. # # Copyright IBM Corporation, 2018 # # Author: Paul E. McKenney litmus=$1 -herdoptions=${LINUX_HERD_OPTIONS--conf linux-kernel.cfg} +herdoptions=${LKMM_HERD_OPTIONS--conf linux-kernel.cfg} if test -f "$litmus" -a -r "$litmus" then @@ -43,44 +27,8 @@ else echo ' --- ' error: \"$litmus\" is not a readable file exit 255 fi -if grep -q '^ \* Result: ' $litmus -then - outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'` -else - outcome=specified -fi -echo Herd options: $herdoptions > $litmus.out -/usr/bin/time herd7 -o ~/tmp $herdoptions $litmus >> $litmus.out 2>&1 -grep "Herd options:" $litmus.out -grep '^Observation' $litmus.out -if grep -q '^Observation' $litmus.out -then - : -else - cat $litmus.out - echo ' ^^^ Verification error' - echo ' ^^^ Verification error' >> $litmus.out 2>&1 - exit 255 -fi -if test "$outcome" = DEADLOCK -then - echo grep 3 and 4 - if grep '^Observation' $litmus.out | grep -q 'Never 0 0$' - then - ret=0 - else - echo " ^^^ Unexpected non-$outcome verification" - echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1 - ret=1 - fi -elif grep '^Observation' $litmus.out | grep -q $outcome || test "$outcome" = Maybe -then - ret=0 -else - echo " ^^^ Unexpected non-$outcome verification" - echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1 - ret=1 -fi -tail -2 $litmus.out | head -1 -exit $ret +echo Herd options: $herdoptions > $LKMM_DESTDIR/$litmus.out +/usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $litmus >> $LKMM_DESTDIR/$litmus.out 2>&1 + +scripts/judgelitmus.sh $litmus diff --git a/tools/memory-model/scripts/checklitmushist.sh b/tools/memory-model/scripts/checklitmushist.sh new file mode 100644 index 000000000000..1d210ffb7c8a --- /dev/null +++ b/tools/memory-model/scripts/checklitmushist.sh @@ -0,0 +1,60 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Reruns the C-language litmus tests previously 
run that match the +# specified criteria, and compares the result to that of the previous +# runs from initlitmushist.sh and/or newlitmushist.sh. +# +# sh checklitmushist.sh +# +# Run from the Linux kernel tools/memory-model directory. +# See scripts/parseargs.sh for list of arguments. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney + +. scripts/parseargs.sh + +T=/tmp/checklitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +if test -d litmus +then + : +else + echo Run scripts/initlitmushist.sh first, need litmus repo. + exit 1 +fi + +# Create the results directory and populate it with subdirectories. +# The initial output is created here to avoid clobbering the output +# generated earlier. +mkdir $T/results +find litmus -type d -print | ( cd $T/results; sed -e 's/^/mkdir -p /' | sh ) + +# Create the list of litmus tests already run, then remove those that +# are excluded by this run's --procs argument. +( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) | + sed -e 's/\.out$//' | + xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already +xargs < $T/list-C-already -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short + +# Redirect output, run tests, then restore destination directory. +destdir="$LKMM_DESTDIR" +LKMM_DESTDIR=$T/results; export LKMM_DESTDIR +scripts/runlitmushist.sh < $T/list-C-short > $T/runlitmushist.sh.out 2>&1 +LKMM_DESTDIR="$destdir"; export LKMM_DESTDIR + +# Move the newly generated .litmus.out files to .litmus.out.new files +# in the destination directory. +cdir=`pwd` +ddir=`awk -v c="$cdir" -v d="$LKMM_DESTDIR" \ + 'END { if (d ~ /^\//) print d; else print c "/" d; }' < /dev/null` +( cd $T/results; find litmus -type f -name '*.litmus.out' -print | + sed -e 's,^.*$,cp & '"$ddir"'/&.new,' | sh ) + +sed < $T/list-C-short -e 's,^,'"$LKMM_DESTDIR/"',' | + sh scripts/cmplitmushist.sh +exit $? diff --git a/tools/memory-model/scripts/cmplitmushist.sh b/tools/memory-model/scripts/cmplitmushist.sh new file mode 100644 index 000000000000..0f498aeeccf5 --- /dev/null +++ b/tools/memory-model/scripts/cmplitmushist.sh @@ -0,0 +1,87 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Compares .out and .out.new files for each name on standard input, +# one full pathname per line. Outputs comparison results followed by +# a summary. +# +# sh cmplitmushist.sh + +T=/tmp/cmplitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +# comparetest oldpath newpath +perfect=0 +obsline=0 +noobsline=0 +obsresult=0 +badcompare=0 +comparetest () { + grep -v 'maxresident)k\|minor)pagefaults\|^Time' $1 > $T/oldout + grep -v 'maxresident)k\|minor)pagefaults\|^Time' $2 > $T/newout + if cmp -s $T/oldout $T/newout && grep -q '^Observation' $1 + then + echo Exact output match: $2 + perfect=`expr "$perfect" + 1` + return 0 + fi + + grep '^Observation' $1 > $T/oldout + grep '^Observation' $2 > $T/newout + if test -s $T/oldout -o -s $T/newout + then + if cmp -s $T/oldout $T/newout + then + echo Matching Observation result and counts: $2 + obsline=`expr "$obsline" + 1` + return 0 + fi + else + echo Missing Observation line "(e.g., herd7 timeout)": $2 + noobsline=`expr "$noobsline" + 1` + return 0 + fi + + grep '^Observation' $1 | awk '{ print $3 }' > $T/oldout + grep '^Observation' $2 | awk '{ print $3 }' > $T/newout + if cmp -s $T/oldout $T/newout + then + echo Matching Observation Always/Sometimes/Never result: $2 + obsresult=`expr "$obsresult" + 1` + return 0 + fi + echo ' !!!' 
Result changed: $2 + badcompare=`expr "$badcompare" + 1` + return 1 +} + +sed -e 's/^.*$/comparetest &.out &.out.new/' > $T/cmpscript +. $T/cmpscript > $T/cmpscript.out +cat $T/cmpscript.out + +echo ' ---' Summary: 1>&2 +grep '!!!' $T/cmpscript.out 1>&2 +if test "$perfect" -ne 0 +then + echo Exact output matches: $perfect 1>&2 +fi +if test "$obsline" -ne 0 +then + echo Matching Observation result and counts: $obsline 1>&2 +fi +if test "$noobsline" -ne 0 +then + echo Missing Observation line "(e.g., herd7 timeout)": $noobsline 1>&2 +fi +if test "$obsresult" -ne 0 +then + echo Matching Observation Always/Sometimes/Never result: $obsresult 1>&2 +fi +if test "$badcompare" -ne 0 +then + echo "!!!" Result changed: $badcompare 1>&2 + exit 1 +fi + +exit 0 diff --git a/tools/memory-model/scripts/initlitmushist.sh b/tools/memory-model/scripts/initlitmushist.sh new file mode 100644 index 000000000000..956b6957484d --- /dev/null +++ b/tools/memory-model/scripts/initlitmushist.sh @@ -0,0 +1,68 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Runs the C-language litmus tests matching the specified criteria. +# Generates the output for each .litmus file into a corresponding +# .litmus.out file, and does not judge the result. +# +# sh initlitmushist.sh +# +# Run from the Linux kernel tools/memory-model directory. +# See scripts/parseargs.sh for list of arguments. +# +# This script can consume significant wallclock time and CPU, especially as +# the value of --procs rises. On a four-core (eight hardware threads) +# 2.5GHz x86 with a one-minute per-run timeout: +# +# --procs wallclock CPU timeouts tests +# 1 0m11.241s 0m1.086s 0 19 +# 2 1m12.598s 2m8.459s 2 393 +# 3 1m30.007s 6m2.479s 4 2291 +# 4 3m26.042s 18m5.139s 9 3217 +# 5 4m26.661s 23m54.128s 13 3784 +# 6 4m41.900s 26m4.721s 13 4352 +# 7 5m51.463s 35m50.868s 13 4626 +# 8 10m5.235s 68m43.672s 34 5117 +# 9 15m57.80s 105m58.101s 69 5156 +# 10 16m14.13s 103m35.009s 69 5165 +# 20 27m48.55s 198m3.286s 156 5269 +# +# Increasing the timeout on the 20-process run to five minutes increases +# the runtime to about 90 minutes with the CPU time rising to about +# 10 hours. On the other hand, it decreases the number of timeouts to 101. +# +# Note that there are historical tests for which herd7 will fail +# completely, for example, litmus/manual/atomic/C-unlock-wait-00.litmus +# contains a call to spin_unlock_wait(), which no longer exists in either +# the kernel or LKMM. + +. scripts/parseargs.sh + +T=/tmp/initlitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +if test -d litmus +then + : +else + git clone https://github.com/paulmckrcu/litmus + ( cd litmus; git checkout origin/master ) +fi + +# Create any new directories that have appeared in the github litmus +# repo since the last run. +if test "$LKMM_DESTDIR" != "." +then + find litmus -type d -print | + ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh ) +fi + +# Create a list of the C-language litmus tests with no more than the +# specified number of processes (per the --procs argument). 
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C +xargs < $T/list-C -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short + +scripts/runlitmushist.sh < $T/list-C-short + +exit 0 diff --git a/tools/memory-model/scripts/judgelitmus.sh b/tools/memory-model/scripts/judgelitmus.sh new file mode 100644 index 000000000000..0cc63875e395 --- /dev/null +++ b/tools/memory-model/scripts/judgelitmus.sh @@ -0,0 +1,78 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Given a .litmus test and the corresponding .litmus.out file, check +# the .litmus.out file against the "Result:" comment to judge whether +# the test ran correctly. +# +# Usage: +# judgelitmus.sh file.litmus +# +# Run this in the directory containing the memory model, specifying the +# pathname of the litmus test to check. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney + +litmus=$1 + +if test -f "$litmus" -a -r "$litmus" +then + : +else + echo ' --- ' error: \"$litmus\" is not a readable file + exit 255 +fi +if test -f "$LKMM_DESTDIR/$litmus".out -a -r "$LKMM_DESTDIR/$litmus".out +then + : +else + echo ' --- ' error: \"$LKMM_DESTDIR/$litmus\".out is not a readable file + exit 255 +fi +if grep -q '^ \* Result: ' $litmus +then + outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'` +else + outcome=specified +fi + +grep '^Observation' $LKMM_DESTDIR/$litmus.out +if grep -q '^Observation' $LKMM_DESTDIR/$litmus.out +then + : +else + echo ' !!! Verification error' $litmus + if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out + then + echo ' !!! Verification error' >> $LKMM_DESTDIR/$litmus.out 2>&1 + fi + exit 255 +fi +if test "$outcome" = DEADLOCK +then + if grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q 'Never 0 0$' + then + ret=0 + else + echo " !!! Unexpected non-$outcome verification" $litmus + if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out + then + echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1 + fi + ret=1 + fi +elif grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q $outcome || test "$outcome" = Maybe +then + ret=0 +else + echo " !!! Unexpected non-$outcome verification" $litmus + if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out + then + echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1 + fi + ret=1 +fi +tail -2 $LKMM_DESTDIR/$litmus.out | head -1 +exit $ret diff --git a/tools/memory-model/scripts/newlitmushist.sh b/tools/memory-model/scripts/newlitmushist.sh new file mode 100644 index 000000000000..991f8f814881 --- /dev/null +++ b/tools/memory-model/scripts/newlitmushist.sh @@ -0,0 +1,61 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Runs the C-language litmus tests matching the specified criteria +# that do not already have a corresponding .litmus.out file, and does +# not judge the result. +# +# sh newlitmushist.sh +# +# Run from the Linux kernel tools/memory-model directory. +# See scripts/parseargs.sh for list of arguments. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney + +. scripts/parseargs.sh + +T=/tmp/newlitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +if test -d litmus +then + : +else + echo Run scripts/initlitmushist.sh first, need litmus repo. + exit 1 +fi + +# Create any new directories that have appeared in the github litmus +# repo since the last run. +if test "$LKMM_DESTDIR" != "." +then + find litmus -type d -print | + ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh ) +fi + +# Create a list of the C-language litmus tests previously run. 
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) | + sed -e 's/\.out$//' | + xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already + +# Form full list of litmus tests with no more than the specified +# number of processes (per the --procs argument). +find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C-all +xargs < $T/list-C-all -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short + +# Form list of new tests. Note: This does not handle litmus-test deletion! +sort $T/list-C-already $T/list-C-short | uniq -u > $T/list-C-new + +# Form list of litmus tests that have changed since the last run. +sed < $T/list-C-short -e 's,^.*$,if test & -nt '"$LKMM_DESTDIR"'/&.out; then echo &; fi,' > $T/list-C-script +sh $T/list-C-script > $T/list-C-newer + +# Merge the list of new and of updated litmus tests: These must be (re)run. +sort -u $T/list-C-new $T/list-C-newer > $T/list-C-needed + +scripts/runlitmushist.sh < $T/list-C-needed + +exit 0 diff --git a/tools/memory-model/scripts/parseargs.sh b/tools/memory-model/scripts/parseargs.sh new file mode 100644 index 000000000000..96b307c8d64a --- /dev/null +++ b/tools/memory-model/scripts/parseargs.sh @@ -0,0 +1,126 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# the corresponding .litmus.out file, and does not judge the result. +# +# . scripts/parseargs.sh +# +# Include into other Linux kernel tools/memory-model scripts. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney + +T=/tmp/parseargs.sh.$$ +mkdir $T + +# Initialize one parameter: initparam name default +initparam () { + echo if test -z '"$'$1'"' > $T/s + echo then >> $T/s + echo $1='"'$2'"' >> $T/s + echo export $1 >> $T/s + echo fi >> $T/s + echo $1_DEF='$'$1 >> $T/s + . $T/s +} + +initparam LKMM_DESTDIR "." +initparam LKMM_HERD_OPTIONS "-conf linux-kernel.cfg" +initparam LKMM_JOBS `getconf _NPROCESSORS_ONLN` +initparam LKMM_PROCS "3" +initparam LKMM_TIMEOUT "1m" + +scriptname=$0 + +usagehelp () { + echo "Usage $scriptname [ arguments ]" + echo " --destdir path (place for .litmus.out, default by .litmus)" + echo " --herdopts -conf linux-kernel.cfg ..." + echo " --jobs N (number of jobs, default one per CPU)" + echo " --procs N (litmus tests with at most this many processes)" + echo " --timeout N (herd7 timeout (e.g., 10s, 1m, 2hr, 1d, '')" + echo "Defaults: --destdir '$LKMM_DESTDIR_DEF' --herdopts '$LKMM_HERD_OPTIONS_DEF' --jobs '$LKMM_JOBS_DEF' --procs '$LKMM_PROCS_DEF' --timeout '$LKMM_TIMEOUT_DEF'" + exit 1 +} + +usage () { + usagehelp 1>&2 +} + +# checkarg --argname argtype $# arg mustmatch cannotmatch +checkarg () { + if test $3 -le 1 + then + echo $1 needs argument $2 matching \"$5\" + usage + fi + if echo "$4" | grep -q -e "$5" + then + : + else + echo $1 $2 \"$4\" must match \"$5\" + usage + fi + if echo "$4" | grep -q -e "$6" + then + echo $1 $2 \"$4\" must not match \"$6\" + usage + fi +} + +while test $# -gt 0 +do + case "$1" in + --destdir) + checkarg --destdir "(path to directory)" "$#" "$2" '.\+' '^--' + LKMM_DESTDIR="$2" + mkdir $LKMM_DESTDIR > /dev/null 2>&1 + if ! 
test -e "$LKMM_DESTDIR" + then + echo "Cannot create directory --destdir '$LKMM_DESTDIR'" + usage + fi + if test -d "$LKMM_DESTDIR" -a -w "$LKMM_DESTDIR" -a -x "$LKMM_DESTDIR" + then + : + else + echo "Directory --destdir '$LKMM_DESTDIR' insufficient permissions to create files" + usage + fi + shift + ;; + --herdopts|--herdopt) + checkarg --destdir "(herd options)" "$#" "$2" '.*' '^--' + LKMM_HERD_OPTIONS="$2" + shift + ;; + --jobs|--job) + checkarg --jobs "(number)" "$#" "$2" '^[0-9]\+$' '^--' + LKMM_JOBS="$2" + shift + ;; + --procs|--proc) + checkarg --procs "(number)" "$#" "$2" '^[0-9]\+$' '^--' + LKMM_PROCS="$2" + shift + ;; + --timeout) + checkarg --timeout "(timeout spec)" "$#" "$2" '^\([0-9]\+[smhd]\?\|\)$' '^--' + LKMM_TIMEOUT="$2" + shift + ;; + *) + echo Unknown argument $1 + usage + ;; + esac + shift +done +if test -z "$LKMM_TIMEOUT" +then + LKMM_TIMEOUT_CMD=""; export LKMM_TIMEOUT_CMD +else + LKMM_TIMEOUT_CMD="timeout $LKMM_TIMEOUT"; export LKMM_TIMEOUT_CMD +fi +rm -rf $T diff --git a/tools/memory-model/scripts/runlitmushist.sh b/tools/memory-model/scripts/runlitmushist.sh new file mode 100644 index 000000000000..e507f5f933d5 --- /dev/null +++ b/tools/memory-model/scripts/runlitmushist.sh @@ -0,0 +1,87 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ +# +# Runs the C-language litmus tests specified on standard input, using up +# to the specified number of CPUs (defaulting to all of them) and placing +# the results in the specified directory (defaulting to the same place +# the litmus test came from). +# +# sh runlitmushist.sh +# +# Run from the Linux kernel tools/memory-model directory. +# This script uses environment variables produced by parseargs.sh. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney + +T=/tmp/runlitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +if test -d litmus +then + : +else + echo Directory \"litmus\" missing, aborting run. + exit 1 +fi + +# Prefixes for per-CPU scripts +for ((i=0;i<$LKMM_JOBS;i++)) +do + echo dir="$LKMM_DESTDIR" > $T/$i.sh + echo T=$T >> $T/$i.sh + echo herdoptions=\"$LKMM_HERD_OPTIONS\" >> $T/$i.sh + cat << '___EOF___' >> $T/$i.sh + runtest () { + echo ' ... ' /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 '>' $dir/$1.out '2>&1' + if /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 > $dir/$1.out 2>&1 + then + if ! grep -q '^Observation ' $dir/$1.out + then + echo ' !!! Herd failed, no Observation:' $1 + fi + else + exitcode=$? + if test "$exitcode" -eq 124 + then + exitmsg="timed out" + else + exitmsg="failed, exit code $exitcode" + fi + echo ' !!! Herd' ${exitmsg}: $1 + fi + } +___EOF___ +done + +awk -v q="'" -v b='\\' ' +{ + print "echo `grep " q "^P[0-9]" b "+(" q " " $0 " | tail -1 | sed -e " q "s/^P" b "([0-9]" b "+" b ")(.*$/" b "1/" q "` " $0 +}' | bash | +sort -k1n | +awk -v ncpu=$LKMM_JOBS -v t=$T ' +{ + print "runtest " $2 >> t "/" NR % ncpu ".sh"; +} + +END { + for (i = 0; i < ncpu; i++) { + print "sh " t "/" i ".sh > " t "/" i ".sh.out 2>&1 &"; + close(t "/" i ".sh"); + } + print "wait"; +}' | sh +cat $T/*.sh.out +if grep -q '!!!' $T/*.sh.out +then + echo ' ---' Summary: 1>&2 + grep '!!!' $T/*.sh.out 1>&2 + nfail="`grep '!!!' $T/*.sh.out | wc -l`" + echo 'Number of failed herd runs (e.g., timeout): ' $nfail 1>&2 + exit 1 +else + echo All runs completed successfully. 1>&2 + exit 0 +fi From 910cc9591d1433c2e26bd1c210844b09c699dd89 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Mon, 3 Dec 2018 15:04:51 -0800 Subject: [PATCH 03/71] tools/memory-model: Make scripts take "-j" abbreviation for "--jobs" The "--jobs" argument to the litmus-test scripts is similar to the "-jN" argument to "make", so this commit allows the "-jN" form as well. While in the area, it also prohibits the various forms of "-j0". Suggested-by: Alan Stern Signed-off-by: Paul E. McKenney Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akiyks@gmail.com Cc: boqun.feng@gmail.com Cc: dhowells@redhat.com Cc: j.alglave@ucl.ac.uk Cc: linux-arch@vger.kernel.org Cc: luc.maranget@inria.fr Cc: npiggin@gmail.com Cc: parri.andrea@gmail.com Cc: will.deacon@arm.com Link: http://lkml.kernel.org/r/20181203230451.28921-3-paulmck@linux.ibm.com Signed-off-by: Ingo Molnar --- tools/memory-model/scripts/parseargs.sh | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tools/memory-model/scripts/parseargs.sh b/tools/memory-model/scripts/parseargs.sh index 96b307c8d64a..859e1d581e05 100644 --- a/tools/memory-model/scripts/parseargs.sh +++ b/tools/memory-model/scripts/parseargs.sh @@ -95,8 +95,18 @@ do LKMM_HERD_OPTIONS="$2" shift ;; - --jobs|--job) - checkarg --jobs "(number)" "$#" "$2" '^[0-9]\+$' '^--' + -j[1-9]*) + njobs="`echo $1 | sed -e 's/^-j//'`" + trailchars="`echo $njobs | sed -e 's/[0-9]\+\(.*\)$/\1/'`" + if test -n "$trailchars" + then + echo $1 trailing characters "'$trailchars'" + usagehelp + fi + LKMM_JOBS="`echo $njobs | sed -e 's/^\([0-9]\+\).*$/\1/'`" + ;; + --jobs|--job|-j) + checkarg --jobs "(number)" "$#" "$2" '^[1-9][0-9]\+$' '^--' LKMM_JOBS="$2" shift ;; From 337e9b07db3b8c7f7d68b849df32f434a1a3b831 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 6 Nov 2018 19:10:53 -0800 Subject: [PATCH 04/71] sched: Replace call_rcu_sched() with call_rcu() Now that call_rcu()'s callback is not invoked until after all preempt-disable regions of code have completed (in addition to explicitly marked RCU read-side critical sections), call_rcu() can be used in place of call_rcu_sched(). This commit therefore makes that change. While in the area, this commit also updates an outdated header comment for for_each_domain(). Signed-off-by: Paul E. McKenney Cc: Ingo Molnar Cc: Peter Zijlstra --- kernel/sched/sched.h | 2 +- kernel/sched/topology.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d04530bf251f..6665b9c02e2f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1260,7 +1260,7 @@ extern void sched_ttwu_pending(void); /* * The domain tree (rq->sd) is protected by RCU's quiescent state transition. - * See detach_destroy_domains: synchronize_sched for details. + * See destroy_sched_domains: call_rcu for details. * * The domain tree of any CPU may only be accessed from within * preempt-disabled sections. 
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 3f35ba1d8fde..7d905f55e7fa 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -442,7 +442,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) raw_spin_unlock_irqrestore(&rq->lock, flags); if (old_rd) - call_rcu_sched(&old_rd->rcu, free_rootdomain); + call_rcu(&old_rd->rcu, free_rootdomain); } void sched_get_rd(struct root_domain *rd) @@ -455,7 +455,7 @@ void sched_put_rd(struct root_domain *rd) if (!atomic_dec_and_test(&rd->refcount)) return; - call_rcu_sched(&rd->rcu, free_rootdomain); + call_rcu(&rd->rcu, free_rootdomain); } static int init_rootdomain(struct root_domain *rd) From b290ebcf7bc4638b38c413f192963f4b74e45b7b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 6 Nov 2018 19:13:54 -0800 Subject: [PATCH 05/71] sched: Replace synchronize_sched() with synchronize_rcu() Now that synchronize_rcu() waits for preempt-disable regions of code as well as RCU read-side critical sections, synchronize_sched() can be replaced by synchronize_rcu(), in fact, synchronize_sched() is now completely equivalent to synchronize_rcu(). This commit therefore replaces synchronize_sched() with synchronize_rcu() so that synchronize_sched() can eventually be removed entirely. Signed-off-by: Paul E. McKenney Cc: Ingo Molnar Cc: Peter Zijlstra --- kernel/sched/cpufreq.c | 4 ++-- kernel/sched/cpufreq_schedutil.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c index 22bd8980f32f..835671f0f917 100644 --- a/kernel/sched/cpufreq.c +++ b/kernel/sched/cpufreq.c @@ -48,8 +48,8 @@ EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook); * * Clear the update_util_data pointer for the given CPU. * - * Callers must use RCU-sched callbacks to free any memory that might be - * accessed via the old update_util_data pointer or invoke synchronize_sched() + * Callers must use RCU callbacks to free any memory that might be + * accessed via the old update_util_data pointer or invoke synchronize_rcu() * right after this function to avoid use-after-free. */ void cpufreq_remove_update_util_hook(int cpu) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 033ec7c45f13..2efe629425be 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -859,7 +859,7 @@ static void sugov_stop(struct cpufreq_policy *policy) for_each_cpu(cpu, policy->cpus) cpufreq_remove_update_util_hook(cpu); - synchronize_sched(); + synchronize_rcu(); if (!policy->fast_switch_enabled) { irq_work_sync(&sg_policy->irq_work); From ad368d15b08ad22509a56bdfd6ee3a04da91ce10 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 27 Nov 2018 13:55:53 -0800 Subject: [PATCH 06/71] rcu: Rename and comment changes due to only one rcuo kthread per CPU Given RCU flavor consolidation, the name rcu_spawn_all_nocb_kthreads() is quite misleading. It no longer ever creates more than one kthread, and it does so only for the specified CPU. This commit therefore changes this name to the more descriptive rcu_spawn_cpu_nocb_kthread(), and also fixes up a similar issue in its header comment while in the area. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 2 +- kernel/rcu/tree.h | 2 +- kernel/rcu/tree_plugin.h | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9180158756d2..f4edc664fb65 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3299,7 +3299,7 @@ int rcutree_prepare_cpu(unsigned int cpu) trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); rcu_prepare_kthreads(cpu); - rcu_spawn_all_nocb_kthreads(cpu); + rcu_spawn_cpu_nocb_kthread(cpu); return 0; } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index d90b02b53c0e..bcfd684a5c57 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -451,7 +451,7 @@ static bool rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp, static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp); static void do_nocb_deferred_wakeup(struct rcu_data *rdp); static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); -static void rcu_spawn_all_nocb_kthreads(int cpu); +static void rcu_spawn_cpu_nocb_kthread(int cpu); static void __init rcu_spawn_nocb_kthreads(void); #ifdef CONFIG_RCU_NOCB_CPU static void __init rcu_organize_nocb_kthreads(void); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 1b3dd2fc0cd6..4d4091565a2c 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2517,9 +2517,9 @@ static void rcu_spawn_one_nocb_kthread(int cpu) /* * If the specified CPU is a no-CBs CPU that does not already have its - * rcuo kthreads, spawn them. + * rcuo kthread, spawn it. */ -static void rcu_spawn_all_nocb_kthreads(int cpu) +static void rcu_spawn_cpu_nocb_kthread(int cpu) { if (rcu_scheduler_fully_active) rcu_spawn_one_nocb_kthread(cpu); @@ -2536,7 +2536,7 @@ static void __init rcu_spawn_nocb_kthreads(void) int cpu; for_each_online_cpu(cpu) - rcu_spawn_all_nocb_kthreads(cpu); + rcu_spawn_cpu_nocb_kthread(cpu); } /* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */ @@ -2670,7 +2670,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp) { } -static void rcu_spawn_all_nocb_kthreads(int cpu) +static void rcu_spawn_cpu_nocb_kthread(int cpu) { } From 1de462ed85062df2ab6939eeee1625e767052907 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 28 Nov 2018 10:37:42 -0800 Subject: [PATCH 07/71] rcu: Make expedited IPI handler return after handling critical section During expedited RCU grace-period initialization, IPIs are sent to all non-idle online CPUs. The IPI handler checks to see if the CPU is in quiescent state, reporting one if so. This handler looks at three different cases: (1) The CPU is not in an rcu_read_lock()-based critical section, (2) The CPU is in the process of exiting an rcu_read_lock()-based critical section, and (3) The CPU is in an rcu_read_lock()-based critical section. In case (2), execution falls through into case (3). This is harmless from a functionality viewpoint, but can result in needless overhead during an improbable corner case. This commit therefore adds the "return" statement needed to prevent fall-through. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_exp.h | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 928fe5893a57..6d4eb4694b6f 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -697,6 +697,7 @@ static void sync_rcu_exp_handler(void *unused) WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true); } raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + return; } /* From cd920e5a34abea837418691d366472311e7b9147 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 28 Nov 2018 16:57:54 -0800 Subject: [PATCH 08/71] rcu: Inline force_quiescent_state() into rcu_force_quiescent_state() Given that rcu_force_quiescent_state() is a simple wrapper around force_quiescent_state(), this commit saves a few lines of code by inlining force_quiescent_state() into rcu_force_quiescent_state(), and changing all references to force_quiescent_state() to instead invoke rcu_force_quiescent_state(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f4edc664fb65..e56a46444775 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -479,7 +479,6 @@ module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next module_param(rcu_kick_kthreads, bool, 0644); static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); -static void force_quiescent_state(void); static int rcu_pending(void); /* @@ -503,15 +502,6 @@ unsigned long rcu_exp_batches_completed(void) } EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); -/* - * Force a quiescent state. - */ -void rcu_force_quiescent_state(void) -{ - force_quiescent_state(); -} -EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); - /* * Convert a ->gp_state value to a character string. */ @@ -1310,7 +1300,7 @@ static void print_other_cpu_stall(unsigned long gp_seq) panic_on_rcu_stall(); - force_quiescent_state(); /* Kick them all. */ + rcu_force_quiescent_state(); /* Kick them all. */ } static void print_cpu_stall(void) @@ -2578,7 +2568,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) * Force quiescent states on reluctant CPUs, and also detect which * CPUs are in dyntick-idle mode. */ -static void force_quiescent_state(void) +void rcu_force_quiescent_state(void) { unsigned long flags; bool ret; @@ -2610,6 +2600,7 @@ static void force_quiescent_state(void) raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); rcu_gp_kthread_wake(); } +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); /* * This function checks for grace-period requests that fail to motivate @@ -2801,9 +2792,9 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, /* * Force the grace period if too many callbacks or too long waiting. - * Enforce hysteresis, and don't invoke force_quiescent_state() + * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() * if some other CPU has recently done so. Also, don't bother - * invoking force_quiescent_state() if the newly enqueued callback + * invoking rcu_force_quiescent_state() if the newly enqueued callback * is the only one waiting for a grace period to complete. 
*/ if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > @@ -2820,7 +2811,7 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, rdp->blimit = LONG_MAX; if (rcu_state.n_force_qs == rdp->n_force_qs_snap && rcu_segcblist_first_pend_cb(&rdp->cblist) != head) - force_quiescent_state(); + rcu_force_quiescent_state(); rdp->n_force_qs_snap = rcu_state.n_force_qs; rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); } From c97058d03329284068e45796df13510e5f940d8b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 28 Nov 2018 16:59:50 -0800 Subject: [PATCH 09/71] rcu: Eliminate RCU_BH_FLAVOR and RCU_SCHED_FLAVOR Now that the RCU flavors have been consolidated, RCU_BH_FLAVOR and RCU_SCHED_FLAVOR are no longer used. This commit therefore saves a few lines by removing them. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu.h | 2 -- kernel/rcu/tree.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index a393e24a9195..75787186bd4f 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -462,8 +462,6 @@ void rcu_request_urgent_qs_task(struct task_struct *t); enum rcutorture_type { RCU_FLAVOR, - RCU_BH_FLAVOR, - RCU_SCHED_FLAVOR, RCU_TASKS_FLAVOR, SRCU_FLAVOR, INVALID_RCU_FLAVOR diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e56a46444775..fc37bec32731 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -556,8 +556,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, { switch (test_type) { case RCU_FLAVOR: - case RCU_BH_FLAVOR: - case RCU_SCHED_FLAVOR: *flags = READ_ONCE(rcu_state.gp_flags); *gp_seq = rcu_seq_current(&rcu_state.gp_seq); break; From c46f497a6151d48cb341e18fdd4dff345f7d253d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 28 Nov 2018 17:02:44 -0800 Subject: [PATCH 10/71] rcu: Inline rcu_kthread_do_work() into its sole remaining caller The rcu_kthread_do_work() function has a single-line body and only one remaining caller. This commit therefore saves a few lines of code by inlining rcu_kthread_do_work() into its sole remaining caller. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 4d4091565a2c..bcf3e7366a28 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1369,11 +1369,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp) return 0; } -static void rcu_kthread_do_work(void) -{ - rcu_do_batch(this_cpu_ptr(&rcu_data)); -} - static void rcu_cpu_kthread_setup(unsigned int cpu) { struct sched_param sp; @@ -1413,7 +1408,7 @@ static void rcu_cpu_kthread(unsigned int cpu) *workp = 0; local_irq_enable(); if (work) - rcu_kthread_do_work(); + rcu_do_batch(this_cpu_ptr(&rcu_data)); local_bh_enable(); if (*workp == 0) { trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); From 142d106d5e62ff2cf0dfd2dfe1adfcaff1c2ed85 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 29 Nov 2018 09:15:54 -0800 Subject: [PATCH 11/71] rcu: Determine expedited-GP IPI handler at build time Back when there could be multiple RCU flavors running in the same kernel at the same time, it was necessary to specify the expedited grace-period IPI handler at runtime. Now that there is only one RCU flavor, the IPI handler can be determined at build time. 
There is therefore no longer any reason for the RCU-preempt and RCU-sched IPI handlers to have different names, nor is there any reason to pass these handlers in function arguments and in the data structures enclosing workqueues. This commit therefore makes all these changes, pushing the specification of the expedited grace-period IPI handler down to the point of use. Signed-off-by: Paul E. McKenney --- .../Expedited-Grace-Periods/ExpSchedFlow.svg | 18 ++++++----- .../Expedited-Grace-Periods.html | 26 ++++++++-------- kernel/rcu/tree.h | 1 - kernel/rcu/tree_exp.h | 30 +++++++++---------- 4 files changed, 38 insertions(+), 37 deletions(-) diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg index e4233ac93c2b..6189ffcc6aff 100644 --- a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg +++ b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg @@ -328,13 +328,13 @@ inkscape:window-height="1148" id="namedview90" showgrid="true" - inkscape:zoom="0.80021373" - inkscape:cx="462.49289" - inkscape:cy="473.6718" + inkscape:zoom="0.69092787" + inkscape:cx="476.34085" + inkscape:cy="712.80957" inkscape:window-x="770" inkscape:window-y="24" inkscape:window-maximized="0" - inkscape:current-layer="g4114-9-3-9" + inkscape:current-layer="g4" inkscape:snap-grids="false" fit-margin-top="5" fit-margin-right="5" @@ -813,14 +813,18 @@ reched_cpu() + id="tspan4925-1-2-4-5">Requestcontext switch diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html index 8e4f873b979f..19e7a5fb6b73 100644 --- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html +++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html @@ -72,10 +72,10 @@ will ignore it because idle and offline CPUs are already residing in quiescent states. Otherwise, the expedited grace period will use smp_call_function_single() to send the CPU an IPI, which -is handled by sync_rcu_exp_handler(). +is handled by rcu_exp_handler().

-However, because this is preemptible RCU, sync_rcu_exp_handler() +However, because this is preemptible RCU, rcu_exp_handler() can check to see if the CPU is currently running in an RCU read-side critical section. If not, the handler can immediately report a quiescent state. @@ -145,19 +145,18 @@ expedited grace period is shown in the following diagram:

ExpSchedFlow.svg

-As with RCU-preempt's synchronize_rcu_expedited(), +As with RCU-preempt, RCU-sched's synchronize_sched_expedited() ignores offline and idle CPUs, again because they are in remotely detectable quiescent states. -However, the synchronize_rcu_expedited() handler -is sync_sched_exp_handler(), and because the +However, because the rcu_read_lock_sched() and rcu_read_unlock_sched() leave no trace of their invocation, in general it is not possible to tell whether or not the current CPU is in an RCU read-side critical section. -The best that sync_sched_exp_handler() can do is to check +The best that RCU-sched's rcu_exp_handler() can do is to check for idle, on the off-chance that the CPU went idle while the IPI was in flight. -If the CPU is idle, then sync_sched_exp_handler() reports +If the CPU is idle, then rcu_exp_handler() reports the quiescent state.

Otherwise, the handler forces a future context switch by setting the @@ -298,19 +297,18 @@ Instead, the task pushing the grace period forward will include the idle CPUs in the mask passed to rcu_report_exp_cpu_mult().

-For RCU-sched, there is an additional check for idle in the IPI -handler, sync_sched_exp_handler(). +For RCU-sched, there is an additional check: If the IPI has interrupted the idle loop, then -sync_sched_exp_handler() invokes rcu_report_exp_rdp() +rcu_exp_handler() invokes rcu_report_exp_rdp() to report the corresponding quiescent state.

For RCU-preempt, there is no specific check for idle in the -IPI handler (sync_rcu_exp_handler()), but because +IPI handler (rcu_exp_handler()), but because RCU read-side critical sections are not permitted within the -idle loop, if sync_rcu_exp_handler() sees that the CPU is within +idle loop, if rcu_exp_handler() sees that the CPU is within RCU read-side critical section, the CPU cannot possibly be idle. -Otherwise, sync_rcu_exp_handler() invokes +Otherwise, rcu_exp_handler() invokes rcu_report_exp_rdp() to report the corresponding quiescent state, regardless of whether or not that quiescent state was due to the CPU being idle. @@ -625,6 +623,8 @@ checks, but only during the mid-boot dead zone.

With this refinement, synchronous grace periods can now be used from task context pretty much any time during the life of the kernel. +That is, aside from some points in the suspend, hibernate, or shutdown +code path.

Summary

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index bcfd684a5c57..50bb41cdc5fb 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -36,7 +36,6 @@ /* Communicate arguments to a workqueue handler. */ struct rcu_exp_work { - smp_call_func_t rew_func; unsigned long rew_s; struct work_struct rew_work; }; diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 6d4eb4694b6f..7f5cb4228b59 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -22,6 +22,8 @@ #include +static void rcu_exp_handler(void *unused); + /* * Record the start of an expedited grace period. */ @@ -344,7 +346,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) { int cpu; unsigned long flags; - smp_call_func_t func; unsigned long mask_ofl_test; unsigned long mask_ofl_ipi; int ret; @@ -352,7 +353,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) container_of(wp, struct rcu_exp_work, rew_work); struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew); - func = rewp->rew_func; raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Each pass checks a CPU for identity, offline, and idle. */ @@ -396,7 +396,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) mask_ofl_test |= mask; continue; } - ret = smp_call_function_single(cpu, func, NULL, 0); + ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0); if (!ret) { mask_ofl_ipi &= ~mask; continue; @@ -426,7 +426,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) * Select the nodes that the upcoming expedited grace period needs * to wait for. */ -static void sync_rcu_exp_select_cpus(smp_call_func_t func) +static void sync_rcu_exp_select_cpus(void) { int cpu; struct rcu_node *rnp; @@ -440,7 +440,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func) rnp->exp_need_flush = false; if (!READ_ONCE(rnp->expmask)) continue; /* Avoid early boot non-existent wq. */ - rnp->rew.rew_func = func; if (!READ_ONCE(rcu_par_gp_wq) || rcu_scheduler_active != RCU_SCHEDULER_RUNNING || rcu_is_last_leaf_node(rnp)) { @@ -580,10 +579,10 @@ static void rcu_exp_wait_wake(unsigned long s) * Common code to drive an expedited grace period forward, used by * workqueues and mid-boot-time tasks. */ -static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s) +static void rcu_exp_sel_wait_wake(unsigned long s) { /* Initialize the rcu_node tree in preparation for the wait. */ - sync_rcu_exp_select_cpus(func); + sync_rcu_exp_select_cpus(); /* Wait and clean up, including waking everyone. */ rcu_exp_wait_wake(s); @@ -597,14 +596,14 @@ static void wait_rcu_exp_gp(struct work_struct *wp) struct rcu_exp_work *rewp; rewp = container_of(wp, struct rcu_exp_work, rew_work); - rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s); + rcu_exp_sel_wait_wake(rewp->rew_s); } /* * Given a smp_call_function() handler, kick off the specified * implementation of expedited grace period. */ -static void _synchronize_rcu_expedited(smp_call_func_t func) +static void _synchronize_rcu_expedited(void) { struct rcu_data *rdp; struct rcu_exp_work rew; @@ -625,10 +624,9 @@ static void _synchronize_rcu_expedited(smp_call_func_t func) /* Ensure that load happens before action based on it. */ if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) { /* Direct call during scheduler init and early_initcalls(). */ - rcu_exp_sel_wait_wake(func, s); + rcu_exp_sel_wait_wake(s); } else { /* Marshall arguments & schedule the expedited grace period. 
*/ - rew.rew_func = func; rew.rew_s = s; INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); queue_work(rcu_gp_wq, &rew.rew_work); @@ -654,7 +652,7 @@ static void _synchronize_rcu_expedited(smp_call_func_t func) * ->expmask fields in the rcu_node tree. Otherwise, immediately * report the quiescent state. */ -static void sync_rcu_exp_handler(void *unused) +static void rcu_exp_handler(void *unused) { unsigned long flags; struct rcu_data *rdp = this_cpu_ptr(&rcu_data); @@ -760,14 +758,14 @@ void synchronize_rcu_expedited(void) if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) return; - _synchronize_rcu_expedited(sync_rcu_exp_handler); + _synchronize_rcu_expedited(); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); #else /* #ifdef CONFIG_PREEMPT_RCU */ /* Invoked on each online non-idle CPU for expedited quiescent state. */ -static void sync_sched_exp_handler(void *unused) +static void rcu_exp_handler(void *unused) { struct rcu_data *rdp; struct rcu_node *rnp; @@ -799,7 +797,7 @@ static void sync_sched_exp_online_cleanup(int cpu) rnp = rdp->mynode; if (!(READ_ONCE(rnp->expmask) & rdp->grpmask)) return; - ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0); + ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0); WARN_ON_ONCE(ret); } @@ -835,7 +833,7 @@ void synchronize_rcu_expedited(void) if (rcu_blocking_is_gp()) return; - _synchronize_rcu_expedited(sync_sched_exp_handler); + _synchronize_rcu_expedited(); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); From 3cd4ca47aa577689c2e6b295d8f52af0e6f26333 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 29 Nov 2018 10:01:52 -0800 Subject: [PATCH 12/71] rcu: Consolidate PREEMPT and !PREEMPT synchronize_rcu_expedited() The CONFIG_PREEMPT=n and CONFIG_PREEMPT=y implementations of synchronize_rcu_expedited() are quite similar, and with small modifications to rcu_blocking_is_gp() can be made identical. This commit therefore makes this change in order to save a few lines of code and to reduce the amount of duplicate code. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_exp.h | 105 ++++++++++++++++++++---------------------- 1 file changed, 49 insertions(+), 56 deletions(-) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 7f5cb4228b59..b800bdfe74b3 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -643,6 +643,33 @@ static void _synchronize_rcu_expedited(void) mutex_unlock(&rcu_state.exp_mutex); } +/* + * During early boot, any blocking grace-period wait automatically + * implies a grace period. Later on, this is never the case for PREEMPT. + * + * Howevr, because a context switch is a grace period for !PREEMPT, any + * blocking grace-period wait automatically implies a grace period if + * there is only one CPU online at any point time during execution of + * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to + * occasionally incorrectly indicate that there are multiple CPUs online + * when there was in fact only one the whole time, as this just adds some + * overhead: RCU still operates correctly. + */ +static int rcu_blocking_is_gp(void) +{ + int ret; + + if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) + return true; + if (IS_ENABLED(CONFIG_PREEMPT)) + return false; + might_sleep(); /* Check for RCU read-side critical section. 
*/ + preempt_disable(); + ret = num_online_cpus() <= 1; + preempt_enable(); + return ret; +} + #ifdef CONFIG_PREEMPT_RCU /* @@ -729,39 +756,6 @@ static void sync_sched_exp_online_cleanup(int cpu) { } -/** - * synchronize_rcu_expedited - Brute-force RCU grace period - * - * Wait for an RCU-preempt grace period, but expedite it. The basic - * idea is to IPI all non-idle non-nohz online CPUs. The IPI handler - * checks whether the CPU is in an RCU-preempt critical section, and - * if so, it sets a flag that causes the outermost rcu_read_unlock() - * to report the quiescent state. On the other hand, if the CPU is - * not in an RCU read-side critical section, the IPI handler reports - * the quiescent state immediately. - * - * Although this is a greate improvement over previous expedited - * implementations, it is still unfriendly to real-time workloads, so is - * thus not recommended for any sort of common-case code. In fact, if - * you are using synchronize_rcu_expedited() in a loop, please restructure - * your code to batch your updates, and then Use a single synchronize_rcu() - * instead. - * - * This has the same semantics as (but is more brutal than) synchronize_rcu(). - */ -void synchronize_rcu_expedited(void) -{ - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || - lock_is_held(&rcu_lock_map) || - lock_is_held(&rcu_sched_lock_map), - "Illegal synchronize_rcu_expedited() in RCU read-side critical section"); - - if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) - return; - _synchronize_rcu_expedited(); -} -EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); - #else /* #ifdef CONFIG_PREEMPT_RCU */ /* Invoked on each online non-idle CPU for expedited quiescent state. */ @@ -801,27 +795,28 @@ static void sync_sched_exp_online_cleanup(int cpu) WARN_ON_ONCE(ret); } -/* - * Because a context switch is a grace period for !PREEMPT, any - * blocking grace-period wait automatically implies a grace period if - * there is only one CPU online at any point time during execution of - * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to - * occasionally incorrectly indicate that there are multiple CPUs online - * when there was in fact only one the whole time, as this just adds some - * overhead: RCU still operates correctly. +#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + +/** + * synchronize_rcu_expedited - Brute-force RCU grace period + * + * Wait for an RCU grace period, but expedite it. The basic idea is to + * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether + * the CPU is in an RCU critical section, and if so, it sets a flag that + * causes the outermost rcu_read_unlock() to report the quiescent state + * for RCU-preempt or asks the scheduler for help for RCU-sched. On the + * other hand, if the CPU is not in an RCU read-side critical section, + * the IPI handler reports the quiescent state immediately. + * + * Although this is a greate improvement over previous expedited + * implementations, it is still unfriendly to real-time workloads, so is + * thus not recommended for any sort of common-case code. In fact, if + * you are using synchronize_rcu_expedited() in a loop, please restructure + * your code to batch your updates, and then Use a single synchronize_rcu() + * instead. + * + * This has the same semantics as (but is more brutal than) synchronize_rcu(). */ -static int rcu_blocking_is_gp(void) -{ - int ret; - - might_sleep(); /* Check for RCU read-side critical section. 
*/ - preempt_disable(); - ret = num_online_cpus() <= 1; - preempt_enable(); - return ret; -} - -/* PREEMPT=n implementation of synchronize_rcu_expedited(). */ void synchronize_rcu_expedited(void) { RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || @@ -829,12 +824,10 @@ void synchronize_rcu_expedited(void) lock_is_held(&rcu_sched_lock_map), "Illegal synchronize_rcu_expedited() in RCU read-side critical section"); - /* If only one CPU, this is automatically a grace period. */ + /* Is the state is such that the call is a grace period? */ if (rcu_blocking_is_gp()) return; _synchronize_rcu_expedited(); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); - -#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ From e5bc3af7734f90278a47906d917852a85544510b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 29 Nov 2018 10:42:06 -0800 Subject: [PATCH 13/71] rcu: Consolidate PREEMPT and !PREEMPT synchronize_rcu() Now that rcu_blocking_is_gp() makes the correct immediate-return decision for both PREEMPT and !PREEMPT, a single implementation of synchronize_rcu() will work correctly under both configurations. This commit therefore eliminates a few lines of code by consolidating the two implementations of synchronize_rcu(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 73 ++++++++++++++++++++++++++++++++++++++++ kernel/rcu/tree_exp.h | 27 --------------- kernel/rcu/tree_plugin.h | 64 ----------------------------------- 3 files changed, 73 insertions(+), 91 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index fc37bec32731..e2bd42b2b563 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2950,6 +2950,79 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) } EXPORT_SYMBOL_GPL(kfree_call_rcu); +/* + * During early boot, any blocking grace-period wait automatically + * implies a grace period. Later on, this is never the case for PREEMPT. + * + * Howevr, because a context switch is a grace period for !PREEMPT, any + * blocking grace-period wait automatically implies a grace period if + * there is only one CPU online at any point time during execution of + * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to + * occasionally incorrectly indicate that there are multiple CPUs online + * when there was in fact only one the whole time, as this just adds some + * overhead: RCU still operates correctly. + */ +static int rcu_blocking_is_gp(void) +{ + int ret; + + if (IS_ENABLED(CONFIG_PREEMPT)) + return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE; + might_sleep(); /* Check for RCU read-side critical section. */ + preempt_disable(); + ret = num_online_cpus() <= 1; + preempt_enable(); + return ret; +} + +/** + * synchronize_rcu - wait until a grace period has elapsed. + * + * Control will return to the caller some time after a full grace + * period has elapsed, in other words after all currently executing RCU + * read-side critical sections have completed. Note, however, that + * upon return from synchronize_rcu(), the caller might well be executing + * concurrently with new RCU read-side critical sections that began while + * synchronize_rcu() was waiting. RCU read-side critical sections are + * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. + * In addition, regions of code across which interrupts, preemption, or + * softirqs have been disabled also serve as RCU read-side critical + * sections. This includes hardware interrupt handlers, softirq handlers, + * and NMI handlers. 
+ * + * Note that this guarantee implies further memory-ordering guarantees. + * On systems with more than one CPU, when synchronize_rcu() returns, + * each CPU is guaranteed to have executed a full memory barrier since + * the end of its last RCU read-side critical section whose beginning + * preceded the call to synchronize_rcu(). In addition, each CPU having + * an RCU read-side critical section that extends beyond the return from + * synchronize_rcu() is guaranteed to have executed a full memory barrier + * after the beginning of synchronize_rcu() and before the beginning of + * that RCU read-side critical section. Note that these guarantees include + * CPUs that are offline, idle, or executing in user mode, as well as CPUs + * that are executing in the kernel. + * + * Furthermore, if CPU A invoked synchronize_rcu(), which returned + * to its caller on CPU B, then both CPU A and CPU B are guaranteed + * to have executed a full memory barrier during the execution of + * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but + * again only if the system has more than one CPU). + */ +void synchronize_rcu(void) +{ + RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || + lock_is_held(&rcu_lock_map) || + lock_is_held(&rcu_sched_lock_map), + "Illegal synchronize_rcu() in RCU read-side critical section"); + if (rcu_blocking_is_gp()) + return; + if (rcu_gp_is_expedited()) + synchronize_rcu_expedited(); + else + wait_rcu_gp(call_rcu); +} +EXPORT_SYMBOL_GPL(synchronize_rcu); + /** * get_state_synchronize_rcu - Snapshot current RCU state * diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index b800bdfe74b3..353d113c0cd4 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -643,33 +643,6 @@ static void _synchronize_rcu_expedited(void) mutex_unlock(&rcu_state.exp_mutex); } -/* - * During early boot, any blocking grace-period wait automatically - * implies a grace period. Later on, this is never the case for PREEMPT. - * - * Howevr, because a context switch is a grace period for !PREEMPT, any - * blocking grace-period wait automatically implies a grace period if - * there is only one CPU online at any point time during execution of - * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to - * occasionally incorrectly indicate that there are multiple CPUs online - * when there was in fact only one the whole time, as this just adds some - * overhead: RCU still operates correctly. - */ -static int rcu_blocking_is_gp(void) -{ - int ret; - - if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) - return true; - if (IS_ENABLED(CONFIG_PREEMPT)) - return false; - might_sleep(); /* Check for RCU read-side critical section. */ - preempt_disable(); - ret = num_online_cpus() <= 1; - preempt_enable(); - return ret; -} - #ifdef CONFIG_PREEMPT_RCU /* diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index bcf3e7366a28..43f3f2ee9d63 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -825,54 +825,6 @@ static void rcu_flavor_check_callbacks(int user) t->rcu_read_unlock_special.b.need_qs = true; } -/** - * synchronize_rcu - wait until a grace period has elapsed. - * - * Control will return to the caller some time after a full grace - * period has elapsed, in other words after all currently executing RCU - * read-side critical sections have completed. 
Note, however, that - * upon return from synchronize_rcu(), the caller might well be executing - * concurrently with new RCU read-side critical sections that began while - * synchronize_rcu() was waiting. RCU read-side critical sections are - * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. - * In addition, regions of code across which interrupts, preemption, or - * softirqs have been disabled also serve as RCU read-side critical - * sections. This includes hardware interrupt handlers, softirq handlers, - * and NMI handlers. - * - * Note that this guarantee implies further memory-ordering guarantees. - * On systems with more than one CPU, when synchronize_rcu() returns, - * each CPU is guaranteed to have executed a full memory barrier since - * the end of its last RCU read-side critical section whose beginning - * preceded the call to synchronize_rcu(). In addition, each CPU having - * an RCU read-side critical section that extends beyond the return from - * synchronize_rcu() is guaranteed to have executed a full memory barrier - * after the beginning of synchronize_rcu() and before the beginning of - * that RCU read-side critical section. Note that these guarantees include - * CPUs that are offline, idle, or executing in user mode, as well as CPUs - * that are executing in the kernel. - * - * Furthermore, if CPU A invoked synchronize_rcu(), which returned - * to its caller on CPU B, then both CPU A and CPU B are guaranteed - * to have executed a full memory barrier during the execution of - * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but - * again only if the system has more than one CPU). - */ -void synchronize_rcu(void) -{ - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || - lock_is_held(&rcu_lock_map) || - lock_is_held(&rcu_sched_lock_map), - "Illegal synchronize_rcu() in RCU read-side critical section"); - if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) - return; - if (rcu_gp_is_expedited()) - synchronize_rcu_expedited(); - else - wait_rcu_gp(call_rcu); -} -EXPORT_SYMBOL_GPL(synchronize_rcu); - /* * Check for a task exiting while in a preemptible-RCU read-side * critical section, clean up if so. No need to issue warnings, @@ -1115,22 +1067,6 @@ static void rcu_flavor_check_callbacks(int user) } } -/* PREEMPT=n implementation of synchronize_rcu(). */ -void synchronize_rcu(void) -{ - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || - lock_is_held(&rcu_lock_map) || - lock_is_held(&rcu_sched_lock_map), - "Illegal synchronize_rcu() in RCU read-side critical section"); - if (rcu_blocking_is_gp()) - return; - if (rcu_gp_is_expedited()) - synchronize_rcu_expedited(); - else - wait_rcu_gp(call_rcu); -} -EXPORT_SYMBOL_GPL(synchronize_rcu); - /* * Because preemptible RCU does not exist, tasks cannot possibly exit * while in preemptible RCU read-side critical sections. From 892307266429f6439803f2735a59a4bc58a6ded4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 29 Nov 2018 11:50:04 -0800 Subject: [PATCH 14/71] rcu: Inline _synchronize_rcu_expedited() into synchronize_rcu_expedited() Now that _synchronize_rcu_expedited() has only one caller, and given that this is a tail call, this commit inlines _synchronize_rcu_expedited() into synchronize_rcu_expedited(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_exp.h | 81 +++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 45 deletions(-) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 353d113c0cd4..d882ca0cd01b 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -599,50 +599,6 @@ static void wait_rcu_exp_gp(struct work_struct *wp) rcu_exp_sel_wait_wake(rewp->rew_s); } -/* - * Given a smp_call_function() handler, kick off the specified - * implementation of expedited grace period. - */ -static void _synchronize_rcu_expedited(void) -{ - struct rcu_data *rdp; - struct rcu_exp_work rew; - struct rcu_node *rnp; - unsigned long s; - - /* If expedited grace periods are prohibited, fall back to normal. */ - if (rcu_gp_is_normal()) { - wait_rcu_gp(call_rcu); - return; - } - - /* Take a snapshot of the sequence number. */ - s = rcu_exp_gp_seq_snap(); - if (exp_funnel_lock(s)) - return; /* Someone else did our work for us. */ - - /* Ensure that load happens before action based on it. */ - if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) { - /* Direct call during scheduler init and early_initcalls(). */ - rcu_exp_sel_wait_wake(s); - } else { - /* Marshall arguments & schedule the expedited grace period. */ - rew.rew_s = s; - INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); - queue_work(rcu_gp_wq, &rew.rew_work); - } - - /* Wait for expedited grace period to complete. */ - rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); - rnp = rcu_get_root(); - wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], - sync_exp_work_done(s)); - smp_mb(); /* Workqueue actions happen before return. */ - - /* Let the next expedited grace period start. */ - mutex_unlock(&rcu_state.exp_mutex); -} - #ifdef CONFIG_PREEMPT_RCU /* @@ -792,6 +748,11 @@ static void sync_sched_exp_online_cleanup(int cpu) */ void synchronize_rcu_expedited(void) { + struct rcu_data *rdp; + struct rcu_exp_work rew; + struct rcu_node *rnp; + unsigned long s; + RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map), @@ -801,6 +762,36 @@ void synchronize_rcu_expedited(void) if (rcu_blocking_is_gp()) return; - _synchronize_rcu_expedited(); + /* If expedited grace periods are prohibited, fall back to normal. */ + if (rcu_gp_is_normal()) { + wait_rcu_gp(call_rcu); + return; + } + + /* Take a snapshot of the sequence number. */ + s = rcu_exp_gp_seq_snap(); + if (exp_funnel_lock(s)) + return; /* Someone else did our work for us. */ + + /* Ensure that load happens before action based on it. */ + if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) { + /* Direct call during scheduler init and early_initcalls(). */ + rcu_exp_sel_wait_wake(s); + } else { + /* Marshall arguments & schedule the expedited grace period. */ + rew.rew_s = s; + INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); + queue_work(rcu_gp_wq, &rew.rew_work); + } + + /* Wait for expedited grace period to complete. */ + rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); + rnp = rcu_get_root(); + wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], + sync_exp_work_done(s)); + smp_mb(); /* Workqueue actions happen before return. */ + + /* Let the next expedited grace period start. */ + mutex_unlock(&rcu_state.exp_mutex); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); From 260e1e4fd826a8e47c8976efba6a54d62b4a57de Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Thu, 29 Nov 2018 13:28:49 -0800 Subject: [PATCH 15/71] rcu: Discard separate per-CPU callback counts Back when there were multiple flavors of RCU, it was necessary to separately count lazy and non-lazy callbacks for each CPU. These counts were used in CONFIG_RCU_FAST_NO_HZ kernels to determine how long a newly idle CPU should be allowed to sleep before handling its RCU callbacks. But now that there is only one flavor, the callback counts for a given CPU's sole rcu_data structure are the counts for that CPU. This commit therefore removes the rcu_data structure's ->nonlazy_posted and ->nonlazy_posted_snap fields, the rcu_idle_count_callbacks_posted() and rcu_cpu_has_callbacks() functions, repurposes the rcu_data structure's ->all_lazy field to record the laziness state at the beginning of the latest idle sojourn, and modifies CONFIG_RCU_FAST_NO_HZ RCU CPU stall warnings accordingly. Signed-off-by: Paul E. McKenney --- Documentation/RCU/stallwarn.txt | 15 +++++----- kernel/rcu/tree.c | 25 ----------------- kernel/rcu/tree.h | 6 +--- kernel/rcu/tree_plugin.h | 50 ++++++++------------------------- 4 files changed, 21 insertions(+), 75 deletions(-) diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt index 073dbc12d1ea..1ab70c37921f 100644 --- a/Documentation/RCU/stallwarn.txt +++ b/Documentation/RCU/stallwarn.txt @@ -219,17 +219,18 @@ an estimate of the total number of RCU callbacks queued across all CPUs In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed for each CPU: - 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 nonlazy_posted: 25 .D + 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 Nonlazy posted: ..D The "last_accelerate:" prints the low-order 16 bits (in hex) of the jiffies counter when this CPU last invoked rcu_try_advance_all_cbs() from rcu_needs_cpu() or last invoked rcu_accelerate_cbs() from -rcu_prepare_for_idle(). The "nonlazy_posted:" prints the number -of non-lazy callbacks posted since the last call to rcu_needs_cpu(). -Finally, an "L" indicates that there are currently no non-lazy callbacks -("." is printed otherwise, as shown above) and "D" indicates that -dyntick-idle processing is enabled ("." is printed otherwise, for example, -if disabled via the "nohz=" kernel boot parameter). +rcu_prepare_for_idle(). The "Nonlazy posted:" indicates lazy-callback +status, so that an "l" indicates that all callbacks were lazy at the start +of the last idle period and an "L" indicates that there are currently +no non-lazy callbacks (in both cases, "." is printed otherwise, as +shown above) and "D" indicates that dyntick-idle processing is enabled +("." is printed otherwise, for example, if disabled via the "nohz=" +kernel boot parameter). 
If the grace period ends just as the stall warning starts printing, there will be a spurious stall-warning message, which will include diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e2bd42b2b563..e53a586b397b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2878,9 +2878,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy) rcu_segcblist_init(&rdp->cblist); } rcu_segcblist_enqueue(&rdp->cblist, head, lazy); - if (!lazy) - rcu_idle_count_callbacks_posted(); - if (__is_kfree_rcu_offset((unsigned long)func)) trace_rcu_kfree_callback(rcu_state.name, head, (unsigned long)func, @@ -3110,28 +3107,6 @@ static int rcu_pending(void) return 0; } -/* - * Return true if the specified CPU has any callback. If all_lazy is - * non-NULL, store an indication of whether all callbacks are lazy. - * (If there are no callbacks, all of them are deemed to be lazy.) - */ -static bool rcu_cpu_has_callbacks(bool *all_lazy) -{ - bool al = true; - bool hc = false; - struct rcu_data *rdp; - - rdp = this_cpu_ptr(&rcu_data); - if (!rcu_segcblist_empty(&rdp->cblist)) { - hc = true; - if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) - al = false; - } - if (all_lazy) - *all_lazy = al; - return hc; -} - /* * Helper function for rcu_barrier() tracing. If tracing is disabled, * the compiler is expected to optimize this away. diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 50bb41cdc5fb..20feecbb0ab6 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -193,10 +193,7 @@ struct rcu_data { bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */ bool rcu_urgent_qs; /* GP old need light quiescent state. */ #ifdef CONFIG_RCU_FAST_NO_HZ - bool all_lazy; /* Are all CPU's CBs lazy? */ - unsigned long nonlazy_posted; /* # times non-lazy CB posted to CPU. */ - unsigned long nonlazy_posted_snap; - /* Nonlazy_posted snapshot. */ + bool all_lazy; /* All CPU's CBs lazy at idle start? */ unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */ unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */ int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ @@ -430,7 +427,6 @@ static void __init rcu_spawn_boost_kthreads(void); static void rcu_prepare_kthreads(int cpu); static void rcu_cleanup_after_idle(void); static void rcu_prepare_for_idle(void); -static void rcu_idle_count_callbacks_posted(void); static bool rcu_preempt_has_tasks(struct rcu_node *rnp); static bool rcu_preempt_need_deferred_qs(struct task_struct *t); static void rcu_preempt_deferred_qs(struct task_struct *t); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 43f3f2ee9d63..abd238c70900 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1474,7 +1474,7 @@ static void rcu_prepare_kthreads(int cpu) int rcu_needs_cpu(u64 basemono, u64 *nextevt) { *nextevt = KTIME_MAX; - return rcu_cpu_has_callbacks(NULL); + return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist); } /* @@ -1493,14 +1493,6 @@ static void rcu_prepare_for_idle(void) { } -/* - * Don't bother keeping a running count of the number of RCU callbacks - * posted because CONFIG_RCU_FAST_NO_HZ=n. - */ -static void rcu_idle_count_callbacks_posted(void) -{ -} - #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ /* @@ -1583,11 +1575,8 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) lockdep_assert_irqs_disabled(); - /* Snapshot to detect later posting of non-lazy callback. */ - rdp->nonlazy_posted_snap = rdp->nonlazy_posted; - /* If no callbacks, RCU doesn't need the CPU. 
*/ - if (!rcu_cpu_has_callbacks(&rdp->all_lazy)) { + if (rcu_segcblist_empty(&rdp->cblist)) { *nextevt = KTIME_MAX; return 0; } @@ -1601,11 +1590,12 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) rdp->last_accelerate = jiffies; /* Request timer delay depending on laziness, and round. */ - if (!rdp->all_lazy) { + rdp->all_lazy = !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist); + if (rdp->all_lazy) { + dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies; + } else { dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies; - } else { - dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies; } *nextevt = basemono + dj * TICK_NSEC; return 0; @@ -1635,7 +1625,7 @@ static void rcu_prepare_for_idle(void) /* Handle nohz enablement switches conservatively. */ tne = READ_ONCE(tick_nohz_active); if (tne != rdp->tick_nohz_enabled_snap) { - if (rcu_cpu_has_callbacks(NULL)) + if (!rcu_segcblist_empty(&rdp->cblist)) invoke_rcu_core(); /* force nohz to see update. */ rdp->tick_nohz_enabled_snap = tne; return; @@ -1648,10 +1638,8 @@ static void rcu_prepare_for_idle(void) * callbacks, invoke RCU core for the side-effect of recalculating * idle duration on re-entry to idle. */ - if (rdp->all_lazy && - rdp->nonlazy_posted != rdp->nonlazy_posted_snap) { + if (rdp->all_lazy && rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) { rdp->all_lazy = false; - rdp->nonlazy_posted_snap = rdp->nonlazy_posted; invoke_rcu_core(); return; } @@ -1687,19 +1675,6 @@ static void rcu_cleanup_after_idle(void) invoke_rcu_core(); } -/* - * Keep a running count of the number of non-lazy callbacks posted - * on this CPU. This running counter (which is never decremented) allows - * rcu_prepare_for_idle() to detect when something out of the idle loop - * posts a callback, even if an equal number of callbacks are invoked. - * Of course, callbacks should only be posted from within a trace event - * designed to be called from idle or from within RCU_NONIDLE(). - */ -static void rcu_idle_count_callbacks_posted(void) -{ - __this_cpu_add(rcu_data.nonlazy_posted, 1); -} - #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ #ifdef CONFIG_RCU_FAST_NO_HZ @@ -1707,13 +1682,12 @@ static void rcu_idle_count_callbacks_posted(void) static void print_cpu_stall_fast_no_hz(char *cp, int cpu) { struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - unsigned long nlpd = rdp->nonlazy_posted - rdp->nonlazy_posted_snap; - sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c", + sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c", rdp->last_accelerate & 0xffff, jiffies & 0xffff, - ulong2long(nlpd), - rdp->all_lazy ? 'L' : '.', - rdp->tick_nohz_enabled_snap ? '.' : 'D'); + ".l"[rdp->all_lazy], + ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)], + ".D"[!rdp->tick_nohz_enabled_snap]); } #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ From 6ba7d681aca22e53385bdb35b1d7662e61905760 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 9 Jan 2019 15:22:03 -0800 Subject: [PATCH 16/71] rcu: Remove wrapper definitions for obsolete RCU update functions None of synchronize_rcu_bh, synchronize_rcu_bh_expedited, call_rcu_bh, rcu_barrier_bh, synchronize_sched, synchronize_sched_expedited, call_rcu_sched, rcu_barrier_sched, get_state_synchronize_sched, and cond_synchronize_sched are actually used. This commit therefore removes their trivial wrapper-function definitions. Signed-off-by: Paul E. 
McKenney --- include/linux/rcupdate.h | 53 ---------------------------------------- 1 file changed, 53 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 4db8bcacc51a..0e39e0d2629e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -896,57 +896,4 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) return false; } - -/* Transitional pre-consolidation compatibility definitions. */ - -static inline void synchronize_rcu_bh(void) -{ - synchronize_rcu(); -} - -static inline void synchronize_rcu_bh_expedited(void) -{ - synchronize_rcu_expedited(); -} - -static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) -{ - call_rcu(head, func); -} - -static inline void rcu_barrier_bh(void) -{ - rcu_barrier(); -} - -static inline void synchronize_sched(void) -{ - synchronize_rcu(); -} - -static inline void synchronize_sched_expedited(void) -{ - synchronize_rcu_expedited(); -} - -static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) -{ - call_rcu(head, func); -} - -static inline void rcu_barrier_sched(void) -{ - rcu_barrier(); -} - -static inline unsigned long get_state_synchronize_sched(void) -{ - return get_state_synchronize_rcu(); -} - -static inline void cond_synchronize_sched(unsigned long oldstate) -{ - cond_synchronize_rcu(oldstate); -} - #endif /* __LINUX_RCUPDATE_H */ From 9cf422a8e71455032b61c6c0ea56a1e96206aab0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 20 Nov 2018 10:43:34 -0800 Subject: [PATCH 17/71] rcu: Accommodate zero jiffies_till_first_fqs and kthread kicking It is perfectly fine to set the rcutree.jiffies_till_first_fqs boot parameter to zero, in fact, this can be useful on specialty systems that usually have at least one idle CPU and that need fast grace periods. This is because this setting causes the RCU grace-period kthread to scan for idle threads immediately after grace-period initialization, as opposed to waiting several jiffies to do so. It is also perfectly fine to set the rcutree.rcu_kick_kthreads kernel parameter, which gives the RCU grace-period kthread an extra wakeup if it doesn't make progress for a period of three times the setting of the rcutree.jiffies_till_first_fqs boot parameter. This is of course problematic when the value of this parameter is zero, as it can result in unnecessary wakeup IPIs along with unnecessary WARN_ONCE() invocations. This commit therefore defers kthread kicking for at least two jiffies, regardless of the setting of rcutree.jiffies_till_first_fqs. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9180158756d2..b003a3cfe192 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1939,7 +1939,7 @@ static void rcu_gp_fqs_loop(void) if (!ret) { rcu_state.jiffies_force_qs = jiffies + j; WRITE_ONCE(rcu_state.jiffies_kick_kthreads, - jiffies + 3 * j); + jiffies + (j ? 3 * j : 2)); } trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), From 37f62d7cf00c085e1d7a91a6af286c4e8d32e1e1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 30 Nov 2018 16:11:14 -0800 Subject: [PATCH 18/71] rcu: Move rcu_cpu_kthread_task to rcu_data structure Given that RCU has a perfectly good per-CPU rcu_data structure, most per-CPU quantities should be stored there. This commit therefore moves the rcu_cpu_kthread_task per-CPU variable to the rcu_data structure. 
This also makes this variable unconditionally present, which should be acceptable given the memory reduction due to the RCU flavor consolidation and also due to simplifications this will enable. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 6 +++++- kernel/rcu/tree_plugin.h | 11 +++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index d90b02b53c0e..ef517ba25192 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -234,7 +234,11 @@ struct rcu_data { /* Leader CPU takes GP-end wakeups. */ #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ - /* 6) Diagnostic data, including RCU CPU stall warnings. */ + /* 6) RCU priority boosting. */ + struct task_struct *rcu_cpu_kthread_task; + /* rcuc per-CPU kthread or NULL. */ + + /* 7) Diagnostic data, including RCU CPU stall warnings. */ unsigned int softirq_snap; /* Snapshot of softirq activity. */ /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */ struct irq_work rcu_iw; /* Check for non-irq activity. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 1b3dd2fc0cd6..359bf1f6f8e0 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -40,7 +40,6 @@ /* * Control variables for per-CPU and per-rcu_node kthreads. */ -static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); @@ -1308,9 +1307,9 @@ static void invoke_rcu_callbacks_kthread(void) local_irq_save(flags); __this_cpu_write(rcu_cpu_has_work, 1); - if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && - current != __this_cpu_read(rcu_cpu_kthread_task)) { - rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), + if (__this_cpu_read(rcu_data.rcu_cpu_kthread_task) != NULL && + current != __this_cpu_read(rcu_data.rcu_cpu_kthread_task)) { + rcu_wake_cond(__this_cpu_read(rcu_data.rcu_cpu_kthread_task), __this_cpu_read(rcu_cpu_kthread_status)); } local_irq_restore(flags); @@ -1322,7 +1321,7 @@ static void invoke_rcu_callbacks_kthread(void) */ static bool rcu_is_callbacks_kthread(void) { - return __this_cpu_read(rcu_cpu_kthread_task) == current; + return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current; } #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) @@ -1459,7 +1458,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) } static struct smp_hotplug_thread rcu_cpu_thread_spec = { - .store = &rcu_cpu_kthread_task, + .store = &rcu_data.rcu_cpu_kthread_task, .thread_should_run = rcu_cpu_kthread_should_run, .thread_fn = rcu_cpu_kthread, .thread_comm = "rcuc/%u", From 6ffdde28b7558ec48f4e0eee01821a66a67a8e25 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 30 Nov 2018 16:43:05 -0800 Subject: [PATCH 19/71] rcu: Move rcu_cpu_kthread_status to rcu_data structure Given that RCU has a perfectly good per-CPU rcu_data structure, most per-CPU quantities should be stored there. This commit therefore moves the rcu_cpu_kthread_status per-CPU variable to the rcu_data structure. This also makes this variable unconditionally present, which should be acceptable given the memory reduction due to the RCU flavor consolidation and also due to simplifications this will enable. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.h | 3 ++- kernel/rcu/tree_plugin.h | 7 +++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index ef517ba25192..047f5e9350d1 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -237,6 +237,8 @@ struct rcu_data { /* 6) RCU priority boosting. */ struct task_struct *rcu_cpu_kthread_task; /* rcuc per-CPU kthread or NULL. */ + unsigned int rcu_cpu_kthread_status; + /* Running status for rcuc. */ /* 7) Diagnostic data, including RCU CPU stall warnings. */ unsigned int softirq_snap; /* Snapshot of softirq activity. */ @@ -407,7 +409,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name; int rcu_dynticks_snap(struct rcu_data *rdp); #ifdef CONFIG_RCU_BOOST -DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DECLARE_PER_CPU(char, rcu_cpu_has_work); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 359bf1f6f8e0..935dc594cf30 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -40,7 +40,6 @@ /* * Control variables for per-CPU and per-rcu_node kthreads. */ -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); @@ -1310,7 +1309,7 @@ static void invoke_rcu_callbacks_kthread(void) if (__this_cpu_read(rcu_data.rcu_cpu_kthread_task) != NULL && current != __this_cpu_read(rcu_data.rcu_cpu_kthread_task)) { rcu_wake_cond(__this_cpu_read(rcu_data.rcu_cpu_kthread_task), - __this_cpu_read(rcu_cpu_kthread_status)); + __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); } local_irq_restore(flags); } @@ -1383,7 +1382,7 @@ static void rcu_cpu_kthread_setup(unsigned int cpu) static void rcu_cpu_kthread_park(unsigned int cpu) { - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; + per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; } static int rcu_cpu_kthread_should_run(unsigned int cpu) @@ -1398,7 +1397,7 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu) */ static void rcu_cpu_kthread(unsigned int cpu) { - unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); + unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); int spincnt; From 8b4d0f4858864af6a0753740f00353035bd058de Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 30 Nov 2018 17:17:21 -0800 Subject: [PATCH 20/71] rcu: Remove unused rcu_cpu_kthread_loops per-CPU variable The rcu_cpu_kthread_loops variable used to provide debugfs information, but is no longer used. This commit therefore removes it. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 1 - kernel/rcu/tree_plugin.h | 2 -- 2 files changed, 3 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 047f5e9350d1..e50b0a5a94bc 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -410,7 +410,6 @@ int rcu_dynticks_snap(struct rcu_data *rdp); #ifdef CONFIG_RCU_BOOST DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); -DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DECLARE_PER_CPU(char, rcu_cpu_has_work); #endif /* #ifdef CONFIG_RCU_BOOST */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 935dc594cf30..d1f32c63c789 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -40,7 +40,6 @@ /* * Control variables for per-CPU and per-rcu_node kthreads. 
*/ -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); #else /* #ifdef CONFIG_RCU_BOOST */ @@ -1405,7 +1404,6 @@ static void rcu_cpu_kthread(unsigned int cpu) trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); local_bh_disable(); *statusp = RCU_KTHREAD_RUNNING; - this_cpu_inc(rcu_cpu_kthread_loops); local_irq_disable(); work = *workp; *workp = 0; From f7e972ee128e0a65784b13ec1459fe35b817eed7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 30 Nov 2018 18:21:32 -0800 Subject: [PATCH 21/71] rcu: Move rcu_cpu_has_work to rcu_data structure Given that RCU has a perfectly good per-CPU rcu_data structure, most per-CPU quantities should be stored there. This commit therefore moves the rcu_cpu_has_work per-CPU variable to the rcu_data structure. This also makes this variable unconditionally present, which should be acceptable given the memory reduction due to the RCU flavor consolidation and also due to simplifications this will enable. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 3 +-- kernel/rcu/tree_plugin.h | 15 ++++----------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index e50b0a5a94bc..7ae6774a920e 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -238,7 +238,7 @@ struct rcu_data { struct task_struct *rcu_cpu_kthread_task; /* rcuc per-CPU kthread or NULL. */ unsigned int rcu_cpu_kthread_status; - /* Running status for rcuc. */ + char rcu_cpu_has_work; /* 7) Diagnostic data, including RCU CPU stall warnings. */ unsigned int softirq_snap; /* Snapshot of softirq activity. */ @@ -410,7 +410,6 @@ int rcu_dynticks_snap(struct rcu_data *rdp); #ifdef CONFIG_RCU_BOOST DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); -DECLARE_PER_CPU(char, rcu_cpu_has_work); #endif /* #ifdef CONFIG_RCU_BOOST */ /* Forward declarations for rcutree_plugin.h */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index d1f32c63c789..b241c4b20549 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -34,14 +34,7 @@ #include "../time/tick-internal.h" #ifdef CONFIG_RCU_BOOST - #include "../locking/rtmutex_common.h" - -/* - * Control variables for per-CPU and per-rcu_node kthreads. 
- */ -DEFINE_PER_CPU(char, rcu_cpu_has_work); - #else /* #ifdef CONFIG_RCU_BOOST */ /* @@ -1304,7 +1297,7 @@ static void invoke_rcu_callbacks_kthread(void) unsigned long flags; local_irq_save(flags); - __this_cpu_write(rcu_cpu_has_work, 1); + __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); if (__this_cpu_read(rcu_data.rcu_cpu_kthread_task) != NULL && current != __this_cpu_read(rcu_data.rcu_cpu_kthread_task)) { rcu_wake_cond(__this_cpu_read(rcu_data.rcu_cpu_kthread_task), @@ -1386,7 +1379,7 @@ static void rcu_cpu_kthread_park(unsigned int cpu) static int rcu_cpu_kthread_should_run(unsigned int cpu) { - return __this_cpu_read(rcu_cpu_has_work); + return __this_cpu_read(rcu_data.rcu_cpu_has_work); } /* @@ -1397,7 +1390,7 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu) static void rcu_cpu_kthread(unsigned int cpu) { unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); - char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); + char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); int spincnt; for (spincnt = 0; spincnt < 10; spincnt++) { @@ -1472,7 +1465,7 @@ static void __init rcu_spawn_boost_kthreads(void) int cpu; for_each_possible_cpu(cpu) - per_cpu(rcu_cpu_has_work, cpu) = 0; + per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__)) return; rcu_for_each_leaf_node(rnp) From b2c1955b88495c1531b2080ba4fad119c0a03cc1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 30 Nov 2018 19:12:04 -0800 Subject: [PATCH 22/71] rcu: Remove unused rcu_cpu_kthread_cpu per-CPU variable The rcu_cpu_kthread_cpu used to provide debugfs information, but is no longer used. This commit therefore removes it. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 7ae6774a920e..008c356c7033 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -408,10 +408,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name; int rcu_dynticks_snap(struct rcu_data *rdp); -#ifdef CONFIG_RCU_BOOST -DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); -#endif /* #ifdef CONFIG_RCU_BOOST */ - /* Forward declarations for rcutree_plugin.h */ static void rcu_bootup_announce(void); static void rcu_qs(void); From a9fefdb257259ac2f0f5fcd916edecc5c9427635 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 3 Dec 2018 14:07:17 -0800 Subject: [PATCH 23/71] rcu: Update NOCB comments This commit updates a few obsolete comments in the RCU callback-offload code. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index b241c4b20549..f0019c2a2cbc 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1857,22 +1857,24 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp) /* * Offload callback processing from the boot-time-specified set of CPUs - * specified by rcu_nocb_mask. For each CPU in the set, there is a - * kthread created that pulls the callbacks from the corresponding CPU, - * waits for a grace period to elapse, and invokes the callbacks. - * The no-CBs CPUs do a wake_up() on their kthread when they insert - * a callback into any empty list, unless the rcu_nocb_poll boot parameter - * has been specified, in which case each kthread actively polls its - * CPU. 
(Which isn't so great for energy efficiency, but which does - * reduce RCU's overhead on that CPU.) + * specified by rcu_nocb_mask. For the CPUs in the set, there are kthreads + * created that pull the callbacks from the corresponding CPU, wait for + * a grace period to elapse, and invoke the callbacks. These kthreads + * are organized into leaders, which manage incoming callbacks, wait for + * grace periods, and awaken followers, and the followers, which only + * invoke callbacks. Each leader is its own follower. The no-CBs CPUs + * do a wake_up() on their kthread when they insert a callback into any + * empty list, unless the rcu_nocb_poll boot parameter has been specified, + * in which case each kthread actively polls its CPU. (Which isn't so great + * for energy efficiency, but which does reduce RCU's overhead on that CPU.) * * This is intended to be used in conjunction with Frederic Weisbecker's * adaptive-idle work, which would seriously reduce OS jitter on CPUs * running CPU-bound user-mode computations. * - * Offloading of callback processing could also in theory be used as - * an energy-efficiency measure because CPUs with no RCU callbacks - * queued are more aggressive about entering dyntick-idle mode. + * Offloading of callbacks can also be used as an energy-efficiency + * measure because CPUs with no RCU callbacks queued are more aggressive + * about entering dyntick-idle mode. */ @@ -1976,10 +1978,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype, raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); } -/* - * Does the specified CPU need an RCU callback for this invocation - * of rcu_barrier()? - */ +/* Does rcu_barrier need to queue an RCU callback on the specified CPU? */ static bool rcu_nocb_cpu_needs_barrier(int cpu) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); @@ -1995,8 +1994,8 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu) * callbacks would be posted. In the worst case, the first * barrier in rcu_barrier() suffices (but the caller cannot * necessarily rely on this, not a substitute for the caller - * getting the concurrency design right!). There must also be - * a barrier between the following load an posting of a callback + * getting the concurrency design right!). There must also be a + * barrier between the following load and posting of a callback * (if a callback is in fact needed). This is associated with an * atomic_inc() in the caller. */ From fd897573fa4cfe66ebddf5f4444f36710cf0cad0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 10 Dec 2018 16:09:49 -0800 Subject: [PATCH 24/71] rcu: Improve diagnostics for failed RCU grace-period start If a grace period fails to start (for example, because you commented out the last two lines of rcu_accelerate_cbs_unlocked()), rcu_core() will invoke rcu_check_gp_start_stall(), which will notice and complain. However, this complaint is lacking crucial debugging information such as when the last wakeup executed and what the value of ->gp_seq was at that time. This commit therefore removes the current pr_alert() from rcu_check_gp_start_stall(), instead invoking show_rcu_gp_kthreads(), which has been updated to print the needed information, which is collected by rcu_gp_kthread_wake(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 55 +++++++++++++++++++++++++++-------------------- kernel/rcu/tree.h | 2 ++ 2 files changed, 34 insertions(+), 23 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b003a3cfe192..8543a90d53f2 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -512,6 +512,14 @@ void rcu_force_quiescent_state(void) } EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); +/* + * Return the root node of the rcu_state structure. + */ +static struct rcu_node *rcu_get_root(void) +{ + return &rcu_state.node[0]; +} + /* * Convert a ->gp_state value to a character string. */ @@ -529,19 +537,30 @@ void show_rcu_gp_kthreads(void) { int cpu; unsigned long j; + unsigned long ja; + unsigned long jr; + unsigned long jw; struct rcu_data *rdp; struct rcu_node *rnp; - j = jiffies - READ_ONCE(rcu_state.gp_activity); - pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n", + j = jiffies; + ja = j - READ_ONCE(rcu_state.gp_activity); + jr = j - READ_ONCE(rcu_state.gp_req_activity); + jw = j - READ_ONCE(rcu_state.gp_wake_time); + pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n", rcu_state.name, gp_state_getname(rcu_state.gp_state), - rcu_state.gp_state, rcu_state.gp_kthread->state, j); + rcu_state.gp_state, + rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL, + ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq), + (long)READ_ONCE(rcu_state.gp_seq), + (long)READ_ONCE(rcu_get_root()->gp_seq_needed), + READ_ONCE(rcu_state.gp_flags)); rcu_for_each_node_breadth_first(rnp) { if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) continue; - pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n", - rnp->grplo, rnp->grphi, rnp->gp_seq, - rnp->gp_seq_needed); + pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n", + rnp->grplo, rnp->grphi, (long)rnp->gp_seq, + (long)rnp->gp_seq_needed); if (!rcu_is_leaf_node(rnp)) continue; for_each_leaf_node_possible_cpu(rnp, cpu) { @@ -550,8 +569,8 @@ void show_rcu_gp_kthreads(void) ULONG_CMP_GE(rcu_state.gp_seq, rdp->gp_seq_needed)) continue; - pr_info("\tcpu %d ->gp_seq_needed %lu\n", - cpu, rdp->gp_seq_needed); + pr_info("\tcpu %d ->gp_seq_needed %ld\n", + cpu, (long)rdp->gp_seq_needed); } } /* sched_show_task(rcu_state.gp_kthread); */ @@ -577,14 +596,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, } EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); -/* - * Return the root node of the rcu_state structure. - */ -static struct rcu_node *rcu_get_root(void) -{ - return &rcu_state.node[0]; -} - /* * Enter an RCU extended quiescent state, which can be either the * idle loop or adaptive-tickless usermode execution. @@ -1560,7 +1571,8 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp) * Awaken the grace-period kthread. Don't do a self-awaken, and don't * bother awakening when there is nothing for the grace-period kthread * to do (as in several CPUs raced to awaken, and we lost), and finally - * don't try to awaken a kthread that has not yet been created. + * don't try to awaken a kthread that has not yet been created. If + * all those checks are passed, track some debug information and awaken. 
*/ static void rcu_gp_kthread_wake(void) { @@ -1568,6 +1580,8 @@ static void rcu_gp_kthread_wake(void) !READ_ONCE(rcu_state.gp_flags) || !rcu_state.gp_kthread) return; + WRITE_ONCE(rcu_state.gp_wake_time, jiffies); + WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); swake_up_one(&rcu_state.gp_wq); } @@ -2657,16 +2671,11 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp, raw_spin_unlock_irqrestore_rcu_node(rnp, flags); return; } - pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n", - __func__, (long)READ_ONCE(rcu_state.gp_seq), - (long)READ_ONCE(rnp_root->gp_seq_needed), - j - rcu_state.gp_req_activity, j - rcu_state.gp_activity, - rcu_state.gp_flags, rcu_state.gp_state, rcu_state.name, - rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL); WARN_ON(1); if (rnp_root != rnp) raw_spin_unlock_rcu_node(rnp_root); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + show_rcu_gp_kthreads(); } /* diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 008c356c7033..1f2ada7ef7d7 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -309,6 +309,8 @@ struct rcu_state { struct swait_queue_head gp_wq; /* Where GP task waits. */ short gp_flags; /* Commands for GP task. */ short gp_state; /* GP kthread sleep state. */ + unsigned long gp_wake_time; /* Last GP kthread wake. */ + unsigned long gp_wake_seq; /* ->gp_seq at ^^^. */ /* End of fields guarded by root rcu_node's lock. */ From 3b6505fd8eb86e3ef5ce12b34fe81e9edeb84475 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 12 Dec 2018 07:20:07 -0800 Subject: [PATCH 25/71] rcu: Protect rcu_check_gp_kthread_starvation() access to ->gp_flags The rcu_check_gp_kthread_starvation() function can be invoked without holding locks, so the access to the rcu_state structure's ->gp_flags field must be protected with READ_ONCE(). This commit therefore adds this protection. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8543a90d53f2..238150684fed 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1192,7 +1192,7 @@ static void rcu_check_gp_kthread_starvation(void) pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n", rcu_state.name, j, (long)rcu_seq_current(&rcu_state.gp_seq), - rcu_state.gp_flags, + READ_ONCE(rcu_state.gp_flags), gp_state_getname(rcu_state.gp_state), rcu_state.gp_state, gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1); if (gpk) { From 2ccaff10f71307ad4f75ce673b815d3fe6254e3d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 12 Dec 2018 12:32:06 -0800 Subject: [PATCH 26/71] rcu: Add sysrq rcu_node-dump capability Life is hard if RCU manages to get stuck without triggering RCU CPU stall warnings or triggering the rcu_check_gp_start_stall() checks for failing to start a grace period. This commit therefore adds a boot-time-selectable sysrq key (commandeering "y") that allows manually dumping Tree RCU state. The new rcutree.sysrq_rcu kernel boot parameter must be set for this sysrq to be available. Signed-off-by: Paul E. 
McKenney --- .../admin-guide/kernel-parameters.txt | 5 ++++ kernel/rcu/tree.c | 25 +++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b799bcf67d7b..b0dbb2fa401f 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3721,6 +3721,11 @@ This wake_up() will be accompanied by a WARN_ONCE() splat and an ftrace_dump(). + rcutree.sysrq_rcu= [KNL] + Commandeer a sysrq key to dump out Tree RCU's + rcu_node tree with an eye towards determining + why a new grace period has not yet started. + rcuperf.gp_async= [KNL] Measure performance of asynchronous grace-period primitives such as call_rcu(). diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 238150684fed..9ceb93f848cd 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -62,6 +62,7 @@ #include #include #include +#include #include "tree.h" #include "rcu.h" @@ -115,6 +116,9 @@ int num_rcu_lvl[] = NUM_RCU_LVL_INIT; int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ /* panic() on RCU Stall sysctl. */ int sysctl_panic_on_rcu_stall __read_mostly; +/* Commandeer a sysrq key to dump RCU's tree. */ +static bool sysrq_rcu; +module_param(sysrq_rcu, bool, 0444); /* * The rcu_scheduler_active variable is initialized to the value @@ -577,6 +581,27 @@ void show_rcu_gp_kthreads(void) } EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); +/* Dump grace-period-request information due to commandeered sysrq. */ +static void sysrq_show_rcu(int key) +{ + show_rcu_gp_kthreads(); +} + +static struct sysrq_key_op sysrq_rcudump_op = { + .handler = sysrq_show_rcu, + .help_msg = "show-rcu(y)", + .action_msg = "Show RCU tree", + .enable_mask = SYSRQ_ENABLE_DUMP, +}; + +static int __init rcu_sysrq_init(void) +{ + if (sysrq_rcu) + return register_sysrq_key('y', &sysrq_rcudump_op); + return 0; +} +early_initcall(rcu_sysrq_init); + /* * Send along grace-period-related data for rcutorture diagnostics. */ From 1d1f898df6586c5ea9aeaf349f13089c6fa37903 Mon Sep 17 00:00:00 2001 From: "Zhang, Jun" Date: Tue, 18 Dec 2018 06:55:01 -0800 Subject: [PATCH 27/71] rcu: Do RCU GP kthread self-wakeup from softirq and interrupt The rcu_gp_kthread_wake() function is invoked when it might be necessary to wake the RCU grace-period kthread. Because self-wakeups are normally a useless waste of CPU cycles, if rcu_gp_kthread_wake() is invoked from this kthread, it naturally refuses to do the wakeup. Unfortunately, natural though it might be, this heuristic fails when rcu_gp_kthread_wake() is invoked from an interrupt or softirq handler that interrupted the grace-period kthread just after the final check of the wait-event condition but just before the schedule() call. In this case, a wakeup is required, even though the call to rcu_gp_kthread_wake() is within the RCU grace-period kthread's context. Failing to provide this wakeup can result in grace periods failing to start, which in turn results in out-of-memory conditions. This race window is quite narrow, but it actually did happen during real testing. It would of course need to be fixed even if it was strictly theoretical in nature. This patch does not Cc stable because it does not apply cleanly to earlier kernel versions. 
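As an illustrative aside (not part of the patch; the helper name below is hypothetical, and only the standard current, in_interrupt(), and in_serving_softirq() primitives are assumed), the distinction the fix draws between a genuine self-wakeup and a wakeup requested from irq or softirq context can be sketched as:

#include <linux/preempt.h>	/* in_interrupt(), in_serving_softirq() */
#include <linux/sched.h>	/* current, struct task_struct */

/*
 * Skip the wakeup only for a true self-wakeup, that is, when the
 * grace-period kthread itself is running in task context.  If an
 * interrupt or softirq handler interrupted that kthread between its
 * final wait-condition check and schedule(), the wakeup must still be
 * delivered, otherwise the grace period may never start.
 */
static bool gp_wakeup_is_true_self_wakeup(struct task_struct *gp_kthread)
{
	return current == gp_kthread &&
	       !in_interrupt() && !in_serving_softirq();
}
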
Fixes: 48a7639ce80c ("rcu: Make callers awaken grace-period kthread") Reported-by: "He, Bo" Co-developed-by: "Zhang, Jun" Co-developed-by: "He, Bo" Co-developed-by: "xiao, jin" Co-developed-by: Bai, Jie A Signed-off: "Zhang, Jun" Signed-off: "He, Bo" Signed-off: "xiao, jin" Signed-off: Bai, Jie A Signed-off-by: "Zhang, Jun" [ paulmck: Switch from !in_softirq() to "!in_interrupt() && !in_serving_softirq() to avoid redundant wakeups and to also handle the interrupt-handler scenario as well as the softirq-handler scenario that actually occurred in testing. ] Signed-off-by: Paul E. McKenney Link: https://lkml.kernel.org/r/CD6925E8781EFD4D8E11882D20FC406D52A11F61@SHSMSX104.ccr.corp.intel.com --- kernel/rcu/tree.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9ceb93f848cd..21775eebb8f0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1593,15 +1593,23 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp) } /* - * Awaken the grace-period kthread. Don't do a self-awaken, and don't - * bother awakening when there is nothing for the grace-period kthread - * to do (as in several CPUs raced to awaken, and we lost), and finally - * don't try to awaken a kthread that has not yet been created. If - * all those checks are passed, track some debug information and awaken. + * Awaken the grace-period kthread. Don't do a self-awaken (unless in + * an interrupt or softirq handler), and don't bother awakening when there + * is nothing for the grace-period kthread to do (as in several CPUs raced + * to awaken, and we lost), and finally don't try to awaken a kthread that + * has not yet been created. If all those checks are passed, track some + * debug information and awaken. + * + * So why do the self-wakeup when in an interrupt or softirq handler + * in the grace-period kthread's context? Because the kthread might have + * been interrupted just as it was going to sleep, and just after the final + * pre-sleep check of the awaken condition. In this case, a wakeup really + * is required, and is therefore supplied. */ static void rcu_gp_kthread_wake(void) { - if (current == rcu_state.gp_kthread || + if ((current == rcu_state.gp_kthread && + !in_interrupt() && !in_serving_softirq()) || !READ_ONCE(rcu_state.gp_flags) || !rcu_state.gp_kthread) return; From 13dc7d0c7a2ed438f0ec8e9fb365a1256d87cf87 Mon Sep 17 00:00:00 2001 From: "Zhang, Jun" Date: Wed, 19 Dec 2018 10:37:34 -0800 Subject: [PATCH 28/71] rcu: Prevent needless ->gp_seq_needed update in __note_gp_changes() Currently, __note_gp_changes() checks to see if the rcu_node structure's ->gp_seq_needed is greater than or equal to that of the rcu_data structure, and if so, updates the rcu_data structure's ->gp_seq_needed field. This results in a useless store in the case where the two fields are equal. This commit therefore carries out this store only in the case where the rcu_node structure's ->gp_seq_needed is strictly greater than that of the rcu_data structure. Signed-off-by: "Zhang, Jun" Signed-off-by: Paul E. 
McKenney Link: https://lkml.kernel.org/r/88DC34334CA3444C85D647DBFA962C2735AD5F77@SHSMSX104.ccr.corp.intel.com --- kernel/rcu/tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 21775eebb8f0..9d0e2ac9356e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1758,7 +1758,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) zero_cpu_stall_ticks(rdp); } rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ - if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap) + if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) rdp->gp_seq_needed = rnp->gp_seq_needed; WRITE_ONCE(rdp->gpwrap, false); rcu_gpnum_ovf(rnp, rdp); From 1a4762b9272f513aa4c7d8abe7a7529540f95e2d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 20 Nov 2018 10:22:00 -0800 Subject: [PATCH 29/71] doc: Now jiffies_till_sched_qs solicits help from cond_resched() The rcutree.jiffies_till_sched_qs kernel boot parameter used to solicit help only from rcu_note_context_switch(), but now also solicits help from cond_resched(). This commit therefore updates kernel-parameters.txt accordingly. Signed-off-by: Paul E. McKenney --- .../admin-guide/kernel-parameters.txt | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b0dbb2fa401f..bec49d593583 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3654,19 +3654,6 @@ latencies, which will choose a value aligned with the appropriate hardware boundaries. - rcutree.jiffies_till_sched_qs= [KNL] - Set required age in jiffies for a - given grace period before RCU starts - soliciting quiescent-state help from - rcu_note_context_switch(). If not specified, the - kernel will calculate a value based on the most - recent settings of rcutree.jiffies_till_first_fqs - and rcutree.jiffies_till_next_fqs. - This calculated value may be viewed in - rcutree.jiffies_to_sched_qs. Any attempt to - set rcutree.jiffies_to_sched_qs will be - cheerfully overwritten. - rcutree.jiffies_till_first_fqs= [KNL] Set delay from grace-period initialization to first attempt to force quiescent states. @@ -3678,6 +3665,20 @@ quiescent states. Units are jiffies, minimum value is one, and maximum value is HZ. + rcutree.jiffies_till_sched_qs= [KNL] + Set required age in jiffies for a + given grace period before RCU starts + soliciting quiescent-state help from + rcu_note_context_switch() and cond_resched(). + If not specified, the kernel will calculate + a value based on the most recent settings + of rcutree.jiffies_till_first_fqs + and rcutree.jiffies_till_next_fqs. + This calculated value may be viewed in + rcutree.jiffies_to_sched_qs. Any attempt to set + rcutree.jiffies_to_sched_qs will be cheerfully + overwritten. + rcutree.kthread_prio= [KNL,BOOT] Set the SCHED_FIFO priority of the RCU per-CPU kthreads (rcuc/N). This value is also used for From 6efebf84997b26a6264eaeacf05eb6f5de2772b7 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Tue, 11 Dec 2018 08:28:56 -0800 Subject: [PATCH 30/71] doc: CPU-hotplug notifiers cannot invoke synchronize_srcu() or srcu_barrier() SRCU's synchronize_srcu() may not be invoked from CPU-hotplug notifiers, due to the fact that SRCU grace periods make use of timers and the possibility of timers being temporarily stranded on the outgoing CPU. This stranding of timers means that timers posted to the outgoing CPU will not fire until late in the CPU-hotplug process. The problem is that if a notifier is waiting on an SRCU grace period, that grace period is waiting on a timer, and that timer is stranded on the outgoing CPU, then the notifier will never be awakened, in other words, deadlock has occurred. This same situation of course also prohibits srcu_barrier() from being invoked from CPU-hotplug notifiers. This commit therefore updates the requirements to include this restriction. Signed-off-by: Paul E. McKenney --- .../RCU/Design/Requirements/Requirements.html | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html index 9fca73e03a98..5a9238a2883c 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.html +++ b/Documentation/RCU/Design/Requirements/Requirements.html @@ -3099,7 +3099,7 @@ If you block forever in one of a given domain's SRCU read-side critical sections, then that domain's grace periods will also be blocked forever. Of course, one good way to block forever is to deadlock, which can happen if any operation in a given domain's SRCU read-side critical -section can block waiting, either directly or indirectly, for that domain's +section can wait, either directly or indirectly, for that domain's grace period to elapse. For example, this results in a self-deadlock: @@ -3139,12 +3139,18 @@ API, which, in combination with srcu_read_unlock(), guarantees a full memory barrier.

-Also unlike other RCU flavors, SRCU's callbacks-wait function -srcu_barrier() may be invoked from CPU-hotplug notifiers, -though this is not necessarily a good idea. -The reason that this is possible is that SRCU is insensitive -to whether or not a CPU is online, which means that srcu_barrier() -need not exclude CPU-hotplug operations. +Also unlike other RCU flavors, synchronize_srcu() may not +be invoked from CPU-hotplug notifiers, due to the fact that SRCU grace +periods make use of timers and the possibility of timers being temporarily +“stranded” on the outgoing CPU. +This stranding of timers means that timers posted to the outgoing CPU +will not fire until late in the CPU-hotplug process. +The problem is that if a notifier is waiting on an SRCU grace period, +that grace period is waiting on a timer, and that timer is stranded on the +outgoing CPU, then the notifier will never be awakened, in other words, +deadlock has occurred. +This same situation of course also prohibits srcu_barrier() +from being invoked from CPU-hotplug notifiers.
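As an illustrative aside (not part of the patch; the SRCU domain and the CPU-hotplug teardown callback below are hypothetical), the pattern that the added paragraph forbids looks like this:

#include <linux/cpuhotplug.h>
#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);	/* hypothetical SRCU domain */

/*
 * CPU-hotplug teardown callback: runs while timers may already be
 * stranded on the outgoing CPU.  Waiting for an SRCU grace period here
 * can therefore deadlock, exactly as described above.
 */
static int my_cpuhp_teardown(unsigned int cpu)
{
	synchronize_srcu(&my_srcu);	/* BAD: may never return */
	return 0;
}

Such a callback would typically be registered via cpuhp_setup_state(); one common way to stay out of trouble is to defer the synchronize_srcu() or srcu_barrier() to a context outside the CPU-hotplug path, for example a workqueue item.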

SRCU also differs from other RCU flavors in that SRCU's expedited and From 87d1779dc48fae842514771c867d797f23307062 Mon Sep 17 00:00:00 2001 From: Junchang Wang Date: Tue, 1 Jan 2019 22:03:19 +0800 Subject: [PATCH 31/71] doc: Fix outdated links Fix outdated links in whatisRCU.txt. Signed-off-by: Junchang Wang Signed-off-by: Paul E. McKenney --- Documentation/RCU/whatisRCU.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index 4a6854318b17..1ace20815bb1 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt @@ -302,7 +302,7 @@ rcu_dereference() must prohibit. The rcu_dereference_protected() variant takes a lockdep expression to indicate which locks must be acquired by the caller. If the indicated protection is not provided, - a lockdep splat is emitted. See RCU/Design/Requirements.html + a lockdep splat is emitted. See RCU/Design/Requirements/Requirements.html and the API's code comments for more details and example usage. The following diagram shows how each API communicates among the @@ -560,7 +560,7 @@ presents two such "toy" implementations of RCU, one that is implemented in terms of familiar locking primitives, and another that more closely resembles "classic" RCU. Both are way too simple for real-world use, lacking both functionality and performance. However, they are useful -in getting a feel for how RCU works. See kernel/rcupdate.c for a +in getting a feel for how RCU works. See kernel/rcu/update.c for a production-quality implementation, and see: http://www.rdrop.com/users/paulmck/RCU From 2aa5503026ceaa8860697b93c9e5bbbcd025ba89 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 20 Nov 2018 08:29:35 -0800 Subject: [PATCH 32/71] rcu: Docbook for rcu_head_init() and rcu_head_after_call_rcu() This commit adds the missing asterisks required to make Sphinx pick up the current header comments for these two functions. Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 0e39e0d2629e..632113946757 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -859,7 +859,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /* Has the specified rcu_head structure been handed to call_rcu()? */ -/* +/** * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() * @rhp: The rcu_head structure to initialize. * @@ -874,10 +874,10 @@ static inline void rcu_head_init(struct rcu_head *rhp) rhp->func = (rcu_callback_t)~0L; } -/* +/** * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()? * @rhp: The rcu_head structure to test. - * @func: The function passed to call_rcu() along with @rhp. + * @f: The function passed to call_rcu() along with @rhp. * * Returns @true if the @rhp has been passed to call_rcu() with @func, * and @false otherwise. Emits a warning in any other case, including From c98cac603f1ce7d00e2a802b5640bced3bc3c1f2 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 21 Nov 2018 11:35:03 -0800 Subject: [PATCH 33/71] rcu: Rename rcu_check_callbacks() to rcu_sched_clock_irq() The name rcu_check_callbacks() arguably made sense back in the early 2000s when RCU was quite a bit simpler than it is today, but it has become quite misleading, especially with the advent of dyntick-idle and NO_HZ_FULL. 
The rcu_check_callbacks() function is RCU's hook into the scheduling-clock interrupt, and is now but one of many ways that callbacks get promoted to invocable state. This commit therefore changes the name to rcu_sched_clock_irq(), which is the same number of characters and clearly indicates this function's relation to the rest of the Linux kernel. In addition, for the sake of consistency, rcu_flavor_check_callbacks() is also renamed to rcu_flavor_sched_clock_irq(). While in the area, the header comments for both functions are reworked. Signed-off-by: Paul E. McKenney --- .../Tree-RCU-Memory-Ordering.html | 4 ++-- .../TreeRCU-callback-invocation.svg | 2 +- .../RCU/Design/Memory-Ordering/TreeRCU-gp.svg | 4 ++-- .../RCU/Design/Memory-Ordering/TreeRCU-qs.svg | 2 +- include/linux/rcupdate.h | 2 +- kernel/rcu/tiny.c | 2 +- kernel/rcu/tree.c | 18 +++++++------- kernel/rcu/tree.h | 2 +- kernel/rcu/tree_plugin.h | 24 ++++++++----------- kernel/time/timer.c | 2 +- 10 files changed, 29 insertions(+), 33 deletions(-) diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html index e4d94fba6c89..a3acfd49255f 100644 --- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html +++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html @@ -485,7 +485,7 @@ section that the grace period must wait on. noted by rcu_node_context_switch() on the left. On the other hand, if the CPU takes a scheduler-clock interrupt while executing in usermode, a quiescent state will be noted by -rcu_check_callbacks() on the right. +rcu_sched_clock_irq() on the right. Either way, the passage through a quiescent state will be noted in a per-CPU variable. @@ -651,7 +651,7 @@ to end. These callbacks are identified by rcu_advance_cbs(), which is usually invoked by __note_gp_changes(). As shown in the diagram below, this invocation can be triggered by -the scheduling-clock interrupt (rcu_check_callbacks() on +the scheduling-clock interrupt (rcu_sched_clock_irq() on the left) or by idle entry (rcu_cleanup_after_idle() on the right, but only for kernels build with CONFIG_RCU_FAST_NO_HZ=y). diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg index 832408313d93..3fcf0c17cef2 100644 --- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg +++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg @@ -349,7 +349,7 @@ font-weight="bold" font-size="192" id="text202-7-5" - style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_check_callbacks() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_sched_clock_irq() rcu_check_callbacks() + xml:space="preserve">rcu_sched_clock_irq() rcu_check_callbacks() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_sched_clock_irq() rcu_check_callbacks() + xml:space="preserve">rcu_sched_clock_irq() rcu_read_unlock_special.b.need_qs = false; } } @@ -778,13 +778,13 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) } /* - * Check for a quiescent state from the current CPU. 
When a task blocks, - * the task is recorded in the corresponding CPU's rcu_node structure, - * which is checked elsewhere. - * - * Caller must disable hard irqs. + * Check for a quiescent state from the current CPU, including voluntary + * context switches for Tasks RCU. When a task blocks, the task is + * recorded in the corresponding CPU's rcu_node structure, which is checked + * elsewhere, hence this function need only check for quiescent states + * related to the current CPU, not to those related to tasks. */ -static void rcu_flavor_check_callbacks(int user) +static void rcu_flavor_sched_clock_irq(int user) { struct task_struct *t = current; @@ -1030,14 +1030,10 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) } /* - * Check to see if this CPU is in a non-context-switch quiescent state - * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). - * Also schedule RCU core processing. - * - * This function must be called from hardirq context. It is normally - * invoked from the scheduling-clock interrupt. + * Check to see if this CPU is in a non-context-switch quiescent state, + * namely user mode and idle loop. */ -static void rcu_flavor_check_callbacks(int user) +static void rcu_flavor_sched_clock_irq(int user) { if (user || rcu_is_cpu_rrupt_from_idle()) { diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 444156debfa0..6eb7cc4b6d52 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1632,7 +1632,7 @@ void update_process_times(int user_tick) /* Note: this timer irq context must be accounted for as well. */ account_process_tick(p, user_tick); run_local_timers(); - rcu_check_callbacks(user_tick); + rcu_sched_clock_irq(user_tick); #ifdef CONFIG_IRQ_WORK if (in_irq()) irq_work_tick(); From fb60e533beab3bf27adc0e39e03337e7584c6d5a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 21 Nov 2018 12:42:12 -0800 Subject: [PATCH 34/71] rcu: Rename rcu_process_callbacks() to rcu_core() for Tree RCU Although the name rcu_process_callbacks() still makes sense for Tiny RCU, where most of what it does is invoke callbacks, it no longer makes much sense for Tree RCU, especially given that the actual callback invocation is relegated to rcu_do_batch(), or, for no-CBs CPUs, to the rcuo kthreads. Especially in the latter case, rcu_process_callbacks() has very little to do with actual callbacks. A better description of this function is that it performs RCU's core processing. This commit therefore changes the name of Tree RCU's rcu_process_callbacks() function to rcu_core(), which also has the virtue of being consistent with the existing invoke_rcu_core() function. While in the area, the header comment is reworked. Signed-off-by: Paul E. McKenney --- .../Memory-Ordering/Tree-RCU-Memory-Ordering.html | 2 +- .../RCU/Design/Memory-Ordering/TreeRCU-gp.svg | 4 ++-- .../RCU/Design/Memory-Ordering/TreeRCU-qs.svg | 4 ++-- kernel/rcu/tree.c | 10 +++------- 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html index a3acfd49255f..8d21af02b1f0 100644 --- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html +++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html @@ -491,7 +491,7 @@ in a per-CPU variable.

The next time an RCU_SOFTIRQ handler executes on this CPU (for example, after the next scheduler-clock -interrupt), __rcu_process_callbacks() will invoke +interrupt), rcu_core() will invoke rcu_check_quiescent_state(), which will notice the recorded quiescent state, and invoke rcu_report_qs_rdp(). diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg index f0bbe6f8d729..2bcd742d6e49 100644 --- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg +++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg @@ -3924,7 +3924,7 @@ font-style="normal" y="-4418.6582" x="3745.7725" - xml:space="preserve">rcu_process_callbacks() + xml:space="preserve">rcu_core() rcu_check_quiescent_state()) + xml:space="preserve">rcu_check_quiescent_state() rcu_process_callbacks() + xml:space="preserve">rcu_core() rcu_check_quiescent_state()) + xml:space="preserve">rcu_check_quiescent_state() Date: Mon, 26 Nov 2018 11:12:39 -0800 Subject: [PATCH 35/71] rcu: Remove preemption disabling from expedited CPU selection It turns out that it is queue_delayed_work_on() rather than queue_work_on() that has difficulties when used concurrently with CPU-hotplug removal operations. It is therefore unnecessary to protect CPU identification and queue_work_on() with preempt_disable(). This commit therefore removes the preempt_disable() and preempt_enable() from sync_rcu_exp_select_cpus(), which has the further benefit of reducing the number of changes that must be maintained in the -rt patchset. Reported-by: Thomas Gleixner Reported-by: Sebastian Siewior Suggested-by: Boqun Feng Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_exp.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index d882ca0cd01b..763649d345d7 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -448,7 +448,6 @@ static void sync_rcu_exp_select_cpus(void) continue; } INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); - preempt_disable(); cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1); /* If all offline, queue the work on an unbound CPU. */ if (unlikely(cpu > rnp->grphi - rnp->grplo)) @@ -456,7 +455,6 @@ static void sync_rcu_exp_select_cpus(void) else cpu += rnp->grplo; queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); - preempt_enable(); rnp->exp_need_flush = true; } From 39abefe7433236735c92749492fd045fd40c071e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 26 Nov 2018 18:33:25 -0800 Subject: [PATCH 36/71] rcu: Repair rcu_nmi_exit() docbook header This commit removes the "@irq" argument from the rcu_nmi_exit() docbook header, given that this function now has no arguments. Reported-by: kbuild test robot Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e67f8dc1894b..9cbadddf1f31 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -725,7 +725,6 @@ static __always_inline void rcu_nmi_exit_common(bool irq) /** * rcu_nmi_exit - inform RCU of exit from NMI context - * @irq: Is this call from rcu_irq_exit? * * If you add or remove a call to rcu_nmi_exit(), be sure to test * with CONFIG_RCU_EQS_DEBUG=y. From c2d8089de7f0b849af11c271278fe6b904db5df2 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Wed, 28 Nov 2018 12:47:23 -0800 Subject: [PATCH 37/71] rcu: Fix obsolete DYNTICK_IRQ_NONIDLE comment This commit updates the DYNTICK_IRQ_NONIDLE header comment to remove the obsolete commentary about unmatched rcu_irq_{enter,exit}(). Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 75787186bd4f..51f1fb21f171 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -30,7 +30,7 @@ #define RCU_TRACE(stmt) #endif /* #else #ifdef CONFIG_RCU_TRACE */ -/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */ +/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */ #define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1) From 423a86a610cad121742ebe698ef98a3b4c87b5dd Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 12 Dec 2018 14:37:10 -0800 Subject: [PATCH 38/71] rcu: Add sparse check to rcu_assign_pointer() The rcu_assign_pointer() function currently doesn't do any sparse checking on the assigned-to pointer. So its possible that a pointer that is not __rcu annotated is assigned with rcu_assign_pointer without sparse complaints. Because rcu_dereference() already does such checking, this commit makes rcu_assign_pointer() to do the same. The extra error could be helpful in cases where an RCU pointer is assigned with rcu_assign_pointer() but not annotated with __rcu. This doesn't generate any code in the normal case because __CHECKER__ is defined only in the context of sparse. This commit also renames rcu_dereference_sparse() to rcu_check_parse() since the checking now happens not only during derereferencing but also during assignment. Test: Introduced an rcu_assign_pointer in code and checked the output of sparse with and without this change. The change correctly causes sparse to throw an error. Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6f8f047c4068..4a2cce4d4bd9 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -309,16 +309,16 @@ static inline void rcu_preempt_sleep_check(void) { } */ #ifdef __CHECKER__ -#define rcu_dereference_sparse(p, space) \ +#define rcu_check_sparse(p, space) \ ((void)(((typeof(*p) space *)p) == p)) #else /* #ifdef __CHECKER__ */ -#define rcu_dereference_sparse(p, space) +#define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ #define __rcu_access_pointer(p, space) \ ({ \ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ - rcu_dereference_sparse(p, space); \ + rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(_________p1)); \ }) #define __rcu_dereference_check(p, c, space) \ @@ -326,13 +326,13 @@ static inline void rcu_preempt_sleep_check(void) { } /* Dependency order vs. p above. 
*/ \ typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ - rcu_dereference_sparse(p, space); \ + rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(________p1)); \ }) #define __rcu_dereference_protected(p, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ - rcu_dereference_sparse(p, space); \ + rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) #define rcu_dereference_raw(p) \ @@ -382,6 +382,7 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_assign_pointer(p, v) \ ({ \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ + rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ @@ -785,7 +786,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) */ #define RCU_INIT_POINTER(p, v) \ do { \ - rcu_dereference_sparse(p, __rcu); \ + rcu_check_sparse(p, __rcu); \ WRITE_ONCE(p, RCU_INITIALIZER(v)); \ } while (0) From 728e3e6178aa5790066b642dd2ebc9e9bdacff1b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Jan 2019 10:13:44 -0800 Subject: [PATCH 39/71] include/asm-generic: Remove spin_is_locked() comment The WARN_ON_SMP() comment header suggests using spin_is_locked() to check for locks being held. But these days we prefer lockdep_assert_held(), so this commit removes that suggestion. Signed-off-by: Paul E. McKenney Cc: Arnd Bergmann Cc: --- include/asm-generic/bug.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 20561a60db9c..0e9bd9c83870 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -211,9 +211,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint, /* * WARN_ON_SMP() is for cases that the warning is either * meaningless for !SMP or may even cause failures. - * This is usually used for cases that we have - * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked() - * returns 0 for uniprocessor settings. * It can also be used with values that are only defined * on SMP: * From 6706dae90d85d394d0134d0ec516c25560b9ce77 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Jan 2019 10:39:49 -0800 Subject: [PATCH 40/71] virt/kvm: Replace spin_is_locked() with lockdep MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit lockdep_assert_held() is better suited to checking locking requirements, since it only checks if the current thread holds the lock regardless of whether someone else does. This is also a step towards possibly removing spin_is_locked(). Signed-off-by: Paul E. McKenney Cc: Paolo Bonzini Cc: "Radim Krčmář" Cc: Acked-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5ecea812cb6a..28de87ff9f51 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4083,7 +4083,7 @@ static int kvm_suspend(void) static void kvm_resume(void) { if (kvm_usage_count) { - WARN_ON(raw_spin_is_locked(&kvm_count_lock)); + lockdep_assert_held(&kvm_count_lock); hardware_enable_nolock(NULL); } } From c8ca1aa774b20f182733d1661f3b6aa3105338e7 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Fri, 30 Nov 2018 10:06:46 -0800 Subject: [PATCH 41/71] srcu: Check for invalid idx argument in srcu_read_unlock() The current SRCU implementation has an idx argument of zero or one, and never anything else. This commit therefore adds a WARN_ON_ONCE() to complain if this restriction is violated. Signed-off-by: Paul E. McKenney --- include/linux/srcu.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c614375cd264..33cf83b9bda8 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -223,6 +223,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp) { + WARN_ON_ONCE(idx & ~0x1); rcu_lock_release(&(ssp)->dep_map); __srcu_read_unlock(ssp, idx); } From e81baf4cb19a9b428ba477fd0423f81672a58817 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 11 Dec 2018 12:12:38 +0100 Subject: [PATCH 42/71] srcu: Remove srcu_queue_delayed_work_on() srcu_queue_delayed_work_on() disables preemption (and therefore CPU hotplug in RCU's case) and then checks based on its own accounting if a CPU is online. If the CPU is online it uses queue_delayed_work_on() otherwise it fallbacks to queue_delayed_work(). The problem here is that queue_work() on -RT does not work with disabled preemption. queue_work_on() works also on an offlined CPU. queue_delayed_work_on() has the problem that it is possible to program a timer on an offlined CPU. This timer will fire once the CPU is online again. But until then, the timer remains programmed and nothing will happen. Add a local timer which will fire (as requested per delay) on the local CPU and then enqueue the work on the specific CPU. RCUtorture testing with SRCU-P for 24h showed no problems. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Paul E. McKenney --- include/linux/srcutree.h | 3 ++- kernel/rcu/srcutree.c | 57 ++++++++++++++++++---------------------- kernel/rcu/tree.c | 4 --- kernel/rcu/tree.h | 8 ------ 4 files changed, 27 insertions(+), 45 deletions(-) diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 6f292bd3e7db..0faa978c9880 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -45,7 +45,8 @@ struct srcu_data { unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ bool srcu_cblist_invoking; /* Invoking these CBs? */ - struct delayed_work work; /* Context for CB invoking. */ + struct timer_list delay_work; /* Delay for CB invoking */ + struct work_struct work; /* Context for CB invoking. */ struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ struct srcu_node *mynode; /* Leaf srcu_node. */ unsigned long grpmask; /* Mask for leaf srcu_node */ diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 3600d88d8956..7f041f2435df 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -58,6 +58,7 @@ static bool __read_mostly srcu_init_done; static void srcu_invoke_callbacks(struct work_struct *work); static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay); static void process_srcu(struct work_struct *work); +static void srcu_delay_timer(struct timer_list *t); /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). 
*/ #define spin_lock_rcu_node(p) \ @@ -156,7 +157,8 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static) snp->grphi = cpu; } sdp->cpu = cpu; - INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); + INIT_WORK(&sdp->work, srcu_invoke_callbacks); + timer_setup(&sdp->delay_work, srcu_delay_timer, 0); sdp->ssp = ssp; sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); if (is_static) @@ -386,13 +388,19 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced) } else { flush_delayed_work(&ssp->work); } - for_each_possible_cpu(cpu) + for_each_possible_cpu(cpu) { + struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); + if (quiesced) { - if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work))) + if (WARN_ON(timer_pending(&sdp->delay_work))) + return; /* Just leak it! */ + if (WARN_ON(work_pending(&sdp->work))) return; /* Just leak it! */ } else { - flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work); + del_timer_sync(&sdp->delay_work); + flush_work(&sdp->work); } + } if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) || WARN_ON(srcu_readers_active(ssp))) { pr_info("%s: Active srcu_struct %p state: %d\n", @@ -463,39 +471,23 @@ static void srcu_gp_start(struct srcu_struct *ssp) WARN_ON_ONCE(state != SRCU_STATE_SCAN1); } -/* - * Track online CPUs to guide callback workqueue placement. - */ -DEFINE_PER_CPU(bool, srcu_online); -void srcu_online_cpu(unsigned int cpu) +static void srcu_delay_timer(struct timer_list *t) { - WRITE_ONCE(per_cpu(srcu_online, cpu), true); + struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work); + + queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); } -void srcu_offline_cpu(unsigned int cpu) -{ - WRITE_ONCE(per_cpu(srcu_online, cpu), false); -} - -/* - * Place the workqueue handler on the specified CPU if online, otherwise - * just run it whereever. This is useful for placing workqueue handlers - * that are to invoke the specified CPU's callbacks. 
- */ -static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, - struct delayed_work *dwork, +static void srcu_queue_delayed_work_on(struct srcu_data *sdp, unsigned long delay) { - bool ret; + if (!delay) { + queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); + return; + } - preempt_disable(); - if (READ_ONCE(per_cpu(srcu_online, cpu))) - ret = queue_delayed_work_on(cpu, wq, dwork, delay); - else - ret = queue_delayed_work(wq, dwork, delay); - preempt_enable(); - return ret; + timer_reduce(&sdp->delay_work, jiffies + delay); } /* @@ -504,7 +496,7 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, */ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) { - srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay); + srcu_queue_delayed_work_on(sdp, delay); } /* @@ -1186,7 +1178,8 @@ static void srcu_invoke_callbacks(struct work_struct *work) struct srcu_data *sdp; struct srcu_struct *ssp; - sdp = container_of(work, struct srcu_data, work.work); + sdp = container_of(work, struct srcu_data, work); + ssp = sdp->ssp; rcu_cblist_init(&ready_cbs); spin_lock_irq_rcu_node(sdp); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1c4add096078..127255795859 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3408,8 +3408,6 @@ int rcutree_online_cpu(unsigned int cpu) raw_spin_lock_irqsave_rcu_node(rnp, flags); rnp->ffmask |= rdp->grpmask; raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - if (IS_ENABLED(CONFIG_TREE_SRCU)) - srcu_online_cpu(cpu); if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) return 0; /* Too early in boot for scheduler work. */ sync_sched_exp_online_cleanup(cpu); @@ -3434,8 +3432,6 @@ int rcutree_offline_cpu(unsigned int cpu) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); rcutree_affinity_setting(cpu, cpu); - if (IS_ENABLED(CONFIG_TREE_SRCU)) - srcu_offline_cpu(cpu); return 0; } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 149557b7c39c..4bba017c703c 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -458,11 +458,3 @@ static void rcu_bind_gp_kthread(void); static bool rcu_nohz_full_cpu(void); static void rcu_dynticks_task_enter(void); static void rcu_dynticks_task_exit(void); - -#ifdef CONFIG_SRCU -void srcu_online_cpu(unsigned int cpu); -void srcu_offline_cpu(unsigned int cpu); -#else /* #ifdef CONFIG_SRCU */ -void srcu_online_cpu(unsigned int cpu) { } -void srcu_offline_cpu(unsigned int cpu) { } -#endif /* #else #ifdef CONFIG_SRCU */ From cd618d102b753a3f6823b297ac5c5d03d4e7f0c2 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Jan 2019 13:41:26 -0800 Subject: [PATCH 43/71] rcutorture: Record grace periods in forward-progress histogram This commit records grace periods in rcutorture's n_launders_hist[] histogram, thus allowing rcu_torture_fwd_cb_hist() to print out the elapsed number of grace periods between buckets. This information helps to determine whether a lack of forward progress is due to stalled grace periods on the one hand or due to sluggish callback invocation on the other. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index f6e85faa4ff4..0955f3a20952 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1630,21 +1630,34 @@ static bool rcu_fwd_emergency_stop; #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. 
*/ #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ -static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)]; +struct rcu_launder_hist { + long n_launders; + unsigned long launder_gp_seq; +}; +#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) +static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; +static unsigned long rcu_launder_gp_seq_start; static void rcu_torture_fwd_cb_hist(void) { + unsigned long gps; + unsigned long gps_old; int i; int j; for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--) - if (n_launders_hist[i] > 0) + if (n_launders_hist[i].n_launders > 0) break; pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", __func__, jiffies - rcu_fwd_startat); - for (j = 0; j <= i; j++) - pr_cont(" %ds/%d: %ld", - j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]); + gps_old = rcu_launder_gp_seq_start; + for (j = 0; j <= i; j++) { + gps = n_launders_hist[j].launder_gp_seq; + pr_cont(" %ds/%d: %ld:%ld", + j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders, + rcutorture_seq_diff(gps, gps_old)); + gps_old = gps; + } pr_cont("\n"); } @@ -1666,7 +1679,8 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); if (i >= ARRAY_SIZE(n_launders_hist)) i = ARRAY_SIZE(n_launders_hist) - 1; - n_launders_hist[i]++; + n_launders_hist[i].n_launders++; + n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); spin_unlock_irqrestore(&rcu_fwd_lock, flags); } @@ -1786,9 +1800,10 @@ static void rcu_torture_fwd_prog_cr(void) n_max_cbs = 0; n_max_gps = 0; for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++) - n_launders_hist[i] = 0; + n_launders_hist[i].n_launders = 0; cver = READ_ONCE(rcu_torture_current_version); gps = cur_ops->get_gp_seq(); + rcu_launder_gp_seq_start = gps; while (time_before(jiffies, stopat) && !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { rfcp = READ_ONCE(rcu_fwd_cb_head); From 0d8a9ea9764a0e34e17e3b80a2be3855de239d6e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 4 Dec 2018 14:59:12 -0800 Subject: [PATCH 44/71] torture: Explain and simplify odd "for" loop in mkinitrd.sh Why a Bourne-shell "for" loop? And why 192 instances of "a"? This commit adds a shell comment to present the answer to these mysteries. It also uses a series of factor-of-four Bourne-shell assignments to make it easy to see how many instances there are, replacing the earlier wall of 'a' characters. Reported-by: Josh Triplett Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett [ paulmck: Fix wrong-variable bugs noted by Andrea Parri. ] --- .../selftests/rcutorture/bin/mkinitrd.sh | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh index da298394daa2..e79eb35c41e2 100755 --- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh +++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh @@ -40,17 +40,24 @@ mkdir $T cat > $T/init << '__EOF___' #!/bin/sh # Run in userspace a few milliseconds every second. This helps to -# exercise the NO_HZ_FULL portions of RCU. +# exercise the NO_HZ_FULL portions of RCU. The 192 instances of "a" was +# empirically shown to give a nice multi-millisecond burst of user-mode +# execution on a 2GHz CPU, as desired. 
Modern CPUs will vary from a +# couple of milliseconds up to perhaps 100 milliseconds, which is an +# acceptable range. +# +# Why not calibrate an exact delay? Because within this initrd, we +# are restricted to Bourne-shell builtins, which as far as I know do not +# provide any means of obtaining a fine-grained timestamp. + +a4="a a a a" +a16="$a4 $a4 $a4 $a4" +a64="$a16 $a16 $a16 $a16" +a192="$a64 $a64 $a64" while : do q= - for i in \ - a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ - a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ - a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ - a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ - a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \ - a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a + for i in $a192 do q="$q $i" done From 3a6cb58f159e64241b2af9374acad41a70939349 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 10 Dec 2018 09:44:52 -0800 Subject: [PATCH 45/71] rcutorture: Add grace period after CPU offline Beyond a certain point in the CPU-hotplug offline process, timers get stranded on the outgoing CPU, and won't fire until that CPU comes back online, which might well be never. This commit therefore adds a hook in torture_onoff_init() that is invoked from torture_offline(), which rcutorture uses to occasionally wait for a grace period. This should result in failures for RCU implementations that rely on stranded timers eventually firing in the absence of the CPU coming back online. Reported-by: Sebastian Andrzej Siewior Signed-off-by: Paul E. McKenney --- include/linux/torture.h | 3 ++- kernel/locking/locktorture.c | 2 +- kernel/rcu/rcutorture.c | 11 ++++++++++- kernel/torture.c | 6 +++++- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/include/linux/torture.h b/include/linux/torture.h index 48fad21109fc..f2d3bcbf4337 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -50,11 +50,12 @@ do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) /* Definitions for online/offline exerciser. */ +typedef void torture_ofl_func(void); bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_offl, int *min_onl, int *max_onl); bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_onl, int *min_onl, int *max_onl); -int torture_onoff_init(long ooholdoff, long oointerval); +int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f); void torture_onoff_stats(void); bool torture_onoff_failures(void); diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 7d0b0ed74404..c8b348097bb5 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -970,7 +970,7 @@ static int __init lock_torture_init(void) /* Prepare torture context. 
*/ if (onoff_interval > 0) { firsterr = torture_onoff_init(onoff_holdoff * HZ, - onoff_interval * HZ); + onoff_interval * HZ, NULL); if (firsterr) goto unwind; } diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 0955f3a20952..9eb9235c1ec9 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -2243,6 +2243,14 @@ static void rcu_test_debug_objects(void) #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ } +static void rcutorture_sync(void) +{ + static unsigned long n; + + if (cur_ops->sync && !(++n & 0xfff)) + cur_ops->sync(); +} + static int __init rcu_torture_init(void) { @@ -2404,7 +2412,8 @@ rcu_torture_init(void) firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); if (firsterr) goto unwind; - firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval); + firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, + rcutorture_sync); if (firsterr) goto unwind; firsterr = rcu_torture_stall_init(); diff --git a/kernel/torture.c b/kernel/torture.c index bbf6d473e50c..a03ff722352b 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -75,6 +75,7 @@ static DEFINE_MUTEX(fullstop_mutex); static struct task_struct *onoff_task; static long onoff_holdoff; static long onoff_interval; +static torture_ofl_func *onoff_f; static long n_offline_attempts; static long n_offline_successes; static unsigned long sum_offline; @@ -118,6 +119,8 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes, pr_alert("%s" TORTURE_FLAG "torture_onoff task: offlined %d\n", torture_type, cpu); + if (onoff_f) + onoff_f(); (*n_offl_successes)++; delta = jiffies - starttime; *sum_offl += delta; @@ -243,11 +246,12 @@ torture_onoff(void *arg) /* * Initiate online-offline handling. */ -int torture_onoff_init(long ooholdoff, long oointerval) +int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f) { #ifdef CONFIG_HOTPLUG_CPU onoff_holdoff = ooholdoff; onoff_interval = oointerval; + onoff_f = f; if (onoff_interval <= 0) return 0; return torture_create_kthread(torture_onoff, NULL, onoff_task); From e838a7d66ee2bb7abb46214cb9a3505749e29505 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 28 Dec 2018 07:48:43 -0800 Subject: [PATCH 46/71] rcuperf: Stop abusing IS_ENABLED() The ever-evolving IS_ENABLED() macro is intended for CONFIG_* Kconfig options, but rcuperf currently uses it for the decidedly non-CONFIG_* MODULE macro. In the spirit of not inviting trouble, this commit substitutes tried-and-true #ifdef. Reported-by: Ingo Molnar Signed-off-by: Paul E. McKenney Acked-by: Ingo Molnar --- kernel/rcu/rcuperf.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index b459da70b4fc..8d342d57cdaf 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -83,13 +83,19 @@ MODULE_AUTHOR("Paul E. McKenney "); * Various other use cases may of course be specified. 
*/ +#ifdef MODULE +# define RCUPERF_SHUTDOWN 0 +#else +# define RCUPERF_SHUTDOWN 1 +#endif + torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives"); torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader"); torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); torture_param(int, holdoff, 10, "Holdoff time before test start (s)"); torture_param(int, nreaders, -1, "Number of RCU reader threads"); torture_param(int, nwriters, -1, "Number of RCU updater threads"); -torture_param(bool, shutdown, !IS_ENABLED(MODULE), +torture_param(bool, shutdown, RCUPERF_SHUTDOWN, "Shutdown at end of performance tests."); torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable"); From f90a66d6850e082a41c87e2883030019f42b5096 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sat, 29 Dec 2018 19:02:16 +0100 Subject: [PATCH 47/71] rcutorture/nolibc: Fix the clobbered registers in the MIPS syscall definition A last-minute checkpatch cleanup caused most of list of clobbered registers to be lost in the MIPS syscall definition. Although this code is not yet used on MIPS, it is nevertheless better to fix it before it does get used. Cc: Paul E. McKenney Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/nolibc.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/testing/selftests/rcutorture/bin/nolibc.h index f98f5b92d3eb..e8687cd24b8c 100644 --- a/tools/testing/selftests/rcutorture/bin/nolibc.h +++ b/tools/testing/selftests/rcutorture/bin/nolibc.h @@ -1006,7 +1006,7 @@ struct sys_stat_struct { : "=r"(_num), "=r"(_arg4) \ : "r"(_num) \ : "memory", "cc", "at", "v1", "hi", "lo", \ - \ + "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \ ); \ _arg4 ? -_num : _num; \ }) @@ -1025,7 +1025,7 @@ struct sys_stat_struct { : "0"(_num), \ "r"(_arg1) \ : "memory", "cc", "at", "v1", "hi", "lo", \ - \ + "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \ ); \ _arg4 ? -_num : _num; \ }) @@ -1045,7 +1045,7 @@ struct sys_stat_struct { : "0"(_num), \ "r"(_arg1), "r"(_arg2) \ : "memory", "cc", "at", "v1", "hi", "lo", \ - \ + "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \ ); \ _arg4 ? -_num : _num; \ }) @@ -1066,7 +1066,7 @@ struct sys_stat_struct { : "0"(_num), \ "r"(_arg1), "r"(_arg2), "r"(_arg3) \ : "memory", "cc", "at", "v1", "hi", "lo", \ - \ + "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \ ); \ _arg4 ? -_num : _num; \ }) @@ -1087,7 +1087,7 @@ struct sys_stat_struct { : "0"(_num), \ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \ : "memory", "cc", "at", "v1", "hi", "lo", \ - \ + "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \ ); \ _arg4 ? -_num : _num; \ }) @@ -1110,7 +1110,7 @@ struct sys_stat_struct { : "0"(_num), \ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \ : "memory", "cc", "at", "v1", "hi", "lo", \ - \ + "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \ ); \ _arg4 ? -_num : _num; \ }) From 85ebb12c4e22c792738b69405f26b5c5948db83f Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sat, 29 Dec 2018 19:02:17 +0100 Subject: [PATCH 48/71] rcutorture/nolibc: Fix some poor indentation and alignment A few macros had their rightmost backslash misaligned, and the pollfd struct definition resisted the previous code reindent. Nothing else changed. Cc: Paul E. 
McKenney Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney Reviewed-by: Joey Pabalinas --- tools/testing/selftests/rcutorture/bin/nolibc.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/testing/selftests/rcutorture/bin/nolibc.h index e8687cd24b8c..cfbbbad4bca4 100644 --- a/tools/testing/selftests/rcutorture/bin/nolibc.h +++ b/tools/testing/selftests/rcutorture/bin/nolibc.h @@ -81,9 +81,9 @@ typedef signed long time_t; /* for poll() */ struct pollfd { - int fd; - short int events; - short int revents; + int fd; + short int events; + short int revents; }; /* for select() */ @@ -239,7 +239,7 @@ struct stat { "syscall\n" \ : "=a" (_ret) \ : "0"(_num) \ - : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ + : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ ); \ _ret; \ }) @@ -255,7 +255,7 @@ struct stat { : "=a" (_ret) \ : "r"(_arg1), \ "0"(_num) \ - : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ + : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ ); \ _ret; \ }) @@ -272,7 +272,7 @@ struct stat { : "=a" (_ret) \ : "r"(_arg1), "r"(_arg2), \ "0"(_num) \ - : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ + : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ ); \ _ret; \ }) @@ -290,7 +290,7 @@ struct stat { : "=a" (_ret) \ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \ "0"(_num) \ - : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ + : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \ ); \ _ret; \ }) From cc72a50994b4910e36445d750b2749b86c37d32b Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sat, 29 Dec 2018 19:02:18 +0100 Subject: [PATCH 49/71] rcutorture/nolibc: Add a bit of documentation to explain how to use nolibc Ingo rightfully asked for a bit more documentation in the nolibc header, so this patch adds some explanation about its purpose, how it's made, and how to use it. Cc: Ingo Molnar Cc: Paul E. McKenney Cc: Randy Dunlap Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney Reviewed-by: Joey Pabalinas Reviewed-by: Randy Dunlap --- .../testing/selftests/rcutorture/bin/nolibc.h | 92 ++++++++++++++++--- 1 file changed, 79 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/testing/selftests/rcutorture/bin/nolibc.h index cfbbbad4bca4..1708e9f9f8aa 100644 --- a/tools/testing/selftests/rcutorture/bin/nolibc.h +++ b/tools/testing/selftests/rcutorture/bin/nolibc.h @@ -3,7 +3,85 @@ * Copyright (C) 2017-2018 Willy Tarreau */ -/* some archs (at least aarch64) don't expose the regular syscalls anymore by +/* + * This file is designed to be used as a libc alternative for minimal programs + * with very limited requirements. It consists of a small number of syscall and + * type definitions, and the minimal startup code needed to call main(). + * All syscalls are declared as static functions so that they can be optimized + * away by the compiler when not used. + * + * Syscalls are split into 3 levels: + * - The lower level is the arch-specific syscall() definition, consisting in + * assembly code in compound expressions. These are called my_syscall0() to + * my_syscall6() depending on the number of arguments. The MIPS + * implementation is limited to 5 arguments. All input arguments are cast + * to a long stored in a register. These expressions always return the + * syscall's return value as a signed long value which is often either a + * pointer or the negated errno value. + * + * - The second level is mostly architecture-independent. 
It is made of + * static functions called sys_() which rely on my_syscallN() + * depending on the syscall definition. These functions are responsible + * for exposing the appropriate types for the syscall arguments (int, + * pointers, etc) and for setting the appropriate return type (often int). + * A few of them are architecture-specific because the syscalls are not all + * mapped exactly the same among architectures. For example, some archs do + * not implement select() and need pselect6() instead, so the sys_select() + * function will have to abstract this. + * + * - The third level is the libc call definition. It exposes the lower raw + * sys_() calls in a way that looks like what a libc usually does, + * takes care of specific input values, and of setting errno upon error. + * There can be minor variations compared to standard libc calls. For + * example the open() call always takes 3 args here. + * + * The errno variable is declared static and unused. This way it can be + * optimized away if not used. However this means that a program made of + * multiple C files may observe different errno values (one per C file). For + * the type of programs this project targets it usually is not a problem. The + * resulting program may even be reduced by defining the NOLIBC_IGNORE_ERRNO + * macro, in which case the errno value will never be assigned. + * + * Some stdint-like integer types are defined. These are valid on all currently + * supported architectures, because signs are enforced, ints are assumed to be + * 32 bits, longs the size of a pointer and long long 64 bits. If more + * architectures have to be supported, this may need to be adapted. + * + * Some macro definitions like the O_* values passed to open(), and some + * structures like the sys_stat struct depend on the architecture. + * + * The definitions start with the architecture-specific parts, which are picked + * based on what the compiler knows about the target architecture, and are + * completed with the generic code. Since it is the compiler which sets the + * target architecture, cross-compiling normally works out of the box without + * having to specify anything. + * + * Finally some very common libc-level functions are provided. It is the case + * for a few functions usually found in string.h, ctype.h, or stdlib.h. Nothing + * is currently provided regarding stdio emulation. + * + * The macro NOLIBC is always defined, so that it is possible for a program to + * check this macro to know if it is being built against and decide to disable + * some features or simply not to include some standard libc files. + * + * Ideally this file should be split in multiple files for easier long term + * maintenance, but provided as a single file as it is now, it's quite + * convenient to use. Maybe some variations involving a set of includes at the + * top could work. + * + * A simple static executable may be built this way : + * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ + * -static -include nolibc.h -lgcc -o hello hello.c + * + * A very useful calling convention table may be found here : + * http://man7.org/linux/man-pages/man2/syscall.2.html + * + * This doc is quite convenient though not necessarily up to date : + * https://w3challs.com/syscalls/ + * + */ + +/* Some archs (at least aarch64) don't expose the regular syscalls anymore by * default, either because they have an "_at" replacement, or because there are * more modern alternatives. For now we'd rather still use them. 
*/ @@ -19,18 +97,6 @@ #define NOLIBC -/* Build a static executable this way : - * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ - * -static -include nolibc.h -lgcc -o hello hello.c - * - * Useful calling convention table found here : - * http://man7.org/linux/man-pages/man2/syscall.2.html - * - * This doc is even better : - * https://w3challs.com/syscalls/ - */ - - /* this way it will be removed if unused */ static int errno; From 30ca20517ac136e63967396899af89f359f16f36 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sat, 29 Dec 2018 19:04:53 +0100 Subject: [PATCH 50/71] tools headers: Move the nolibc header from rcutorture to tools/include/nolibc/ As suggested by Ingo, this header file might benefit other tools than just rcutorture. For now it's quite limited, but is easy to extend, so exposing it into tools/include/nolibc/ will make it much easier to adopt by other tools. The mkinitrd.sh script in rcutorture was updated to use this new location. Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Paul E. McKenney Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin => include/nolibc}/nolibc.h | 0 tools/testing/selftests/rcutorture/bin/mkinitrd.sh | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename tools/{testing/selftests/rcutorture/bin => include/nolibc}/nolibc.h (100%) diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/include/nolibc/nolibc.h similarity index 100% rename from tools/testing/selftests/rcutorture/bin/nolibc.h rename to tools/include/nolibc/nolibc.h diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh index e79eb35c41e2..83552bb007b4 100755 --- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh +++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh @@ -131,8 +131,8 @@ if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \ | grep -q '^yes'; then # architecture supported by nolibc ${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \ - -nostdlib -include ../bin/nolibc.h -lgcc -s -static -Os \ - -o init init.c + -nostdlib -include ../../../../include/nolibc/nolibc.h \ + -lgcc -s -static -Os -o init init.c else ${CROSS_COMPILE}gcc -s -static -Os -o init init.c fi From 6f7541df3a6cbf530da0ec19d402acc1a3de1b30 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sat, 29 Dec 2018 19:55:48 +0100 Subject: [PATCH 51/71] MAINTAINERS: Add myself as the maintainer for the nolibc header file(s) I don't expect too many updates there so I should not become a bottleneck, and if I become one, it will mean that someone will be more active than me and will be in a better position than me to take over maintainership. :-) Signed-off-by: Willy Tarreau Signed-off-by: Paul E. 
McKenney --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 4d04cebb4a71..e2e4eac8a7a7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10791,6 +10791,12 @@ F: drivers/power/supply/bq27xxx_battery_i2c.c F: drivers/power/supply/isp1704_charger.c F: drivers/power/supply/rx51_battery.c +NOLIBC HEADER FILE +M: Willy Tarreau +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git +F: tools/include/nolibc/ + NTB AMD DRIVER M: Shyam Sundar S K L: linux-ntb@googlegroups.com From 6684880a8b161994e320d8cad70d9ea5dc87f031 Mon Sep 17 00:00:00 2001 From: Junchang Wang Date: Thu, 3 Jan 2019 22:24:51 +0800 Subject: [PATCH 52/71] RCU/torture.txt: Remove section MODULE PARAMETERS The supported module parameters are detailed in both RCU/torture.txt and admin-guide/kernel-parameters.txt, and the latter is actively maintained. So this patch removes section MODULE PARAMETERS in torture.txt and adds a reference to the information in kernel-parameters.txt. Signed-off-by: Junchang Wang Signed-off-by: Paul E. McKenney [ paulmck: Add search string. ] --- Documentation/RCU/torture.txt | 169 +--------------------------------- 1 file changed, 2 insertions(+), 167 deletions(-) diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt index 55918b54808b..a41a0384d20c 100644 --- a/Documentation/RCU/torture.txt +++ b/Documentation/RCU/torture.txt @@ -10,173 +10,8 @@ status messages via printk(), which can be examined via the dmesg command (perhaps grepping for "torture"). The test is started when the module is loaded, and stops when the module is unloaded. - -MODULE PARAMETERS - -This module has the following parameters: - -fqs_duration Duration (in microseconds) of artificially induced bursts - of force_quiescent_state() invocations. In RCU - implementations having force_quiescent_state(), these - bursts help force races between forcing a given grace - period and that grace period ending on its own. - -fqs_holdoff Holdoff time (in microseconds) between consecutive calls - to force_quiescent_state() within a burst. - -fqs_stutter Wait time (in seconds) between consecutive bursts - of calls to force_quiescent_state(). - -gp_normal Make the fake writers use normal synchronous grace-period - primitives. - -gp_exp Make the fake writers use expedited synchronous grace-period - primitives. If both gp_normal and gp_exp are set, or - if neither gp_normal nor gp_exp are set, then randomly - choose the primitive so that about 50% are normal and - 50% expedited. By default, neither are set, which - gives best overall test coverage. - -irqreader Says to invoke RCU readers from irq level. This is currently - done via timers. Defaults to "1" for variants of RCU that - permit this. (Or, more accurately, variants of RCU that do - -not- permit this know to ignore this variable.) - -n_barrier_cbs If this is nonzero, RCU barrier testing will be conducted, - in which case n_barrier_cbs specifies the number of - RCU callbacks (and corresponding kthreads) to use for - this testing. The value cannot be negative. If you - specify this to be non-zero when torture_type indicates a - synchronous RCU implementation (one for which a member of - the synchronize_rcu() rather than the call_rcu() family is - used -- see the documentation for torture_type below), an - error will be reported and no testing will be carried out. - -nfakewriters This is the number of RCU fake writer threads to run. 
Fake - writer threads repeatedly use the synchronous "wait for - current readers" function of the interface selected by - torture_type, with a delay between calls to allow for various - different numbers of writers running in parallel. - nfakewriters defaults to 4, which provides enough parallelism - to trigger special cases caused by multiple writers, such as - the synchronize_srcu() early return optimization. - -nreaders This is the number of RCU reading threads supported. - The default is twice the number of CPUs. Why twice? - To properly exercise RCU implementations with preemptible - read-side critical sections. - -onoff_interval - The number of seconds between each attempt to execute a - randomly selected CPU-hotplug operation. Defaults to - zero, which disables CPU hotplugging. In HOTPLUG_CPU=n - kernels, rcutorture will silently refuse to do any - CPU-hotplug operations regardless of what value is - specified for onoff_interval. - -onoff_holdoff The number of seconds to wait until starting CPU-hotplug - operations. This would normally only be used when - rcutorture was built into the kernel and started - automatically at boot time, in which case it is useful - in order to avoid confusing boot-time code with CPUs - coming and going. - -shuffle_interval - The number of seconds to keep the test threads affinitied - to a particular subset of the CPUs, defaults to 3 seconds. - Used in conjunction with test_no_idle_hz. - -shutdown_secs The number of seconds to run the test before terminating - the test and powering off the system. The default is - zero, which disables test termination and system shutdown. - This capability is useful for automated testing. - -stall_cpu The number of seconds that a CPU should be stalled while - within both an rcu_read_lock() and a preempt_disable(). - This stall happens only once per rcutorture run. - If you need multiple stalls, use modprobe and rmmod to - repeatedly run rcutorture. The default for stall_cpu - is zero, which prevents rcutorture from stalling a CPU. - - Note that attempts to rmmod rcutorture while the stall - is ongoing will hang, so be careful what value you - choose for this module parameter! In addition, too-large - values for stall_cpu might well induce failures and - warnings in other parts of the kernel. You have been - warned! - -stall_cpu_holdoff - The number of seconds to wait after rcutorture starts - before stalling a CPU. Defaults to 10 seconds. - -stat_interval The number of seconds between output of torture - statistics (via printk()). Regardless of the interval, - statistics are printed when the module is unloaded. - Setting the interval to zero causes the statistics to - be printed -only- when the module is unloaded, and this - is the default. - -stutter The length of time to run the test before pausing for this - same period of time. Defaults to "stutter=5", so as - to run and pause for (roughly) five-second intervals. - Specifying "stutter=0" causes the test to run continuously - without pausing, which is the old default behavior. - -test_boost Whether or not to test the ability of RCU to do priority - boosting. Defaults to "test_boost=1", which performs - RCU priority-inversion testing only if the selected - RCU implementation supports priority boosting. Specifying - "test_boost=0" never performs RCU priority-inversion - testing. 
Specifying "test_boost=2" performs RCU - priority-inversion testing even if the selected RCU - implementation does not support RCU priority boosting, - which can be used to test rcutorture's ability to - carry out RCU priority-inversion testing. - -test_boost_interval - The number of seconds in an RCU priority-inversion test - cycle. Defaults to "test_boost_interval=7". It is - usually wise for this value to be relatively prime to - the value selected for "stutter". - -test_boost_duration - The number of seconds to do RCU priority-inversion testing - within any given "test_boost_interval". Defaults to - "test_boost_duration=4". - -test_no_idle_hz Whether or not to test the ability of RCU to operate in - a kernel that disables the scheduling-clock interrupt to - idle CPUs. Boolean parameter, "1" to test, "0" otherwise. - Defaults to omitting this test. - -torture_type The type of RCU to test, with string values as follows: - - "rcu": rcu_read_lock(), rcu_read_unlock() and call_rcu(), - along with expedited, synchronous, and polling - variants. - - "rcu_bh": rcu_read_lock_bh(), rcu_read_unlock_bh(), and - call_rcu_bh(), along with expedited and synchronous - variants. - - "rcu_busted": This tests an intentionally incorrect version - of RCU in order to help test rcutorture itself. - - "srcu": srcu_read_lock(), srcu_read_unlock() and - call_srcu(), along with expedited and - synchronous variants. - - "sched": preempt_disable(), preempt_enable(), and - call_rcu_sched(), along with expedited, - synchronous, and polling variants. - - "tasks": voluntary context switch and call_rcu_tasks(), - along with expedited and synchronous variants. - - Defaults to "rcu". - -verbose Enable debug printk()s. Default is disabled. - +Module parameters are prefixed by "rcutorture." in +Documentation/admin-guide/kernel-parameters.txt. OUTPUT From b5b11890de69ec216ab7a10a24fcd1b2d46a2d6e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:05:33 -0800 Subject: [PATCH 53/71] rcu/rcu.h: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney Reviewed-by: Thomas Gleixner --- kernel/rcu/rcu.h | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 75787186bd4f..e672b8f050ac 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update definitions shared among RCU implementations. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2011 * - * Author: Paul E. McKenney + * Author: Paul E. McKenney */ #ifndef __LINUX_RCU_H From 8bf05ed3adf9c40f2f47a967dcfc713d26b07247 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Thu, 17 Jan 2019 10:09:19 -0800 Subject: [PATCH 54/71] rcu/rcuperf: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney Reviewed-by: Thomas Gleixner --- kernel/rcu/rcuperf.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index b459da70b4fc..83c411cb09bd 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -1,23 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Read-Copy Update module-based performance-test facility * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2015 * - * Authors: Paul E. McKenney + * Authors: Paul E. McKenney */ #define pr_fmt(fmt) fmt @@ -54,7 +41,7 @@ #include "rcu.h" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Paul E. McKenney "); +MODULE_AUTHOR("Paul E. McKenney "); #define PERF_FLAG "-perf:" #define PERFOUT_STRING(s) \ From eb7935e479a32cd77b9770baf7eaae6726e68f46 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:13:19 -0800 Subject: [PATCH 55/71] rcu/rcu_segcblist: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney Reviewed-by: Thomas Gleixner --- kernel/rcu/rcu_segcblist.c | 17 ++--------------- kernel/rcu/rcu_segcblist.h | 17 ++--------------- 2 files changed, 4 insertions(+), 30 deletions(-) diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 5aff271adf1e..9bd5f6023c21 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -1,23 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * RCU segmented callback lists, function definitions * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2017 * - * Authors: Paul E. McKenney + * Authors: Paul E. 
McKenney */ #include diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index 948470cef385..71b64648464e 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU segmented callback lists, internal-to-rcu header file * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2017 * - * Authors: Paul E. McKenney + * Authors: Paul E. McKenney */ #include From 2e24ce88524714ce82675dfc0e203abc509f84c3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:16:42 -0800 Subject: [PATCH 56/71] rcu/rcutorture: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney Reviewed-by: Thomas Gleixner --- kernel/rcu/rcutorture.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index f6e85faa4ff4..c47dba261cbe 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1,23 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Read-Copy Update module-based torture test facility * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2005, 2006 * - * Authors: Paul E. McKenney + * Authors: Paul E. McKenney * Josh Triplett * * See also: Documentation/RCU/torture.txt @@ -61,7 +48,7 @@ #include "rcu.h" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Paul E. McKenney and Josh Triplett "); +MODULE_AUTHOR("Paul E. McKenney and Josh Triplett "); /* Bits for ->extendables field, extendables param, and related definitions. */ From e7ee1501cd5af551c3bcd92162bf91b9877b2057 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:18:16 -0800 Subject: [PATCH 57/71] rcu/srcu: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. 
McKenney Reviewed-by: Thomas Gleixner --- kernel/rcu/srcutiny.c | 17 ++--------------- kernel/rcu/srcutree.c | 17 ++--------------- 2 files changed, 4 insertions(+), 30 deletions(-) diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index 32dfd6522548..5d4a39a6505a 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -1,24 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Sleepable Read-Copy Update mechanism for mutual exclusion, * tiny version for non-preemptible single-CPU use. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2017 * - * Author: Paul McKenney + * Author: Paul McKenney */ #include diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 3600d88d8956..7a5944b42b02 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -1,24 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Sleepable Read-Copy Update mechanism for mutual exclusion. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2006 * Copyright (C) Fujitsu, 2012 * - * Author: Paul McKenney + * Author: Paul McKenney * Lai Jiangshan * * For detailed explanation of Read-Copy Update mechanism see - From 96b903f5da491199b51a7a9e70699f606a8a09b4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:19:01 -0800 Subject: [PATCH 58/71] rcu/sync: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. Signed-off-by: Paul E. McKenney Reviewed-by: Thomas Gleixner --- kernel/rcu/sync.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c index be10036fa621..a8304d90573f 100644 --- a/kernel/rcu/sync.c +++ b/kernel/rcu/sync.c @@ -1,20 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * RCU-based infrastructure for lightweight reader-writer locking * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (c) 2015, Red Hat, Inc. * * Author: Oleg Nesterov From 00de9d7415352ef546e0bf0b8ff5d092e265c080 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:21:12 -0800 Subject: [PATCH 59/71] rcu/tiny: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney Reviewed-by: Thomas Gleixner --- kernel/rcu/tiny.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 5f5963ba313e..f142c579fda8 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -1,23 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2008 * - * Author: Paul E. McKenney + * Author: Paul E. McKenney * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU From 22e409253144d564ec7c8a425fa84b598e36f03c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:23:39 -0800 Subject: [PATCH 60/71] rcu/tree: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney [ paulmck: Update .h file SPDX comment format per Joe Perches. ] Reviewed-by: Thomas Gleixner --- kernel/rcu/tree.c | 19 +++---------------- kernel/rcu/tree.h | 17 ++--------------- kernel/rcu/tree_exp.h | 17 ++--------------- kernel/rcu/tree_plugin.h | 17 ++--------------- 4 files changed, 9 insertions(+), 61 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1c4add096078..3b8e7d56c028 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1,27 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Read-Copy Update mechanism for mutual exclusion * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2008 * * Authors: Dipankar Sarma * Manfred Spraul - * Paul E. McKenney Hierarchical version + * Paul E. McKenney Hierarchical version * - * Based on the original work by Paul McKenney + * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * * For detailed explanation of Read-Copy Update mechanism see - diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 149557b7c39c..f22c21ff3a89 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -1,25 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion (tree-based version) * Internal non-public definitions. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2008 * * Author: Ingo Molnar - * Paul E. McKenney + * Paul E. McKenney */ #include diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index d882ca0cd01b..66983626d37c 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU expedited grace periods * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2016 * - * Authors: Paul E. McKenney + * Authors: Paul E. McKenney */ #include diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 8ceed9e25ad5..8f67814a5a85 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1,27 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion (tree-based version) * Internal non-public definitions that provide either classic * or preemptible semantics. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright Red Hat, 2009 * Copyright IBM Corporation, 2009 * * Author: Ingo Molnar - * Paul E. McKenney + * Paul E. McKenney */ #include From 38b4df649e8c71c193e4ace237403f1574b900be Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:25:18 -0800 Subject: [PATCH 61/71] rcu/update: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney Reviewed-by: Thomas Gleixner --- kernel/rcu/update.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 1971869c4072..e3c6395c9b4c 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -1,26 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Read-Copy Update mechanism for mutual exclusion * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2001 * * Authors: Dipankar Sarma * Manfred Spraul * - * Based on the original work by Paul McKenney + * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf From 5efd1d94a5a748c492580b50b9bd3a7e42c31411 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:26:59 -0800 Subject: [PATCH 62/71] linux/rcu_node_tree: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney [ paulmck: Update .h SPDX comment format per Joe Perches. ] Reviewed-by: Thomas Gleixner --- include/linux/rcu_node_tree.h | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h index 426cee67f0e2..b8e094b125ee 100644 --- a/include/linux/rcu_node_tree.h +++ b/include/linux/rcu_node_tree.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU node combining tree definitions. These are used to compute * global attributes while avoiding common-case global contention. A key @@ -11,23 +12,9 @@ * because the size of the TREE SRCU srcu_struct structure depends * on these definitions. 
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2017 * - * Author: Paul E. McKenney + * Author: Paul E. McKenney */ #ifndef __LINUX_RCU_NODE_TREE_H From 73604da52167c17c4000a38f7f784f5a2edf0461 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:30:40 -0800 Subject: [PATCH 63/71] linux/rcupdate: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney [ paulmck: Update .h SPDX format per Joe Perches. ] Reviewed-by: Thomas Gleixner --- include/linux/rcupdate.h | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 0e39e0d2629e..4c82279dd4b7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -1,25 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma * - * Based on the original work by Paul McKenney + * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf From 265b4d4dc16c2a04ca72386d17c93e5901f5212a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:31:34 -0800 Subject: [PATCH 64/71] linux/rcu_segcblist: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney [ paulmck: Update .h SPDX format per Joe Perches. 
] Reviewed-by: Thomas Gleixner --- include/linux/rcu_segcblist.h | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index c3ad00e63556..87404cb015f1 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU segmented callback lists * @@ -5,23 +6,9 @@ * because the size of the TREE SRCU srcu_struct structure depends * on these definitions. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2017 * - * Authors: Paul E. McKenney + * Authors: Paul E. McKenney */ #ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H From a66e0092fff1f1d4ac3e3de6090b3f15a5ca784a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:32:48 -0800 Subject: [PATCH 65/71] linux/rcu_sync: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. Signed-off-by: Paul E. McKenney [ paulmck: Update .h SPDX format per Joe Perches. ] Reviewed-by: Thomas Gleixner --- include/linux/rcu_sync.h | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h index ece7ed9a4a70..6fc53a1345b3 100644 --- a/include/linux/rcu_sync.h +++ b/include/linux/rcu_sync.h @@ -1,20 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU-based infrastructure for lightweight reader-writer locking * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (c) 2015, Red Hat, Inc. * * Author: Oleg Nesterov From 6c4421273694bd2351e230f491c1033b118734fd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:34:35 -0800 Subject: [PATCH 66/71] linux/rcutiny: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney [ paulmck: Update .h SPDX format per Joe Perches. 
] Reviewed-by: Thomas Gleixner --- include/linux/rcutiny.h | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index af65d1f36ddb..8e727f57d814 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2008 * - * Author: Paul E. McKenney + * Author: Paul E. McKenney * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU From a9b7343ec1a2f061967e4a17eb9276d129b679f4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:36:27 -0800 Subject: [PATCH 67/71] linux/rcutree: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney [ paulmck: Update .h SPDX format per Joe Perches. ] Reviewed-by: Thomas Gleixner --- include/linux/rcutree.h | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 7f83179177d1..735601ac27d3 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -1,26 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion (tree-based version) * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2008 * * Author: Dipankar Sarma - * Paul E. McKenney Hierarchical algorithm + * Paul E. McKenney Hierarchical algorithm * - * Based on the original work by Paul McKenney + * Based on the original work by Paul McKenney * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * * For detailed explanation of Read-Copy Update mechanism see - From 8c366db05b1f27fac01a7dbf9e4904d499bd5d55 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:39:22 -0800 Subject: [PATCH 68/71] linux/srcu: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. 
McKenney [ paulmck: Update ,h SPDX format per Joe Perches. ] Reviewed-by: Thomas Gleixner --- include/linux/srcu.h | 17 ++--------------- include/linux/srcutiny.h | 17 ++--------------- include/linux/srcutree.h | 17 ++--------------- 3 files changed, 6 insertions(+), 45 deletions(-) diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c614375cd264..0d5fed02df16 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -1,24 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2006 * Copyright (C) Fujitsu, 2012 * - * Author: Paul McKenney + * Author: Paul McKenney * Lai Jiangshan * * For detailed explanation of Read-Copy Update mechanism see - diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index b19216aaaef2..5a5a1941ca15 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -1,24 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion, * tiny variant. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2017 * - * Author: Paul McKenney + * Author: Paul McKenney */ #ifndef _LINUX_SRCU_TINY_H diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 6f292bd3e7db..de7a42116b2e 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -1,24 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion, * tree variant. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2017 * - * Author: Paul McKenney + * Author: Paul McKenney */ #ifndef _LINUX_SRCU_TREE_H From 8f8e76c09ced491a0ab9b088a90b726cb23c4c0a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:41:31 -0800 Subject: [PATCH 69/71] torture: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney Reviewed-by: Thomas Gleixner --- kernel/torture.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/kernel/torture.c b/kernel/torture.c index bbf6d473e50c..67620e5141d2 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -1,23 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Common functions for in-kernel torture tests. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2014 * - * Author: Paul E. McKenney + * Author: Paul E. McKenney * Based on kernel/rcu/torture.c. */ @@ -53,7 +40,7 @@ #include "rcu/rcu.h" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Paul E. McKenney "); +MODULE_AUTHOR("Paul E. McKenney "); static char *torture_type; static int verbose; From 082dfb3c93d6c0f85025638928c92933f62d234d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 10:46:34 -0800 Subject: [PATCH 70/71] linux/torture: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney [ paulmck: Update .h SPDX format per Joe Perches. ] Reviewed-by: Thomas Gleixner --- include/linux/torture.h | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/include/linux/torture.h b/include/linux/torture.h index 48fad21109fc..e5167820108a 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Common functions for in-kernel torture tests. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. 
- * * Copyright IBM Corporation, 2014 * - * Author: Paul E. McKenney + * Author: Paul E. McKenney */ #ifndef __LINUX_TORTURE_H From 5a4eb3cb2012b38022041c7a87cbcf5af6a3302f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 Jan 2019 11:11:00 -0800 Subject: [PATCH 71/71] locking/locktorture: Convert to SPDX license identifier Replace the license boiler plate with a SPDX license identifier. While in the area, update an email address. Signed-off-by: Paul E. McKenney Reviewed-by: Thomas Gleixner --- kernel/locking/locktorture.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 7d0b0ed74404..d163c5b75d72 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -1,23 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Module-based torture test facility for locking * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2014 * - * Authors: Paul E. McKenney + * Authors: Paul E. McKenney * Davidlohr Bueso * Based on kernel/rcu/torture.c. */ @@ -45,7 +32,7 @@ #include MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Paul E. McKenney "); +MODULE_AUTHOR("Paul E. McKenney "); torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");