Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RCU updates from Ingo Molnar:
 "The biggest RCU changes in this cycle were:

   - Convert RCU's BUG_ON() and similar calls to WARN_ON() and similar.

   - Replace calls of RCU-bh and RCU-sched update-side functions to
     their vanilla RCU counterparts. This series is a step towards
     complete removal of the RCU-bh and RCU-sched update-side functions.

     ( Note that some of these conversions are going upstream via their
       respective maintainers. )

   - Documentation updates, including a number of flavor-consolidation
     updates from Joel Fernandes.

   - Miscellaneous fixes.

   - Automate generation of the initrd filesystem used for rcutorture
     testing.

   - Convert spin_is_locked() assertions to instead use lockdep.

     ( Note that some of these conversions are going upstream via their
       respective maintainers. )

   - SRCU updates, especially including a fix from Dennis Krein for a
     bag-on-head-class bug.

   - RCU torture-test updates"
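
For readers not following the flavor consolidation, the conversions summarized above are largely mechanical, one-for-one API replacements. The sketch below is illustrative only (the foo structure and functions are made up, not taken from any commit in this series); it shows the general shape of the RCU-bh/RCU-sched update-side conversions and of the lockdep-based assertion style:

/*
 * Illustrative sketch, not from any commit in this series: RCU-bh and
 * RCU-sched update-side calls become their vanilla RCU counterparts,
 * and spin_is_locked()-based assertions become lockdep annotations.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int data;
	struct rcu_head rh;
};

static void foo_free_cb(struct rcu_head *rh)
{
	kfree(container_of(rh, struct foo, rh));
}

static void foo_retire(struct foo *p, spinlock_t *lock)
{
	lockdep_assert_held(lock);	/* was: BUG_ON(!spin_is_locked(lock)); */

	call_rcu(&p->rh, foo_free_cb);	/* was: call_rcu_bh() or call_rcu_sched() */
}

static void foo_shutdown(void)
{
	synchronize_rcu();		/* was: synchronize_sched() or synchronize_rcu_bh() */
	rcu_barrier();			/* was: rcu_barrier_bh() or rcu_barrier_sched() */
}

These substitutions are safe because, with the flavors consolidated, the vanilla grace-period primitives now also wait for softirq-disabled and preemption-disabled read-side regions.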

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (112 commits)
  rcutorture: Don't do busted forward-progress testing
  rcutorture: Use 100ms buckets for forward-progress callback histograms
  rcutorture: Recover from OOM during forward-progress tests
  rcutorture: Print forward-progress test age upon failure
  rcutorture: Print time since GP end upon forward-progress failure
  rcutorture: Print histogram of CB invocation at OOM time
  rcutorture: Print GP age upon forward-progress failure
  rcu: Print per-CPU callback counts for forward-progress failures
  rcu: Account for nocb-CPU callback counts in RCU CPU stall warnings
  rcutorture: Dump grace-period diagnostics upon forward-progress OOM
  rcutorture: Prepare for asynchronous access to rcu_fwd_startat
  torture: Remove unnecessary "ret" variables
  rcutorture: Affinity forward-progress test to avoid housekeeping CPUs
  rcutorture: Break up too-long rcu_torture_fwd_prog() function
  rcutorture: Remove cbflood facility
  torture: Bring any extra CPUs online during kernel startup
  rcutorture: Add call_rcu() flooding forward-progress tests
  rcutorture/formal: Replace synchronize_sched() with synchronize_rcu()
  tools/kernel.h: Replace synchronize_sched() with synchronize_rcu()
  net/decnet: Replace rcu_barrier_bh() with rcu_barrier()
  ...
Linus Torvalds 2018-12-26 13:07:19 -08:00
commit 792bf4d871
88 changed files with 4282 additions and 4109 deletions


[Deleted file: BigTreeClassicRCUBH.svg (499 lines, 13 KiB), a fig2dev/Inkscape diagram showing the old rcu_sched and rcu_bh flavors, each with its own struct rcu_state enclosing a tree of struct rcu_node structures with per-CPU struct rcu_data structures below.]


[Deleted file: BigTreeClassicRCUBHdyntick.svg (695 lines, 19 KiB), the same two-flavor rcu_state / rcu_node / rcu_data diagram extended with the per-CPU struct rcu_dynticks structures referenced by the rcu_data structures of both flavors.]


[Deleted file: BigTreePreemptRCUBHdyntick.svg (741 lines, 20 KiB), the corresponding three-flavor diagram (rcu_preempt, rcu_sched, and rcu_bh) for CONFIG_PREEMPT_RCU kernels, again with shared per-CPU struct rcu_dynticks structures.]

[Modified image file: diff suppressed because it is too large; 24 KiB before, 22 KiB after.]


@@ -23,8 +23,6 @@ to each other.
 The <tt>rcu_segcblist</tt> Structure</a>
 <li> <a href="#The rcu_data Structure">
 The <tt>rcu_data</tt> Structure</a>
-<li> <a href="#The rcu_dynticks Structure">
-The <tt>rcu_dynticks</tt> Structure</a>
 <li> <a href="#The rcu_head Structure">
 The <tt>rcu_head</tt> Structure</a>
 <li> <a href="#RCU-Specific Fields in the task_struct Structure">
@@ -127,9 +125,11 @@ CPUs, RCU would configure the <tt>rcu_node</tt> tree as follows:
 </p><p>RCU currently permits up to a four-level tree, which on a 64-bit system
 accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for
 32-bit systems.
-On the other hand, you can set <tt>CONFIG_RCU_FANOUT</tt> to be
-as small as 2 if you wish, which would permit only 16 CPUs, which
-is useful for testing.
+On the other hand, you can set both <tt>CONFIG_RCU_FANOUT</tt> and
+<tt>CONFIG_RCU_FANOUT_LEAF</tt> to be as small as 2, which would result
+in a 16-CPU test using a 4-level tree.
+This can be useful for testing large-system capabilities on small test
+machines.
 </p><p>This multi-level combining tree allows us to get most of the
 performance and scalability
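
As an aside (not part of the patch above), the CPU limits quoted in this hunk follow directly from the fanout arithmetic: a four-level tree covers CONFIG_RCU_FANOUT_LEAF * CONFIG_RCU_FANOUT^3 CPUs. A quick user-space check, assuming the default leaf fanout of 16 and default fanouts of 64 (64-bit) and 32 (32-bit):

#include <stdio.h>

/* Maximum CPUs covered by a combining tree with the given geometry. */
static long max_cpus(long fanout, long fanout_leaf, int levels)
{
	long cpus = fanout_leaf;		/* leaf rcu_node level */
	int i;

	for (i = 1; i < levels; i++)		/* interior rcu_node levels */
		cpus *= fanout;
	return cpus;
}

int main(void)
{
	printf("%ld\n", max_cpus(64, 16, 4));	/* 4194304: 64-bit defaults */
	printf("%ld\n", max_cpus(32, 16, 4));	/*  524288: 32-bit defaults */
	printf("%ld\n", max_cpus(2, 2, 4));	/*      16: small test config */
	return 0;
}
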
@@ -154,44 +154,9 @@ on that root <tt>rcu_node</tt> structure remains acceptably low.
 keeping lock contention under control at all tree levels regardless
 of the level of loading on the system.
-</p><p>The Linux kernel actually supports multiple flavors of RCU
-running concurrently, so RCU builds separate data structures for each
-flavor.
-For example, for <tt>CONFIG_TREE_RCU=y</tt> kernels, RCU provides
-rcu_sched and rcu_bh, as shown below:
-</p><p><img src="BigTreeClassicRCUBH.svg" alt="BigTreeClassicRCUBH.svg" width="33%">
-</p><p>Energy efficiency is increasingly important, and for that
-reason the Linux kernel provides <tt>CONFIG_NO_HZ_IDLE</tt>, which
-turns off the scheduling-clock interrupts on idle CPUs, which in
-turn allows those CPUs to attain deeper sleep states and to consume
-less energy.
-CPUs whose scheduling-clock interrupts have been turned off are
-said to be in <i>dyntick-idle mode</i>.
-RCU must handle dyntick-idle CPUs specially
-because RCU would otherwise wake up each CPU on every grace period,
-which would defeat the whole purpose of <tt>CONFIG_NO_HZ_IDLE</tt>.
-RCU uses the <tt>rcu_dynticks</tt> structure to track
-which CPUs are in dyntick idle mode, as shown below:
-</p><p><img src="BigTreeClassicRCUBHdyntick.svg" alt="BigTreeClassicRCUBHdyntick.svg" width="33%">
-</p><p>However, if a CPU is in dyntick-idle mode, it is in that mode
-for all flavors of RCU.
-Therefore, a single <tt>rcu_dynticks</tt> structure is allocated per
-CPU, and all of a given CPU's <tt>rcu_data</tt> structures share
-that <tt>rcu_dynticks</tt>, as shown in the figure.
-</p><p>Kernels built with <tt>CONFIG_PREEMPT_RCU</tt> support
-rcu_preempt in addition to rcu_sched and rcu_bh, as shown below:
-</p><p><img src="BigTreePreemptRCUBHdyntick.svg" alt="BigTreePreemptRCUBHdyntick.svg" width="35%">
 </p><p>RCU updaters wait for normal grace periods by registering
 RCU callbacks, either directly via <tt>call_rcu()</tt> and
 friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>),
-there being a separate interface per flavor of RCU)
 or indirectly via <tt>synchronize_rcu()</tt> and friends.
 RCU callbacks are represented by <tt>rcu_head</tt> structures,
 which are queued on <tt>rcu_data</tt> structures while they are
@@ -214,9 +179,6 @@ its own synchronization:
 <li> Each <tt>rcu_node</tt> structure has a spinlock.
 <li> The fields in <tt>rcu_data</tt> are private to the corresponding
 CPU, although a few can be read and written by other CPUs.
-<li> Similarly, the fields in <tt>rcu_dynticks</tt> are private
-to the corresponding CPU, although a few can be read by
-other CPUs.
 </ol>
 <p>It is important to note that different data structures can have
@@ -272,11 +234,6 @@ follows:
 access to this information from the corresponding CPU.
 Finally, this structure records past dyntick-idle state
 for the corresponding CPU and also tracks statistics.
-<li> <tt>rcu_dynticks</tt>:
-This per-CPU structure tracks the current dyntick-idle
-state for the corresponding CPU.
-Unlike the other three structures, the <tt>rcu_dynticks</tt>
-structure is not replicated per RCU flavor.
 <li> <tt>rcu_head</tt>:
 This structure represents RCU callbacks, and is the
 only structure allocated and managed by RCU users.
@@ -287,14 +244,14 @@ follows:
 <p>If all you wanted from this article was a general notion of how
 RCU's data structures are related, you are done.
 Otherwise, each of the following sections give more details on
-the <tt>rcu_state</tt>, <tt>rcu_node</tt>, <tt>rcu_data</tt>,
-and <tt>rcu_dynticks</tt> data structures.
+the <tt>rcu_state</tt>, <tt>rcu_node</tt> and <tt>rcu_data</tt> data
+structures.
 <h3><a name="The rcu_state Structure">
 The <tt>rcu_state</tt> Structure</a></h3>
 <p>The <tt>rcu_state</tt> structure is the base structure that
-represents a flavor of RCU.
+represents the state of RCU in the system.
 This structure forms the interconnection between the
 <tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
 tracks grace periods, contains the lock used to
@@ -389,7 +346,7 @@ sequence number.
 The bottom two bits are the state of the current grace period,
 which can be zero for not yet started or one for in progress.
 In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
-zero, the corresponding flavor of RCU is idle.
+zero, then RCU is idle.
 Any other value in the bottom two bits indicates that something is broken.
 This field is protected by the root <tt>rcu_node</tt> structure's
 <tt>-&gt;lock</tt> field.
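
The ->gp_seq encoding described in this hunk can be sketched as follows. Illustration only: the helper names are made up for the example, and the kernel keeps its own rcu_seq_*() helpers for this purpose:

#include <stdbool.h>

/*
 * Sketch of the ->gp_seq layout described above: a grace-period counter
 * in the upper bits and a two-bit state in the bottom bits (0 means
 * idle, 1 means a grace period is in progress).
 */
#define GP_SEQ_STATE_MASK	0x3UL

static inline bool gp_seq_is_idle(unsigned long gp_seq)
{
	return (gp_seq & GP_SEQ_STATE_MASK) == 0;
}

static inline unsigned long gp_seq_completed(unsigned long gp_seq)
{
	return gp_seq >> 2;	/* number of grace periods counted so far */
}
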
@@ -419,10 +376,10 @@ as follows:
 grace period in jiffies.
 It is protected by the root <tt>rcu_node</tt>'s <tt>-&gt;lock</tt>.
-<p>The <tt>-&gt;name</tt> field points to the name of the RCU flavor
-(for example, &ldquo;rcu_sched&rdquo;), and is constant.
-The <tt>-&gt;abbr</tt> field contains a one-character abbreviation,
-for example, &ldquo;s&rdquo; for RCU-sched.
+<p>The <tt>-&gt;name</tt> and <tt>-&gt;abbr</tt> fields distinguish
+between preemptible RCU (&ldquo;rcu_preempt&rdquo; and &ldquo;p&rdquo;)
+and non-preemptible RCU (&ldquo;rcu_sched&rdquo; and &ldquo;s&rdquo;).
+These fields are used for diagnostic and tracing purposes.
 <h3><a name="The rcu_node Structure">
 The <tt>rcu_node</tt> Structure</a></h3>
@@ -971,25 +928,31 @@ this <tt>rcu_segcblist</tt> structure, <i>not</i> the <tt>-&gt;head</tt>
 pointer.
 The reason for this is that all the ready-to-invoke callbacks
 (that is, those in the <tt>RCU_DONE_TAIL</tt> segment) are extracted
-all at once at callback-invocation time.
+all at once at callback-invocation time (<tt>rcu_do_batch</tt>), due
+to which <tt>-&gt;head</tt> may be set to NULL if there are no not-done
+callbacks remaining in the <tt>rcu_segcblist</tt>.
 If callback invocation must be postponed, for example, because a
 high-priority process just woke up on this CPU, then the remaining
-callbacks are placed back on the <tt>RCU_DONE_TAIL</tt> segment.
-Either way, the <tt>-&gt;len</tt> and <tt>-&gt;len_lazy</tt> counts
-are adjusted after the corresponding callbacks have been invoked, and so
-again it is the <tt>-&gt;len</tt> count that accurately reflects whether
-or not there are callbacks associated with this <tt>rcu_segcblist</tt>
-structure.
+callbacks are placed back on the <tt>RCU_DONE_TAIL</tt> segment and
+<tt>-&gt;head</tt> once again points to the start of the segment.
+In short, the head field can briefly be <tt>NULL</tt> even though the
+CPU has callbacks present the entire time.
+Therefore, it is not appropriate to test the <tt>-&gt;head</tt> pointer
+for <tt>NULL</tt>.
+<p>In contrast, the <tt>-&gt;len</tt> and <tt>-&gt;len_lazy</tt> counts
+are adjusted only after the corresponding callbacks have been invoked.
+This means that the <tt>-&gt;len</tt> count is zero only if
+the <tt>rcu_segcblist</tt> structure really is devoid of callbacks.
 Of course, off-CPU sampling of the <tt>-&gt;len</tt> count requires
-the use of appropriate synchronization, for example, memory barriers.
+careful use of appropriate synchronization, for example, memory barriers.
 This synchronization can be a bit subtle, particularly in the case
 of <tt>rcu_barrier()</tt>.
 <h3><a name="The rcu_data Structure">
 The <tt>rcu_data</tt> Structure</a></h3>
-<p>The <tt>rcu_data</tt> maintains the per-CPU state for the
-corresponding flavor of RCU.
+<p>The <tt>rcu_data</tt> maintains the per-CPU state for the RCU subsystem.
 The fields in this structure may be accessed only from the corresponding
 CPU (and from tracing) unless otherwise stated.
 This structure is the
@ -1015,30 +978,19 @@ as follows:
<pre> <pre>
1 int cpu; 1 int cpu;
2 struct rcu_state *rsp; 2 struct rcu_node *mynode;
3 struct rcu_node *mynode; 3 unsigned long grpmask;
4 struct rcu_dynticks *dynticks; 4 bool beenonline;
5 unsigned long grpmask;
6 bool beenonline;
</pre> </pre>
<p>The <tt>-&gt;cpu</tt> field contains the number of the <p>The <tt>-&gt;cpu</tt> field contains the number of the
corresponding CPU, the <tt>-&gt;rsp</tt> pointer references corresponding CPU and the <tt>-&gt;mynode</tt> field references the
the corresponding <tt>rcu_state</tt> structure (and is most frequently corresponding <tt>rcu_node</tt> structure.
used to locate the name of the corresponding flavor of RCU for tracing),
and the <tt>-&gt;mynode</tt> field references the corresponding
<tt>rcu_node</tt> structure.
The <tt>-&gt;mynode</tt> is used to propagate quiescent states The <tt>-&gt;mynode</tt> is used to propagate quiescent states
up the combining tree. up the combining tree.
<p>The <tt>-&gt;dynticks</tt> pointer references the These two fields are constant and therefore do not require synchronization.
<tt>rcu_dynticks</tt> structure corresponding to this
CPU.
Recall that a single per-CPU instance of the <tt>rcu_dynticks</tt>
structure is shared among all flavors of RCU.
These first four fields are constant and therefore require not
synchronization.
</p><p>The <tt>-&gt;grpmask</tt> field indicates the bit in <p>The <tt>-&gt;grpmask</tt> field indicates the bit in
the <tt>-&gt;mynode-&gt;qsmask</tt> corresponding to this the <tt>-&gt;mynode-&gt;qsmask</tt> corresponding to this
<tt>rcu_data</tt> structure, and is also used when propagating <tt>rcu_data</tt> structure, and is also used when propagating
quiescent states. quiescent states.
@ -1057,12 +1009,12 @@ as follows:
3 bool cpu_no_qs; 3 bool cpu_no_qs;
4 bool core_needs_qs; 4 bool core_needs_qs;
5 bool gpwrap; 5 bool gpwrap;
6 unsigned long rcu_qs_ctr_snap;
</pre> </pre>
<p>The <tt>-&gt;gp_seq</tt> and <tt>-&gt;gp_seq_needed</tt> <p>The <tt>-&gt;gp_seq</tt> field is the counterpart of the field of the same
fields are the counterparts of the fields of the same name name in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures. The
in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures. <tt>-&gt;gp_seq_needed</tt> field is the counterpart of the field of the same
name in the <tt>rcu_node</tt> structure.
They may each lag up to one behind their <tt>rcu_node</tt> They may each lag up to one behind their <tt>rcu_node</tt>
counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag <tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
@ -1103,10 +1055,6 @@ CPU has remained idle for so long that the
<tt>gp_seq</tt> counter is in danger of overflow, which <tt>gp_seq</tt> counter is in danger of overflow, which
will cause the CPU to disregard the values of its counters on will cause the CPU to disregard the values of its counters on
its next exit from idle. its next exit from idle.
Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
cases where a given operation has resulted in a quiescent state
for all flavors of RCU, for example, <tt>cond_resched()</tt>
when RCU has indicated a need for quiescent states.
<h5>RCU Callback Handling</h5> <h5>RCU Callback Handling</h5>
@ -1179,26 +1127,22 @@ Finally, the <tt>-&gt;dynticks_fqs</tt> field is used to
count the number of times this CPU is determined to be in count the number of times this CPU is determined to be in
dyntick-idle state, and is used for tracing and debugging purposes. dyntick-idle state, and is used for tracing and debugging purposes.
<h3><a name="The rcu_dynticks Structure"> <p>
The <tt>rcu_dynticks</tt> Structure</a></h3> This portion of the rcu_data structure is declared as follows:
<p>The <tt>rcu_dynticks</tt> maintains the per-CPU dyntick-idle state
for the corresponding CPU.
Unlike the other structures, <tt>rcu_dynticks</tt> is not
replicated over the different flavors of RCU.
The fields in this structure may be accessed only from the corresponding
CPU (and from tracing) unless otherwise stated.
Its fields are as follows:
<pre> <pre>
1 long dynticks_nesting; 1 long dynticks_nesting;
2 long dynticks_nmi_nesting; 2 long dynticks_nmi_nesting;
3 atomic_t dynticks; 3 atomic_t dynticks;
4 bool rcu_need_heavy_qs; 4 bool rcu_need_heavy_qs;
5 unsigned long rcu_qs_ctr; 5 bool rcu_urgent_qs;
6 bool rcu_urgent_qs;
</pre> </pre>
<p>These fields in the rcu_data structure maintain the per-CPU dyntick-idle
state for the corresponding CPU.
The fields may be accessed only from the corresponding CPU (and from tracing)
unless otherwise stated.
<p>The <tt>-&gt;dynticks_nesting</tt> field counts the <p>The <tt>-&gt;dynticks_nesting</tt> field counts the
nesting depth of process execution, so that in normal circumstances nesting depth of process execution, so that in normal circumstances
this counter has value zero or one. this counter has value zero or one.
@ -1240,19 +1184,12 @@ it is willing to call for heavy-weight dyntick-counter operations.
This flag is checked by RCU's context-switch and <tt>cond_resched()</tt> This flag is checked by RCU's context-switch and <tt>cond_resched()</tt>
code, which provide a momentary idle sojourn in response. code, which provide a momentary idle sojourn in response.
</p><p>The <tt>-&gt;rcu_qs_ctr</tt> field is used to record
quiescent states from <tt>cond_resched()</tt>.
Because <tt>cond_resched()</tt> can execute quite frequently, this
must be quite lightweight, as in a non-atomic increment of this
per-CPU field.
</p><p>Finally, the <tt>-&gt;rcu_urgent_qs</tt> field is used to record </p><p>Finally, the <tt>-&gt;rcu_urgent_qs</tt> field is used to record
the fact that the RCU core code would really like to see a quiescent the fact that the RCU core code would really like to see a quiescent state from
state from the corresponding CPU, with the various other fields indicating the corresponding CPU, with the various other fields indicating just how badly
just how badly RCU wants this quiescent state. RCU wants this quiescent state.
This flag is checked by RCU's context-switch and <tt>cond_resched()</tt> This flag is checked by RCU's context-switch path
code, which, if nothing else, non-atomically increment <tt>-&gt;rcu_qs_ctr</tt> (<tt>rcu_note_context_switch</tt>) and the cond_resched code.
in response.
<table> <table>
<tr><th>&nbsp;</th></tr> <tr><th>&nbsp;</th></tr>
@ -1425,11 +1362,11 @@ the last part of the array, thus traversing only the leaf
<h3><a name="Summary"> <h3><a name="Summary">
Summary</a></h3> Summary</a></h3>
So each flavor of RCU is represented by an <tt>rcu_state</tt> structure, So the state of RCU is represented by an <tt>rcu_state</tt> structure,
which contains a combining tree of <tt>rcu_node</tt> and which contains a combining tree of <tt>rcu_node</tt> and
<tt>rcu_data</tt> structures. <tt>rcu_data</tt> structures.
Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle
state is tracked by an <tt>rcu_dynticks</tt> structure. state is tracked by dynticks-related fields in the <tt>rcu_data</tt> structure.
If you made it this far, you are well prepared to read the code If you made it this far, you are well prepared to read the code
walkthroughs in the other articles in this series. walkthroughs in the other articles in this series.

File diff suppressed because it is too large

[Image diff: before 24 KiB, after 20 KiB]

View File

@ -160,9 +160,9 @@ was in flight.
If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports
the quiescent state. the quiescent state.
<p> <p> Otherwise, the handler forces a future context switch by setting the
Otherwise, the handler invokes <tt>resched_cpu()</tt>, which forces NEED_RESCHED flag in the current task's thread-info flags and in the CPU's
a future context switch. preempt counter.
At the time of the context switch, the CPU reports the quiescent state. At the time of the context switch, the CPU reports the quiescent state.
Should the CPU go offline first, it will report the quiescent state Should the CPU go offline first, it will report the quiescent state
at that time. at that time.

View File

@ -77,7 +77,7 @@ The key point is that the lock-acquisition functions, including
<tt>smp_mb__after_unlock_lock()</tt> immediately after successful <tt>smp_mb__after_unlock_lock()</tt> immediately after successful
acquisition of the lock. acquisition of the lock.
<p>Therefore, for any given <tt>rcu_node</tt> struction, any access <p>Therefore, for any given <tt>rcu_node</tt> structure, any access
happening before one of the above lock-release functions will be seen happening before one of the above lock-release functions will be seen
by all CPUs as happening before any access happening after a later by all CPUs as happening before any access happening after a later
one of the above lock-acquisition functions. one of the above lock-acquisition functions.

View File

@ -900,8 +900,6 @@ Except where otherwise noted, these non-guarantees were premeditated.
Grace Periods Don't Partition Read-Side Critical Sections</a> Grace Periods Don't Partition Read-Side Critical Sections</a>
<li> <a href="#Read-Side Critical Sections Don't Partition Grace Periods"> <li> <a href="#Read-Side Critical Sections Don't Partition Grace Periods">
Read-Side Critical Sections Don't Partition Grace Periods</a> Read-Side Critical Sections Don't Partition Grace Periods</a>
<li> <a href="#Disabling Preemption Does Not Block Grace Periods">
Disabling Preemption Does Not Block Grace Periods</a>
</ol> </ol>
<h3><a name="Readers Impose Minimal Ordering">Readers Impose Minimal Ordering</a></h3> <h3><a name="Readers Impose Minimal Ordering">Readers Impose Minimal Ordering</a></h3>
@ -1259,54 +1257,6 @@ of RCU grace periods.
<tr><td>&nbsp;</td></tr> <tr><td>&nbsp;</td></tr>
</table> </table>
<h3><a name="Disabling Preemption Does Not Block Grace Periods">
Disabling Preemption Does Not Block Grace Periods</a></h3>
<p>
There was a time when disabling preemption on any given CPU would block
subsequent grace periods.
However, this was an accident of implementation and is not a requirement.
And in the current Linux-kernel implementation, disabling preemption
on a given CPU in fact does not block grace periods, as Oleg Nesterov
<a href="https://lkml.kernel.org/g/20150614193825.GA19582@redhat.com">demonstrated</a>.
<p>
If you need a preempt-disable region to block grace periods, you need to add
<tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>, for example
as follows:
<blockquote>
<pre>
1 preempt_disable();
2 rcu_read_lock();
3 do_something();
4 rcu_read_unlock();
5 preempt_enable();
6
7 /* Spinlocks implicitly disable preemption. */
8 spin_lock(&amp;mylock);
9 rcu_read_lock();
10 do_something();
11 rcu_read_unlock();
12 spin_unlock(&amp;mylock);
</pre>
</blockquote>
<p>
In theory, you could enter the RCU read-side critical section first,
but it is more efficient to keep the entire RCU read-side critical
section contained in the preempt-disable region as shown above.
Of course, RCU read-side critical sections that extend outside of
preempt-disable regions will work correctly, but such critical sections
can be preempted, which forces <tt>rcu_read_unlock()</tt> to do
more work.
And no, this is <i>not</i> an invitation to enclose all of your RCU
read-side critical sections within preempt-disable regions, because
doing so would degrade real-time response.
<p>
This non-requirement appeared with preemptible RCU.
<h2><a name="Parallelism Facts of Life">Parallelism Facts of Life</a></h2> <h2><a name="Parallelism Facts of Life">Parallelism Facts of Life</a></h2>
<p> <p>
@ -1381,6 +1331,7 @@ Classes of quality-of-implementation requirements are as follows:
<ol> <ol>
<li> <a href="#Specialization">Specialization</a> <li> <a href="#Specialization">Specialization</a>
<li> <a href="#Performance and Scalability">Performance and Scalability</a> <li> <a href="#Performance and Scalability">Performance and Scalability</a>
<li> <a href="#Forward Progress">Forward Progress</a>
<li> <a href="#Composability">Composability</a> <li> <a href="#Composability">Composability</a>
<li> <a href="#Corner Cases">Corner Cases</a> <li> <a href="#Corner Cases">Corner Cases</a>
</ol> </ol>
@ -1645,7 +1596,7 @@ used in place of <tt>synchronize_rcu()</tt> as follows:
16 struct foo *p; 16 struct foo *p;
17 17
18 spin_lock(&amp;gp_lock); 18 spin_lock(&amp;gp_lock);
19 p = rcu_dereference(gp); 19 p = rcu_access_pointer(gp);
20 if (!p) { 20 if (!p) {
21 spin_unlock(&amp;gp_lock); 21 spin_unlock(&amp;gp_lock);
22 return false; 22 return false;
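<p>
The switch from <tt>rcu_dereference()</tt> to <tt>rcu_access_pointer()</tt>
on line&nbsp;19 above reflects the fact that the pointer is fetched only to be
tested for <tt>NULL</tt> while the update-side lock is held, not dereferenced.
A minimal sketch of the resulting idiom follows; it reuses the <tt>gp</tt> and
<tt>gp_lock</tt> names from the example, and the
<tt>rcu_dereference_protected()</tt> line is purely illustrative:

<blockquote>
<pre>
 1 spin_lock(&amp;gp_lock);
 2 p = rcu_access_pointer(gp); /* No dereference, so no RCU read-side protection needed. */
 3 if (!p) {
 4   spin_unlock(&amp;gp_lock);
 5   return false;
 6 }
 7 /* To dereference while holding the lock, rcu_dereference_protected() applies:
 8  *   p = rcu_dereference_protected(gp, lockdep_is_held(&amp;gp_lock)); */
</pre>
</blockquote>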
@ -1822,6 +1773,106 @@ so it is too early to tell whether they will stand the test of time.
RCU thus provides a range of tools to allow updaters to strike the RCU thus provides a range of tools to allow updaters to strike the
required tradeoff between latency, flexibility and CPU overhead. required tradeoff between latency, flexibility and CPU overhead.
<h3><a name="Forward Progress">Forward Progress</a></h3>
<p>
In theory, delaying grace-period completion and callback invocation
is harmless.
In practice, not only are memory sizes finite but also callbacks sometimes
do wakeups, and sufficiently deferred wakeups can be difficult
to distinguish from system hangs.
Therefore, RCU must provide a number of mechanisms to promote forward
progress.
<p>
These mechanisms are not foolproof, nor can they be.
For one simple example, an infinite loop in an RCU read-side critical
section must by definition prevent later grace periods from ever completing.
For a more involved example, consider a 64-CPU system built with
<tt>CONFIG_RCU_NOCB_CPU=y</tt> and booted with <tt>rcu_nocbs=1-63</tt>,
where CPUs&nbsp;1 through&nbsp;63 spin in tight loops that invoke
<tt>call_rcu()</tt>.
Even if these tight loops also contain calls to <tt>cond_resched()</tt>
(thus allowing grace periods to complete), CPU&nbsp;0 simply will
not be able to invoke callbacks as fast as the other 63 CPUs can
register them, at least not until the system runs out of memory.
In both of these examples, the Spiderman principle applies: With great
power comes great responsibility.
However, short of this level of abuse, RCU is required to
ensure timely completion of grace periods and timely invocation of
callbacks.
<p>
RCU takes the following steps to encourage timely completion of
grace periods:
<ol>
<li> If a grace period fails to complete within 100&nbsp;milliseconds,
RCU causes future invocations of <tt>cond_resched()</tt> on
the holdout CPUs to provide an RCU quiescent state.
RCU also causes those CPUs' <tt>need_resched()</tt> invocations
to return <tt>true</tt>, but only after the corresponding CPU's
next scheduling-clock interrupt.
<li> CPUs mentioned in the <tt>nohz_full</tt> kernel boot parameter
can run indefinitely in the kernel without scheduling-clock
interrupts, which defeats the above <tt>need_resched()</tt>
stratagem.
RCU will therefore invoke <tt>resched_cpu()</tt> on any
<tt>nohz_full</tt> CPUs still holding out after
109&nbsp;milliseconds.
<li> In kernels built with <tt>CONFIG_RCU_BOOST=y</tt>, if a given
task that has been preempted within an RCU read-side critical
section is holding out for more than 500&nbsp;milliseconds,
RCU will resort to priority boosting.
<li> If a CPU is still holding out 10&nbsp;seconds into the grace
period, RCU will invoke <tt>resched_cpu()</tt> on it regardless
of its <tt>nohz_full</tt> state.
</ol>
<p>
The above values are defaults for systems running with <tt>HZ=1000</tt>.
They will vary as the value of <tt>HZ</tt> varies, and can also be
changed using the relevant Kconfig options and kernel boot parameters.
RCU currently does not do much sanity checking of these
parameters, so please use caution when changing them.
Note that these forward-progress measures are provided only for RCU,
not for
<a href="#Sleepable RCU">SRCU</a> or
<a href="#Tasks RCU">Tasks RCU</a>.
<p>
RCU takes the following steps in <tt>call_rcu()</tt> to encourage timely
invocation of callbacks when any given non-<tt>rcu_nocbs</tt> CPU has
10,000 callbacks, or has 10,000 more callbacks than it had the last time
encouragement was provided:
<ol>
<li> Starts a grace period, if one is not already in progress.
<li> Forces immediate checking for quiescent states, rather than
waiting for three milliseconds to have elapsed since the
beginning of the grace period.
<li> Immediately tags the CPU's callbacks with their grace period
completion numbers, rather than waiting for the <tt>RCU_SOFTIRQ</tt>
handler to get around to it.
<li> Lifts callback-execution batch limits, which speeds up callback
invocation at the expense of degrading realtime response.
</ol>
<p>
Again, these are default values when running at <tt>HZ=1000</tt>,
and can be overridden.
Again, these forward-progress measures are provided only for RCU,
not for
<a href="#Sleepable RCU">SRCU</a> or
<a href="#Tasks RCU">Tasks RCU</a>.
Even for RCU, callback-invocation forward progress for <tt>rcu_nocbs</tt>
CPUs is much less well-developed, in part because workloads benefiting
from <tt>rcu_nocbs</tt> CPUs tend to invoke <tt>call_rcu()</tt>
relatively infrequently.
If workloads emerge that need both <tt>rcu_nocbs</tt> CPUs and high
<tt>call_rcu()</tt> invocation rates, then additional forward-progress
work will be required.
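<p>
As a rough illustration of how an updater might avoid the callback-flooding
scenario described above, the following sketch throttles its
<tt>call_rcu()</tt> rate by occasionally falling back to
<tt>synchronize_rcu()</tt>, as the checklist elsewhere in this documentation
series suggests.
The loop structure, the helper names, and the threshold of 1,000 updates per
grace period are illustrative assumptions, not values taken from any kernel
code path:

<blockquote>
<pre>
 1 for (i = 0; i &lt; nupdates; i++) {
 2   p = remove_one_element(i);     /* Hypothetical removal helper. */
 3   call_rcu(&amp;p-&gt;rh, free_element_cb);
 4   if ((i % 1000) == 999)
 5     synchronize_rcu();           /* Let callback invocation catch up. */
 6 }
</pre>
</blockquote>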
<h3><a name="Composability">Composability</a></h3> <h3><a name="Composability">Composability</a></h3>
<p> <p>
@ -2272,7 +2323,7 @@ that meets this requirement.
Furthermore, NMI handlers can be interrupted by what appear to RCU Furthermore, NMI handlers can be interrupted by what appear to RCU
to be normal interrupts. to be normal interrupts.
One way that this can happen is for code that directly invokes One way that this can happen is for code that directly invokes
<tt>rcu_irq_enter()</tt> and </tt>rcu_irq_exit()</tt> to be called <tt>rcu_irq_enter()</tt> and <tt>rcu_irq_exit()</tt> to be called
from an NMI handler. from an NMI handler.
This astonishing fact of life prompted the current code structure, This astonishing fact of life prompted the current code structure,
which has <tt>rcu_irq_enter()</tt> invoking <tt>rcu_nmi_enter()</tt> which has <tt>rcu_irq_enter()</tt> invoking <tt>rcu_nmi_enter()</tt>
@ -2294,7 +2345,7 @@ via <tt>del_timer_sync()</tt> or similar.
<p> <p>
Unfortunately, there is no way to cancel an RCU callback; Unfortunately, there is no way to cancel an RCU callback;
once you invoke <tt>call_rcu()</tt>, the callback function is once you invoke <tt>call_rcu()</tt>, the callback function is
going to eventually be invoked, unless the system goes down first. eventually going to be invoked, unless the system goes down first.
Because it is normally considered socially irresponsible to crash the system Because it is normally considered socially irresponsible to crash the system
in response to a module unload request, we need some other way in response to a module unload request, we need some other way
to deal with in-flight RCU callbacks. to deal with in-flight RCU callbacks.
@ -2424,23 +2475,37 @@ for context-switch-heavy <tt>CONFIG_NO_HZ_FULL=y</tt> workloads,
but there is room for further improvement. but there is room for further improvement.
<p> <p>
In the past, it was forbidden to disable interrupts across an It is forbidden to hold any of scheduler's runqueue or priority-inheritance
<tt>rcu_read_unlock()</tt> unless that interrupt-disabled region spinlocks across an <tt>rcu_read_unlock()</tt> unless interrupts have been
of code also included the matching <tt>rcu_read_lock()</tt>. disabled across the entire RCU read-side critical section, that is,
Violating this restriction could result in deadlocks involving the up to and including the matching <tt>rcu_read_lock()</tt>.
scheduler's runqueue and priority-inheritance spinlocks. Violating this restriction can result in deadlocks involving these
This restriction was lifted when interrupt-disabled calls to scheduler spinlocks.
<tt>rcu_read_unlock()</tt> started deferring the reporting of There was hope that this restriction might be lifted when interrupt-disabled
the resulting RCU-preempt quiescent state until the end of that calls to <tt>rcu_read_unlock()</tt> started deferring the reporting of
the resulting RCU-preempt quiescent state until the end of the corresponding
interrupts-disabled region. interrupts-disabled region.
This deferred reporting means that the scheduler's runqueue and Unfortunately, timely reporting of the corresponding quiescent state
priority-inheritance locks cannot be held while reporting an RCU-preempt to expedited grace periods requires a call to <tt>raise_softirq()</tt>,
quiescent state, which lifts the earlier restriction, at least from which can acquire these scheduler spinlocks.
a deadlock perspective. In addition, real-time systems using RCU priority boosting
Unfortunately, real-time systems using RCU priority boosting may
need this restriction to remain in effect because deferred need this restriction to remain in effect because deferred
quiescent-state reporting also defers deboosting, which in turn quiescent-state reporting would also defer deboosting, which in turn
degrades real-time latencies. would degrade real-time latencies.
<p>
In theory, if a given RCU read-side critical section could be
guaranteed to be less than one second in duration, holding a scheduler
spinlock across that critical section's <tt>rcu_read_unlock()</tt>
would require only that preemption be disabled across the entire
RCU read-side critical section, not interrupts.
Unfortunately, given the possibility of vCPU preemption, long-running
interrupts, and so on, it is not possible in practice to guarantee
that a given RCU read-side critical section will complete in less than
one second.
Therefore, as noted above, if scheduler spinlocks are held across
a given call to <tt>rcu_read_unlock()</tt>, interrupts must be
disabled across the entire RCU read-side critical section.
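<p>
A minimal sketch of the permitted pattern follows.
The <tt>rq_lock</tt> name stands in for whichever scheduler runqueue or
priority-inheritance lock is involved, and is purely illustrative:

<blockquote>
<pre>
 1 local_irq_save(flags);      /* Interrupts off for the whole critical section. */
 2 rcu_read_lock();
 3 do_something();
 4 raw_spin_lock(&amp;rq_lock);    /* Scheduler-class lock. */
 5 do_something_else();
 6 rcu_read_unlock();          /* Safe: irqs off since before rcu_read_lock(). */
 7 raw_spin_unlock(&amp;rq_lock);
 8 local_irq_restore(flags);
</pre>
</blockquote>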
<h3><a name="Tracing and RCU">Tracing and RCU</a></h3> <h3><a name="Tracing and RCU">Tracing and RCU</a></h3>
@ -3233,6 +3298,11 @@ For example, RCU callback overhead might be charged back to the
originating <tt>call_rcu()</tt> instance, though probably not originating <tt>call_rcu()</tt> instance, though probably not
in production kernels. in production kernels.
<p>
Additional work may be required to provide reasonable forward-progress
guarantees under heavy load for grace periods and for callback
invocation.
<h2><a name="Summary">Summary</a></h2> <h2><a name="Summary">Summary</a></h2>
<p> <p>

View File

@ -63,7 +63,7 @@ over a rather long period of time, but improvements are always welcome!
pointer must be covered by rcu_read_lock(), rcu_read_lock_bh(), pointer must be covered by rcu_read_lock(), rcu_read_lock_bh(),
rcu_read_lock_sched(), or by the appropriate update-side lock. rcu_read_lock_sched(), or by the appropriate update-side lock.
Disabling of preemption can serve as rcu_read_lock_sched(), but Disabling of preemption can serve as rcu_read_lock_sched(), but
is less readable. is less readable and prevents lockdep from detecting locking issues.
Letting RCU-protected pointers "leak" out of an RCU read-side Letting RCU-protected pointers "leak" out of an RCU read-side
	critical section is every bit as bad as letting them leak out 	critical section is every bit as bad as letting them leak out
@ -285,11 +285,7 @@ over a rather long period of time, but improvements are always welcome!
here is that superuser already has lots of ways to crash here is that superuser already has lots of ways to crash
the machine. the machine.
d. Use call_rcu_bh() rather than call_rcu(), in order to take d. Periodically invoke synchronize_rcu(), permitting a limited
advantage of call_rcu_bh()'s faster grace periods. (This
is only a partial solution, though.)
e. Periodically invoke synchronize_rcu(), permitting a limited
number of updates per grace period. number of updates per grace period.
The same cautions apply to call_rcu_bh(), call_rcu_sched(), The same cautions apply to call_rcu_bh(), call_rcu_sched(),
@ -324,37 +320,14 @@ over a rather long period of time, but improvements are always welcome!
will break Alpha, cause aggressive compilers to generate bad code, will break Alpha, cause aggressive compilers to generate bad code,
and confuse people trying to read your code. and confuse people trying to read your code.
11. Note that synchronize_rcu() -only- guarantees to wait until 11. Any lock acquired by an RCU callback must be acquired elsewhere
all currently executing rcu_read_lock()-protected RCU read-side
critical sections complete. It does -not- necessarily guarantee
that all currently running interrupts, NMIs, preempt_disable()
code, or idle loops will complete. Therefore, if your
read-side critical sections are protected by something other
than rcu_read_lock(), do -not- use synchronize_rcu().
Similarly, disabling preemption is not an acceptable substitute
for rcu_read_lock(). Code that attempts to use preemption
disabling where it should be using rcu_read_lock() will break
in CONFIG_PREEMPT=y kernel builds.
If you want to wait for interrupt handlers, NMI handlers, and
code under the influence of preempt_disable(), you instead
need to use synchronize_irq() or synchronize_sched().
This same limitation also applies to synchronize_rcu_bh()
and synchronize_srcu(), as well as to the asynchronous and
expedited forms of the three primitives, namely call_rcu(),
call_rcu_bh(), call_srcu(), synchronize_rcu_expedited(),
synchronize_rcu_bh_expedited(), and synchronize_srcu_expedited().
12. Any lock acquired by an RCU callback must be acquired elsewhere
with softirq disabled, e.g., via spin_lock_irqsave(), with softirq disabled, e.g., via spin_lock_irqsave(),
spin_lock_bh(), etc. Failing to disable irq on a given spin_lock_bh(), etc. Failing to disable irq on a given
acquisition of that lock will result in deadlock as soon as acquisition of that lock will result in deadlock as soon as
the RCU softirq handler happens to run your RCU callback while the RCU softirq handler happens to run your RCU callback while
interrupting that acquisition's critical section. interrupting that acquisition's critical section.
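	For example (a sketch; "mylock" and the helper functions are
	illustrative names only):

		/* Process context: disable BH so the RCU softirq callback
		 * cannot interrupt this critical section and deadlock. */
		spin_lock_bh(&mylock);
		update_shared_state();
		spin_unlock_bh(&mylock);

		/* RCU callback, invoked from softirq context. */
		static void my_rcu_cb(struct rcu_head *rhp)
		{
			spin_lock(&mylock);
			cleanup_shared_state();
			spin_unlock(&mylock);
		}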
13. RCU callbacks can be and are executed in parallel. In many cases, 12. RCU callbacks can be and are executed in parallel. In many cases,
	the callback code is simply a wrapper around kfree(), so that this 	the callback code is simply a wrapper around kfree(), so that this
is not an issue (or, more accurately, to the extent that it is is not an issue (or, more accurately, to the extent that it is
an issue, the memory-allocator locking handles it). However, an issue, the memory-allocator locking handles it). However,
@ -370,7 +343,7 @@ over a rather long period of time, but improvements are always welcome!
not the case, a self-spawning RCU callback would prevent the not the case, a self-spawning RCU callback would prevent the
victim CPU from ever going offline.) victim CPU from ever going offline.)
14. Unlike other forms of RCU, it -is- permissible to block in an 13. Unlike other forms of RCU, it -is- permissible to block in an
SRCU read-side critical section (demarked by srcu_read_lock() SRCU read-side critical section (demarked by srcu_read_lock()
and srcu_read_unlock()), hence the "SRCU": "sleepable RCU". and srcu_read_unlock()), hence the "SRCU": "sleepable RCU".
Please note that if you don't need to sleep in read-side critical Please note that if you don't need to sleep in read-side critical
@ -414,7 +387,7 @@ over a rather long period of time, but improvements are always welcome!
Note that rcu_dereference() and rcu_assign_pointer() relate to Note that rcu_dereference() and rcu_assign_pointer() relate to
SRCU just as they do to other forms of RCU. SRCU just as they do to other forms of RCU.
15. The whole point of call_rcu(), synchronize_rcu(), and friends 14. The whole point of call_rcu(), synchronize_rcu(), and friends
is to wait until all pre-existing readers have finished before is to wait until all pre-existing readers have finished before
carrying out some otherwise-destructive operation. It is carrying out some otherwise-destructive operation. It is
therefore critically important to -first- remove any path therefore critically important to -first- remove any path
@ -426,13 +399,13 @@ over a rather long period of time, but improvements are always welcome!
is the caller's responsibility to guarantee that any subsequent is the caller's responsibility to guarantee that any subsequent
readers will execute safely. readers will execute safely.
16. The various RCU read-side primitives do -not- necessarily contain 15. The various RCU read-side primitives do -not- necessarily contain
memory barriers. You should therefore plan for the CPU memory barriers. You should therefore plan for the CPU
and the compiler to freely reorder code into and out of RCU and the compiler to freely reorder code into and out of RCU
read-side critical sections. It is the responsibility of the read-side critical sections. It is the responsibility of the
RCU update-side primitives to deal with this. RCU update-side primitives to deal with this.
17. Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the 16. Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the
__rcu sparse checks to validate your RCU code. These can help __rcu sparse checks to validate your RCU code. These can help
find problems as follows: find problems as follows:
@ -455,7 +428,7 @@ over a rather long period of time, but improvements are always welcome!
These debugging aids can help you find problems that are These debugging aids can help you find problems that are
otherwise extremely difficult to spot. otherwise extremely difficult to spot.
18. If you register a callback using call_rcu(), call_rcu_bh(), 17. If you register a callback using call_rcu(), call_rcu_bh(),
call_rcu_sched(), or call_srcu(), and pass in a function defined call_rcu_sched(), or call_srcu(), and pass in a function defined
	within a loadable module, then it is necessary to wait for 	within a loadable module, then it is necessary to wait for
all pending callbacks to be invoked after the last invocation all pending callbacks to be invoked after the last invocation
@ -469,8 +442,8 @@ over a rather long period of time, but improvements are always welcome!
You instead need to use one of the barrier functions: You instead need to use one of the barrier functions:
o call_rcu() -> rcu_barrier() o call_rcu() -> rcu_barrier()
o call_rcu_bh() -> rcu_barrier_bh() o call_rcu_bh() -> rcu_barrier()
o call_rcu_sched() -> rcu_barrier_sched() o call_rcu_sched() -> rcu_barrier()
o call_srcu() -> srcu_barrier() o call_srcu() -> srcu_barrier()
However, these barrier functions are absolutely -not- guaranteed However, these barrier functions are absolutely -not- guaranteed

View File

@ -176,9 +176,8 @@ causing stalls, and that the stall was affecting RCU-sched. This message
will normally be followed by stack dumps for each CPU. Please note that will normally be followed by stack dumps for each CPU. Please note that
PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, and that PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, and that
the tasks will be indicated by PID, for example, "P3421". It is even the tasks will be indicated by PID, for example, "P3421". It is even
possible for a rcu_preempt_state stall to be caused by both CPUs -and- possible for an rcu_state stall to be caused by both CPUs -and- tasks,
tasks, in which case the offending CPUs and tasks will all be called in which case the offending CPUs and tasks will all be called out in the list.
out in the list.
CPU 2's "(3 GPs behind)" indicates that this CPU has not interacted with CPU 2's "(3 GPs behind)" indicates that this CPU has not interacted with
the RCU core for the past three grace periods. In contrast, CPU 16's "(0 the RCU core for the past three grace periods. In contrast, CPU 16's "(0
@ -206,7 +205,7 @@ handlers are no longer able to execute on this CPU. This can happen if
the stalled CPU is spinning with interrupts disabled, or, in -rt the stalled CPU is spinning with interrupts disabled, or, in -rt
kernels, if a high-priority process is starving RCU's softirq handler. kernels, if a high-priority process is starving RCU's softirq handler.
The "fps=" shows the number of force-quiescent-state idle/offline The "fqs=" shows the number of force-quiescent-state idle/offline
detection passes that the grace-period kthread has made across this detection passes that the grace-period kthread has made across this
CPU since the last time that this CPU noted the beginning of a grace CPU since the last time that this CPU noted the beginning of a grace
period. period.

View File

@ -266,7 +266,7 @@ rcu_dereference()
unnecessary overhead on Alpha CPUs. unnecessary overhead on Alpha CPUs.
Note that the value returned by rcu_dereference() is valid Note that the value returned by rcu_dereference() is valid
only within the enclosing RCU read-side critical section. only within the enclosing RCU read-side critical section [1].
For example, the following is -not- legal: For example, the following is -not- legal:
rcu_read_lock(); rcu_read_lock();
@ -292,6 +292,19 @@ rcu_dereference()
typically used indirectly, via the _rcu list-manipulation typically used indirectly, via the _rcu list-manipulation
primitives, such as list_for_each_entry_rcu(). primitives, such as list_for_each_entry_rcu().
[1] The variant rcu_dereference_protected() can be used outside
of an RCU read-side critical section as long as the usage is
protected by locks acquired by the update-side code. This variant
avoids the lockdep warning that would happen when using (for
example) rcu_dereference() without rcu_read_lock() protection.
Using rcu_dereference_protected() also has the advantage
of permitting compiler optimizations that rcu_dereference()
must prohibit. The rcu_dereference_protected() variant takes
a lockdep expression to indicate which locks must be acquired
by the caller. If the indicated protection is not provided,
a lockdep splat is emitted. See RCU/Design/Requirements.html
and the API's code comments for more details and example usage.
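	A brief sketch of the idiom that [1] describes (the "gp" pointer and
	"gp_lock" are illustrative names):

		spin_lock(&gp_lock);
		p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
		if (p)
			do_something_with(p);	/* Lock held, so no rcu_read_lock() needed. */
		spin_unlock(&gp_lock);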
The following diagram shows how each API communicates among the The following diagram shows how each API communicates among the
reader, updater, and reclaimer. reader, updater, and reclaimer.
@ -322,28 +335,27 @@ to their callers and (2) call_rcu() callbacks may be invoked. Efficient
implementations of the RCU infrastructure make heavy use of batching in implementations of the RCU infrastructure make heavy use of batching in
order to amortize their overhead over many uses of the corresponding APIs. order to amortize their overhead over many uses of the corresponding APIs.
There are no fewer than three RCU mechanisms in the Linux kernel; the There are at least three flavors of RCU usage in the Linux kernel. The diagram
diagram above shows the first one, which is by far the most commonly used. above shows the most common one. On the updater side, the rcu_assign_pointer(),
The rcu_dereference() and rcu_assign_pointer() primitives are used for synchronize_rcu() and call_rcu() primitives used are the same for all three
all three mechanisms, but different defer and protect primitives are flavors. However for protection (on the reader side), the primitives used vary
used as follows: depending on the flavor:
Defer Protect a. rcu_read_lock() / rcu_read_unlock()
rcu_dereference()
a. synchronize_rcu() rcu_read_lock() / rcu_read_unlock() b. rcu_read_lock_bh() / rcu_read_unlock_bh()
call_rcu() rcu_dereference() local_bh_disable() / local_bh_enable()
rcu_dereference_bh()
b. synchronize_rcu_bh() rcu_read_lock_bh() / rcu_read_unlock_bh() c. rcu_read_lock_sched() / rcu_read_unlock_sched()
call_rcu_bh() rcu_dereference_bh() preempt_disable() / preempt_enable()
c. synchronize_sched() rcu_read_lock_sched() / rcu_read_unlock_sched()
call_rcu_sched() preempt_disable() / preempt_enable()
local_irq_save() / local_irq_restore() local_irq_save() / local_irq_restore()
hardirq enter / hardirq exit hardirq enter / hardirq exit
NMI enter / NMI exit NMI enter / NMI exit
rcu_dereference_sched() rcu_dereference_sched()
These three mechanisms are used as follows: These three flavors are used as follows:
a. RCU applied to normal data structures. a. RCU applied to normal data structures.
@ -867,18 +879,20 @@ RCU: Critical sections Grace period Barrier
bh: Critical sections Grace period Barrier bh: Critical sections Grace period Barrier
rcu_read_lock_bh call_rcu_bh rcu_barrier_bh rcu_read_lock_bh call_rcu rcu_barrier
rcu_read_unlock_bh synchronize_rcu_bh rcu_read_unlock_bh synchronize_rcu
rcu_dereference_bh synchronize_rcu_bh_expedited [local_bh_disable] synchronize_rcu_expedited
[and friends]
rcu_dereference_bh
rcu_dereference_bh_check rcu_dereference_bh_check
rcu_dereference_bh_protected rcu_dereference_bh_protected
rcu_read_lock_bh_held rcu_read_lock_bh_held
sched: Critical sections Grace period Barrier sched: Critical sections Grace period Barrier
rcu_read_lock_sched synchronize_sched rcu_barrier_sched rcu_read_lock_sched call_rcu rcu_barrier
rcu_read_unlock_sched call_rcu_sched rcu_read_unlock_sched synchronize_rcu
[preempt_disable] synchronize_sched_expedited [preempt_disable] synchronize_rcu_expedited
[and friends] [and friends]
rcu_read_lock_sched_notrace rcu_read_lock_sched_notrace
rcu_read_unlock_sched_notrace rcu_read_unlock_sched_notrace
@ -890,8 +904,8 @@ sched: Critical sections Grace period Barrier
SRCU: Critical sections Grace period Barrier SRCU: Critical sections Grace period Barrier
srcu_read_lock synchronize_srcu srcu_barrier srcu_read_lock call_srcu srcu_barrier
srcu_read_unlock call_srcu srcu_read_unlock synchronize_srcu
srcu_dereference synchronize_srcu_expedited srcu_dereference synchronize_srcu_expedited
srcu_dereference_check srcu_dereference_check
srcu_read_lock_held srcu_read_lock_held
@ -1034,7 +1048,7 @@ Answer: Just as PREEMPT_RT permits preemption of spinlock
spinlocks blocking while in RCU read-side critical spinlocks blocking while in RCU read-side critical
sections. sections.
Why the apparent inconsistency? Because it is it Why the apparent inconsistency? Because it is
possible to use priority boosting to keep the RCU possible to use priority boosting to keep the RCU
grace periods short if need be (for example, if running grace periods short if need be (for example, if running
short of memory). In contrast, if blocking waiting short of memory). In contrast, if blocking waiting

View File

@ -3754,24 +3754,6 @@
in microseconds. The default of zero says in microseconds. The default of zero says
no holdoff. no holdoff.
rcutorture.cbflood_inter_holdoff= [KNL]
Set holdoff time (jiffies) between successive
callback-flood tests.
rcutorture.cbflood_intra_holdoff= [KNL]
Set holdoff time (jiffies) between successive
bursts of callbacks within a given callback-flood
test.
rcutorture.cbflood_n_burst= [KNL]
Set the number of bursts making up a given
callback-flood test. Set this to zero to
disable callback-flood testing.
rcutorture.cbflood_n_per_burst= [KNL]
Set the number of callbacks to be registered
in a given burst of a callback-flood test.
rcutorture.fqs_duration= [KNL] rcutorture.fqs_duration= [KNL]
Set duration of force_quiescent_state bursts Set duration of force_quiescent_state bursts
in microseconds. in microseconds.
@ -3784,6 +3766,23 @@
Set wait time between force_quiescent_state bursts Set wait time between force_quiescent_state bursts
in seconds. in seconds.
rcutorture.fwd_progress= [KNL]
Enable RCU grace-period forward-progress testing
for the types of RCU supporting this notion.
rcutorture.fwd_progress_div= [KNL]
Specify the fraction of a CPU-stall-warning
period to do tight-loop forward-progress testing.
rcutorture.fwd_progress_holdoff= [KNL]
Number of seconds to wait between successive
forward-progress tests.
rcutorture.fwd_progress_need_resched= [KNL]
Enclose cond_resched() calls within checks for
need_resched() during tight-loop forward-progress
testing.
rcutorture.gp_cond= [KNL] rcutorture.gp_cond= [KNL]
Use conditional/asynchronous update-side Use conditional/asynchronous update-side
primitives, if available. primitives, if available.

View File

@ -4138,7 +4138,7 @@ S: Supported
F: drivers/net/ethernet/chelsio/cxgb4vf/ F: drivers/net/ethernet/chelsio/cxgb4vf/
CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
M: Frederic Barrat <fbarrat@linux.vnet.ibm.com> M: Frederic Barrat <fbarrat@linux.ibm.com>
M: Andrew Donnellan <andrew.donnellan@au1.ibm.com> M: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
L: linuxppc-dev@lists.ozlabs.org L: linuxppc-dev@lists.ozlabs.org
S: Supported S: Supported
@ -4150,9 +4150,9 @@ F: Documentation/powerpc/cxl.txt
F: Documentation/ABI/testing/sysfs-class-cxl F: Documentation/ABI/testing/sysfs-class-cxl
CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
M: Manoj N. Kumar <manoj@linux.vnet.ibm.com> M: Manoj N. Kumar <manoj@linux.ibm.com>
M: Matthew R. Ochs <mrochs@linux.vnet.ibm.com> M: Matthew R. Ochs <mrochs@linux.ibm.com>
M: Uma Krishnan <ukrishn@linux.vnet.ibm.com> M: Uma Krishnan <ukrishn@linux.ibm.com>
L: linux-scsi@vger.kernel.org L: linux-scsi@vger.kernel.org
S: Supported S: Supported
F: drivers/scsi/cxlflash/ F: drivers/scsi/cxlflash/
@ -5544,7 +5544,7 @@ S: Orphan
F: fs/efs/ F: fs/efs/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
M: Douglas Miller <dougmill@linux.vnet.ibm.com> M: Douglas Miller <dougmill@linux.ibm.com>
L: netdev@vger.kernel.org L: netdev@vger.kernel.org
S: Maintained S: Maintained
F: drivers/net/ethernet/ibm/ehea/ F: drivers/net/ethernet/ibm/ehea/
@ -5682,7 +5682,7 @@ F: Documentation/filesystems/ext4/ext4.rst
F: fs/ext4/ F: fs/ext4/
Extended Verification Module (EVM) Extended Verification Module (EVM)
M: Mimi Zohar <zohar@linux.vnet.ibm.com> M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org L: linux-integrity@vger.kernel.org
S: Supported S: Supported
F: security/integrity/evm/ F: security/integrity/evm/
@ -5892,7 +5892,7 @@ F: include/linux/firmware.h
FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card) FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card)
M: Joshua Morris <josh.h.morris@us.ibm.com> M: Joshua Morris <josh.h.morris@us.ibm.com>
M: Philip Kelleher <pjk1939@linux.vnet.ibm.com> M: Philip Kelleher <pjk1939@linux.ibm.com>
S: Maintained S: Maintained
F: drivers/block/rsxx/ F: drivers/block/rsxx/
@ -6159,7 +6159,7 @@ F: include/linux/fscrypt*.h
F: Documentation/filesystems/fscrypt.rst F: Documentation/filesystems/fscrypt.rst
FSI-ATTACHED I2C DRIVER FSI-ATTACHED I2C DRIVER
M: Eddie James <eajames@linux.vnet.ibm.com> M: Eddie James <eajames@linux.ibm.com>
L: linux-i2c@vger.kernel.org L: linux-i2c@vger.kernel.org
L: openbmc@lists.ozlabs.org (moderated for non-subscribers) L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
S: Maintained S: Maintained
@ -6335,8 +6335,7 @@ S: Supported
F: drivers/uio/uio_pci_generic.c F: drivers/uio/uio_pci_generic.c
GENWQE (IBM Generic Workqueue Card) GENWQE (IBM Generic Workqueue Card)
M: Frank Haverkamp <haver@linux.vnet.ibm.com> M: Frank Haverkamp <haver@linux.ibm.com>
M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
S: Supported S: Supported
F: drivers/misc/genwqe/ F: drivers/misc/genwqe/
@ -7147,8 +7146,7 @@ F: crypto/842.c
F: lib/842/ F: lib/842/
IBM Power in-Nest Crypto Acceleration IBM Power in-Nest Crypto Acceleration
M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> M: Paulo Flabiano Smorigo <pfsmorigo@linux.ibm.com>
M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
L: linux-crypto@vger.kernel.org L: linux-crypto@vger.kernel.org
S: Supported S: Supported
F: drivers/crypto/nx/Makefile F: drivers/crypto/nx/Makefile
@ -7165,8 +7163,8 @@ S: Supported
F: drivers/scsi/ipr.* F: drivers/scsi/ipr.*
IBM Power SRIOV Virtual NIC Device Driver IBM Power SRIOV Virtual NIC Device Driver
M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com> M: Thomas Falcon <tlfalcon@linux.ibm.com>
M: John Allen <jallen@linux.vnet.ibm.com> M: John Allen <jallen@linux.ibm.com>
L: netdev@vger.kernel.org L: netdev@vger.kernel.org
S: Supported S: Supported
F: drivers/net/ethernet/ibm/ibmvnic.* F: drivers/net/ethernet/ibm/ibmvnic.*
@ -7181,41 +7179,38 @@ F: arch/powerpc/include/asm/vas.h
F: arch/powerpc/include/uapi/asm/vas.h F: arch/powerpc/include/uapi/asm/vas.h
IBM Power Virtual Ethernet Device Driver IBM Power Virtual Ethernet Device Driver
M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com> M: Thomas Falcon <tlfalcon@linux.ibm.com>
L: netdev@vger.kernel.org L: netdev@vger.kernel.org
S: Supported S: Supported
F: drivers/net/ethernet/ibm/ibmveth.* F: drivers/net/ethernet/ibm/ibmveth.*
IBM Power Virtual FC Device Drivers IBM Power Virtual FC Device Drivers
M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com> M: Tyrel Datwyler <tyreld@linux.ibm.com>
L: linux-scsi@vger.kernel.org L: linux-scsi@vger.kernel.org
S: Supported S: Supported
F: drivers/scsi/ibmvscsi/ibmvfc* F: drivers/scsi/ibmvscsi/ibmvfc*
IBM Power Virtual Management Channel Driver IBM Power Virtual Management Channel Driver
M: Bryant G. Ly <bryantly@linux.vnet.ibm.com> M: Steven Royer <seroyer@linux.ibm.com>
M: Steven Royer <seroyer@linux.vnet.ibm.com>
S: Supported S: Supported
F: drivers/misc/ibmvmc.* F: drivers/misc/ibmvmc.*
IBM Power Virtual SCSI Device Drivers IBM Power Virtual SCSI Device Drivers
M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com> M: Tyrel Datwyler <tyreld@linux.ibm.com>
L: linux-scsi@vger.kernel.org L: linux-scsi@vger.kernel.org
S: Supported S: Supported
F: drivers/scsi/ibmvscsi/ibmvscsi* F: drivers/scsi/ibmvscsi/ibmvscsi*
F: include/scsi/viosrp.h F: include/scsi/viosrp.h
IBM Power Virtual SCSI Device Target Driver IBM Power Virtual SCSI Device Target Driver
M: Bryant G. Ly <bryantly@linux.vnet.ibm.com> M: Michael Cyr <mikecyr@linux.ibm.com>
M: Michael Cyr <mikecyr@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org L: target-devel@vger.kernel.org
S: Supported S: Supported
F: drivers/scsi/ibmvscsi_tgt/ F: drivers/scsi/ibmvscsi_tgt/
IBM Power VMX Cryptographic instructions IBM Power VMX Cryptographic instructions
M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> M: Paulo Flabiano Smorigo <pfsmorigo@linux.ibm.com>
M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
L: linux-crypto@vger.kernel.org L: linux-crypto@vger.kernel.org
S: Supported S: Supported
F: drivers/crypto/vmx/Makefile F: drivers/crypto/vmx/Makefile
@ -7492,7 +7487,7 @@ S: Maintained
L: linux-crypto@vger.kernel.org L: linux-crypto@vger.kernel.org
INTEGRITY MEASUREMENT ARCHITECTURE (IMA) INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
M: Mimi Zohar <zohar@linux.vnet.ibm.com> M: Mimi Zohar <zohar@linux.ibm.com>
M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com> M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
L: linux-integrity@vger.kernel.org L: linux-integrity@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
@ -8101,9 +8096,8 @@ S: Maintained
F: drivers/media/platform/rcar_jpu.c F: drivers/media/platform/rcar_jpu.c
JSM Neo PCI based serial card JSM Neo PCI based serial card
M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
L: linux-serial@vger.kernel.org L: linux-serial@vger.kernel.org
S: Maintained S: Orphan
F: drivers/tty/serial/jsm/ F: drivers/tty/serial/jsm/
K10TEMP HARDWARE MONITORING DRIVER K10TEMP HARDWARE MONITORING DRIVER
@ -8334,7 +8328,7 @@ F: include/uapi/linux/kexec.h
F: kernel/kexec* F: kernel/kexec*
KEYS-ENCRYPTED KEYS-ENCRYPTED
M: Mimi Zohar <zohar@linux.vnet.ibm.com> M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org L: keyrings@vger.kernel.org
S: Supported S: Supported
@ -8343,9 +8337,9 @@ F: include/keys/encrypted-type.h
F: security/keys/encrypted-keys/ F: security/keys/encrypted-keys/
KEYS-TRUSTED KEYS-TRUSTED
M: James Bottomley <jejb@linux.vnet.ibm.com> M: James Bottomley <jejb@linux.ibm.com>
M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com> M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
M: Mimi Zohar <zohar@linux.vnet.ibm.com> M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org L: keyrings@vger.kernel.org
S: Supported S: Supported
@ -8398,7 +8392,7 @@ F: lib/test_kmod.c
F: tools/testing/selftests/kmod/ F: tools/testing/selftests/kmod/
KPROBES KPROBES
M: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net> M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org> M: Masami Hiramatsu <mhiramat@kernel.org>
@ -8754,7 +8748,7 @@ M: Nicholas Piggin <npiggin@gmail.com>
M: David Howells <dhowells@redhat.com> M: David Howells <dhowells@redhat.com>
M: Jade Alglave <j.alglave@ucl.ac.uk> M: Jade Alglave <j.alglave@ucl.ac.uk>
M: Luc Maranget <luc.maranget@inria.fr> M: Luc Maranget <luc.maranget@inria.fr>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> M: "Paul E. McKenney" <paulmck@linux.ibm.com>
R: Akira Yokosawa <akiyks@gmail.com> R: Akira Yokosawa <akiyks@gmail.com>
R: Daniel Lustig <dlustig@nvidia.com> R: Daniel Lustig <dlustig@nvidia.com>
L: linux-kernel@vger.kernel.org L: linux-kernel@vger.kernel.org
@ -9719,7 +9713,7 @@ F: drivers/platform/x86/mlx-platform.c
MEMBARRIER SUPPORT MEMBARRIER SUPPORT
M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> M: "Paul E. McKenney" <paulmck@linux.ibm.com>
L: linux-kernel@vger.kernel.org L: linux-kernel@vger.kernel.org
S: Supported S: Supported
F: kernel/sched/membarrier.c F: kernel/sched/membarrier.c
@ -10861,7 +10855,7 @@ S: Supported
F: tools/objtool/ F: tools/objtool/
OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
M: Frederic Barrat <fbarrat@linux.vnet.ibm.com> M: Frederic Barrat <fbarrat@linux.ibm.com>
M: Andrew Donnellan <andrew.donnellan@au1.ibm.com> M: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
L: linuxppc-dev@lists.ozlabs.org L: linuxppc-dev@lists.ozlabs.org
S: Supported S: Supported
@ -12675,7 +12669,7 @@ S: Orphan
F: drivers/net/wireless/ray* F: drivers/net/wireless/ray*
RCUTORTURE TEST FRAMEWORK RCUTORTURE TEST FRAMEWORK
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org> M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org> R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
@ -12722,11 +12716,12 @@ F: arch/x86/include/asm/resctrl_sched.h
F: Documentation/x86/resctrl* F: Documentation/x86/resctrl*
READ-COPY UPDATE (RCU) READ-COPY UPDATE (RCU)
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org> M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org> R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R: Lai Jiangshan <jiangshanlai@gmail.com> R: Lai Jiangshan <jiangshanlai@gmail.com>
R: Joel Fernandes <joel@joelfernandes.org>
L: linux-kernel@vger.kernel.org L: linux-kernel@vger.kernel.org
W: http://www.rdrop.com/users/paulmck/RCU/ W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported S: Supported
@ -12862,7 +12857,7 @@ F: include/linux/reset-controller.h
RESTARTABLE SEQUENCES SUPPORT RESTARTABLE SEQUENCES SUPPORT
M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: Peter Zijlstra <peterz@infradead.org> M: Peter Zijlstra <peterz@infradead.org>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Boqun Feng <boqun.feng@gmail.com> M: Boqun Feng <boqun.feng@gmail.com>
L: linux-kernel@vger.kernel.org L: linux-kernel@vger.kernel.org
S: Supported S: Supported
@ -13394,7 +13389,7 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h F: include/scsi/sg.h
SCSI SUBSYSTEM SCSI SUBSYSTEM
M: "James E.J. Bottomley" <jejb@linux.vnet.ibm.com> M: "James E.J. Bottomley" <jejb@linux.ibm.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
M: "Martin K. Petersen" <martin.petersen@oracle.com> M: "Martin K. Petersen" <martin.petersen@oracle.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
@ -13835,7 +13830,7 @@ F: mm/sl?b*
SLEEPABLE READ-COPY UPDATE (SRCU) SLEEPABLE READ-COPY UPDATE (SRCU)
M: Lai Jiangshan <jiangshanlai@gmail.com> M: Lai Jiangshan <jiangshanlai@gmail.com>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org> M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org> R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
@ -15280,7 +15275,7 @@ F: drivers/platform/x86/topstar-laptop.c
TORTURE-TEST MODULES TORTURE-TEST MODULES
M: Davidlohr Bueso <dave@stgolabs.net> M: Davidlohr Bueso <dave@stgolabs.net>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org> M: Josh Triplett <josh@joshtriplett.org>
L: linux-kernel@vger.kernel.org L: linux-kernel@vger.kernel.org
S: Supported S: Supported

View File

@ -289,7 +289,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
(*batchp)->ptes[(*batchp)->index++] = hugepte; (*batchp)->ptes[(*batchp)->index++] = hugepte;
if ((*batchp)->index == HUGEPD_FREELIST_SIZE) { if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback); call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
*batchp = NULL; *batchp = NULL;
} }
put_cpu_var(hugepd_freelist_cur); put_cpu_var(hugepd_freelist_cur);

View File

@ -352,7 +352,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
struct mmu_table_batch **batch = &tlb->batch; struct mmu_table_batch **batch = &tlb->batch;
if (*batch) { if (*batch) {
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL; *batch = NULL;
} }
} }

View File

@ -53,7 +53,7 @@ static void timer_stop(void)
{ {
nmi_adjust_hz(1); nmi_adjust_hz(1);
unregister_die_notifier(&profile_timer_exceptions_nb); unregister_die_notifier(&profile_timer_exceptions_nb);
synchronize_sched(); /* Allow already-started NMIs to complete. */ synchronize_rcu(); /* Allow already-started NMIs to complete. */
} }
static int op_nmi_timer_init(struct oprofile_operations *ops) static int op_nmi_timer_init(struct oprofile_operations *ops)

View File

@ -59,7 +59,7 @@ static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
{ {
struct pcibios_fwaddrmap *map; struct pcibios_fwaddrmap *map;
WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock)); lockdep_assert_held(&pcibios_fwaddrmap_lock);
list_for_each_entry(map, &pcibios_fwaddrmappings, list) list_for_each_entry(map, &pcibios_fwaddrmappings, list)
if (map->dev == dev) if (map->dev == dev)

View File

@ -382,7 +382,7 @@ static int pcrypt_cpumask_change_notify(struct notifier_block *self,
cpumask_copy(new_mask->mask, cpumask->cbcpu); cpumask_copy(new_mask->mask, cpumask->cbcpu);
rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
synchronize_rcu_bh(); synchronize_rcu();
free_cpumask_var(old_mask->mask); free_cpumask_var(old_mask->mask);
kfree(old_mask); kfree(old_mask);
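The pcrypt hunk above keeps the usual publish-then-wait shape, only with the consolidated grace period. A hedged sketch of that shape, where foo_ptr, foo_lock and the foo helpers are illustrative names:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int val;
};

static struct foo __rcu *foo_ptr;
static DEFINE_SPINLOCK(foo_lock);

static int foo_read(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(foo_ptr);
	if (p)
		val = p->val;
	rcu_read_unlock();
	return val;
}

static void foo_update(struct foo *new)
{
	struct foo *old;

	spin_lock(&foo_lock);
	old = rcu_dereference_protected(foo_ptr, lockdep_is_held(&foo_lock));
	rcu_assign_pointer(foo_ptr, new);	/* publish the new version */
	spin_unlock(&foo_lock);

	synchronize_rcu();			/* wait out readers that might still see old */
	kfree(old);
}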
@ -2187,7 +2187,7 @@ static void shutdown_smi(void *send_info)
* handlers might have been running before we freed the * handlers might have been running before we freed the
* interrupt. * interrupt.
*/ */
synchronize_sched(); synchronize_rcu();
/* /*
* Timeouts are stopped, now make sure the interrupts are off * Timeouts are stopped, now make sure the interrupts are off
@ -346,7 +346,7 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
for_each_cpu(i, policy->cpus) for_each_cpu(i, policy->cpus)
cpufreq_remove_update_util_hook(i); cpufreq_remove_update_util_hook(i);
synchronize_sched(); synchronize_rcu();
} }
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy, static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
@ -1952,7 +1952,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
cpufreq_remove_update_util_hook(cpu); cpufreq_remove_update_util_hook(cpu);
cpu_data->update_util_set = false; cpu_data->update_util_set = false;
synchronize_sched(); synchronize_rcu();
} }
static int intel_pstate_get_max_freq(struct cpudata *cpu) static int intel_pstate_get_max_freq(struct cpudata *cpu)
@ -1661,7 +1661,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
napi_disable(&tp->napi); napi_disable(&tp->napi);
netif_stop_queue(dev); netif_stop_queue(dev);
synchronize_sched(); synchronize_rcu();
netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n", netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
RTL_R8(ChipCmd), RTL_R16(IntrStatus), RTL_R8(ChipCmd), RTL_R16(IntrStatus),
@ -5866,7 +5866,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_disable(&tp->napi); napi_disable(&tp->napi);
netif_stop_queue(dev); netif_stop_queue(dev);
synchronize_sched(); synchronize_rcu();
rtl8169_hw_reset(tp); rtl8169_hw_reset(tp);
@ -6609,7 +6609,7 @@ static void rtl8169_down(struct net_device *dev)
rtl8169_rx_missed(dev); rtl8169_rx_missed(dev);
/* Give a racing hard_start_xmit a few cycles to complete. */ /* Give a racing hard_start_xmit a few cycles to complete. */
synchronize_sched(); synchronize_rcu();
rtl8169_tx_clear(tp); rtl8169_tx_clear(tp);
@ -3167,7 +3167,7 @@ struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
{ {
u32 hash = efx_filter_spec_hash(spec); u32 hash = efx_filter_spec_hash(spec);
WARN_ON(!spin_is_locked(&efx->rps_hash_lock)); lockdep_assert_held(&efx->rps_hash_lock);
if (!efx->rps_hash_table) if (!efx->rps_hash_table)
return NULL; return NULL;
return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
@ -1142,7 +1142,7 @@ static void sis190_down(struct net_device *dev)
if (!poll_locked) if (!poll_locked)
poll_locked++; poll_locked++;
synchronize_sched(); synchronize_rcu();
} while (SIS_R32(IntrMask)); } while (SIS_R32(IntrMask));
@ -67,7 +67,7 @@
#ifdef CONFIG_DEBUG_SPINLOCK #ifdef CONFIG_DEBUG_SPINLOCK
#define SMSC_ASSERT_MAC_LOCK(pdata) \ #define SMSC_ASSERT_MAC_LOCK(pdata) \
WARN_ON_SMP(!spin_is_locked(&pdata->mac_lock)) lockdep_assert_held(&pdata->mac_lock)
#else #else
#define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0) #define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0)
#endif /* CONFIG_DEBUG_SPINLOCK */ #endif /* CONFIG_DEBUG_SPINLOCK */
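A short sketch of what the lockdep_assert_held() conversions above buy: spin_is_locked() can only report that somebody holds the lock (and needs WARN_ON_SMP() wrappers because it is always false on uniprocessor builds), whereas lockdep_assert_held() checks that the current context holds it and is a no-op when lockdep is disabled. The names below are illustrative:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_list_lock);

/* Caller must hold foo_list_lock. */
static struct foo *foo_list_first(void)
{
	lockdep_assert_held(&foo_list_lock);	/* splats if this context does not hold it */

	return list_first_entry_or_null(&foo_list, struct foo, list);
}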
@ -1365,7 +1365,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
if (rx_sock) if (rx_sock)
sockfd_put(rx_sock); sockfd_put(rx_sock);
/* Make sure no callbacks are outstanding */ /* Make sure no callbacks are outstanding */
synchronize_rcu_bh(); synchronize_rcu();
/* We do an extra flush before freeing memory, /* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */ * since jobs can re-queue themselves. */
vhost_net_flush(n); vhost_net_flush(n);
@ -158,7 +158,7 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr)
* or have finished their rcu_read_lock_sched() section. * or have finished their rcu_read_lock_sched() section.
*/ */
if (atomic_read(&files->count) > 1) if (atomic_read(&files->count) > 1)
synchronize_sched(); synchronize_rcu();
spin_lock(&files->file_lock); spin_lock(&files->file_lock);
if (!new_fdt) if (!new_fdt)
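The comment in this hunk is one of the places where the flavor consolidation does real work: the readers being waited for run with preemption disabled via rcu_read_lock_sched(), and a single synchronize_rcu() now waits for those sections too. A hedged sketch of that pairing, with illustrative names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
};

static struct foo __rcu *foo_ptr;

static int foo_read_sched(void)
{
	struct foo *p;
	int val = 0;

	rcu_read_lock_sched();			/* preemption-disabled reader */
	p = rcu_dereference_sched(foo_ptr);
	if (p)
		val = p->val;
	rcu_read_unlock_sched();
	return val;
}

static void foo_retire(struct foo *old)
{
	synchronize_rcu();	/* post-consolidation: also waits for the
				 * rcu_read_lock_sched() section above */
	kfree(old);
}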
@ -926,7 +926,7 @@ static inline struct userfaultfd_wait_queue *find_userfault_in(
wait_queue_entry_t *wq; wait_queue_entry_t *wq;
struct userfaultfd_wait_queue *uwq; struct userfaultfd_wait_queue *uwq;
VM_BUG_ON(!spin_is_locked(&wqh->lock)); lockdep_assert_held(&wqh->lock);
uwq = NULL; uwq = NULL;
if (!waitqueue_active(wqh)) if (!waitqueue_active(wqh))
@ -41,7 +41,7 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
* cannot both change sem->state from readers_fast and start checking * cannot both change sem->state from readers_fast and start checking
* counters while we are here. So if we see !sem->state, we know that * counters while we are here. So if we see !sem->state, we know that
* the writer won't be checking until we're past the preempt_enable() * the writer won't be checking until we're past the preempt_enable()
* and that one the synchronize_sched() is done, the writer will see * and that once the synchronize_rcu() is done, the writer will see
* anything we did within this RCU-sched read-size critical section. * anything we did within this RCU-sched read-size critical section.
*/ */
__this_cpu_inc(*sem->read_count); __this_cpu_inc(*sem->read_count);
@ -31,21 +31,4 @@ do { \
#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
/**
* synchronize_rcu_mult - Wait concurrently for multiple grace periods
* @...: List of call_rcu() functions for different grace periods to wait on
*
* This macro waits concurrently for multiple types of RCU grace periods.
* For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
* on concurrent RCU and RCU-tasks grace periods. Waiting on a give SRCU
* domain requires you to write a wrapper function for that SRCU domain's
* call_srcu() function, supplying the corresponding srcu_struct.
*
* If Tiny RCU, tell _wait_rcu_gp() does not bother waiting for RCU,
* given that anywhere synchronize_rcu_mult() can be called is automatically
* a grace period.
*/
#define synchronize_rcu_mult(...) \
_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
@ -572,8 +572,10 @@ union rcu_special {
struct { struct {
u8 blocked; u8 blocked;
u8 need_qs; u8 need_qs;
u8 exp_hint; /* Hint for performance. */
u8 pad; /* No garbage from compiler! */
} b; /* Bits. */ } b; /* Bits. */
u16 s; /* Set of bits. */ u32 s; /* Set of bits. */
}; };
enum perf_event_task_context { enum perf_event_task_context {
@ -38,20 +38,20 @@ struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
int __init_srcu_struct(struct srcu_struct *sp, const char *name, int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key); struct lock_class_key *key);
#define init_srcu_struct(sp) \ #define init_srcu_struct(ssp) \
({ \ ({ \
static struct lock_class_key __srcu_key; \ static struct lock_class_key __srcu_key; \
\ \
__init_srcu_struct((sp), #sp, &__srcu_key); \ __init_srcu_struct((ssp), #ssp, &__srcu_key); \
}) })
#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, #define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
int init_srcu_struct(struct srcu_struct *sp); int init_srcu_struct(struct srcu_struct *ssp);
#define __SRCU_DEP_MAP_INIT(srcu_name) #define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@ -67,28 +67,28 @@ int init_srcu_struct(struct srcu_struct *sp);
struct srcu_struct { }; struct srcu_struct { };
#endif #endif
void call_srcu(struct srcu_struct *sp, struct rcu_head *head, void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void (*func)(struct rcu_head *head)); void (*func)(struct rcu_head *head));
void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced); void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *sp); void synchronize_srcu(struct srcu_struct *ssp);
/** /**
* cleanup_srcu_struct - deconstruct a sleep-RCU structure * cleanup_srcu_struct - deconstruct a sleep-RCU structure
* @sp: structure to clean up. * @ssp: structure to clean up.
* *
* Must invoke this after you are finished using a given srcu_struct that * Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory. * was initialized via init_srcu_struct(), else you leak memory.
*/ */
static inline void cleanup_srcu_struct(struct srcu_struct *sp) static inline void cleanup_srcu_struct(struct srcu_struct *ssp)
{ {
_cleanup_srcu_struct(sp, false); _cleanup_srcu_struct(ssp, false);
} }
/** /**
* cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
* @sp: structure to clean up. * @ssp: structure to clean up.
* *
* Must invoke this after you are finished using a given srcu_struct that * Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory. Also, * was initialized via init_srcu_struct(), else you leak memory. Also,
@ -103,16 +103,16 @@ static inline void cleanup_srcu_struct(struct srcu_struct *sp)
* (with high probability, anyway), and will also cause the srcu_struct * (with high probability, anyway), and will also cause the srcu_struct
* to be leaked. * to be leaked.
*/ */
static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp) static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
{ {
_cleanup_srcu_struct(sp, true); _cleanup_srcu_struct(ssp, true);
} }
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
/** /**
* srcu_read_lock_held - might we be in SRCU read-side critical section? * srcu_read_lock_held - might we be in SRCU read-side critical section?
* @sp: The srcu_struct structure to check * @ssp: The srcu_struct structure to check
* *
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
@ -126,16 +126,16 @@ static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
* relies on normal RCU, it can be called from the CPU which * relies on normal RCU, it can be called from the CPU which
* is in the idle loop from an RCU point of view or offline. * is in the idle loop from an RCU point of view or offline.
*/ */
static inline int srcu_read_lock_held(const struct srcu_struct *sp) static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{ {
if (!debug_lockdep_rcu_enabled()) if (!debug_lockdep_rcu_enabled())
return 1; return 1;
return lock_is_held(&sp->dep_map); return lock_is_held(&ssp->dep_map);
} }
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline int srcu_read_lock_held(const struct srcu_struct *sp) static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{ {
return 1; return 1;
} }
@ -145,7 +145,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
/** /**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing * @p: the pointer to fetch and protect for later dereferencing
* @sp: pointer to the srcu_struct, which is used to check that we * @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section. * really are in an SRCU read-side critical section.
* @c: condition to check for update-side use * @c: condition to check for update-side use
* *
@ -154,29 +154,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
* to 1. The @c argument will normally be a logical expression containing * to 1. The @c argument will normally be a logical expression containing
* lockdep_is_held() calls. * lockdep_is_held() calls.
*/ */
#define srcu_dereference_check(p, sp, c) \ #define srcu_dereference_check(p, ssp, c) \
__rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu) __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
/** /**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing * @p: the pointer to fetch and protect for later dereferencing
* @sp: pointer to the srcu_struct, which is used to check that we * @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section. * really are in an SRCU read-side critical section.
* *
* Makes rcu_dereference_check() do the dirty work. If PROVE_RCU * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
* is enabled, invoking this outside of an RCU read-side critical * is enabled, invoking this outside of an RCU read-side critical
* section will result in an RCU-lockdep splat. * section will result in an RCU-lockdep splat.
*/ */
#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) #define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)
/** /**
* srcu_dereference_notrace - no tracing and no lockdep calls from here * srcu_dereference_notrace - no tracing and no lockdep calls from here
* @p: the pointer to fetch and protect for later dereferencing
* @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
*/ */
#define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1) #define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
/** /**
* srcu_read_lock - register a new reader for an SRCU-protected structure. * srcu_read_lock - register a new reader for an SRCU-protected structure.
* @sp: srcu_struct in which to register the new reader. * @ssp: srcu_struct in which to register the new reader.
* *
* Enter an SRCU read-side critical section. Note that SRCU read-side * Enter an SRCU read-side critical section. Note that SRCU read-side
* critical sections may be nested. However, it is illegal to * critical sections may be nested. However, it is illegal to
@ -191,44 +194,44 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
* srcu_read_unlock() in an irq handler if the matching srcu_read_lock() * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
* was invoked in process context. * was invoked in process context.
*/ */
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{ {
int retval; int retval;
retval = __srcu_read_lock(sp); retval = __srcu_read_lock(ssp);
rcu_lock_acquire(&(sp)->dep_map); rcu_lock_acquire(&(ssp)->dep_map);
return retval; return retval;
} }
/* Used by tracing, cannot be traced and cannot invoke lockdep. */ /* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int static inline notrace int
srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp) srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{ {
int retval; int retval;
retval = __srcu_read_lock(sp); retval = __srcu_read_lock(ssp);
return retval; return retval;
} }
/** /**
* srcu_read_unlock - unregister a old reader from an SRCU-protected structure. * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
* @sp: srcu_struct in which to unregister the old reader. * @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock(). * @idx: return value from corresponding srcu_read_lock().
* *
* Exit an SRCU read-side critical section. * Exit an SRCU read-side critical section.
*/ */
static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
__releases(sp) __releases(ssp)
{ {
rcu_lock_release(&(sp)->dep_map); rcu_lock_release(&(ssp)->dep_map);
__srcu_read_unlock(sp, idx); __srcu_read_unlock(ssp, idx);
} }
/* Used by tracing, cannot be traced and cannot call lockdep. */ /* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp) srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{ {
__srcu_read_unlock(sp, idx); __srcu_read_unlock(ssp, idx);
} }
/** /**
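Because most of the srcu.h diff above is the mechanical sp -> ssp rename, a compact usage sketch may help keep the API itself in view. my_srcu, foo_ptr and the foo helpers are illustrative; the point is that SRCU readers may sleep and that each srcu_struct is its own grace-period domain:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);

struct foo {
	int val;
};

static struct foo __rcu *foo_ptr;

static int foo_read_sleepable(void)
{
	struct foo *p;
	int idx, val = -1;

	idx = srcu_read_lock(&my_srcu);		/* may block inside */
	p = srcu_dereference(foo_ptr, &my_srcu);
	if (p)
		val = p->val;
	srcu_read_unlock(&my_srcu, idx);	/* pass back the index */
	return val;
}

static void foo_replace(struct foo *new, struct foo *old)
{
	rcu_assign_pointer(foo_ptr, new);
	synchronize_srcu(&my_srcu);	/* waits only for readers of my_srcu */
	kfree(old);
}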
@ -60,7 +60,7 @@ void srcu_drive_gp(struct work_struct *wp);
#define DEFINE_STATIC_SRCU(name) \ #define DEFINE_STATIC_SRCU(name) \
static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name) static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
void synchronize_srcu(struct srcu_struct *sp); void synchronize_srcu(struct srcu_struct *ssp);
/* /*
* Counts the new reader in the appropriate per-CPU element of the * Counts the new reader in the appropriate per-CPU element of the
@ -68,36 +68,36 @@ void synchronize_srcu(struct srcu_struct *sp);
* __srcu_read_unlock() must be in the same handler instance. Returns an * __srcu_read_unlock() must be in the same handler instance. Returns an
* index that must be passed to the matching srcu_read_unlock(). * index that must be passed to the matching srcu_read_unlock().
*/ */
static inline int __srcu_read_lock(struct srcu_struct *sp) static inline int __srcu_read_lock(struct srcu_struct *ssp)
{ {
int idx; int idx;
idx = READ_ONCE(sp->srcu_idx); idx = READ_ONCE(ssp->srcu_idx);
WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1); WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
return idx; return idx;
} }
static inline void synchronize_srcu_expedited(struct srcu_struct *sp) static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{ {
synchronize_srcu(sp); synchronize_srcu(ssp);
} }
static inline void srcu_barrier(struct srcu_struct *sp) static inline void srcu_barrier(struct srcu_struct *ssp)
{ {
synchronize_srcu(sp); synchronize_srcu(ssp);
} }
/* Defined here to avoid size increase for non-torture kernels. */ /* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *sp, static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
char *tt, char *tf) char *tt, char *tf)
{ {
int idx; int idx;
idx = READ_ONCE(sp->srcu_idx) & 0x1; idx = READ_ONCE(ssp->srcu_idx) & 0x1;
pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
tt, tf, idx, tt, tf, idx,
READ_ONCE(sp->srcu_lock_nesting[!idx]), READ_ONCE(ssp->srcu_lock_nesting[!idx]),
READ_ONCE(sp->srcu_lock_nesting[idx])); READ_ONCE(ssp->srcu_lock_nesting[idx]));
} }
#endif #endif
@ -51,7 +51,7 @@ struct srcu_data {
unsigned long grpmask; /* Mask for leaf srcu_node */ unsigned long grpmask; /* Mask for leaf srcu_node */
/* ->srcu_data_have_cbs[]. */ /* ->srcu_data_have_cbs[]. */
int cpu; int cpu;
struct srcu_struct *sp; struct srcu_struct *ssp;
}; };
/* /*
@ -138,8 +138,8 @@ struct srcu_struct {
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
void synchronize_srcu_expedited(struct srcu_struct *sp); void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *sp); void srcu_barrier(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf); void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
#endif #endif
@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
static inline void tracepoint_synchronize_unregister(void) static inline void tracepoint_synchronize_unregister(void)
{ {
synchronize_srcu(&tracepoint_srcu); synchronize_srcu(&tracepoint_srcu);
synchronize_sched(); synchronize_rcu();
} }
#else #else
static inline void tracepoint_synchronize_unregister(void) static inline void tracepoint_synchronize_unregister(void)
@ -212,8 +212,8 @@ struct ustat {
* weird ABI and we need to ask it explicitly. * weird ABI and we need to ask it explicitly.
* *
* The alignment is required to guarantee that bit 0 of @next will be * The alignment is required to guarantee that bit 0 of @next will be
* clear under normal conditions -- as long as we use call_rcu(), * clear under normal conditions -- as long as we use call_rcu() or
* call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. * call_srcu() to queue the callback.
* *
* This guarantee is important for few reasons: * This guarantee is important for few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer; * - future call_rcu_lazy() will make use of lower bits in the pointer;
@ -1046,12 +1046,12 @@ static void mark_readonly(void)
{ {
if (rodata_enabled) { if (rodata_enabled) {
/* /*
* load_module() results in W+X mappings, which are cleaned up * load_module() results in W+X mappings, which are cleaned
* with call_rcu_sched(). Let's make sure that queued work is * up with call_rcu(). Let's make sure that queued work is
* flushed so that we don't hit false positives looking for * flushed so that we don't hit false positives looking for
* insecure pages which are W+X. * insecure pages which are W+X.
*/ */
rcu_barrier_sched(); rcu_barrier();
mark_rodata_ro(); mark_rodata_ro();
rodata_test(); rodata_test();
} else } else
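The comment above leans on the difference between the two wait primitives: synchronize_rcu() waits for a grace period, while rcu_barrier() waits until every callback already queued by call_rcu() has actually been invoked, which is what mark_rodata_ro() needs before it can trust that no W+X pages are still waiting to be freed. A hedged sketch of that distinction, with illustrative names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct mapping {
	struct rcu_head rcu;
	/* ... */
};

static void mapping_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct mapping, rcu));
}

static void retire_mappings(struct mapping *m1, struct mapping *m2)
{
	call_rcu(&m1->rcu, mapping_free_cb);
	call_rcu(&m2->rcu, mapping_free_cb);

	/*
	 * synchronize_rcu() would only guarantee that a grace period has
	 * elapsed; the two callbacks above could still be queued.
	 * rcu_barrier() does not return until all previously queued
	 * callbacks have run.
	 */
	rcu_barrier();
}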
@ -5343,7 +5343,7 @@ int __init cgroup_init(void)
cgroup_rstat_boot(); cgroup_rstat_boot();
/* /*
* The latency of the synchronize_sched() is too high for cgroups, * The latency of the synchronize_rcu() is too high for cgroups,
* avoid it at the cost of forcing all readers into the slow path. * avoid it at the cost of forcing all readers into the slow path.
*/ */
rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss); rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
@ -9918,7 +9918,7 @@ static void account_event(struct perf_event *event)
* call the perf scheduling hooks before proceeding to * call the perf scheduling hooks before proceeding to
* install events that need them. * install events that need them.
*/ */
synchronize_sched(); synchronize_rcu();
} }
/* /*
* Now that we have waited for the sync_sched(), allow further * Now that we have waited for the sync_sched(), allow further
@ -229,7 +229,7 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
struct kprobe_insn_page *kip, *next; struct kprobe_insn_page *kip, *next;
/* Ensure no-one is interrupted on the garbages */ /* Ensure no-one is interrupted on the garbages */
synchronize_sched(); synchronize_rcu();
list_for_each_entry_safe(kip, next, &c->pages, list) { list_for_each_entry_safe(kip, next, &c->pages, list) {
int i; int i;
@ -1382,7 +1382,7 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
if (ret) { if (ret) {
ap->flags |= KPROBE_FLAG_DISABLED; ap->flags |= KPROBE_FLAG_DISABLED;
list_del_rcu(&p->list); list_del_rcu(&p->list);
synchronize_sched(); synchronize_rcu();
} }
} }
} }
@ -1597,7 +1597,7 @@ int register_kprobe(struct kprobe *p)
ret = arm_kprobe(p); ret = arm_kprobe(p);
if (ret) { if (ret) {
hlist_del_rcu(&p->hlist); hlist_del_rcu(&p->hlist);
synchronize_sched(); synchronize_rcu();
goto out; goto out;
} }
} }
@ -1776,7 +1776,7 @@ void unregister_kprobes(struct kprobe **kps, int num)
kps[i]->addr = NULL; kps[i]->addr = NULL;
mutex_unlock(&kprobe_mutex); mutex_unlock(&kprobe_mutex);
synchronize_sched(); synchronize_rcu();
for (i = 0; i < num; i++) for (i = 0; i < num; i++)
if (kps[i]->addr) if (kps[i]->addr)
__unregister_kprobe_bottom(kps[i]); __unregister_kprobe_bottom(kps[i]);
@ -1966,7 +1966,7 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
rps[i]->kp.addr = NULL; rps[i]->kp.addr = NULL;
mutex_unlock(&kprobe_mutex); mutex_unlock(&kprobe_mutex);
synchronize_sched(); synchronize_rcu();
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
if (rps[i]->kp.addr) { if (rps[i]->kp.addr) {
__unregister_kprobe_bottom(&rps[i]->kp); __unregister_kprobe_bottom(&rps[i]->kp);
@ -61,7 +61,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
ops = container_of(fops, struct klp_ops, fops); ops = container_of(fops, struct klp_ops, fops);
/* /*
* A variant of synchronize_sched() is used to allow patching functions * A variant of synchronize_rcu() is used to allow patching functions
* where RCU is not watching, see klp_synchronize_transition(). * where RCU is not watching, see klp_synchronize_transition().
*/ */
preempt_disable_notrace(); preempt_disable_notrace();
@ -72,7 +72,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
/* /*
* func should never be NULL because preemption should be disabled here * func should never be NULL because preemption should be disabled here
* and unregister_ftrace_function() does the equivalent of a * and unregister_ftrace_function() does the equivalent of a
* synchronize_sched() before the func_stack removal. * synchronize_rcu() before the func_stack removal.
*/ */
if (WARN_ON_ONCE(!func)) if (WARN_ON_ONCE(!func))
goto unlock; goto unlock;
@ -52,7 +52,7 @@ static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
/* /*
* This function is just a stub to implement a hard force * This function is just a stub to implement a hard force
* of synchronize_sched(). This requires synchronizing * of synchronize_rcu(). This requires synchronizing
* tasks even in userspace and idle. * tasks even in userspace and idle.
*/ */
static void klp_sync(struct work_struct *work) static void klp_sync(struct work_struct *work)
@ -175,7 +175,7 @@ void klp_cancel_transition(void)
void klp_update_patch_state(struct task_struct *task) void klp_update_patch_state(struct task_struct *task)
{ {
/* /*
* A variant of synchronize_sched() is used to allow patching functions * A variant of synchronize_rcu() is used to allow patching functions
* where RCU is not watching, see klp_synchronize_transition(). * where RCU is not watching, see klp_synchronize_transition().
*/ */
preempt_disable_notrace(); preempt_disable_notrace();
@ -4195,7 +4195,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
* *
* sync_sched() is sufficient because the read-side is IRQ disable. * sync_sched() is sufficient because the read-side is IRQ disable.
*/ */
synchronize_sched(); synchronize_rcu();
/* /*
* XXX at this point we could return the resources to the pool; * XXX at this point we could return the resources to the pool;

void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{ {
SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); lockdep_assert_held(&lock->wait_lock);
DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list)); DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
DEBUG_LOCKS_WARN_ON(waiter->magic != waiter); DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@ -51,7 +51,7 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task) struct task_struct *task)
{ {
SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); lockdep_assert_held(&lock->wait_lock);
/* Mark the current thread as blocked on the lock: */ /* Mark the current thread as blocked on the lock: */
task->blocked_on = waiter; task->blocked_on = waiter;
@ -2159,7 +2159,7 @@ static void free_module(struct module *mod)
/* Remove this module from bug list, this uses list_del_rcu */ /* Remove this module from bug list, this uses list_del_rcu */
module_bug_cleanup(mod); module_bug_cleanup(mod);
/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
synchronize_sched(); synchronize_rcu();
mutex_unlock(&module_mutex); mutex_unlock(&module_mutex);
/* This may be empty, but that's OK */ /* This may be empty, but that's OK */
@ -3507,15 +3507,15 @@ static noinline int do_init_module(struct module *mod)
/* /*
* We want to free module_init, but be aware that kallsyms may be * We want to free module_init, but be aware that kallsyms may be
* walking this with preempt disabled. In all the failure paths, we * walking this with preempt disabled. In all the failure paths, we
* call synchronize_sched(), but we don't want to slow down the success * call synchronize_rcu(), but we don't want to slow down the success
* path, so use actual RCU here. * path, so use actual RCU here.
* Note that module_alloc() on most architectures creates W+X page * Note that module_alloc() on most architectures creates W+X page
* mappings which won't be cleaned up until do_free_init() runs. Any * mappings which won't be cleaned up until do_free_init() runs. Any
* code such as mark_rodata_ro() which depends on those mappings to * code such as mark_rodata_ro() which depends on those mappings to
* be cleaned up needs to sync with the queued work - ie * be cleaned up needs to sync with the queued work - ie
* rcu_barrier_sched() * rcu_barrier()
*/ */
call_rcu_sched(&freeinit->rcu, do_free_init); call_rcu(&freeinit->rcu, do_free_init);
mutex_unlock(&module_mutex); mutex_unlock(&module_mutex);
wake_up_all(&module_wq); wake_up_all(&module_wq);
@ -3526,7 +3526,7 @@ static noinline int do_init_module(struct module *mod)
fail: fail:
/* Try to protect us from buggy refcounters. */ /* Try to protect us from buggy refcounters. */
mod->state = MODULE_STATE_GOING; mod->state = MODULE_STATE_GOING;
synchronize_sched(); synchronize_rcu();
module_put(mod); module_put(mod);
blocking_notifier_call_chain(&module_notify_list, blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod); MODULE_STATE_GOING, mod);
@ -3819,7 +3819,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
ddebug_cleanup: ddebug_cleanup:
ftrace_release_mod(mod); ftrace_release_mod(mod);
dynamic_debug_remove(mod, info->debug); dynamic_debug_remove(mod, info->debug);
synchronize_sched(); synchronize_rcu();
kfree(mod->args); kfree(mod->args);
free_arch_cleanup: free_arch_cleanup:
module_arch_cleanup(mod); module_arch_cleanup(mod);
@ -3834,7 +3834,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
mod_tree_remove(mod); mod_tree_remove(mod);
wake_up_all(&module_wq); wake_up_all(&module_wq);
/* Wait for RCU-sched synchronizing before releasing mod->list. */ /* Wait for RCU-sched synchronizing before releasing mod->list. */
synchronize_sched(); synchronize_rcu();
mutex_unlock(&module_mutex); mutex_unlock(&module_mutex);
free_module: free_module:
/* Free lock-classes; relies on the preceding sync_rcu() */ /* Free lock-classes; relies on the preceding sync_rcu() */
@ -526,12 +526,14 @@ srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { } static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { } static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; } static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */ #else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void); unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void); unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp); unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void); void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void); int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void); void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq; extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq; extern struct workqueue_struct *rcu_par_gp_wq;
@ -539,8 +541,10 @@ extern struct workqueue_struct *rcu_par_gp_wq;
#ifdef CONFIG_RCU_NOCB_CPU #ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu); bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else #else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; } static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif #endif
#endif /* __LINUX_RCU_H */ #endif /* __LINUX_RCU_H */
@ -56,6 +56,7 @@
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/sched/debug.h> #include <linux/sched/debug.h>
#include <linux/sched/sysctl.h> #include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include "rcu.h" #include "rcu.h"
@ -80,13 +81,6 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@jos
/* Must be power of two minus one. */ /* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3) #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
torture_param(int, cbflood_inter_holdoff, HZ,
"Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
"Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
"# callbacks per burst in flood");
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
"Extend readers by disabling bh (1), irqs (2), or preempt (4)"); "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, torture_param(int, fqs_duration, 0,
@ -138,12 +132,10 @@ module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
static int nrealreaders; static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task; static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks; static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks; static struct task_struct **reader_tasks;
static struct task_struct *stats_task; static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task; static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS]; static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task; static struct task_struct *stall_task;
@ -181,7 +173,6 @@ static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers; static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts; static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */ static long n_barrier_successes; /* did rcu_barrier test succeed? */
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed; static struct list_head rcu_torture_removed;
static int rcu_torture_writer_state; static int rcu_torture_writer_state;
@ -259,6 +250,8 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
/* /*
* Allocate an element from the rcu_tortures pool. * Allocate an element from the rcu_tortures pool.
*/ */
@ -348,7 +341,8 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
* period, and we want a long delay occasionally to trigger * period, and we want a long delay occasionally to trigger
* force_quiescent_state. */ * force_quiescent_state. */
if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { if (!rcu_fwd_cb_nodelay &&
!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
started = cur_ops->get_gp_seq(); started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local(); ts = rcu_trace_clock_local();
if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
@ -870,59 +864,6 @@ checkwait: stutter_wait("rcu_torture_boost");
return 0; return 0;
} }
static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}
/*
* RCU torture callback-flood kthread. Repeatedly induces bursts of calls
* to call_rcu() or analogous, increasing the probability of occurrence
* of callback-overflow corner cases.
*/
static int
rcu_torture_cbflood(void *arg)
{
int err = 1;
int i;
int j;
struct rcu_head *rhp;
if (cbflood_n_per_burst > 0 &&
cbflood_inter_holdoff > 0 &&
cbflood_intra_holdoff > 0 &&
cur_ops->call &&
cur_ops->cb_barrier) {
rhp = vmalloc(array3_size(cbflood_n_burst,
cbflood_n_per_burst,
sizeof(*rhp)));
err = !rhp;
}
if (err) {
VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
goto wait_for_stop;
}
VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
do {
schedule_timeout_interruptible(cbflood_inter_holdoff);
atomic_long_inc(&n_cbfloods);
WARN_ON(signal_pending(current));
for (i = 0; i < cbflood_n_burst; i++) {
for (j = 0; j < cbflood_n_per_burst; j++) {
cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
rcu_torture_cbflood_cb);
}
schedule_timeout_interruptible(cbflood_intra_holdoff);
WARN_ON(signal_pending(current));
}
cur_ops->cb_barrier();
stutter_wait("rcu_torture_cbflood");
} while (!torture_must_stop());
vfree(rhp);
wait_for_stop:
torture_kthread_stopping("rcu_torture_cbflood");
return 0;
}
/* /*
* RCU torture force-quiescent-state kthread. Repeatedly induces * RCU torture force-quiescent-state kthread. Repeatedly induces
* bursts of calls to force_quiescent_state(), increasing the probability * bursts of calls to force_quiescent_state(), increasing the probability
@ -1457,11 +1398,10 @@ rcu_torture_stats_print(void)
n_rcu_torture_boosts, n_rcu_torture_boosts,
atomic_long_read(&n_rcu_torture_timers)); atomic_long_read(&n_rcu_torture_timers));
torture_onoff_stats(); torture_onoff_stats();
pr_cont("barrier: %ld/%ld:%ld ", pr_cont("barrier: %ld/%ld:%ld\n",
n_barrier_successes, n_barrier_successes,
n_barrier_attempts, n_barrier_attempts,
n_rcu_torture_barrier_error); n_rcu_torture_barrier_error);
pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
pr_alert("%s%s ", torture_type, TORTURE_FLAG); pr_alert("%s%s ", torture_type, TORTURE_FLAG);
if (atomic_read(&n_rcu_torture_mberror) != 0 || if (atomic_read(&n_rcu_torture_mberror) != 0 ||
@ -1674,8 +1614,90 @@ static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
} }
/* Carry out grace-period forward-progress testing. */ /* State for continuous-flood RCU callbacks. */
static int rcu_torture_fwd_prog(void *args) struct rcu_fwd_cb {
struct rcu_head rh;
struct rcu_fwd_cb *rfc_next;
int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)];
static void rcu_torture_fwd_cb_hist(void)
{
int i;
int j;
for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
if (n_launders_hist[i] > 0)
break;
pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
__func__, jiffies - rcu_fwd_startat);
for (j = 0; j <= i; j++)
pr_cont(" %ds/%d: %ld",
j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]);
pr_cont("\n");
}
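/*
 * Worked numbers (illustrative, derived from the constants above): with
 * FWD_CBS_HIST_DIV = 10, each histogram bucket spans HZ / 10 jiffies,
 * roughly 100 ms, and with MAX_FWD_CB_JIFFIES = 8 * HZ the
 * n_launders_hist[] array provides 2 * 8 * HZ / (HZ / 10) = 160 buckets.
 */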
/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
unsigned long flags;
int i;
struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
struct rcu_fwd_cb **rfcpp;
rfcp->rfc_next = NULL;
rfcp->rfc_gps++;
spin_lock_irqsave(&rcu_fwd_lock, flags);
rfcpp = rcu_fwd_cb_tail;
rcu_fwd_cb_tail = &rfcp->rfc_next;
WRITE_ONCE(*rfcpp, rfcp);
WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
if (i >= ARRAY_SIZE(n_launders_hist))
i = ARRAY_SIZE(n_launders_hist) - 1;
n_launders_hist[i]++;
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}
/*
* Free all callbacks on the rcu_fwd_cb_head list, either because the
* test is over or because we hit an OOM event.
*/
static unsigned long rcu_torture_fwd_prog_cbfree(void)
{
unsigned long flags;
unsigned long freed = 0;
struct rcu_fwd_cb *rfcp;
for (;;) {
spin_lock_irqsave(&rcu_fwd_lock, flags);
rfcp = rcu_fwd_cb_head;
if (!rfcp)
break;
rcu_fwd_cb_head = rfcp->rfc_next;
if (!rcu_fwd_cb_head)
rcu_fwd_cb_tail = &rcu_fwd_cb_head;
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
kfree(rfcp);
freed++;
}
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
return freed;
}
/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
{ {
unsigned long cver; unsigned long cver;
unsigned long dur; unsigned long dur;
@ -1686,19 +1708,14 @@ static int rcu_torture_fwd_prog(void *args)
int sd4; int sd4;
bool selfpropcb = false; bool selfpropcb = false;
unsigned long stopat; unsigned long stopat;
int tested = 0;
int tested_tries = 0;
static DEFINE_TORTURE_RANDOM(trs); static DEFINE_TORTURE_RANDOM(trs);
VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
set_user_nice(current, MAX_NICE);
if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
init_rcu_head_on_stack(&fcs.rh); init_rcu_head_on_stack(&fcs.rh);
selfpropcb = true; selfpropcb = true;
} }
do {
schedule_timeout_interruptible(fwd_progress_holdoff * HZ); /* Tight loop containing cond_resched(). */
if (selfpropcb) { if (selfpropcb) {
WRITE_ONCE(fcs.stop, 0); WRITE_ONCE(fcs.stop, 0);
cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
@ -1708,17 +1725,20 @@ static int rcu_torture_fwd_prog(void *args)
sd = cur_ops->stall_dur() + 1; sd = cur_ops->stall_dur() + 1;
sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
dur = sd4 + torture_random(&trs) % (sd - sd4); dur = sd4 + torture_random(&trs) % (sd - sd4);
stopat = jiffies + dur; WRITE_ONCE(rcu_fwd_startat, jiffies);
while (time_before(jiffies, stopat) && !torture_must_stop()) { stopat = rcu_fwd_startat + dur;
while (time_before(jiffies, stopat) &&
!READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
idx = cur_ops->readlock(); idx = cur_ops->readlock();
udelay(10); udelay(10);
cur_ops->readunlock(idx); cur_ops->readunlock(idx);
if (!fwd_progress_need_resched || need_resched()) if (!fwd_progress_need_resched || need_resched())
cond_resched(); cond_resched();
} }
tested_tries++; (*tested_tries)++;
if (!time_before(jiffies, stopat) && !torture_must_stop()) { if (!time_before(jiffies, stopat) &&
tested++; !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
(*tested)++;
cver = READ_ONCE(rcu_torture_current_version) - cver; cver = READ_ONCE(rcu_torture_current_version) - cver;
gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
WARN_ON(!cver && gps < 2); WARN_ON(!cver && gps < 2);
@ -1729,13 +1749,145 @@ static int rcu_torture_fwd_prog(void *args)
cur_ops->sync(); /* Wait for running CB to complete. */ cur_ops->sync(); /* Wait for running CB to complete. */
cur_ops->cb_barrier(); /* Wait for queued callbacks. */ cur_ops->cb_barrier(); /* Wait for queued callbacks. */
} }
/* Avoid slow periods, better to test when busy. */
stutter_wait("rcu_torture_fwd_prog");
} while (!torture_must_stop());
if (selfpropcb) { if (selfpropcb) {
WARN_ON(READ_ONCE(fcs.stop) != 2); WARN_ON(READ_ONCE(fcs.stop) != 2);
destroy_rcu_head_on_stack(&fcs.rh); destroy_rcu_head_on_stack(&fcs.rh);
} }
}
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
unsigned long cver;
unsigned long gps;
int i;
long n_launders;
long n_launders_cb_snap;
long n_launders_sa;
long n_max_cbs;
long n_max_gps;
struct rcu_fwd_cb *rfcp;
struct rcu_fwd_cb *rfcpn;
unsigned long stopat;
unsigned long stoppedat;
if (READ_ONCE(rcu_fwd_emergency_stop))
return; /* Get out of the way quickly, no GP wait! */
/* Loop continuously posting RCU callbacks. */
WRITE_ONCE(rcu_fwd_cb_nodelay, true);
cur_ops->sync(); /* Later readers see above write. */
WRITE_ONCE(rcu_fwd_startat, jiffies);
stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
n_launders = 0;
n_launders_cb = 0;
n_launders_sa = 0;
n_max_cbs = 0;
n_max_gps = 0;
for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
n_launders_hist[i] = 0;
cver = READ_ONCE(rcu_torture_current_version);
gps = cur_ops->get_gp_seq();
while (time_before(jiffies, stopat) &&
!READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
rfcp = READ_ONCE(rcu_fwd_cb_head);
rfcpn = NULL;
if (rfcp)
rfcpn = READ_ONCE(rfcp->rfc_next);
if (rfcpn) {
if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
break;
rcu_fwd_cb_head = rfcpn;
n_launders++;
n_launders_sa++;
} else {
rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
if (WARN_ON_ONCE(!rfcp)) {
schedule_timeout_interruptible(1);
continue;
}
n_max_cbs++;
n_launders_sa = 0;
rfcp->rfc_gps = 0;
}
cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
cond_resched();
}
stoppedat = jiffies;
n_launders_cb_snap = READ_ONCE(n_launders_cb);
cver = READ_ONCE(rcu_torture_current_version) - cver;
gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
(void)rcu_torture_fwd_prog_cbfree();
WRITE_ONCE(rcu_fwd_cb_nodelay, false);
if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
__func__,
stoppedat - rcu_fwd_startat, jiffies - stoppedat,
n_launders + n_max_cbs - n_launders_cb_snap,
n_launders, n_launders_sa,
n_max_gps, n_max_cbs, cver, gps);
rcu_torture_fwd_cb_hist();
}
}
/*
* OOM notifier, but this only prints diagnostic information for the
* current forward-progress test.
*/
static int rcutorture_oom_notify(struct notifier_block *self,
unsigned long notused, void *nfreed)
{
WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
__func__);
rcu_torture_fwd_cb_hist();
rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat) / 2));
WRITE_ONCE(rcu_fwd_emergency_stop, true);
smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
pr_info("%s: Freed %lu RCU callbacks.\n",
__func__, rcu_torture_fwd_prog_cbfree());
rcu_barrier();
pr_info("%s: Freed %lu RCU callbacks.\n",
__func__, rcu_torture_fwd_prog_cbfree());
rcu_barrier();
pr_info("%s: Freed %lu RCU callbacks.\n",
__func__, rcu_torture_fwd_prog_cbfree());
smp_mb(); /* Frees before return to avoid redoing OOM. */
(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
pr_info("%s returning after OOM processing.\n", __func__);
return NOTIFY_OK;
}
static struct notifier_block rcutorture_oom_nb = {
.notifier_call = rcutorture_oom_notify
};
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
int tested = 0;
int tested_tries = 0;
VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
rcu_bind_current_to_nocb();
if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
set_user_nice(current, MAX_NICE);
do {
schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
WRITE_ONCE(rcu_fwd_emergency_stop, false);
register_oom_notifier(&rcutorture_oom_nb);
rcu_torture_fwd_prog_nr(&tested, &tested_tries);
rcu_torture_fwd_prog_cr();
unregister_oom_notifier(&rcutorture_oom_nb);
/* Avoid slow periods, better to test when busy. */
stutter_wait("rcu_torture_fwd_prog");
} while (!torture_must_stop());
/* Short runs might not contain a valid forward-progress attempt. */ /* Short runs might not contain a valid forward-progress attempt. */
WARN_ON(!tested && tested_tries >= 5); WARN_ON(!tested && tested_tries >= 5);
pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
@ -1748,7 +1900,8 @@ static int __init rcu_torture_fwd_prog_init(void)
{ {
if (!fwd_progress) if (!fwd_progress)
return 0; /* Not requested, so don't do it. */ return 0; /* Not requested, so don't do it. */
if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) { if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
cur_ops == &rcu_busted_ops) {
VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
return 0; return 0;
} }
@ -1968,8 +2121,6 @@ rcu_torture_cleanup(void)
cur_ops->name, gp_seq, flags); cur_ops->name, gp_seq, flags);
torture_stop_kthread(rcu_torture_stats, stats_task); torture_stop_kthread(rcu_torture_stats, stats_task);
torture_stop_kthread(rcu_torture_fqs, fqs_task); torture_stop_kthread(rcu_torture_fqs, fqs_task);
for (i = 0; i < ncbflooders; i++)
torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
if (rcu_torture_can_boost()) if (rcu_torture_can_boost())
cpuhp_remove_state(rcutor_hp); cpuhp_remove_state(rcutor_hp);
@ -2252,24 +2403,6 @@ rcu_torture_init(void)
goto unwind; goto unwind;
if (object_debug) if (object_debug)
rcu_test_debug_objects(); rcu_test_debug_objects();
if (cbflood_n_burst > 0) {
/* Create the cbflood threads */
ncbflooders = (num_online_cpus() + 3) / 4;
cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
GFP_KERNEL);
if (!cbflood_task) {
VERBOSE_TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < ncbflooders; i++) {
firsterr = torture_create_kthread(rcu_torture_cbflood,
NULL,
cbflood_task[i]);
if (firsterr)
goto unwind;
}
}
torture_init_end(); torture_init_end();
return 0; return 0;
@ -37,30 +37,30 @@ int rcu_scheduler_active __read_mostly;
static LIST_HEAD(srcu_boot_list); static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done; static bool srcu_init_done;
static int init_srcu_struct_fields(struct srcu_struct *sp) static int init_srcu_struct_fields(struct srcu_struct *ssp)
{ {
sp->srcu_lock_nesting[0] = 0; ssp->srcu_lock_nesting[0] = 0;
sp->srcu_lock_nesting[1] = 0; ssp->srcu_lock_nesting[1] = 0;
init_swait_queue_head(&sp->srcu_wq); init_swait_queue_head(&ssp->srcu_wq);
sp->srcu_cb_head = NULL; ssp->srcu_cb_head = NULL;
sp->srcu_cb_tail = &sp->srcu_cb_head; ssp->srcu_cb_tail = &ssp->srcu_cb_head;
sp->srcu_gp_running = false; ssp->srcu_gp_running = false;
sp->srcu_gp_waiting = false; ssp->srcu_gp_waiting = false;
sp->srcu_idx = 0; ssp->srcu_idx = 0;
INIT_WORK(&sp->srcu_work, srcu_drive_gp); INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
INIT_LIST_HEAD(&sp->srcu_work.entry); INIT_LIST_HEAD(&ssp->srcu_work.entry);
return 0; return 0;
} }
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
int __init_srcu_struct(struct srcu_struct *sp, const char *name, int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key) struct lock_class_key *key)
{ {
/* Don't re-initialize a lock while it is held. */ /* Don't re-initialize a lock while it is held. */
debug_check_no_locks_freed((void *)sp, sizeof(*sp)); debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
lockdep_init_map(&sp->dep_map, name, key, 0); lockdep_init_map(&ssp->dep_map, name, key, 0);
return init_srcu_struct_fields(sp); return init_srcu_struct_fields(ssp);
} }
EXPORT_SYMBOL_GPL(__init_srcu_struct); EXPORT_SYMBOL_GPL(__init_srcu_struct);
@ -68,15 +68,15 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
/* /*
* init_srcu_struct - initialize a sleep-RCU structure * init_srcu_struct - initialize a sleep-RCU structure
* @sp: structure to initialize. * @ssp: structure to initialize.
* *
* Must invoke this on a given srcu_struct before passing that srcu_struct * Must invoke this on a given srcu_struct before passing that srcu_struct
* to any other function. Each srcu_struct represents a separate domain * to any other function. Each srcu_struct represents a separate domain
* of SRCU protection. * of SRCU protection.
*/ */
int init_srcu_struct(struct srcu_struct *sp) int init_srcu_struct(struct srcu_struct *ssp)
{ {
return init_srcu_struct_fields(sp); return init_srcu_struct_fields(ssp);
} }
EXPORT_SYMBOL_GPL(init_srcu_struct); EXPORT_SYMBOL_GPL(init_srcu_struct);
@ -84,22 +84,22 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
/* /*
* cleanup_srcu_struct - deconstruct a sleep-RCU structure * cleanup_srcu_struct - deconstruct a sleep-RCU structure
* @sp: structure to clean up. * @ssp: structure to clean up.
* *
* Must invoke this after you are finished using a given srcu_struct that * Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory. * was initialized via init_srcu_struct(), else you leak memory.
*/ */
void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced) void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
{ {
WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]); WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
if (quiesced) if (quiesced)
WARN_ON(work_pending(&sp->srcu_work)); WARN_ON(work_pending(&ssp->srcu_work));
else else
flush_work(&sp->srcu_work); flush_work(&ssp->srcu_work);
WARN_ON(sp->srcu_gp_running); WARN_ON(ssp->srcu_gp_running);
WARN_ON(sp->srcu_gp_waiting); WARN_ON(ssp->srcu_gp_waiting);
WARN_ON(sp->srcu_cb_head); WARN_ON(ssp->srcu_cb_head);
WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail); WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
} }
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
@ -107,13 +107,13 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
* Removes the count for the old reader from the appropriate element of * Removes the count for the old reader from the appropriate element of
* the srcu_struct. * the srcu_struct.
*/ */
void __srcu_read_unlock(struct srcu_struct *sp, int idx) void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{ {
int newval = sp->srcu_lock_nesting[idx] - 1; int newval = ssp->srcu_lock_nesting[idx] - 1;
WRITE_ONCE(sp->srcu_lock_nesting[idx], newval); WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
if (!newval && READ_ONCE(sp->srcu_gp_waiting)) if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
swake_up_one(&sp->srcu_wq); swake_up_one(&ssp->srcu_wq);
} }
EXPORT_SYMBOL_GPL(__srcu_read_unlock); EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@ -127,24 +127,24 @@ void srcu_drive_gp(struct work_struct *wp)
int idx; int idx;
struct rcu_head *lh; struct rcu_head *lh;
struct rcu_head *rhp; struct rcu_head *rhp;
struct srcu_struct *sp; struct srcu_struct *ssp;
sp = container_of(wp, struct srcu_struct, srcu_work); ssp = container_of(wp, struct srcu_struct, srcu_work);
if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head)) if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head))
return; /* Already running or nothing to do. */ return; /* Already running or nothing to do. */
/* Remove recently arrived callbacks and wait for readers. */ /* Remove recently arrived callbacks and wait for readers. */
WRITE_ONCE(sp->srcu_gp_running, true); WRITE_ONCE(ssp->srcu_gp_running, true);
local_irq_disable(); local_irq_disable();
lh = sp->srcu_cb_head; lh = ssp->srcu_cb_head;
sp->srcu_cb_head = NULL; ssp->srcu_cb_head = NULL;
sp->srcu_cb_tail = &sp->srcu_cb_head; ssp->srcu_cb_tail = &ssp->srcu_cb_head;
local_irq_enable(); local_irq_enable();
idx = sp->srcu_idx; idx = ssp->srcu_idx;
WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx); WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx);
WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */ WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx])); swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */ WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
/* Invoke the callbacks we removed above. */ /* Invoke the callbacks we removed above. */
while (lh) { while (lh) {
@ -161,9 +161,9 @@ void srcu_drive_gp(struct work_struct *wp)
* at interrupt level, but the ->srcu_gp_running checks will * at interrupt level, but the ->srcu_gp_running checks will
* straighten that out. * straighten that out.
*/ */
WRITE_ONCE(sp->srcu_gp_running, false); WRITE_ONCE(ssp->srcu_gp_running, false);
if (READ_ONCE(sp->srcu_cb_head)) if (READ_ONCE(ssp->srcu_cb_head))
schedule_work(&sp->srcu_work); schedule_work(&ssp->srcu_work);
} }
EXPORT_SYMBOL_GPL(srcu_drive_gp); EXPORT_SYMBOL_GPL(srcu_drive_gp);
@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(srcu_drive_gp);
* Enqueue an SRCU callback on the specified srcu_struct structure, * Enqueue an SRCU callback on the specified srcu_struct structure,
* initiating grace-period processing if it is not already running. * initiating grace-period processing if it is not already running.
*/ */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func) rcu_callback_t func)
{ {
unsigned long flags; unsigned long flags;
@ -179,14 +179,14 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
rhp->func = func; rhp->func = func;
rhp->next = NULL; rhp->next = NULL;
local_irq_save(flags); local_irq_save(flags);
*sp->srcu_cb_tail = rhp; *ssp->srcu_cb_tail = rhp;
sp->srcu_cb_tail = &rhp->next; ssp->srcu_cb_tail = &rhp->next;
local_irq_restore(flags); local_irq_restore(flags);
if (!READ_ONCE(sp->srcu_gp_running)) { if (!READ_ONCE(ssp->srcu_gp_running)) {
if (likely(srcu_init_done)) if (likely(srcu_init_done))
schedule_work(&sp->srcu_work); schedule_work(&ssp->srcu_work);
else if (list_empty(&sp->srcu_work.entry)) else if (list_empty(&ssp->srcu_work.entry))
list_add(&sp->srcu_work.entry, &srcu_boot_list); list_add(&ssp->srcu_work.entry, &srcu_boot_list);
} }
} }
EXPORT_SYMBOL_GPL(call_srcu); EXPORT_SYMBOL_GPL(call_srcu);
@ -194,13 +194,13 @@ EXPORT_SYMBOL_GPL(call_srcu);
/* /*
* synchronize_srcu - wait for prior SRCU read-side critical-section completion * synchronize_srcu - wait for prior SRCU read-side critical-section completion
*/ */
void synchronize_srcu(struct srcu_struct *sp) void synchronize_srcu(struct srcu_struct *ssp)
{ {
struct rcu_synchronize rs; struct rcu_synchronize rs;
init_rcu_head_on_stack(&rs.head); init_rcu_head_on_stack(&rs.head);
init_completion(&rs.completion); init_completion(&rs.completion);
call_srcu(sp, &rs.head, wakeme_after_rcu); call_srcu(ssp, &rs.head, wakeme_after_rcu);
wait_for_completion(&rs.completion); wait_for_completion(&rs.completion);
destroy_rcu_head_on_stack(&rs.head); destroy_rcu_head_on_stack(&rs.head);
} }
@ -219,13 +219,13 @@ void __init rcu_scheduler_starting(void)
*/ */
void __init srcu_init(void) void __init srcu_init(void)
{ {
struct srcu_struct *sp; struct srcu_struct *ssp;
srcu_init_done = true; srcu_init_done = true;
while (!list_empty(&srcu_boot_list)) { while (!list_empty(&srcu_boot_list)) {
sp = list_first_entry(&srcu_boot_list, ssp = list_first_entry(&srcu_boot_list,
struct srcu_struct, srcu_work.entry); struct srcu_struct, srcu_work.entry);
list_del_init(&sp->srcu_work.entry); list_del_init(&ssp->srcu_work.entry);
schedule_work(&sp->srcu_work); schedule_work(&ssp->srcu_work);
} }
} }
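
For orientation, srcu_drive_gp() above works by flipping ->srcu_idx and then
waiting until the readers that entered under the old index have drained.
A minimal user-space C sketch of that two-counter flip follows; it is a toy
model, not kernel code: the toy_* names are made up, it is single-threaded,
and a printf() stands in for the real swait_event_exclusive() sleep.

    #include <stdio.h>

    struct toy_srcu {
        int lock_nesting[2];    /* readers currently inside, per index */
        int idx;                /* counter that new readers bump */
    };

    static int toy_read_lock(struct toy_srcu *ssp)
    {
        int idx = ssp->idx;

        ssp->lock_nesting[idx]++;
        return idx;
    }

    static void toy_read_unlock(struct toy_srcu *ssp, int idx)
    {
        ssp->lock_nesting[idx]--;
    }

    /* Single-threaded stand-in for the flip-and-wait in srcu_drive_gp(). */
    static void toy_drive_gp(struct toy_srcu *ssp)
    {
        int old = ssp->idx;

        ssp->idx = !old;    /* new readers now use the other counter */
        /* The real code sleeps until lock_nesting[old] reaches zero. */
        printf("grace period: %d old reader(s) left on idx %d\n",
               ssp->lock_nesting[old], old);
    }

    int main(void)
    {
        struct toy_srcu s = { { 0, 0 }, 0 };
        int idx = toy_read_lock(&s);

        toy_drive_gp(&s);         /* would block: one reader still on idx 0 */
        toy_read_unlock(&s, idx);
        toy_drive_gp(&s);         /* nothing left to wait for */
        return 0;
    }
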

File diff suppressed because it is too large


@ -44,15 +44,15 @@ static const struct {
__INIT_HELD(rcu_read_lock_held) __INIT_HELD(rcu_read_lock_held)
}, },
[RCU_SCHED_SYNC] = { [RCU_SCHED_SYNC] = {
.sync = synchronize_sched, .sync = synchronize_rcu,
.call = call_rcu_sched, .call = call_rcu,
.wait = rcu_barrier_sched, .wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_sched_held) __INIT_HELD(rcu_read_lock_sched_held)
}, },
[RCU_BH_SYNC] = { [RCU_BH_SYNC] = {
.sync = synchronize_rcu_bh, .sync = synchronize_rcu,
.call = call_rcu_bh, .call = call_rcu,
.wait = rcu_barrier_bh, .wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_bh_held) __INIT_HELD(rcu_read_lock_bh_held)
}, },
}; };
@ -125,8 +125,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
rsp->gp_state = GP_PENDING; rsp->gp_state = GP_PENDING;
spin_unlock_irq(&rsp->rss_lock); spin_unlock_irq(&rsp->rss_lock);
BUG_ON(need_wait && need_sync); WARN_ON_ONCE(need_wait && need_sync);
if (need_sync) { if (need_sync) {
gp_ops[rsp->gp_type].sync(); gp_ops[rsp->gp_type].sync();
rsp->gp_state = GP_PASSED; rsp->gp_state = GP_PASSED;
@ -139,7 +138,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
* Nobody has yet been allowed the 'fast' path and thus we can * Nobody has yet been allowed the 'fast' path and thus we can
* avoid doing any sync(). The callback will get 'dropped'. * avoid doing any sync(). The callback will get 'dropped'.
*/ */
BUG_ON(rsp->gp_state != GP_PASSED); WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
} }
} }
@ -166,8 +165,8 @@ static void rcu_sync_func(struct rcu_head *rhp)
struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head); struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
unsigned long flags; unsigned long flags;
BUG_ON(rsp->gp_state != GP_PASSED); WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
BUG_ON(rsp->cb_state == CB_IDLE); WARN_ON_ONCE(rsp->cb_state == CB_IDLE);
spin_lock_irqsave(&rsp->rss_lock, flags); spin_lock_irqsave(&rsp->rss_lock, flags);
if (rsp->gp_count) { if (rsp->gp_count) {
@ -225,7 +224,7 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
{ {
int cb_state; int cb_state;
BUG_ON(rsp->gp_count); WARN_ON_ONCE(rsp->gp_count);
spin_lock_irq(&rsp->rss_lock); spin_lock_irq(&rsp->rss_lock);
if (rsp->cb_state == CB_REPLAY) if (rsp->cb_state == CB_REPLAY)
@ -235,6 +234,6 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
if (cb_state != CB_IDLE) { if (cb_state != CB_IDLE) {
gp_ops[rsp->gp_type].wait(); gp_ops[rsp->gp_type].wait();
BUG_ON(rsp->cb_state != CB_IDLE); WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
} }
} }
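
The gp_ops[] table above is how rcu_sync dispatches to a flavor's grace-period
primitives; after this change the sched and bh entries simply alias the
consolidated vanilla RCU functions. A rough stand-alone C sketch of that kind
of ops table, with hypothetical toy_* names standing in for the real ones:

    #include <stdio.h>

    enum toy_sync_type { TOY_RCU_SYNC, TOY_RCU_SCHED_SYNC, TOY_RCU_BH_SYNC, TOY_NR_SYNC };

    struct toy_gp_ops {
        const char *name;
        void (*sync)(void);
    };

    static void toy_synchronize_rcu(void)
    {
        puts("  -> synchronize_rcu()");
    }

    /* All three flavors now dispatch to the one consolidated primitive. */
    static const struct toy_gp_ops toy_gp_ops[TOY_NR_SYNC] = {
        [TOY_RCU_SYNC]       = { "rcu",       toy_synchronize_rcu },
        [TOY_RCU_SCHED_SYNC] = { "rcu_sched", toy_synchronize_rcu },
        [TOY_RCU_BH_SYNC]    = { "rcu_bh",    toy_synchronize_rcu },
    };

    int main(void)
    {
        for (int i = 0; i < TOY_NR_SYNC; i++) {
            printf("%s:\n", toy_gp_ops[i].name);
            toy_gp_ops[i].sync();
        }
        return 0;
    }
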


@ -207,6 +207,19 @@ static int rcu_gp_in_progress(void)
return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
} }
/*
* Return the number of callbacks queued on the specified CPU.
* Handles both the nocbs and normal cases.
*/
static long rcu_get_n_cbs_cpu(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
if (rcu_segcblist_is_enabled(&rdp->cblist)) /* Online normal CPU? */
return rcu_segcblist_n_cbs(&rdp->cblist);
return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */
}
void rcu_softirq_qs(void) void rcu_softirq_qs(void)
{ {
rcu_qs(); rcu_qs();
@ -499,17 +512,30 @@ void rcu_force_quiescent_state(void)
} }
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
* Convert a ->gp_state value to a character string.
*/
static const char *gp_state_getname(short gs)
{
if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
return "???";
return gp_state_names[gs];
}
/* /*
* Show the state of the grace-period kthreads. * Show the state of the grace-period kthreads.
*/ */
void show_rcu_gp_kthreads(void) void show_rcu_gp_kthreads(void)
{ {
int cpu; int cpu;
unsigned long j;
struct rcu_data *rdp; struct rcu_data *rdp;
struct rcu_node *rnp; struct rcu_node *rnp;
pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name, j = jiffies - READ_ONCE(rcu_state.gp_activity);
rcu_state.gp_state, rcu_state.gp_kthread->state); pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n",
rcu_state.name, gp_state_getname(rcu_state.gp_state),
rcu_state.gp_state, rcu_state.gp_kthread->state, j);
rcu_for_each_node_breadth_first(rnp) { rcu_for_each_node_breadth_first(rnp) {
if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
continue; continue;
@ -891,12 +917,12 @@ void rcu_irq_enter_irqson(void)
} }
/** /**
* rcu_is_watching - see if RCU thinks that the current CPU is idle * rcu_is_watching - see if RCU thinks that the current CPU is not idle
* *
* Return true if RCU is watching the running CPU, which means that this * Return true if RCU is watching the running CPU, which means that this
* CPU can safely enter RCU read-side critical sections. In other words, * CPU can safely enter RCU read-side critical sections. In other words,
* if the current CPU is in its idle loop and is neither in an interrupt * if the current CPU is not in its idle loop or is in an interrupt or
* or NMI handler, return true. * NMI handler, return true.
*/ */
bool notrace rcu_is_watching(void) bool notrace rcu_is_watching(void)
{ {
@ -1142,16 +1168,6 @@ static void record_gp_stall_check_time(void)
rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs); rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
} }
/*
* Convert a ->gp_state value to a character string.
*/
static const char *gp_state_getname(short gs)
{
if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
return "???";
return gp_state_names[gs];
}
/* /*
* Complain about starvation of grace-period kthread. * Complain about starvation of grace-period kthread.
*/ */
@ -1262,8 +1278,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
print_cpu_stall_info_end(); print_cpu_stall_info_end();
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, totqlen += rcu_get_n_cbs_cpu(cpu);
cpu)->cblist);
pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
smp_processor_id(), (long)(jiffies - rcu_state.gp_start), smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen); (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
@ -1323,8 +1338,7 @@ static void print_cpu_stall(void)
raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
print_cpu_stall_info_end(); print_cpu_stall_info_end();
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, totqlen += rcu_get_n_cbs_cpu(cpu);
cpu)->cblist);
pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n", pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
jiffies - rcu_state.gp_start, jiffies - rcu_state.gp_start,
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen); (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
@ -1986,7 +2000,8 @@ static void rcu_gp_cleanup(void)
WRITE_ONCE(rcu_state.gp_activity, jiffies); WRITE_ONCE(rcu_state.gp_activity, jiffies);
raw_spin_lock_irq_rcu_node(rnp); raw_spin_lock_irq_rcu_node(rnp);
gp_duration = jiffies - rcu_state.gp_start; rcu_state.gp_end = jiffies;
gp_duration = rcu_state.gp_end - rcu_state.gp_start;
if (gp_duration > rcu_state.gp_max) if (gp_duration > rcu_state.gp_max)
rcu_state.gp_max = gp_duration; rcu_state.gp_max = gp_duration;
@ -2032,9 +2047,9 @@ static void rcu_gp_cleanup(void)
rnp = rcu_get_root(); rnp = rcu_get_root();
raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
/* Declare grace period done. */ /* Declare grace period done, trace first to use old GP number. */
rcu_seq_end(&rcu_state.gp_seq);
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
rcu_seq_end(&rcu_state.gp_seq);
rcu_state.gp_state = RCU_GP_IDLE; rcu_state.gp_state = RCU_GP_IDLE;
/* Check for GP requests since above loop. */ /* Check for GP requests since above loop. */
rdp = this_cpu_ptr(&rcu_data); rdp = this_cpu_ptr(&rcu_data);
@ -2600,10 +2615,10 @@ static void force_quiescent_state(void)
* This function checks for grace-period requests that fail to motivate * This function checks for grace-period requests that fail to motivate
* RCU to come out of its idle mode. * RCU to come out of its idle mode.
*/ */
static void void
rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
const unsigned long gpssdelay)
{ {
const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
unsigned long flags; unsigned long flags;
unsigned long j; unsigned long j;
struct rcu_node *rnp_root = rcu_get_root(); struct rcu_node *rnp_root = rcu_get_root();
@ -2654,6 +2669,48 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags); raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
} }
/*
* Do a forward-progress check for rcutorture. This is normally invoked
* due to an OOM event. The argument "j" gives the time period during
* which rcutorture would like progress to have been made.
*/
void rcu_fwd_progress_check(unsigned long j)
{
unsigned long cbs;
int cpu;
unsigned long max_cbs = 0;
int max_cpu = -1;
struct rcu_data *rdp;
if (rcu_gp_in_progress()) {
pr_info("%s: GP age %lu jiffies\n",
__func__, jiffies - rcu_state.gp_start);
show_rcu_gp_kthreads();
} else {
pr_info("%s: Last GP end %lu jiffies ago\n",
__func__, jiffies - rcu_state.gp_end);
preempt_disable();
rdp = this_cpu_ptr(&rcu_data);
rcu_check_gp_start_stall(rdp->mynode, rdp, j);
preempt_enable();
}
for_each_possible_cpu(cpu) {
cbs = rcu_get_n_cbs_cpu(cpu);
if (!cbs)
continue;
if (max_cpu < 0)
pr_info("%s: callbacks", __func__);
pr_cont(" %d: %lu", cpu, cbs);
if (cbs <= max_cbs)
continue;
max_cbs = cbs;
max_cpu = cpu;
}
if (max_cpu >= 0)
pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
/* /*
* This does the RCU core processing work for the specified rcu_data * This does the RCU core processing work for the specified rcu_data
* structures. This may be called only from the CPU to whom the rdp * structures. This may be called only from the CPU to whom the rdp
@ -2690,7 +2747,7 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
local_irq_restore(flags); local_irq_restore(flags);
} }
rcu_check_gp_start_stall(rnp, rdp); rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
/* If there are callbacks ready, invoke them. */ /* If there are callbacks ready, invoke them. */
if (rcu_segcblist_ready_cbs(&rdp->cblist)) if (rcu_segcblist_ready_cbs(&rdp->cblist))
@ -2826,7 +2883,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
* Very early boot, before rcu_init(). Initialize if needed * Very early boot, before rcu_init(). Initialize if needed
* and then drop through to queue the callback. * and then drop through to queue the callback.
*/ */
BUG_ON(cpu != -1); WARN_ON_ONCE(cpu != -1);
WARN_ON_ONCE(!rcu_is_watching()); WARN_ON_ONCE(!rcu_is_watching());
if (rcu_segcblist_empty(&rdp->cblist)) if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist); rcu_segcblist_init(&rdp->cblist);
@ -3485,7 +3542,8 @@ static int __init rcu_spawn_gp_kthread(void)
rcu_scheduler_fully_active = 1; rcu_scheduler_fully_active = 1;
t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
BUG_ON(IS_ERR(t)); if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
return 0;
rnp = rcu_get_root(); rnp = rcu_get_root();
raw_spin_lock_irqsave_rcu_node(rnp, flags); raw_spin_lock_irqsave_rcu_node(rnp, flags);
rcu_state.gp_kthread = t; rcu_state.gp_kthread = t;
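
The new rcu_fwd_progress_check() above scans every possible CPU, skips CPUs
with no queued callbacks, prints a single summary line, and remembers the
busiest CPU. A self-contained C sketch of the same scan pattern, run over
made-up per-CPU queue lengths rather than real rcu_data:

    #include <stdio.h>

    int main(void)
    {
        unsigned long cbs[] = { 0, 12, 0, 3107, 0, 45 };  /* fake per-CPU counts */
        int ncpus = sizeof(cbs) / sizeof(cbs[0]);
        unsigned long max_cbs = 0;
        int max_cpu = -1;

        for (int cpu = 0; cpu < ncpus; cpu++) {
            if (!cbs[cpu])
                continue;                   /* nothing queued, skip this CPU */
            if (max_cpu < 0)
                printf("callbacks");        /* header printed only once */
            printf(" %d: %lu", cpu, cbs[cpu]);
            if (cbs[cpu] <= max_cbs)
                continue;
            max_cbs = cbs[cpu];             /* remember the busiest CPU */
            max_cpu = cpu;
        }
        if (max_cpu >= 0)
            printf("\n");
        return 0;
    }
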


@ -57,7 +57,7 @@ struct rcu_node {
/* some rcu_state fields as well as */ /* some rcu_state fields as well as */
/* following. */ /* following. */
unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */ unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */
unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */ unsigned long gp_seq_needed; /* Track furthest future GP request. */
unsigned long completedqs; /* All QSes done for this node. */ unsigned long completedqs; /* All QSes done for this node. */
unsigned long qsmask; /* CPUs or groups that need to switch in */ unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/ /* order for current grace period to proceed.*/
@ -163,7 +163,7 @@ union rcu_noqs {
struct rcu_data { struct rcu_data {
/* 1) quiescent-state and grace-period handling : */ /* 1) quiescent-state and grace-period handling : */
unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */ unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */
unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */ unsigned long gp_seq_needed; /* Track furthest future GP request. */
union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */ union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
bool core_needs_qs; /* Core waits for quiesc state. */ bool core_needs_qs; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */ bool beenonline; /* CPU online at least once. */
@ -328,6 +328,8 @@ struct rcu_state {
/* force_quiescent_state(). */ /* force_quiescent_state(). */
unsigned long gp_start; /* Time at which GP started, */ unsigned long gp_start; /* Time at which GP started, */
/* but in jiffies. */ /* but in jiffies. */
unsigned long gp_end; /* Time last GP ended, again */
/* in jiffies. */
unsigned long gp_activity; /* Time of last GP kthread */ unsigned long gp_activity; /* Time of last GP kthread */
/* activity in jiffies. */ /* activity in jiffies. */
unsigned long gp_req_activity; /* Time of last GP request */ unsigned long gp_req_activity; /* Time of last GP request */
@ -398,17 +400,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name #define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */ #endif /* #else #ifdef CONFIG_TRACING */
/*
* RCU implementation internal declarations:
*/
extern struct rcu_state rcu_sched_state;
extern struct rcu_state rcu_bh_state;
#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
int rcu_dynticks_snap(struct rcu_data *rdp); int rcu_dynticks_snap(struct rcu_data *rdp);
#ifdef CONFIG_RCU_BOOST #ifdef CONFIG_RCU_BOOST
@ -466,6 +457,7 @@ static void __init rcu_spawn_nocb_kthreads(void);
static void __init rcu_organize_nocb_kthreads(void); static void __init rcu_organize_nocb_kthreads(void);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool init_nocb_callback_list(struct rcu_data *rdp); static bool init_nocb_callback_list(struct rcu_data *rdp);
static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp);
static void rcu_bind_gp_kthread(void); static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void); static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void); static void rcu_dynticks_task_enter(void);


@ -450,10 +450,12 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
} }
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
preempt_disable(); preempt_disable();
cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask); cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
/* If all offline, queue the work on an unbound CPU. */ /* If all offline, queue the work on an unbound CPU. */
if (unlikely(cpu > rnp->grphi)) if (unlikely(cpu > rnp->grphi - rnp->grplo))
cpu = WORK_CPU_UNBOUND; cpu = WORK_CPU_UNBOUND;
else
cpu += rnp->grplo;
queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
preempt_enable(); preempt_enable();
rnp->exp_need_flush = true; rnp->exp_need_flush = true;
@ -690,8 +692,10 @@ static void sync_rcu_exp_handler(void *unused)
*/ */
if (t->rcu_read_lock_nesting > 0) { if (t->rcu_read_lock_nesting > 0) {
raw_spin_lock_irqsave_rcu_node(rnp, flags); raw_spin_lock_irqsave_rcu_node(rnp, flags);
if (rnp->expmask & rdp->grpmask) if (rnp->expmask & rdp->grpmask) {
rdp->deferred_qs = true; rdp->deferred_qs = true;
WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
}
raw_spin_unlock_irqrestore_rcu_node(rnp, flags); raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
} }
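
sync_rcu_exp_select_cpus() above now picks a workqueue CPU from the leaf
node's ->ffmask, converting a node-local bit number into a global CPU id and
falling back to an unbound workqueue when the node has no usable CPU. A small
user-space sketch of that conversion, using a hypothetical pick_cpu() helper
and GCC's __builtin_ctzl():

    #include <stdio.h>

    #define TOY_CPU_UNBOUND (-1)    /* stand-in for WORK_CPU_UNBOUND */

    static int pick_cpu(unsigned long ffmask, int grplo, int grphi)
    {
        int bit;

        if (!ffmask)
            return TOY_CPU_UNBOUND;     /* no usable CPUs in this node */
        bit = __builtin_ctzl(ffmask);   /* lowest set bit = first usable CPU */
        if (bit > grphi - grplo)
            return TOY_CPU_UNBOUND;     /* bit is outside this node's range */
        return grplo + bit;             /* node-local bit -> global CPU id */
    }

    int main(void)
    {
        printf("%d\n", pick_cpu(0x4UL, 16, 23));  /* bit 2 of node 16..23 -> 18 */
        printf("%d\n", pick_cpu(0x0UL, 16, 23));  /* empty mask -> -1 (unbound) */
        return 0;
    }
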


@ -397,6 +397,11 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
return rnp->gp_tasks != NULL; return rnp->gp_tasks != NULL;
} }
/* Bias and limit values for ->rcu_read_lock_nesting. */
#define RCU_NEST_BIAS INT_MAX
#define RCU_NEST_NMAX (-INT_MAX / 2)
#define RCU_NEST_PMAX (INT_MAX / 2)
/* /*
* Preemptible RCU implementation for rcu_read_lock(). * Preemptible RCU implementation for rcu_read_lock().
* Just increment ->rcu_read_lock_nesting, shared state will be updated * Just increment ->rcu_read_lock_nesting, shared state will be updated
@ -405,6 +410,8 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
void __rcu_read_lock(void) void __rcu_read_lock(void)
{ {
current->rcu_read_lock_nesting++; current->rcu_read_lock_nesting++;
if (IS_ENABLED(CONFIG_PROVE_LOCKING))
WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
barrier(); /* critical section after entry code. */ barrier(); /* critical section after entry code. */
} }
EXPORT_SYMBOL_GPL(__rcu_read_lock); EXPORT_SYMBOL_GPL(__rcu_read_lock);
@ -424,20 +431,18 @@ void __rcu_read_unlock(void)
--t->rcu_read_lock_nesting; --t->rcu_read_lock_nesting;
} else { } else {
barrier(); /* critical section before exit code. */ barrier(); /* critical section before exit code. */
t->rcu_read_lock_nesting = INT_MIN; t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
barrier(); /* assign before ->rcu_read_unlock_special load */ barrier(); /* assign before ->rcu_read_unlock_special load */
if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
rcu_read_unlock_special(t); rcu_read_unlock_special(t);
barrier(); /* ->rcu_read_unlock_special load before assign */ barrier(); /* ->rcu_read_unlock_special load before assign */
t->rcu_read_lock_nesting = 0; t->rcu_read_lock_nesting = 0;
} }
#ifdef CONFIG_PROVE_LOCKING if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
{ int rrln = t->rcu_read_lock_nesting;
int rrln = READ_ONCE(t->rcu_read_lock_nesting);
WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
} }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
} }
EXPORT_SYMBOL_GPL(__rcu_read_unlock); EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@ -597,7 +602,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
*/ */
static bool rcu_preempt_need_deferred_qs(struct task_struct *t) static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{ {
return (this_cpu_ptr(&rcu_data)->deferred_qs || return (__this_cpu_read(rcu_data.deferred_qs) ||
READ_ONCE(t->rcu_read_unlock_special.s)) && READ_ONCE(t->rcu_read_unlock_special.s)) &&
t->rcu_read_lock_nesting <= 0; t->rcu_read_lock_nesting <= 0;
} }
@ -617,11 +622,11 @@ static void rcu_preempt_deferred_qs(struct task_struct *t)
if (!rcu_preempt_need_deferred_qs(t)) if (!rcu_preempt_need_deferred_qs(t))
return; return;
if (couldrecurse) if (couldrecurse)
t->rcu_read_lock_nesting -= INT_MIN; t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
local_irq_save(flags); local_irq_save(flags);
rcu_preempt_deferred_qs_irqrestore(t, flags); rcu_preempt_deferred_qs_irqrestore(t, flags);
if (couldrecurse) if (couldrecurse)
t->rcu_read_lock_nesting += INT_MIN; t->rcu_read_lock_nesting += RCU_NEST_BIAS;
} }
/* /*
@ -642,13 +647,21 @@ static void rcu_read_unlock_special(struct task_struct *t)
local_irq_save(flags); local_irq_save(flags);
irqs_were_disabled = irqs_disabled_flags(flags); irqs_were_disabled = irqs_disabled_flags(flags);
if ((preempt_bh_were_disabled || irqs_were_disabled) && if (preempt_bh_were_disabled || irqs_were_disabled) {
t->rcu_read_unlock_special.b.blocked) { WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
/* Need to defer quiescent state until everything is enabled. */ /* Need to defer quiescent state until everything is enabled. */
if (irqs_were_disabled) {
/* Enabling irqs does not reschedule, so... */
raise_softirq_irqoff(RCU_SOFTIRQ); raise_softirq_irqoff(RCU_SOFTIRQ);
} else {
/* Enabling BH or preempt does reschedule, so... */
set_tsk_need_resched(current);
set_preempt_need_resched();
}
local_irq_restore(flags); local_irq_restore(flags);
return; return;
} }
WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
rcu_preempt_deferred_qs_irqrestore(t, flags); rcu_preempt_deferred_qs_irqrestore(t, flags);
} }
@ -1464,7 +1477,8 @@ static void __init rcu_spawn_boost_kthreads(void)
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
per_cpu(rcu_cpu_has_work, cpu) = 0; per_cpu(rcu_cpu_has_work, cpu) = 0;
BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
return;
rcu_for_each_leaf_node(rnp) rcu_for_each_leaf_node(rnp)
(void)rcu_spawn_one_boost_kthread(rnp); (void)rcu_spawn_one_boost_kthread(rnp);
} }
@ -1997,7 +2011,7 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
* (if a callback is in fact needed). This is associated with an * (if a callback is in fact needed). This is associated with an
* atomic_inc() in the caller. * atomic_inc() in the caller.
*/ */
ret = atomic_long_read(&rdp->nocb_q_count); ret = rcu_get_n_cbs_nocb_cpu(rdp);
#ifdef CONFIG_PROVE_RCU #ifdef CONFIG_PROVE_RCU
rhp = READ_ONCE(rdp->nocb_head); rhp = READ_ONCE(rdp->nocb_head);
@ -2052,7 +2066,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
TPS("WakeNotPoll")); TPS("WakeNotPoll"));
return; return;
} }
len = atomic_long_read(&rdp->nocb_q_count); len = rcu_get_n_cbs_nocb_cpu(rdp);
if (old_rhpp == &rdp->nocb_head) { if (old_rhpp == &rdp->nocb_head) {
if (!irqs_disabled_flags(flags)) { if (!irqs_disabled_flags(flags)) {
/* ... if queue was empty ... */ /* ... if queue was empty ... */
@ -2101,11 +2115,11 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
trace_rcu_kfree_callback(rcu_state.name, rhp, trace_rcu_kfree_callback(rcu_state.name, rhp,
(unsigned long)rhp->func, (unsigned long)rhp->func,
-atomic_long_read(&rdp->nocb_q_count_lazy), -atomic_long_read(&rdp->nocb_q_count_lazy),
-atomic_long_read(&rdp->nocb_q_count)); -rcu_get_n_cbs_nocb_cpu(rdp));
else else
trace_rcu_callback(rcu_state.name, rhp, trace_rcu_callback(rcu_state.name, rhp,
-atomic_long_read(&rdp->nocb_q_count_lazy), -atomic_long_read(&rdp->nocb_q_count_lazy),
-atomic_long_read(&rdp->nocb_q_count)); -rcu_get_n_cbs_nocb_cpu(rdp));
/* /*
* If called from an extended quiescent state with interrupts * If called from an extended quiescent state with interrupts
@ -2322,13 +2336,14 @@ static int rcu_nocb_kthread(void *arg)
tail = rdp->nocb_follower_tail; tail = rdp->nocb_follower_tail;
rdp->nocb_follower_tail = &rdp->nocb_follower_head; rdp->nocb_follower_tail = &rdp->nocb_follower_head;
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
BUG_ON(!list); if (WARN_ON_ONCE(!list))
continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty")); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));
/* Each pass through the following loop invokes a callback. */ /* Each pass through the following loop invokes a callback. */
trace_rcu_batch_start(rcu_state.name, trace_rcu_batch_start(rcu_state.name,
atomic_long_read(&rdp->nocb_q_count_lazy), atomic_long_read(&rdp->nocb_q_count_lazy),
atomic_long_read(&rdp->nocb_q_count), -1); rcu_get_n_cbs_nocb_cpu(rdp), -1);
c = cl = 0; c = cl = 0;
while (list) { while (list) {
next = list->next; next = list->next;
@ -2495,7 +2510,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
/* Spawn the kthread for this CPU. */ /* Spawn the kthread for this CPU. */
t = kthread_run(rcu_nocb_kthread, rdp_spawn, t = kthread_run(rcu_nocb_kthread, rdp_spawn,
"rcuo%c/%d", rcu_state.abbr, cpu); "rcuo%c/%d", rcu_state.abbr, cpu);
BUG_ON(IS_ERR(t)); if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo kthread, OOM is now expected behavior\n", __func__))
return;
WRITE_ONCE(rdp_spawn->nocb_kthread, t); WRITE_ONCE(rdp_spawn->nocb_kthread, t);
} }
@ -2587,6 +2603,26 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
return true; return true;
} }
/*
* Bind the current task to the offloaded CPUs. If there are no offloaded
* CPUs, leave the task unbound. Splat if the bind attempt fails.
*/
void rcu_bind_current_to_nocb(void)
{
if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
/*
* Return the number of RCU callbacks still queued from the specified
* CPU, which must be a nocbs CPU.
*/
static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
{
return atomic_long_read(&rdp->nocb_q_count);
}
#else /* #ifdef CONFIG_RCU_NOCB_CPU */ #else /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool rcu_nocb_cpu_needs_barrier(int cpu) static bool rcu_nocb_cpu_needs_barrier(int cpu)
@ -2647,6 +2683,11 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
return false; return false;
} }
static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
{
return 0;
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/* /*


@ -335,8 +335,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
/* Initialize and register callbacks for each crcu_array element. */ /* Initialize and register callbacks for each crcu_array element. */
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
if (checktiny && if (checktiny &&
(crcu_array[i] == call_rcu || (crcu_array[i] == call_rcu)) {
crcu_array[i] == call_rcu_bh)) {
might_sleep(); might_sleep();
continue; continue;
} }
@ -352,8 +351,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
/* Wait for all callbacks to be invoked. */ /* Wait for all callbacks to be invoked. */
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
if (checktiny && if (checktiny &&
(crcu_array[i] == call_rcu || (crcu_array[i] == call_rcu))
crcu_array[i] == call_rcu_bh))
continue; continue;
for (j = 0; j < i; j++) for (j = 0; j < i; j++)
if (crcu_array[j] == crcu_array[i]) if (crcu_array[j] == crcu_array[i])
@ -822,7 +820,8 @@ static int __init rcu_spawn_tasks_kthread(void)
struct task_struct *t; struct task_struct *t;
t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread"); t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
BUG_ON(IS_ERR(t)); if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
return 0;
smp_mb(); /* Ensure others see full kthread. */ smp_mb(); /* Ensure others see full kthread. */
WRITE_ONCE(rcu_tasks_kthread_ptr, t); WRITE_ONCE(rcu_tasks_kthread_ptr, t);
return 0; return 0;


@ -5783,7 +5783,7 @@ int sched_cpu_deactivate(unsigned int cpu)
* *
* Do sync before park smpboot threads to take care the rcu boost case. * Do sync before park smpboot threads to take care the rcu boost case.
*/ */
synchronize_rcu_mult(call_rcu, call_rcu_sched); synchronize_rcu();
#ifdef CONFIG_SCHED_SMT #ifdef CONFIG_SCHED_SMT
/* /*


@ -210,7 +210,7 @@ static int membarrier_register_global_expedited(void)
* future scheduler executions will observe the new * future scheduler executions will observe the new
* thread flag state for this mm. * thread flag state for this mm.
*/ */
synchronize_sched(); synchronize_rcu();
} }
atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY, atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
&mm->membarrier_state); &mm->membarrier_state);
@ -246,7 +246,7 @@ static int membarrier_register_private_expedited(int flags)
* Ensure all future scheduler executions will observe the * Ensure all future scheduler executions will observe the
* new thread flag state for this process. * new thread flag state for this process.
*/ */
synchronize_sched(); synchronize_rcu();
} }
atomic_or(state, &mm->membarrier_state); atomic_or(state, &mm->membarrier_state);
@ -298,7 +298,7 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
if (tick_nohz_full_enabled()) if (tick_nohz_full_enabled())
return -EINVAL; return -EINVAL;
if (num_online_cpus() > 1) if (num_online_cpus() > 1)
synchronize_sched(); synchronize_rcu();
return 0; return 0;
case MEMBARRIER_CMD_GLOBAL_EXPEDITED: case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
return membarrier_global_expedited(); return membarrier_global_expedited();


@ -194,11 +194,23 @@ torture_onoff(void *arg)
int cpu; int cpu;
int maxcpu = -1; int maxcpu = -1;
DEFINE_TORTURE_RANDOM(rand); DEFINE_TORTURE_RANDOM(rand);
int ret;
VERBOSE_TOROUT_STRING("torture_onoff task started"); VERBOSE_TOROUT_STRING("torture_onoff task started");
for_each_online_cpu(cpu) for_each_online_cpu(cpu)
maxcpu = cpu; maxcpu = cpu;
WARN_ON(maxcpu < 0); WARN_ON(maxcpu < 0);
if (!IS_MODULE(CONFIG_TORTURE_TEST))
for_each_possible_cpu(cpu) {
if (cpu_online(cpu))
continue;
ret = cpu_up(cpu);
if (ret && verbose) {
pr_alert("%s" TORTURE_FLAG
"%s: Initial online %d: errno %d\n",
__func__, torture_type, cpu, ret);
}
}
if (maxcpu == 0) { if (maxcpu == 0) {
VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled"); VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
@ -233,16 +245,15 @@ torture_onoff(void *arg)
*/ */
int torture_onoff_init(long ooholdoff, long oointerval) int torture_onoff_init(long ooholdoff, long oointerval)
{ {
int ret = 0;
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
onoff_holdoff = ooholdoff; onoff_holdoff = ooholdoff;
onoff_interval = oointerval; onoff_interval = oointerval;
if (onoff_interval <= 0) if (onoff_interval <= 0)
return 0; return 0;
ret = torture_create_kthread(torture_onoff, NULL, onoff_task); return torture_create_kthread(torture_onoff, NULL, onoff_task);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */ #else /* #ifdef CONFIG_HOTPLUG_CPU */
return ret; return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
} }
EXPORT_SYMBOL_GPL(torture_onoff_init); EXPORT_SYMBOL_GPL(torture_onoff_init);
@ -513,15 +524,13 @@ static int torture_shutdown(void *arg)
*/ */
int torture_shutdown_init(int ssecs, void (*cleanup)(void)) int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{ {
int ret = 0;
torture_shutdown_hook = cleanup; torture_shutdown_hook = cleanup;
if (ssecs > 0) { if (ssecs > 0) {
shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0)); shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
ret = torture_create_kthread(torture_shutdown, NULL, return torture_create_kthread(torture_shutdown, NULL,
shutdown_task); shutdown_task);
} }
return ret; return 0;
} }
EXPORT_SYMBOL_GPL(torture_shutdown_init); EXPORT_SYMBOL_GPL(torture_shutdown_init);
@ -620,13 +629,10 @@ static int torture_stutter(void *arg)
/* /*
* Initialize and kick off the torture_stutter kthread. * Initialize and kick off the torture_stutter kthread.
*/ */
int torture_stutter_init(int s) int torture_stutter_init(const int s)
{ {
int ret;
stutter = s; stutter = s;
ret = torture_create_kthread(torture_stutter, NULL, stutter_task); return torture_create_kthread(torture_stutter, NULL, stutter_task);
return ret;
} }
EXPORT_SYMBOL_GPL(torture_stutter_init); EXPORT_SYMBOL_GPL(torture_stutter_init);


@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work)
{ {
/* /*
* This function is just a stub to implement a hard force * This function is just a stub to implement a hard force
* of synchronize_sched(). This requires synchronizing * of synchronize_rcu(). This requires synchronizing
* tasks even in userspace and idle. * tasks even in userspace and idle.
* *
* Yes, function tracing is rude. * Yes, function tracing is rude.
@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
ftrace_profile_enabled = 0; ftrace_profile_enabled = 0;
/* /*
* unregister_ftrace_profiler calls stop_machine * unregister_ftrace_profiler calls stop_machine
* so this acts like an synchronize_sched. * so this acts like an synchronize_rcu.
*/ */
unregister_ftrace_profiler(); unregister_ftrace_profiler();
} }
@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
/* /*
* Some of the ops may be dynamically allocated, * Some of the ops may be dynamically allocated,
* they are freed after a synchronize_sched(). * they are freed after a synchronize_rcu().
*/ */
preempt_disable_notrace(); preempt_disable_notrace();
@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{ {
if (!hash || hash == EMPTY_HASH) if (!hash || hash == EMPTY_HASH)
return; return;
call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
} }
void ftrace_free_filter(struct ftrace_ops *ops) void ftrace_free_filter(struct ftrace_ops *ops)
@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip,
* the ip is not in the ops->notrace_hash. * the ip is not in the ops->notrace_hash.
* *
* This needs to be called with preemption disabled as * This needs to be called with preemption disabled as
* the hashes are freed with call_rcu_sched(). * the hashes are freed with call_rcu().
*/ */
static int static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
if (ftrace_enabled && !ftrace_hash_empty(hash)) if (ftrace_enabled && !ftrace_hash_empty(hash))
ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
&old_hash_ops); &old_hash_ops);
synchronize_sched(); synchronize_rcu();
hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
hlist_del(&entry->hlist); hlist_del(&entry->hlist);
@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
mutex_unlock(&graph_lock); mutex_unlock(&graph_lock);
/* Wait till all users are no longer using the old hash */ /* Wait till all users are no longer using the old hash */
synchronize_sched(); synchronize_rcu();
free_ftrace_hash(old_hash); free_ftrace_hash(old_hash);
} }
@ -5708,7 +5708,7 @@ void ftrace_release_mod(struct module *mod)
list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
if (mod_map->mod == mod) { if (mod_map->mod == mod) {
list_del_rcu(&mod_map->list); list_del_rcu(&mod_map->list);
call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map); call_rcu(&mod_map->rcu, ftrace_free_mod_map);
break; break;
} }
} }
@ -5928,7 +5928,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
struct ftrace_mod_map *mod_map; struct ftrace_mod_map *mod_map;
const char *ret = NULL; const char *ret = NULL;
/* mod_map is freed via call_rcu_sched() */ /* mod_map is freed via call_rcu() */
preempt_disable(); preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
@ -6263,7 +6263,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
/* /*
* Some of the ops may be dynamically allocated, * Some of the ops may be dynamically allocated,
* they must be freed after a synchronize_sched(). * they must be freed after a synchronize_rcu().
*/ */
preempt_disable_notrace(); preempt_disable_notrace();
@ -6434,7 +6434,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
rcu_assign_pointer(tr->function_pids, NULL); rcu_assign_pointer(tr->function_pids, NULL);
/* Wait till all users are no longer using pid filtering */ /* Wait till all users are no longer using pid filtering */
synchronize_sched(); synchronize_rcu();
trace_free_pid_list(pid_list); trace_free_pid_list(pid_list);
} }
@ -6581,7 +6581,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
rcu_assign_pointer(tr->function_pids, pid_list); rcu_assign_pointer(tr->function_pids, pid_list);
if (filtered_pids) { if (filtered_pids) {
synchronize_sched(); synchronize_rcu();
trace_free_pid_list(filtered_pids); trace_free_pid_list(filtered_pids);
} else if (pid_list) { } else if (pid_list) {
/* Register a probe to set whether to ignore the tracing of a task */ /* Register a probe to set whether to ignore the tracing of a task */


@ -1834,7 +1834,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
* There could have been a race between checking * There could have been a race between checking
* record_disable and incrementing it. * record_disable and incrementing it.
*/ */
synchronize_sched(); synchronize_rcu();
for_each_buffer_cpu(buffer, cpu) { for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu]; cpu_buffer = buffer->buffers[cpu];
rb_check_pages(cpu_buffer); rb_check_pages(cpu_buffer);
@ -3151,7 +3151,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
* This prevents all writes to the buffer. Any attempt to write * This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL. * to the buffer after this will fail and return NULL.
* *
* The caller should call synchronize_sched() after this. * The caller should call synchronize_rcu() after this.
*/ */
void ring_buffer_record_disable(struct ring_buffer *buffer) void ring_buffer_record_disable(struct ring_buffer *buffer)
{ {
@ -3253,7 +3253,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
* This prevents all writes to the buffer. Any attempt to write * This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL. * to the buffer after this will fail and return NULL.
* *
* The caller should call synchronize_sched() after this. * The caller should call synchronize_rcu() after this.
*/ */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{ {
@ -4191,7 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
void void
ring_buffer_read_prepare_sync(void) ring_buffer_read_prepare_sync(void)
{ {
synchronize_sched(); synchronize_rcu();
} }
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
@ -4363,7 +4363,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
atomic_inc(&cpu_buffer->record_disabled); atomic_inc(&cpu_buffer->record_disabled);
/* Make sure all commits have finished */ /* Make sure all commits have finished */
synchronize_sched(); synchronize_rcu();
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@ -4496,7 +4496,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
goto out; goto out;
/* /*
* We can't do a synchronize_sched here because this * We can't do a synchronize_rcu here because this
* function can be called in atomic context. * function can be called in atomic context.
* Normally this will be called from the same CPU as cpu. * Normally this will be called from the same CPU as cpu.
* If not it's up to the caller to protect this. * If not it's up to the caller to protect this.


@ -1681,7 +1681,7 @@ void tracing_reset(struct trace_buffer *buf, int cpu)
ring_buffer_record_disable(buffer); ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */ /* Make sure all commits have finished */
synchronize_sched(); synchronize_rcu();
ring_buffer_reset_cpu(buffer, cpu); ring_buffer_reset_cpu(buffer, cpu);
ring_buffer_record_enable(buffer); ring_buffer_record_enable(buffer);
@ -1698,7 +1698,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
ring_buffer_record_disable(buffer); ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */ /* Make sure all commits have finished */
synchronize_sched(); synchronize_rcu();
buf->time_start = buffer_ftrace_now(buf, buf->cpu); buf->time_start = buffer_ftrace_now(buf, buf->cpu);
@ -2250,7 +2250,7 @@ void trace_buffered_event_disable(void)
preempt_enable(); preempt_enable();
/* Wait for all current users to finish */ /* Wait for all current users to finish */
synchronize_sched(); synchronize_rcu();
for_each_tracing_cpu(cpu) { for_each_tracing_cpu(cpu) {
free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
@ -5398,7 +5398,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (tr->current_trace->reset) if (tr->current_trace->reset)
tr->current_trace->reset(tr); tr->current_trace->reset(tr);
/* Current trace needs to be nop_trace before synchronize_sched */ /* Current trace needs to be nop_trace before synchronize_rcu */
tr->current_trace = &nop_trace; tr->current_trace = &nop_trace;
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
@ -5412,7 +5412,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
* The update_max_tr is called from interrupts disabled * The update_max_tr is called from interrupts disabled
* so a synchronized_sched() is sufficient. * so a synchronized_sched() is sufficient.
*/ */
synchronize_sched(); synchronize_rcu();
free_snapshot(tr); free_snapshot(tr);
} }
#endif #endif


@ -1616,7 +1616,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
/* /*
* The calls can still be using the old filters. * The calls can still be using the old filters.
* Do a synchronize_sched() and to ensure all calls are * Do a synchronize_rcu() and to ensure all calls are
* done with them before we free them. * done with them before we free them.
*/ */
tracepoint_synchronize_unregister(); tracepoint_synchronize_unregister();
@ -1848,7 +1848,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
if (filter) { if (filter) {
/* /*
* No event actually uses the system filter * No event actually uses the system filter
* we can free it without synchronize_sched(). * we can free it without synchronize_rcu().
*/ */
__free_filter(system->filter); __free_filter(system->filter);
system->filter = filter; system->filter = filter;


@ -333,7 +333,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
* event_call related objects, which will be accessed in * event_call related objects, which will be accessed in
* the kprobe_trace_func/kretprobe_trace_func. * the kprobe_trace_func/kretprobe_trace_func.
*/ */
synchronize_sched(); synchronize_rcu();
kfree(link); /* Ignored if link == NULL */ kfree(link); /* Ignored if link == NULL */
} }


@ -92,7 +92,7 @@ static __init int release_early_probes(void)
while (early_probes) { while (early_probes) {
tmp = early_probes; tmp = early_probes;
early_probes = tmp->next; early_probes = tmp->next;
call_rcu_sched(tmp, rcu_free_old_probes); call_rcu(tmp, rcu_free_old_probes);
} }
return 0; return 0;
@ -123,7 +123,7 @@ static inline void release_probes(struct tracepoint_func *old)
* cover both cases. So let us chain the SRCU and sched RCU * cover both cases. So let us chain the SRCU and sched RCU
* callbacks to wait for both grace periods. * callbacks to wait for both grace periods.
*/ */
call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes); call_rcu(&tp_probes->rcu, rcu_free_old_probes);
} }
} }


@ -3396,7 +3396,7 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->mayday_timer); del_timer_sync(&pool->mayday_timer);
/* sched-RCU protected to allow dereferences from get_work_pool() */ /* sched-RCU protected to allow dereferences from get_work_pool() */
call_rcu_sched(&pool->rcu, rcu_free_pool); call_rcu(&pool->rcu, rcu_free_pool);
} }
/** /**
@ -3503,14 +3503,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool); put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex); mutex_unlock(&wq_pool_mutex);
call_rcu_sched(&pwq->rcu, rcu_free_pwq); call_rcu(&pwq->rcu, rcu_free_pwq);
/* /*
* If we're the last pwq going away, @wq is already dead and no one * If we're the last pwq going away, @wq is already dead and no one
* is gonna access it anymore. Schedule RCU free. * is gonna access it anymore. Schedule RCU free.
*/ */
if (is_last) if (is_last)
call_rcu_sched(&wq->rcu, rcu_free_wq); call_rcu(&wq->rcu, rcu_free_wq);
} }
/** /**
@ -4195,7 +4195,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
* The base ref is never dropped on per-cpu pwqs. Directly * The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free. * schedule RCU free.
*/ */
call_rcu_sched(&wq->rcu, rcu_free_wq); call_rcu(&wq->rcu, rcu_free_wq);
} else { } else {
/* /*
* We're the sole accessor of @wq at this point. Directly * We're the sole accessor of @wq at this point. Directly


@ -181,7 +181,7 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch; ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
percpu_ref_get(ref); /* put after confirmation */ percpu_ref_get(ref); /* put after confirmation */
call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu); call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
} }
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)


@ -1225,7 +1225,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
{ {
struct mm_struct *mm = mm_slot->mm; struct mm_struct *mm = mm_slot->mm;
VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); lockdep_assert_held(&khugepaged_mm_lock);
if (khugepaged_test_exit(mm)) { if (khugepaged_test_exit(mm)) {
/* free mm_slot */ /* free mm_slot */
@ -1653,7 +1653,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
int progress = 0; int progress = 0;
VM_BUG_ON(!pages); VM_BUG_ON(!pages);
VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); lockdep_assert_held(&khugepaged_mm_lock);
if (khugepaged_scan.mm_slot) if (khugepaged_scan.mm_slot)
mm_slot = khugepaged_scan.mm_slot; mm_slot = khugepaged_scan.mm_slot;


@ -199,7 +199,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
if (*batch) { if (*batch) {
tlb_table_invalidate(tlb); tlb_table_invalidate(tlb);
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL; *batch = NULL;
} }
} }

View File

@ -962,10 +962,10 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
* To protect lockless access to n->shared during irq disabled context. * To protect lockless access to n->shared during irq disabled context.
* If n->shared isn't NULL in irq disabled context, accessing to it is * If n->shared isn't NULL in irq disabled context, accessing to it is
* guaranteed to be valid until irq is re-enabled, because it will be * guaranteed to be valid until irq is re-enabled, because it will be
* freed after synchronize_sched(). * freed after synchronize_rcu().
*/ */
if (old_shared && force_change) if (old_shared && force_change)
synchronize_sched(); synchronize_rcu();
fail: fail:
kfree(old_shared); kfree(old_shared);


@ -724,7 +724,7 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
css_get(&s->memcg_params.memcg->css); css_get(&s->memcg_params.memcg->css);
s->memcg_params.deact_fn = deact_fn; s->memcg_params.deact_fn = deact_fn;
call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn); call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
} }
void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@ -839,11 +839,11 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
mutex_unlock(&slab_mutex); mutex_unlock(&slab_mutex);
/* /*
* SLUB deactivates the kmem_caches through call_rcu_sched. Make * SLUB deactivates the kmem_caches through call_rcu. Make
* sure all registered rcu callbacks have been invoked. * sure all registered rcu callbacks have been invoked.
*/ */
if (IS_ENABLED(CONFIG_SLUB)) if (IS_ENABLED(CONFIG_SLUB))
rcu_barrier_sched(); rcu_barrier();
/* /*
* SLAB and SLUB create memcg kmem_caches through workqueue and SLUB * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB


@ -823,8 +823,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
VM_BUG_ON_PAGE(!PageHead(page), page); VM_BUG_ON_PAGE(!PageHead(page), page);
VM_BUG_ON_PAGE(PageCompound(page_tail), page); VM_BUG_ON_PAGE(PageCompound(page_tail), page);
VM_BUG_ON_PAGE(PageLRU(page_tail), page); VM_BUG_ON_PAGE(PageLRU(page_tail), page);
VM_BUG_ON(NR_CPUS != 1 && lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
!spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
if (!list) if (!list)
SetPageLRU(page_tail); SetPageLRU(page_tail);


@ -728,7 +728,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
rcu_assign_pointer(*pp, p->next); rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist); hlist_del_init(&p->mglist);
del_timer(&p->timer); del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg); call_rcu(&p->rcu, br_multicast_free_pg);
err = 0; err = 0;
if (!mp->ports && !mp->host_joined && if (!mp->ports && !mp->host_joined &&

View File

@ -260,7 +260,7 @@ static void br_multicast_group_expired(struct timer_list *t)
hlist_del_rcu(&mp->hlist[mdb->ver]); hlist_del_rcu(&mp->hlist[mdb->ver]);
mdb->size--; mdb->size--;
call_rcu_bh(&mp->rcu, br_multicast_free_group); call_rcu(&mp->rcu, br_multicast_free_group);
out: out:
spin_unlock(&br->multicast_lock); spin_unlock(&br->multicast_lock);
@ -291,7 +291,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
del_timer(&p->timer); del_timer(&p->timer);
br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
p->flags); p->flags);
call_rcu_bh(&p->rcu, br_multicast_free_pg); call_rcu(&p->rcu, br_multicast_free_pg);
if (!mp->ports && !mp->host_joined && if (!mp->ports && !mp->host_joined &&
netif_running(br->dev)) netif_running(br->dev))
@ -358,7 +358,7 @@ static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
} }
br_mdb_rehash_seq++; br_mdb_rehash_seq++;
call_rcu_bh(&mdb->rcu, br_mdb_free); call_rcu(&mdb->rcu, br_mdb_free);
out: out:
rcu_assign_pointer(*mdbp, mdb); rcu_assign_pointer(*mdbp, mdb);
@ -1629,7 +1629,7 @@ br_multicast_leave_group(struct net_bridge *br,
rcu_assign_pointer(*pp, p->next); rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist); hlist_del_init(&p->mglist);
del_timer(&p->timer); del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg); call_rcu(&p->rcu, br_multicast_free_pg);
br_mdb_notify(br->dev, port, group, RTM_DELMDB, br_mdb_notify(br->dev, port, group, RTM_DELMDB,
p->flags); p->flags);
@ -2051,19 +2051,19 @@ void br_multicast_dev_del(struct net_bridge *br)
hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
hlist[ver]) { hlist[ver]) {
del_timer(&mp->timer); del_timer(&mp->timer);
call_rcu_bh(&mp->rcu, br_multicast_free_group); call_rcu(&mp->rcu, br_multicast_free_group);
} }
} }
if (mdb->old) { if (mdb->old) {
spin_unlock_bh(&br->multicast_lock); spin_unlock_bh(&br->multicast_lock);
rcu_barrier_bh(); rcu_barrier();
spin_lock_bh(&br->multicast_lock); spin_lock_bh(&br->multicast_lock);
WARN_ON(mdb->old); WARN_ON(mdb->old);
} }
mdb->old = mdb; mdb->old = mdb;
call_rcu_bh(&mdb->rcu, br_mdb_free); call_rcu(&mdb->rcu, br_mdb_free);
out: out:
spin_unlock_bh(&br->multicast_lock); spin_unlock_bh(&br->multicast_lock);

View File

@ -801,7 +801,7 @@ void __netpoll_cleanup(struct netpoll *np)
ops->ndo_netpoll_cleanup(np->dev); ops->ndo_netpoll_cleanup(np->dev);
RCU_INIT_POINTER(np->dev->npinfo, NULL); RCU_INIT_POINTER(np->dev->npinfo, NULL);
call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
} else } else
RCU_INIT_POINTER(np->dev->npinfo, NULL); RCU_INIT_POINTER(np->dev->npinfo, NULL);
} }
@ -812,7 +812,7 @@ void __netpoll_free(struct netpoll *np)
ASSERT_RTNL(); ASSERT_RTNL();
/* Wait for transmitting packets to finish before freeing. */ /* Wait for transmitting packets to finish before freeing. */
synchronize_rcu_bh(); synchronize_rcu();
__netpoll_cleanup(np); __netpoll_cleanup(np);
kfree(np); kfree(np);
} }

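The netpoll hunk above follows the classic unpublish-then-wait-then-free sequence: clear the RCU-visible pointer, wait out pre-existing readers with synchronize_rcu(), then free the object. A minimal sketch of that general pattern, with hypothetical names:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
};

static struct foo __rcu *global_foo;
static DEFINE_MUTEX(foo_mutex);

static void foo_teardown(void)
{
	struct foo *f;

	mutex_lock(&foo_mutex);
	f = rcu_dereference_protected(global_foo, lockdep_is_held(&foo_mutex));
	RCU_INIT_POINTER(global_foo, NULL);	/* unpublish */
	mutex_unlock(&foo_mutex);

	synchronize_rcu();	/* wait until existing readers are done */
	kfree(f);		/* no reader can still see f */
}
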
View File

@ -583,7 +583,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
write_unlock_bh(&sk->sk_callback_lock); write_unlock_bh(&sk->sk_callback_lock);
sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
call_rcu_sched(&psock->rcu, sk_psock_destroy); call_rcu(&psock->rcu, sk_psock_destroy);
} }
EXPORT_SYMBOL_GPL(sk_psock_drop); EXPORT_SYMBOL_GPL(sk_psock_drop);

View File

@ -2405,7 +2405,7 @@ static void __exit decnet_exit(void)
proto_unregister(&dn_proto); proto_unregister(&dn_proto);
rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */ rcu_barrier(); /* Wait for completion of call_rcu()'s */
} }
module_exit(decnet_exit); module_exit(decnet_exit);
#endif #endif

View File

@ -540,7 +540,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
if (--tab->refcnt == 0) { if (--tab->refcnt == 0) {
list_del(&tab->list); list_del(&tab->list);
call_rcu_bh(&tab->rcu, stab_kfree_rcu); call_rcu(&tab->rcu, stab_kfree_rcu);
} }
} }
EXPORT_SYMBOL(qdisc_put_stab); EXPORT_SYMBOL(qdisc_put_stab);

View File

@ -1372,7 +1372,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
if (!tp_head) { if (!tp_head) {
RCU_INIT_POINTER(*miniqp->p_miniq, NULL); RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
/* Wait for flying RCU callback before it is freed. */ /* Wait for flying RCU callback before it is freed. */
rcu_barrier_bh(); rcu_barrier();
return; return;
} }
@ -1380,10 +1380,10 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
&miniqp->miniq1 : &miniqp->miniq2; &miniqp->miniq1 : &miniqp->miniq2;
/* We need to make sure that readers won't see the miniq /* We need to make sure that readers won't see the miniq
* we are about to modify. So wait until previous call_rcu_bh callback * we are about to modify. So wait until previous call_rcu callback
* is done. * is done.
*/ */
rcu_barrier_bh(); rcu_barrier();
miniq->filter_list = tp_head; miniq->filter_list = tp_head;
rcu_assign_pointer(*miniqp->p_miniq, miniq); rcu_assign_pointer(*miniqp->p_miniq, miniq);
@ -1392,7 +1392,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
* block potential new user of miniq_old until all readers * block potential new user of miniq_old until all readers
* are not seeing it. * are not seeing it.
*/ */
call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func); call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func);
} }
EXPORT_SYMBOL(mini_qdisc_pair_swap); EXPORT_SYMBOL(mini_qdisc_pair_swap);

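The comments in this hunk describe a double-buffering pattern: two statically embedded buffers (miniq1/miniq2) are swapped behind an RCU-protected pointer, and rcu_barrier() ensures the buffer about to be reused is no longer referenced by an in-flight callback. A stripped-down sketch of the same idea, with illustrative names rather than the qdisc code itself:

#include <linux/rcupdate.h>

struct cfg {
	struct rcu_head rcu;
	int value;
};

struct cfg_pair {
	struct cfg a, b;
	struct cfg __rcu *active;
};

static void cfg_rcu_func(struct rcu_head *head)
{
	/* Nothing to free: the buffers are embedded, not allocated. */
}

static void cfg_swap(struct cfg_pair *p, int new_value)
{
	struct cfg *old = rcu_dereference_protected(p->active, 1);
	struct cfg *nxt = (old == &p->a) ? &p->b : &p->a;

	/* Make sure any previous callback on nxt has finished before reuse. */
	rcu_barrier();

	nxt->value = new_value;
	rcu_assign_pointer(p->active, nxt);

	if (old)
		call_rcu(&old->rcu, cfg_rcu_func);
}
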
View File

@ -573,6 +573,27 @@ foreach my $entry (@mode_permission_funcs) {
} }
$mode_perms_search = "(?:${mode_perms_search})"; $mode_perms_search = "(?:${mode_perms_search})";
our %deprecated_apis = (
"synchronize_rcu_bh" => "synchronize_rcu",
"synchronize_rcu_bh_expedited" => "synchronize_rcu_expedited",
"call_rcu_bh" => "call_rcu",
"rcu_barrier_bh" => "rcu_barrier",
"synchronize_sched" => "synchronize_rcu",
"synchronize_sched_expedited" => "synchronize_rcu_expedited",
"call_rcu_sched" => "call_rcu",
"rcu_barrier_sched" => "rcu_barrier",
"get_state_synchronize_sched" => "get_state_synchronize_rcu",
"cond_synchronize_sched" => "cond_synchronize_rcu",
);
#Create a search pattern for all these strings to speed up a loop below
our $deprecated_apis_search = "";
foreach my $entry (keys %deprecated_apis) {
$deprecated_apis_search .= '|' if ($deprecated_apis_search ne "");
$deprecated_apis_search .= $entry;
}
$deprecated_apis_search = "(?:${deprecated_apis_search})";
our $mode_perms_world_writable = qr{ our $mode_perms_world_writable = qr{
S_IWUGO | S_IWUGO |
S_IWOTH | S_IWOTH |
@ -6368,6 +6389,20 @@ sub process {
"please use device_initcall() or more appropriate function instead of __initcall() (see include/linux/init.h)\n" . $herecurr); "please use device_initcall() or more appropriate function instead of __initcall() (see include/linux/init.h)\n" . $herecurr);
} }
# check for spin_is_locked(), suggest lockdep instead
if ($line =~ /\bspin_is_locked\(/) {
WARN("USE_LOCKDEP",
"Where possible, use lockdep_assert_held instead of assertions based on spin_is_locked\n" . $herecurr);
}
# check for deprecated apis
if ($line =~ /\b($deprecated_apis_search)\b\s*\(/) {
my $deprecated_api = $1;
my $new_api = $deprecated_apis{$deprecated_api};
WARN("DEPRECATED_API",
"Deprecated use of '$deprecated_api', prefer '$new_api' instead\n" . $herecurr);
}
# check for various structs that are normally const (ops, kgdb, device_tree) # check for various structs that are normally const (ops, kgdb, device_tree)
# and avoid what seem like struct definitions 'struct foo {' # and avoid what seem like struct definitions 'struct foo {'
if ($line !~ /\bconst\b/ && if ($line !~ /\bconst\b/ &&

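For illustration, here is a small fragment of the sort of code (hypothetical, not from this series) that the two new checkpatch checks would flag, with the suggested replacements noted in the comments:

#include <linux/bug.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct frob {
	spinlock_t lock;
	struct rcu_head rcu;
};

static void frob_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct frob, rcu));
}

static void frob_drop(struct frob *f)
{
	/* USE_LOCKDEP: prefer lockdep_assert_held(&f->lock) */
	WARN_ON(!spin_is_locked(&f->lock));

	/* DEPRECATED_API: prefer call_rcu() */
	call_rcu_bh(&f->rcu, frob_free_rcu);
}
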
View File

@ -116,6 +116,6 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...);
#define round_down(x, y) ((x) & ~__round_mask(x, y)) #define round_down(x, y) ((x) & ~__round_mask(x, y))
#define current_gfp_context(k) 0 #define current_gfp_context(k) 0
#define synchronize_sched() #define synchronize_rcu()
#endif #endif

View File

@ -194,6 +194,14 @@ do
shift shift
done done
if test -z "$TORTURE_INITRD" || tools/testing/selftests/rcutorture/bin/mkinitrd.sh
then
:
else
echo No initrd and unable to create one, aborting test >&2
exit 1
fi
CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
if test -z "$configs" if test -z "$configs"

View File

@ -0,0 +1,136 @@
#!/bin/bash
#
# Create an initrd directory if one does not already exist.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# Copyright (C) IBM Corporation, 2013
#
# Author: Connor Shu <Connor.Shu@ibm.com>
D=tools/testing/selftests/rcutorture
# Prerequisite checks
[ -z "$D" ] && echo >&2 "No argument supplied" && exit 1
if [ ! -d "$D" ]; then
echo >&2 "$D does not exist: Malformed kernel source tree?"
exit 1
fi
if [ -s "$D/initrd/init" ]; then
echo "$D/initrd/init already exists, no need to create it"
exit 0
fi
T=${TMPDIR-/tmp}/mkinitrd.sh.$$
trap 'rm -rf $T' 0 2
mkdir $T
cat > $T/init << '__EOF___'
#!/bin/sh
# Run in userspace a few milliseconds every second. This helps to
# exercise the NO_HZ_FULL portions of RCU.
while :
do
q=
for i in \
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
do
q="$q $i"
done
sleep 1
done
__EOF___
# Try using dracut to create initrd
if command -v dracut >/dev/null 2>&1
then
echo Creating $D/initrd using dracut.
# Filesystem creation
dracut --force --no-hostonly --no-hostonly-cmdline --module "base" $T/initramfs.img
cd $D
mkdir -p initrd
cd initrd
zcat $T/initramfs.img | cpio -id
cp $T/init init
chmod +x init
echo Done creating $D/initrd using dracut
exit 0
fi
# No dracut, so create a C-language initrd/init program and statically
# link it. This results in a very small initrd, but might be a bit less
# future-proof than dracut.
echo "Could not find dracut, attempting C initrd"
cd $D
mkdir -p initrd
cd initrd
cat > init.c << '___EOF___'
#ifndef NOLIBC
#include <unistd.h>
#include <sys/time.h>
#endif
volatile unsigned long delaycount;
int main(int argc, char *argv[])
{
int i;
struct timeval tv;
struct timeval tvb;
for (;;) {
sleep(1);
/* Need some userspace time. */
if (gettimeofday(&tvb, NULL))
continue;
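/* Spin until roughly one millisecond of userspace time has elapsed. */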
do {
for (i = 0; i < 1000 * 100; i++)
delaycount = i * i;
if (gettimeofday(&tv, NULL))
break;
tv.tv_sec -= tvb.tv_sec;
if (tv.tv_sec > 1)
break;
tv.tv_usec += tv.tv_sec * 1000 * 1000;
tv.tv_usec -= tvb.tv_usec;
} while (tv.tv_usec < 1000);
}
return 0;
}
___EOF___
# build using nolibc on supported archs (smaller executable) and fall
# back to regular glibc on other ones.
if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \
"||__ARM_EABI__||__aarch64__\nyes\n#endif" \
| ${CROSS_COMPILE}gcc -E -nostdlib -xc - \
| grep -q '^yes'; then
# architecture supported by nolibc
${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \
-nostdlib -include ../bin/nolibc.h -lgcc -s -static -Os \
-o init init.c
else
${CROSS_COMPILE}gcc -s -static -Os -o init init.c
fi
rm init.c
echo "Done creating a statically linked C-language initrd"
exit 0

File diff suppressed because it is too large

View File

@ -1,9 +1,12 @@
This document describes one way to create the initrd directory hierarchy The rcutorture scripting tools automatically create the needed initrd
in order to allow an initrd to be built into your kernel. The trick directory using dracut. Failing that, this tool will create an initrd
here is to steal the initrd file used on your Linux laptop, Ubuntu in containing a single statically linked binary named "init" that loops
this case. There are probably much better ways of doing this. over a very long sleep() call. In both cases, this creation is done
by tools/testing/selftests/rcutorture/bin/mkinitrd.sh.
That said, here are the commands: However, if you are attempting to run rcutorture on a system that does
not have dracut installed, and if you don't like the notion of static
linking, you might wish to press an existing initrd into service:
------------------------------------------------------------------------ ------------------------------------------------------------------------
cd tools/testing/selftests/rcutorture cd tools/testing/selftests/rcutorture
@ -11,22 +14,7 @@ zcat /initrd.img > /tmp/initrd.img.zcat
mkdir initrd mkdir initrd
cd initrd cd initrd
cpio -id < /tmp/initrd.img.zcat cpio -id < /tmp/initrd.img.zcat
------------------------------------------------------------------------ # Manually verify that initrd contains needed binaries and libraries.
Another way to create an initramfs image is using "dracut"[1], which is
available on many distros, however the initramfs dracut generates is a cpio
archive with another cpio archive in it, so an extra step is needed to create
the initrd directory hierarchy.
Here are the commands to create a initrd directory for rcutorture using
dracut:
------------------------------------------------------------------------
dracut --no-hostonly --no-hostonly-cmdline --module "base bash shutdown" /tmp/initramfs.img
cd tools/testing/selftests/rcutorture
mkdir initrd
cd initrd
/usr/lib/dracut/skipcpio /tmp/initramfs.img | zcat | cpio -id < /tmp/initramfs.img
------------------------------------------------------------------------ ------------------------------------------------------------------------
Interestingly enough, if you are running rcutorture, you don't really Interestingly enough, if you are running rcutorture, you don't really
@ -39,75 +27,12 @@ with 0755 mode.
------------------------------------------------------------------------ ------------------------------------------------------------------------
#!/bin/sh #!/bin/sh
[ -d /dev ] || mkdir -m 0755 /dev
[ -d /root ] || mkdir -m 0700 /root
[ -d /sys ] || mkdir /sys
[ -d /proc ] || mkdir /proc
[ -d /tmp ] || mkdir /tmp
mkdir -p /var/lock
mount -t sysfs -o nodev,noexec,nosuid sysfs /sys
mount -t proc -o nodev,noexec,nosuid proc /proc
# Some things don't work properly without /etc/mtab.
ln -sf /proc/mounts /etc/mtab
# Note that this only becomes /dev on the real filesystem if udev's scripts
# are used; which they will be, but it's worth pointing out
if ! mount -t devtmpfs -o mode=0755 udev /dev; then
echo "W: devtmpfs not available, falling back to tmpfs for /dev"
mount -t tmpfs -o mode=0755 udev /dev
[ -e /dev/console ] || mknod --mode=600 /dev/console c 5 1
[ -e /dev/kmsg ] || mknod --mode=644 /dev/kmsg c 1 11
[ -e /dev/null ] || mknod --mode=666 /dev/null c 1 3
fi
mkdir /dev/pts
mount -t devpts -o noexec,nosuid,gid=5,mode=0620 devpts /dev/pts || true
mount -t tmpfs -o "nosuid,size=20%,mode=0755" tmpfs /run
mkdir /run/initramfs
# compatibility symlink for the pre-oneiric locations
ln -s /run/initramfs /dev/.initramfs
# Export relevant variables
export ROOT=
export ROOTDELAY=
export ROOTFLAGS=
export ROOTFSTYPE=
export IP=
export BOOT=
export BOOTIF=
export UBIMTD=
export break=
export init=/sbin/init
export quiet=n
export readonly=y
export rootmnt=/root
export debug=
export panic=
export blacklist=
export resume=
export resume_offset=
export recovery=
for i in /sys/devices/system/cpu/cpu*/online
do
case $i in
'/sys/devices/system/cpu/cpu0/online')
;;
'/sys/devices/system/cpu/cpu*/online')
;;
*)
echo 1 > $i
;;
esac
done
while : while :
do do
sleep 10 sleep 10
done done
------------------------------------------------------------------------ ------------------------------------------------------------------------
References: This approach also allows most of the binaries and libraries in the
[1]: https://dracut.wiki.kernel.org/index.php/Main_Page initrd filesystem to be dispensed with, which can save significant
[2]: http://blog.elastocloud.org/2015/06/rapid-linux-kernel-devtest-with-qemu.html space in rcutorture's "res" directory.
[3]: https://www.centos.org/forums/viewtopic.php?t=51621

View File

@ -131,8 +131,8 @@ struct hlist_node {
* weird ABI and we need to ask it explicitly. * weird ABI and we need to ask it explicitly.
* *
* The alignment is required to guarantee that bits 0 and 1 of @next will be * The alignment is required to guarantee that bits 0 and 1 of @next will be
* clear under normal conditions -- as long as we use call_rcu(), * clear under normal conditions -- as long as we use call_rcu() or
* call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. * call_srcu() to queue callback.
* *
* This guarantee is important for few reasons: * This guarantee is important for few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer; * - future call_rcu_lazy() will make use of lower bits in the pointer;

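The guarantee described above is what makes pointer-bit tagging schemes possible: with at least four-byte alignment, bits 0 and 1 of ->next are known to be zero and can carry flags, provided they are masked off before the pointer is dereferenced. A tiny userspace sketch of the idea (the flag name and helpers are hypothetical, not kernel API):

#include <assert.h>
#include <stdint.h>

#define PTR_FLAG_LAZY	0x1UL	/* stored in bit 0 of an aligned pointer */

static inline void *tag_lazy(void *p)
{
	assert(((uintptr_t)p & 0x3) == 0);	/* relies on the alignment guarantee */
	return (void *)((uintptr_t)p | PTR_FLAG_LAZY);
}

static inline int is_lazy(void *p)
{
	return ((uintptr_t)p & PTR_FLAG_LAZY) != 0;
}

static inline void *untag(void *p)
{
	return (void *)((uintptr_t)p & ~0x3UL);	/* mask flags before dereference */
}
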
View File

@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
*/ */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{ {
DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); lockdep_assert_held(&irq->irq_lock);
/* If the interrupt is active, it must stay on the current vcpu */ /* If the interrupt is active, it must stay on the current vcpu */
if (irq->active) if (irq->active)
@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{ {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); lockdep_assert_held(&vgic_cpu->ap_list_lock);
list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp); list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
} }
@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); lockdep_assert_held(&irq->irq_lock);
retry: retry:
vcpu = vgic_target_oracle(irq); vcpu = vgic_target_oracle(irq);
@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu, static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
struct vgic_irq *irq, int lr) struct vgic_irq *irq, int lr)
{ {
DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); lockdep_assert_held(&irq->irq_lock);
if (kvm_vgic_global_state.type == VGIC_V2) if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_populate_lr(vcpu, irq, lr); vgic_v2_populate_lr(vcpu, irq, lr);
@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
*multi_sgi = false; *multi_sgi = false;
DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); lockdep_assert_held(&vgic_cpu->ap_list_lock);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
int w; int w;
@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
bool multi_sgi; bool multi_sgi;
u8 prio = 0xff; u8 prio = 0xff;
DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); lockdep_assert_held(&vgic_cpu->ap_list_lock);
count = compute_ap_list_depth(vcpu, &multi_sgi); count = compute_ap_list_depth(vcpu, &multi_sgi);
if (count > kvm_vgic_global_state.nr_lr || multi_sgi) if (count > kvm_vgic_global_state.nr_lr || multi_sgi)