Make classes+methods that can be static static in many spots (#85370)
Just some quick static analysis and fixing here. There is not much in the way of code changes beyond adding the `static` keyword, except for some simplifications to the search objects, which no longer need the search controller instance passed down in many spots. This was done mostly automatically by the IDE, but quick manual inspection shows quite a few spots where it should make things behave better, for example by making lambdas non-capturing.
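For context on the lambda point: a lambda inside an instance method that touches instance state must capture `this`, which can mean a fresh lambda allocation per evaluation, whereas a lambda that references no enclosing state compiles to a constant the JVM can cache and reuse. A minimal sketch of the difference (hypothetical class, not code from this commit):

```java
class Example {
    private final String name = "example";

    // Capturing: the lambda reads an instance field, so it captures `this`
    // and a new lambda instance may be allocated each time this runs.
    Runnable capturing() {
        return () -> System.out.println(name);
    }

    // Non-capturing: once the enclosing method is static and the lambda
    // references no enclosing state, the JVM can cache a single instance.
    static Runnable nonCapturing() {
        return () -> System.out.println("example");
    }
}
```

Making the enclosing method `static` also gets the compiler to reject any accidental capture of `this`, so the change locks in the non-capturing property rather than just winning it once.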
parent ee864bd3c2
commit 898d84998b
@@ -143,7 +143,7 @@ public class SecureSM extends SecurityManager {
     }
 
     @SuppressForbidden(reason = "java.security.debug messages go to standard error")
-    private void debugThreadGroups(final ThreadGroup caller, final ThreadGroup target) {
+    private static void debugThreadGroups(final ThreadGroup caller, final ThreadGroup target) {
         System.err.println("access: caller group=" + caller);
         System.err.println("access: target group=" + target);
     }

@@ -136,7 +136,7 @@ public final class DiagnosticTrustManager extends X509ExtendedTrustManager {
         logger.warning(diagnostic, cause);
     }
 
-    private SSLSession session(Socket socket) {
+    private static SSLSession session(Socket socket) {
         if (socket instanceof final SSLSocket ssl) {
             final SSLSession handshakeSession = ssl.getHandshakeSession();
             if (handshakeSession == null) {
@@ -149,7 +149,7 @@ public final class DiagnosticTrustManager extends X509ExtendedTrustManager {
         }
     }
 
-    private SSLSession session(SSLEngine engine) {
+    private static SSLSession session(SSLEngine engine) {
         return engine.getHandshakeSession();
     }
 }
@@ -293,7 +293,7 @@ public abstract class SslConfigurationLoader {
         return buildDefaultTrustConfig(defaultTrustConfig, keyConfig);
     }
 
-    protected SslTrustConfig buildDefaultTrustConfig(SslTrustConfig trustConfig, SslKeyConfig keyConfig) {
+    protected static SslTrustConfig buildDefaultTrustConfig(SslTrustConfig trustConfig, SslKeyConfig keyConfig) {
         final SslTrustConfig trust = keyConfig.asTrustConfig();
         if (trust == null) {
             return trustConfig;

@@ -190,7 +190,7 @@ public class StoreKeyConfig implements SslKeyConfig {
     /**
     * Verifies that the keystore contains at least 1 private key entry.
     */
-    private void checkKeyStore(KeyStore keyStore, Path path) throws KeyStoreException {
+    private static void checkKeyStore(KeyStore keyStore, Path path) throws KeyStoreException {
         Enumeration<String> aliases = keyStore.aliases();
         while (aliases.hasMoreElements()) {
             String alias = aliases.nextElement();

@@ -123,7 +123,7 @@ public final class StoreTrustConfig implements SslTrustConfig {
     /**
     * Verifies that the keystore contains at least 1 trusted certificate entry.
     */
-    private void checkTrustStore(KeyStore store, Path path) throws GeneralSecurityException {
+    private static void checkTrustStore(KeyStore store, Path path) throws GeneralSecurityException {
         Enumeration<String> aliases = store.aliases();
         while (aliases.hasMoreElements()) {
             String alias = aliases.nextElement();
@@ -254,7 +254,7 @@ public class JsonXContentParser extends AbstractXContentParser {
         IOUtils.closeWhileHandlingException(parser);
     }
 
-    private NumberType convertNumberType(JsonParser.NumberType numberType) {
+    private static NumberType convertNumberType(JsonParser.NumberType numberType) {
         return switch (numberType) {
             case INT -> NumberType.INT;
             case BIG_INTEGER -> NumberType.BIG_INTEGER;

@@ -341,7 +341,7 @@ public final class ConstructingObjectParser<Value, Context> extends AbstractObje
     * Constructor arguments are detected by this "marker" consumer. It
     * keeps the API looking clean even if it is a bit sleezy.
     */
-    private boolean isConstructorArg(BiConsumer<?, ?> consumer) {
+    private static boolean isConstructorArg(BiConsumer<?, ?> consumer) {
         return consumer == REQUIRED_CONSTRUCTOR_ARG_MARKER || consumer == OPTIONAL_CONSTRUCTOR_ARG_MARKER;
     }
 

@@ -64,7 +64,7 @@ public class MediaTypeRegistry<T extends MediaType> {
         return this;
     }
 
-    private Map<String, Pattern> convertPatterns(Map<String, String> paramNameAndValueRegex) {
+    private static Map<String, Pattern> convertPatterns(Map<String, String> paramNameAndValueRegex) {
         Map<String, Pattern> parametersForMediaType = new HashMap<>(paramNameAndValueRegex.size());
         for (Map.Entry<String, String> params : paramNameAndValueRegex.entrySet()) {
             String parameterName = params.getKey().toLowerCase(Locale.ROOT);
@@ -90,7 +90,7 @@ public class NamedXContentRegistry {
         this.registry = unmodifiableMap(createRegistry(entries));
     }
 
-    private Map<RestApiVersion, Map<Class<?>, Map<String, Entry>>> createRegistry(List<Entry> entries) {
+    private static Map<RestApiVersion, Map<Class<?>, Map<String, Entry>>> createRegistry(List<Entry> entries) {
         if (entries.isEmpty()) {
             return emptyMap();
         }
@@ -109,7 +109,7 @@ public class NamedXContentRegistry {
         return newRegistry;
     }
 
-    private void registerParsers(
+    private static void registerParsers(
         Map<RestApiVersion, Map<Class<?>, Map<String, Entry>>> newRegistry,
         Entry entry,
         String name,
@@ -340,7 +340,7 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] no field found");
     }
 
-    private void throwMissingRequiredFields(List<String[]> requiredFields) {
+    private static void throwMissingRequiredFields(List<String[]> requiredFields) {
         final StringBuilder message = new StringBuilder();
         for (String[] fields : requiredFields) {
             message.append("Required one of fields ").append(Arrays.toString(fields)).append(", but none were specified. ");
@@ -348,7 +348,7 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         throw new IllegalArgumentException(message.toString());
     }
 
-    private void ensureExclusiveFields(List<List<String>> exclusiveFields) {
+    private static void ensureExclusiveFields(List<List<String>> exclusiveFields) {
         StringBuilder message = null;
         for (List<String> fieldset : exclusiveFields) {
             if (fieldset.size() > 1) {
@@ -373,7 +373,7 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         }
     }
 
-    private void maybeMarkRequiredField(String currentFieldName, List<String[]> requiredFields) {
+    private static void maybeMarkRequiredField(String currentFieldName, List<String[]> requiredFields) {
         Iterator<String[]> iter = requiredFields.iterator();
         while (iter.hasNext()) {
             String[] requiredFieldNames = iter.next();
@@ -522,7 +522,7 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         }, field, ValueType.OBJECT_ARRAY);
     }
 
-    private <T> void parseObjectsInArray(
+    private static <Value, Context, T> void parseObjectsInArray(
         Consumer<Value> orderedModeCallback,
         ParseField field,
         BiFunction<XContentParser, Context, T> objectParser,
@@ -546,7 +546,7 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         }
     }
 
-    private XContentParseException wrapCanBeObjectOrArrayOfObjects(ParseField field, XContentParser p) {
+    private static XContentParseException wrapCanBeObjectOrArrayOfObjects(ParseField field, XContentParser p) {
         return new XContentParseException(
             p.getTokenLocation(),
             "["
@@ -556,11 +556,11 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         );
     }
 
-    private XContentParseException wrapParseError(ParseField field, XContentParser p, IOException e, String s) {
+    private static XContentParseException wrapParseError(ParseField field, XContentParser p, IOException e, String s) {
         return new XContentParseException(p.getTokenLocation(), "[" + field + "] " + s, e);
     }
 
-    private XContentParseException rethrowFieldParseFailure(ParseField field, XContentParser p, String currentName, Exception e) {
+    private static XContentParseException rethrowFieldParseFailure(ParseField field, XContentParser p, String currentName, Exception e) {
         return new XContentParseException(p.getTokenLocation(), "[" + field + "] failed to parse field [" + currentName + "]", e);
     }
 
@@ -668,7 +668,7 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         }
     }
 
-    private void throwMustEndOn(String currentFieldName, XContentParser.Token token) {
+    private static void throwMustEndOn(String currentFieldName, XContentParser.Token token) {
         throw new IllegalStateException("parser for [" + currentFieldName + "] did not end on " + token);
     }
 
@@ -139,7 +139,7 @@ public class ParsedMediaType {
         return null;
     }
 
-    private boolean isValidParameter(String paramName, String value, Map<String, Pattern> registeredParams) {
+    private static boolean isValidParameter(String paramName, String value, Map<String, Pattern> registeredParams) {
         if (registeredParams.containsKey(paramName)) {
             Pattern regex = registeredParams.get(paramName);
             return regex.matcher(value).matches();
@@ -162,7 +162,7 @@ public class ParsedMediaType {
         return mediaTypeWithoutParameters() + formatParameters(params);
     }
 
-    private String formatParameters(Map<String, String> params) {
+    private static String formatParameters(Map<String, String> params) {
         String joined = params.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining(";"));
         return joined.isEmpty() ? "" : ";" + joined;
     }
@@ -131,7 +131,7 @@ public class FilterPath {
     }
 
     private static class FilterPathBuilder {
-        private class BuildNode {
+        private static class BuildNode {
             private final Map<String, BuildNode> children;
             private final boolean isFinalNode;
 
@@ -141,7 +141,7 @@ public class FilterPath {
             }
         }
 
-        private BuildNode root = new BuildNode(false);
+        private final BuildNode root = new BuildNode(false);
 
         void insert(String filter) {
             insertNode(filter, root);
@@ -58,7 +58,7 @@ public class CharGroupTokenizerFactory extends AbstractTokenizerFactory {
         }
     }
 
-    private char parseEscapedChar(final String s) {
+    private static char parseEscapedChar(final String s) {
         int len = s.length();
         char c = s.charAt(0);
         if (c == '\\') {

@@ -36,7 +36,7 @@ public class MinHashTokenFilterFactory extends AbstractTokenFilterFactory {
         return minHashFilterFactory.create(tokenStream);
     }
 
-    private Map<String, String> convertSettings(Settings settings) {
+    private static Map<String, String> convertSettings(Settings settings) {
         Map<String, String> settingMap = new HashMap<>();
         if (settings.hasValue("hash_count")) {
             settingMap.put("hashCount", settings.get("hash_count"));

@@ -111,7 +111,7 @@ public class MultiplexerTokenFilterFactory extends AbstractTokenFilterFactory {
         };
     }
 
-    private TokenFilterFactory chainFilters(String name, List<TokenFilterFactory> filters) {
+    private static TokenFilterFactory chainFilters(String name, List<TokenFilterFactory> filters) {
         return new TokenFilterFactory() {
             @Override
             public String name() {
@@ -137,7 +137,7 @@ public class MultiplexerTokenFilterFactory extends AbstractTokenFilterFactory {
         }
     }
 
-    private final class MultiplexTokenFilter extends TokenFilter {
+    private static final class MultiplexTokenFilter extends TokenFilter {
 
         private final TokenStream source;
         private final int filterCount;
@@ -41,7 +41,7 @@ public class SynonymGraphTokenFilterFactory extends SynonymTokenFilterFactory {
         List<TokenFilterFactory> previousTokenFilters,
         Function<String, TokenFilterFactory> allFilters
     ) {
-        final Analyzer analyzer = buildSynonymAnalyzer(tokenizer, charFilters, previousTokenFilters, allFilters);
+        final Analyzer analyzer = buildSynonymAnalyzer(tokenizer, charFilters, previousTokenFilters);
         final SynonymMap synonyms = buildSynonyms(analyzer, getRulesFromSettings(environment));
         final String name = name();
         return new TokenFilterFactory() {

@@ -79,7 +79,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
         List<TokenFilterFactory> previousTokenFilters,
         Function<String, TokenFilterFactory> allFilters
     ) {
-        final Analyzer analyzer = buildSynonymAnalyzer(tokenizer, charFilters, previousTokenFilters, allFilters);
+        final Analyzer analyzer = buildSynonymAnalyzer(tokenizer, charFilters, previousTokenFilters);
         final SynonymMap synonyms = buildSynonyms(analyzer, getRulesFromSettings(environment));
         final String name = name();
         return new TokenFilterFactory() {
@@ -108,11 +108,10 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
         };
     }
 
-    Analyzer buildSynonymAnalyzer(
+    static Analyzer buildSynonymAnalyzer(
         TokenizerFactory tokenizer,
         List<CharFilterFactory> charFilters,
-        List<TokenFilterFactory> tokenFilters,
-        Function<String, TokenFilterFactory> allFilters
+        List<TokenFilterFactory> tokenFilters
     ) {
         return new CustomAnalyzer(
             tokenizer,

@@ -95,7 +95,7 @@ public class WordDelimiterGraphTokenFilterFactory extends AbstractTokenFilterFac
         throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms");
     }
 
-    private int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
+    private static int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
         if (settings.getAsBoolean(key, defaultValue)) {
             return flag;
         }

@@ -95,7 +95,7 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory
         throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms");
     }
 
-    public int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
+    public static int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
         if (settings.getAsBoolean(key, defaultValue)) {
             return flag;
         }
@@ -103,7 +103,7 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory
     }
 
     // source => type
-    private static Pattern typePattern = Pattern.compile("(.*)\\s*=>\\s*(.*)\\s*$");
+    private static final Pattern typePattern = Pattern.compile("(.*)\\s*=>\\s*(.*)\\s*$");
 
     /**
      * parses a list of MappingCharFilter style rules into a custom byte[] type table
@@ -240,7 +240,11 @@ public class SynonymsAnalysisTests extends ESTestCase {
             TokenFilterFactory tff = plugin.getTokenFilters().get(factory).get(idxSettings, null, factory, settings);
             TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, null, "keyword", settings);
             SynonymTokenFilterFactory stff = new SynonymTokenFilterFactory(idxSettings, null, "synonym", settings);
-            Analyzer analyzer = stff.buildSynonymAnalyzer(tok, Collections.emptyList(), Collections.singletonList(tff), null);
+            Analyzer analyzer = SynonymTokenFilterFactory.buildSynonymAnalyzer(
+                tok,
+                Collections.emptyList(),
+                Collections.singletonList(tff)
+            );
 
             try (TokenStream ts = analyzer.tokenStream("field", "text")) {
                 assertThat(ts, instanceOf(KeywordTokenizer.class));
@@ -308,7 +312,7 @@ public class SynonymsAnalysisTests extends ESTestCase {
             IllegalArgumentException e = expectThrows(
                 IllegalArgumentException.class,
                 "Expected IllegalArgumentException for factory " + factory,
-                () -> stff.buildSynonymAnalyzer(tok, Collections.emptyList(), Collections.singletonList(tff), null)
+                () -> SynonymTokenFilterFactory.buildSynonymAnalyzer(tok, Collections.emptyList(), Collections.singletonList(tff))
             );
 
             assertEquals(factory, "Token filter [" + factory + "] cannot be used to parse synonyms", e.getMessage());
@@ -175,7 +175,7 @@ final class CsvParser {
         return line.charAt(currentIndex);
     }
 
-    private boolean isWhitespace(char c) {
+    private static boolean isWhitespace(char c) {
         return c == SPACE || c == TAB;
     }
 

@@ -61,7 +61,7 @@ enum DateFormat {
         return date -> Instant.ofEpochMilli(parseMillis(date)).atZone(timezone);
     }
 
-    private long parseMillis(String date) {
+    private static long parseMillis(String date) {
         if (date.startsWith("@")) {
             date = date.substring(1);
         }

@@ -346,7 +346,7 @@ public final class FingerprintProcessor extends AbstractProcessor {
 
         @Override
         public String getAlgorithm() {
-            return mh.getAlgorithm();
+            return Murmur3Hasher.getAlgorithm();
         }
     }
 
@@ -179,11 +179,11 @@ public class NetworkDirectionProcessor extends AbstractProcessor {
         };
     }
 
-    private boolean isLoopback(InetAddress ip) {
+    private static boolean isLoopback(InetAddress ip) {
         return ip.isLoopbackAddress();
     }
 
-    private boolean isUnicast(InetAddress ip) {
+    private static boolean isUnicast(InetAddress ip) {
         return Arrays.equals(ip.getAddress(), BROADCAST_IP4) == false
             && isUnspecified(ip) == false
             && isLoopback(ip) == false
@@ -191,36 +191,36 @@ public class NetworkDirectionProcessor extends AbstractProcessor {
             && isLinkLocalUnicast(ip) == false;
     }
 
-    private boolean isLinkLocalUnicast(InetAddress ip) {
+    private static boolean isLinkLocalUnicast(InetAddress ip) {
         return ip.isLinkLocalAddress();
     }
 
-    private boolean isInterfaceLocalMulticast(InetAddress ip) {
+    private static boolean isInterfaceLocalMulticast(InetAddress ip) {
         return ip.isMCNodeLocal();
     }
 
-    private boolean isLinkLocalMulticast(InetAddress ip) {
+    private static boolean isLinkLocalMulticast(InetAddress ip) {
         return ip.isMCLinkLocal();
     }
 
-    private boolean isMulticast(InetAddress ip) {
+    private static boolean isMulticast(InetAddress ip) {
         return ip.isMulticastAddress();
     }
 
-    private boolean isUnspecified(InetAddress ip) {
+    private static boolean isUnspecified(InetAddress ip) {
         var address = ip.getAddress();
         return Arrays.equals(UNDEFINED_IP4, address) || Arrays.equals(UNDEFINED_IP6, address);
     }
 
-    private boolean isPrivate(String ip) {
+    private static boolean isPrivate(String ip) {
         return CIDRUtils.isInRange(ip, "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fd00::/8");
     }
 
-    private boolean isPublic(String ip) {
+    private static boolean isPublic(String ip) {
         return isLocalOrPrivate(ip) == false;
     }
 
-    private boolean isLocalOrPrivate(String ip) {
+    private static boolean isLocalOrPrivate(String ip) {
         var address = InetAddresses.forString(ip);
         return isPrivate(ip)
             || isLoopback(address)
@@ -103,7 +103,7 @@ public class RegisteredDomainProcessor extends AbstractProcessor {
         return TYPE;
     }
 
-    private class DomainInfo {
+    private static class DomainInfo {
         private final String domain;
         private final String registeredDomain;
         private final String eTLD;

@@ -73,7 +73,7 @@ public final class RemoveProcessor extends AbstractProcessor {
             .forEach(documentField -> removeWhenPresent(document, documentField));
    }
 
-    private void removeWhenPresent(IngestDocument document, String documentField) {
+    private static void removeWhenPresent(IngestDocument document, String documentField) {
         if (document.hasField(documentField)) {
             document.removeField(documentField);
         }
@@ -127,7 +127,7 @@ public final class RemoveProcessor extends AbstractProcessor {
                 .collect(Collectors.toList());
         }
 
-        private List<String> getFields(String processorTag, Map<String, Object> config, String propertyName) {
+        private static List<String> getFields(String processorTag, Map<String, Object> config, String propertyName) {
            final List<String> fields = new ArrayList<>();
 
            if (config.containsKey(propertyName) == false) {
@@ -197,7 +197,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
         serverBootstrap.childOption(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator());
 
         serverBootstrap.childHandler(configureServerChannelHandler());
-        serverBootstrap.handler(new ServerChannelExceptionHandler(this));
+        serverBootstrap.handler(ServerChannelExceptionHandler.INSTANCE);
 
         serverBootstrap.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings));
         serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings));
@@ -339,20 +339,18 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
     @ChannelHandler.Sharable
     private static class ServerChannelExceptionHandler extends ChannelInboundHandlerAdapter {
 
-        private final Netty4HttpServerTransport transport;
+        static final ServerChannelExceptionHandler INSTANCE = new ServerChannelExceptionHandler();
 
-        private ServerChannelExceptionHandler(Netty4HttpServerTransport transport) {
-            this.transport = transport;
-        }
+        private ServerChannelExceptionHandler() {}
 
         @Override
         public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
             ExceptionsHelper.maybeDieOnAnotherThread(cause);
             Netty4HttpServerChannel httpServerChannel = ctx.channel().attr(HTTP_SERVER_CHANNEL_KEY).get();
             if (cause instanceof Error) {
-                transport.onServerException(httpServerChannel, new Exception(cause));
+                AbstractHttpServerTransport.onServerException(httpServerChannel, new Exception(cause));
             } else {
-                transport.onServerException(httpServerChannel, (Exception) cause);
+                AbstractHttpServerTransport.onServerException(httpServerChannel, (Exception) cause);
             }
         }
     }

@@ -378,7 +378,7 @@ public class Netty4Transport extends TcpTransport {
             .addLast("dispatcher", new Netty4MessageInboundHandler(this, recycler));
     }
 
-    private void addClosedExceptionLogger(Channel channel) {
+    private static void addClosedExceptionLogger(Channel channel) {
         channel.closeFuture().addListener(f -> {
             if (f.isSuccess() == false) {
                 logger.debug(() -> new ParameterizedMessage("exception while closing channel: {}", channel), f.cause());
@@ -387,7 +387,7 @@ public class Netty4Transport extends TcpTransport {
     }
 
     @ChannelHandler.Sharable
-    private class ServerChannelExceptionHandler extends ChannelInboundHandlerAdapter {
+    private static class ServerChannelExceptionHandler extends ChannelInboundHandlerAdapter {
 
         @Override
         public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
@@ -278,7 +278,6 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
         );
 
         logger.info("--> verify that repo is assumed in old metadata format");
-        final SnapshotsService snapshotsService = internalCluster().getCurrentMasterNodeInstance(SnapshotsService.class);
         final ThreadPool threadPool = internalCluster().getCurrentMasterNodeInstance(ThreadPool.class);
         assertThat(
             PlainActionFuture.get(
@@ -286,7 +285,7 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
                 .execute(
                     ActionRunnable.supply(
                         f,
-                        () -> snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null)
+                        () -> SnapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null)
                     )
                 )
             ),
@@ -303,7 +302,7 @@ public class CorruptedBlobStoreRepositoryIT extends AbstractSnapshotIntegTestCas
                 .execute(
                     ActionRunnable.supply(
                         f,
-                        () -> snapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null)
+                        () -> SnapshotsService.minCompatibleVersion(Version.CURRENT, getRepositoryData(repoName), null)
                     )
                )
            ),
@@ -680,7 +680,7 @@ public class ActionModule extends AbstractModule {
         return unmodifiableMap(actions.getRegistry());
     }
 
-    private ActionFilters setupActionFilters(List<ActionPlugin> actionPlugins) {
+    private static ActionFilters setupActionFilters(List<ActionPlugin> actionPlugins) {
         return new ActionFilters(
             Collections.unmodifiableSet(actionPlugins.stream().flatMap(p -> p.getActionFilters().stream()).collect(Collectors.toSet()))
         );

@@ -211,7 +211,7 @@ public final class ClusterAllocationExplanation implements ToXContentObject, Wri
         return builder;
     }
 
-    private XContentBuilder unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder) throws IOException {
+    private static XContentBuilder unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder) throws IOException {
 
         builder.startObject("unassigned_info");
         builder.field("reason", unassignedInfo.getReason());
@@ -64,14 +64,11 @@ public final class TransportCleanupRepositoryAction extends TransportMasterNodeA
 
     private final RepositoriesService repositoriesService;
 
-    private final SnapshotsService snapshotsService;
-
     @Inject
     public TransportCleanupRepositoryAction(
         TransportService transportService,
         ClusterService clusterService,
         RepositoriesService repositoriesService,
-        SnapshotsService snapshotsService,
         ThreadPool threadPool,
         ActionFilters actionFilters,
         IndexNameExpressionResolver indexNameExpressionResolver
@@ -88,7 +85,6 @@ public final class TransportCleanupRepositoryAction extends TransportMasterNodeA
             ThreadPool.Names.SAME
         );
         this.repositoriesService = repositoriesService;
-        this.snapshotsService = snapshotsService;
         // We add a state applier that will remove any dangling repository cleanup actions on master failover.
         // This is safe to do since cleanups will increment the repository state id before executing any operations to prevent concurrent
         // operations from corrupting the repository. This is the same safety mechanism used by snapshot deletes.
@@ -234,7 +230,7 @@ public final class TransportCleanupRepositoryAction extends TransportMasterNodeA
                     listener,
                     l -> blobStoreRepository.cleanup(
                         repositoryStateId,
-                        snapshotsService.minCompatibleVersion(newState.nodes().getMinNodeVersion(), repositoryData, null),
+                        SnapshotsService.minCompatibleVersion(newState.nodes().getMinNodeVersion(), repositoryData, null),
                         ActionListener.wrap(result -> after(null, result), e -> after(e, null))
                     )
                 )
@@ -158,11 +158,11 @@ final class SettingsUpdater {
         );
     }
 
-    private void logUnknownSetting(final String settingType, final Map.Entry<String, String> e, final Logger logger) {
+    private static void logUnknownSetting(final String settingType, final Map.Entry<String, String> e, final Logger logger) {
         logger.warn("ignoring existing unknown {} setting: [{}] with value [{}]; archiving", settingType, e.getKey(), e.getValue());
     }
 
-    private void logInvalidSetting(
+    private static void logInvalidSetting(
         final String settingType,
         final Map.Entry<String, String> e,
         final IllegalArgumentException ex,

@@ -105,7 +105,10 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
     * archived settings that have been set to null.
     * @return true if all settings are clear blocks or archived settings.
     */
-    private boolean checkClearedBlockAndArchivedSettings(final Settings settings, final Set<String> clearedBlockAndArchivedSettings) {
+    private static boolean checkClearedBlockAndArchivedSettings(
+        final Settings settings,
+        final Set<String> clearedBlockAndArchivedSettings
+    ) {
         for (String key : settings.keySet()) {
             if (Metadata.SETTING_READ_ONLY_SETTING.getKey().equals(key)) {
                 if (Metadata.SETTING_READ_ONLY_SETTING.get(settings)) {
@@ -477,7 +477,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
         );
     }
 
-    private boolean isCurrentSnapshotsOnly(String[] snapshots) {
+    private static boolean isCurrentSnapshotsOnly(String[] snapshots) {
         return (snapshots.length == 1 && GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshots[0]));
     }
 

@@ -85,7 +85,7 @@ public class TransportGetShardSnapshotAction extends TransportMasterNodeAction<G
         }
 
         GroupedActionListener<Tuple<Optional<ShardSnapshotInfo>, RepositoryException>> groupedActionListener = new GroupedActionListener<>(
-            listener.map(this::transformToResponse),
+            listener.map(TransportGetShardSnapshotAction::transformToResponse),
             repositories.size()
         );
 
@@ -124,7 +124,7 @@ public class TransportGetShardSnapshotAction extends TransportMasterNodeAction<G
         );
     }
 
-    private GetShardSnapshotResponse transformToResponse(
+    private static GetShardSnapshotResponse transformToResponse(
         Collection<Tuple<Optional<ShardSnapshotInfo>, RepositoryException>> shardSnapshots
     ) {
         final Optional<ShardSnapshotInfo> latestSnapshot = shardSnapshots.stream()
@@ -142,7 +142,7 @@ public class TransportGetShardSnapshotAction extends TransportMasterNodeAction<G
         return new GetShardSnapshotResponse(latestSnapshot.orElse(null), failures);
     }
 
-    private Set<String> getRequestedRepositories(GetShardSnapshotRequest request, ClusterState state) {
+    private static Set<String> getRequestedRepositories(GetShardSnapshotRequest request, ClusterState state) {
         RepositoriesMetadata repositories = state.metadata().custom(RepositoriesMetadata.TYPE, RepositoriesMetadata.EMPTY);
         if (request.getFromAllRepositories()) {
             return repositories.repositories().stream().map(RepositoryMetadata::name).collect(Collectors.toUnmodifiableSet());
@@ -313,7 +313,7 @@ public final class AnalysisStats implements ToXContentFragment, Writeable {
         );
     }
 
-    private void toXContentCollection(XContentBuilder builder, Params params, String name, Collection<? extends ToXContent> coll)
+    private static void toXContentCollection(XContentBuilder builder, Params params, String name, Collection<? extends ToXContent> coll)
         throws IOException {
         builder.startArray(name);
         for (ToXContent toXContent : coll) {

@@ -312,7 +312,7 @@ public class ClusterStatsIndices implements ToXContentFragment {
         static final String INDEX = "index";
     }
 
-    private void addIntMinMax(String field, int min, int max, double avg, XContentBuilder builder) throws IOException {
+    private static void addIntMinMax(String field, int min, int max, double avg, XContentBuilder builder) throws IOException {
         builder.startObject(field);
         builder.field(Fields.MIN, min);
         builder.field(Fields.MAX, max);
@@ -320,7 +320,7 @@ public class ClusterStatsIndices implements ToXContentFragment {
         builder.endObject();
     }
 
-    private void addDoubleMinMax(String field, double min, double max, double avg, XContentBuilder builder) throws IOException {
+    private static void addDoubleMinMax(String field, double min, double max, double avg, XContentBuilder builder) throws IOException {
         builder.startObject(field);
         builder.field(Fields.MIN, min);
         builder.field(Fields.MAX, max);
@@ -25,16 +25,13 @@ import org.elasticsearch.transport.TransportService;
 
 public class TransportDeleteStoredScriptAction extends AcknowledgedTransportMasterNodeAction<DeleteStoredScriptRequest> {
 
-    private final ScriptService scriptService;
-
     @Inject
     public TransportDeleteStoredScriptAction(
         TransportService transportService,
         ClusterService clusterService,
         ThreadPool threadPool,
         ActionFilters actionFilters,
-        IndexNameExpressionResolver indexNameExpressionResolver,
-        ScriptService scriptService
+        IndexNameExpressionResolver indexNameExpressionResolver
     ) {
         super(
             DeleteStoredScriptAction.NAME,
@@ -46,7 +43,6 @@ public class TransportDeleteStoredScriptAction extends AcknowledgedTransportMast
             indexNameExpressionResolver,
             ThreadPool.Names.SAME
         );
-        this.scriptService = scriptService;
     }
 
     @Override
@@ -56,7 +52,7 @@ public class TransportDeleteStoredScriptAction extends AcknowledgedTransportMast
         ClusterState state,
         ActionListener<AcknowledgedResponse> listener
     ) throws Exception {
-        scriptService.deleteStoredScript(clusterService, request, listener);
+        ScriptService.deleteStoredScript(clusterService, request, listener);
     }
 
     @Override

@@ -24,16 +24,13 @@ import org.elasticsearch.transport.TransportService;
 
 public class TransportGetStoredScriptAction extends TransportMasterNodeReadAction<GetStoredScriptRequest, GetStoredScriptResponse> {
 
-    private final ScriptService scriptService;
-
     @Inject
     public TransportGetStoredScriptAction(
         TransportService transportService,
         ClusterService clusterService,
         ThreadPool threadPool,
         ActionFilters actionFilters,
-        IndexNameExpressionResolver indexNameExpressionResolver,
-        ScriptService scriptService
+        IndexNameExpressionResolver indexNameExpressionResolver
     ) {
         super(
             GetStoredScriptAction.NAME,
@@ -46,7 +43,6 @@ public class TransportGetStoredScriptAction extends TransportMasterNodeReadActio
             GetStoredScriptResponse::new,
             ThreadPool.Names.SAME
         );
-        this.scriptService = scriptService;
     }
 
     @Override
@@ -56,7 +52,7 @@ public class TransportGetStoredScriptAction extends TransportMasterNodeReadActio
         ClusterState state,
         ActionListener<GetStoredScriptResponse> listener
     ) throws Exception {
-        listener.onResponse(new GetStoredScriptResponse(request.id(), scriptService.getStoredScript(state, request)));
+        listener.onResponse(new GetStoredScriptResponse(request.id(), ScriptService.getStoredScript(state, request)));
     }
 
     @Override
@@ -192,7 +192,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
                 );
             }
             if (netNewSystemIndices.isEmpty() == false) {
-                throw systemIndices.netNewSystemIndexAccessException(threadContext, netNewSystemIndices);
+                throw SystemIndices.netNewSystemIndexAccessException(threadContext, netNewSystemIndices);
             }
         }
     }

@@ -96,17 +96,17 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
         final boolean isSystemIndex = mainDescriptor != null;
         final boolean isManagedSystemIndex = isSystemIndex && mainDescriptor.isAutomaticallyManaged();
         if (mainDescriptor != null && mainDescriptor.isNetNew()) {
-            final SystemIndexAccessLevel systemIndexAccessLevel = systemIndices.getSystemIndexAccessLevel(threadPool.getThreadContext());
+            final SystemIndexAccessLevel systemIndexAccessLevel = SystemIndices.getSystemIndexAccessLevel(threadPool.getThreadContext());
             if (systemIndexAccessLevel != SystemIndexAccessLevel.ALL) {
                 if (systemIndexAccessLevel == SystemIndexAccessLevel.RESTRICTED) {
                     if (systemIndices.getProductSystemIndexNamePredicate(threadPool.getThreadContext()).test(indexName) == false) {
-                        throw systemIndices.netNewSystemIndexAccessException(threadPool.getThreadContext(), List.of(indexName));
+                        throw SystemIndices.netNewSystemIndexAccessException(threadPool.getThreadContext(), List.of(indexName));
                     }
                 } else {
                     // BACKWARDS_COMPATIBLE_ONLY should never be a possibility here, it cannot be returned from getSystemIndexAccessLevel
                     assert systemIndexAccessLevel == SystemIndexAccessLevel.NONE
                         : "Expected no system index access but level is " + systemIndexAccessLevel;
-                    throw systemIndices.netNewSystemIndexAccessException(threadPool.getThreadContext(), List.of(indexName));
+                    throw SystemIndices.netNewSystemIndexAccessException(threadPool.getThreadContext(), List.of(indexName));
                 }
             }
         }
@@ -171,7 +171,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
             .waitForActiveShards(request.waitForActiveShards());
     }
 
-    private CreateIndexClusterStateUpdateRequest buildSystemIndexUpdateRequest(
+    private static CreateIndexClusterStateUpdateRequest buildSystemIndexUpdateRequest(
         CreateIndexRequest request,
         String cause,
         SystemIndexDescriptor descriptor
@@ -275,7 +275,7 @@ final class IndexDiskUsageAnalyzer {
         }
     }
 
-    private void readProximity(Terms terms, PostingsEnum postings) throws IOException {
+    private static void readProximity(Terms terms, PostingsEnum postings) throws IOException {
         if (terms.hasPositions()) {
             for (int pos = 0; pos < postings.freq(); pos++) {
                 postings.nextPosition();
@@ -286,7 +286,7 @@ final class IndexDiskUsageAnalyzer {
         }
     }
 
-    private BlockTermState getBlockTermState(TermsEnum termsEnum, BytesRef term) throws IOException {
+    private static BlockTermState getBlockTermState(TermsEnum termsEnum, BytesRef term) throws IOException {
         if (term != null && termsEnum.seekExact(term)) {
             final TermState termState = termsEnum.termState();
             if (termState instanceof final Lucene90PostingsFormat.IntBlockTermState blockTermState) {

@@ -78,7 +78,7 @@ public final class IndexDiskUsageStats implements ToXContentFragment, Writeable
         return indexSizeInBytes;
     }
 
-    private void checkByteSize(long bytes) {
+    private static void checkByteSize(long bytes) {
         if (bytes < 0) {
             throw new IllegalArgumentException("Bytes must be non-negative; got " + bytes);
         }
@@ -113,7 +113,7 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte
         return builder;
     }
 
-    private void addFieldMappingsToBuilder(XContentBuilder builder, Params params, Map<String, FieldMappingMetadata> mappings)
+    private static void addFieldMappingsToBuilder(XContentBuilder builder, Params params, Map<String, FieldMappingMetadata> mappings)
         throws IOException {
         for (Map.Entry<String, FieldMappingMetadata> fieldEntry : mappings.entrySet()) {
             builder.startObject(fieldEntry.getKey());

@@ -83,7 +83,7 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction<GetF
         }
     }
 
-    private GetFieldMappingsResponse merge(AtomicReferenceArray<Object> indexResponses) {
+    private static GetFieldMappingsResponse merge(AtomicReferenceArray<Object> indexResponses) {
         Map<String, Map<String, GetFieldMappingsResponse.FieldMappingMetadata>> mergedResponses = new HashMap<>();
         for (int i = 0; i < indexResponses.length(); i++) {
             Object element = indexResponses.get(i);

@@ -74,7 +74,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
         listener.onResponse(new GetMappingsResponse(mappings));
     }
 
-    private void checkCancellation(Task task) {
+    private static void checkCancellation(Task task) {
         if (task instanceof CancellableTask && ((CancellableTask) task).isCancelled()) {
             throw new CancellationException("Task cancelled");
         }
@@ -129,14 +129,14 @@ public class MetadataRolloverService {
         };
     }
 
-    public void validateIndexName(ClusterState state, String index) {
-        createIndexService.validateIndexName(index, state);
+    public static void validateIndexName(ClusterState state, String index) {
+        MetadataCreateIndexService.validateIndexName(index, state);
     }
 
     /**
     * Returns the names that rollover would use, but does not perform the actual rollover
     */
-    public NameResolution resolveRolloverNames(
+    public static NameResolution resolveRolloverNames(
         ClusterState currentState,
         String rolloverTarget,
         String newIndexName,
@@ -155,7 +155,7 @@ public class MetadataRolloverService {
 
     public record NameResolution(String sourceName, @Nullable String unresolvedName, String rolloverName) {}
 
-    private NameResolution resolveAliasRolloverNames(Metadata metadata, IndexAbstraction alias, String newIndexName) {
+    private static NameResolution resolveAliasRolloverNames(Metadata metadata, IndexAbstraction alias, String newIndexName) {
         final IndexMetadata writeIndex = metadata.index(alias.getWriteIndex());
         final String sourceProvidedName = writeIndex.getSettings()
             .get(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, writeIndex.getIndex().getName());
@@ -165,7 +165,7 @@ public class MetadataRolloverService {
         return new NameResolution(sourceIndexName, unresolvedName, rolloverIndexName);
     }
 
-    private NameResolution resolveDataStreamRolloverNames(Metadata metadata, IndexAbstraction.DataStream dataStream) {
+    private static NameResolution resolveDataStreamRolloverNames(Metadata metadata, IndexAbstraction.DataStream dataStream) {
         final DataStream ds = dataStream.getDataStream();
         final IndexMetadata originalWriteIndex = metadata.index(dataStream.getWriteIndex());
         return new NameResolution(originalWriteIndex.getIndex().getName(), null, ds.nextWriteIndexAndGeneration(metadata).v1());
@@ -192,7 +192,7 @@ public class MetadataRolloverService {
         final Boolean isHidden = IndexMetadata.INDEX_HIDDEN_SETTING.exists(createIndexRequest.settings())
             ? IndexMetadata.INDEX_HIDDEN_SETTING.get(createIndexRequest.settings())
             : null;
-        createIndexService.validateIndexName(rolloverIndexName, currentState); // fails if the index already exists
+        MetadataCreateIndexService.validateIndexName(rolloverIndexName, currentState); // fails if the index already exists
         checkNoDuplicatedAliasInIndexTemplate(metadata, rolloverIndexName, aliasName, isHidden);
         if (onlyValidate) {
             return new RolloverResult(rolloverIndexName, sourceIndexName, currentState);
@@ -260,7 +260,7 @@ public class MetadataRolloverService {
         final Tuple<String, Long> nextIndexAndGeneration = ds.nextWriteIndexAndGeneration(currentState.metadata());
         final String newWriteIndexName = nextIndexAndGeneration.v1();
         final long newGeneration = nextIndexAndGeneration.v2();
-        createIndexService.validateIndexName(newWriteIndexName, currentState); // fails if the index already exists
+        MetadataCreateIndexService.validateIndexName(newWriteIndexName, currentState); // fails if the index already exists
         if (onlyValidate) {
             return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), currentState);
         }
@@ -60,7 +60,6 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
 
     private static final Logger logger = LogManager.getLogger(TransportRolloverAction.class);
 
-    private final MetadataRolloverService rolloverService;
     private final Client client;
     private final RolloverExecutor rolloverTaskExecutor;
 
@@ -86,7 +85,6 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
             RolloverResponse::new,
             ThreadPool.Names.SAME
         );
-        this.rolloverService = rolloverService;
         this.client = client;
         this.rolloverTaskExecutor = new RolloverExecutor(
             allocationService,
@@ -138,7 +136,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
             ActionListener.wrap(statsResponse -> {
                 // Now that we have the stats for the cluster, we need to know the names of the index for which we should evaluate
                 // conditions, as well as what our newly created index *would* be.
-                final MetadataRolloverService.NameResolution trialRolloverNames = rolloverService.resolveRolloverNames(
+                final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames(
                     oldState,
                     rolloverRequest.getRolloverTarget(),
                     rolloverRequest.getNewIndexName(),
@@ -147,7 +145,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
                 final String trialSourceIndexName = trialRolloverNames.sourceName();
                 final String trialRolloverIndexName = trialRolloverNames.rolloverName();
 
-                rolloverService.validateIndexName(oldState, trialRolloverIndexName);
+                MetadataRolloverService.validateIndexName(oldState, trialRolloverIndexName);
 
                 // Evaluate the conditions, so that we can tell without a cluster state update whether a rollover would occur.
                 final Map<String, Boolean> trialConditionResults = evaluateConditions(
@@ -304,7 +302,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
             final var rolloverRequest = rolloverTask.rolloverRequest();
 
             // Regenerate the rollover names, as a rollover could have happened in between the pre-check and the cluster state update
-            final var rolloverNames = rolloverService.resolveRolloverNames(
+            final var rolloverNames = MetadataRolloverService.resolveRolloverNames(
                 currentState,
                 rolloverRequest.getRolloverTarget(),
                 rolloverRequest.getNewIndexName(),
@@ -286,7 +286,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
         /**
         * A shard exists/existed in a node only if shard state file exists in the node
         */
-        private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
+        private static boolean shardExistsInNode(final NodeGatewayStartedShards response) {
            return response.storeException() != null || response.allocationId() != null;
        }
 

@@ -28,6 +28,7 @@ import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettingProvider;
 import org.elasticsearch.index.IndexSettingProviders;
 import org.elasticsearch.index.mapper.DocumentMapper;
@@ -256,7 +257,7 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea
                 // the context is only used for validation so it's fine to pass fake values for the
                 // shard id and the current timestamp
                 tempIndexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()),
-                tempIndexService.dateMathExpressionResolverAt(),
+                IndexService.dateMathExpressionResolverAt(),
                 systemIndices::isSystemName
             )
         );
@@ -222,7 +222,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<
         return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
     }
 
-    private String explain(SearchContext context, boolean rewritten) {
+    private static String explain(SearchContext context, boolean rewritten) {
         Query query = rewritten ? context.rewrittenQuery() : context.query();
         if (rewritten && query instanceof MatchNoDocsQuery) {
             return context.parsedQuery().query().toString();

@@ -419,7 +419,7 @@ public final class BulkRequestParser {
             }
         }
 
-    private void checkBulkActionIsProperlyClosed(XContentParser parser) throws IOException {
+    private static void checkBulkActionIsProperlyClosed(XContentParser parser) throws IOException {
         XContentParser.Token token;
         try {
             token = parser.nextToken();
@@ -414,11 +414,11 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
         }
     }
 
-    boolean isOnlySystem(BulkRequest request, SortedMap<String, IndexAbstraction> indicesLookup, SystemIndices systemIndices) {
+    static boolean isOnlySystem(BulkRequest request, SortedMap<String, IndexAbstraction> indicesLookup, SystemIndices systemIndices) {
         return request.getIndices().stream().allMatch(indexName -> isSystemIndex(indicesLookup, systemIndices, indexName));
     }
 
-    private boolean isSystemIndex(SortedMap<String, IndexAbstraction> indicesLookup, SystemIndices systemIndices, String indexName) {
+    private static boolean isSystemIndex(SortedMap<String, IndexAbstraction> indicesLookup, SystemIndices systemIndices, String indexName) {
         final IndexAbstraction abstraction = indicesLookup.get(indexName);
         if (abstraction != null) {
             return abstraction.isSystem();
@@ -435,7 +435,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
         client.execute(AutoCreateAction.INSTANCE, createIndexRequest, listener);
     }
 
-    private boolean setResponseFailureIfIndexMatches(
+    private static boolean setResponseFailureIfIndexMatches(
         AtomicArray<BulkItemResponse> responses,
         int idx,
         DocWriteRequest<?> request,

@@ -198,7 +198,7 @@ public class ExplainResponse extends ActionResponse implements StatusToXContentO
         return builder;
     }
 
-    private void buildExplanation(XContentBuilder builder, Explanation explanation) throws IOException {
+    private static void buildExplanation(XContentBuilder builder, Explanation explanation) throws IOException {
         builder.field(VALUE.getPreferredName(), explanation.getValue());
         builder.field(DESCRIPTION.getPreferredName(), explanation.getDescription());
         Explanation[] innerExps = explanation.getDetails();
@@ -172,7 +172,7 @@ class FieldCapabilitiesFetcher {
         return true;
     }
 
-    private boolean canMatchShard(
+    private static boolean canMatchShard(
         ShardId shardId,
         QueryBuilder indexFilter,
         long nowInMillis,

@@ -169,7 +169,7 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
         }
     }
 
-    private void checkIndexBlocks(ClusterState clusterState, String[] concreteIndices) {
+    private static void checkIndexBlocks(ClusterState clusterState, String[] concreteIndices) {
         clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
         for (String index : concreteIndices) {
             clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
@@ -258,7 +258,7 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
         return new FieldCapabilitiesResponse(indices, Collections.unmodifiableMap(responseMap), failures);
     }
 
-    private void addUnmappedFields(String[] indices, String field, Map<String, FieldCapabilities.Builder> typeMap) {
+    private static void addUnmappedFields(String[] indices, String field, Map<String, FieldCapabilities.Builder> typeMap) {
         final Set<String> mappedIndices = new HashSet<>();
         typeMap.values().forEach(t -> t.getIndices(mappedIndices));
         if (mappedIndices.size() != indices.length) {
@@ -32,7 +32,7 @@ class SimulateExecutionService {
         this.threadPool = threadPool;
     }
 
-    void executeDocument(
+    static void executeDocument(
         Pipeline pipeline,
         IngestDocument ingestDocument,
         boolean verbose,

@@ -285,7 +285,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
         return true;
     }
 
-    private boolean assertExecuteOnStartThread() {
+    private static boolean assertExecuteOnStartThread() {
         // Ensure that the current code has the following stacktrace:
         // AbstractSearchAsyncAction#start -> AbstractSearchAsyncAction#executePhase -> AbstractSearchAsyncAction#performPhaseOnShard
         final StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace();
@@ -106,7 +106,7 @@ final class ExpandSearchPhase extends SearchPhase {
         }
     }
 
-    private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) {
+    private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) {
         SearchSourceBuilder groupSource = new SearchSourceBuilder();
         groupSource.from(options.getFrom());
         groupSource.size(options.getSize());
@@ -36,7 +36,6 @@ import java.util.function.BiFunction;
*/
final class FetchSearchPhase extends SearchPhase {
private final ArraySearchPhaseResults<FetchSearchResult> fetchResults;
private final SearchPhaseController searchPhaseController;
private final AtomicArray<SearchPhaseResult> queryResults;
private final BiFunction<InternalSearchResponse, AtomicArray<SearchPhaseResult>, SearchPhase> nextPhaseFactory;
private final SearchPhaseContext context;
@@ -45,15 +44,9 @@ final class FetchSearchPhase extends SearchPhase {
private final SearchProgressListener progressListener;
private final AggregatedDfs aggregatedDfs;

FetchSearchPhase(
SearchPhaseResults<SearchPhaseResult> resultConsumer,
SearchPhaseController searchPhaseController,
AggregatedDfs aggregatedDfs,
SearchPhaseContext context
) {
FetchSearchPhase(SearchPhaseResults<SearchPhaseResult> resultConsumer, AggregatedDfs aggregatedDfs, SearchPhaseContext context) {
this(
resultConsumer,
searchPhaseController,
aggregatedDfs,
context,
(response, queryPhaseResults) -> new ExpandSearchPhase(
@@ -66,7 +59,6 @@ final class FetchSearchPhase extends SearchPhase {

FetchSearchPhase(
SearchPhaseResults<SearchPhaseResult> resultConsumer,
SearchPhaseController searchPhaseController,
AggregatedDfs aggregatedDfs,
SearchPhaseContext context,
BiFunction<InternalSearchResponse, AtomicArray<SearchPhaseResult>, SearchPhase> nextPhaseFactory
@@ -81,7 +73,6 @@ final class FetchSearchPhase extends SearchPhase {
);
}
this.fetchResults = new ArraySearchPhaseResults<>(resultConsumer.getNumShards());
this.searchPhaseController = searchPhaseController;
this.queryResults = resultConsumer.getAtomicArray();
this.aggregatedDfs = aggregatedDfs;
this.nextPhaseFactory = nextPhaseFactory;
@@ -116,7 +107,6 @@ final class FetchSearchPhase extends SearchPhase {
final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce();
final boolean queryAndFetchOptimization = queryResults.length() == 1;
final Runnable finishPhase = () -> moveToNextPhase(
searchPhaseController,
queryResults,
reducedQueryPhase,
queryAndFetchOptimization ? queryResults : fetchResults.getAtomicArray()
@@ -128,7 +118,7 @@ final class FetchSearchPhase extends SearchPhase {
finishPhase.run();
} else {
ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs();
final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, scoreDocs);
final IntArrayList[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs);
// no docs to fetch -- sidestep everything and return
if (scoreDocs.length == 0) {
// we have to release contexts here to free up resources
@@ -136,7 +126,7 @@ final class FetchSearchPhase extends SearchPhase {
finishPhase.run();
} else {
final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch
? searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards)
? SearchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards)
: null;
final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(
fetchResults,
@@ -272,12 +262,11 @@ final class FetchSearchPhase extends SearchPhase {
}

private void moveToNextPhase(
SearchPhaseController searchPhaseController,
AtomicArray<SearchPhaseResult> queryPhaseResults,
SearchPhaseController.ReducedQueryPhase reducedQueryPhase,
AtomicArray<? extends SearchPhaseResult> fetchResultsArr
) {
final InternalSearchResponse internalResponse = searchPhaseController.merge(
final InternalSearchResponse internalResponse = SearchPhaseController.merge(
context.getRequest().scroll() != null,
reducedQueryPhase,
fetchResultsArr.asList(),

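The PR description notes that changes like these can make lambdas non-capturing. The reason: a lambda that calls an instance method (or reads a field) must capture `this`, while one that only calls static methods captures nothing, so the runtime can cache and reuse a single lambda instance instead of potentially allocating one per evaluation. A minimal demo of the distinction (illustrative only, not from this commit):

    import java.util.function.IntUnaryOperator;

    class CaptureDemo {
        private int bias = 1;

        int addBiasInstance(int x) { return x + bias; } // needs `this`
        static int addOne(int x) { return x + 1; }      // needs nothing

        // Captures this: each evaluation may allocate a fresh lambda object.
        IntUnaryOperator capturing() { return x -> addBiasInstance(x); }

        // Non-capturing: the JVM can hand back one cached instance every time.
        static IntUnaryOperator nonCapturing() { return x -> addOne(x); }
    }
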
@@ -55,7 +55,6 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas

private final Executor executor;
private final CircuitBreaker circuitBreaker;
private final SearchPhaseController controller;
private final SearchProgressListener progressListener;
private final AggregationReduceContext.Builder aggReduceContextBuilder;

@@ -84,7 +83,6 @@ public class QueryPhaseResultConsumer extends ArraySearchPhas
super(expectedResultSize);
this.executor = executor;
this.circuitBreaker = circuitBreaker;
this.controller = controller;
this.progressListener = progressListener;
this.aggReduceContextBuilder = controller.getReduceContext(isCanceled, request);
this.topNSize = getTopDocsSize(request);
@@ -127,9 +125,9 @@ public class QueryPhaseResultConsumer extends ArraySearchPhas
long breakerSize = pendingMerges.circuitBreakerBytes;
if (hasAggs) {
// Add an estimate of the final reduce size
breakerSize = pendingMerges.addEstimateAndMaybeBreak(pendingMerges.estimateRamBytesUsedForReduce(breakerSize));
breakerSize = pendingMerges.addEstimateAndMaybeBreak(PendingMerges.estimateRamBytesUsedForReduce(breakerSize));
}
SearchPhaseController.ReducedQueryPhase reducePhase = controller.reducedQueryPhase(
SearchPhaseController.ReducedQueryPhase reducePhase = SearchPhaseController.reducedQueryPhase(
results.asList(),
aggsList,
topDocsList,
@@ -323,7 +321,7 @@ public class QueryPhaseResultConsumer extends ArraySearchPhas
* off for some aggregations but it is corrected with the real size after
* the reduce completes.
*/
long estimateRamBytesUsedForReduce(long size) {
static long estimateRamBytesUsedForReduce(long size) {
return Math.round(1.5d * size - size);
}

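Worked through, Math.round(1.5d * size - size) is just half the current estimate, rounded: for size = 1_000_000 bytes it reserves an extra 500_000 bytes on the circuit breaker ahead of the final reduce, and for an odd size such as 1001 it rounds 500.5 up to 501. As the javadoc above says, this guess is corrected with the real size once the reduce completes.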
@@ -25,8 +25,6 @@ import java.util.function.BiFunction;

final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {

private final SearchPhaseController searchPhaseController;

private final QueryPhaseResultConsumer queryPhaseResultConsumer;

SearchDfsQueryThenFetchAsyncAction(
@@ -35,7 +33,6 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
final BiFunction<String, String, Transport.Connection> nodeIdToConnection,
final Map<String, AliasFilter> aliasFilter,
final Map<String, Float> concreteIndexBoosts,
final SearchPhaseController searchPhaseController,
final Executor executor,
final QueryPhaseResultConsumer queryPhaseResultConsumer,
final SearchRequest request,
@@ -65,7 +62,6 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
clusters
);
this.queryPhaseResultConsumer = queryPhaseResultConsumer;
this.searchPhaseController = searchPhaseController;
SearchProgressListener progressListener = task.getProgressListener();
if (progressListener != SearchProgressListener.NOOP) {
notifyListShards(progressListener, clusters, request.source());
@@ -89,13 +85,13 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
@Override
protected SearchPhase getNextPhase(final SearchPhaseResults<DfsSearchResult> results, SearchPhaseContext context) {
final List<DfsSearchResult> dfsSearchResults = results.getAtomicArray().asList();
final AggregatedDfs aggregatedDfs = searchPhaseController.aggregateDfs(dfsSearchResults);
final AggregatedDfs aggregatedDfs = SearchPhaseController.aggregateDfs(dfsSearchResults);

return new DfsQueryPhase(
dfsSearchResults,
aggregatedDfs,
queryPhaseResultConsumer,
(queryResults) -> new FetchSearchPhase(queryResults, searchPhaseController, aggregatedDfs, context),
(queryResults) -> new FetchSearchPhase(queryResults, aggregatedDfs, context),
context
);
}

@@ -69,7 +69,7 @@ public final class SearchPhaseController {
this.requestToAggReduceContextBuilder = requestToAggReduceContextBuilder;
}

public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {
public static AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {
Map<Term, TermStatistics> termStatistics = new HashMap<>();
Map<String, CollectionStatistics> fieldStatistics = new HashMap<>();
long aggMaxDoc = 0;
@@ -216,7 +216,7 @@ public final class SearchPhaseController {
}
}

public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase, int numShards) {
public static ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase, int numShards) {
final ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards];
if (reducedQueryPhase.isEmptyResult == false) {
final ScoreDoc[] sortedScoreDocs = reducedQueryPhase.sortedTopDocs.scoreDocs;
@@ -235,7 +235,7 @@ public final class SearchPhaseController {
/**
* Builds an array, with potential null elements, with docs to load.
*/
public IntArrayList[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) {
public static IntArrayList[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) {
IntArrayList[] docIdsToLoad = new IntArrayList[numShards];
for (ScoreDoc shardDoc : shardDocs) {
IntArrayList shardDocIdsToLoad = docIdsToLoad[shardDoc.shardIndex];
@@ -254,7 +254,7 @@ public final class SearchPhaseController {
* Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named
* completion suggestion ordered by suggestion name
*/
public InternalSearchResponse merge(
public static InternalSearchResponse merge(
boolean ignoreFrom,
ReducedQueryPhase reducedQueryPhase,
Collection<? extends SearchPhaseResult> fetchResults,
@@ -298,7 +298,7 @@ public final class SearchPhaseController {
return reducedQueryPhase.buildResponse(hits, fetchResults);
}

private SearchHits getHits(
private static SearchHits getHits(
ReducedQueryPhase reducedQueryPhase,
boolean ignoreFrom,
Collection<? extends SearchPhaseResult> fetchResults,
@@ -368,7 +368,7 @@ public final class SearchPhaseController {
* Reduces the given query results and consumes all aggregations and profile results.
* @param queryResults a list of non-null query shard results
*/
ReducedQueryPhase reducedScrollQueryPhase(Collection<? extends SearchPhaseResult> queryResults) {
static ReducedQueryPhase reducedScrollQueryPhase(Collection<? extends SearchPhaseResult> queryResults) {
AggregationReduceContext.Builder aggReduceContextBuilder = new AggregationReduceContext.Builder() {
@Override
public AggregationReduceContext forPartialReduction() {
@@ -405,7 +405,7 @@ public final class SearchPhaseController {
* @see QuerySearchResult#consumeAggs()
* @see QuerySearchResult#consumeProfileResult()
*/
ReducedQueryPhase reducedQueryPhase(
static ReducedQueryPhase reducedQueryPhase(
Collection<? extends SearchPhaseResult> queryResults,
List<InternalAggregations> bufferedAggs,
List<TopDocs> bufferedTopDocs,

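For context on the now-static fillDocIdsToLoad: it regroups the globally merged ScoreDocs by originating shard so the fetch phase knows which Lucene doc ids to load from each shard. A sketch of the idea under the signature shown above (the lines past the visible hunk are paraphrased, not copied from the commit):

    import com.carrotsearch.hppc.IntArrayList;
    import org.apache.lucene.search.ScoreDoc;

    // Sketch: bucket each merged hit's doc id under its shard's slot,
    // leaving slots null for shards that contributed no top hits.
    public static IntArrayList[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) {
        IntArrayList[] docIdsToLoad = new IntArrayList[numShards];
        for (ScoreDoc shardDoc : shardDocs) {
            IntArrayList shardDocIdsToLoad = docIdsToLoad[shardDoc.shardIndex];
            if (shardDocIdsToLoad == null) {
                shardDocIdsToLoad = docIdsToLoad[shardDoc.shardIndex] = new IntArrayList();
            }
            shardDocIdsToLoad.add(shardDoc.doc);
        }
        return docIdsToLoad;
    }
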
@@ -29,7 +29,6 @@ import static org.elasticsearch.action.search.SearchPhaseController.getTopDocsSi

class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPhaseResult> {

private final SearchPhaseController searchPhaseController;
private final SearchProgressListener progressListener;

// informations to track the best bottom top doc globally.
@@ -43,7 +42,6 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPh
final BiFunction<String, String, Transport.Connection> nodeIdToConnection,
final Map<String, AliasFilter> aliasFilter,
final Map<String, Float> concreteIndexBoosts,
final SearchPhaseController searchPhaseController,
final Executor executor,
final QueryPhaseResultConsumer resultConsumer,
final SearchRequest request,
@@ -74,7 +72,6 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPh
);
this.topDocsSize = getTopDocsSize(request);
this.trackTotalHitsUpTo = request.resolveTrackTotalHitsUpTo();
this.searchPhaseController = searchPhaseController;
this.progressListener = task.getProgressListener();

// register the release of the query consumer to free up the circuit breaker memory
@@ -125,7 +122,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPh

@Override
protected SearchPhase getNextPhase(final SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) {
return new FetchSearchPhase(results, searchPhaseController, null, this);
return new FetchSearchPhase(results, null, this);
}

private ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) {

@@ -260,7 +260,7 @@ final class SearchResponseMerger {
return clusterAlias1.compareTo(clusterAlias2);
}

private ShardId extractShardId(ShardSearchFailure failure) {
private static ShardId extractShardId(ShardSearchFailure failure) {
SearchShardTarget shard = failure.shard();
if (shard != null) {
return shard.getShardId();

@@ -46,7 +46,6 @@ abstract class SearchScrollAsyncAction<T extends SearchPhaseResult> implements R
protected final ActionListener<SearchResponse> listener;
protected final ParsedScrollId scrollId;
protected final DiscoveryNodes nodes;
protected final SearchPhaseController searchPhaseController;
protected final SearchScrollRequest request;
protected final SearchTransportService searchTransportService;
private final long startTime;
@@ -58,7 +57,6 @@ abstract class SearchScrollAsyncAction<T extends SearchPhaseResult> implements R
Logger logger,
DiscoveryNodes nodes,
ActionListener<SearchResponse> listener,
SearchPhaseController searchPhaseController,
SearchScrollRequest request,
SearchTransportService searchTransportService
) {
@@ -68,7 +66,6 @@ abstract class SearchScrollAsyncAction<T extends SearchPhaseResult> implements R
this.logger = logger;
this.listener = listener;
this.nodes = nodes;
this.searchPhaseController = searchPhaseController;
this.request = request;
this.searchTransportService = searchTransportService;
}
@@ -244,7 +241,7 @@ abstract class SearchScrollAsyncAction<T extends SearchPhaseResult> implements R
final AtomicArray<? extends SearchPhaseResult> fetchResults
) {
try {
final InternalSearchResponse internalResponse = searchPhaseController.merge(
final InternalSearchResponse internalResponse = SearchPhaseController.merge(
true,
queryPhase,
fetchResults.asList(),

@@ -29,13 +29,12 @@ final class SearchScrollQueryAndFetchAsyncAction extends SearchScrollAsyncAction
Logger logger,
ClusterService clusterService,
SearchTransportService searchTransportService,
SearchPhaseController searchPhaseController,
SearchScrollRequest request,
SearchTask task,
ParsedScrollId scrollId,
ActionListener<SearchResponse> listener
) {
super(scrollId, logger, clusterService.state().nodes(), listener, searchPhaseController, request, searchTransportService);
super(scrollId, logger, clusterService.state().nodes(), listener, request, searchTransportService);
this.task = task;
this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
}
@@ -51,7 +50,7 @@ final class SearchScrollQueryAndFetchAsyncAction extends SearchScrollAsyncAction

@Override
protected SearchPhase moveToNextPhase(BiFunction<String, String, DiscoveryNode> clusterNodeLookup) {
return sendResponsePhase(searchPhaseController.reducedScrollQueryPhase(queryFetchResults.asList()), queryFetchResults);
return sendResponsePhase(SearchPhaseController.reducedScrollQueryPhase(queryFetchResults.asList()), queryFetchResults);
}

@Override

@@ -37,13 +37,12 @@ final class SearchScrollQueryThenFetchAsyncActio
Logger logger,
ClusterService clusterService,
SearchTransportService searchTransportService,
SearchPhaseController searchPhaseController,
SearchScrollRequest request,
SearchTask task,
ParsedScrollId scrollId,
ActionListener<SearchResponse> listener
) {
super(scrollId, logger, clusterService.state().nodes(), listener, searchPhaseController, request, searchTransportService);
super(scrollId, logger, clusterService.state().nodes(), listener, request, searchTransportService);
this.task = task;
this.fetchResults = new AtomicArray<>(scrollId.getContext().length);
this.queryResults = new AtomicArray<>(scrollId.getContext().length);
@@ -67,7 +66,7 @@ final class SearchScrollQueryThenFetchAsyncActio
return new SearchPhase("fetch") {
@Override
public void run() {
final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedScrollQueryPhase(
final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = SearchPhaseController.reducedScrollQueryPhase(
queryResults.asList()
);
ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs();
@@ -76,8 +75,8 @@ final class SearchScrollQueryThenFetchAsyncActio
return;
}

final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(queryResults.length(), scoreDocs);
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
final IntArrayList[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(queryResults.length(), scoreDocs);
final ScoreDoc[] lastEmittedDocPerShard = SearchPhaseController.getLastEmittedDocPerShard(
reducedQueryPhase,
queryResults.length()
);

@@ -611,7 +611,7 @@ public class SearchTransportService {
}
}

final class ConnectionCountingHandler<Response extends TransportResponse> extends ActionListenerResponseHandler<Response> {
static final class ConnectionCountingHandler<Response extends TransportResponse> extends ActionListenerResponseHandler<Response> {
private final Map<String, Long> clientConnections;
private final String nodeId;

@@ -1191,7 +1191,6 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
connectionLookup,
aliasFilter,
concreteIndexBoosts,
searchPhaseController,
executor,
queryResultConsumer,
searchRequest,
@@ -1208,7 +1207,6 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
connectionLookup,
aliasFilter,
concreteIndexBoosts,
searchPhaseController,
executor,
queryResultConsumer,
searchRequest,

@@ -13,7 +13,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportService;

@@ -25,20 +24,17 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc

private final ClusterService clusterService;
private final SearchTransportService searchTransportService;
private final SearchPhaseController searchPhaseController;

@Inject
public TransportSearchScrollAction(
TransportService transportService,
ClusterService clusterService,
ActionFilters actionFilters,
SearchTransportService searchTransportService,
SearchPhaseController searchPhaseController
SearchTransportService searchTransportService
) {
super(SearchScrollAction.NAME, transportService, actionFilters, (Writeable.Reader<SearchScrollRequest>) SearchScrollRequest::new);
super(SearchScrollAction.NAME, transportService, actionFilters, SearchScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = searchTransportService;
this.searchPhaseController = searchPhaseController;
}

@Override
@@ -50,7 +46,6 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
logger,
clusterService,
searchTransportService,
searchPhaseController,
request,
(SearchTask) task,
scrollId,
@@ -61,7 +56,6 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
logger,
clusterService,
searchTransportService,
searchPhaseController,
request,
(SearchTask) task,
scrollId,

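A side simplification visible in this file: the explicit (Writeable.Reader&lt;SearchScrollRequest&gt;) cast on SearchScrollRequest::new goes away, which in turn lets the Writeable import be dropped. The likely reason the cast is no longer needed, as an inference rather than something stated in the commit, is that the super constructor's parameter type already fixes the target functional interface, so the method reference's type is inferred; such casts only earn their keep when overload resolution would otherwise be ambiguous.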
@@ -119,7 +119,7 @@ public final class AutoCreateIndex {
this.autoCreate = autoCreate;
}

private ComposableIndexTemplate findTemplate(String indexName, Metadata metadata) {
private static ComposableIndexTemplate findTemplate(String indexName, Metadata metadata) {
final String templateName = MetadataIndexTemplateService.findV2Template(metadata, indexName, false);
return metadata.templatesV2().get(templateName);
}

@@ -118,7 +118,7 @@ public abstract class RetryableAction<Response> {
return Math.min(previousDelayBound * 2, Integer.MAX_VALUE);
}

protected long minimumDelayMillis() {
protected static long minimumDelayMillis() {
return 0L;
}

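For reference, the visible line above is a classic capped exponential backoff: each retry doubles the previous delay bound, so assuming an initial bound of 50 ms (an illustrative value, not shown in this hunk) the sequence runs 50, 100, 200, 400, ... until Math.min(previousDelayBound * 2, Integer.MAX_VALUE) pins it at Integer.MAX_VALUE milliseconds.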
@@ -313,13 +313,13 @@ public abstract class TransportReplicationAction<
return null;
}

protected boolean retryPrimaryException(final Throwable e) {
protected static boolean retryPrimaryException(final Throwable e) {
return e.getClass() == ReplicationOperation.RetryOnPrimaryException.class
|| TransportActions.isShardNotAvailableException(e)
|| isRetryableClusterBlockException(e);
}

boolean isRetryableClusterBlockException(final Throwable e) {
static boolean isRetryableClusterBlockException(final Throwable e) {
if (e instanceof ClusterBlockException) {
return ((ClusterBlockException) e).retryable();
}

@@ -85,7 +85,7 @@ public abstract class TransportInstanceSingleOperationAction<

protected abstract Response newResponse(StreamInput in) throws IOException;

protected ClusterBlockException checkGlobalBlock(ClusterState state) {
protected static ClusterBlockException checkGlobalBlock(ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
}

@@ -102,7 +102,7 @@ public abstract class TransportInstanceSingleOperationAction<
return false;
}

protected TransportRequestOptions transportOptions() {
protected static TransportRequestOptions transportOptions() {
return TransportRequestOptions.EMPTY;
}

@@ -108,7 +108,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ

protected abstract boolean resolveIndex(Request request);

protected ClusterBlockException checkGlobalBlock(ClusterState state) {
protected static ClusterBlockException checkGlobalBlock(ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
}

@@ -396,7 +396,7 @@ public final class TermVectorsFields extends Fields {
}
}

private final class TermVectorPostingsEnum extends PostingsEnum {
private static final class TermVectorPostingsEnum extends PostingsEnum {
private boolean hasPositions;
private boolean hasOffsets;
private boolean hasPayloads;
@@ -492,7 +492,7 @@ public final class TermVectorsFields extends Fields {
// the writer writes a 0 for -1 or value +1 and accordingly we have to
// subtract 1 again
// adds one to mock not existing term freq
int readPotentiallyNegativeVInt(StreamInput stream) throws IOException {
static int readPotentiallyNegativeVInt(StreamInput stream) throws IOException {
return stream.readVInt() - 1;
}

@@ -500,7 +500,7 @@ public final class TermVectorsFields extends Fields {
// case, the writer writes a 0 for -1 or value +1 and accordingly we have to
// subtract 1 again
// adds one to mock not existing term freq
long readPotentiallyNegativeVLong(StreamInput stream) throws IOException {
static long readPotentiallyNegativeVLong(StreamInput stream) throws IOException {
return stream.readVLong() - 1;
}
}

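The comments above describe a value+1 trick: a term frequency may legitimately be -1 (meaning "not present"), but the variable-length encoding wants a non-negative value, so the writer shifts everything up by one and these readers shift it back down. A hypothetical writer side of the round trip (the actual writer lives elsewhere in the codebase; the method name here is invented for illustration):

    // Hypothetical encoder: -1 becomes 0, 0 becomes 1, ... keeping the VInt non-negative.
    static void writePotentiallyNegativeVInt(StreamOutput out, int value) throws IOException {
        out.writeVInt(value + 1);
    }
    // readPotentiallyNegativeVInt(...) above then restores the original: stream.readVInt() - 1
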
@@ -229,7 +229,7 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj
builder.endObject();
}

private void buildTermStatistics(XContentBuilder builder, TermsEnum termIter) throws IOException {
private static void buildTermStatistics(XContentBuilder builder, TermsEnum termIter) throws IOException {
// write term statistics. At this point we do not naturally have a
// boolean that says if these values actually were requested.
// However, we can assume that they were not if the statistic values are
@@ -302,7 +302,7 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj
}
}

private void buildFieldStatistics(XContentBuilder builder, Terms curTerms) throws IOException {
private static void buildFieldStatistics(XContentBuilder builder, Terms curTerms) throws IOException {
long sumDocFreq = curTerms.getSumDocFreq();
int docCount = curTerms.getDocCount();
long sumTotalTermFrequencies = curTerms.getSumTotalTermFreq();

@@ -180,7 +180,7 @@ public class UpdateHelper {
* Prepare the request for merging the existing document with a new one, can optionally detect a noop change. Returns a {@code Result}
* containing a new {@code IndexRequest} to be executed on the primary and replicas.
*/
Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResult getResult, boolean detectNoop) {
static Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResult getResult, boolean detectNoop) {
final IndexRequest currentRequest = request.doc();
final String routing = calculateRouting(getResult, currentRequest);
final Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);

@@ -299,7 +299,7 @@ final class BootstrapChecks {

// visible for testing
long getMaxFileDescriptorCount() {
return ProcessProbe.getInstance().getMaxFileDescriptorCount();
return ProcessProbe.getMaxFileDescriptorCount();
}

}
@@ -458,7 +458,7 @@ final class BootstrapChecks {
}

@SuppressForbidden(reason = "access /proc/sys/vm/max_map_count")
private Path getProcSysVmMaxMapCountPath() {
private static Path getProcSysVmMaxMapCountPath() {
return PathUtils.get("/proc/sys/vm/max_map_count");
}

@@ -468,12 +468,12 @@ final class BootstrapChecks {
}

// visible for testing
String readProcSysVmMaxMapCount(final BufferedReader bufferedReader) throws IOException {
static String readProcSysVmMaxMapCount(final BufferedReader bufferedReader) throws IOException {
return bufferedReader.readLine();
}

// visible for testing
long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws NumberFormatException {
static long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws NumberFormatException {
return Long.parseLong(procSysVmMaxMapCount);
}

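A note on the split above: keeping readProcSysVmMaxMapCount (the I/O) separate from parseProcSysVmMaxMapCount (the parsing) leaves each half independently testable, and both can now be static since neither touches instance state. Worked through, parseProcSysVmMaxMapCount("262144") simply returns 262144L, 262144 being the vm.max_map_count value Elasticsearch's setup documentation commonly recommends.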
@@ -84,7 +84,7 @@ final class Spawner implements Closeable {
* Attempt to spawn the controller daemon for a given module. The spawned process will remain connected to this JVM via its stdin,
* stdout, and stderr streams, but the references to these streams are not available to code outside this package.
*/
private Process spawnNativeController(final Path spawnPath, final Path tmpPath, final boolean inheritIo) throws IOException {
private static Process spawnNativeController(final Path spawnPath, final Path tmpPath, final boolean inheritIo) throws IOException {
final String command;
if (Constants.WINDOWS) {
/*

@@ -396,7 +396,7 @@ public class RestoreInProgress extends AbstractNamedDiffable<Custom> implements
* @param entry restore operation metadata
* @param builder XContent builder
*/
public void toXContent(Entry entry, XContentBuilder builder) throws IOException {
public static void toXContent(Entry entry, XContentBuilder builder) throws IOException {
builder.startObject();
builder.field("snapshot", entry.snapshot().getSnapshotId().getName());
builder.field("repository", entry.snapshot().getRepository());

@@ -1261,7 +1261,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
return builder;
}

private void writeShardSnapshotStatus(XContentBuilder builder, ToXContent indexId, int shardId, ShardSnapshotStatus status)
private static void writeShardSnapshotStatus(XContentBuilder builder, ToXContent indexId, int shardId, ShardSnapshotStatus status)
throws IOException {
builder.startObject();
builder.field("index", indexId);

@@ -223,7 +223,7 @@ public class ClusterFormationFailureHelper {
);
}

private String describeQuorum(VotingConfiguration votingConfiguration) {
private static String describeQuorum(VotingConfiguration votingConfiguration) {
final Set<String> nodeIds = votingConfiguration.getNodeIds();
assert nodeIds.isEmpty() == false;
final int requiredNodes = nodeIds.size() / 2 + 1;

@@ -144,7 +144,7 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
}
}

protected void confirm(Terminal terminal, String msg) {
protected static void confirm(Terminal terminal, String msg) {
terminal.println(msg);
String text = terminal.readText("Confirm [y/N] ");
if (text.equalsIgnoreCase("y") == false) {
@@ -180,7 +180,7 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand {
protected abstract void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException,
UserException;

protected NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) {
protected static NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) {
return Arrays.stream(dataPaths).map(ElasticsearchNodeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new);
}

@@ -23,8 +23,6 @@ import java.util.stream.Collectors;
*/
public final class ClusterNameExpressionResolver {

private final WildcardExpressionResolver wildcardResolver = new WildcardExpressionResolver();

/**
* Resolves the provided cluster expression to matching cluster names. Supports exact or wildcard matches.
* Throws {@link NoSuchRemoteClusterException} in case there are no registered remote clusters matching the provided expression.
@@ -34,11 +32,11 @@ public final class ClusterNameExpressionResolver {
* @return the resolved cluster aliases.
* @throws NoSuchRemoteClusterException if there are no remote clusters matching the provided expression
*/
public List<String> resolveClusterNames(Set<String> remoteClusters, String clusterExpression) {
public static List<String> resolveClusterNames(Set<String> remoteClusters, String clusterExpression) {
if (remoteClusters.contains(clusterExpression)) {
return Collections.singletonList(clusterExpression);
} else if (Regex.isSimpleMatchPattern(clusterExpression)) {
return wildcardResolver.resolve(remoteClusters, clusterExpression);
return WildcardExpressionResolver.resolve(remoteClusters, clusterExpression);
} else {
throw new NoSuchRemoteClusterException(clusterExpression);
}
@@ -46,7 +44,7 @@ public final class ClusterNameExpressionResolver {

private static class WildcardExpressionResolver {

private List<String> resolve(Set<String> remoteClusters, String clusterExpression) {
private static List<String> resolve(Set<String> remoteClusters, String clusterExpression) {
if (isTrivialWildcard(clusterExpression)) {
return resolveTrivialWildcard(remoteClusters);
}
@@ -59,11 +57,11 @@ public final class ClusterNameExpressionResolver {
}
}

private boolean isTrivialWildcard(String clusterExpression) {
private static boolean isTrivialWildcard(String clusterExpression) {
return Regex.isMatchAllPattern(clusterExpression);
}

private List<String> resolveTrivialWildcard(Set<String> remoteClusters) {
private static List<String> resolveTrivialWildcard(Set<String> remoteClusters) {
return new ArrayList<>(remoteClusters);
}

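With resolveClusterNames now static, call sites need no resolver instance at all (which is why the wildcardResolver field above could be deleted). A hypothetical usage based on the logic visible in this hunk; the cluster names are invented for illustration:

    Set<String> remotes = Set.of("leader", "follower", "metrics-eu");
    ClusterNameExpressionResolver.resolveClusterNames(remotes, "leader");     // exact match -> [leader]
    ClusterNameExpressionResolver.resolveClusterNames(remotes, "metrics-*");  // wildcard -> [metrics-eu]
    ClusterNameExpressionResolver.resolveClusterNames(remotes, "missing");    // throws NoSuchRemoteClusterException
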
@@ -359,7 +359,7 @@ public class ComposableIndexTemplate implements SimpleDiffable<ComposableIndexTe
}
}

public String getTimestampField() {
public static String getTimestampField() {
return FIXED_TIMESTAMP_FIELD;
}

@@ -124,7 +124,7 @@ public class DiffableStringMap extends AbstractMap<String, String> implements Di
return deletes;
}

public Map<String, Diff<String>> getDiffs() {
public static Map<String, Diff<String>> getDiffs() {
return Collections.emptyMap();
}

@@ -97,7 +97,7 @@ public class IndexMetadataVerifier {
* Check that the index version is compatible. Elasticsearch does not support indices created before the
* previous major version.
*/
private void checkSupportedVersion(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) {
private static void checkSupportedVersion(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) {
boolean isSupportedVersion = indexMetadata.getCompatibilityVersion().onOrAfter(minimumIndexCompatibilityVersion);
if (isSupportedVersion == false) {
throw new IllegalStateException(
@@ -236,7 +236,7 @@ public class IndexMetadataVerifier {
* Convert shared_cache searchable snapshot indices to only specify
* _tier_preference: data_frozen, removing any pre-existing tier allocation rules.
*/
IndexMetadata convertSharedCacheTierPreference(IndexMetadata indexMetadata) {
static IndexMetadata convertSharedCacheTierPreference(IndexMetadata indexMetadata) {
// Only remove these settings for a shared_cache searchable snapshot
if (indexMetadata.isPartialSearchableSnapshot()) {
final Settings settings = indexMetadata.getSettings();
@@ -261,7 +261,7 @@ public class IndexMetadataVerifier {
/**
* Removes index level ._tier allocation filters, if they exist
*/
IndexMetadata removeTierFiltering(IndexMetadata indexMetadata) {
static IndexMetadata removeTierFiltering(IndexMetadata indexMetadata) {
final Settings settings = indexMetadata.getSettings();
final Settings.Builder settingsBuilder = Settings.builder().put(settings);
// Clear any allocation rules other than preference for tier

@@ -489,10 +489,10 @@ public class IndexNameExpressionResolver {
);
}
if (resolvedSystemDataStreams.isEmpty() == false) {
throw systemIndices.dataStreamAccessException(threadContext, resolvedSystemDataStreams);
throw SystemIndices.dataStreamAccessException(threadContext, resolvedSystemDataStreams);
}
if (resolvedNetNewSystemIndices.isEmpty() == false) {
throw systemIndices.netNewSystemIndexAccessException(threadContext, resolvedNetNewSystemIndices);
throw SystemIndices.netNewSystemIndexAccessException(threadContext, resolvedNetNewSystemIndices);
}
}

@@ -932,7 +932,7 @@ public class IndexNameExpressionResolver {
}

public SystemIndexAccessLevel getSystemIndexAccessLevel() {
final SystemIndexAccessLevel accessLevel = systemIndices.getSystemIndexAccessLevel(threadContext);
final SystemIndexAccessLevel accessLevel = SystemIndices.getSystemIndexAccessLevel(threadContext);
assert accessLevel != SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY
: "BACKWARDS_COMPATIBLE_ONLY access level should never be used automatically, it should only be used in known special cases";
return accessLevel;

@@ -757,7 +757,7 @@ public class Metadata extends AbstractCollection<IndexMetadata> implements Diffa
return routing;
}

private void rejectSingleIndexOperation(String aliasOrIndex, IndexAbstraction result) {
private static void rejectSingleIndexOperation(String aliasOrIndex, IndexAbstraction result) {
String[] indexNames = new String[result.getIndices().size()];
int i = 0;
for (Index indexName : result.getIndices()) {
@@ -1293,7 +1293,7 @@ public class Metadata extends AbstractCollection<IndexMetadata> implements Diffa
return this;
}

boolean unsetPreviousIndicesLookup(IndexMetadata previous, IndexMetadata current) {
static boolean unsetPreviousIndicesLookup(IndexMetadata previous, IndexMetadata current) {
if (previous == null) {
return true;
}

@@ -249,7 +249,7 @@ public class MetadataCreateDataStreamService {
assert writeIndex != null;
assert writeIndex.mapping() != null : "no mapping found for backing index [" + writeIndex.getIndex().getName() + "]";

String fieldName = template.getDataStreamTemplate().getTimestampField();
String fieldName = ComposableIndexTemplate.DataStreamTemplate.getTimestampField();
DataStream.TimestampField timestampField = new DataStream.TimestampField(fieldName);
List<Index> dsBackingIndices = backingIndices.stream()
.map(IndexMetadata::getIndex)

@@ -162,7 +162,7 @@ public class MetadataCreateIndexService {
/**
* Validate the name for an index against some static rules and a cluster state.
*/
public void validateIndexName(String index, ClusterState state) {
public static void validateIndexName(String index, ClusterState state) {
validateIndexOrAliasName(index, InvalidIndexNameException::new);
if (index.toLowerCase(Locale.ROOT).equals(index) == false) {
throw new InvalidIndexNameException(index, "must be lowercase");
@@ -559,7 +559,7 @@ public class MetadataCreateIndexService {
// shard id and the current timestamp
xContentRegistry,
indexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()),
indexService.dateMathExpressionResolverAt(request.getNameResolvedAt()),
IndexService.dateMathExpressionResolverAt(request.getNameResolvedAt()),
systemIndices::isSystemName
),
templates.stream().map(IndexTemplateMetadata::getName).collect(toList()),
@@ -625,7 +625,7 @@ public class MetadataCreateIndexService {
xContentRegistry,
// the context is used ony for validation so it's fine to pass fake values for the shard id and the current timestamp
indexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()),
indexService.dateMathExpressionResolverAt(request.getNameResolvedAt()),
IndexService.dateMathExpressionResolverAt(request.getNameResolvedAt()),
systemIndices::isSystemName
),
Collections.singletonList(templateName),
@@ -681,7 +681,7 @@ public class MetadataCreateIndexService {
// shard id and the current timestamp
xContentRegistry,
indexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()),
indexService.dateMathExpressionResolverAt(request.getNameResolvedAt()),
IndexService.dateMathExpressionResolverAt(request.getNameResolvedAt()),
systemIndices::isSystemName
),
List.of(),
@@ -775,7 +775,7 @@ public class MetadataCreateIndexService {
// the context is only used for validation so it's fine to pass fake values for the
// shard id and the current timestamp
indexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()),
indexService.dateMathExpressionResolverAt(request.getNameResolvedAt()),
IndexService.dateMathExpressionResolverAt(request.getNameResolvedAt()),
systemIndices::isSystemName
),
List.of(),
@@ -1214,7 +1214,7 @@ public class MetadataCreateIndexService {
return blocksBuilder;
}

private void updateIndexMappingsAndBuildSortOrder(
private static void updateIndexMappingsAndBuildSortOrder(
IndexService indexService,
CreateIndexClusterStateUpdateRequest request,
List<CompressedXContent> mappings,

@@ -232,7 +232,7 @@ public class MetadataIndexAliasesService {
);
}

private void validateAliasTargetIsNotDSBackingIndex(ClusterState currentState, AliasAction action) {
private static void validateAliasTargetIsNotDSBackingIndex(ClusterState currentState, AliasAction action) {
IndexAbstraction indexAbstraction = currentState.metadata().getIndicesLookup().get(action.getIndex());
assert indexAbstraction != null : "invalid cluster metadata. index [" + action.getIndex() + "] was not found";
if (indexAbstraction.getParentDataStream() != null) {

@@ -1471,7 +1471,7 @@ public class MetadataIndexTemplateService {
// shard id and the current timestamp
xContentRegistry,
tempIndexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()),
tempIndexService.dateMathExpressionResolverAt(System.currentTimeMillis()),
IndexService.dateMathExpressionResolverAt(System.currentTimeMillis()),
systemIndices::isSystemName
);

@@ -123,7 +123,7 @@ public class MetadataMappingService {
}
}

private ClusterState applyRequest(
private static ClusterState applyRequest(
ClusterState currentState,
PutMappingClusterStateUpdateRequest request,
Map<Index, MapperService> indexMapperServices

@@ -243,7 +243,7 @@ public class TemplateUpgradeService implements ClusterStateListener {

private static final ToXContent.Params PARAMS = new ToXContent.MapParams(singletonMap("reduce_mappings", "true"));

private BytesReference toBytesReference(IndexTemplateMetadata templateMetadata) {
private static BytesReference toBytesReference(IndexTemplateMetadata templateMetadata) {
try {
return XContentHelper.toXContent((builder, params) -> {
IndexTemplateMetadata.Builder.toInnerXContentWithTypes(templateMetadata, builder, params);

@@ -73,7 +73,7 @@ public class DiscoveryNodeFilters {
this.withoutTierPreferences = doTrimTier(this);
}

private boolean matchByIP(String[] values, @Nullable String hostIp, @Nullable String publishIp) {
private static boolean matchByIP(String[] values, @Nullable String hostIp, @Nullable String publishIp) {
for (String ipOrHost : values) {
String value = InetAddresses.isInetAddress(ipOrHost) ? NetworkAddress.format(InetAddresses.forString(ipOrHost)) : ipOrHost;
boolean matchIp = Regex.simpleMatch(value, hostIp) || Regex.simpleMatch(value, publishIp);

@@ -262,7 +262,7 @@ public abstract class IndexRouting {
return createId(hashSource(flat), suffix);
}

private String createId(int routingHash, byte[] suffix) {
private static String createId(int routingHash, byte[] suffix) {
byte[] idBytes = new byte[4 + suffix.length];
ByteUtils.writeIntLE(routingHash, idBytes, 0);
System.arraycopy(suffix, 0, idBytes, 4, suffix.length);

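The visible body shows the id layout this helper produces: a 4-byte little-endian routing hash followed by the caller-supplied suffix bytes, so the routing hash is recoverable from the first four bytes of every generated id. How the final byte array is encoded into the returned String falls outside the visible hunk.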