Remove doPrivileged from plugins (#127996)

This commit continues the work of removing SecurityManager-related code
by removing doPrivileged calls from Elasticsearch plugins.
Ryan Ernst 2025-05-21 09:00:24 -07:00 committed by GitHub
parent f492bb9a0d
commit 79d3aa8277
17 changed files with 49 additions and 373 deletions
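Every file in this diff makes the same mechanical change: a call that used to be wrapped in an AccessController.doPrivileged block (guarded by SpecialPermission.check()) becomes a plain method call, and the PrivilegedActionException unwrapping around it disappears. The sketch below only illustrates that before/after shape; the client interface and method names are hypothetical and not taken from this diff.

import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

// Hypothetical stand-in for the Azure/EC2/GCE/HDFS client calls touched in this commit.
interface RemoteClient {
    String fetch() throws Exception;
}

class BeforeSketch {
    String details(RemoteClient client) throws Exception {
        // Old pattern: run the SDK call with the plugin's own permissions asserted,
        // then unwrap the checked exception from PrivilegedActionException.
        try {
            return AccessController.doPrivileged((PrivilegedExceptionAction<String>) client::fetch);
        } catch (PrivilegedActionException e) {
            throw (Exception) e.getCause();
        }
    }
}

class AfterSketch {
    String details(RemoteClient client) throws Exception {
        // New pattern: with the SecurityManager gone there is nothing to assert, so call directly.
        return client.fetch();
    }
}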

AzureComputeServiceImpl.java (discovery-azure-classic plugin)

@@ -29,9 +29,6 @@ import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger;
import java.io.IOException;
- import java.security.AccessController;
- import java.security.PrivilegedActionException;
- import java.security.PrivilegedExceptionAction;
import java.util.ServiceLoader;
public class AzureComputeServiceImpl extends AbstractLifecycleComponent implements AzureComputeService {
@@ -94,11 +91,8 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent implements AzureComputeService {
public HostedServiceGetDetailedResponse getServiceDetails() {
SpecialPermission.check();
try {
- return AccessController.doPrivileged(
- (PrivilegedExceptionAction<HostedServiceGetDetailedResponse>) () -> client.getHostedServicesOperations()
- .getDetailed(serviceName)
- );
- } catch (PrivilegedActionException e) {
+ return client.getHostedServicesOperations().getDetailed(serviceName);
+ } catch (Exception e) {
throw new AzureServiceRemoteException("can not get list of azure nodes", e.getCause());
}
}

AwsEc2SeedHostsProvider.java (discovery-ec2 plugin)

@@ -107,7 +107,7 @@ class AwsEc2SeedHostsProvider implements SeedHostsProvider {
// NOTE: we don't filter by security group during the describe instances request for two reasons:
// 1. differences in VPCs require different parameters during query (ID vs Name)
// 2. We want to use two different strategies: (all security groups vs. any security groups)
- descInstances = SocketAccess.doPrivileged(() -> clientReference.client().describeInstances(buildDescribeInstancesRequest()));
+ descInstances = clientReference.client().describeInstances(buildDescribeInstancesRequest());
} catch (final Exception e) {
logger.info("Exception while retrieving instance list from AWS API: {}", e.getMessage());
logger.debug("Full exception:", e);

AwsEc2ServiceImpl.java (discovery-ec2 plugin)

@@ -66,7 +66,7 @@ class AwsEc2ServiceImpl implements AwsEc2Service {
final var endpoint = Endpoint.builder().url(URI.create(clientSettings.endpoint)).build();
ec2ClientBuilder.endpointProvider(endpointParams -> CompletableFuture.completedFuture(endpoint));
}
- return SocketAccess.doPrivileged(ec2ClientBuilder::build);
+ return ec2ClientBuilder.build();
}
private static void applyProxyConfiguration(Ec2ClientSettings clientSettings, ApacheHttpClient.Builder httpClientBuilder) {

AwsEc2Utils.java (discovery-ec2 plugin)

@@ -22,8 +22,8 @@ class AwsEc2Utils {
static String getInstanceMetadata(String metadataPath) {
final var httpClientBuilder = ApacheHttpClient.builder();
httpClientBuilder.connectionTimeout(IMDS_CONNECTION_TIMEOUT);
- try (var ec2Client = SocketAccess.doPrivileged(Ec2MetadataClient.builder().httpClient(httpClientBuilder)::build)) {
- final var metadataValue = SocketAccess.doPrivileged(() -> ec2Client.get(metadataPath)).asString();
+ try (var ec2Client = Ec2MetadataClient.builder().httpClient(httpClientBuilder).build()) {
+ final var metadataValue = ec2Client.get(metadataPath).asString();
if (Strings.hasText(metadataValue) == false) {
throw new IllegalStateException("no ec2 metadata returned from " + metadataPath);
}

SocketAccess.java (discovery-ec2 plugin, deleted)

@@ -1,44 +0,0 @@
- /*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
- package org.elasticsearch.discovery.ec2;
- import org.elasticsearch.SpecialPermission;
- import java.io.IOException;
- import java.net.SocketPermission;
- import java.security.AccessController;
- import java.security.PrivilegedAction;
- import java.security.PrivilegedActionException;
- import java.security.PrivilegedExceptionAction;
- /**
- * This plugin uses aws libraries to connect to aws services. For these remote calls the plugin needs
- * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in
- * {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
- */
- final class SocketAccess {
- private SocketAccess() {}
- public static <T> T doPrivileged(PrivilegedAction<T> operation) {
- SpecialPermission.check();
- return AccessController.doPrivileged(operation);
- }
- public static <T> T doPrivilegedIOException(PrivilegedExceptionAction<T> operation) throws IOException {
- SpecialPermission.check();
- try {
- return AccessController.doPrivileged(operation);
- } catch (PrivilegedActionException e) {
- throw (IOException) e.getCause();
- }
- }
- }

GceDiscoverTests.java (discovery-gce plugin)

@@ -14,7 +14,6 @@ import com.google.api.services.compute.model.NetworkInterface;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.cloud.gce.GceInstancesService;
- import org.elasticsearch.cloud.gce.util.Access;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;
@@ -138,23 +137,21 @@ public class GceDiscoverTests extends ESIntegTestCase {
return new GceInstancesService() {
@Override
public Collection<Instance> instances() {
- return Access.doPrivileged(() -> {
- final List<Instance> instances = new ArrayList<>();
+ final List<Instance> instances = new ArrayList<>();
- for (DiscoveryNode discoveryNode : nodes.values()) {
- Instance instance = new Instance();
- instance.setName(discoveryNode.getName());
- instance.setStatus("STARTED");
+ for (DiscoveryNode discoveryNode : nodes.values()) {
+ Instance instance = new Instance();
+ instance.setName(discoveryNode.getName());
+ instance.setStatus("STARTED");
- NetworkInterface networkInterface = new NetworkInterface();
- networkInterface.setNetworkIP(discoveryNode.getAddress().toString());
- instance.setNetworkInterfaces(singletonList(networkInterface));
+ NetworkInterface networkInterface = new NetworkInterface();
+ networkInterface.setNetworkIP(discoveryNode.getAddress().toString());
+ instance.setNetworkInterfaces(singletonList(networkInterface));
- instances.add(instance);
- }
+ instances.add(instance);
+ }
- return instances;
- });
+ return instances;
}
@Override

GceInstancesServiceImpl.java (discovery-gce plugin)

@@ -25,7 +25,6 @@ import com.google.api.services.compute.Compute;
import com.google.api.services.compute.model.Instance;
import com.google.api.services.compute.model.InstanceList;
- import org.elasticsearch.cloud.gce.util.Access;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@@ -68,19 +67,17 @@ public class GceInstancesServiceImpl implements GceInstancesService {
try {
// hack around code messiness in GCE code
// TODO: get this fixed
- return Access.doPrivilegedIOException(() -> {
- String nextPageToken = null;
- List<Instance> zoneInstances = new ArrayList<>();
- do {
- Compute.Instances.List list = client().instances().list(project, zoneId).setPageToken(nextPageToken);
- InstanceList instanceList = list.execute();
- nextPageToken = instanceList.getNextPageToken();
- if (instanceList.isEmpty() == false && instanceList.getItems() != null) {
- zoneInstances.addAll(instanceList.getItems());
- }
- } while (nextPageToken != null);
- return zoneInstances;
- });
+ String nextPageToken = null;
+ List<Instance> zoneInstances = new ArrayList<>();
+ do {
+ Compute.Instances.List list = client().instances().list(project, zoneId).setPageToken(nextPageToken);
+ InstanceList instanceList = list.execute();
+ nextPageToken = instanceList.getNextPageToken();
+ if (instanceList.isEmpty() == false && instanceList.getItems() != null) {
+ zoneInstances.addAll(instanceList.getItems());
+ }
+ } while (nextPageToken != null);
+ return zoneInstances;
} catch (IOException e) {
logger.warn(() -> "Problem fetching instance list for zone " + zoneId, e);
logger.debug("Full exception:", e);
@@ -152,7 +149,7 @@ public class GceInstancesServiceImpl implements GceInstancesService {
String getAppEngineValueFromMetadataServer(String serviceURL) throws GeneralSecurityException, IOException {
String metadata = GceMetadataService.GCE_HOST.get(settings);
- GenericUrl url = Access.doPrivileged(() -> new GenericUrl(metadata + serviceURL));
+ GenericUrl url = new GenericUrl(metadata + serviceURL);
HttpTransport httpTransport = getGceHttpTransport();
HttpRequestFactory requestFactory = httpTransport.createRequestFactory();
@@ -160,7 +157,7 @@ public class GceInstancesServiceImpl implements GceInstancesService {
.setConnectTimeout(500)
.setReadTimeout(500)
.setHeaders(new HttpHeaders().set("Metadata-Flavor", "Google"));
- HttpResponse response = Access.doPrivilegedIOException(() -> request.execute());
+ HttpResponse response = request.execute();
return headerContainsMetadataFlavor(response) ? response.parseAsString() : null;
}
@@ -211,7 +208,7 @@ public class GceInstancesServiceImpl implements GceInstancesService {
// hack around code messiness in GCE code
// TODO: get this fixed
- Access.doPrivilegedIOException(credential::refreshToken);
+ credential.refreshToken();
logger.debug("token [{}] will expire in [{}] s", credential.getAccessToken(), credential.getExpiresInSeconds());
if (credential.getExpiresInSeconds() != null) {

GceMetadataService.java (discovery-gce plugin)

@@ -15,7 +15,6 @@ import com.google.api.client.http.HttpHeaders;
import com.google.api.client.http.HttpResponse;
import com.google.api.client.http.HttpTransport;
- import org.elasticsearch.cloud.gce.util.Access;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@@ -66,14 +65,12 @@ public class GceMetadataService extends AbstractLifecycleComponent {
try {
// hack around code messiness in GCE code
// TODO: get this fixed
- headers = Access.doPrivileged(HttpHeaders::new);
- GenericUrl genericUrl = Access.doPrivileged(() -> new GenericUrl(urlMetadataNetwork));
+ headers = new HttpHeaders();
+ GenericUrl genericUrl = new GenericUrl(urlMetadataNetwork);
// This is needed to query meta data: https://cloud.google.com/compute/docs/metadata
headers.put("Metadata-Flavor", "Google");
- HttpResponse response = Access.doPrivilegedIOException(
- () -> getGceHttpTransport().createRequestFactory().buildGetRequest(genericUrl).setHeaders(headers).execute()
- );
+ HttpResponse response = getGceHttpTransport().createRequestFactory().buildGetRequest(genericUrl).setHeaders(headers).execute();
String metadata = response.parseAsString();
logger.debug("metadata found [{}]", metadata);
return metadata;

GceNameResolver.java (discovery-gce plugin)

@@ -10,12 +10,12 @@
package org.elasticsearch.cloud.gce.network;
import org.elasticsearch.cloud.gce.GceMetadataService;
- import org.elasticsearch.cloud.gce.util.Access;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.network.NetworkService.CustomNameResolver;
import java.io.IOException;
import java.net.InetAddress;
+ import java.net.URISyntaxException;
/**
* <p>Resolves certain GCE related 'meta' hostnames into an actual hostname
@@ -97,13 +97,13 @@ public class GceNameResolver implements CustomNameResolver {
}
try {
- String metadataResult = Access.doPrivilegedIOException(() -> gceMetadataService.metadata(gceMetadataPath));
+ String metadataResult = gceMetadataService.metadata(gceMetadataPath);
if (metadataResult == null || metadataResult.length() == 0) {
throw new IOException("no gce metadata returned from [" + gceMetadataPath + "] for [" + value + "]");
}
// only one address: because we explicitly ask for only one via the GceHostnameType
return new InetAddress[] { InetAddress.getByName(metadataResult) };
- } catch (IOException e) {
+ } catch (URISyntaxException | IOException e) {
throw new IOException("IOException caught when fetching InetAddress from [" + gceMetadataPath + "]", e);
}
}

Access.java (discovery-gce plugin, deleted)

@@ -1,54 +0,0 @@
- /*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
- package org.elasticsearch.cloud.gce.util;
- import org.elasticsearch.SpecialPermission;
- import java.io.IOException;
- import java.net.SocketPermission;
- import java.security.AccessController;
- import java.security.PrivilegedAction;
- import java.security.PrivilegedActionException;
- import java.security.PrivilegedExceptionAction;
- /**
- * GCE's HTTP client changes access levels. Specifically it needs {@link RuntimePermission} {@code
- * accessDeclaredMembers} and {@code setFactory}, and {@link java.lang.reflect.ReflectPermission}
- * {@code suppressAccessChecks}. For remote calls, the plugin needs {@link SocketPermission} for
- * {@code connect}. This class wraps the operations requiring access in
- * {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
- */
- public final class Access {
- private Access() {}
- public static <T> T doPrivileged(final PrivilegedAction<T> operation) {
- SpecialPermission.check();
- return AccessController.doPrivileged(operation);
- }
- public static void doPrivilegedVoid(final Runnable action) {
- SpecialPermission.check();
- AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
- action.run();
- return null;
- });
- }
- public static <T> T doPrivilegedIOException(final PrivilegedExceptionAction<T> operation) throws IOException {
- SpecialPermission.check();
- try {
- return AccessController.doPrivileged(operation);
- } catch (final PrivilegedActionException e) {
- throw (IOException) e.getCause();
- }
- }
- }

RetryHttpInitializerWrapper.java (discovery-gce plugin)

@@ -20,7 +20,6 @@ import com.google.api.client.http.HttpUnsuccessfulResponseHandler;
import com.google.api.client.util.ExponentialBackOff;
import com.google.api.client.util.Sleeper;
- import org.elasticsearch.cloud.gce.util.Access;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger;
@@ -60,7 +59,7 @@ public class RetryHttpInitializerWrapper implements HttpRequestInitializer {
// Use only for testing
static MockGoogleCredential.Builder newMockCredentialBuilder() {
// TODO: figure out why GCE is so bad like this
- return Access.doPrivileged(MockGoogleCredential.Builder::new);
+ return new MockGoogleCredential.Builder();
}
@Override

GceDiscoveryPlugin.java (discovery-gce plugin)

@@ -9,15 +9,11 @@
package org.elasticsearch.plugin.discovery.gce;
- import com.google.api.client.http.HttpHeaders;
- import com.google.api.client.util.ClassInfo;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.cloud.gce.GceInstancesService;
import org.elasticsearch.cloud.gce.GceInstancesServiceImpl;
import org.elasticsearch.cloud.gce.GceMetadataService;
import org.elasticsearch.cloud.gce.network.GceNameResolver;
- import org.elasticsearch.cloud.gce.util.Access;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@@ -53,18 +49,6 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close
// stashed when created in order to properly close
private final SetOnce<GceInstancesService> gceInstancesService = new SetOnce<>();
- static {
- /*
- * GCE's http client changes access levels because its silly and we
- * can't allow that on any old stack so we pull it here, up front,
- * so we can cleanly check the permissions for it. Without this changing
- * the permission can fail if any part of core is on the stack because
- * our plugin permissions don't allow core to "reach through" plugins to
- * change the permission. Because that'd be silly.
- */
- Access.doPrivilegedVoid(() -> ClassInfo.of(HttpHeaders.class, true));
- }
public GceDiscoveryPlugin(Settings settings) {
this.settings = settings;
logger.trace("starting gce discovery plugin...");

HdfsBlobContainer.java (repository-hdfs plugin)

@@ -32,7 +32,6 @@ import org.elasticsearch.core.Nullable;
import org.elasticsearch.repositories.hdfs.HdfsBlobStore.Operation;
import java.io.FileNotFoundException;
- import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -113,9 +112,7 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
// HDFSPrivilegedInputSteam which will ensure that underlying methods will
// be called with the proper privileges.
try {
- return store.execute(
- fileContext -> new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext)
- );
+ return store.execute(fileContext -> fileContext.open(new Path(path, blobName), bufferSize));
} catch (FileNotFoundException fnfe) {
throw new NoSuchFileException("[" + blobName + "] blob not found");
}
@@ -134,7 +131,7 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
// should direct the datanode to start on the appropriate block, at the
// appropriate target position.
fsInput.seek(position);
- return Streams.limitStream(new HDFSPrivilegedInputSteam(fsInput, securityContext), length);
+ return Streams.limitStream(fsInput, length);
});
} catch (FileNotFoundException fnfe) {
throw new NoSuchFileException("[" + blobName + "] blob not found");
@@ -326,48 +323,6 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
return Collections.unmodifiableMap(map);
}
- /**
- * Exists to wrap underlying InputStream methods that might make socket connections in
- * doPrivileged blocks. This is due to the way that hdfs client libraries might open
- * socket connections when you are reading from an InputStream.
- */
- private static class HDFSPrivilegedInputSteam extends FilterInputStream {
- private final HdfsSecurityContext securityContext;
- HDFSPrivilegedInputSteam(InputStream in, HdfsSecurityContext hdfsSecurityContext) {
- super(in);
- this.securityContext = hdfsSecurityContext;
- }
- public int read() throws IOException {
- return securityContext.doPrivilegedOrThrow(in::read);
- }
- public int read(byte b[]) throws IOException {
- return securityContext.doPrivilegedOrThrow(() -> in.read(b));
- }
- public int read(byte b[], int off, int len) throws IOException {
- return securityContext.doPrivilegedOrThrow(() -> in.read(b, off, len));
- }
- public long skip(long n) throws IOException {
- return securityContext.doPrivilegedOrThrow(() -> in.skip(n));
- }
- public int available() throws IOException {
- return securityContext.doPrivilegedOrThrow(() -> in.available());
- }
- public synchronized void reset() throws IOException {
- securityContext.doPrivilegedOrThrow(() -> {
- in.reset();
- return null;
- });
- }
- }
@Override
public void compareAndExchangeRegister(
OperationPurpose purpose,

HdfsBlobStore.java (repository-hdfs plugin)

@@ -36,9 +36,7 @@ final class HdfsBlobStore implements BlobStore {
HdfsBlobStore(FileContext fileContext, String path, int bufferSize, boolean readOnly, boolean haEnabled, Short replicationFactor)
throws IOException {
this.fileContext = fileContext;
- // Only restrict permissions if not running with HA
- boolean restrictPermissions = (haEnabled == false);
- this.securityContext = new HdfsSecurityContext(fileContext.getUgi(), restrictPermissions);
+ this.securityContext = new HdfsSecurityContext(fileContext.getUgi());
this.bufferSize = bufferSize;
this.replicationFactor = replicationFactor;
this.root = execute(fileContext1 -> fileContext1.makeQualified(new Path(path)));
@@ -103,10 +101,8 @@
if (closed) {
throw new AlreadyClosedException("HdfsBlobStore is closed: " + this);
}
- return securityContext.doPrivilegedOrThrow(() -> {
- securityContext.ensureLogin();
- return operation.run(fileContext);
- });
+ securityContext.ensureLogin();
+ return operation.run(fileContext);
}
@Override
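The doPrivilegedOrThrow wrapper removed here (and deleted from HdfsSecurityContext below) did more than unwrap exceptions: when HA was not in use, it ran the operation through the permission-limiting overload of doPrivileged, so the HDFS client code executed with only an explicitly listed set of permissions rather than the plugin's full grant. A minimal sketch of that overload, with a hypothetical permission set:

import java.io.IOException;
import java.net.SocketPermission;
import java.security.AccessController;
import java.security.Permission;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

// Sketch only: the permission array below is a made-up example, not the plugin's real set.
// With the three-argument overload, the privilege assertion covers just the listed
// permissions; any other permission check still walks the full call stack as usual.
final class RestrictedPrivilegeSketch {
    static <T> T runRestricted(PrivilegedExceptionAction<T> action) throws IOException {
        Permission[] restricted = { new SocketPermission("*", "connect") };
        try {
            return AccessController.doPrivileged(action, null, restricted);
        } catch (PrivilegedActionException e) {
            // doPrivileged wraps checked exceptions; rethrow the underlying IOException
            throw (IOException) e.getCause();
        }
    }
}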

HdfsPlugin.java (repository-hdfs plugin)

@@ -11,10 +11,8 @@ package org.elasticsearch.repositories.hdfs;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityUtil;
- import org.elasticsearch.SpecialPermission;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.BigArrays;
- import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.env.Environment;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.plugins.Plugin;
@@ -23,59 +21,17 @@ import org.elasticsearch.repositories.RepositoriesMetrics;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.xcontent.NamedXContentRegistry;
- import java.io.IOException;
- import java.nio.file.Files;
- import java.nio.file.Path;
- import java.security.AccessController;
- import java.security.PrivilegedAction;
import java.util.Collections;
import java.util.Map;
public final class HdfsPlugin extends Plugin implements RepositoryPlugin {
- // initialize some problematic classes with elevated privileges
+ // initialize some problematic classes
static {
- SpecialPermission.check();
- AccessController.doPrivileged((PrivilegedAction<Void>) HdfsPlugin::evilHadoopInit);
- AccessController.doPrivileged((PrivilegedAction<Void>) HdfsPlugin::eagerInit);
+ eagerInitSecurityUtil();
}
- @SuppressForbidden(reason = "Needs a security hack for hadoop on windows, until HADOOP-XXXX is fixed")
- private static Void evilHadoopInit() {
- // hack: on Windows, Shell's clinit has a similar problem that on unix,
- // but here we can workaround it for now by setting hadoop home
- // on unix: we still want to set this to something we control, because
- // if the user happens to have HADOOP_HOME in their environment -> checkHadoopHome goes boom
- // TODO: remove THIS when hadoop is fixed
- Path hadoopHome = null;
- String oldValue = null;
- try {
- hadoopHome = Files.createTempDirectory("hadoop").toAbsolutePath();
- oldValue = System.setProperty("hadoop.home.dir", hadoopHome.toString());
- Class.forName("org.apache.hadoop.security.UserGroupInformation");
- Class.forName("org.apache.hadoop.util.StringUtils");
- Class.forName("org.apache.hadoop.util.ShutdownHookManager");
- Class.forName("org.apache.hadoop.conf.Configuration");
- } catch (ClassNotFoundException | IOException e) {
- throw new RuntimeException(e);
- } finally {
- // try to clean up the hack
- if (oldValue == null) {
- System.clearProperty("hadoop.home.dir");
- } else {
- System.setProperty("hadoop.home.dir", oldValue);
- }
- try {
- // try to clean up our temp dir too if we can
- if (hadoopHome != null) {
- Files.delete(hadoopHome);
- }
- } catch (IOException thisIsBestEffort) {}
- }
- return null;
- }
- private static Void eagerInit() {
+ private static Void eagerInitSecurityUtil() {
/*
* Hadoop RPC wire serialization uses ProtocolBuffers. All proto classes for Hadoop
* come annotated with configurations that denote information about if they support

HdfsRepository.java (repository-hdfs plugin)

@@ -17,7 +17,6 @@ import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
- import org.elasticsearch.SpecialPermission;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
@@ -39,7 +38,6 @@ import java.io.UncheckedIOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
- import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Locale;
@@ -281,12 +279,7 @@ public final class HdfsRepository extends BlobStoreRepository {
@Override
protected HdfsBlobStore createBlobStore() {
- // initialize our blobstore using elevated privileges.
- SpecialPermission.check();
- final HdfsBlobStore blobStore = AccessController.doPrivileged(
- (PrivilegedAction<HdfsBlobStore>) () -> createBlobstore(uri, pathSetting, getMetadata().settings())
- );
- return blobStore;
+ return createBlobstore(uri, pathSetting, getMetadata().settings());
}
@Override

HdfsSecurityContext.java (repository-hdfs plugin)

@@ -9,73 +9,20 @@
package org.elasticsearch.repositories.hdfs;
import org.apache.hadoop.security.UserGroupInformation;
- import org.elasticsearch.SpecialPermission;
import org.elasticsearch.env.Environment;
import java.io.IOException;
import java.io.UncheckedIOException;
- import java.lang.reflect.ReflectPermission;
- import java.net.SocketPermission;
import java.nio.file.Files;
import java.nio.file.Path;
- import java.security.AccessController;
- import java.security.Permission;
- import java.security.PrivilegedActionException;
- import java.security.PrivilegedExceptionAction;
- import java.util.Arrays;
- import javax.security.auth.AuthPermission;
- import javax.security.auth.PrivateCredentialPermission;
- import javax.security.auth.kerberos.ServicePermission;
/**
* Oversees all the security specific logic for the HDFS Repository plugin.
*
- * Keeps track of the current user for a given repository, as well as which
- * permissions to grant the blob store restricted execution methods.
+ * Keeps track of the current user for a given repository.
*/
class HdfsSecurityContext {
- private static final Permission[] SIMPLE_AUTH_PERMISSIONS;
- private static final Permission[] KERBEROS_AUTH_PERMISSIONS;
- static {
- // We can do FS ops with only a few elevated permissions:
- SIMPLE_AUTH_PERMISSIONS = new Permission[] {
- new SocketPermission("*", "connect"),
- // 1) hadoop dynamic proxy is messy with access rules
- new ReflectPermission("suppressAccessChecks"),
- // 2) allow hadoop to add credentials to our Subject
- new AuthPermission("modifyPrivateCredentials"),
- // 3) RPC Engine requires this for re-establishing pooled connections over the lifetime of the client
- new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read"),
- new RuntimePermission("getClassLoader") };
- // If Security is enabled, we need all the following elevated permissions:
- KERBEROS_AUTH_PERMISSIONS = new Permission[] {
- new SocketPermission("*", "connect"),
- // 1) hadoop dynamic proxy is messy with access rules
- new ReflectPermission("suppressAccessChecks"),
- // 2) allow hadoop to add credentials to our Subject
- new AuthPermission("modifyPrivateCredentials"),
- // 3) allow hadoop to act as the logged in Subject
- new AuthPermission("doAs"),
- // 4) Listen and resolve permissions for kerberos server principals
- new SocketPermission("localhost:0", "listen,resolve"),
- // We add the following since hadoop requires the client to re-login when the kerberos ticket expires:
- // 5) All the permissions needed for UGI to do its weird JAAS hack
- new RuntimePermission("getClassLoader"),
- new RuntimePermission("setContextClassLoader"),
- // 6) Additional permissions for the login modules
- new AuthPermission("modifyPrincipals"),
- new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read"),
- new PrivateCredentialPermission("javax.security.auth.kerberos.KerberosTicket * \"*\"", "read"),
- new PrivateCredentialPermission("javax.security.auth.kerberos.KeyTab * \"*\"", "read")
- // Included later:
- // 7) allow code to initiate kerberos connections as the logged in user
- // Still far and away fewer permissions than the original full plugin policy
- };
- }
/**
* Locates the keytab file in the environment and verifies that it exists.
* Expects keytab file to exist at {@code $CONFIG_DIR$/repository-hdfs/krb5.keytab}
@@ -93,50 +40,9 @@ class HdfsSecurityContext {
}
private final UserGroupInformation ugi;
- private final boolean restrictPermissions;
- private final Permission[] restrictedExecutionPermissions;
- HdfsSecurityContext(UserGroupInformation ugi, boolean restrictPermissions) {
+ HdfsSecurityContext(UserGroupInformation ugi) {
this.ugi = ugi;
- this.restrictPermissions = restrictPermissions;
- this.restrictedExecutionPermissions = renderPermissions(ugi);
}
- private Permission[] renderPermissions(UserGroupInformation userGroupInformation) {
- Permission[] permissions;
- if (userGroupInformation.isFromKeytab()) {
- // KERBEROS
- // Leave room to append one extra permission based on the logged in user's info.
- int permlen = KERBEROS_AUTH_PERMISSIONS.length + 1;
- permissions = new Permission[permlen];
- System.arraycopy(KERBEROS_AUTH_PERMISSIONS, 0, permissions, 0, KERBEROS_AUTH_PERMISSIONS.length);
- // Append a kerberos.ServicePermission to only allow initiating kerberos connections
- // as the logged in user.
- permissions[permissions.length - 1] = new ServicePermission(userGroupInformation.getUserName(), "initiate");
- } else {
- // SIMPLE
- permissions = Arrays.copyOf(SIMPLE_AUTH_PERMISSIONS, SIMPLE_AUTH_PERMISSIONS.length);
- }
- return permissions;
- }
- private Permission[] getRestrictedExecutionPermissions() {
- return restrictedExecutionPermissions;
- }
- <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
- SpecialPermission.check();
- try {
- if (restrictPermissions) {
- return AccessController.doPrivileged(action, null, this.getRestrictedExecutionPermissions());
- } else {
- return AccessController.doPrivileged(action);
- }
- } catch (PrivilegedActionException e) {
- throw (IOException) e.getCause();
- }
- }
void ensureLogin() {