diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java index 12b6b64f49a..5ee38e12eb7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneAdmins.java @@ -17,13 +17,16 @@ */ package org.apache.hadoop.hdds.server; +import java.io.IOException; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Set; import com.google.common.collect.Sets; +import jakarta.annotation.Nullable; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -33,6 +36,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS_GROUPS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS; + /** * This class contains ozone admin user information, username and group, @@ -186,4 +192,91 @@ public static Collection getOzoneReadOnlyAdminsGroupsFromConfig( return conf.getTrimmedStringCollection( OZONE_READONLY_ADMINISTRATORS_GROUPS); } + + /** + * Get the list of S3 administrators from Ozone config. + *

+   * Notes:
+   * <ul>
+   *   <li>If ozone.s3.administrators is empty or unset, the value of
+   *   ozone.administrators is used instead.</li>
+   *   <li>The current login user (the OM service principal) is always added
+   *   to the returned set.</li>
+   * </ul>
+   *
+   * @param conf An instance of {@link OzoneConfiguration} being used
+   * @return A {@link Collection} of the S3 administrator users
+   */
+  public static Set<String> getS3AdminsFromConfig(OzoneConfiguration conf) throws IOException {
+    Set<String> ozoneAdmins = new HashSet<>(conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS));
+
+    if (ozoneAdmins.isEmpty()) {
+      ozoneAdmins = new HashSet<>(conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS));
+    }
+
+    String omSPN = UserGroupInformation.getCurrentUser().getShortUserName();
+    if (!ozoneAdmins.contains(omSPN)) {
+      ozoneAdmins.add(omSPN);
+    }
+
+    return ozoneAdmins;
+  }
+
+  /**
+   * Get the list of the groups that are a part of S3 administrators from Ozone config.

+   * Note: If ozone.s3.administrators.groups value is empty or unset,
+   * defaults to the ozone.administrators.groups value.
+   *
+   * @param conf An instance of {@link OzoneConfiguration} being used
+   * @return A {@link Collection} of the S3 administrator groups
+   */
+  public static Set<String> getS3AdminsGroupsFromConfig(OzoneConfiguration conf) {
+    Set<String> s3AdminsGroup = new HashSet<>(conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS_GROUPS));
+
+    if (s3AdminsGroup.isEmpty() && conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS).isEmpty()) {
+      s3AdminsGroup = new HashSet<>(conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS_GROUPS));
+    }
+
+    return s3AdminsGroup;
+  }
+
+  /**
+   * Get the users and groups that are a part of S3 administrators.
+   * @param conf An instance of {@link OzoneConfiguration} being used
+   * @return an instance of {@link OzoneAdmins} containing the S3 admin users and groups
+   */
+  public static OzoneAdmins getS3Admins(OzoneConfiguration conf) {
+    Set<String> s3Admins;
+    try {
+      s3Admins = getS3AdminsFromConfig(conf);
+    } catch (IOException ie) {
+      s3Admins = Collections.emptySet();
+    }
+    Set<String> s3AdminGroups = getS3AdminsGroupsFromConfig(conf);
+
+    return new OzoneAdmins(s3Admins, s3AdminGroups);
+  }
+
+  /**
+   * Check if the provided user is an S3 administrator.
+   * @param user An instance of {@link UserGroupInformation} with information about the user to verify
+   * @param s3Admins An instance of {@link OzoneAdmins} containing information
+   *                 of the S3 administrator users and groups in the system
+   * @return {@code true} if the provided user is an S3 administrator else {@code false}
+   */
+  public static boolean isS3Admin(@Nullable UserGroupInformation user, OzoneAdmins s3Admins) {
+    return null != user && s3Admins.isAdmin(user);
+  }
+
+  /**
+   * Check if the provided user is an S3 administrator.
+   * @param user An instance of {@link UserGroupInformation} with information about the user to verify
+   * @param conf An instance of {@link OzoneConfiguration} being used
+   * @return {@code true} if the provided user is an S3 administrator else {@code false}
+   */
+  public static boolean isS3Admin(@Nullable UserGroupInformation user, OzoneConfiguration conf) {
+    OzoneAdmins s3Admins = getS3Admins(conf);
+    return isS3Admin(user, s3Admins);
+  }
 }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java
new file mode 100644
index 00000000000..bbfd507d767
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestOzoneAdmins.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.hadoop.hdds.server; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.util.Arrays; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * This class is to test the utilities present in the OzoneAdmins class. + */ +class TestOzoneAdmins { + // The following set of tests are to validate the S3 based utilities present in OzoneAdmins + @ParameterizedTest + @ValueSource(strings = {OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, + OzoneConfigKeys.OZONE_ADMINISTRATORS}) + void testS3AdminExtraction(String configKey) throws IOException { + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(configKey, "alice,bob"); + + assertThat(OzoneAdmins.getS3AdminsFromConfig(configuration)) + .containsAll(Arrays.asList("alice", "bob")); + } + + @ParameterizedTest + @ValueSource(strings = {OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS, + OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS}) + void testS3AdminGroupExtraction(String configKey) { + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(configKey, "test1, test2"); + + assertThat(OzoneAdmins.getS3AdminsGroupsFromConfig(configuration)) + .containsAll(Arrays.asList("test1", "test2")); + } + + @ParameterizedTest + @CsvSource({ + OzoneConfigKeys.OZONE_ADMINISTRATORS + ", " + OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS, + OzoneConfigKeys.OZONE_S3_ADMINISTRATORS + ", " + OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS + }) + void testIsAdmin(String adminKey, String adminGroupKey) { + // When there is no S3 admin, but Ozone admins present + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(adminKey, "alice"); + configuration.set(adminGroupKey, "test_group"); + + OzoneAdmins admins = OzoneAdmins.getS3Admins(configuration); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting( + "alice", new String[] {"test_group"}); + + assertThat(admins.isAdmin(ugi)).isEqualTo(true); + + // Test that when a user is present in an admin group but not an Ozone Admin + UserGroupInformation ugiGroupOnly = UserGroupInformation.createUserForTesting( + "bob", new String[] {"test_group"}); + assertThat(admins.isAdmin(ugiGroupOnly)).isEqualTo(true); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testIsAdminWithUgi(boolean isAdminSet) { + OzoneConfiguration configuration = new OzoneConfiguration(); + if (isAdminSet) { + configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, "alice"); + configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS, "test_group"); + } + OzoneAdmins admins = OzoneAdmins.getS3Admins(configuration); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting( + "alice", new String[] {"test_group"}); + // Test that when a user is present in an admin group but not an Ozone Admin + UserGroupInformation ugiGroupOnly = 
UserGroupInformation.createUserForTesting( + "bob", new String[] {"test_group"}); + + assertThat(admins.isAdmin(ugi)).isEqualTo(isAdminSet); + assertThat(admins.isAdmin(ugiGroupOnly)).isEqualTo(isAdminSet); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testIsS3AdminWithUgiAndConfiguration(boolean isAdminSet) { + OzoneConfiguration configuration = new OzoneConfiguration(); + if (isAdminSet) { + configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, "alice"); + configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS, "test_group"); + UserGroupInformation ugi = UserGroupInformation.createUserForTesting( + "alice", new String[] {"test_group"}); + // Scenario when user is present in an admin group but not an Ozone Admin + UserGroupInformation ugiGroupOnly = UserGroupInformation.createUserForTesting( + "bob", new String[] {"test_group"}); + + assertThat(OzoneAdmins.isS3Admin(ugi, configuration)).isEqualTo(true); + assertThat(OzoneAdmins.isS3Admin(ugiGroupOnly, configuration)).isEqualTo(true); + } else { + assertThat(OzoneAdmins.isS3Admin(null, configuration)).isEqualTo(false); + } + + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java index 65d9e559005..744ada797e7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/GrpcOMFailoverProxyProvider.java @@ -18,12 +18,9 @@ package org.apache.hadoop.ozone.om.ha; import io.grpc.Status; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; -import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; @@ -41,6 +38,7 @@ import java.util.Optional; import java.util.OptionalInt; import io.grpc.StatusRuntimeException; +import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,9 +58,10 @@ public class GrpcOMFailoverProxyProvider extends LoggerFactory.getLogger(GrpcOMFailoverProxyProvider.class); public GrpcOMFailoverProxyProvider(ConfigurationSource configuration, + UserGroupInformation ugi, String omServiceId, Class protocol) throws IOException { - super(configuration, omServiceId, protocol); + super(configuration, ugi, omServiceId, protocol); } @Override @@ -116,9 +115,7 @@ protected void loadOMClientConfigs(ConfigurationSource config, String omSvcId) private T createOMProxy() throws IOException { InetSocketAddress addr = new InetSocketAddress(0); - Configuration hadoopConf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf()); - return (T) RPC.getProxy(getInterface(), 0, addr, hadoopConf); + return createOMProxy(addr); } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java index 543d2e4aed3..4447a72ab13 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java +++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java @@ -29,15 +29,9 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.ha.ConfUtils; @@ -59,9 +53,7 @@ public class HadoopRpcOMFailoverProxyProvider extends public static final Logger LOG = LoggerFactory.getLogger(HadoopRpcOMFailoverProxyProvider.class); - private final long omVersion; private final Text delegationTokenService; - private final UserGroupInformation ugi; private Map omProxyInfos; private List retryExceptions = new ArrayList<>(); @@ -75,9 +67,7 @@ public HadoopRpcOMFailoverProxyProvider(ConfigurationSource configuration, UserGroupInformation ugi, String omServiceId, Class protocol) throws IOException { - super(configuration, omServiceId, protocol); - this.ugi = ugi; - this.omVersion = RPC.getProtocolVersion(protocol); + super(configuration, ugi, omServiceId, protocol); this.delegationTokenService = computeDelegationTokenService(); } @@ -130,24 +120,6 @@ protected void loadOMClientConfigs(ConfigurationSource config, String omSvcId) setOmNodeAddressMap(omNodeAddressMap); } - private T createOMProxy(InetSocketAddress omAddress) throws IOException { - Configuration hadoopConf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf()); - RPC.setProtocolEngine(hadoopConf, getInterface(), ProtobufRpcEngine.class); - - // FailoverOnNetworkException ensures that the IPC layer does not attempt - // retries on the same OM in case of connection exception. This retry - // policy essentially results in TRY_ONCE_THEN_FAIL. - RetryPolicy connectionRetryPolicy = RetryPolicies - .failoverOnNetworkException(0); - - return (T) RPC.getProtocolProxy(getInterface(), omVersion, - omAddress, ugi, hadoopConf, NetUtils.getDefaultSocketFactory( - hadoopConf), (int) OmUtils.getOMClientRpcTimeOut(getConf()), - connectionRetryPolicy).getProxy(); - - } - /** * Get the proxy object which should be used until the next failover event * occurs. RPC proxy object is intialized lazily. 
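With createOMProxy consolidated into OMFailoverProxyProviderBase, both the gRPC and Hadoop RPC providers are constructed the same way and hand the caller's UGI down to the base class. A minimal usage sketch follows; the wrapper class, the omServiceId value, the generic binding, and the import path for OzoneManagerProtocolPB are illustrative assumptions, not part of this patch.

import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.ha.HadoopRpcOMFailoverProxyProvider;
import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
import org.apache.hadoop.security.UserGroupInformation;

/** Illustrative sketch only; not part of this patch. */
final class OmFailoverProviderUsageSketch {
  private OmFailoverProviderUsageSketch() { }

  static HadoopRpcOMFailoverProxyProvider<OzoneManagerProtocolPB> newProvider(
      OzoneConfiguration conf, String omServiceId) throws IOException {
    // The caller's UGI is passed to the provider and reused by the shared
    // createOMProxy(InetSocketAddress) in OMFailoverProxyProviderBase.
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    return new HadoopRpcOMFailoverProxyProvider<>(
        conf, ugi, omServiceId, OzoneManagerProtocolPB.class);
  }
}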
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java index 1a738b2ac84..5045a32bdcd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java @@ -21,17 +21,25 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.protobuf.ServiceException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.retry.FailoverProxyProvider; +import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.slf4j.Logger; @@ -85,13 +93,17 @@ public abstract class OMFailoverProxyProviderBase implements private Set accessControlExceptionOMs = new HashSet<>(); private boolean performFailoverDone; + private final UserGroupInformation ugi; + public OMFailoverProxyProviderBase(ConfigurationSource configuration, + UserGroupInformation ugi, String omServiceId, Class protocol) throws IOException { this.conf = configuration; this.protocolClass = protocol; this.performFailoverDone = true; this.omServiceId = omServiceId; + this.ugi = ugi; waitBetweenRetries = conf.getLong( OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_KEY, @@ -112,6 +124,35 @@ protected abstract void loadOMClientConfigs(ConfigurationSource config, String omSvcId) throws IOException; + /** + * Get the protocol proxy for provided address. 
+ * @param omAddress An instance of {@link InetSocketAddress} which contains the address to connect + * @return the proxy connection to the address and the set of methods supported by the server at the address + * @throws IOException if any error occurs while trying to get the proxy + */ + protected T createOMProxy(InetSocketAddress omAddress) throws IOException { + Configuration hadoopConf = + LegacyHadoopConfigurationSource.asHadoopConfiguration(getConf()); + + // TODO: Post upgrade to Protobuf 3.x we need to use ProtobufRpcEngine2 + RPC.setProtocolEngine(hadoopConf, getInterface(), ProtobufRpcEngine.class); + + // Ensure we do not attempt retry on the same OM in case of exceptions + RetryPolicy connectionRetryPolicy = RetryPolicies.failoverOnNetworkException(0); + + return (T) RPC.getProtocolProxy( + getInterface(), + RPC.getProtocolVersion(protocolClass), + omAddress, + ugi, + hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf), + (int) OmUtils.getOMClientRpcTimeOut(getConf()), + connectionRetryPolicy + ).getProxy(); + } + + protected synchronized boolean shouldFailover(Exception ex) { Throwable unwrappedException = HddsUtils.getUnwrappedException(ex); if (unwrappedException instanceof AccessControlException || diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java index ac2e85da84d..c9eb9cbb44f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java @@ -121,6 +121,7 @@ public GrpcOmTransport(ConfigurationSource conf, omFailoverProxyProvider = new GrpcOMFailoverProxyProvider( conf, + ugi, omServiceId, OzoneManagerProtocolPB.class); diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml index 39d26c362f6..026dfa1edc3 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml @@ -96,7 +96,7 @@ services: - 9878:9878 env_file: - ./docker-config - command: ["/opt/hadoop/bin/ozone","s3g"] + command: ["/opt/hadoop/bin/ozone","s3g", "-Dozone.om.transport.class=${OZONE_S3_OM_TRANSPORT:-org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransportFactory}"] environment: OZONE_OPTS: recon: diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh index 644e45c4d5a..a9e87a60cdd 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-fcq.sh @@ -25,6 +25,7 @@ source "$COMPOSE_DIR/../testlib.sh" export SECURITY_ENABLED=true export COMPOSE_FILE=docker-compose.yaml:fcq.yaml +export OZONE_S3_OM_TRANSPORT="org.apache.hadoop.ozone.om.protocolPB.Hadoop3OmTransportFactory" start_docker_env diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot index e9b5dd5df72..e0c2fc7f818 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot @@ -45,15 +45,19 @@ S3 Gateway Secret Already Exists Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True S3 Gateway Generate Secret By Username - [Tags] robot:skip # TODO: 
Enable after HDDS-11041 is done. Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser Should contain ${result} HTTP/1.1 200 OK ignore_case=True Should Match Regexp ${result} .*.* S3 Gateway Generate Secret By Username For Other User - [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 Should contain ${result} HTTP/1.1 200 OK ignore_case=True Should Match Regexp ${result} .*.* + +S3 Gateway Reject Secret Generation By Non-admin User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 403 FORBIDDEN ignore_case=True \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot index 59725c0416c..ffb03a85a8a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot @@ -38,15 +38,19 @@ S3 Gateway Revoke Secret Should contain ${result} HTTP/1.1 200 OK ignore_case=True S3 Gateway Revoke Secret By Username - [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled Execute ozone s3 getsecret -u testuser ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser Should contain ${result} HTTP/1.1 200 OK ignore_case=True S3 Gateway Revoke Secret By Username For Other User - [Tags] robot:skip # TODO: Enable after HDDS-11041 is done. 
Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 Should contain ${result} HTTP/1.1 200 OK ignore_case=True + +S3 Gateway Reject Secret Revoke By Non-admin User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Run Keyword Kinit test user testuser2 testuser2.keytab + ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 403 FORBIDDEN ignore_case=True \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java index c09c5b91af5..cad987bb7da 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneConfigUtil.java @@ -19,21 +19,11 @@ import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Collection; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS; - /** * Utility class for ozone configurations. */ @@ -43,38 +33,6 @@ public final class OzoneConfigUtil { private OzoneConfigUtil() { } - /** - * Return list of s3 administrators prop from config. - * - * If ozone.s3.administrators value is empty string or unset, - * defaults to ozone.administrators value. 
- */ - static Collection getS3AdminsFromConfig(OzoneConfiguration conf) - throws IOException { - Collection ozAdmins = - conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS); - if (ozAdmins == null || ozAdmins.isEmpty()) { - ozAdmins = conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS); - } - String omSPN = UserGroupInformation.getCurrentUser().getShortUserName(); - if (!ozAdmins.contains(omSPN)) { - ozAdmins.add(omSPN); - } - return ozAdmins; - } - - static Collection getS3AdminsGroupsFromConfig( - OzoneConfiguration conf) { - Collection s3AdminsGroup = - conf.getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS_GROUPS); - if (s3AdminsGroup.isEmpty() && conf - .getTrimmedStringCollection(OZONE_S3_ADMINISTRATORS).isEmpty()) { - s3AdminsGroup = conf - .getTrimmedStringCollection(OZONE_ADMINISTRATORS_GROUPS); - } - return s3AdminsGroup; - } - public static ReplicationConfig resolveReplicationConfigPreference( HddsProtos.ReplicationType clientType, HddsProtos.ReplicationFactor clientFactor, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 0038bca2e32..a90764714da 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -704,11 +704,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) // Get read only admin list readOnlyAdmins = OzoneAdmins.getReadonlyAdmins(conf); - Collection s3AdminUsernames = - OzoneConfigUtil.getS3AdminsFromConfig(configuration); - Collection s3AdminGroups = - OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration); - s3OzoneAdmins = new OzoneAdmins(s3AdminUsernames, s3AdminGroups); + s3OzoneAdmins = OzoneAdmins.getS3Admins(conf); instantiateServices(false); // Create special volume s3v which is required for S3G. @@ -4338,7 +4334,7 @@ private void checkAdminUserPrivilege(String operation) throws IOException { } public boolean isS3Admin(UserGroupInformation callerUgi) { - return callerUgi != null && s3OzoneAdmins.isAdmin(callerUgi); + return OzoneAdmins.isS3Admin(callerUgi, s3OzoneAdmins); } @VisibleForTesting diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java index 0bd99d49499..41d6c28e2b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneConfigUtil.java @@ -20,16 +20,10 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.io.IOException; -import java.util.Arrays; - -import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -119,42 +113,4 @@ public void testResolveClientSideRepConfigWhenBucketHasEC3() // should return ratis. 
assertEquals(ratisReplicationConfig, replicationConfig); } - - @Test - public void testS3AdminExtraction() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS, "alice,bob"); - - assertThat(OzoneConfigUtil.getS3AdminsFromConfig(configuration)) - .containsAll(Arrays.asList("alice", "bob")); - } - - @Test - public void testS3AdminExtractionWithFallback() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, "alice,bob"); - - assertThat(OzoneConfigUtil.getS3AdminsFromConfig(configuration)) - .containsAll(Arrays.asList("alice", "bob")); - } - - @Test - public void testS3AdminGroupExtraction() { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_S3_ADMINISTRATORS_GROUPS, - "test1, test2"); - - assertThat(OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration)) - .containsAll(Arrays.asList("test1", "test2")); - } - - @Test - public void testS3AdminGroupExtractionWithFallback() { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS, - "test1, test2"); - - assertThat(OzoneConfigUtil.getS3AdminsGroupsFromConfig(configuration)) - .containsAll(Arrays.asList("test1", "test2")); - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java new file mode 100644 index 00000000000..b5c7b242cb5 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3AdminEndpoint.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.hadoop.ozone.s3secret; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import javax.ws.rs.NameBinding; + +/** + * Annotation to only allow admin users to access the endpoint. + */ +@NameBinding +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.TYPE, ElementType.METHOD}) +public @interface S3AdminEndpoint { +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java new file mode 100644 index 00000000000..5ecdfa7c121 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretAdminFilter.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.hadoop.ozone.s3secret; + + +import javax.inject.Inject; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerRequestFilter; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; +import javax.ws.rs.ext.Provider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.OzoneAdmins; +import org.apache.hadoop.security.UserGroupInformation; + +import java.io.IOException; +import java.security.Principal; + +/** + * Filter that only allows admin to access endpoints annotated with {@link S3AdminEndpoint}. + * Condition is based on the value of the configuration keys for: + *
+ * <ul>
+ *   <li>ozone.administrators</li>
+ *   <li>ozone.administrators.groups</li>
+ * </ul>
+ */ +@S3AdminEndpoint +@Provider +public class S3SecretAdminFilter implements ContainerRequestFilter { + + @Inject + private OzoneConfiguration conf; + + @Override + public void filter(ContainerRequestContext requestContext) throws IOException { + final Principal userPrincipal = requestContext.getSecurityContext().getUserPrincipal(); + if (null != userPrincipal) { + UserGroupInformation user = UserGroupInformation.createRemoteUser(userPrincipal.getName()); + if (!OzoneAdmins.isS3Admin(user, conf)) { + requestContext.abortWith(Response.status(Status.FORBIDDEN).build()); + } + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java index 4ea17d2a2fd..739dadfb28e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java @@ -33,7 +33,6 @@ import java.io.IOException; import static javax.ws.rs.core.Response.Status.BAD_REQUEST; -import static javax.ws.rs.core.Response.Status.METHOD_NOT_ALLOWED; import static javax.ws.rs.core.Response.Status.NOT_FOUND; /** @@ -41,6 +40,7 @@ */ @Path("/secret") @S3SecretEnabled +@S3AdminEndpoint public class S3SecretManagementEndpoint extends S3SecretEndpointBase { private static final Logger LOG = LoggerFactory.getLogger(S3SecretManagementEndpoint.class); @@ -54,8 +54,7 @@ public Response generate() throws IOException { @Path("/{username}") public Response generate(@PathParam("username") String username) throws IOException { - // TODO: It is a temporary solution. To be removed after HDDS-11041 is done. - return Response.status(METHOD_NOT_ALLOWED).build(); + return generateInternal(username); } private Response generateInternal(@Nullable String username) throws IOException { @@ -95,8 +94,7 @@ public Response revoke() throws IOException { @Path("/{username}") public Response revoke(@PathParam("username") String username) throws IOException { - // TODO: It is a temporary solution. To be removed after HDDS-11041 is done. 
- return Response.status(METHOD_NOT_ALLOWED).build(); + return revokeInternal(username); } private Response revokeInternal(@Nullable String username) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java index d1f81faddd2..b548d17d9ff 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -93,7 +92,7 @@ void testSecretGenerate() throws IOException { hasNoSecretYet(); S3SecretResponse response = - (S3SecretResponse) endpoint.generate().getEntity(); + (S3SecretResponse) endpoint.generate().getEntity(); assertEquals(USER_SECRET, response.getAwsSecret()); assertEquals(USER_NAME, response.getAwsAccessKey()); @@ -112,12 +111,11 @@ void testIfSecretAlreadyExists() throws IOException { } @Test - @Unhealthy("HDDS-11041") void testSecretGenerateWithUsername() throws IOException { hasNoSecretYet(); S3SecretResponse response = - (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity(); + (S3SecretResponse) endpoint.generate(OTHER_USER_NAME).getEntity(); assertEquals(USER_SECRET, response.getAwsSecret()); assertEquals(OTHER_USER_NAME, response.getAwsAccessKey()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java index 85e6bd4c10e..b26df0e8996 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretRevoke.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -98,7 +97,6 @@ void testSecretRevoke() throws IOException { } @Test - @Unhealthy("HDDS-11041") void testSecretRevokeWithUsername() throws IOException { endpoint.revoke(OTHER_USER_NAME); verify(objectStore, times(1))