diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index 7d3f8629f0eb..f339a4a8018a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -57,7 +57,10 @@ public enum OzoneManagerVersion implements ComponentVersion { ATOMIC_CREATE_IF_NOT_EXISTS(12, "OzoneManager version that supports explicit create-if-not-exists key semantics"), - + + S3_BUCKET_CORS(13, + "OzoneManager version that supports bucket CORS configuration"), + FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java index f4173963dd6e..b4ee65a19ab7 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; /** * This class encapsulates the arguments that are @@ -68,6 +69,7 @@ public final class BucketArgs { private final long quotaInNamespace; private final String owner; + private final CorsConfiguration corsConfiguration; /** * Bucket Layout. 
@@ -87,6 +89,7 @@ private BucketArgs(Builder b) { bucketLayout = b.bucketLayout; owner = b.owner; defaultReplicationConfig = b.defaultReplicationConfig; + corsConfiguration = b.corsConfiguration; } /** @@ -185,6 +188,10 @@ public String getOwner() { return owner; } + public CorsConfiguration getCorsConfiguration() { + return corsConfiguration; + } + /** * Builder for OmBucketInfo. */ @@ -201,6 +208,7 @@ public static class Builder { private BucketLayout bucketLayout; private String owner; private DefaultReplicationConfig defaultReplicationConfig; + private CorsConfiguration corsConfiguration; public Builder() { quotaInBytes = OzoneConsts.QUOTA_RESET; @@ -274,6 +282,12 @@ public BucketArgs.Builder setDefaultReplicationConfig( return this; } + public BucketArgs.Builder setCorsConfiguration( + CorsConfiguration corsConfig) { + corsConfiguration = corsConfig; + return this; + } + /** * Constructs the BucketArgs. * @return instance of BucketArgs. diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 2bde6939651d..2b043f954b63 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -54,6 +54,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; @@ -151,6 +152,7 @@ public class OzoneBucket extends WithMetadata { * Bucket Owner. */ private String owner; + private CorsConfiguration corsConfiguration; /** * Pending deletion bytes (Includes bytes retained by snapshots). 
*/ @@ -201,6 +203,7 @@ protected OzoneBucket(Builder builder) { this.bucketLayout = builder.bucketLayout; } this.owner = builder.owner; + this.corsConfiguration = builder.corsConfiguration; } /** @@ -1148,6 +1151,21 @@ public boolean setOwner(String userName) throws IOException { return result; } + public CorsConfiguration getCorsConfiguration() { + return corsConfiguration; + } + + public void setCorsConfiguration( + CorsConfiguration newCorsConfiguration) throws IOException { + proxy.setBucketCors(volumeName, name, newCorsConfiguration); + this.corsConfiguration = newCorsConfiguration; + } + + public void deleteCorsConfiguration() throws IOException { + proxy.deleteBucketCors(volumeName, name); + this.corsConfiguration = null; + } + /** * Builder for OmBucketInfo. /** @@ -1231,6 +1249,7 @@ public static class Builder extends WithMetadata.Builder { private String owner; private long pendingDeleteBytes; private long pendingDeleteNamespace; + private CorsConfiguration corsConfiguration; protected Builder() { } @@ -1327,6 +1346,12 @@ public Builder setOwner(String owner) { return this; } + public Builder setCorsConfiguration( + CorsConfiguration corsConfig) { + this.corsConfiguration = corsConfig; + return this; + } + public Builder setPendingDeleteBytes(long pendingDeleteBytes) { this.pendingDeleteBytes = pendingDeleteBytes; return this; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index f1570143dfdd..6383fe195ab1 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; +import 
org.apache.hadoop.ozone.om.helpers.CorsConfiguration; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; @@ -277,6 +278,26 @@ void setBucketStorageType(String volumeName, String bucketName, StorageType storageType) throws IOException; + /** + * Sets the CORS configuration of a Bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param corsConfiguration CORS configuration to set + * @throws IOException + */ + void setBucketCors(String volumeName, String bucketName, + CorsConfiguration corsConfiguration) + throws IOException; + + /** + * Clears the CORS configuration of a Bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @throws IOException + */ + void deleteBucketCors(String volumeName, String bucketName) + throws IOException; + /** * Deletes a bucket if it is empty. * @param volumeName Name of the Volume diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index f02e65a7ce58..75fed3f985da 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -131,6 +131,7 @@ import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; @@ -622,6 +623,9 @@ public void createBucket( + " not support Erasure Coded replication."); } } + if 
(bucketArgs.getCorsConfiguration() != null) { + checkBucketCorsFeatureEnabled(); + } final String owner; // If S3 auth exists, set owner name to the short user name derived from the @@ -656,7 +660,8 @@ public void createBucket( .setQuotaInBytes(bucketArgs.getQuotaInBytes()) .setQuotaInNamespace(bucketArgs.getQuotaInNamespace()) .setBucketLayout(bucketLayout) - .setOwner(owner); + .setOwner(owner) + .setCorsConfiguration(bucketArgs.getCorsConfiguration()); if (bucketArgs.getAcls() != null) { builder.acls().addAll(bucketArgs.getAcls()); @@ -1232,6 +1237,40 @@ public void setBucketStorageType( ozoneManagerClient.setBucketProperty(builder.build()); } + @Override + public void setBucketCors(String volumeName, String bucketName, + CorsConfiguration corsConfiguration) throws IOException { + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Objects.requireNonNull(corsConfiguration, "corsConfiguration == null"); + checkBucketCorsFeatureEnabled(); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); + builder.setVolumeName(volumeName) + .setBucketName(bucketName) + .setCorsConfiguration(corsConfiguration); + ozoneManagerClient.setBucketProperty(builder.build()); + } + + @Override + public void deleteBucketCors(String volumeName, String bucketName) + throws IOException { + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + checkBucketCorsFeatureEnabled(); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); + builder.setVolumeName(volumeName) + .setBucketName(bucketName) + .setClearCorsConfiguration(true); + ozoneManagerClient.setBucketProperty(builder.build()); + } + + private void checkBucketCorsFeatureEnabled() throws IOException { + if (omVersion.compareTo(OzoneManagerVersion.S3_BUCKET_CORS) < 0) { + throw new IOException("OzoneManager does not support bucket CORS " + + "configuration."); + } + } + @Override public void setBucketQuota(String volumeName, String bucketName, long quotaInNamespace, long quotaInBytes) throws IOException 
{ @@ -1340,6 +1379,7 @@ public OzoneBucket getBucketDetails( .setBucketLayout(bucketInfo.getBucketLayout()) .setOwner(bucketInfo.getOwner()) .setDefaultReplicationConfig(bucketInfo.getDefaultReplicationConfig()) + .setCorsConfiguration(bucketInfo.getCorsConfiguration()) .build(); } @@ -1374,6 +1414,7 @@ public List listBuckets(String volumeName, String bucketPrefix, .setOwner(bucket.getOwner()) .setDefaultReplicationConfig( bucket.getDefaultReplicationConfig()) + .setCorsConfiguration(bucket.getCorsConfiguration()) .build()) .collect(Collectors.toList()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/CorsConfiguration.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/CorsConfiguration.java new file mode 100644 index 000000000000..e27efe914841 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/CorsConfiguration.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.helpers; + +import com.google.common.collect.ImmutableList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CORSConfiguration; + +/** + * S3 bucket CORS configuration. + */ +public final class CorsConfiguration { + private final ImmutableList<CorsRule> rules; + + private CorsConfiguration(Builder builder) { + this.rules = ImmutableList.copyOf(builder.rules); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public List<CorsRule> getRules() { + return rules; + } + + public CORSConfiguration getProtobuf() { + return CORSConfiguration.newBuilder() + .addAllCorsRule(rules.stream() + .map(CorsRule::getProtobuf) + .collect(Collectors.toList())) + .build(); + } + + public static CorsConfiguration getFromProtobuf( + CORSConfiguration proto) { + return newBuilder() + .setRules(proto.getCorsRuleList().stream() + .map(CorsRule::getFromProtobuf) + .collect(Collectors.toList())) + .build(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof CorsConfiguration)) { + return false; + } + CorsConfiguration that = (CorsConfiguration) obj; + return Objects.equals(rules, that.rules); + } + + @Override + public int hashCode() { + return Objects.hash(rules); + } + + @Override + public String toString() { + return "CorsConfiguration{" + + "rules=" + rules + + '}'; + } + + /** + * Builder for {@link CorsConfiguration}. + */ + public static final class Builder { + private List<CorsRule> rules = ImmutableList.of(); + + private Builder() { + } + + public Builder setRules(List<CorsRule> corsRules) { + this.rules = corsRules == null ? 
ImmutableList.of() : corsRules; + return this; + } + + public Builder addRule(CorsRule rule) { + ImmutableList.Builder<CorsRule> builder = ImmutableList.builder(); + builder.addAll(rules); + builder.add(rule); + this.rules = builder.build(); + return this; + } + + public CorsConfiguration build() { + return new CorsConfiguration(this); + } + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/CorsRule.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/CorsRule.java new file mode 100644 index 000000000000..fbd6247d8050 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/CorsRule.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.helpers; + +import com.google.common.collect.ImmutableList; +import java.util.List; +import java.util.Objects; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CORSRule; + +/** + * One S3 bucket CORS rule. 
+ */ +public final class CorsRule { + private final String id; + private final ImmutableList<String> allowedOrigins; + private final ImmutableList<String> allowedMethods; + private final ImmutableList<String> allowedHeaders; + private final ImmutableList<String> exposeHeaders; + private final Integer maxAgeSeconds; + + private CorsRule(Builder builder) { + this.id = builder.id; + this.allowedOrigins = ImmutableList.copyOf(builder.allowedOrigins); + this.allowedMethods = ImmutableList.copyOf(builder.allowedMethods); + this.allowedHeaders = ImmutableList.copyOf(builder.allowedHeaders); + this.exposeHeaders = ImmutableList.copyOf(builder.exposeHeaders); + this.maxAgeSeconds = builder.maxAgeSeconds; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public String getId() { + return id; + } + + public List<String> getAllowedOrigins() { + return allowedOrigins; + } + + public List<String> getAllowedMethods() { + return allowedMethods; + } + + public List<String> getAllowedHeaders() { + return allowedHeaders; + } + + public List<String> getExposeHeaders() { + return exposeHeaders; + } + + public Integer getMaxAgeSeconds() { + return maxAgeSeconds; + } + + public CORSRule getProtobuf() { + CORSRule.Builder builder = CORSRule.newBuilder() + .addAllAllowedOrigins(allowedOrigins) + .addAllAllowedMethods(allowedMethods) + .addAllAllowedHeaders(allowedHeaders) + .addAllExposeHeaders(exposeHeaders); + if (id != null) { + builder.setId(id); + } + if (maxAgeSeconds != null) { + builder.setMaxAgeSeconds(maxAgeSeconds); + } + return builder.build(); + } + + public static CorsRule getFromProtobuf(CORSRule proto) { + Builder builder = newBuilder() + .setAllowedOrigins(proto.getAllowedOriginsList()) + .setAllowedMethods(proto.getAllowedMethodsList()) + .setAllowedHeaders(proto.getAllowedHeadersList()) + .setExposeHeaders(proto.getExposeHeadersList()); + if (proto.hasId()) { + builder.setId(proto.getId()); + } + if (proto.hasMaxAgeSeconds()) { + builder.setMaxAgeSeconds(proto.getMaxAgeSeconds()); + } + return 
builder.build(); + } + + public Builder toBuilder() { + return newBuilder() + .setId(id) + .setAllowedOrigins(allowedOrigins) + .setAllowedMethods(allowedMethods) + .setAllowedHeaders(allowedHeaders) + .setExposeHeaders(exposeHeaders) + .setMaxAgeSeconds(maxAgeSeconds); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof CorsRule)) { + return false; + } + CorsRule that = (CorsRule) obj; + return Objects.equals(id, that.id) + && Objects.equals(allowedOrigins, that.allowedOrigins) + && Objects.equals(allowedMethods, that.allowedMethods) + && Objects.equals(allowedHeaders, that.allowedHeaders) + && Objects.equals(exposeHeaders, that.exposeHeaders) + && Objects.equals(maxAgeSeconds, that.maxAgeSeconds); + } + + @Override + public int hashCode() { + return Objects.hash(id, allowedOrigins, allowedMethods, allowedHeaders, + exposeHeaders, maxAgeSeconds); + } + + @Override + public String toString() { + return "CorsRule{" + + "id='" + id + '\'' + + ", allowedOrigins=" + allowedOrigins + + ", allowedMethods=" + allowedMethods + + ", allowedHeaders=" + allowedHeaders + + ", exposeHeaders=" + exposeHeaders + + ", maxAgeSeconds=" + maxAgeSeconds + + '}'; + } + + /** + * Builder for {@link CorsRule}. + */ + public static final class Builder { + private String id; + private List<String> allowedOrigins = ImmutableList.of(); + private List<String> allowedMethods = ImmutableList.of(); + private List<String> allowedHeaders = ImmutableList.of(); + private List<String> exposeHeaders = ImmutableList.of(); + private Integer maxAgeSeconds; + + private Builder() { + } + + public Builder setId(String ruleId) { + this.id = ruleId; + return this; + } + + public Builder setAllowedOrigins(List<String> origins) { + this.allowedOrigins = origins == null ? ImmutableList.of() : origins; + return this; + } + + public Builder setAllowedMethods(List<String> methods) { + this.allowedMethods = methods == null ? 
ImmutableList.of() : methods; + return this; + } + + public Builder setAllowedHeaders(List<String> headers) { + this.allowedHeaders = headers == null ? ImmutableList.of() : headers; + return this; + } + + public Builder setExposeHeaders(List<String> headers) { + this.exposeHeaders = headers == null ? ImmutableList.of() : headers; + return this; + } + + public Builder setMaxAgeSeconds(Integer seconds) { + this.maxAgeSeconds = seconds; + return this; + } + + public CorsRule build() { + return new CorsRule(this); + } + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index 6491a2ec146c..c3eae7598f65 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -58,6 +58,8 @@ public final class OmBucketArgs extends WithMetadata implements Auditable { private final boolean quotaInBytesSet; private final boolean quotaInNamespaceSet; private final DefaultReplicationConfig defaultReplicationConfig; + private final CorsConfiguration corsConfiguration; + private final boolean clearCorsConfiguration; /** * Bucket Owner Name. */ @@ -76,6 +78,8 @@ private OmBucketArgs(Builder b) { this.quotaInBytesSet = b.quotaInBytesSet; this.quotaInNamespaceSet = b.quotaInNamespaceSet; this.quotaInNamespace = quotaInNamespaceSet ? 
b.quotaInNamespace : OzoneConsts.QUOTA_RESET; this.bekInfo = b.bekInfo; + this.corsConfiguration = b.corsConfiguration; + this.clearCorsConfiguration = b.clearCorsConfiguration; } /** @@ -151,6 +155,18 @@ public BucketEncryptionKeyInfo getBucketEncryptionKeyInfo() { return bekInfo; } + public CorsConfiguration getCorsConfiguration() { + return corsConfiguration; + } + + public boolean hasCorsConfiguration() { + return corsConfiguration != null; + } + + public boolean shouldClearCorsConfiguration() { + return clearCorsConfiguration; + } + /** * Returns Bucket Owner Name. * @@ -204,6 +220,12 @@ public Map toAuditMap() { this.defaultReplicationConfig.getReplicationConfig() .getReplication()); } + if (this.corsConfiguration != null) { + auditMap.put("corsConfiguration", this.corsConfiguration.toString()); + } + if (this.clearCorsConfiguration) { + auditMap.put("clearCorsConfiguration", "true"); + } return auditMap; } @@ -221,6 +243,8 @@ public static class Builder extends WithMetadata.Builder { private long quotaInNamespace; private BucketEncryptionKeyInfo bekInfo; private DefaultReplicationConfig defaultReplicationConfig; + private CorsConfiguration corsConfiguration; + private boolean clearCorsConfiguration; private String ownerName; /** @@ -288,6 +312,21 @@ public Builder setOwnerName(String owner) { return this; } + public Builder setCorsConfiguration( + CorsConfiguration corsConfig) { + this.corsConfiguration = corsConfig; + this.clearCorsConfiguration = false; + return this; + } + + public Builder setClearCorsConfiguration(boolean shouldClear) { + this.clearCorsConfiguration = shouldClear; + if (shouldClear) { + this.corsConfiguration = null; + } + return this; + } + /** * Constructs the OmBucketArgs. * @return instance of OmBucketArgs. 
@@ -330,6 +369,12 @@ public BucketArgs getProtobuf() { if (bekInfo != null) { builder.setBekInfo(OMPBHelper.convert(bekInfo)); } + if (corsConfiguration != null) { + builder.setCorsConfiguration(corsConfiguration.getProtobuf()); + } + if (clearCorsConfiguration) { + builder.setClearCorsConfiguration(true); + } return builder.build(); } @@ -371,6 +416,13 @@ public static Builder builderFromProtobuf(BucketArgs bucketArgs) { builder.setBucketEncryptionKey( OMPBHelper.convert(bucketArgs.getBekInfo())); } + if (bucketArgs.hasCorsConfiguration()) { + builder.setCorsConfiguration(CorsConfiguration.getFromProtobuf( + bucketArgs.getCorsConfiguration())); + } + if (bucketArgs.hasClearCorsConfiguration()) { + builder.setClearCorsConfiguration(bucketArgs.getClearCorsConfiguration()); + } return builder; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index bce6adb636a0..ad9b10b8bb2e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -85,6 +85,8 @@ public final class OmBucketInfo extends WithObjectID implements Auditable, CopyO */ private final DefaultReplicationConfig defaultReplicationConfig; + private final CorsConfiguration corsConfiguration; + private final String sourceVolume; private final String sourceBucket; @@ -128,6 +130,7 @@ private OmBucketInfo(Builder b) { this.bucketLayout = b.bucketLayout; this.owner = b.owner; this.defaultReplicationConfig = b.defaultReplicationConfig; + this.corsConfiguration = b.corsConfiguration; } public static Codec getCodec() { @@ -217,6 +220,10 @@ public DefaultReplicationConfig getDefaultReplicationConfig() { return defaultReplicationConfig; } + public CorsConfiguration getCorsConfiguration() { + return corsConfiguration; + } + public String 
getSourceVolume() { return sourceVolume; } @@ -378,7 +385,8 @@ public Builder toBuilder() { .setSnapshotUsedNamespace(snapshotUsedNamespace) .setBucketLayout(bucketLayout) .setOwner(owner) - .setDefaultReplicationConfig(defaultReplicationConfig); + .setDefaultReplicationConfig(defaultReplicationConfig) + .setCorsConfiguration(corsConfiguration); } /** @@ -404,6 +412,7 @@ public static class Builder extends WithObjectID.Builder { private DefaultReplicationConfig defaultReplicationConfig; private long snapshotUsedBytes; private long snapshotUsedNamespace; + private CorsConfiguration corsConfiguration; public Builder() { acls = AclListBuilder.empty(); @@ -550,6 +559,12 @@ public Builder setDefaultReplicationConfig( return this; } + public Builder setCorsConfiguration( + CorsConfiguration corsConfig) { + this.corsConfiguration = corsConfig; + return this; + } + @Override protected void validate() { super.validate(); @@ -595,6 +610,9 @@ public BucketInfo getProtobuf() { if (defaultReplicationConfig != null) { bib.setDefaultReplicationConfig(defaultReplicationConfig.toProto()); } + if (corsConfiguration != null) { + bib.setCorsConfiguration(corsConfiguration.getProtobuf()); + } if (sourceVolume != null) { bib.setSourceVolume(sourceVolume); } @@ -651,6 +669,10 @@ public static Builder builderFromProtobuf(BucketInfo bucketInfo, DefaultReplicationConfig.fromProto( bucketInfo.getDefaultReplicationConfig())); } + if (bucketInfo.hasCorsConfiguration()) { + obib.setCorsConfiguration(CorsConfiguration.getFromProtobuf( + bucketInfo.getCorsConfiguration())); + } if (bucketInfo.hasObjectID()) { obib.setObjectID(bucketInfo.getObjectID()); } @@ -745,7 +767,8 @@ public boolean equals(Object o) { Objects.equals(getMetadata(), that.getMetadata()) && Objects.equals(bekInfo, that.bekInfo) && Objects.equals(owner, that.owner) && - Objects.equals(defaultReplicationConfig, that.defaultReplicationConfig); + Objects.equals(defaultReplicationConfig, that.defaultReplicationConfig) && + 
Objects.equals(corsConfiguration, that.corsConfiguration); } @Override @@ -777,6 +800,7 @@ public String toString() { ", bucketLayout=" + bucketLayout + ", owner=" + owner + ", defaultReplicationConfig=" + defaultReplicationConfig + + ", corsConfiguration=" + corsConfiguration + '}'; } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketArgs.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketArgs.java index 147255b3b573..ae10df036b1a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketArgs.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketArgs.java @@ -23,12 +23,13 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import java.util.Collections; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.junit.jupiter.api.Test; /** - * Tests for the OmBucketArgs class. + * Test bucket property arguments. 
*/ public class TestOmBucketArgs { @@ -90,4 +91,38 @@ public void testDefaultReplicationConfigIsSetCorrectly() { assertEquals(EC, argsFromProto.getDefaultReplicationConfig().getType()); } + + @Test + public void corsConfigurationCanBeSetOrCleared() { + CorsConfiguration corsConfiguration = + CorsConfiguration.newBuilder() + .addRule(CorsRule.newBuilder() + .setAllowedOrigins(Collections.singletonList("*")) + .setAllowedMethods(Collections.singletonList("GET")) + .build()) + .build(); + + OmBucketArgs setArgs = OmBucketArgs.newBuilder() + .setVolumeName("vol1") + .setBucketName("bucket") + .setCorsConfiguration(corsConfiguration) + .build(); + + OmBucketArgs recoveredSetArgs = + OmBucketArgs.getFromProtobuf(setArgs.getProtobuf()); + assertTrue(recoveredSetArgs.hasCorsConfiguration()); + assertFalse(recoveredSetArgs.shouldClearCorsConfiguration()); + assertEquals(corsConfiguration, recoveredSetArgs.getCorsConfiguration()); + + OmBucketArgs clearArgs = OmBucketArgs.newBuilder() + .setVolumeName("vol1") + .setBucketName("bucket") + .setClearCorsConfiguration(true) + .build(); + + OmBucketArgs recoveredClearArgs = + OmBucketArgs.getFromProtobuf(clearArgs.getProtobuf()); + assertFalse(recoveredClearArgs.hasCorsConfiguration()); + assertTrue(recoveredClearArgs.shouldClearCorsConfiguration()); + } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java index 857103a20c0d..0173edde88f6 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java @@ -23,6 +23,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import java.util.Arrays; import java.util.Collections; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; 
import org.apache.hadoop.hdds.client.ECReplicationConfig; @@ -67,6 +68,35 @@ public void protobufConversionOfBucketLink() { OmBucketInfo.getFromProtobuf(bucket.getProtobuf())); } + @Test + public void protobufConversionWithCorsConfiguration() { + CorsRule corsRule = CorsRule.newBuilder() + .setId("read-rule") + .setAllowedOrigins(Arrays.asList("https://example.com")) + .setAllowedMethods(Arrays.asList("GET", "HEAD")) + .setAllowedHeaders(Arrays.asList("Authorization", "x-amz-*")) + .setExposeHeaders(Arrays.asList("ETag")) + .setMaxAgeSeconds(3000) + .build(); + CorsConfiguration corsConfiguration = + CorsConfiguration.newBuilder() + .addRule(corsRule) + .build(); + + OmBucketInfo bucket = OmBucketInfo.newBuilder() + .setBucketName("bucket") + .setVolumeName("vol1") + .setCreationTime(1L) + .setIsVersionEnabled(false) + .setStorageType(StorageType.ARCHIVE) + .setCorsConfiguration(corsConfiguration) + .build(); + + OmBucketInfo recovered = OmBucketInfo.getFromProtobuf(bucket.getProtobuf()); + assertEquals(corsConfiguration, recovered.getCorsConfiguration()); + assertEquals(bucket, recovered); + } + @Test public void testClone() { OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() diff --git a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh index 556590a14a29..c428cb9f1e94 100755 --- a/hadoop-ozone/dist/src/main/compose/common/ec-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/ec-test.sh @@ -18,7 +18,7 @@ start_docker_env 5 ## Exclude virtual-host tests. This is tested separately as it requires additional config. 
-execute_robot_test scm -v BUCKET:erasure --exclude virtual-host s3 +execute_robot_test scm -v BUCKET:erasure --exclude virtual-host --exclude bucket-cors s3 execute_robot_test scm ec/rewrite.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh index af67a7099dde..ca6c8b1c352d 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test-haproxy-s3g.sh @@ -31,10 +31,9 @@ source "$COMPOSE_DIR/../testlib.sh" start_docker_env ## Exclude virtual-host tests. This is tested separately as it requires additional config. -exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude bucket-cors" for bucket in generated; do execute_robot_test ${SCM} -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once - ## Exclude awss3virtualhost.robot - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude bucket-cors --exclude no-bucket-type" done diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh index 6c09e7b76158..60b41f670bc0 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh @@ -38,12 +38,12 @@ execute_robot_test ${SCM} basic/links.robot execute_robot_test ${SCM} -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-ofs-link ozonefs/ozonefs.robot ## Exclude virtual-host tests. This is tested separately as it requires additional config. 
-exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude bucket-cors" for bucket in generated; do for layout in OBJECT_STORE LEGACY FILE_SYSTEM_OPTIMIZED; do execute_robot_test ${SCM} -v BUCKET:${bucket} -v BUCKET_LAYOUT:${layout} -N s3-${layout}-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude bucket-cors --exclude no-bucket-type" done done diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index 653a0aaf766e..42066086ca3b 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -53,6 +53,7 @@ execute_robot_test scm -v USERNAME:httpfs httpfs execute_robot_test scm -v SCHEME:o3fs -v BUCKET_TYPE:bucket -N ozonefs-o3fs-bucket ozonefs/ozonefs.robot execute_robot_test scm -v SCHEME:ofs -N ozonefs-obs ozonefs/ozonefs-obs.robot +execute_robot_test s3g -N s3-bucketcors s3/bucketcors.robot execute_robot_test s3g grpc/grpc-om-s3-metrics.robot execute_robot_test scm --exclude pre-finalized-snapshot-tests snapshot diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh index a2b11418a88c..7ced55010f6a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-haproxy-s3g.sh @@ -36,10 +36,10 @@ start_docker_env execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME} ## Exclude virtual-host tests. This is tested separately as it requires additional config. 
-exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude bucket-cors" for bucket in encrypted; do execute_robot_test recon -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once ## Exclude virtual-host.robot - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude bucket-cors --exclude no-bucket-type" done diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh index 6d0b4442ffa6..d476aacbf306 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test.sh @@ -47,12 +47,12 @@ execute_robot_test s3g -v SCHEME:o3fs -v BUCKET_TYPE:link -N ozonefs-o3fs-link o execute_robot_test s3g basic/links.robot ## Exclude virtual-host tests. This is tested separately as it requires additional config. -exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude bucket-cors" for bucket in link; do execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once ## Exclude virtual-host.robot - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude bucket-cors --exclude no-bucket-type" done # Run Fault Injection tests at the end diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh index 0d1fa16a927f..104bc49dd1b9 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-vault.sh @@ -31,4 +31,4 @@ export COMPOSE_FILE=docker-compose.yaml:vault.yaml start_docker_env ## Exclude virtual-host tests. This is tested separately as it requires additional config. 
-execute_robot_test scm --exclude virtual-host s3 +execute_robot_test scm --exclude virtual-host --exclude bucket-cors s3 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh index 637268b59e54..e2ed956a047e 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh @@ -44,12 +44,12 @@ execute_robot_test scm repair/bucket-encryption.robot execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:bucket -N ozonefs-ofs-bucket ozonefs/ozonefs.robot ## Exclude virtual-host tests. This is tested separately as it requires additional config. -exclude="--exclude virtual-host" +exclude="--exclude virtual-host --exclude bucket-cors" for bucket in encrypted; do execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 # some tests are independent of the bucket type, only need to be run once ## Exclude virtual-host.robot - exclude="--exclude virtual-host --exclude no-bucket-type" + exclude="--exclude virtual-host --exclude bucket-cors --exclude no-bucket-type" done #expects 4 pipelines, should be run before diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcors.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcors.robot new file mode 100644 index 000000000000..81ec8295545d --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcors.robot @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation S3 bucket CORS configuration test with aws cli +Library OperatingSystem +Library String +Resource ../commonlib.robot +Resource commonawslib.robot +Test Timeout 5 minutes +Suite Setup Setup s3 tests +Test Tags no-bucket-type bucket-cors + +*** Variables *** +${ENDPOINT_URL} http://s3g:9878 +${BUCKET} generated +${CORS_FILE} /tmp/ozone-bucket-cors.json + +*** Test Cases *** + +Put get and delete bucket CORS configuration + ${cors} = Catenate SEPARATOR= + ... {"CORSRules":[{"ID":"robot-write-cors", + ... "AllowedOrigins":["http://www.example.com"], + ... "AllowedMethods":["PUT","POST","DELETE"], + ... "AllowedHeaders":["*"], + ... "ExposeHeaders":["x-amz-server-side-encryption"], + ... "MaxAgeSeconds":3000}, + ... {"ID":"robot-read-cors", + ... "AllowedOrigins":["*"], + ... 
"AllowedMethods":["GET","HEAD"]}]} + Create File ${CORS_FILE} ${cors} + + ${result} = Execute AWSS3APICli put-bucket-cors --bucket ${BUCKET} --cors-configuration file://${CORS_FILE} + ${result} = Execute AWSS3APICli get-bucket-cors --bucket ${BUCKET} + Should Contain ${result} robot-write-cors + Should Contain ${result} robot-read-cors + Should Contain ${result} http://www.example.com + Should Contain ${result} x-amz-server-side-encryption + + ${preflight} = Execute curl --silent --show-error --include -X OPTIONS -H 'Origin: http://www.example.com' -H 'Access-Control-Request-Method: PUT' -H 'Access-Control-Request-Headers: x-amz-meta-test' ${ENDPOINT_URL}/${BUCKET}/${PREFIX}/cors-key + Should Contain ${preflight} HTTP/1.1 200 + Should Contain ${preflight} Access-Control-Allow-Origin: http://www.example.com + Should Contain ${preflight} Access-Control-Allow-Methods: PUT, POST, DELETE + Should Contain ${preflight} Access-Control-Allow-Headers: x-amz-meta-test + Should Contain ${preflight} Access-Control-Max-Age: 3000 + Should Contain ${preflight} Access-Control-Expose-Headers: x-amz-server-side-encryption + + ${preflight} = Execute curl --silent --show-error --include -X OPTIONS -H 'Origin: https://other.example.com' -H 'Access-Control-Request-Method: GET' ${ENDPOINT_URL}/${BUCKET}/${PREFIX}/cors-key + Should Contain ${preflight} HTTP/1.1 200 + Should Contain ${preflight} Access-Control-Allow-Origin: https://other.example.com + Should Contain ${preflight} Access-Control-Allow-Methods: GET, HEAD + + ${status} = Execute curl --silent --show-error --output /dev/null --write-out '\%{http_code}' -X OPTIONS -H 'Origin: https://other.example.com' -H 'Access-Control-Request-Method: DELETE' ${ENDPOINT_URL}/${BUCKET}/${PREFIX}/cors-key + Should Be Equal ${status} 403 + + ${result} = Execute AWSS3APICli delete-bucket-cors --bucket ${BUCKET} + ${result} = Execute AWSS3APICli and checkrc get-bucket-cors --bucket ${BUCKET} 255 + Should Contain ${result} NoSuchCORSConfiguration + + 
[Teardown] Remove File ${CORS_FILE} diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot index 5e8ed10e9823..5533ac474410 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot @@ -42,6 +42,9 @@ List buckets Get bucket info with Ozone Shell to check the owner field Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute ozone sh bucket info /s3v/${BUCKET} | jq -r '.owner' + IF '${result}' == '' + ${result} = Get bucket owner ${BUCKET} + END Should Be Equal ${result} testuser # In ozonesecure(-ha) docker-config, hadoop.security.auth_to_local is set # in the way that getShortUserName() converts the accessId to "testuser". diff --git a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/MultiS3GatewayService.java b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/MultiS3GatewayService.java index 11ab61f16855..539538c25572 100644 --- a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/MultiS3GatewayService.java +++ b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/MultiS3GatewayService.java @@ -49,7 +49,7 @@ public void start(OzoneConfiguration conf) throws Exception { List urls = new ArrayList<>(); for (S3GatewayService service : gatewayServices) { service.start(conf); - String redirectUrl = "http://" + service.getConf().get(S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY); + String redirectUrl = "http://" + service.getHttpAddressForClient(); urls.add(redirectUrl); } diff --git a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/S3GatewayService.java b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/S3GatewayService.java index 44ed2f8a4198..3cea2db39fee 100644 --- 
a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/S3GatewayService.java +++ b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/S3GatewayService.java @@ -17,8 +17,10 @@ package org.apache.hadoop.ozone.s3; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.HOST_ADDRESS; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; +import java.net.InetSocketAddress; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.ratis.util.Preconditions; @@ -56,6 +58,13 @@ public OzoneConfiguration getConf() { return OzoneConfigurationHolder.configuration(); } + public String getHttpAddressForClient() { + final Gateway instance = s3g; + Preconditions.assertNotNull(instance, "S3 Gateway not running"); + InetSocketAddress address = instance.getHttpAddress(); + return HOST_ADDRESS + ":" + address.getPort(); + } + private void configureS3G(OzoneConfiguration conf) { OzoneConfigurationHolder.resetConfiguration(); diff --git a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java index c39a840d375b..c1c4b61fd39d 100644 --- a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java +++ b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java @@ -38,6 +38,8 @@ import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; import com.amazonaws.services.s3.model.AccessControlList; import com.amazonaws.services.s3.model.Bucket; +import com.amazonaws.services.s3.model.BucketCrossOriginConfiguration; +import com.amazonaws.services.s3.model.CORSRule; import com.amazonaws.services.s3.model.CanonicalGrantee; import 
com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; @@ -256,6 +258,68 @@ public void testBucketACLOperations() { //assertEquals(aclList, s3Client.getBucketAcl(bucketName)); } + @Test + public void testBucketCORSOperations() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + + CORSRule rule = new CORSRule() + .withId("sdk-v1-cors") + .withAllowedOrigins(Collections.singletonList("https://example.com")) + .withAllowedMethods(Arrays.asList( + CORSRule.AllowedMethods.GET, CORSRule.AllowedMethods.HEAD)) + .withAllowedHeaders(Collections.singletonList("Authorization")) + .withExposedHeaders(Collections.singletonList("ETag")) + .withMaxAgeSeconds(3600); + BucketCrossOriginConfiguration configuration = + new BucketCrossOriginConfiguration().withRules(rule); + + s3Client.setBucketCrossOriginConfiguration(bucketName, configuration); + + BucketCrossOriginConfiguration result = + s3Client.getBucketCrossOriginConfiguration(bucketName); + assertThat(result.getRules()).hasSize(1); + CORSRule resultRule = result.getRules().get(0); + assertEquals("sdk-v1-cors", resultRule.getId()); + assertThat(resultRule.getAllowedOrigins()) + .containsExactly("https://example.com"); + assertThat(resultRule.getAllowedMethods()).containsExactly( + CORSRule.AllowedMethods.GET, CORSRule.AllowedMethods.HEAD); + assertThat(resultRule.getAllowedHeaders()) + .containsExactly("Authorization"); + assertThat(resultRule.getExposedHeaders()).containsExactly("ETag"); + assertEquals(3600, resultRule.getMaxAgeSeconds()); + + s3Client.deleteBucketCrossOriginConfiguration(bucketName); + + try { + assertThat(s3Client.getBucketCrossOriginConfiguration(bucketName)) + .isNull(); + } catch (AmazonServiceException ase) { + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchCORSConfiguration", ase.getErrorCode()); + } + } + + @Test + public 
void testDeleteBucketCORSWithoutConfiguration() { + final String bucketName = getBucketName(); + s3Client.createBucket(bucketName); + + s3Client.deleteBucketCrossOriginConfiguration(bucketName); + s3Client.deleteBucketCrossOriginConfiguration(bucketName); + + try { + assertThat(s3Client.getBucketCrossOriginConfiguration(bucketName)) + .isNull(); + } catch (AmazonServiceException ase) { + assertEquals(ErrorType.Client, ase.getErrorType()); + assertEquals(404, ase.getStatusCode()); + assertEquals("NoSuchCORSConfiguration", ase.getErrorCode()); + } + } + @Test public void testListBuckets() throws IOException { List bucketNames = new ArrayList<>(); diff --git a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v2/AbstractS3SDKV2Tests.java b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v2/AbstractS3SDKV2Tests.java index 51d0d7bbd204..0c5a78a9bf05 100644 --- a/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v2/AbstractS3SDKV2Tests.java +++ b/hadoop-ozone/integration-test-s3/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v2/AbstractS3SDKV2Tests.java @@ -103,6 +103,8 @@ import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3Configuration; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CORSConfiguration; +import software.amazon.awssdk.services.s3.model.CORSRule; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; @@ -220,6 +222,61 @@ public void listBuckets() throws Exception { assertEquals(S3Owner.DEFAULT_S3OWNER_ID, syncResponse.owner().id()); } + @Test + public void testBucketCORSOperations() { + final String bucketName = getBucketName(); + s3Client.createBucket(b -> 
b.bucket(bucketName)); + + CORSRule rule = CORSRule.builder() + .id("sdk-v2-cors") + .allowedOrigins("https://example.com") + .allowedMethods("GET", "HEAD") + .allowedHeaders("Authorization") + .exposeHeaders("ETag") + .maxAgeSeconds(3600) + .build(); + CORSConfiguration configuration = CORSConfiguration.builder() + .corsRules(rule) + .build(); + + s3Client.putBucketCors(b -> b.bucket(bucketName) + .corsConfiguration(configuration)); + + List resultRules = + s3Client.getBucketCors(b -> b.bucket(bucketName)).corsRules(); + assertThat(resultRules).hasSize(1); + CORSRule resultRule = resultRules.get(0); + assertEquals("sdk-v2-cors", resultRule.id()); + assertThat(resultRule.allowedOrigins()).containsExactly("https://example.com"); + assertThat(resultRule.allowedMethods()).containsExactly("GET", "HEAD"); + assertThat(resultRule.allowedHeaders()).containsExactly("Authorization"); + assertThat(resultRule.exposeHeaders()).containsExactly("ETag"); + assertEquals(3600, resultRule.maxAgeSeconds()); + + s3Client.deleteBucketCors(b -> b.bucket(bucketName)); + + S3Exception exception = assertThrows(S3Exception.class, + () -> s3Client.getBucketCors(b -> b.bucket(bucketName))); + assertEquals(404, exception.statusCode()); + assertEquals("NoSuchCORSConfiguration", + exception.awsErrorDetails().errorCode()); + } + + @Test + public void testDeleteBucketCORSWithoutConfiguration() { + final String bucketName = getBucketName(); + s3Client.createBucket(b -> b.bucket(bucketName)); + + s3Client.deleteBucketCors(b -> b.bucket(bucketName)); + s3Client.deleteBucketCors(b -> b.bucket(bucketName)); + + S3Exception exception = assertThrows(S3Exception.class, + () -> s3Client.getBucketCors(b -> b.bucket(bucketName))); + assertEquals(404, exception.statusCode()); + assertEquals("NoSuchCORSConfiguration", + exception.awsErrorDetails().errorCode()); + } + @Test public void testPutObject() { final String bucketName = getBucketName(); @@ -1691,6 +1748,27 @@ public void testPutBucketAcl() { 
verifyBucketOwnershipVerificationAccessDenied(() -> s3Client.putBucketAcl(wrongRequest)); } + @Test + public void testPutBucketCors() { + CORSRule rule = CORSRule.builder() + .allowedOrigins("https://example.com") + .allowedMethods("GET") + .build(); + CORSConfiguration configuration = CORSConfiguration.builder() + .corsRules(rule) + .build(); + + verifyPassBucketOwnershipVerification(() -> s3Client.putBucketCors( + b -> b.bucket(DEFAULT_BUCKET_NAME) + .expectedBucketOwner(correctOwner) + .corsConfiguration(configuration))); + + verifyBucketOwnershipVerificationAccessDenied(() -> s3Client.putBucketCors( + b -> b.bucket(DEFAULT_BUCKET_NAME) + .expectedBucketOwner(WRONG_OWNER) + .corsConfiguration(configuration))); + } + @Test public void testHeadBucket() { HeadBucketRequest correctRequest = HeadBucketRequest.builder() diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index a972e87ddc1b..c15f5dca50c3 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -787,6 +787,7 @@ message BucketInfo { optional hadoop.hdds.DefaultReplicationConfig defaultReplicationConfig = 20; optional uint64 snapshotUsedBytes = 21; optional uint64 snapshotUsedNamespace = 22; + optional CORSConfiguration corsConfiguration = 23; } enum BucketLayoutProto { @@ -860,6 +861,21 @@ message BucketArgs { optional string ownerName = 10; optional hadoop.hdds.DefaultReplicationConfig defaultReplicationConfig = 11; optional BucketEncryptionInfoProto bekInfo = 12; + optional CORSConfiguration corsConfiguration = 13; + optional bool clearCorsConfiguration = 14 [default = false]; +} + +message CORSConfiguration { + repeated CORSRule corsRule = 1; +} + +message CORSRule { + optional string id = 1; + repeated string allowedOrigins = 2; + repeated string allowedMethods = 3; + repeated string allowedHeaders = 4; 
+ repeated string exposeHeaders = 5; + optional uint32 maxAgeSeconds = 6; } message PrefixInfo { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index a88e5fb73334..71b79c3883d7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -205,6 +205,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut bucketInfoBuilder.setBucketEncryptionKey(bek); } + if (omBucketArgs.hasCorsConfiguration()) { + bucketInfoBuilder.setCorsConfiguration( + omBucketArgs.getCorsConfiguration()); + } else if (omBucketArgs.shouldClearCorsConfiguration()) { + bucketInfoBuilder.setCorsConfiguration(null); + } + omBucketInfo = bucketInfoBuilder.build(); // Update table cache. 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java index c7c27abeb6a2..944a5c5e0e95 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java @@ -26,6 +26,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import java.util.Arrays; +import java.util.Collections; import java.util.UUID; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; @@ -33,6 +35,8 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; +import org.apache.hadoop.ozone.om.helpers.CorsRule; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -157,17 +161,130 @@ public void testValidateAndUpdateCacheFails() throws Exception { private OMRequest createSetBucketPropertyRequest(String volumeName, String bucketName, boolean isVersionEnabled, long quotaInBytes) { + BucketArgs bucketArgs = BucketArgs.newBuilder().setBucketName(bucketName) + .setVolumeName(volumeName) + .setQuotaInBytes(quotaInBytes) + .setQuotaInNamespace(1000L) + .setIsVersionEnabled(isVersionEnabled).build(); + return createSetBucketPropertyRequest(bucketArgs); + } + + private OMRequest createSetBucketPropertyRequest(BucketArgs bucketArgs) { return 
OMRequest.newBuilder().setSetBucketPropertyRequest( - SetBucketPropertyRequest.newBuilder().setBucketArgs( - BucketArgs.newBuilder().setBucketName(bucketName) - .setVolumeName(volumeName) - .setQuotaInBytes(quotaInBytes) - .setQuotaInNamespace(1000L) - .setIsVersionEnabled(isVersionEnabled).build())) + SetBucketPropertyRequest.newBuilder().setBucketArgs(bucketArgs)) .setCmdType(OzoneManagerProtocolProtos.Type.SetBucketProperty) .setClientId(UUID.randomUUID().toString()).build(); } + @Test + public void testSetCorsConfiguration() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + CorsConfiguration corsConfiguration = createCorsConfiguration(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + BucketArgs bucketArgs = OmBucketArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setCorsConfiguration(corsConfiguration) + .build() + .getProtobuf(); + + OMBucketSetPropertyRequest request = + new OMBucketSetPropertyRequest( + createSetBucketPropertyRequest(bucketArgs)); + + OMClientResponse response = + request.validateAndUpdateCache(ozoneManager, 1); + + assertEquals(OzoneManagerProtocolProtos.Status.OK, + response.getOMResponse().getStatus()); + OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName)); + assertEquals(corsConfiguration, bucketInfo.getCorsConfiguration()); + } + + @Test + public void testDeleteCorsConfiguration() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OmBucketInfo.Builder bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setCorsConfiguration(createCorsConfiguration()); + + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addBucketToDB(omMetadataManager, bucketInfo); + BucketArgs 
bucketArgs = OmBucketArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setClearCorsConfiguration(true) + .build() + .getProtobuf(); + + OMBucketSetPropertyRequest request = + new OMBucketSetPropertyRequest( + createSetBucketPropertyRequest(bucketArgs)); + + OMClientResponse response = + request.validateAndUpdateCache(ozoneManager, 1); + + assertEquals(OzoneManagerProtocolProtos.Status.OK, + response.getOMResponse().getStatus()); + OmBucketInfo updatedBucketInfo = omMetadataManager.getBucketTable().get( + omMetadataManager.getBucketKey(volumeName, bucketName)); + assertNull(updatedBucketInfo.getCorsConfiguration()); + } + + @Test + public void rejectsDeletingCorsConfigurationOnLink() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String linkName = UUID.randomUUID().toString(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + OmBucketInfo.Builder link = OmBucketInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(linkName) + .setSourceVolume(volumeName) + .setSourceBucket(bucketName); + OMRequestTestUtils.addBucketToDB(omMetadataManager, link); + BucketArgs bucketArgs = OmBucketArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(linkName) + .setClearCorsConfiguration(true) + .build() + .getProtobuf(); + + OMBucketSetPropertyRequest request = + new OMBucketSetPropertyRequest( + createSetBucketPropertyRequest(bucketArgs)); + + OMClientResponse response = + request.validateAndUpdateCache(ozoneManager, 1); + + assertFalse(response.getOMResponse().getSuccess()); + assertEquals( + OzoneManagerProtocolProtos.Status.NOT_SUPPORTED_OPERATION, + response.getOMResponse().getStatus()); + } + + private static CorsConfiguration createCorsConfiguration() { + return CorsConfiguration.newBuilder() + .addRule(CorsRule.newBuilder() + .setId("read-rule") + .setAllowedOrigins(Collections.singletonList( + 
"https://example.com")) + .setAllowedMethods(Arrays.asList("GET", "HEAD")) + .setAllowedHeaders(Collections.singletonList("Authorization")) + .setExposeHeaders(Collections.singletonList("ETag")) + .setMaxAgeSeconds(3000) + .build()) + .build(); + } + @Test public void testValidateAndUpdateCacheWithQuota() throws Exception { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java index 6c295b7aafc7..05c526dc0e31 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/audit/S3GAction.java @@ -29,6 +29,9 @@ public enum S3GAction implements AuditAction { DELETE_BUCKET, GET_ACL, PUT_ACL, + GET_BUCKET_CORS, + PUT_BUCKET_CORS, + DELETE_BUCKET_CORS, LIST_MULTIPART_UPLOAD, MULTI_DELETE, diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java index ae4ce9bada83..b1f5230ac9d5 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -36,6 +36,7 @@ import org.apache.hadoop.ozone.s3.signature.SignatureInfo.Version; import org.apache.hadoop.ozone.s3.signature.SignatureProcessor; import org.apache.hadoop.ozone.s3.signature.StringToSignProducer; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,6 +64,11 @@ public class AuthorizationFilter implements ContainerRequestFilter { @Override public void filter(ContainerRequestContext context) throws IOException { + if (isCorsPreflightRequest(context)) { + signatureInfo.initialize(new SignatureInfo.Builder(Version.NONE).build()); + return; + } + try { 
signatureInfo.initialize(signatureProcessor.parseSignature()); if (signatureInfo.getVersion() == Version.V4) { @@ -106,4 +112,12 @@ public SignatureInfo getSignatureInfo() { return signatureInfo; } + private static boolean isCorsPreflightRequest( + ContainerRequestContext context) { + return "OPTIONS".equalsIgnoreCase(context.getMethod()) + && context.getHeaderString(S3Consts.ORIGIN_HEADER) != null + && context.getHeaderString( + S3Consts.ACCESS_CONTROL_REQUEST_METHOD) != null; + } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java index 3a5978a7ff89..7c55385989da 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java @@ -18,11 +18,19 @@ package org.apache.hadoop.ozone.s3; import java.io.IOException; +import java.util.Map; +import java.util.Optional; import javax.inject.Inject; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.container.ContainerResponseContext; import javax.ws.rs.container.ContainerResponseFilter; import javax.ws.rs.ext.Provider; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.om.helpers.CorsRule; +import org.apache.hadoop.ozone.s3.util.S3Consts; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class adds common header responses for all the requests. 
@@ -31,6 +39,9 @@ public class CommonHeadersContainerResponseFilter implements ContainerResponseFilter { + private static final Logger LOG = + LoggerFactory.getLogger(CommonHeadersContainerResponseFilter.class); + @Inject private RequestIdentifier requestIdentifier; @@ -44,5 +55,54 @@ public void filter(ContainerRequestContext containerRequestContext, containerResponseContext.getHeaders() .add("x-amz-request-id", requestIdentifier.getRequestId()); + addCorsHeaders(containerRequestContext, containerResponseContext); + } + + private void addCorsHeaders(ContainerRequestContext requestContext, + ContainerResponseContext responseContext) { + String origin = requestContext.getHeaderString(S3Consts.ORIGIN_HEADER); + if (StringUtils.isBlank(origin) + || "OPTIONS".equalsIgnoreCase(requestContext.getMethod())) { + return; + } + + String bucketName = getBucketName(requestContext); + if (StringUtils.isBlank(bucketName)) { + return; + } + + try { + OzoneBucket bucket = getCachedBucket(requestContext, bucketName); + if (bucket == null) { + return; + } + Optional rule = S3CorsHeaders.findMatchingRule( + bucket.getCorsConfiguration(), origin, requestContext.getMethod(), + null); + rule.ifPresent(corsRule -> S3CorsHeaders.applyHeaders( + responseContext.getHeaders(), corsRule, origin, null, false)); + } catch (Exception ex) { + LOG.debug("Unable to add CORS headers for bucket {}", bucketName, ex); + } + } + + @SuppressWarnings("unchecked") + private static OzoneBucket getCachedBucket( + ContainerRequestContext requestContext, String bucketName) { + Map buckets = + (Map) requestContext.getProperty( + S3Consts.CACHED_BUCKETS_CONTEXT_PROPERTY); + return buckets == null ? null : buckets.get(bucketName); + } + + private static String getBucketName(ContainerRequestContext requestContext) { + String path = requestContext.getUriInfo().getPath(false); + if (StringUtils.isBlank(path)) { + return null; + } + String normalizedPath = path.charAt(0) == '/' ? 
path.substring(1) : path; + int delimiter = normalizedPath.indexOf('/'); + return delimiter < 0 ? normalizedPath : normalizedPath.substring(0, + delimiter); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3CorsHeaders.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3CorsHeaders.java new file mode 100644 index 000000000000..02914a4e724b --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3CorsHeaders.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3; + +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_ALLOW_HEADERS; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_ALLOW_METHODS; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_ALLOW_ORIGIN; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_EXPOSE_HEADERS; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_MAX_AGE; + +import java.util.Arrays; +import java.util.Locale; +import java.util.Optional; +import java.util.stream.Collectors; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.Response; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; +import org.apache.hadoop.ozone.om.helpers.CorsRule; + +/** + * Evaluates S3 CORS rules and writes CORS response headers. + */ +public final class S3CorsHeaders { + + private S3CorsHeaders() { + } + + public static Optional findMatchingRule( + CorsConfiguration configuration, String origin, String method, + String requestedHeaders) { + if (configuration == null || StringUtils.isBlank(origin) + || StringUtils.isBlank(method)) { + return Optional.empty(); + } + return configuration.getRules().stream() + .filter(rule -> matchesAny(origin, rule.getAllowedOrigins(), true)) + .filter(rule -> rule.getAllowedMethods().stream() + .anyMatch(allowed -> allowed.equalsIgnoreCase(method))) + .filter(rule -> requestedHeadersMatch(requestedHeaders, rule)) + .findFirst(); + } + + public static void applyHeaders(MultivaluedMap headers, + CorsRule rule, String origin, String requestedHeaders, + boolean preflight) { + headers.putSingle(ACCESS_CONTROL_ALLOW_ORIGIN, origin); + if (preflight) { + headers.putSingle(ACCESS_CONTROL_ALLOW_METHODS, + String.join(", ", rule.getAllowedMethods())); + String allowedHeaders = allowedHeadersForResponse(requestedHeaders, rule); + if (!allowedHeaders.isEmpty()) { + 
headers.putSingle(ACCESS_CONTROL_ALLOW_HEADERS, allowedHeaders); + } + if (rule.getMaxAgeSeconds() != null) { + headers.putSingle(ACCESS_CONTROL_MAX_AGE, + String.valueOf(rule.getMaxAgeSeconds())); + } + } + if (!rule.getExposeHeaders().isEmpty()) { + headers.putSingle(ACCESS_CONTROL_EXPOSE_HEADERS, + String.join(", ", rule.getExposeHeaders())); + } + } + + public static Response.ResponseBuilder applyHeaders( + Response.ResponseBuilder builder, CorsRule rule, String origin, + String requestedHeaders, boolean preflight) { + builder.header(ACCESS_CONTROL_ALLOW_ORIGIN, origin); + if (preflight) { + builder.header(ACCESS_CONTROL_ALLOW_METHODS, + String.join(", ", rule.getAllowedMethods())); + String allowedHeaders = allowedHeadersForResponse(requestedHeaders, rule); + if (!allowedHeaders.isEmpty()) { + builder.header(ACCESS_CONTROL_ALLOW_HEADERS, allowedHeaders); + } + if (rule.getMaxAgeSeconds() != null) { + builder.header(ACCESS_CONTROL_MAX_AGE, + String.valueOf(rule.getMaxAgeSeconds())); + } + } + if (!rule.getExposeHeaders().isEmpty()) { + builder.header(ACCESS_CONTROL_EXPOSE_HEADERS, + String.join(", ", rule.getExposeHeaders())); + } + return builder; + } + + private static boolean requestedHeadersMatch( + String requestedHeaders, CorsRule rule) { + if (StringUtils.isBlank(requestedHeaders)) { + return true; + } + return Arrays.stream(requestedHeaders.split(",")) + .map(String::trim) + .filter(StringUtils::isNotEmpty) + .allMatch(header -> matchesAny(header, rule.getAllowedHeaders(), false)); + } + + private static String allowedHeadersForResponse( + String requestedHeaders, CorsRule rule) { + if (StringUtils.isBlank(requestedHeaders)) { + return ""; + } + return Arrays.stream(requestedHeaders.split(",")) + .map(String::trim) + .filter(StringUtils::isNotEmpty) + .filter(header -> matchesAny(header, rule.getAllowedHeaders(), false)) + .collect(Collectors.joining(", ")); + } + + private static boolean matchesAny(String value, Iterable patterns, + boolean 
caseSensitive) { + for (String pattern : patterns) { + if (matches(value, pattern, caseSensitive)) { + return true; + } + } + return false; + } + + private static boolean matches(String value, String pattern, + boolean caseSensitive) { + if (pattern == null) { + return false; + } + String candidate = caseSensitive ? value : value.toLowerCase(Locale.ROOT); + String normalizedPattern = caseSensitive ? pattern + : pattern.toLowerCase(Locale.ROOT); + if ("*".equals(normalizedPattern)) { + return true; + } + int wildcard = normalizedPattern.indexOf('*'); + if (wildcard < 0) { + return candidate.equals(normalizedPattern); + } + String prefix = normalizedPattern.substring(0, wildcard); + String suffix = normalizedPattern.substring(wildcard + 1); + return candidate.startsWith(prefix) && candidate.endsWith(suffix); + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/RequestParameters.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/RequestParameters.java index 7b5e2d0d6e82..33954f5651ab 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/RequestParameters.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/RequestParameters.java @@ -26,6 +26,10 @@ public interface RequestParameters { String get(String key); + default boolean contains(String key) { + return get(key) != null; + } + static MultivaluedMapImpl of(MultivaluedMap params) { return new MultivaluedMapImpl(params); } @@ -77,6 +81,11 @@ public String get(String key) { return params.getFirst(key); } + @Override + public boolean contains(String key) { + return params.containsKey(key); + } + @Override public void set(String key, String value) { params.putSingle(key, value); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java 
index ace1110791ac..05dd7e17ddb1 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java @@ -68,7 +68,7 @@ public class BucketAclHandler extends BucketOperationHandler { * @return true if the request has the "acl" query parameter */ private boolean shouldHandle() { - return queryParams().get(QueryParams.ACL) != null; + return queryParams().contains(QueryParams.ACL); } /** @@ -87,7 +87,7 @@ Response handleGetRequest(S3RequestContext context, String bucketName) context.setAction(S3GAction.GET_ACL); try { - OzoneBucket bucket = context.getVolume().getBucket(bucketName); + OzoneBucket bucket = context.getBucket(bucketName); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); S3Owner owner = S3Owner.of(bucket.getOwner()); @@ -139,7 +139,7 @@ Response handlePutRequest(S3RequestContext context, String bucketName, InputStre try { OzoneVolume volume = context.getVolume(); - OzoneBucket bucket = volume.getBucket(bucketName); + OzoneBucket bucket = context.getBucket(bucketName); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); List ozoneAclListOnBucket = new ArrayList<>(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCorsHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCorsHandler.java new file mode 100644 index 000000000000..a621cc6c88d9 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCorsHandler.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_XML; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_CORS_CONFIGURATION; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Locale; +import java.util.Set; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.audit.S3GAction; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; +import org.apache.hadoop.ozone.om.helpers.CorsRule; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; +import org.apache.http.HttpStatus; +import org.apache.ratis.util.MemoizedSupplier; + +/** + * Handler for bucket CORS operations (?cors query parameter). 
+ */ +public class BucketCorsHandler extends BucketOperationHandler { + private static final int MAX_RULES = 100; + private static final int MAX_RULE_ID_LENGTH = 255; + private static final Set ALLOWED_METHODS = new HashSet<>( + Arrays.asList("GET", "PUT", "POST", "DELETE", "HEAD")); + private static final MemoizedSupplier> + UNMARSHALLER = + MemoizedSupplier.valueOf(() -> new MessageUnmarshaller<>( + S3BucketCors.class)); + + private boolean shouldHandle() { + return queryParams().contains(QueryParams.CORS); + } + + @Override + Response handleGetRequest(S3RequestContext context, String bucketName) + throws IOException, OS3Exception { + if (!shouldHandle()) { + return null; + } + context.setAction(S3GAction.GET_BUCKET_CORS); + + OzoneBucket bucket = context.getBucket(bucketName); + S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, + bucket.getOwner()); + CorsConfiguration corsConfiguration = bucket.getCorsConfiguration(); + if (corsConfiguration == null) { + throw newError(NO_SUCH_CORS_CONFIGURATION, bucketName); + } + return Response.ok( + S3BucketCors.fromCorsConfiguration(corsConfiguration), + MediaType.APPLICATION_XML_TYPE).build(); + } + + @Override + Response handlePutRequest(S3RequestContext context, String bucketName, + InputStream body) throws IOException, OS3Exception { + if (!shouldHandle()) { + return null; + } + context.setAction(S3GAction.PUT_BUCKET_CORS); + + OzoneBucket bucket = context.getBucket(bucketName); + S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, + bucket.getOwner()); + S3BucketCors cors; + try { + cors = UNMARSHALLER.get().readFrom(body); + } catch (WebApplicationException ex) { + throw newError(MALFORMED_XML, bucketName, ex); + } + CorsConfiguration corsConfiguration = + cors.toCorsConfiguration(); + validate(corsConfiguration, bucketName); + bucket.setCorsConfiguration(corsConfiguration); + return Response.status(HttpStatus.SC_OK).build(); + } + + @Override + Response handleDeleteRequest(S3RequestContext 
context, String bucketName) + throws IOException, OS3Exception { + if (!shouldHandle()) { + return null; + } + context.setAction(S3GAction.DELETE_BUCKET_CORS); + + OzoneBucket bucket = context.getBucket(bucketName); + S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, + bucket.getOwner()); + bucket.deleteCorsConfiguration(); + return Response.status(HttpStatus.SC_NO_CONTENT).build(); + } + + private static void validate(CorsConfiguration corsConfiguration, + String bucketName) throws OS3Exception { + if (corsConfiguration.getRules().isEmpty() + || corsConfiguration.getRules().size() > MAX_RULES) { + throw newError(INVALID_ARGUMENT, bucketName); + } + for (CorsRule rule : corsConfiguration.getRules()) { + if (StringUtils.isNotEmpty(rule.getId()) + && rule.getId().length() > MAX_RULE_ID_LENGTH) { + throw newError(INVALID_ARGUMENT, bucketName); + } + if (rule.getAllowedOrigins().isEmpty() + || rule.getAllowedMethods().isEmpty()) { + throw newError(INVALID_ARGUMENT, bucketName); + } + for (String method : rule.getAllowedMethods()) { + if (!ALLOWED_METHODS.contains(method.toUpperCase(Locale.ROOT))) { + throw newError(INVALID_ARGUMENT, bucketName); + } + } + validateWildcardCount(rule.getAllowedOrigins(), bucketName); + validateWildcardCount(rule.getAllowedHeaders(), bucketName); + } + } + + private static void validateWildcardCount(Iterable values, + String bucketName) throws OS3Exception { + for (String value : values) { + if (StringUtils.countMatches(value, '*') > 1) { + throw newError(INVALID_ARGUMENT, bucketName); + } + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCrudHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCrudHandler.java index a2698b3098e3..496107b5e469 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCrudHandler.java +++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCrudHandler.java @@ -45,9 +45,10 @@ public class BucketCrudHandler extends BucketOperationHandler { * Handle only plain PUT bucket (create bucket), not subresources. */ private boolean shouldHandle() { - return queryParams().get(QueryParams.ACL) == null - && queryParams().get(QueryParams.UPLOADS) == null - && queryParams().get(QueryParams.DELETE) == null; + return !queryParams().contains(QueryParams.ACL) + && !queryParams().contains(QueryParams.UPLOADS) + && !queryParams().contains(QueryParams.DELETE) + && !queryParams().contains(QueryParams.CORS); } /** @@ -90,7 +91,7 @@ Response handleDeleteRequest(S3RequestContext context, String bucketName) try { if (S3Owner.hasBucketOwnershipVerificationConditions(getHeaders())) { - OzoneBucket bucket = context.getVolume().getBucket(bucketName); + OzoneBucket bucket = context.getBucket(bucketName); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); } context.getVolume().deleteBucket(bucketName); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 2fd6dee16db7..b1e939f5c6ae 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -37,6 +37,7 @@ import javax.ws.rs.DELETE; import javax.ws.rs.GET; import javax.ws.rs.HEAD; +import javax.ws.rs.OPTIONS; import javax.ws.rs.POST; import javax.ws.rs.PUT; import javax.ws.rs.Path; @@ -102,6 +103,12 @@ public Response get( } } + @OPTIONS + public Response options(@PathParam(BUCKET) String bucketName) + throws IOException, OS3Exception { + return corsPreflightResponse(bucketName); + } + @Override Response handleGetRequest(S3RequestContext context, String bucketName) throws 
IOException, OS3Exception { final String continueToken = queryParams().get(QueryParams.CONTINUATION_TOKEN); @@ -134,7 +141,7 @@ Response handleGetRequest(S3RequestContext context, String bucketName) throws IO boolean shallow = listKeysShallowEnabled && OZONE_URI_DELIMITER.equals(delimiter); - bucket = context.getVolume().getBucket(bucketName); + bucket = context.getBucket(bucketName); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); ozoneKeyIterator = bucket.listKeys(prefix, prevKey, shallow); @@ -289,8 +296,9 @@ public Response head(@PathParam(BUCKET) String bucketName) throws OS3Exception, IOException { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.HEAD_BUCKET; + S3RequestContext context = new S3RequestContext(this, s3GAction); try { - OzoneBucket bucket = getVolume().getBucket(bucketName); + OzoneBucket bucket = context.getS3Bucket(bucketName); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); auditReadSuccess(s3GAction); getMetrics().updateHeadBucketSuccessStats(startNanos); @@ -341,41 +349,47 @@ public MultiDeleteResponse multiDelete( ) throws OS3Exception, IOException { S3GAction s3GAction = S3GAction.MULTI_DELETE; - OzoneBucket bucket = getVolume().getBucket(bucketName); + S3RequestContext context = new S3RequestContext(this, s3GAction); MultiDeleteResponse result = new MultiDeleteResponse(); List deleteKeys = new ArrayList<>(); - if (request.getObjects() != null) { - Map undeletedKeyResultMap; - for (DeleteObject keyToDelete : request.getObjects()) { - deleteKeys.add(keyToDelete.getKey()); - } - long startNanos = Time.monotonicNowNanos(); - try { + try { + OzoneBucket bucket = context.getS3Bucket(bucketName); + if (request.getObjects() != null) { + Map undeletedKeyResultMap; + for (DeleteObject keyToDelete : request.getObjects()) { + deleteKeys.add(keyToDelete.getKey()); + } + long startNanos = Time.monotonicNowNanos(); 
S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); - undeletedKeyResultMap = bucket.deleteKeys(deleteKeys, true); - for (DeleteObject d : request.getObjects()) { - ErrorInfo error = undeletedKeyResultMap.get(d.getKey()); - boolean deleted = error == null || - // if the key is not found, it is assumed to be successfully deleted - ResultCodes.KEY_NOT_FOUND.name().equals(error.getCode()); - if (deleted) { - deleteKeys.remove(d.getKey()); - if (!request.isQuiet()) { - result.addDeleted(new DeletedObject(d.getKey())); + try { + undeletedKeyResultMap = bucket.deleteKeys(deleteKeys, true); + for (DeleteObject d : request.getObjects()) { + ErrorInfo error = undeletedKeyResultMap.get(d.getKey()); + boolean deleted = error == null || + // if the key is not found, it is assumed to be successfully deleted + ResultCodes.KEY_NOT_FOUND.name().equals(error.getCode()); + if (deleted) { + deleteKeys.remove(d.getKey()); + if (!request.isQuiet()) { + result.addDeleted(new DeletedObject(d.getKey())); + } + } else { + result.addError(new Error(d.getKey(), error.getCode(), + error.getMessage())); } - } else { - result.addError(new Error(d.getKey(), error.getCode(), error.getMessage())); } + getMetrics().updateDeleteKeySuccessStats(startNanos); + } catch (IOException ex) { + LOG.error("Delete key failed: {}", ex.getMessage()); + getMetrics().updateDeleteKeyFailureStats(startNanos); + result.addError( + new Error("ALL", "InternalError", + ex.getMessage())); } - getMetrics().updateDeleteKeySuccessStats(startNanos); - } catch (IOException ex) { - LOG.error("Delete key failed: {}", ex.getMessage()); - getMetrics().updateDeleteKeyFailureStats(startNanos); - result.addError( - new Error("ALL", "InternalError", - ex.getMessage())); } + } catch (OMException ex) { + throw newError(bucketName, ex); } AuditMessage.Builder message = auditMessageFor(s3GAction); @@ -421,6 +435,7 @@ protected void init() { BucketOperationHandler chain = 
BucketOperationHandlerChain.newBuilder(this) .add(new BucketAclHandler()) .add(new ListMultipartUploadsHandler()) + .add(new BucketCorsHandler()) .add(new BucketCrudHandler()) .add(this) .build(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 649b14b49cd7..0569a8f17cd8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -60,6 +60,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.Consumer; import java.util.function.Function; @@ -93,9 +94,11 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.CorsRule; import org.apache.hadoop.ozone.om.protocol.S3Auth; import org.apache.hadoop.ozone.s3.MultiDigestInputStream; import org.apache.hadoop.ozone.s3.RequestIdentifier; +import org.apache.hadoop.ozone.s3.S3CorsHeaders; import org.apache.hadoop.ozone.s3.SignedChunksInputStream; import org.apache.hadoop.ozone.s3.UnsignedChunksInputStream; import org.apache.hadoop.ozone.s3.commontypes.RequestParameters; @@ -103,7 +106,9 @@ import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; import org.apache.hadoop.ozone.s3.signature.SignatureInfo; +import org.apache.hadoop.ozone.s3.signature.SignatureInfo.Version; import org.apache.hadoop.ozone.s3.util.AuditUtils; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.hadoop.ozone.s3.util.S3Utils; import org.apache.http.NameValuePair; import 
org.apache.http.client.utils.URLEncodedUtils; @@ -191,14 +196,19 @@ public void initialization() { queryParams = RequestParameters.of(context.getUriInfo().getQueryParameters()); // Note: userPrincipal is initialized to be the same value as accessId, // could be updated later in RpcClient#getS3Volume - s3Auth = new S3Auth(signatureInfo.getStringToSign(), - signatureInfo.getSignature(), - signatureInfo.getAwsAccessId(), signatureInfo.getAwsAccessId()); - LOG.debug("S3 access id: {}", s3Auth.getAccessID()); ClientProtocol clientProtocol = getClient().getObjectStore().getClientProxy(); - clientProtocol.setThreadLocalS3Auth(s3Auth); clientProtocol.setIsS3Request(true); + if (signatureInfo.getVersion() == Version.NONE + || StringUtils.isBlank(signatureInfo.getAwsAccessId())) { + clientProtocol.clearThreadLocalS3Auth(); + } else { + s3Auth = new S3Auth(signatureInfo.getStringToSign(), + signatureInfo.getSignature(), + signatureInfo.getAwsAccessId(), signatureInfo.getAwsAccessId()); + LOG.debug("S3 access id: {}", s3Auth.getAccessID()); + clientProtocol.setThreadLocalS3Auth(s3Auth); + } bufferSize = (int) getOzoneConfiguration().getStorageSize( OZONE_S3G_CLIENT_BUFFER_SIZE_KEY, @@ -221,10 +231,54 @@ protected void init() { // hook method } + protected OzoneBucket loadBucket(String bucketName) throws IOException { + OzoneBucket bucket = getVolume().getBucket(bucketName); + cacheBucket(bucketName, bucket); + return bucket; + } + + @SuppressWarnings("unchecked") + protected void cacheBucket(String bucketName, OzoneBucket bucket) { + if (context == null || StringUtils.isBlank(bucketName) || bucket == null + || StringUtils.isBlank( + context.getHeaderString(S3Consts.ORIGIN_HEADER))) { + return; + } + Map buckets = + (Map) context.getProperty( + S3Consts.CACHED_BUCKETS_CONTEXT_PROPERTY); + if (buckets == null) { + buckets = new HashMap<>(); + context.setProperty(S3Consts.CACHED_BUCKETS_CONTEXT_PROPERTY, buckets); + } + buckets.put(bucketName, bucket); + } + protected OzoneVolume 
getVolume() throws IOException { return client.getObjectStore().getS3Volume(); } + protected Response corsPreflightResponse(String bucketName) + throws IOException, OS3Exception { + String origin = getHeaders().getHeaderString(S3Consts.ORIGIN_HEADER); + String method = getHeaders().getHeaderString( + S3Consts.ACCESS_CONTROL_REQUEST_METHOD); + String requestedHeaders = getHeaders().getHeaderString( + S3Consts.ACCESS_CONTROL_REQUEST_HEADERS); + try { + OzoneBucket bucket = new S3RequestContext(this, null).getS3Bucket(bucketName); + Optional rule = S3CorsHeaders.findMatchingRule( + bucket.getCorsConfiguration(), origin, method, requestedHeaders); + if (!rule.isPresent()) { + throw newError(S3ErrorTable.ACCESS_DENIED, bucketName); + } + return S3CorsHeaders.applyHeaders(Response.ok(), rule.get(), origin, + requestedHeaders, true).build(); + } catch (OMException ex) { + throw newError(bucketName, ex); + } + } + /** * Returns Iterator to iterate over all buckets for a specific user. * The result can be restricted using bucket prefix, will return all diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsHandler.java index 7f8efb3678d5..667addc3e9e3 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsHandler.java @@ -38,7 +38,7 @@ class ListMultipartUploadsHandler extends BucketOperationHandler { Response handleGetRequest(S3RequestContext context, String bucketName) throws IOException, OS3Exception { - if (queryParams().get(QueryParams.UPLOADS) == null) { + if (!queryParams().contains(QueryParams.UPLOADS)) { return null; // Not responsible for this request } @@ -57,7 +57,7 @@ Response handleGetRequest(S3RequestContext context, String bucketName) long 
startNanos = context.getStartNanos(); - OzoneBucket bucket = context.getVolume().getBucket(bucketName); + OzoneBucket bucket = context.getBucket(bucketName); try { S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectAclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectAclHandler.java index 263c093970ad..50b85db9e946 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectAclHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectAclHandler.java @@ -47,7 +47,7 @@ Response handlePutRequest(ObjectRequestContext context, String keyName, InputStr @SuppressWarnings("SwitchStatementWithTooFewBranches") S3GAction getAction() { - if (queryParams().get(S3Consts.QueryParams.ACL) == null) { + if (!queryParams().contains(S3Consts.QueryParams.ACL)) { return null; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 60a5f742141b..a9be7ec7154e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -63,6 +63,7 @@ import javax.ws.rs.DELETE; import javax.ws.rs.GET; import javax.ws.rs.HEAD; +import javax.ws.rs.OPTIONS; import javax.ws.rs.POST; import javax.ws.rs.PUT; import javax.ws.rs.Path; @@ -154,6 +155,12 @@ protected void init() { handler = new AuditingObjectOperationHandler(chain); } + @OPTIONS + public Response options(@PathParam(BUCKET) String bucketName) + throws IOException, OS3Exception { + return corsPreflightResponse(bucketName); + } + /** * Rest endpoint to upload object to a bucket. *

@@ -541,7 +548,7 @@ public Response head( OzoneKey key; try { if (S3Owner.hasBucketOwnershipVerificationConditions(getHeaders())) { - OzoneBucket bucket = getVolume().getBucket(bucketName); + OzoneBucket bucket = loadBucket(bucketName); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner()); } key = getClientProtocol().headS3Object(bucketName, keyPath); @@ -672,7 +679,7 @@ public Response initializeMultipartUpload( S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD; try { - OzoneBucket ozoneBucket = getVolume().getBucket(bucket); + OzoneBucket ozoneBucket = loadBucket(bucket); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucket, ozoneBucket.getOwner()); Map customMetadata = @@ -720,7 +727,6 @@ public Response completeMultipartUpload( final String uploadID = queryParams().get(QueryParams.UPLOAD_ID, ""); long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD; - OzoneVolume volume = getVolume(); // Using LinkedHashMap to preserve ordering of parts list. 
Map partsMap = new LinkedHashMap<>(); List partList = @@ -728,7 +734,7 @@ public Response completeMultipartUpload( OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; try { - OzoneBucket ozoneBucket = volume.getBucket(bucket); + OzoneBucket ozoneBucket = loadBucket(bucket); S3Owner.verifyBucketOwnerCondition(getHeaders(), bucket, ozoneBucket.getOwner()); for (CompleteMultipartUploadRequest.Part part : partList) { @@ -822,7 +828,7 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket, String sourceBucket = result.getLeft(); String sourceKey = result.getRight(); if (S3Owner.hasBucketOwnershipVerificationConditions(getHeaders())) { - String sourceBucketOwner = volume.getBucket(sourceBucket).getOwner(); + String sourceBucketOwner = loadBucket(sourceBucket).getOwner(); S3Owner.verifyBucketOwnerConditionOnCopyOperation(getHeaders(), sourceBucket, sourceBucketOwner, bucketName, ozoneBucket.getOwner()); } @@ -965,7 +971,7 @@ void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, srcKeyLen > getDatastreamMinLength()) { perf.appendStreamMode(); copyLength = ObjectEndpointStreaming - .copyKeyWithStream(volume.getBucket(destBucket), destKey, srcKeyLen, + .copyKeyWithStream(loadBucket(destBucket), destKey, srcKeyLen, getChunkSize(), replication, metadata, src, perf, startNanos, tags); } else { try (OzoneOutputStream dest = getClientProtocol() @@ -999,7 +1005,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume, DigestInputStream sourceDigestInputStream = null; if (S3Owner.hasBucketOwnershipVerificationConditions(getHeaders())) { - String sourceBucketOwner = volume.getBucket(sourceBucket).getOwner(); + String sourceBucketOwner = loadBucket(sourceBucket).getOwner(); // The destBucket owner has already been checked in the caller method S3Owner.verifyBucketOwnerConditionOnCopyOperation(getHeaders(), sourceBucket, sourceBucketOwner, null, null); } @@ -1128,7 +1134,6 @@ private OzoneOutputStream openKeyForPut(String 
volumeName, String bucketName, St /** Request context shared among {@code ObjectOperationHandler}s. */ final class ObjectRequestContext extends S3RequestContext { private final String bucketName; - private OzoneBucket bucket; /** @param action best guess on action based on request method, may be refined later by handlers */ ObjectRequestContext(S3GAction action, String bucketName) { @@ -1141,10 +1146,7 @@ String getBucketName() { } OzoneBucket getBucket() throws IOException { - if (bucket == null) { - bucket = getVolume().getBucket(bucketName); - } - return bucket; + return super.getS3Bucket(bucketName); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectTaggingHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectTaggingHandler.java index 3c5e756ed5ba..20ff53fb084e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectTaggingHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectTaggingHandler.java @@ -115,7 +115,7 @@ Response handleGetRequest(ObjectRequestContext context, String keyName) } private S3GAction getAction() { - if (queryParams().get(S3Consts.QueryParams.TAGGING) == null) { + if (!queryParams().contains(S3Consts.QueryParams.TAGGING)) { return null; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketCors.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketCors.java new file mode 100644 index 000000000000..3abee4e208dd --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketCors.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; +import org.apache.hadoop.ozone.om.helpers.CorsRule; +import org.apache.hadoop.ozone.s3.util.S3Consts; + +/** + * S3 bucket CORS XML document. 
+ */ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(name = "CORSConfiguration", + namespace = S3Consts.S3_XML_NAMESPACE) +public class S3BucketCors { + + @XmlElement(name = "CORSRule") + private List<CORSRule> rules = new ArrayList<>(); + + public List<CORSRule> getRules() { + return rules; + } + + public void setRules(List<CORSRule> corsRules) { + this.rules = corsRules; + } + + public CorsConfiguration toCorsConfiguration() { + return CorsConfiguration.newBuilder() + .setRules(rules.stream() + .map(CORSRule::toCorsRule) + .collect(Collectors.toList())) + .build(); + } + + public static S3BucketCors fromCorsConfiguration( + CorsConfiguration corsConfiguration) { + S3BucketCors result = new S3BucketCors(); + result.setRules(corsConfiguration.getRules().stream() + .map(CORSRule::fromCorsRule) + .collect(Collectors.toList())); + return result; + } + + @Override + public String toString() { + return "S3BucketCors{" + + "rules=" + rules + + '}'; + } + + /** + * S3 CORSRule XML element. + */ + @XmlAccessorType(XmlAccessType.FIELD) + public static class CORSRule { + @XmlElement(name = "ID") + private String id; + @XmlElement(name = "AllowedOrigin") + private List<String> allowedOrigins = new ArrayList<>(); + @XmlElement(name = "AllowedMethod") + private List<String> allowedMethods = new ArrayList<>(); + @XmlElement(name = "AllowedHeader") + private List<String> allowedHeaders = new ArrayList<>(); + @XmlElement(name = "ExposeHeader") + private List<String> exposeHeaders = new ArrayList<>(); + @XmlElement(name = "MaxAgeSeconds") + private Integer maxAgeSeconds; + + public CorsRule toCorsRule() { + return CorsRule.newBuilder() + .setId(id) + .setAllowedOrigins(allowedOrigins) + .setAllowedMethods(allowedMethods) + .setAllowedHeaders(allowedHeaders) + .setExposeHeaders(exposeHeaders) + .setMaxAgeSeconds(maxAgeSeconds) + .build(); + } + + public static CORSRule fromCorsRule(CorsRule corsRule) { + CORSRule rule = new CORSRule(); + rule.id = corsRule.getId(); + rule.allowedOrigins = new ArrayList<>(corsRule.getAllowedOrigins()); 
+ rule.allowedMethods = new ArrayList<>(corsRule.getAllowedMethods()); + rule.allowedHeaders = new ArrayList<>(corsRule.getAllowedHeaders()); + rule.exposeHeaders = new ArrayList<>(corsRule.getExposeHeaders()); + rule.maxAgeSeconds = corsRule.getMaxAgeSeconds(); + return rule; + } + + @Override + public String toString() { + return "CORSRule{" + + "id='" + id + '\'' + + ", allowedOrigins=" + allowedOrigins + + ", allowedMethods=" + allowedMethods + + ", allowedHeaders=" + allowedHeaders + + ", exposeHeaders=" + exposeHeaders + + ", maxAgeSeconds=" + maxAgeSeconds + + '}'; + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3RequestContext.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3RequestContext.java index 4130feaf6fdb..69fcc918d0ee 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3RequestContext.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3RequestContext.java @@ -19,8 +19,12 @@ import jakarta.annotation.Nullable; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; import org.apache.hadoop.ozone.audit.S3GAction; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.util.Time; @@ -30,6 +34,7 @@ class S3RequestContext { private final EndpointBase endpoint; private S3GAction action; private OzoneVolume volume; + private final Map buckets = new HashMap<>(); S3RequestContext(EndpointBase endpoint, S3GAction action) { this.endpoint = endpoint; @@ -53,6 +58,27 @@ OzoneVolume getVolume() throws IOException { return volume; } + OzoneBucket getBucket(String bucketName) throws IOException { + OzoneBucket bucket = buckets.get(bucketName); + if (bucket == null) { + bucket = 
getVolume().getBucket(bucketName); + endpoint.cacheBucket(bucketName, bucket); + buckets.put(bucketName, bucket); + } + return bucket; + } + + OzoneBucket getS3Bucket(String bucketName) throws IOException { + OzoneBucket bucket = buckets.get(bucketName); + if (bucket == null) { + ObjectStore objectStore = endpoint.getClient().getObjectStore(); + bucket = objectStore.getS3Bucket(bucketName); + endpoint.cacheBucket(bucketName, bucket); + buckets.put(bucketName, bucket); + } + return bucket; + } + S3GAction getAction() { return action; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java index 022dc08949e4..7f3c27fe2f61 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java @@ -140,6 +140,10 @@ public enum S3ErrorTable { NO_SUCH_TAG_SET( "NoSuchTagSet", "The specified tag does not exist.", HTTP_NOT_FOUND), + NO_SUCH_CORS_CONFIGURATION( + "NoSuchCORSConfiguration", "The CORS configuration does not exist.", + HTTP_NOT_FOUND), + MALFORMED_XML( "MalformedXML", "The XML you provided was not well-formed or did not " + "validate against our published schema", HTTP_BAD_REQUEST), diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index 67f6b4c7d41d..6738152062d6 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -98,6 +98,24 @@ public final class S3Consts { public static final String CHECKSUM_HEADER = "Content-MD5"; + public static final String ORIGIN_HEADER = "Origin"; + public static final String 
ACCESS_CONTROL_REQUEST_METHOD = + "Access-Control-Request-Method"; + public static final String ACCESS_CONTROL_REQUEST_HEADERS = + "Access-Control-Request-Headers"; + public static final String ACCESS_CONTROL_ALLOW_ORIGIN = + "Access-Control-Allow-Origin"; + public static final String ACCESS_CONTROL_ALLOW_METHODS = + "Access-Control-Allow-Methods"; + public static final String ACCESS_CONTROL_ALLOW_HEADERS = + "Access-Control-Allow-Headers"; + public static final String ACCESS_CONTROL_EXPOSE_HEADERS = + "Access-Control-Expose-Headers"; + public static final String ACCESS_CONTROL_MAX_AGE = + "Access-Control-Max-Age"; + public static final String CACHED_BUCKETS_CONTEXT_PROPERTY = + "org.apache.hadoop.ozone.s3.cachedBuckets"; + // Conditional request headers public static final String IF_NONE_MATCH_HEADER = "If-None-Match"; public static final String IF_MATCH_HEADER = "If-Match"; @@ -121,6 +139,7 @@ public enum CopyDirective { /** Constants for query parameters. */ public static final class QueryParams { public static final String ACL = "acl"; + public static final String CORS = "cors"; public static final String CONTINUATION_TOKEN = "continuation-token"; public static final String DELETE = "delete"; public static final String DELIMITER = "delimiter"; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 720c5851d640..ef098878e9af 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; import 
org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; @@ -186,6 +187,18 @@ public void setBucketStorageType(String volumeName, String bucketName, } + @Override + public void setBucketCors(String volumeName, String bucketName, + CorsConfiguration corsConfiguration) throws IOException { + getBucket(volumeName, bucketName).setCorsConfiguration(corsConfiguration); + } + + @Override + public void deleteBucketCors(String volumeName, String bucketName) + throws IOException { + getBucket(volumeName, bucketName).deleteCorsConfiguration(); + } + @Override public void deleteBucket(String volumeName, String bucketName) throws IOException { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index a1e511eda886..e6622905d81e 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -55,6 +55,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; @@ -81,6 +82,7 @@ public final class OzoneBucketStub extends OzoneBucket { private ArrayList aclList = new ArrayList<>(); private ReplicationConfig replicationConfig; + private CorsConfiguration corsConfiguration; public static Builder newBuilder() { return new Builder(); @@ -109,6 +111,22 @@ boolean isEmpty() { return keyDetails.isEmpty(); } + @Override + public 
CorsConfiguration getCorsConfiguration() { + return corsConfiguration; + } + + @Override + public void setCorsConfiguration( + CorsConfiguration newCorsConfiguration) { + this.corsConfiguration = newCorsConfiguration; + } + + @Override + public void deleteCorsConfiguration() { + this.corsConfiguration = null; + } + @Override public OzoneOutputStream createKey(String key, long size) throws IOException { return createKey(key, size, diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java index b19187d45c02..32b265820028 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; import org.apache.hadoop.util.Time; /** @@ -119,15 +120,18 @@ public void createBucket(String bucketName, BucketArgs bucketArgs) throws OMExce throw new OMException("", OMException.ResultCodes.BUCKET_ALREADY_EXISTS); } + CorsConfiguration corsConfiguration = bucketArgs.getCorsConfiguration(); buckets.put(bucketName, OzoneBucketStub.newBuilder() .setVolumeName(getName()) .setName(bucketName) + .setOwner(getOwner()) .setDefaultReplicationConfig(new DefaultReplicationConfig( RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.THREE))) .setBucketLayout(bucketArgs.getBucketLayout()) .setStorageType(bucketArgs.getStorageType()) .setVersioning(bucketArgs.getVersioning()) + .setCorsConfiguration(corsConfiguration) .setCreationTime(Time.now()) .build()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestCommonHeadersContainerResponseFilter.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestCommonHeadersContainerResponseFilter.java new file mode 100644 index 000000000000..83c076e01ef0 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestCommonHeadersContainerResponseFilter.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3; + +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_ALLOW_METHODS; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_ALLOW_ORIGIN; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_EXPOSE_HEADERS; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CACHED_BUCKETS_CONTEXT_PROPERTY; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ORIGIN_HEADER; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.container.ContainerResponseContext; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.UriInfo; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; +import org.apache.hadoop.ozone.om.helpers.CorsRule; +import org.junit.jupiter.api.Test; + +/** + * Tests common S3 response headers. 
+ */ +public class TestCommonHeadersContainerResponseFilter { + + @Test + public void addsCorsHeadersToActualResponseWhenRuleMatches() + throws Exception { + String bucketName = "cors-filter-bucket"; + String origin = "https://example.com"; + OzoneBucket bucket = mock(OzoneBucket.class); + when(bucket.getCorsConfiguration()).thenReturn(corsConfiguration(origin)); + + CommonHeadersContainerResponseFilter filter = + new CommonHeadersContainerResponseFilter(); + setField(filter, "requestIdentifier", new RequestIdentifier()); + MultivaluedMap responseHeaders = + new MultivaluedHashMap<>(); + Map cachedBuckets = new HashMap<>(); + cachedBuckets.put(bucketName, bucket); + + filter.filter(request("GET", "/" + bucketName + "/key", origin, + cachedBuckets), + response(responseHeaders)); + + assertThat(responseHeaders.getFirst(ACCESS_CONTROL_ALLOW_ORIGIN)) + .isEqualTo(origin); + assertThat(responseHeaders.getFirst(ACCESS_CONTROL_EXPOSE_HEADERS)) + .isEqualTo("ETag"); + assertThat(responseHeaders.getFirst(ACCESS_CONTROL_ALLOW_METHODS)) + .isNull(); + } + + @Test + public void usesCachedBucketForCorsHeaders() throws Exception { + String bucketName = "cors-filter-bucket"; + String origin = "https://example.com"; + OzoneBucket bucket = mock(OzoneBucket.class); + when(bucket.getCorsConfiguration()).thenReturn(corsConfiguration(origin)); + CommonHeadersContainerResponseFilter filter = + new CommonHeadersContainerResponseFilter(); + setField(filter, "requestIdentifier", new RequestIdentifier()); + MultivaluedMap responseHeaders = + new MultivaluedHashMap<>(); + Map cachedBuckets = new HashMap<>(); + cachedBuckets.put(bucketName, bucket); + + filter.filter(request("GET", "/" + bucketName + "/key", origin, + cachedBuckets), + response(responseHeaders)); + + assertThat(responseHeaders.getFirst(ACCESS_CONTROL_ALLOW_ORIGIN)) + .isEqualTo(origin); + assertThat(responseHeaders.getFirst(ACCESS_CONTROL_EXPOSE_HEADERS)) + .isEqualTo("ETag"); + } + + private static ContainerRequestContext 
request(String method, String path, + String origin, Map cachedBuckets) { + ContainerRequestContext request = mock(ContainerRequestContext.class); + UriInfo uriInfo = mock(UriInfo.class); + when(request.getMethod()).thenReturn(method); + when(request.getHeaderString(ORIGIN_HEADER)).thenReturn(origin); + when(request.getUriInfo()).thenReturn(uriInfo); + when(uriInfo.getPath(false)).thenReturn(path); + when(request.getProperty(CACHED_BUCKETS_CONTEXT_PROPERTY)) + .thenReturn(cachedBuckets); + return request; + } + + private static ContainerResponseContext response( + MultivaluedMap responseHeaders) { + ContainerResponseContext response = mock(ContainerResponseContext.class); + when(response.getHeaders()).thenReturn(responseHeaders); + return response; + } + + private static void setField(Object target, String name, Object value) + throws ReflectiveOperationException { + Field field = target.getClass().getDeclaredField(name); + field.setAccessible(true); + field.set(target, value); + } + + private static CorsConfiguration corsConfiguration(String origin) { + return CorsConfiguration.newBuilder() + .addRule(CorsRule.newBuilder() + .setAllowedOrigins(Collections.singletonList(origin)) + .setAllowedMethods(Arrays.asList("GET", "HEAD")) + .setExposeHeaders(Collections.singletonList("ETag")) + .build()) + .build(); + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestCorsHeaders.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestCorsHeaders.java new file mode 100644 index 000000000000..d1b75cb204a0 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestCorsHeaders.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3; + +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_ALLOW_HEADERS; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_ALLOW_METHODS; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_ALLOW_ORIGIN; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_EXPOSE_HEADERS; +import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCESS_CONTROL_MAX_AGE; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Optional; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; +import org.apache.hadoop.ozone.om.helpers.CorsConfiguration; +import org.apache.hadoop.ozone.om.helpers.CorsRule; +import org.junit.jupiter.api.Test; + +/** + * Tests S3 CORS rule matching and response header generation. 
+ */ +public class TestCorsHeaders { + + @Test + public void firstMatchingRuleMustMatchOriginMethodAndHeaders() { + CorsConfiguration configuration = CorsConfiguration.newBuilder() + .addRule(rule("post-rule", "https://example.com", + Collections.singletonList("POST"), + Collections.singletonList("*"))) + .addRule(rule("read-rule", "https://example.com", + Arrays.asList("GET", "HEAD"), + Arrays.asList("Authorization", "x-amz-*"))) + .build(); + + Optional<CorsRule> rule = S3CorsHeaders.findMatchingRule( + configuration, "https://example.com", "GET", + "Authorization, X-Amz-Date"); + + assertThat(rule).isPresent(); + assertThat(rule.get().getId()).isEqualTo("read-rule"); + assertThat(S3CorsHeaders.findMatchingRule(configuration, + "https://example.com", "GET", "Content-Type")).isEmpty(); + } + + @Test + public void applyPreflightHeaders() { + CorsRule rule = rule("read-rule", "https://example.com", + Arrays.asList("GET", "HEAD"), Collections.singletonList("*")) + .toBuilder() + .setExposeHeaders(Collections.singletonList("ETag")) + .setMaxAgeSeconds(3000) + .build(); + MultivaluedMap<String, Object> headers = new MultivaluedHashMap<>(); + + S3CorsHeaders.applyHeaders(headers, rule, "https://example.com", + "Authorization", true); + + assertThat(headers.getFirst(ACCESS_CONTROL_ALLOW_ORIGIN)) + .isEqualTo("https://example.com"); + assertThat(headers.getFirst(ACCESS_CONTROL_ALLOW_METHODS)) + .isEqualTo("GET, HEAD"); + assertThat(headers.getFirst(ACCESS_CONTROL_ALLOW_HEADERS)) + .isEqualTo("Authorization"); + assertThat(headers.getFirst(ACCESS_CONTROL_MAX_AGE)).isEqualTo("3000"); + assertThat(headers.getFirst(ACCESS_CONTROL_EXPOSE_HEADERS)) + .isEqualTo("ETag"); + } + + private static CorsRule rule(String id, String origin, + java.util.List<String> methods, java.util.List<String> headers) { + return CorsRule.newBuilder() + .setId(id) + .setAllowedOrigins(Collections.singletonList(origin)) + .setAllowedMethods(methods) + .setAllowedHeaders(headers) + .build(); + } +} diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestRequestParameters.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestRequestParameters.java new file mode 100644 index 000000000000..b77ac7c7d24c --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestRequestParameters.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3.commontypes; + +import static org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams.CORS; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Collections; +import javax.ws.rs.core.MultivaluedHashMap; +import org.junit.jupiter.api.Test; + +class TestRequestParameters { + + @Test + void detectsBareQueryParameterWithoutValue() { + MultivaluedHashMap<String, String> params = new MultivaluedHashMap<>(); + params.put(CORS, Collections.emptyList()); + + RequestParameters requestParameters = RequestParameters.of(params); + + assertTrue(requestParameters.contains(CORS)); + assertNull(requestParameters.get(CORS)); + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java index 475de95feca7..a779f73842aa 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAclHandler.java @@ -35,6 +35,7 @@ import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneVolume; @@ -263,9 +264,10 @@ public void testHandleGetRequestReturnsCorrectAclStructure() throws Exception { private S3RequestContext mockContext() throws IOException { BucketEndpoint endpoint = mock(BucketEndpoint.class); OzoneVolume volume = mock(OzoneVolume.class); + OzoneBucket bucket = client.getObjectStore().getS3Bucket(BUCKET_NAME); when(endpoint.getVolume()).thenReturn(volume); when(volume.getBucket(BUCKET_NAME)) - .thenAnswer(any -> 
client.getObjectStore().getS3Bucket(BUCKET_NAME)); + .thenReturn(bucket); when(volume.getBucket("nonexistent-bucket")) .thenThrow(new OMException("", OMException.ResultCodes.BUCKET_NOT_FOUND)); return new S3RequestContext(endpoint, null); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketCorsHandler.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketCorsHandler.java new file mode 100644 index 000000000000..fc7b4d7252bd --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketCorsHandler.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static java.net.HttpURLConnection.HTTP_NO_CONTENT; +import static java.net.HttpURLConnection.HTTP_OK; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.when; + +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; +import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +/** + * Tests S3 bucket CORS subresource operations. 
+ */
+public class TestBucketCorsHandler {
+  private static final String BUCKET_NAME = OzoneConsts.S3_BUCKET;
+  private OzoneClient client;
+  private BucketEndpoint bucketEndpoint;
+
+  @BeforeEach
+  public void setup() throws Exception {
+    client = new OzoneClientStub();
+    client.getObjectStore().createS3Bucket(BUCKET_NAME);
+    bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder()
+        .setClient(client)
+        .setHeaders(Mockito.mock(HttpHeaders.class))
+        .build();
+    bucketEndpoint.queryParamsForTest().set(QueryParams.CORS, "");
+  }
+
+  @AfterEach
+  public void clean() throws Exception {
+    if (client != null) {
+      client.close();
+    }
+  }
+
+  @Test
+  public void putGetAndDeleteCorsConfiguration() throws Exception {
+    String xml = "<CORSConfiguration>" +
+        "<CORSRule>" +
+        "<ID>read-rule</ID>" +
+        "<AllowedOrigin>https://example.com</AllowedOrigin>" +
+        "<AllowedMethod>GET</AllowedMethod>" +
+        "<AllowedMethod>HEAD</AllowedMethod>" +
+        "<AllowedHeader>Authorization</AllowedHeader>" +
+        "<ExposeHeader>ETag</ExposeHeader>" +
+        "<MaxAgeSeconds>3000</MaxAgeSeconds>" +
+        "</CORSRule>" +
+        "</CORSConfiguration>";
+
+    Response putResponse = bucketEndpoint.put(BUCKET_NAME,
+        new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
+    assertEquals(HTTP_OK, putResponse.getStatus());
+
+    Response getResponse = bucketEndpoint.get(BUCKET_NAME);
+    assertEquals(HTTP_OK, getResponse.getStatus());
+    assertThat(getResponse.getEntity()).isInstanceOf(S3BucketCors.class);
+    assertThat(getResponse.getEntity().toString()).contains("read-rule");
+
+    Response deleteResponse = bucketEndpoint.delete(BUCKET_NAME);
+    assertEquals(HTTP_NO_CONTENT, deleteResponse.getStatus());
+
+    OS3Exception noCors = assertThrows(OS3Exception.class,
+        () -> bucketEndpoint.get(BUCKET_NAME));
+    assertEquals("NoSuchCORSConfiguration", noCors.getCode());
+  }
+
+  @Test
+  public void deleteCorsWithoutConfigurationIsIdempotent() throws Exception {
+    Response firstDeleteResponse = bucketEndpoint.delete(BUCKET_NAME);
+    assertEquals(HTTP_NO_CONTENT, firstDeleteResponse.getStatus());
+
+    Response secondDeleteResponse = bucketEndpoint.delete(BUCKET_NAME);
+    assertEquals(HTTP_NO_CONTENT, secondDeleteResponse.getStatus());
+
+    OS3Exception noCors =
assertThrows(OS3Exception.class,
+        () -> bucketEndpoint.get(BUCKET_NAME));
+    assertEquals("NoSuchCORSConfiguration", noCors.getCode());
+  }
+
+  @Test
+  public void putCorsFailsWhenExpectedBucketOwnerDoesNotMatch()
+      throws Exception {
+    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
+    when(headers.getHeaderString(S3Consts.EXPECTED_BUCKET_OWNER_HEADER))
+        .thenReturn("wrong-owner");
+
+    bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder()
+        .setClient(client)
+        .setHeaders(headers)
+        .build();
+    bucketEndpoint.queryParamsForTest().set(QueryParams.CORS, "");
+
+    String xml = "<CORSConfiguration>" +
+        "<CORSRule>" +
+        "<AllowedOrigin>https://example.com</AllowedOrigin>" +
+        "<AllowedMethod>GET</AllowedMethod>" +
+        "</CORSRule>" +
+        "</CORSConfiguration>";
+
+    OS3Exception ex = assertThrows(OS3Exception.class,
+        () -> bucketEndpoint.put(BUCKET_NAME,
+            new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))));
+    assertEquals(S3ErrorTable.BUCKET_OWNER_MISMATCH.getCode(),
+        ex.getCode());
+    assertEquals(S3ErrorTable.BUCKET_OWNER_MISMATCH.getErrorMessage(),
+        ex.getErrorMessage());
+  }
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestEndpointBase.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestEndpointBase.java
index 2910aa22a51c..611166658ab9 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestEndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestEndpointBase.java
@@ -17,95 +17,196 @@
 package org.apache.hadoop.ozone.s3.endpoint;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.CACHED_BUCKETS_CONTEXT_PROPERTY;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.ORIGIN_HEADER;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import
static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.nio.charset.StandardCharsets; import java.util.Locale; import java.util.Map; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.UriInfo; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.om.protocol.S3Auth; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.signature.SignatureInfo; +import org.apache.hadoop.ozone.s3.signature.SignatureInfo.Version; import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; /** - * Tests the s3 EndpointBase class methods. - * Test methods of the EndpointBase. + * Tests EndpointBase behaviors. */ public class TestEndpointBase { - /** - * Verify s3 metadata key "gdprEnabled" can't be set up directly - * from the normal client's request, - * it should be decided on the server side. 
- */ @Test - public void testFilterGDPRFromCustomMetadataHeaders() - throws OS3Exception { - MultivaluedMap s3requestHeaders - = new MultivaluedHashMap<>(); + public void testFilterGDPRFromCustomMetadataHeaders() throws OS3Exception { + MultivaluedMap s3requestHeaders = new MultivaluedHashMap<>(); s3requestHeaders.add( - CUSTOM_METADATA_HEADER_PREFIX + "custom-key1", "custom-value1"); + CUSTOM_METADATA_HEADER_PREFIX + "custom-key1", "custom-value1"); s3requestHeaders.add( - CUSTOM_METADATA_HEADER_PREFIX + "custom-key2", "custom-value2"); + CUSTOM_METADATA_HEADER_PREFIX + "custom-key2", "custom-value2"); s3requestHeaders.add( - CUSTOM_METADATA_HEADER_PREFIX + OzoneConsts.GDPR_FLAG, "true"); + CUSTOM_METADATA_HEADER_PREFIX + OzoneConsts.GDPR_FLAG, "true"); EndpointBase endpointBase = new EndpointBase() { }; Map filteredCustomMetadata = - endpointBase.getCustomMetadataFromHeaders(s3requestHeaders); + endpointBase.getCustomMetadataFromHeaders(s3requestHeaders); assertThat(filteredCustomMetadata).containsKey("custom-key1"); - assertEquals( - "custom-value1", filteredCustomMetadata.get("custom-key1")); + assertEquals("custom-value1", filteredCustomMetadata.get("custom-key1")); assertThat(filteredCustomMetadata).containsKey("custom-key2"); - assertEquals( - "custom-value2", filteredCustomMetadata.get("custom-key2")); + assertEquals("custom-value2", filteredCustomMetadata.get("custom-key2")); assertThat(filteredCustomMetadata).doesNotContainKey(OzoneConsts.GDPR_FLAG); } - /** - * Verify s3 request metadata size should be smaller than 2 KB. 
- */ @Test public void testCustomMetadataHeadersSizeOverbig() { - MultivaluedMap s3requestHeaders - = new MultivaluedHashMap<>(); + MultivaluedMap s3requestHeaders = new MultivaluedHashMap<>(); s3requestHeaders.add( - CUSTOM_METADATA_HEADER_PREFIX + "custom-key1", "custom-value1"); + CUSTOM_METADATA_HEADER_PREFIX + "custom-key1", "custom-value1"); s3requestHeaders.add( - CUSTOM_METADATA_HEADER_PREFIX + "custom-key2", "custom-value2"); + CUSTOM_METADATA_HEADER_PREFIX + "custom-key2", "custom-value2"); s3requestHeaders.add( - CUSTOM_METADATA_HEADER_PREFIX + "custom-key3", - new String(new byte[3000], StandardCharsets.UTF_8)); + CUSTOM_METADATA_HEADER_PREFIX + "custom-key3", + new String(new byte[3000], StandardCharsets.UTF_8)); EndpointBase endpointBase = new EndpointBase() { }; - OS3Exception e = assertThrows(OS3Exception.class, () -> endpointBase - .getCustomMetadataFromHeaders(s3requestHeaders), - "getCustomMetadataFromHeaders should fail." + - " Expected OS3Exception not thrown"); + OS3Exception e = assertThrows(OS3Exception.class, + () -> endpointBase.getCustomMetadataFromHeaders(s3requestHeaders)); assertThat(e.getCode()).contains("MetadataTooLarge"); } @Test - public void testCustomMetadataHeadersWithUpperCaseHeaders() throws OS3Exception { + public void testCustomMetadataHeadersWithUpperCaseHeaders() + throws OS3Exception { MultivaluedMap s3requestHeaders = new MultivaluedHashMap<>(); String key = "CUSTOM-KEY"; String value = "custom-value1"; - s3requestHeaders.add(CUSTOM_METADATA_HEADER_PREFIX.toUpperCase(Locale.ROOT) + key, value); + s3requestHeaders.add( + CUSTOM_METADATA_HEADER_PREFIX.toUpperCase(Locale.ROOT) + key, value); EndpointBase endpointBase = new EndpointBase() { }; - Map customMetadata = endpointBase.getCustomMetadataFromHeaders(s3requestHeaders); + Map customMetadata = + endpointBase.getCustomMetadataFromHeaders(s3requestHeaders); assertEquals(value, customMetadata.get(key)); } + @Test + public void clearsThreadLocalS3AuthForUnsignedPreflight() 
{ + ClientProtocol proxy = mock(ClientProtocol.class); + + EndpointBuilder.newObjectEndpointBuilder() + .setClient(mockClient(proxy)) + .setSignatureInfo(new SignatureInfo.Builder(Version.NONE).build()) + .build(); + + verify(proxy).setIsS3Request(true); + verify(proxy).clearThreadLocalS3Auth(); + verify(proxy, never()).setThreadLocalS3Auth(any()); + } + + @Test + public void setsThreadLocalS3AuthForSignedRequests() { + ClientProtocol proxy = mock(ClientProtocol.class); + SignatureInfo signatureInfo = new SignatureInfo.Builder(Version.V4) + .setAwsAccessId("testuser") + .setSignature("signature") + .setStringToSign("string-to-sign") + .build(); + + EndpointBuilder.newObjectEndpointBuilder() + .setClient(mockClient(proxy)) + .setSignatureInfo(signatureInfo) + .build(); + + ArgumentCaptor authCaptor = ArgumentCaptor.forClass(S3Auth.class); + verify(proxy).setIsS3Request(true); + verify(proxy).setThreadLocalS3Auth(authCaptor.capture()); + verify(proxy, never()).clearThreadLocalS3Auth(); + assertEquals("testuser", authCaptor.getValue().getAccessID()); + assertEquals("testuser", authCaptor.getValue().getUserPrincipal()); + } + + @Test + @SuppressWarnings("unchecked") + public void cacheBucketPublishesBucketForCorsRequests() { + ContainerRequestContext requestContext = mock(ContainerRequestContext.class); + HttpHeaders headers = mock(HttpHeaders.class); + ClientProtocol proxy = mock(ClientProtocol.class); + OzoneBucket bucket = mock(OzoneBucket.class); + UriInfo uriInfo = mock(UriInfo.class); + when(requestContext.getUriInfo()).thenReturn(uriInfo); + when(uriInfo.getQueryParameters()).thenReturn(new MultivaluedHashMap<>()); + when(requestContext.getHeaderString(ORIGIN_HEADER)) + .thenReturn("https://example.com"); + when(headers.getHeaderString(ORIGIN_HEADER)) + .thenReturn("https://example.com"); + + EndpointBase endpoint = EndpointBuilder.newObjectEndpointBuilder() + .setClient(mockClient(proxy)) + .setContext(requestContext) + .setHeaders(headers) + .build(); + + 
endpoint.cacheBucket("bucket", bucket);
+
+    ArgumentCaptor<Map<String, OzoneBucket>> bucketsCaptor =
+        ArgumentCaptor.forClass(Map.class);
+    verify(requestContext).setProperty(eq(CACHED_BUCKETS_CONTEXT_PROPERTY),
+        bucketsCaptor.capture());
+    assertThat(bucketsCaptor.getValue()).containsEntry("bucket", bucket);
+  }
+
+  @Test
+  public void cacheBucketSkipsPublishingBucketWithoutOriginHeader() {
+    ContainerRequestContext requestContext = mock(ContainerRequestContext.class);
+    HttpHeaders headers = mock(HttpHeaders.class);
+    ClientProtocol proxy = mock(ClientProtocol.class);
+    OzoneBucket bucket = mock(OzoneBucket.class);
+    UriInfo uriInfo = mock(UriInfo.class);
+    when(requestContext.getUriInfo()).thenReturn(uriInfo);
+    when(uriInfo.getQueryParameters()).thenReturn(new MultivaluedHashMap<>());
+
+    EndpointBase endpoint = EndpointBuilder.newObjectEndpointBuilder()
+        .setClient(mockClient(proxy))
+        .setContext(requestContext)
+        .setHeaders(headers)
+        .build();
+
+    endpoint.cacheBucket("bucket", bucket);
+
+    verify(requestContext, never()).setProperty(
+        eq(CACHED_BUCKETS_CONTEXT_PROPERTY), any());
+  }
+
+  private static OzoneClient mockClient(ClientProtocol proxy) {
+    OzoneClient client = mock(OzoneClient.class);
+    ObjectStore objectStore = mock(ObjectStore.class);
+    when(client.getObjectStore()).thenReturn(objectStore);
+    when(objectStore.getClientProxy()).thenReturn(proxy);
+    return client;
+  }
 }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java
index d60752dfeddb..d4e3967b7185 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java
@@ -106,6 +106,7 @@ public void testDeleteObjectTaggingNotImplemented() throws Exception {
when(mockClient.getObjectStore()).thenReturn(mockObjectStore); when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + when(mockObjectStore.getS3Bucket("fsoBucket")).thenReturn(mockBucket); when(mockObjectStore.getClientProxy()).thenReturn(mock(ClientProtocol.class)); when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java index 75ddd97bd241..246cdf6c6ea6 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -119,6 +119,7 @@ public void testPutObjectTaggingNotImplemented() throws Exception { when(mockClient.getObjectStore()).thenReturn(mockObjectStore); when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + when(mockObjectStore.getS3Bucket("fsoBucket")).thenReturn(mockBucket); when(mockObjectStore.getClientProxy()).thenReturn(mock(ClientProtocol.class)); when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); @@ -196,4 +197,3 @@ public String tagValueNotSpecified() { ""; } } - diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index bc9e19db1b6d..32d69b8c5ef1 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -118,8 +118,7 @@ public void testListS3Buckets() throws IOException { */ @Test public void testGetBucket() throws IOException { - doThrow(exception).when(volume).getBucket(anyString()); - when(objectStore.getS3Volume()).thenReturn(volume); 
+ doThrow(exception).when(objectStore).getS3Bucket(anyString()); BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() .setClient(client) .build(); @@ -145,7 +144,7 @@ public void testCreateBucket() throws IOException { public void testDeleteBucket() throws IOException { doThrow(exception).when(volume).deleteBucket(anyString()); when(objectStore.getS3Volume()).thenReturn(volume); - when(volume.getBucket(anyString())).thenReturn(bucket); + when(objectStore.getS3Bucket(anyString())).thenReturn(bucket); BucketEndpoint bucketEndpoint = EndpointBuilder.newBucketEndpointBuilder() .setClient(client) .build(); @@ -185,7 +184,7 @@ public void testListKey() throws IOException { public void testDeleteKeys() throws IOException, OS3Exception { when(objectStore.getVolume(anyString())).thenReturn(volume); when(objectStore.getS3Volume()).thenReturn(volume); - when(volume.getBucket(anyString())).thenReturn(bucket); + when(objectStore.getS3Bucket(anyString())).thenReturn(bucket); Map deleteErrors = new HashMap<>(); deleteErrors.put("deleteKeyName", new ErrorInfo("ACCESS_DENIED", "ACL check failed")); when(bucket.deleteKeys(any(), anyBoolean())).thenReturn(deleteErrors); @@ -261,6 +260,7 @@ public void testGetKey() throws IOException { @Test public void testPutKey() throws IOException { when(objectStore.getS3Volume()).thenReturn(volume); + when(objectStore.getS3Bucket("bucketName")).thenReturn(bucket); when(volume.getBucket("bucketName")).thenReturn(bucket); doThrow(exception).when(clientProtocol).createKey( anyString(), anyString(), anyString(), anyLong(), any(), anyMap(), anyMap()); @@ -308,6 +308,7 @@ public void testMultiUploadKey() throws IOException { public void testObjectTagging() throws Exception { when(objectStore.getVolume(anyString())).thenReturn(volume); when(objectStore.getS3Volume()).thenReturn(volume); + when(objectStore.getS3Bucket("bucketName")).thenReturn(bucket); when(volume.getBucket("bucketName")).thenReturn(bucket); 
when(bucket.getObjectTagging(anyString())).thenThrow(exception); doThrow(exception).when(bucket).putObjectTagging(anyString(), anyMap()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestS3RequestContext.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestS3RequestContext.java new file mode 100644 index 000000000000..932a754a6f02 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestS3RequestContext.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import org.apache.hadoop.ozone.audit.S3GAction; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.junit.jupiter.api.Test; + +/** + * Tests for {@link S3RequestContext}. + */ +public class TestS3RequestContext { + + @Test + public void getBucketCachesLoadedBuckets() throws Exception { + EndpointBase endpoint = mock(EndpointBase.class); + OzoneVolume volume = mock(OzoneVolume.class); + OzoneBucket bucket = mock(OzoneBucket.class); + when(endpoint.getVolume()).thenReturn(volume); + when(volume.getBucket("bucket")).thenReturn(bucket); + + S3RequestContext context = new S3RequestContext(endpoint, S3GAction.GET_BUCKET); + + assertSame(bucket, context.getBucket("bucket")); + assertSame(bucket, context.getBucket("bucket")); + verify(endpoint, times(1)).getVolume(); + verify(volume, times(1)).getBucket("bucket"); + verify(endpoint, times(1)).cacheBucket("bucket", bucket); + } + + @Test + public void getS3BucketCachesLoadedBuckets() throws Exception { + EndpointBase endpoint = mock(EndpointBase.class); + OzoneClient client = mock(OzoneClient.class); + ObjectStore objectStore = mock(ObjectStore.class); + OzoneBucket bucket = mock(OzoneBucket.class); + when(endpoint.getClient()).thenReturn(client); + when(client.getObjectStore()).thenReturn(objectStore); + when(objectStore.getS3Bucket("bucket")).thenReturn(bucket); + + S3RequestContext context = new 
S3RequestContext(endpoint, S3GAction.GET_BUCKET); + + assertSame(bucket, context.getS3Bucket("bucket")); + assertSame(bucket, context.getS3Bucket("bucket")); + verify(objectStore, times(1)).getS3Bucket("bucket"); + verify(endpoint, times(1)).cacheBucket("bucket", bucket); + verify(endpoint, never()).getVolume(); + } + + @Test + public void getS3BucketPropagatesOmExceptions() throws Exception { + EndpointBase endpoint = mock(EndpointBase.class); + OzoneClient client = mock(OzoneClient.class); + ObjectStore objectStore = mock(ObjectStore.class); + OMException exception = + new OMException("missing", OMException.ResultCodes.BUCKET_NOT_FOUND); + when(endpoint.getClient()).thenReturn(client); + when(client.getObjectStore()).thenReturn(objectStore); + when(objectStore.getS3Bucket("bucket")).thenThrow(exception); + + S3RequestContext context = new S3RequestContext(endpoint, S3GAction.GET_BUCKET); + + assertSame(exception, + assertThrows(OMException.class, () -> context.getS3Bucket("bucket"))); + } +}