Ensure necessary security context for s3 bulk deletions (elastic#108280) (elastic#108299)

This PR moves the doPrivileged wrapper closer to the actual deletion
request to ensure the necessary security context is established at all
times. It also adds a new repository setting to configure the maximum
batch size for S3 deleteObjects requests.

Fixes: elastic#108049
(cherry picked from commit bcf4297)

# Conflicts:
#	docs/reference/snapshot-restore/repository-s3.asciidoc
#	modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
ywangd committed May 6, 2024
1 parent a337e2e commit 9094f79
Showing 5 changed files with 42 additions and 12 deletions.
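For context, a simplified sketch of the pattern this commit enforces. This is not the project's actual SocketAccess helper, only an illustration built on the standard java.security API: a SecurityManager permission check inspects the whole call stack, so the privileged section must wrap the call that actually performs the network operation, not just the surrounding iteration.

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    // Simplified, hypothetical sketch; the real code uses the repository-s3
    // module's SocketAccess.doPrivilegedVoid helper.
    public final class DoPrivilegedSketch {

        static void doPrivilegedVoid(Runnable action) {
            // Runs the action with the permissions of this class's protection
            // domain, regardless of which callers sit above it on the stack.
            AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
                action.run();
                return null;
            });
        }

        public static void main(String[] args) {
            // Wrap the deletion request itself, not the batching loop around it.
            doPrivilegedVoid(() -> System.out.println("client.deleteObjects(request) would run here"));
        }
    }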
6 changes: 6 additions & 0 deletions docs/changelog/108280.yaml
@@ -0,0 +1,6 @@
pr: 108280
summary: Ensure necessary security context for s3 bulk deletions
area: Snapshot/Restore
type: bug
issues:
- 108049
8 changes: 8 additions & 0 deletions docs/reference/snapshot-restore/repository-s3.asciidoc
@@ -301,6 +301,14 @@ include::repository-shared-settings.asciidoc[]
`intelligent_tiering`. Defaults to `standard`. See
<<repository-s3-storage-classes>> for more information.

`delete_objects_max_size`::

(<<number,numeric>>) Sets the maximum batch size, between 1 and 1000, used
for `DeleteObjects` requests. Defaults to 1000, which is the maximum number
supported by the
https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html[AWS
DeleteObjects API].

NOTE: The option of defining client settings in the repository settings as
documented below is considered deprecated, and will be removed in a future
version.
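As a usage illustration (not part of this commit's docs change), the option is an ordinary per-repository setting. Below is a minimal sketch mirroring the integration-test change at the end of this diff; the bucket and base_path values are hypothetical.

    import org.elasticsearch.common.settings.Settings;

    public class RepositorySettingsExample {
        public static void main(String[] args) {
            // Hypothetical values; delete_objects_max_size caps the number of
            // keys sent in a single DeleteObjects request.
            Settings repositorySettings = Settings.builder()
                .put("bucket", "my-bucket")
                .put("base_path", "my/base/path")
                .put("delete_objects_max_size", 500)
                .build();
            System.out.println(repositorySettings.get("delete_objects_max_size")); // prints 500
        }
    }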
modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
@@ -62,7 +62,7 @@ class S3BlobStore implements BlobStore {
* Maximum number of deletes in a {@link DeleteObjectsRequest}.
* @see <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html">S3 Documentation</a>.
*/
-    private static final int MAX_BULK_DELETES = 1000;
+    static final int MAX_BULK_DELETES = 1000;

private static final Logger logger = LogManager.getLogger(S3BlobStore.class);

@@ -88,6 +88,8 @@ class S3BlobStore implements BlobStore {

private final StatsCollectors statsCollectors = new StatsCollectors();

private final int bulkDeletionBatchSize;

S3BlobStore(
S3Service service,
String bucket,
@@ -111,6 +113,7 @@ class S3BlobStore implements BlobStore {
this.threadPool = threadPool;
this.snapshotExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
this.repositoriesMetrics = repositoriesMetrics;
this.bulkDeletionBatchSize = S3Repository.DELETION_BATCH_SIZE_SETTING.get(repositoryMetadata.settings());
}

RequestMetricCollector getMetricCollector(Operation operation, OperationPurpose purpose) {
@@ -308,18 +311,16 @@ public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<St
try (AmazonS3Reference clientReference = clientReference()) {
// S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes
final AtomicReference<Exception> aex = new AtomicReference<>();
-            SocketAccess.doPrivilegedVoid(() -> {
-                blobNames.forEachRemaining(key -> {
-                    partition.add(key);
-                    if (partition.size() == MAX_BULK_DELETES) {
-                        deletePartition(purpose, clientReference, partition, aex);
-                        partition.clear();
-                    }
-                });
-                if (partition.isEmpty() == false) {
-                    deletePartition(purpose, clientReference, partition, aex);
-                }
-            });
+            blobNames.forEachRemaining(key -> {
+                partition.add(key);
+                if (partition.size() == bulkDeletionBatchSize) {
+                    deletePartition(purpose, clientReference, partition, aex);
+                    partition.clear();
+                }
+            });
+            if (partition.isEmpty() == false) {
+                deletePartition(purpose, clientReference, partition, aex);
+            }
if (aex.get() != null) {
throw aex.get();
}
@@ -335,7 +336,7 @@ private void deletePartition(
AtomicReference<Exception> aex
) {
try {
-            clientReference.client().deleteObjects(bulkDelete(purpose, this, partition));
+            SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(bulkDelete(purpose, this, partition)));
} catch (MultiObjectDeleteException e) {
// We are sending quiet mode requests so we can't use the deleted keys entry on the exception and instead
// first remove all keys that were sent in the request and then add back those that ran into an exception.
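The partitioning loop above, restated as a standalone sketch with generic names and a Consumer standing in for deletePartition: keys are buffered until the configured batch size is reached, each full batch is flushed as one DeleteObjects request, and any trailing partial batch is flushed at the end.

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;
    import java.util.function.Consumer;

    public final class BulkDeletePartitioner {

        static void forEachPartition(Iterator<String> keys, int batchSize, Consumer<List<String>> flush) {
            final List<String> partition = new ArrayList<>(batchSize);
            keys.forEachRemaining(key -> {
                partition.add(key);
                if (partition.size() == batchSize) {
                    flush.accept(List.copyOf(partition)); // full batch
                    partition.clear();
                }
            });
            if (partition.isEmpty() == false) {
                flush.accept(List.copyOf(partition)); // trailing partial batch
            }
        }

        public static void main(String[] args) {
            forEachPartition(List.of("a", "b", "c", "d", "e").iterator(), 2,
                batch -> System.out.println("delete batch: " + batch));
        }
    }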
modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
@@ -173,6 +173,16 @@ class S3Repository extends MeteredBlobStoreRepository {
*/
static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path");

/**
* The batch size for DeleteObjects request
*/
static final Setting<Integer> DELETION_BATCH_SIZE_SETTING = Setting.intSetting(
"delete_objects_max_size",
S3BlobStore.MAX_BULK_DELETES,
1,
S3BlobStore.MAX_BULK_DELETES
);

private final S3Service service;

private final String bucket;
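A short sketch of how the bounded setting behaves, assuming the Setting API used in the diff (intSetting with a minimum and maximum validates the value when it is read): in-range values are returned as given, the default is the S3 maximum of 1000, and out-of-range values are rejected.

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    public class DeletionBatchSizeExample {
        static final Setting<Integer> SETTING = Setting.intSetting("delete_objects_max_size", 1000, 1, 1000);

        public static void main(String[] args) {
            System.out.println(SETTING.get(Settings.EMPTY)); // 1000, the default
            System.out.println(SETTING.get(Settings.builder().put("delete_objects_max_size", 500).build())); // 500
            // A value of 0 or 1001 would be rejected with an IllegalArgumentException.
        }
    }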
@@ -59,6 +59,11 @@ protected Settings repositorySettings() {
final String basePath = System.getProperty("test.s3.base_path");
assertThat(basePath, not(blankOrNullString()));

-        return Settings.builder().put("client", "repo_test_kit").put("bucket", bucket).put("base_path", basePath).build();
+        return Settings.builder()
+            .put("client", "repo_test_kit")
+            .put("bucket", bucket)
+            .put("base_path", basePath)
+            .put("delete_objects_max_size", between(1, 1000))
+            .build();
}
}
