
tests: unflake system tests ensure config changes are propagated (#836)
* tests: unflake system tests ensure config changes are propagated

* avoid rate limits by using uuid
cojenco committed Aug 10, 2022
1 parent 5fa1947 commit ef74c3d
Showing 3 changed files with 62 additions and 29 deletions.
28 changes: 26 additions & 2 deletions tests/system/_helpers.py
@@ -46,9 +46,33 @@ def _has_kms_key_name(blob):
    return blob.kms_key_name is not None


+def _has_retention_expiration(blob):
+    return blob.retention_expiration_time is not None
+
+
+def _no_retention_expiration(blob):
+    return blob.retention_expiration_time is None
+
+
+def _has_retention_period(bucket):
+    return bucket.retention_period is not None
+
+
+def _no_retention_period(bucket):
+    return bucket.retention_period is None


retry_bad_copy = RetryErrors(exceptions.BadRequest, error_predicate=_bad_copy)
-retry_no_event_based_hold = RetryInstanceState(_no_event_based_hold)
-retry_has_kms_key_name = RetryInstanceState(_has_kms_key_name)
+retry_no_event_based_hold = RetryInstanceState(_no_event_based_hold, max_tries=10)
+retry_has_kms_key_name = RetryInstanceState(_has_kms_key_name, max_tries=10)
+retry_has_retention_expiration = RetryInstanceState(
+    _has_retention_expiration, max_tries=10
+)
+retry_no_retention_expiration = RetryInstanceState(
+    _no_retention_expiration, max_tries=10
+)
+retry_has_retention_period = RetryInstanceState(_has_retention_period, max_tries=10)
+retry_no_retention_period = RetryInstanceState(_no_retention_period, max_tries=10)


def unique_name(prefix):
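For reference, RetryInstanceState comes from the shared test_utils.retry helpers. A minimal sketch of its behavior, assuming it simply re-invokes a bound method and re-checks a predicate on that method's instance (the real helper adds backoff and logging):

import time

class RetryInstanceState:
    # Sketch: retry a bound method (e.g. bucket.reload) until a predicate
    # on the method's instance returns True, sleeping between attempts.
    def __init__(self, instance_predicate, max_tries=5, delay=1.0):
        self.instance_predicate = instance_predicate
        self.max_tries = max_tries
        self.delay = delay

    def __call__(self, to_wrap):
        def wrapped(*args, **kwargs):
            for _ in range(self.max_tries):
                to_wrap(*args, **kwargs)  # refresh state from the server
                if self.instance_predicate(to_wrap.__self__):
                    return
                time.sleep(self.delay)
            raise AssertionError("predicate still false after max_tries")
        return wrapped

With max_tries=10, a call like retry_has_retention_period(bucket.reload)() reloads the bucket up to ten times and returns as soon as retention_period is visible.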
49 changes: 25 additions & 24 deletions tests/system/test_blob.py
@@ -17,6 +17,7 @@
import io
import os
import tempfile
+import uuid
import warnings

import pytest
@@ -44,7 +45,7 @@ def test_large_file_write_from_stream(
    file_data,
    service_account,
):
-    blob = shared_bucket.blob("LargeFile")
+    blob = shared_bucket.blob(f"LargeFile{uuid.uuid4().hex}")

info = file_data["big"]
with open(info["path"], "rb") as file_obj:
@@ -60,7 +61,7 @@ def test_large_file_write_from_stream_w_checksum(
    file_data,
    service_account,
):
-    blob = shared_bucket.blob("LargeFile")
+    blob = shared_bucket.blob(f"LargeFile{uuid.uuid4().hex}")

info = file_data["big"]
with open(info["path"], "rb") as file_obj:
@@ -76,7 +77,7 @@ def test_large_file_write_from_stream_w_failed_checksum(
    file_data,
    service_account,
):
-    blob = shared_bucket.blob("LargeFile")
+    blob = shared_bucket.blob(f"LargeFile{uuid.uuid4().hex}")

    # Intercept the digest processing at the last stage and replace it
    # with garbage. This is done with a patch to monkey-patch the
@@ -128,7 +129,7 @@ def test_small_file_write_from_filename(
    file_data,
    service_account,
):
-    blob = shared_bucket.blob("SmallFile")
+    blob = shared_bucket.blob(f"SmallFile{uuid.uuid4().hex}")

info = file_data["simple"]
blob.upload_from_filename(info["path"])
@@ -143,7 +144,7 @@ def test_small_file_write_from_filename_with_checksum(
    file_data,
    service_account,
):
-    blob = shared_bucket.blob("SmallFile")
+    blob = shared_bucket.blob(f"SmallFile{uuid.uuid4().hex}")

info = file_data["simple"]
blob.upload_from_filename(info["path"], checksum="crc32c")
@@ -158,7 +159,7 @@ def test_small_file_write_from_filename_with_failed_checksum(
    file_data,
    service_account,
):
-    blob = shared_bucket.blob("SmallFile")
+    blob = shared_bucket.blob(f"SmallFile{uuid.uuid4().hex}")

info = file_data["simple"]
# Intercept the digest processing at the last stage and replace
@@ -381,7 +382,7 @@ def test_blob_acl_w_user_project(
    with_user_project = storage_client.bucket(
        shared_bucket.name, user_project=user_project
    )
-    blob = with_user_project.blob("SmallFile")
+    blob = with_user_project.blob(f"SmallFile{uuid.uuid4().hex}")

info = file_data["simple"]

@@ -444,10 +445,10 @@ def test_blob_acl_upload_predefined(
    file_data,
    service_account,
):
-    control = shared_bucket.blob("logo")
+    control = shared_bucket.blob(f"logo{uuid.uuid4().hex}")
    control_info = file_data["logo"]

-    blob = shared_bucket.blob("SmallFile")
+    blob = shared_bucket.blob(f"SmallFile{uuid.uuid4().hex}")
    info = file_data["simple"]

    try:
@@ -649,7 +650,7 @@ def test_blob_upload_from_file_resumable_with_generation(
    file_data,
    service_account,
):
-    blob = shared_bucket.blob("LargeFile")
+    blob = shared_bucket.blob(f"LargeFile{uuid.uuid4().hex}")
    wrong_generation = 3
    wrong_meta_generation = 3

@@ -826,13 +827,13 @@ def test_blob_compose_new_blob_wo_content_type(shared_bucket, blobs_to_delete):

def test_blob_compose_replace_existing_blob(shared_bucket, blobs_to_delete):
payload_before = b"AAA\n"
original = shared_bucket.blob("original")
original = shared_bucket.blob(uuid.uuid4().hex)
original.content_type = "text/plain"
original.upload_from_string(payload_before)
blobs_to_delete.append(original)

payload_to_append = b"BBB\n"
to_append = shared_bucket.blob("to_append")
to_append = shared_bucket.blob(uuid.uuid4().hex)
to_append.upload_from_string(payload_to_append)
blobs_to_delete.append(to_append)

@@ -843,15 +844,15 @@ def test_blob_compose_replace_existing_blob(shared_bucket, blobs_to_delete):

def test_blob_compose_w_generation_match_list(shared_bucket, blobs_to_delete):
payload_before = b"AAA\n"
original = shared_bucket.blob("original")
original = shared_bucket.blob(uuid.uuid4().hex)
original.content_type = "text/plain"
original.upload_from_string(payload_before)
blobs_to_delete.append(original)
wrong_generations = [6, 7]
wrong_metagenerations = [8, 9]

payload_to_append = b"BBB\n"
to_append = shared_bucket.blob("to_append")
to_append = shared_bucket.blob(uuid.uuid4().hex)
to_append.upload_from_string(payload_to_append)
blobs_to_delete.append(to_append)

@@ -877,13 +878,13 @@ def test_blob_compose_w_generation_match_list(shared_bucket, blobs_to_delete):

def test_blob_compose_w_generation_match_long(shared_bucket, blobs_to_delete):
payload_before = b"AAA\n"
original = shared_bucket.blob("original")
original = shared_bucket.blob(uuid.uuid4().hex)
original.content_type = "text/plain"
original.upload_from_string(payload_before)
blobs_to_delete.append(original)

payload_to_append = b"BBB\n"
to_append = shared_bucket.blob("to_append")
to_append = shared_bucket.blob(uuid.uuid4().hex)
to_append.upload_from_string(payload_to_append)
blobs_to_delete.append(to_append)

@@ -897,14 +898,14 @@ def test_blob_compose_w_generation_match_long(shared_bucket, blobs_to_delete):

def test_blob_compose_w_source_generation_match(shared_bucket, blobs_to_delete):
payload_before = b"AAA\n"
original = shared_bucket.blob("original")
original = shared_bucket.blob(uuid.uuid4().hex)
original.content_type = "text/plain"
original.upload_from_string(payload_before)
blobs_to_delete.append(original)
wrong_source_generations = [6, 7]

payload_to_append = b"BBB\n"
to_append = shared_bucket.blob("to_append")
to_append = shared_bucket.blob(uuid.uuid4().hex)
to_append.upload_from_string(payload_to_append)
blobs_to_delete.append(to_append)

@@ -929,18 +930,18 @@ def test_blob_compose_w_user_project(storage_client, buckets_to_delete, user_project):
    created.requester_pays = True

    payload_1 = b"AAA\n"
-    source_1 = created.blob("source-1")
+    source_1 = created.blob(uuid.uuid4().hex)
    source_1.upload_from_string(payload_1)

    payload_2 = b"BBB\n"
-    source_2 = created.blob("source-2")
+    source_2 = created.blob(uuid.uuid4().hex)
    source_2.upload_from_string(payload_2)

    with_user_project = storage_client.bucket(
        new_bucket_name, user_project=user_project
    )

-    destination = with_user_project.blob("destination")
+    destination = with_user_project.blob(uuid.uuid4().hex)
    destination.content_type = "text/plain"
    destination.compose([source_1, source_2])

@@ -949,13 +950,13 @@ def test_blob_compose_w_user_project(storage_client, buckets_to_delete, user_project):

def test_blob_rewrite_new_blob_add_key(shared_bucket, blobs_to_delete, file_data):
info = file_data["simple"]
source = shared_bucket.blob("source")
source = shared_bucket.blob(uuid.uuid4().hex)
source.upload_from_filename(info["path"])
blobs_to_delete.append(source)
source_data = source.download_as_bytes()

key = os.urandom(32)
dest = shared_bucket.blob("dest", encryption_key=key)
dest = shared_bucket.blob(uuid.uuid4().hex, encryption_key=key)
token, rewritten, total = dest.rewrite(source)
blobs_to_delete.append(dest)

@@ -1097,7 +1098,7 @@ def test_blob_update_storage_class_large_file(
):
    from google.cloud.storage import constants

-    blob = shared_bucket.blob("BigFile")
+    blob = shared_bucket.blob(f"BigFile{uuid.uuid4().hex}")

    info = file_data["big"]
    blob.upload_from_filename(info["path"])
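The change running through this file gives every test object a unique name. Cloud Storage rate-limits writes to a single object name (on the order of one update per second), so tests that repeatedly recreated fixed names like "LargeFile" or "SmallFile" in the shared bucket could hit 429 rate-limit errors across runs. A hypothetical helper illustrating the pattern (unique_blob_name is not part of this diff):

import uuid

def unique_blob_name(prefix):
    # Appending a random hex suffix means no two tests (or reruns) ever
    # write to the same object name, sidestepping the per-object update
    # rate limit in the shared bucket.
    return f"{prefix}{uuid.uuid4().hex}"

blob = shared_bucket.blob(unique_blob_name("LargeFile"))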
14 changes: 11 additions & 3 deletions tests/system/test_bucket.py
@@ -632,6 +632,10 @@ def test_bucket_w_retention_period(
    bucket.default_event_based_hold = False
    bucket.patch()

+    # Changes to the bucket will be readable immediately after writing,
+    # but configuration changes may take time to propagate.
+    _helpers.retry_has_retention_period(bucket.reload)()
+
    assert bucket.retention_period == period_secs
    assert isinstance(bucket.retention_policy_effective_time, datetime.datetime)
    assert not bucket.default_event_based_hold
@@ -645,6 +649,7 @@ def test_bucket_w_retention_period(
    blobs_to_delete.append(blob)

    other = bucket.get_blob(blob_name)
+    _helpers.retry_has_retention_expiration(other.reload)()

    assert not other.event_based_hold
    assert not other.temporary_hold
@@ -656,12 +661,16 @@ def test_bucket_w_retention_period(
    bucket.retention_period = None
    bucket.patch()

+    # Changes to the bucket will be readable immediately after writing,
+    # but configuration changes may take time to propagate.
+    _helpers.retry_no_retention_period(bucket.reload)()
+
    assert bucket.retention_period is None
    assert bucket.retention_policy_effective_time is None
    assert not bucket.default_event_based_hold
    assert not bucket.retention_policy_locked

-    _helpers.retry_no_event_based_hold(other.reload)()
+    _helpers.retry_no_retention_expiration(other.reload)()

    assert not other.event_based_hold
    assert not other.temporary_hold
@@ -719,8 +728,7 @@ def test_bucket_w_default_event_based_hold(
    blob.upload_from_string(payload)

    # https://github.com/googleapis/python-storage/issues/435
-    if blob.event_based_hold:
-        _helpers.retry_no_event_based_hold(blob.reload)()
+    _helpers.retry_no_event_based_hold(blob.reload)()

    assert not blob.event_based_hold
    assert not blob.temporary_hold
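Taken together, the retention tests now follow a write-then-poll shape. A condensed sketch of the pattern (illustrative values, not lines from the diff):

period_secs = 10  # illustrative retention period

bucket.retention_period = period_secs
bucket.patch()  # the metadata write itself succeeds immediately

# Enforcement of the new policy can lag the metadata write, so poll
# reload() until the change is visible before asserting on it.
_helpers.retry_has_retention_period(bucket.reload)()
assert bucket.retention_period == period_secs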
