diff --git a/snakemake/executors/__init__.py b/snakemake/executors/__init__.py
index 582370e0a..349e6e71d 100644
--- a/snakemake/executors/__init__.py
+++ b/snakemake/executors/__init__.py
@@ -1911,6 +1911,7 @@ def run(self, job, callback=None, submit_callback=None, error_callback=None):
             container.env.append(envvar)

         # request resources
+        logger.debug(f"job resources: {dict(job.resources)}")
         container.resources = kubernetes.client.V1ResourceRequirements()
         container.resources.requests = {}
         container.resources.requests["cpu"] = job.resources["_cores"]
@@ -1918,6 +1919,11 @@ def run(self, job, callback=None, submit_callback=None, error_callback=None):
             container.resources.requests["memory"] = "{}M".format(
                 job.resources["mem_mb"]
             )
+        if "disk_mb" in job.resources.keys():
+            disk_mb = int(job.resources.get("disk_mb", 1024))
+            container.resources.requests["ephemeral-storage"] = f"{disk_mb}M"
+
+        logger.debug(f"k8s pod resources: {container.resources.requests}")

         # capabilities
         if job.needs_singularity and self.workflow.use_singularity:
diff --git a/tests/test_kubernetes.py b/tests/test_kubernetes.py
index e6dd028fb..f7d843814 100644
--- a/tests/test_kubernetes.py
+++ b/tests/test_kubernetes.py
@@ -17,7 +17,7 @@ def __init__(self):
         try:
             shell(
                 """
-                gcloud container clusters create {self.cluster} --num-nodes 3 --scopes storage-rw --zone us-central1-a --machine-type n1-standard-2
+                gcloud container clusters create {self.cluster} --num-nodes 3 --scopes storage-rw --zone us-central1-a --machine-type n1-standard-2 --ephemeral-storage local-ssd-count=1
                 gcloud container clusters get-credentials {self.cluster} --zone us-central1-a
                 gsutil mb gs://{self.bucket_name}
                 """
diff --git a/tests/test_kubernetes/Snakefile b/tests/test_kubernetes/Snakefile
index 1aeae1ab8..d031096cb 100644
--- a/tests/test_kubernetes/Snakefile
+++ b/tests/test_kubernetes/Snakefile
@@ -20,11 +20,13 @@ rule copy:
         "landsat-data.txt",
     resources:
         mem_mb=100,
+        disk_mb=301000
     run:
-        # we could test volume size like this but it is currently unclear what f1-micro instances provide as boot disk size
-        # stats = os.statvfs('.')
-        # volume_gib = stats.f_bsize * stats.f_blocks / 1.074e9
-        # assert volume_gib > 90
+        # Nominally, we have attached a 375GB SSD to our underlying cluster node, and requested 301GB above.
+        stats = os.statvfs('.')
+        volume_gb = stats.f_bsize * stats.f_blocks / 1.0e9
+        assert volume_gb >= 300
+
         shell("cp {input} {output}")
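
# For context, a minimal standalone sketch of how the new branch is expected to
# translate Snakemake resources into the pod's resource requests, with disk_mb
# surfacing as an ephemeral-storage request. Assumptions: "job_resources" is a
# hypothetical stand-in for dict(job.resources), and a plain dict stands in for
# the kubernetes.client.V1ResourceRequirements() object built by the executor;
# this is not the executor's actual code path.
job_resources = {"_cores": 1, "mem_mb": 100, "disk_mb": 301000}

requests = {}
requests["cpu"] = job_resources["_cores"]
if "mem_mb" in job_resources:
    requests["memory"] = "{}M".format(job_resources["mem_mb"])
if "disk_mb" in job_resources:
    # Kubernetes reads the plain "M" suffix as megabytes (10^6 bytes),
    # so 301000M is roughly the 301 GB requested by the test rule above.
    requests["ephemeral-storage"] = "{}M".format(int(job_resources["disk_mb"]))

print(requests)
# {'cpu': 1, 'memory': '100M', 'ephemeral-storage': '301000M'}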