Skip to content

Commit

Permalink
fix: request disk_mb resource from k8s (#1858)
Browse files Browse the repository at this point in the history
* fix: request disk_mb resource from k8s

* explicit test of disk_mb passthrough
  • Loading branch information
kdm9 committed Sep 20, 2022
1 parent 4234fe7 commit f68f166
Show file tree
Hide file tree
Showing 3 changed files with 13 additions and 5 deletions.
6 changes: 6 additions & 0 deletions snakemake/executors/__init__.py
Expand Up @@ -1911,13 +1911,19 @@ def run(self, job, callback=None, submit_callback=None, error_callback=None):
container.env.append(envvar)

# request resources
# Log the job's declared resources before translating them into the
# Kubernetes container spec, for debugging scheduling problems.
logger.debug(f"job resources: {dict(job.resources)}")
container.resources = kubernetes.client.V1ResourceRequirements()
container.resources.requests = {}
# CPU request mirrors the job's declared core count (_cores is always set).
container.resources.requests["cpu"] = job.resources["_cores"]
if "mem_mb" in job.resources.keys():
    # The "M" suffix is the Kubernetes SI-megabyte quantity unit,
    # matching Snakemake's mem_mb convention.
    container.resources.requests["memory"] = "{}M".format(
        job.resources["mem_mb"]
    )
if "disk_mb" in job.resources.keys():
    # NOTE(review): the .get() fallback of 1024 is unreachable because of
    # the membership guard above; int() coerces a possibly-float value.
    disk_mb = int(job.resources.get("disk_mb", 1024))
    # Request scratch disk as ephemeral-storage so the scheduler places
    # the pod on a node with enough local disk for the job.
    container.resources.requests["ephemeral-storage"] = f"{disk_mb}M"

# Log the final request map actually sent to the Kubernetes API.
logger.debug(f"k8s pod resources: {container.resources.requests}")

# capabilities
if job.needs_singularity and self.workflow.use_singularity:
Expand Down
2 changes: 1 addition & 1 deletion tests/test_kubernetes.py
Expand Up @@ -17,7 +17,7 @@ def __init__(self):
try:
shell(
"""
gcloud container clusters create {self.cluster} --num-nodes 3 --scopes storage-rw --zone us-central1-a --machine-type n1-standard-2
gcloud container clusters create {self.cluster} --num-nodes 3 --scopes storage-rw --zone us-central1-a --machine-type n1-standard-2 --ephemeral-storage local-ssd-count=1
gcloud container clusters get-credentials {self.cluster} --zone us-central1-a
gsutil mb gs://{self.bucket_name}
"""
Expand Down
10 changes: 6 additions & 4 deletions tests/test_kubernetes/Snakefile
Expand Up @@ -20,11 +20,13 @@ rule copy:
"landsat-data.txt",
resources:
mem_mb=100,
disk_mb=301000
run:
# we could test volume size like this but it is currently unclear what f1-micro instances provide as boot disk size
# stats = os.statvfs('.')
# volume_gib = stats.f_bsize * stats.f_blocks / 1.074e9
# assert volume_gib > 90
# Nominally, we have attached a 375GB SSD to our underlying cluster node, and requested 301GB above.
stats = os.statvfs('.')
volume_gb = stats.f_bsize * stats.f_blocks / 1.0e9
assert volume_gb >= 300

shell("cp {input} {output}")


Expand Down

0 comments on commit f68f166

Please sign in to comment.