From 2e8978a7d1a8cbc50aad990fb9264da5e775dbbc Mon Sep 17 00:00:00 2001
From: Peter Andreas Entschev
Date: Wed, 20 Oct 2021 02:27:23 -0700
Subject: [PATCH] Update memory_pause_fraction in test_spill

Changes from https://github.com/dask/distributed/pull/5438 have slightly
changed the behavior of worker spilling options, so `memory_pause_fraction`
must now be set to a value other than `None`.
---
 dask_cuda/tests/test_spill.py | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/dask_cuda/tests/test_spill.py b/dask_cuda/tests/test_spill.py
index 0b3ec54e..5b62406d 100644
--- a/dask_cuda/tests/test_spill.py
+++ b/dask_cuda/tests/test_spill.py
@@ -89,7 +89,7 @@ def delayed_worker_assert(total_size, device_chunk_overhead, serialized_chunk_ov
             "memory_limit": int(800e6),
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": False,
         },
         {
@@ -97,7 +97,7 @@ def delayed_worker_assert(total_size, device_chunk_overhead, serialized_chunk_ov
             "memory_limit": int(200e6),
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": True,
         },
         {
@@ -105,7 +105,7 @@ def delayed_worker_assert(total_size, device_chunk_overhead, serialized_chunk_ov
             "memory_limit": 0,
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": False,
         },
     ],
@@ -167,7 +167,7 @@ def test_device_spill(client, scheduler, worker):
             "memory_limit": int(800e6),
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": False,
         },
         {
@@ -175,7 +175,7 @@ def test_device_spill(client, scheduler, worker):
             "memory_limit": int(200e6),
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": True,
         },
         {
@@ -183,7 +183,7 @@ def test_device_spill(client, scheduler, worker):
             "memory_limit": 0,
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": False,
         },
     ],
@@ -243,7 +243,7 @@ async def test_cupy_cluster_device_spill(params):
             "memory_limit": int(800e6),
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": False,
         },
         {
@@ -251,7 +251,7 @@ async def test_cupy_cluster_device_spill(params):
             "memory_limit": int(200e6),
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": True,
         },
         {
@@ -259,7 +259,7 @@ async def test_cupy_cluster_device_spill(params):
             "memory_limit": 0,
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": False,
         },
     ],
@@ -332,7 +332,7 @@ def test_device_spill(client, scheduler, worker):
             "memory_limit": int(800e6),
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": False,
         },
         {
@@ -340,7 +340,7 @@ def test_device_spill(client, scheduler, worker):
             "memory_limit": int(200e6),
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": True,
         },
         {
@@ -348,7 +348,7 @@ def test_device_spill(client, scheduler, worker):
             "memory_limit": 0,
             "host_target": 0.0,
             "host_spill": 0.0,
-            "host_pause": None,
+            "host_pause": 0.0,
             "spills_to_disk": False,
         },
     ],
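
For context, a minimal sketch of the worker memory thresholds these test
parameters exercise. The `distributed.worker.memory.target` / `.spill` /
`.pause` config keys are real distributed settings; the `params` dict below
and its mapping onto the test's `host_target` / `host_spill` / `host_pause`
parameters are illustrative assumptions, not the test's actual wiring.

    # Illustrative sketch (assumptions noted above): set the worker memory
    # thresholds via dask config and start a small cluster that uses them.
    import dask
    from distributed import Client, LocalCluster

    params = {
        "memory_limit": int(800e6),
        "host_target": 0.0,
        "host_spill": 0.0,
        "host_pause": 0.0,  # explicit 0.0 rather than None, per this patch
    }

    with dask.config.set(
        {
            "distributed.worker.memory.target": params["host_target"],
            "distributed.worker.memory.spill": params["host_spill"],
            "distributed.worker.memory.pause": params["host_pause"],
        }
    ):
        with LocalCluster(n_workers=1, memory_limit=params["memory_limit"]) as cluster:
            with Client(cluster) as client:
                # Submit work here; host spill/pause behavior follows the
                # thresholds configured above.
                client.submit(sum, [1, 2, 3]).result()

Per the commit message, after dask/distributed#5438 a `None` value no longer
yields the behavior these tests relied on, hence the explicit 0.0.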