From 254576828cf6ceb3635e89f148ceb91dc8393e66 Mon Sep 17 00:00:00 2001 From: fjetter Date: Mon, 18 Oct 2021 14:46:12 +0200 Subject: [PATCH 1/3] Ensure reconnecting workers do not lose required data --- distributed/scheduler.py | 22 +++++++++++++--------- distributed/tests/test_worker.py | 2 -- distributed/worker.py | 6 +++--- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/distributed/scheduler.py b/distributed/scheduler.py index 481220a97e..1ca5fa8474 100644 --- a/distributed/scheduler.py +++ b/distributed/scheduler.py @@ -2737,8 +2737,8 @@ def transition_processing_memory( if ws != ts._processing_on: # someone else has this task logger.info( - "Unexpected worker completed task, likely due to " - "work stealing. Expected: %s, Got: %s, Key: %s", + "Unexpected worker completed task %s. Expected: %s, Got: %s, Key: %s", + ts.key, ts._processing_on, ws, key, @@ -2835,7 +2835,7 @@ def transition_memory_released(self, key, safe: bint = False): worker_msg = { "op": "free-keys", "keys": [key], - "reason": f"Memory->Released {key}", + "stimulus_id": f"memory-released-{time()}", } for ws in ts._who_has: worker_msgs[ws._address] = [worker_msg] @@ -2935,7 +2935,11 @@ def transition_erred_released(self, key): if dts._state == "erred": recommendations[dts._key] = "waiting" - w_msg = {"op": "free-keys", "keys": [key], "reason": "Erred->Released"} + w_msg = { + "op": "free-keys", + "keys": [key], + "stimulus_id": f"erred-released-{time()}", + } for ws_addr in ts._erred_on: worker_msgs[ws_addr] = [w_msg] ts._erred_on.clear() @@ -3013,7 +3017,7 @@ def transition_processing_released(self, key): { "op": "free-keys", "keys": [key], - "reason": f"processing-released-{time()}", + "stimulus_id": f"processing-released-{time()}", } ] @@ -4339,9 +4343,9 @@ async def add_worker( worker_msgs[address] = [] worker_msgs[address].append( { - "op": "free-keys", + "op": "remove-replicas", "keys": already_released_keys, - "reason": f"reconnect-already-released-{time()}", + 
"stimulus_id": f"reconnect-already-released-{time()}", } ) for ts in list(parent._unrunnable): @@ -4767,7 +4771,7 @@ def stimulus_task_finished(self, key=None, worker=None, **kwargs): { "op": "free-keys", "keys": [key], - "reason": f"already-released-or-forgotten-{time()}", + "stimulus_id": f"already-released-or-forgotten-{time()}", } ] elif ts._state == "memory": @@ -7846,7 +7850,7 @@ def _propagate_forgotten( { "op": "free-keys", "keys": [key], - "reason": f"propagate-forgotten {ts.key}", + "stimulus_id": f"propagate-forgotten-{time()}", } ] state.remove_all_replicas(ts) diff --git a/distributed/tests/test_worker.py b/distributed/tests/test_worker.py index 93d391f6ef..81e25d8d03 100644 --- a/distributed/tests/test_worker.py +++ b/distributed/tests/test_worker.py @@ -2444,7 +2444,6 @@ async def test_hold_on_to_replicas(c, s, *workers): await asyncio.sleep(0.01) -@pytest.mark.flaky(reruns=10, reruns_delay=5) @gen_cluster(client=True) async def test_worker_reconnects_mid_compute(c, s, a, b): """Ensure that, if a worker disconnects while computing a result, the scheduler will @@ -2513,7 +2512,6 @@ def fast_on_a(lock): await asyncio.sleep(0.001) -@pytest.mark.flaky(reruns=10, reruns_delay=5) @gen_cluster(client=True) async def test_worker_reconnects_mid_compute_multiple_states_on_scheduler(c, s, a, b): """ diff --git a/distributed/worker.py b/distributed/worker.py index 88e82d5915..ebd62e9cd9 100644 --- a/distributed/worker.py +++ b/distributed/worker.py @@ -1596,7 +1596,7 @@ def update_data( self.batched_stream.send(msg) return {"nbytes": {k: sizeof(v) for k, v in data.items()}, "status": "OK"} - def handle_free_keys(self, comm=None, keys=None, reason=None): + def handle_free_keys(self, comm=None, keys=None, stimulus_id=None): """ Handler to be called by the scheduler. @@ -1607,14 +1607,14 @@ def handle_free_keys(self, comm=None, keys=None, reason=None): still decide to hold on to the data and task since it is required by an upstream dependency. 
""" - self.log.append(("free-keys", keys, reason)) + self.log.append(("free-keys", keys, stimulus_id)) recommendations = {} for key in keys: ts = self.tasks.get(key) if ts: recommendations[ts] = "released" if ts.dependents else "forgotten" - self.transitions(recommendations, stimulus_id=reason) + self.transitions(recommendations, stimulus_id=stimulus_id) def handle_remove_replicas(self, keys, stimulus_id): """Stream handler notifying the worker that it might be holding unreferenced, From 19d4bb8eb20e46dee22d19924a86abbb2c83abd2 Mon Sep 17 00:00:00 2001 From: fjetter Date: Mon, 18 Oct 2021 17:20:19 +0200 Subject: [PATCH 2/3] fix free_keys calls --- distributed/scheduler.py | 5 ++--- distributed/tests/test_failed_workers.py | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/distributed/scheduler.py b/distributed/scheduler.py index 1ca5fa8474..3ea042c60b 100644 --- a/distributed/scheduler.py +++ b/distributed/scheduler.py @@ -2737,8 +2737,7 @@ def transition_processing_memory( if ws != ts._processing_on: # someone else has this task logger.info( - "Unexpected worker completed task %s. Expected: %s, Got: %s, Key: %s", - ts.key, + "Unexpected worker completed task. Expected: %s, Got: %s, Key: %s", ts._processing_on, ws, key, @@ -5969,7 +5968,7 @@ async def delete_worker_data( await retry_operation( self.rpc(addr=worker_address).free_keys, keys=list(keys), - reason="rebalance/replicate", + stimulus_id="rebalance/replicate", ) except OSError as e: # This can happen e.g. if the worker is going through controlled shutdown; diff --git a/distributed/tests/test_failed_workers.py b/distributed/tests/test_failed_workers.py index 8e5d01167d..2b6d5149a2 100644 --- a/distributed/tests/test_failed_workers.py +++ b/distributed/tests/test_failed_workers.py @@ -488,7 +488,7 @@ def sink(*args): # artificially, without notifying the scheduler. 
# This can only succeed if B handles the missing data properly by # removing A from the known sources of keys - a.handle_free_keys(keys=["f1"], reason="Am I evil?") # Yes, I am! + a.handle_free_keys(keys=["f1"], stimulus_id="Am I evil?") # Yes, I am! result_fut = c.submit(sink, futures, workers=x.address) await result_fut From f9bb38ffee6a3911c0c823dd9e502b1f9e84927d Mon Sep 17 00:00:00 2001 From: fjetter Date: Mon, 18 Oct 2021 17:22:30 +0200 Subject: [PATCH 3/3] attach timestamp to stim ID of delete_worker_data --- distributed/scheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/distributed/scheduler.py b/distributed/scheduler.py index 3ea042c60b..fd873469af 100644 --- a/distributed/scheduler.py +++ b/distributed/scheduler.py @@ -5968,7 +5968,7 @@ async def delete_worker_data( await retry_operation( self.rpc(addr=worker_address).free_keys, keys=list(keys), - stimulus_id="rebalance/replicate", + stimulus_id=f"delete-data-{time()}", ) except OSError as e: # This can happen e.g. if the worker is going through controlled shutdown;