diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index edaef1c72..e386b0858 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -11,9 +11,17 @@ repos:
   - repo: https://github.com/psf/black
     rev: 23.3.0
     hooks:
-      - id: black
+      - id: black-jupyter
   - repo: https://github.com/charliermarsh/ruff-pre-commit
     rev: "v0.0.261"
     hooks:
       - id: ruff
         args: ["--fix"]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.7.0
+    hooks:
+      - id: nbqa-black
+        additional_dependencies: [jupytext, black]
+      - id: nbqa
+        args: ["ruff", "--fix", "--ignore=E402,B018,F704"]
+        additional_dependencies: [jupytext, ruff]
diff --git a/docs/source/algorithms_and_examples.md b/docs/source/algorithms_and_examples.md
index 3c702784a..e5c532dc3 100644
--- a/docs/source/algorithms_and_examples.md
+++ b/docs/source/algorithms_and_examples.md
@@ -1,15 +1,13 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
-execution:
-  timeout: 300
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
 
 ```{include} ../../README.md
@@ -101,16 +99,17 @@ def plot_loss_interval(learner):
     return hv.Scatter((x, y)).opts(size=6, color="r")
 
 
-def plot(learner, npoints):
-    adaptive.runner.simple(learner, npoints_goal= npoints)
+def plot_interval(learner, npoints):
+    adaptive.runner.simple(learner, npoints_goal=npoints)
     return (learner.plot() * plot_loss_interval(learner))[:, -1.1:1.1]
 
 
 def get_hm(loss_per_interval, N=101):
     learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=loss_per_interval)
-    plots = {n: plot(learner, n) for n in range(N)}
+    plots = {n: plot_interval(learner, n) for n in range(N)}
     return hv.HoloMap(plots, kdims=["npoints"])
 
+
 plot_homo = get_hm(uniform_loss).relabel("homogeneous sampling")
 plot_adaptive = get_hm(default_loss).relabel("with adaptive")
 layout = plot_homo + plot_adaptive
@@ -122,7 +121,6 @@ layout.opts(toolbar=None)
 ```{code-cell} ipython3
 :tags: [hide-input]
 
-
 def ring(xy):
     import numpy as np
 
@@ -131,7 +129,7 @@ def ring(xy):
     return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4)
 
 
-def plot(learner, npoints):
+def plot_compare(learner, npoints):
     adaptive.runner.simple(learner, npoints_goal=npoints)
     learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)
     xs = ys = np.linspace(*learner.bounds[0], int(learner.npoints**0.5))
@@ -146,7 +144,7 @@ def plot(learner, npoints):
 
 
 learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
-plots = {n: plot(learner, n) for n in range(4, 1010, 20)}
+plots = {n: plot_compare(learner, n) for n in range(4, 1010, 20)}
 hv.HoloMap(plots, kdims=["npoints"]).collate()
 ```
 
@@ -155,7 +153,6 @@ hv.HoloMap(plots, kdims=["npoints"]).collate()
 ```{code-cell} ipython3
 :tags: [hide-input]
 
-
 def g(n):
     import random
 
@@ -167,12 +164,12 @@
 learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)
 
 
-def plot(learner, npoints):
+def plot_avg(learner, npoints):
     adaptive.runner.simple(learner, npoints_goal=npoints)
     return learner.plot().relabel(f"loss={learner.loss():.2f}")
 
 
-plots = {n: plot(learner, n) for n in range(10, 10000, 200)}
+plots = {n: plot_avg(learner, n) for n in range(10, 10000, 200)}
 hv.HoloMap(plots, kdims=["npoints"])
 ```
 
@@ -181,7 +178,6 @@ hv.HoloMap(plots, kdims=["npoints"])
 ```{code-cell} ipython3
 :tags: [hide-input]
 
-
 def sphere(xyz):
     import numpy as np
 
diff --git a/docs/source/logo.md b/docs/source/logo.md
index c0baf5ddd..e7e553899 100644
--- a/docs/source/logo.md
+++ b/docs/source/logo.md
@@ -4,19 +4,16 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.1
+    jupytext_version: 1.14.5
 kernelspec:
   display_name: Python 3 (ipykernel)
   language: python
   name: python3
-execution:
-  timeout: 300
 ---
 
 ```{code-cell} ipython3
 :tags: [remove-input]
 
-import os
 import functools
 import subprocess
 import tempfile
@@ -75,7 +72,7 @@ def remove_rounded_corners(fname):
 
 def learner_till(till, learner, data):
     new_learner = adaptive.Learner2D(None, bounds=learner.bounds)
-    new_learner.data = {k: v for k, v in data[:till]}
+    new_learner.data = dict(data[:till])
     for x, y in learner._bounds_points:
         # always include the bounds
         new_learner.tell((x, y), learner.data[x, y])
diff --git a/docs/source/tutorial/tutorial.BalancingLearner.md b/docs/source/tutorial/tutorial.BalancingLearner.md
index 5f43bdd64..b33a6f823 100644
--- a/docs/source/tutorial/tutorial.BalancingLearner.md
+++ b/docs/source/tutorial/tutorial.BalancingLearner.md
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Tutorial {class}`~adaptive.BalancingLearner`
 
 ```{note}
@@ -60,7 +61,10 @@ runner.live_info()
 ```
 
 ```{code-cell} ipython3
-plotter = lambda learner: hv.Overlay([L.plot() for L in learner.learners])
+def plotter(learner):
+    return hv.Overlay([L.plot() for L in learner.learners])
+
+
 runner.live_plot(plotter=plotter, update_interval=0.1)
 ```
 
@@ -83,7 +87,7 @@ combos = {
 }
 
 learner = adaptive.BalancingLearner.from_product(
-    jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos
+    jacobi, adaptive.Learner1D, {"bounds": (0, 1)}, combos
 )
 
 runner = adaptive.BlockingRunner(learner, loss_goal=0.01)
diff --git a/docs/source/tutorial/tutorial.DataSaver.md b/docs/source/tutorial/tutorial.DataSaver.md
index 4d4e0efc4..1eb35e707 100644
--- a/docs/source/tutorial/tutorial.DataSaver.md
+++ b/docs/source/tutorial/tutorial.DataSaver.md
@@ -69,7 +69,7 @@ runner.live_info()
 ```
 
 ```{code-cell} ipython3
-runner.live_plot(plotter=lambda l: l.learner.plot(), update_interval=0.1)
+runner.live_plot(plotter=lambda lrn: lrn.learner.plot(), update_interval=0.1)
 ```
 
 Now the `DataSavingLearner` will have a dictionary attribute `extra_data` that has `x` as key and the data that was returned by `learner.function` as values.
diff --git a/docs/source/tutorial/tutorial.IntegratorLearner.md b/docs/source/tutorial/tutorial.IntegratorLearner.md
index 8110512a9..50aaf2e5b 100644
--- a/docs/source/tutorial/tutorial.IntegratorLearner.md
+++ b/docs/source/tutorial/tutorial.IntegratorLearner.md
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Tutorial {class}`~adaptive.IntegratorLearner`
 
 ```{note}
@@ -60,9 +61,7 @@ learner = adaptive.IntegratorLearner(f24, bounds=(0, 3), tol=1e-8)
 # We use a SequentialExecutor, which runs the function to be learned in
 # *this* process only. This means we don't pay
 # the overhead of evaluating the function in another process.
-runner = adaptive.Runner(
-    learner, executor=SequentialExecutor()
-)
+runner = adaptive.Runner(learner, executor=SequentialExecutor())
 ```
 
 ```{code-cell} ipython3
diff --git a/docs/source/tutorial/tutorial.Learner1D.md b/docs/source/tutorial/tutorial.Learner1D.md
index db80e03ec..de40a83d1 100644
--- a/docs/source/tutorial/tutorial.Learner1D.md
+++ b/docs/source/tutorial/tutorial.Learner1D.md
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 (TutorialLearner1D)=
 # Tutorial {class}`~adaptive.Learner1D`
 
@@ -112,6 +113,8 @@ random.seed(0)
 offsets = [random.uniform(-0.8, 0.8) for _ in range(3)]
 
 # sharp peaks at random locations in the domain
+
+
 def f_levels(x, offsets=offsets):
     a = 0.01
     return np.array(
@@ -124,7 +127,9 @@ The `Learner1D` can be used for such functions:
 
 ```{code-cell} ipython3
 learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))
-runner = adaptive.Runner(learner, loss_goal=0.01)  # continue until `learner.loss()<=0.01`
+runner = adaptive.Runner(
+    learner, loss_goal=0.01
+)  # continue until `learner.loss()<=0.01`
 ```
 
 ```{code-cell} ipython3
@@ -211,12 +216,14 @@ learner.to_numpy()
 ```
 
 If Pandas is installed (optional dependency), you can also run
+
 ```{code-cell} ipython3
 df = learner.to_dataframe()
 df
 ```
 
 and load that data into a new learner with
+
 ```{code-cell} ipython3
 new_learner = adaptive.Learner1D(learner.function, (-1, 1))  # create an empty learner
 new_learner.load_dataframe(df)  # load the pandas.DataFrame's data
diff --git a/docs/source/tutorial/tutorial.Learner2D.md b/docs/source/tutorial/tutorial.Learner2D.md
index d15446fe4..7d0130fc6 100644
--- a/docs/source/tutorial/tutorial.Learner2D.md
+++ b/docs/source/tutorial/tutorial.Learner2D.md
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Tutorial {class}`~adaptive.Learner2D`
 
 ```{note}
@@ -24,6 +25,7 @@
 import holoviews as hv
 import numpy as np
 from functools import partial
+
 adaptive.notebook_extension()
 ```
 
diff --git a/docs/source/tutorial/tutorial.LearnerND.md b/docs/source/tutorial/tutorial.LearnerND.md
index c03f6b5c8..46f948708 100644
--- a/docs/source/tutorial/tutorial.LearnerND.md
+++ b/docs/source/tutorial/tutorial.LearnerND.md
@@ -1,16 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
-execution:
-  timeout: 300
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Tutorial {class}`~adaptive.LearnerND`
 
 ```{note}
@@ -111,6 +110,7 @@ You could use the following code as an example:
 ```{code-cell} ipython3
 import scipy
 
+
 def f(xyz):
     x, y, z = xyz
     return x**4 + y**4 + z**4 - (x**2 + y**2 + z**2) ** 2
diff --git a/docs/source/tutorial/tutorial.advanced-topics.md b/docs/source/tutorial/tutorial.advanced-topics.md
index 92d4cae43..2dfc6cf29 100644
--- a/docs/source/tutorial/tutorial.advanced-topics.md
+++ b/docs/source/tutorial/tutorial.advanced-topics.md
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Advanced Topics
 
 ```{note}
@@ -24,7 +25,6 @@ import adaptive
 adaptive.notebook_extension()
 
 import asyncio
-from functools import partial
 import random
 
 offset = random.uniform(-0.5, 0.5)
@@ -92,7 +92,7 @@ def slow_f(x):
 
 learner = adaptive.Learner1D(slow_f, bounds=[0, 1])
 runner = adaptive.Runner(learner, npoints_goal=100)
 runner.start_periodic_saving(
-    save_kwargs=dict(fname="data/periodic_example.p"), interval=6
+    save_kwargs={"fname": "data/periodic_example.p"}, interval=6
 )
 ```
@@ -168,9 +168,7 @@ If you want to enable determinism, want to continue using the non-blocking {clas
 from adaptive.runner import SequentialExecutor
 
 learner = adaptive.Learner1D(f, bounds=(-1, 1))
-runner = adaptive.Runner(
-    learner, executor=SequentialExecutor(), loss_goal=0.01
-)
+runner = adaptive.Runner(learner, executor=SequentialExecutor(), loss_goal=0.01)
 ```
 
 ```{code-cell} ipython3
@@ -275,6 +273,7 @@ If the runner stopped due to an exception then asking for the result will raise
 
 ```{code-cell} ipython3
 :tags: [raises-exception]
+
 runner.task.result()
 ```
 
@@ -380,6 +379,7 @@ a slow part `g` which can be reused by multiple inputs and shared across functio
 ```{code-cell} ipython3
 import time
 
+
 def f(x):
     """
     Integer part of `x` repeats and should be reused
@@ -407,9 +407,10 @@ from dask import delayed
 # Convert g and h to dask.Delayed objects
 g, h = delayed(g), delayed(h)
 
+
 @delayed
 def f(x, y):
-    return (x + y)**2
+    return (x + y) ** 2
 ```
 
 Next we define a computation using coroutines such that it reuses previously submitted tasks.
@@ -421,6 +422,7 @@ client = await Client(asynchronous=True)
 
 g_futures = {}
 
+
 async def f_parallel(x):
     # Get or submit the slow function future
     if (g_future := g_futures.get(int(x))) is None:
diff --git a/docs/source/tutorial/tutorial.custom_loss.md b/docs/source/tutorial/tutorial.custom_loss.md
index f76af484d..222dc6306 100644
--- a/docs/source/tutorial/tutorial.custom_loss.md
+++ b/docs/source/tutorial/tutorial.custom_loss.md
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Custom adaptive logic for 1D and 2D
 
 ```{note}
@@ -25,7 +26,6 @@ adaptive.notebook_extension()
 
 # Import modules that are used in multiple cells
 import numpy as np
-from functools import partial
 import holoviews as hv
 ```
 
@@ -79,9 +79,6 @@ learner.plot().select(y=(0, 10000))
 ```
 
 ```{code-cell} ipython3
-from adaptive.runner import SequentialExecutor
-
-
 def uniform_sampling_2d(ip):
     from adaptive.learner.learner2D import areas
 
@@ -99,7 +96,9 @@ learner = adaptive.Learner2D(
 )
 
 # this takes a while, so use the async Runner so we know *something* is happening
-runner = adaptive.Runner(learner, goal=lambda l: l.loss() < 0.03 or l.npoints > 1000)
+runner = adaptive.Runner(
+    learner, goal=lambda lrn: lrn.loss() < 0.03 or lrn.npoints > 1000
+)
 ```
 
 ```{code-cell} ipython3
@@ -113,7 +112,10 @@ runner.live_info()
 ```
 
 ```{code-cell} ipython3
-plotter = lambda l: l.plot(tri_alpha=0.3).relabel("1 / (x^2 + y^2) in log scale")
+def plotter(lrn):
+    return lrn.plot(tri_alpha=0.3).relabel("1 / (x^2 + y^2) in log scale")
+
+
 runner.live_plot(update_interval=0.2, plotter=plotter)
 ```
 
diff --git a/example-notebook.ipynb b/example-notebook.ipynb
index d3a739056..af6fa1888 100644
--- a/example-notebook.ipynb
+++ b/example-notebook.ipynb
@@ -27,11 +27,12 @@
     "\n",
     "adaptive.notebook_extension()\n",
     "\n",
+    "import random\n",
+    "from functools import partial\n",
+    "\n",
     "# Import modules that are used in multiple cells\n",
     "import holoviews as hv\n",
-    "import numpy as np\n",
-    "from functools import partial\n",
-    "import random"
+    "import numpy as np"
    ]
   },
   {
@@ -60,15 +61,15 @@
     "\n",
     "\n",
     "def peak(x, offset=offset, wait=True):\n",
-    "    from time import sleep\n",
     "    from random import random\n",
+    "    from time import sleep\n",
     "\n",
     "    a = 0.01\n",
     "    if wait:\n",
     "        # we pretend that this is a slow function\n",
     "        sleep(random())\n",
     "\n",
-    "    return x + a**2 / (a**2 + (x - offset)**2)"
+    "    return x + a**2 / (a**2 + (x - offset) ** 2)"
    ]
   },
   {
@@ -173,16 +174,17 @@
    "outputs": [],
    "source": [
     "def ring(xy, wait=True):\n",
-    "    import numpy as np\n",
-    "    from time import sleep\n",
     "    from random import random\n",
+    "    from time import sleep\n",
+    "\n",
+    "    import numpy as np\n",
     "\n",
     "    if wait:\n",
     "        # we pretend that this is a slow function\n",
     "        sleep(random() / 10)\n",
     "    x, y = xy\n",
     "    a = 0.2\n",
-    "    return x + np.exp(-((x**2 + y**2 - 0.75**2)**2) / a**4)\n",
+    "    return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4)\n",
     "\n",
     "\n",
     "learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])"
    ]
   },
   {
@@ -223,7 +225,7 @@
     "# Create a learner and add data on homogeneous grid, so that we can plot it\n",
     "learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)\n",
     "n = int(learner.npoints**0.5)\n",
-    "xs, ys = [np.linspace(*bounds, n) for bounds in learner.bounds]\n",
+    "xs, ys = (np.linspace(*bounds, n) for bounds in learner.bounds)\n",
     "xys = list(itertools.product(xs, ys))\n",
     "zs = [ring(xy, wait=False) for xy in xys]\n",
     "learner2.tell_many(xys, zs)\n",
@@ -233,7 +235,7 @@
     " + learner.plot().relabel(\"With adaptive\")\n",
     " + learner2.plot(n, tri_alpha=0.4)\n",
     " + learner.plot(tri_alpha=0.4)\n",
-    ").cols(2).opts({\"EdgePaths\": dict(color=\"w\")})"
+    ").cols(2).opts({\"EdgePaths\": {\"color\": \"w\"}})"
    ]
   },
   {
@@ -316,7 +318,7 @@
    "source": [
     "def noisy_peak(seed_x, sigma=0, peak_width=0.05, offset=-0.5):\n",
     "    seed, x = seed_x\n",
-    "    y = x**3 - x + 3 * peak_width**2 / (peak_width**2 + (x - offset)**2)\n",
+    "    y = x**3 - x + 3 * peak_width**2 / (peak_width**2 + (x - offset) ** 2)\n",
     "    rng = np.random.RandomState(int(seed))\n",
     "    noise = rng.normal(scale=sigma)\n",
     "    return y + noise"
@@ -373,8 +375,8 @@
     "learner = adaptive.AverageLearner1D(partial(noisy_peak, sigma=1), bounds=(-2, 2))\n",
     "\n",
     "\n",
-    "def goal(l):\n",
-    "    return l.nsamples >= 10_000 and l.min_samples_per_point >= 20\n",
+    "def goal(lrn):\n",
+    "    return lrn.nsamples >= 10_000 and lrn.min_samples_per_point >= 20\n",
     "\n",
     "\n",
     "runner = adaptive.Runner(learner, goal=goal)\n",
@@ -446,10 +448,11 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "We initialize a learner again and pass the bounds and relative tolerance we want to reach. Then in the `Runner` we pass `goal=lambda lrn: lrn.done()` where `learner.done()` is `True` when the relative tolerance has been reached."
    ]
   },
   {
@@ -515,9 +518,9 @@
     "random.seed(0)\n",
     "offsets = [random.uniform(-0.8, 0.8) for _ in range(3)]\n",
     "\n",
+    "\n",
     "# sharp peaks at random locations in the domain\n",
     "def f_levels(x, offsets=offsets):\n",
-    "    a = 0.01\n",
     "    return np.array([offset + peak(x, offset, wait=False) for offset in offsets])"
    ]
   },
@@ -567,7 +570,7 @@
     "def sphere(xyz):\n",
     "    x, y, z = xyz\n",
     "    a = 0.4\n",
-    "    return x + z**2 + np.exp(-((x**2 + y**2 + z**2 - 0.75**2)**2) / a**4)\n",
+    "    return x + z**2 + np.exp(-((x**2 + y**2 + z**2 - 0.75**2) ** 2) / a**4)\n",
     "\n",
     "\n",
     "learner = adaptive.LearnerND(sphere, bounds=[(-1, 1), (-1, 1), (-1, 1)])\n",
@@ -612,7 +615,7 @@
    "source": [
     "def plot_cut(x1, x2, directions, learner=learner):\n",
     "    cut_mapping = {\"xyz\".index(d): x for d, x in zip(directions, [x1, x2])}\n",
-    "    return learner.plot_slice(cut_mapping).opts({\"Path\": dict(framewise=True)})\n",
+    "    return learner.plot_slice(cut_mapping).opts({\"Path\": {\"framewise\": True}})\n",
     "\n",
     "\n",
     "dm = hv.DynamicMap(plot_cut, kdims=[\"v1\", \"v2\", \"directions\"])\n",
@@ -709,7 +712,7 @@
     "\n",
     "def plot_logz(learner):\n",
     "    p = learner.plot(tri_alpha=0.3).relabel(\"1 / (x^2 + y^2) in log scale\")\n",
-    "    return p.opts({\"Image\": dict(logz=True), \"EdgePaths\": dict(color=\"w\")})\n",
+    "    return p.opts({\"Image\": {\"logz\": True}, \"EdgePaths\": {\"color\": \"w\"}})\n",
     "\n",
     "\n",
     "learner = adaptive.Learner2D(\n",
@@ -818,7 +821,7 @@
    "source": [
     "def h(x, offset=0):\n",
     "    a = 0.01\n",
-    "    return x + a**2 / (a**2 + (x - offset)**2)\n",
+    "    return x + a**2 / (a**2 + (x - offset) ** 2)\n",
     "\n",
     "\n",
     "learners = [\n",
@@ -837,7 +840,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "plotter = lambda learner: hv.Overlay([L.plot() for L in learner.learners])\n",
+    "def plotter(learner):\n",
+    "    return hv.Overlay([lrn.plot() for lrn in learner.learners])\n",
+    "\n",
+    "\n",
     "runner.live_plot(plotter=plotter, update_interval=0.1)"
    ]
   },
@@ -868,7 +874,7 @@
     "}\n",
     "\n",
     "learner = adaptive.BalancingLearner.from_product(\n",
-    "    jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos\n",
+    "    jacobi, adaptive.Learner1D, {\"bounds\": (0, 1)}, combos\n",
     ")\n",
     "\n",
     "runner = adaptive.BlockingRunner(learner, loss_goal=0.01)\n",
@@ -946,7 +952,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "runner.live_plot(plotter=lambda l: l.learner.plot(), update_interval=0.1)"
+    "runner.live_plot(plotter=lambda lrn: lrn.learner.plot(), update_interval=0.1)"
    ]
   },
   {
@@ -1243,7 +1249,7 @@
     "runner = adaptive.Runner(learner, npoints_goal=100)\n",
     "\n",
     "runner.start_periodic_saving(\n",
-    "    save_kwargs=dict(fname=\"data/periodic_example.p\"), interval=6\n",
+    "    save_kwargs={\"fname\": \"data/periodic_example.p\"}, interval=6\n",
     ")\n",
     "\n",
     "runner.live_info()"
@@ -1394,9 +1400,7 @@
     "\n",
     "learner = adaptive.Learner1D(peak, bounds=(-1, 1))\n",
     "\n",
-    "runner = adaptive.Runner(\n",
-    "    learner, executor=SequentialExecutor(), loss_goal=0.002\n",
-    ")\n",
+    "runner = adaptive.Runner(learner, executor=SequentialExecutor(), loss_goal=0.002)\n",
     "runner.live_info()\n",
     "runner.live_plot(update_interval=0.1)"
    ]
@@ -1573,7 +1577,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "learner.plot().Scatter.I.opts(style=dict(size=6)) * reconstructed_learner.plot()"
+    "learner.plot().Scatter.I.opts(style={\"size\": 6}) * reconstructed_learner.plot()"
    ]
   },
   {
diff --git a/readthedocs.yml b/readthedocs.yml
index 280e95ee7..23fab10c6 100644
--- a/readthedocs.yml
+++ b/readthedocs.yml
@@ -1,7 +1,7 @@
 version: 2
 
 build:
-  os: "ubuntu-20.04"
+  os: "ubuntu-22.04"
   tools:
     python: "mambaforge-4.10"