Added forward/backward derivatives for divergence (#564)
* Added forward/backward derivatives for divergence on Cartesian grid
* Added forward and backward derivatives for divergence on spherical grids
david-zwicker committed May 13, 2024
1 parent eb44cbb commit 911579f
Showing 6 changed files with 131 additions and 34 deletions.
Binary file modified docs/methods/boundary_discretization/boundary_discretization.pdf
Binary file not shown.
12 changes: 11 additions & 1 deletion docs/methods/boundary_discretization/boundary_discretization.tex
@@ -510,14 +510,24 @@ \subsection{Conservative divergence operator}
\partial_\alpha v_{\alpha} \sim
\frac{r_{n + \frac12}^2 v_r(r_{n + \frac12}) - r_{n - \frac12}^2 v_r(r_{n - \frac12})}{V_n}
\end{align}
where $V_n$ is given by \Eqref{eqn:spherical_shell_volume}.
We obtain different estimates for different finite difference schemes:

\paragraph{Central differences:}
Here, we approximate the function values at the midpoints using a simple average,
\begin{align}
v_r(r_{n + \frac12}) &\approx \frac{v_r(r_{n}) + v_r(r_{n+1})}{2}
\;,
\end{align}
which might introduce uncontrolled artifacts.

\paragraph{Forward difference:}
Use $v_r(r_{n + \frac12}) \approx v_r(r_{n+1})$.

\paragraph{Backward difference:}
Use $v_r(r_{n + \frac12}) \approx v_r(r_{n})$.



\subsection{Conservative tensor divergence operator}
The radial component of the divergence of a spherically-symmetric tensor field (with $t_{\theta\theta} = t_{\phi\phi}$) reads
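For reference, substituting these one-sided approximations into the conservative formula above gives the explicit stencils
\begin{align}
\partial_\alpha v_{\alpha} &\approx \frac{r_{n + \frac12}^2 \, v_r(r_{n+1}) - r_{n - \frac12}^2 \, v_r(r_{n})}{V_n}
&&\text{(forward)}\;,
\\
\partial_\alpha v_{\alpha} &\approx \frac{r_{n + \frac12}^2 \, v_r(r_{n}) - r_{n - \frac12}^2 \, v_r(r_{n-1})}{V_n}
&&\text{(backward)}\;,
\end{align}
both of which are formally first-order accurate in the radial spacing, whereas the central average retains second-order accuracy.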
106 changes: 86 additions & 20 deletions pde/grids/operators/cartesian.py
@@ -955,20 +955,33 @@ def make_gradient_squared(grid: CartesianGrid, central: bool = True) -> Operator
return gradient_squared


def _make_divergence_scipy_nd(grid: CartesianGrid) -> OperatorType:
def _make_divergence_scipy_nd(
grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central"
) -> OperatorType:
"""make a divergence operator using the scipy module
Args:
grid (:class:`~pde.grids.cartesian.CartesianGrid`):
The grid for which the operator is created
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.
Returns:
A function that can be applied to an array of values
"""
from scipy import ndimage

data_shape = grid._shape_full
scale = 0.5 / grid.discretization
scale = 1 / grid.discretization
if method == "central":
stencil = [-0.5, 0, 0.5]
elif method == "forward":
stencil = [0, -1, 1]
elif method == "backward":
stencil = [-1, 1, 0]
else:
raise ValueError(f"Unknown derivative type `{method}`")

def divergence(arr: np.ndarray, out: np.ndarray) -> None:
"""apply divergence operator to array `arr`"""
@@ -984,45 +997,67 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None:
with np.errstate(all="ignore"):
# some errors can happen for ghost cells
for i in range(len(data_shape)):
out += ndimage.convolve1d(arr[i], [1, 0, -1], axis=i)[valid] * scale[i]
out += ndimage.correlate1d(arr[i], stencil, axis=i)[valid] * scale[i]

return divergence
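# Minimal illustrative sketch (not from the commit) of the correlate1d stencil
# convention assumed above: with the default origin, the middle weight aligns
# with the current cell, so [0, -1, 1] yields a forward difference and
# [-1, 1, 0] a backward difference.
import numpy as np
from scipy import ndimage

values = np.array([0.0, 1.0, 4.0, 9.0, 16.0])  # f(x) = x**2 on unit spacing
forward = ndimage.correlate1d(values, [0.0, -1.0, 1.0])
backward = ndimage.correlate1d(values, [-1.0, 1.0, 0.0])
print(forward[1:-1])   # [3. 5. 7.]  == values[i + 1] - values[i]
print(backward[1:-1])  # [1. 3. 5.]  == values[i] - values[i - 1]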


def _make_divergence_numba_1d(grid: CartesianGrid) -> OperatorType:
def _make_divergence_numba_1d(
grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central"
) -> OperatorType:
"""make a 1d divergence operator using numba compilation
Args:
grid (:class:`~pde.grids.cartesian.CartesianGrid`):
The grid for which the operator is created
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.
Returns:
A function that can be applied to an array of values
"""
if method not in {"central", "forward", "backward"}:
raise ValueError(f"Unknown derivative type `{method}`")
dim_x = grid.shape[0]
scale = 0.5 / grid.discretization[0]
dx = grid.discretization[0]

@jit
def divergence(arr: np.ndarray, out: np.ndarray) -> None:
"""apply gradient operator to array `arr`"""
for i in range(1, dim_x + 1):
out[i - 1] = (arr[0, i + 1] - arr[0, i - 1]) * scale
if method == "central":
out[i - 1] = (arr[0, i + 1] - arr[0, i - 1]) / (2 * dx)
elif method == "forward":
out[i - 1] = (arr[0, i + 1] - arr[0, i]) / dx
elif method == "backward":
out[i - 1] = (arr[0, i] - arr[0, i - 1]) / dx

return divergence # type: ignore
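# Hypothetical low-level usage sketch (grid size and data are made up): the
# compiled operator acts on the full data array, i.e. including one ghost cell
# per side along the spatial axis, and writes the result into the valid cells.
import numpy as np
from pde import CartesianGrid
from pde.grids.operators.cartesian import _make_divergence_numba_1d

grid = CartesianGrid([[0, 1]], 8)
div = _make_divergence_numba_1d(grid, method="forward")
arr = np.linspace(0.0, 1.0, grid.shape[0] + 2).reshape(1, -1) ** 2  # vector data with ghost cells
out = np.empty(grid.shape)
div(arr, out)  # out[i - 1] = (arr[0, i + 1] - arr[0, i]) / dx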


def _make_divergence_numba_2d(grid: CartesianGrid) -> OperatorType:
def _make_divergence_numba_2d(
grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central"
) -> OperatorType:
"""make a 2d divergence operator using numba compilation
Args:
grid (:class:`~pde.grids.cartesian.CartesianGrid`):
The grid for which the operator is created
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.
Returns:
A function that can be applied to an array of values
"""
dim_x, dim_y = grid.shape
scale_x, scale_y = 0.5 / grid.discretization
if method == "central":
scale_x, scale_y = 0.5 / grid.discretization
elif method in {"forward", "backward"}:
scale_x, scale_y = 1 / grid.discretization
else:
raise ValueError(f"Unknown derivative type `{method}`")

# use parallel processing for large enough arrays
parallel = dim_x * dim_y >= config["numba.multithreading_threshold"]
@@ -1032,25 +1067,42 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None:
"""apply gradient operator to array `arr`"""
for i in nb.prange(1, dim_x + 1):
for j in range(1, dim_y + 1):
d_x = (arr[0, i + 1, j] - arr[0, i - 1, j]) * scale_x
d_y = (arr[1, i, j + 1] - arr[1, i, j - 1]) * scale_y
if method == "central":
d_x = (arr[0, i + 1, j] - arr[0, i - 1, j]) * scale_x
d_y = (arr[1, i, j + 1] - arr[1, i, j - 1]) * scale_y
elif method == "forward":
d_x = (arr[0, i + 1, j] - arr[0, i, j]) * scale_x
d_y = (arr[1, i, j + 1] - arr[1, i, j]) * scale_y
elif method == "backward":
d_x = (arr[0, i, j] - arr[0, i - 1, j]) * scale_x
d_y = (arr[1, i, j] - arr[1, i, j - 1]) * scale_y
out[i - 1, j - 1] = d_x + d_y

return divergence # type: ignore


def _make_divergence_numba_3d(grid: CartesianGrid) -> OperatorType:
def _make_divergence_numba_3d(
grid: CartesianGrid, method: Literal["central", "forward", "backward"] = "central"
) -> OperatorType:
"""make a 3d divergence operator using numba compilation
Args:
grid (:class:`~pde.grids.cartesian.CartesianGrid`):
The grid for which the operator is created
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.
Returns:
A function that can be applied to an array of values
"""
dim_x, dim_y, dim_z = grid.shape
scale_x, scale_y, scale_z = 0.5 / grid.discretization
if method == "central":
scale_x, scale_y, scale_z = 0.5 / grid.discretization
elif method in {"forward", "backward"}:
scale_x, scale_y, scale_z = 1 / grid.discretization
else:
raise ValueError(f"Unknown derivative type `{method}`")

# use parallel processing for large enough arrays
parallel = dim_x * dim_y * dim_z >= config["numba.multithreading_threshold"]
@@ -1061,17 +1113,28 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None:
for i in nb.prange(1, dim_x + 1):
for j in range(1, dim_y + 1):
for k in range(1, dim_z + 1):
d_x = (arr[0, i + 1, j, k] - arr[0, i - 1, j, k]) * scale_x
d_y = (arr[1, i, j + 1, k] - arr[1, i, j - 1, k]) * scale_y
d_z = (arr[2, i, j, k + 1] - arr[2, i, j, k - 1]) * scale_z
if method == "central":
d_x = (arr[0, i + 1, j, k] - arr[0, i - 1, j, k]) * scale_x
d_y = (arr[1, i, j + 1, k] - arr[1, i, j - 1, k]) * scale_y
d_z = (arr[2, i, j, k + 1] - arr[2, i, j, k - 1]) * scale_z
elif method == "forward":
d_x = (arr[0, i + 1, j, k] - arr[0, i, j, k]) * scale_x
d_y = (arr[1, i, j + 1, k] - arr[1, i, j, k]) * scale_y
d_z = (arr[2, i, j, k + 1] - arr[2, i, j, k]) * scale_z
elif method == "backward":
d_x = (arr[0, i, j, k] - arr[0, i - 1, j, k]) * scale_x
d_y = (arr[1, i, j, k] - arr[1, i, j - 1, k]) * scale_y
d_z = (arr[2, i, j, k] - arr[2, i, j, k - 1]) * scale_z
out[i - 1, j - 1, k - 1] = d_x + d_y + d_z

return divergence # type: ignore


@CartesianGrid.register_operator("divergence", rank_in=1, rank_out=0)
def make_divergence(
grid: CartesianGrid, backend: Literal["auto", "numba", "scipy"] = "auto"
grid: CartesianGrid,
backend: Literal["auto", "numba", "scipy"] = "auto",
method: Literal["central", "forward", "backward"] = "central",
) -> OperatorType:
"""make a divergence operator on a Cartesian grid
@@ -1081,6 +1144,9 @@ def make_divergence(
backend (str):
Backend used for calculating the divergence operator.
If backend='auto', a suitable backend is chosen automatically.
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.
Returns:
A function that can be applied to an array of values
@@ -1096,18 +1162,18 @@ def make_divergence(

if backend == "numba":
if dim == 1:
divergence = _make_divergence_numba_1d(grid)
divergence = _make_divergence_numba_1d(grid, method=method)
elif dim == 2:
divergence = _make_divergence_numba_2d(grid)
divergence = _make_divergence_numba_2d(grid, method=method)
elif dim == 3:
divergence = _make_divergence_numba_3d(grid)
divergence = _make_divergence_numba_3d(grid, method=method)
else:
raise NotImplementedError(
f"Numba divergence operator not implemented for dimension {dim}"
)

elif backend == "scipy":
divergence = _make_divergence_scipy_nd(grid)
divergence = _make_divergence_scipy_nd(grid, method=method)

else:
raise ValueError(f"Backend `{backend}` is not defined")
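As a quick usage sketch mirroring the updated tests (the grid, expression, and boundary condition below are illustrative assumptions), the new keyword is reachable through the high-level field interface:

from pde import CartesianGrid, VectorField

grid = CartesianGrid([[0, 1]], 32)
field = VectorField.from_expression(grid, ["x**2"])
div_central = field.divergence("derivative", method="central")    # default scheme
div_forward = field.divergence("derivative", method="forward")    # one-sided
div_backward = field.divergence("derivative", method="backward")  # one-sided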
27 changes: 22 additions & 5 deletions pde/grids/operators/spherical_sym.py
@@ -183,7 +183,10 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:
@SphericalSymGrid.register_operator("divergence", rank_in=1, rank_out=0)
@fill_in_docstring
def make_divergence(
grid: SphericalSymGrid, safe: bool = True, conservative: bool = True
grid: SphericalSymGrid,
safe: bool = True,
conservative: bool = True,
method: Literal["central", "forward", "backward"] = "central",
) -> OperatorType:
"""make a discretized divergence operator for a spherical grid
@@ -203,6 +206,9 @@ def make_divergence(
Flag indicating whether the operator should be conservative (which results
in slightly slower computations). Conservative operators ensure mass
conservation.
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.
Returns:
A function that can be applied to an array of values
@@ -234,13 +240,19 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None:

arr_r = arr[0, :]
for i in range(1, dim_r + 1): # iterate radial points
term_h = factor_h[i - 1] * (arr_r[i] + arr_r[i + 1])
term_l = factor_l[i - 1] * (arr_r[i - 1] + arr_r[i])
if method == "central":
term_h = factor_h[i - 1] * (arr_r[i] + arr_r[i + 1])
term_l = factor_l[i - 1] * (arr_r[i - 1] + arr_r[i])
elif method == "forward":
term_h = 2 * factor_h[i - 1] * arr_r[i + 1]
term_l = 2 * factor_l[i - 1] * arr_r[i]
elif method == "backward":
term_h = 2 * factor_h[i - 1] * arr_r[i]
term_l = 2 * factor_l[i - 1] * arr_r[i - 1]
out[i - 1] = term_h - term_l

else:
# implement naive divergence operator
scale_r = 1 / (2 * dr)
factors = 2 / rs # factors that need to be multiplied below

@jit
@@ -255,7 +267,12 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None:

arr_r = arr[0, :]
for i in range(1, dim_r + 1): # iterate radial points
diff_r = (arr_r[i + 1] - arr_r[i - 1]) * scale_r
if method == "central":
diff_r = (arr_r[i + 1] - arr_r[i - 1]) / (2 * dr)
elif method == "forward":
diff_r = (arr_r[i + 1] - arr_r[i]) / dr
elif method == "backward":
diff_r = (arr_r[i] - arr_r[i - 1]) / dr
out[i - 1] = diff_r + factors[i - 1] * arr_r[i]

return divergence # type: ignore
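A corresponding sketch for the spherical grid, modeled on the conservative-divergence test below (the radial range and resolution are assumptions): for a flux that vanishes at both boundaries, the conservative operator keeps the integral of the divergence close to zero for all three schemes.

from pde import SphericalSymGrid, VectorField

grid = SphericalSymGrid([0.1, 5], 128)  # assumed radial range and resolution
vf = VectorField.from_expression(grid, ["1 / cosh((r - 1) * 10)", 0, 0])
for method in ["central", "forward", "backward"]:
    div = vf.divergence(bc="derivative", conservative=True, method=method)
    print(method, div.integral)  # expected to be approximately zero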
7 changes: 4 additions & 3 deletions tests/grids/operators/test_cartesian_operators.py
@@ -181,13 +181,14 @@ def test_gradient_cart(ndim, method, periodic, rng):


@pytest.mark.parametrize("ndim", [1, 2, 3])
@pytest.mark.parametrize("method", ["central", "forward", "backward"])
@pytest.mark.parametrize("periodic", [True, False])
def test_divergence_cart(ndim, periodic, rng):
def test_divergence_cart(ndim, method, periodic, rng):
"""test different divergence operators"""
bcs = _get_random_grid_bcs(ndim, dx="uniform", periodic=periodic, rank=1)
field = VectorField.random_uniform(bcs.grid, rng=rng)
res1 = field.divergence(bcs, backend="scipy").data
res2 = field.divergence(bcs, backend="numba").data
res1 = field.divergence(bcs, backend="scipy", method=method).data
res2 = field.divergence(bcs, backend="numba", method=method).data
np.testing.assert_allclose(res1, res2)


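A small convergence sketch (grid sizes and test field are assumptions) that measures how fast each scheme's error decays with resolution against the exact divergence:

import numpy as np
from pde import CartesianGrid, ScalarField, VectorField

for n in [32, 64, 128]:
    grid = CartesianGrid([[0, 2 * np.pi]], n, periodic=True)
    field = VectorField.from_expression(grid, ["sin(x)"])
    exact = ScalarField.from_expression(grid, "cos(x)")
    for method in ["central", "forward", "backward"]:
        div = field.divergence("periodic", method=method)
        error = np.max(np.abs(div.data - exact.data))
        print(n, method, error)  # central error ~n**-2, one-sided ~n**-1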
13 changes: 8 additions & 5 deletions tests/grids/operators/test_spherical_operators.py
@@ -38,8 +38,10 @@ def test_findiff_sph():
# test divergence
div = v.divergence(bc=["derivative", "value"], conservative=False)
np.testing.assert_allclose(div.data, [9, 3 + 4 / r1, -6 + 8 / r2])
div = v.divergence(bc="derivative", conservative=False)
np.testing.assert_allclose(div.data, [9, 3 + 4 / r1, 2 + 8 / r2])
div = v.divergence(bc="derivative", method="forward", conservative=False)
np.testing.assert_allclose(div.data, [10, 4 + 4 / r1, 8 / r2])
div = v.divergence(bc="derivative", method="backward", conservative=False)
np.testing.assert_allclose(div.data, [8, 2 + 4 / r1, 4 + 8 / r2])


def test_conservative_sph():
@@ -48,9 +50,10 @@ def test_conservative_sph():
expr = "1 / cosh((r - 1) * 10)"

# test divergence of vector field
vf = VectorField.from_expression(grid, [expr, 0, 0])
div = vf.divergence(bc="derivative", conservative=True)
assert div.integral == pytest.approx(0, abs=1e-2)
for method in ["central", "forward", "backward"]:
vf = VectorField.from_expression(grid, [expr, 0, 0])
div = vf.divergence(bc="derivative", conservative=True, method=method)
assert div.integral == pytest.approx(0, abs=1e-2)

# test laplacian of scalar field
lap = vf[0].laplace("derivative")
