Added forward and backward derivatives for divergence on spherical grids
david-zwicker committed May 13, 2024
1 parent 57ce4fd commit 074319f
Showing 4 changed files with 41 additions and 11 deletions.
Binary file modified docs/methods/boundary_discretization/boundary_discretization.pdf
Binary file not shown.
12 changes: 11 additions & 1 deletion docs/methods/boundary_discretization/boundary_discretization.tex
@@ -510,14 +510,24 @@ \subsection{Conservative divergence operator}
\partial_\alpha v_{\alpha} \sim
\frac{r_{n + \frac12}^2 v_r(r_{n + \frac12}) - r_{n - \frac12}^2 v_r(r_{n - \frac12})}{V_n}
\end{align}
where $V_n$ is given by \Eqref{eqn:spherical_shell_volume}.
We obtain different estimates for different finite difference schemes:

\paragraph{Central differences:}
Here, we approximate the function values at the midpoints using a simple average,
\begin{align}
v_r(r_{n + \frac12}) &\approx \frac{v_r(r_{n}) + v_r(r_{n+1})}{2}
\;,
\end{align}
which might introduce uncontrolled artifacts.

\paragraph{Forward difference:}
Use $v_r(r_{n + \frac12}) \approx v_r(r_{n+1})$.

\paragraph{Backward difference:}
Use $v_r(r_{n + \frac12}) \approx v_r(r_{n})$.
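To make the three schemes concrete, the following standalone NumPy sketch evaluates the conservative divergence of a radial field on a uniform grid. It assumes $V_n = (r_{n+\frac12}^3 - r_{n-\frac12}^3)/3$, so the factors $4\pi$ of flux and volume cancel; this only illustrates the formulas above and is not the library implementation:

import numpy as np

def conservative_divergence(v_r, rs, dr, method="central"):
    """Divergence of a radial field; v_r carries one ghost cell per side."""
    r_h = rs + dr / 2  # outer interfaces r_{n+1/2}
    r_l = rs - dr / 2  # inner interfaces r_{n-1/2}
    volumes = (r_h**3 - r_l**3) / 3  # shell volumes; the 4*pi cancels
    inner, center, outer = v_r[:-2], v_r[1:-1], v_r[2:]
    if method == "central":  # average of the two adjacent cell values
        v_h, v_l = (center + outer) / 2, (inner + center) / 2
    elif method == "forward":  # value of the next cell
        v_h, v_l = outer, center
    else:  # backward: value of the previous cell
        v_h, v_l = center, inner
    return (r_h**2 * v_h - r_l**2 * v_l) / volumes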



\subsection{Conservative tensor divergence operator}
The radial component of the divergence of a spherically-symmetric tensor field (with $t_{\theta\theta} = t_{\phi\phi}$) reads
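The equation itself lies outside the diff context shown here; for reference, the standard identity for such a field with vanishing off-diagonal components is
\begin{align}
	\partial_\alpha t_{\alpha r} = \partial_r t_{rr} + \frac{2 \left(t_{rr} - t_{\theta\theta}\right)}{r}
	\;.
\end{align}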
27 changes: 22 additions & 5 deletions pde/grids/operators/spherical_sym.py
@@ -183,7 +183,10 @@ def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:
@SphericalSymGrid.register_operator("divergence", rank_in=1, rank_out=0)
@fill_in_docstring
def make_divergence(
grid: SphericalSymGrid, safe: bool = True, conservative: bool = True
grid: SphericalSymGrid,
safe: bool = True,
conservative: bool = True,
method: Literal["central", "forward", "backward"] = "central",
) -> OperatorType:
"""make a discretized divergence operator for a spherical grid
@@ -203,6 +206,9 @@ def make_divergence(
Flag indicating whether the operator should be conservative (which results
in slightly slower computations). Conservative operators ensure mass
conservation.
method (str):
The method for calculating the derivative. Possible values are 'central',
'forward', and 'backward'.
Returns:
A function that can be applied to an array of values
@@ -234,13 +240,19 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None:

arr_r = arr[0, :]
for i in range(1, dim_r + 1): # iterate radial points
term_h = factor_h[i - 1] * (arr_r[i] + arr_r[i + 1])
term_l = factor_l[i - 1] * (arr_r[i - 1] + arr_r[i])
if method == "central":
term_h = factor_h[i - 1] * (arr_r[i] + arr_r[i + 1])
term_l = factor_l[i - 1] * (arr_r[i - 1] + arr_r[i])
elif method == "forward":
term_h = 2 * factor_h[i - 1] * arr_r[i + 1]
term_l = 2 * factor_l[i - 1] * arr_r[i]
elif method == "backward":
term_h = 2 * factor_h[i - 1] * arr_r[i]
term_l = 2 * factor_l[i - 1] * arr_r[i - 1]
out[i - 1] = term_h - term_l

else:
# implement naive divergence operator
scale_r = 1 / (2 * dr)
factors = 2 / rs # prefactor of the 2 v_r / r term in the spherical divergence

@jit
@@ -255,7 +267,12 @@ def divergence(arr: np.ndarray, out: np.ndarray) -> None:

arr_r = arr[0, :]
for i in range(1, dim_r + 1): # iterate radial points
diff_r = (arr_r[i + 1] - arr_r[i - 1]) * scale_r
if method == "central":
diff_r = (arr_r[i + 1] - arr_r[i - 1]) / (2 * dr)
elif method == "forward":
diff_r = (arr_r[i + 1] - arr_r[i]) / dr
elif method == "backward":
diff_r = (arr_r[i] - arr_r[i - 1]) / dr
out[i - 1] = diff_r + factors[i - 1] * arr_r[i]

return divergence # type: ignore
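A brief usage sketch of the new parameter through the high-level interface, mirroring the calls in the tests below; the grid extent and the field expression are arbitrary illustrative choices:

import pde

grid = pde.SphericalSymGrid(radius=[0, 2], shape=32)
field = pde.VectorField.from_expression(grid, ["r**2", 0, 0])  # radial field
for method in ["central", "forward", "backward"]:
    div = field.divergence(bc="derivative", conservative=True, method=method)
    print(method, div.integral)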
13 changes: 8 additions & 5 deletions tests/grids/operators/test_spherical_operators.py
@@ -38,8 +38,10 @@ def test_findiff_sph():
# test divergence
div = v.divergence(bc=["derivative", "value"], conservative=False)
np.testing.assert_allclose(div.data, [9, 3 + 4 / r1, -6 + 8 / r2])
div = v.divergence(bc="derivative", conservative=False)
np.testing.assert_allclose(div.data, [9, 3 + 4 / r1, 2 + 8 / r2])
div = v.divergence(bc="derivative", method="forward", conservative=False)
np.testing.assert_allclose(div.data, [10, 4 + 4 / r1, 8 / r2])
div = v.divergence(bc="derivative", method="backward", conservative=False)
np.testing.assert_allclose(div.data, [8, 2 + 4 / r1, 4 + 8 / r2])


def test_conservative_sph():
@@ -48,9 +50,10 @@
expr = "1 / cosh((r - 1) * 10)"

# test divergence of vector field
vf = VectorField.from_expression(grid, [expr, 0, 0])
div = vf.divergence(bc="derivative", conservative=True)
assert div.integral == pytest.approx(0, abs=1e-2)
for method in ["central", "forward", "backward"]:
vf = VectorField.from_expression(grid, [expr, 0, 0])
div = vf.divergence(bc="derivative", conservative=True, method=method)
assert div.integral == pytest.approx(0, abs=1e-2)

# test laplacian of scalar field
lap = vf[0].laplace("derivative")
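The conservation asserted in the loop above holds for every scheme because the conservative form telescopes: each interface flux $r_{n+\frac12}^2 v_r(r_{n+\frac12})$ enters once as outflow of shell $n$ and once as inflow of shell $n+1$, whatever midpoint approximation is used, so the volume-weighted sum reduces to the boundary fluxes alone. A minimal NumPy check of this property, independent of the library:

import numpy as np

rs = np.linspace(0.05, 0.95, 10)  # cell centers; innermost interface sits at r = 0
dr = rs[1] - rs[0]
v = np.random.default_rng(0).normal(size=rs.size + 2)  # random field incl. ghost cells

r_h, r_l = rs + dr / 2, rs - dr / 2
volumes = (r_h**3 - r_l**3) / 3
schemes = {
    "central": ((v[1:-1] + v[2:]) / 2, (v[:-2] + v[1:-1]) / 2),
    "forward": (v[2:], v[1:-1]),
    "backward": (v[1:-1], v[:-2]),
}
for name, (v_h, v_l) in schemes.items():
    div = (r_h**2 * v_h - r_l**2 * v_l) / volumes
    total = (div * volumes).sum()  # discrete integral over all shells
    boundary_flux = r_h[-1] ** 2 * v_h[-1] - r_l[0] ** 2 * v_l[0]
    assert np.isclose(total, boundary_flux)  # only boundary terms survive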
