Merge pull request #2271 from pymor/pre-commit-ci-update-config
[pre-commit.ci] pre-commit autoupdate
sdrave committed May 13, 2024
2 parents 79e9ea7 + dff9f24 commit b2913b5
Showing 10 changed files with 32 additions and 31 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -34,13 +34,13 @@ repos:
     - --autofix
   - id: trailing-whitespace
 - repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: v0.1.9
+  rev: v0.3.5
   hooks:
   - id: ruff
     args:
     - --fix
 - repo: https://github.com/DavidAnson/markdownlint-cli2
-  rev: v0.11.0
+  rev: v0.12.1
   hooks:
   - id: markdownlint-cli2
     args:
17 changes: 9 additions & 8 deletions pyproject.toml
@@ -66,7 +66,7 @@ tests = [
 ]
 # additional dependencies for pyMOR development
 dev = [
-    "ruff==0.1.9", # our linter of choice
+    "ruff==0.3.5", # our linter of choice
     "conda_lock==1.4.0", # needed for make ci_conda_requirements
     "hatch", # build frontend for building and publishing pyMOR images
 ]
@@ -209,6 +209,8 @@ markers = [
 [tool.ruff]
 src = ["src"] # this makes isort behave nicely
 line-length = 120
+
+[tool.ruff.lint]
 select = [
     "F", # Pyflakes
     "W", # pycodestyle warning
@@ -262,7 +264,6 @@ ignore = [
     "N802", # function name should be lowercase
     "N803", # argument name should be lowercase (we use single capital letters everywhere for vectorarrays)
     "N806", # same for variables in function
-    "PGH001", # no builtin eval allowed
     "PT004", # fixture does not return anything, add leading underscore
     "PT011", # pytest.raises(Exception) is too broad
     "PT012", # pytest.raises() block should contain a single simple statement
@@ -278,17 +279,17 @@ ignore = [
     "TD003", # missing issue link on the line following this TODO
 ]

-[tool.ruff.flake8-import-conventions]
+[tool.ruff.lint.flake8-import-conventions]
 banned-from = ["numpy.linalg"] # avoids importing similar routines from numpy.linalg and scipy.linalg

-[tool.ruff.flake8-import-conventions.extend-aliases]
+[tool.ruff.lint.flake8-import-conventions.extend-aliases]
 scipy = "" # don't import scipy directly
 "scipy.linalg" = "spla"

-[tool.ruff.flake8-quotes]
+[tool.ruff.lint.flake8-quotes]
 inline-quotes = "single"

-[tool.ruff.per-file-ignores]
+[tool.ruff.lint.per-file-ignores]
 "__init__.py" = ["F401"] # module imported but unused
 "docs/source/try_on_binder.py" = ["N801"] # class name CapWords convention
 "src/pymor/algorithms/genericsolvers.py" = ["TD001"] # invalid TODO tag (XXX)
@@ -297,10 +298,10 @@ inline-quotes = "single"
 "src/pymor/basic.py" = ["F401"] # ununsed imports
 "src/pymordemos/*" = ["F403", "F405"] # undefined import due to pymor.basic functionality

-[tool.ruff.pycodestyle]
+[tool.ruff.lint.pycodestyle]
 max-doc-length = 100

-[tool.ruff.pydocstyle]
+[tool.ruff.lint.pydocstyle]
 convention = "numpy"

 [tool.tomlsort]
2 changes: 1 addition & 1 deletion src/pymor/algorithms/eigs.py
@@ -200,7 +200,7 @@ def _arnoldi(A, l, b, complex_evp):
     """Compute an Arnoldi factorization."""
     v = b * (1 / b.norm()[0])

-    H = np.zeros((l, l), dtype=np.complex_ if complex_evp else np.float_)
+    H = np.zeros((l, l), dtype=np.complex128 if complex_evp else np.float64)
     V = A.source.empty(reserve=l)

     V.append(v)
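
Note: np.float_ and np.complex_ are legacy aliases that NumPy 2.0 removes; this hunk (and the matching ones in iosys.py, interpolation.py, loewner.py and strategies.py below) switches to the explicit fixed-width names. A minimal illustrative sketch, not taken from this commit:

    import numpy as np

    # The removed aliases map directly onto the fixed-width scalar types:
    #   np.float_   -> np.float64
    #   np.complex_ -> np.complex128
    H = np.zeros((4, 4), dtype=np.complex128)  # previously dtype=np.complex_
    x = np.zeros(4, dtype=np.float64)          # previously dtype=np.float_
    assert H.dtype == np.complex128 and x.dtype == np.float64
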
2 changes: 1 addition & 1 deletion src/pymor/algorithms/genericsolvers.py
@@ -937,7 +937,7 @@ def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
         if (normA * normr) != 0:
             test2 = normar / (normA * normr)
         else:
-            test2 = np.infty
+            test2 = np.inf
         test3 = 1 / condA
         t1 = test1 / (1 + normA * normx / normb)
         rtol = btol + atol * normA * normx / normb
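
Note: np.infty is another alias dropped in NumPy 2.0; np.inf is the canonical spelling. A tiny sketch of the guard above, with made-up values, for illustration only:

    import numpy as np

    # Fall back to an infinite stopping quantity when the denominator vanishes,
    # mirroring the lsmr code above.
    normA, normr, normar = 2.0, 0.0, 1.0
    test2 = normar / (normA * normr) if (normA * normr) != 0 else np.inf  # was np.infty
    assert test2 == np.inf
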
2 changes: 1 addition & 1 deletion src/pymor/core/cache.py
@@ -290,7 +290,7 @@ def enable_caching(self, region, cache_id=None):
             self.__dict__['cache_region'] = None
         else:
             self.__dict__['cache_region'] = region
-            r = cache_regions.get(region, None)
+            r = cache_regions.get(region)
             if r and r.persistent and cache_id is None:
                 raise ValueError('For persistent CacheRegions a cache_id has to be specified.')

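
Note: dict.get already returns None for a missing key, so the explicit None default was redundant. A small sketch of the idiom with hypothetical region names:

    cache_regions = {'memory': object()}

    r = cache_regions.get('disk')                # None, no KeyError raised
    assert r is cache_regions.get('disk', None)  # both spellings are equivalent
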
2 changes: 1 addition & 1 deletion src/pymor/models/iosys.py
@@ -568,7 +568,7 @@ def from_mat_file(cls, file_name, sampling_time=0, T=None, time_stepper=None, nu
         for i in range(len(matrices)):
             mat = matrices[i]
             if mat is not None and np.issubdtype(mat.dtype, np.integer):
-                matrices[i] = mat.astype(np.float_)
+                matrices[i] = mat.astype(np.float64)

         return cls.from_matrices(*matrices, sampling_time=sampling_time, T=T, time_stepper=time_stepper,
                                  num_values=num_values, presets=presets, state_id=state_id,
10 changes: 5 additions & 5 deletions src/pymor/reductors/interpolation.py
@@ -345,10 +345,10 @@ def reduce(self, sigma, b, c):
         c = c * (1 / np.linalg.norm(c)) if c.shape[1] > 1 else np.ones((r, 1))

         # matrices of the interpolatory LTI system
-        Er = np.empty((r, r), dtype=np.complex_)
-        Ar = np.empty((r, r), dtype=np.complex_)
-        Br = np.empty((r, self.fom.dim_input), dtype=np.complex_)
-        Cr = np.empty((self.fom.dim_output, r), dtype=np.complex_)
+        Er = np.empty((r, r), dtype=np.complex128)
+        Ar = np.empty((r, r), dtype=np.complex128)
+        Br = np.empty((r, self.fom.dim_input), dtype=np.complex128)
+        Cr = np.empty((self.fom.dim_output, r), dtype=np.complex128)

         Hs = [self.fom.eval_tf(s, mu=self.mu) for s in sigma]
         dHs = [self.fom.eval_dtf(s, mu=self.mu) for s in sigma]
@@ -366,7 +366,7 @@ def reduce(self, sigma, b, c):
             Cr[:, i] = Hs[i] @ b[i]

         # transform the system to have real matrices
-        T = np.zeros((r, r), dtype=np.complex_)
+        T = np.zeros((r, r), dtype=np.complex128)
         for i in range(r):
             if sigma[i].imag == 0:
                 T[i, i] = 1
12 changes: 6 additions & 6 deletions src/pymor/reductors/loewner.py
@@ -264,10 +264,10 @@ def loewner_quadruple(self):
             rtd = self.mimo_handling[1]
             assert ltd.shape == (len(ip), self.dim_output)
             assert rtd.shape == (self.dim_input, len(jp))
-        L = np.empty((len(ip), len(jp)), dtype=np.complex_)
-        Ls = np.empty((len(ip), len(jp)), dtype=np.complex_)
-        V = np.empty((len(ip), self.dim_input), dtype=np.complex_)
-        W = np.empty((self.dim_output, len(jp)), dtype=np.complex_)
+        L = np.empty((len(ip), len(jp)), dtype=np.complex128)
+        Ls = np.empty((len(ip), len(jp)), dtype=np.complex128)
+        V = np.empty((len(ip), self.dim_input), dtype=np.complex128)
+        W = np.empty((self.dim_output, len(jp)), dtype=np.complex128)
         for i, si in enumerate(ip):
             for j, sj in enumerate(jp):
                 L[i, j] = ltd[i] @ (self.Hs[si] - self.Hs[sj]) @ rtd[:, j] / (self.s[si] - self.s[sj])
@@ -279,7 +279,7 @@ def loewner_quadruple(self):

         # transform the system to have real matrices
         if self.conjugate:
-            TL = np.zeros((len(ip), len(ip)), dtype=np.complex_)
+            TL = np.zeros((len(ip), len(ip)), dtype=np.complex128)
             for i, si in enumerate(ip):
                 if self.s[si].imag == 0:
                     TL[i, i] = 1
@@ -291,7 +291,7 @@
                     TL[j, i] = -1j
                     TL[j, j] = 1j

-            TR = np.zeros((len(jp), len(jp)), dtype=np.complex_)
+            TR = np.zeros((len(jp), len(jp)), dtype=np.complex128)
             for i, si in enumerate(jp):
                 if self.s[si].imag == 0:
                     TR[i, i] = 1
10 changes: 5 additions & 5 deletions src/pymor/reductors/neural_network.py
@@ -1083,14 +1083,14 @@ def prepare_datum(datum):
     training_data = [(prepare_datum(datum[0]), prepare_datum(datum[1])) for datum in training_data]
     validation_data = [(prepare_datum(datum[0]), prepare_datum(datum[1])) for datum in validation_data]

-    optimizer = optim.LBFGS if 'optimizer' not in training_parameters else training_parameters['optimizer']
-    epochs = 1000 if 'epochs' not in training_parameters else training_parameters['epochs']
+    optimizer = training_parameters.get('optimizer', optim.LBFGS)
+    epochs = training_parameters.get('epochs', 1000)
     assert isinstance(epochs, int)
     assert epochs > 0
-    batch_size = 20 if 'batch_size' not in training_parameters else training_parameters['batch_size']
+    batch_size = training_parameters.get('batch_size', 20)
     assert isinstance(batch_size, int)
     assert batch_size > 0
-    learning_rate = 1. if 'learning_rate' not in training_parameters else training_parameters['learning_rate']
+    learning_rate = training_parameters.get('learning_rate', 1.0)
     assert learning_rate > 0.
     loss_function = (nn.MSELoss() if (training_parameters.get('loss_function') is None)
                      else training_parameters['loss_function'])
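
Note: the "default if key not in dict else dict[key]" expressions are replaced by dict.get with an explicit default, which looks the key up once and falls back when it is absent; the same .get pattern handles the lr_scheduler check in the next hunk. A sketch with made-up parameter values:

    training_parameters = {'epochs': 500}

    # Equivalent to: 1000 if 'epochs' not in training_parameters else training_parameters['epochs']
    epochs = training_parameters.get('epochs', 1000)        # 500, key present
    batch_size = training_parameters.get('batch_size', 20)  # 20, fallback default
    assert (epochs, batch_size) == (500, 20)
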
@@ -1208,7 +1208,7 @@ def closure(inputs=inputs, targets=targets):
             if log_loss_frequency > 0 and epoch % log_loss_frequency == 0:
                 logger.info(f'Epoch {epoch}: Current {phase} loss of {losses[phase]:.3e}')

-            if 'lr_scheduler' in training_parameters and training_parameters['lr_scheduler']:
+            if training_parameters.get('lr_scheduler'):
                 lr_scheduler.step()

             # check for early stopping
2 changes: 1 addition & 1 deletion src/pymortests/strategies.py
@@ -456,7 +456,7 @@ def base_vector_arrays(draw, count=1, dtype=None, max_dim=100):
     -------
     A list of |VectorArray| linear-independent objects of same dimension and length.
     """
-    dtype = dtype or np.float_
+    dtype = dtype or np.float64
     # simplest way currently of getting a |VectorSpace| to construct our new arrays from
     space_types = _picklable_vector_space_types + _other_vector_space_types
     space = draw(vector_arrays(count=1, dtype=dtype, length=hyst.just((1,)), compatible=True, space_types=space_types)
