CI: add flake8 #4239

Merged
merged 5 commits on Oct 19, 2020
Changes from 4 commits
23 changes: 22 additions & 1 deletion .github/workflows/code-formatting.yml
@@ -25,7 +25,7 @@ jobs:

python-types:
name: Python static type checking with Pyright
runs-on: ubuntu-18.04
runs-on: ubuntu-20.04

# Timeout: https://stackoverflow.com/a/59076067/4521646
timeout-minutes: 15
@@ -71,3 +71,24 @@ jobs:
- name: Run type checking
run: |
$(npm bin)/pyright --project .pyrightconfig.json

python-pep8:
name: Python formatting PEP8
runs-on: ubuntu-20.04

# Timeout: https://stackoverflow.com/a/59076067/4521646
timeout-minutes: 10
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.7

- name: Install dependencies
run: |
pip install flake8

- name: Run checking
run: |
flake8 .
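
The new job boils down to `pip install flake8` followed by `flake8 .` at the repository root. For reference, a minimal sketch of reproducing the same check locally via flake8's legacy scripting API (assuming default settings; the project's actual flake8 configuration is not part of this diff):

```python
# Rough local equivalent of the CI step above (a sketch, not the project's tooling).
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide()     # picks up setup.cfg / tox.ini if present
report = style_guide.check_files(["."])    # same target as `flake8 .` in the workflow
print(f"flake8 reported {report.total_errors} issue(s)")
```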
6 changes: 4 additions & 2 deletions pytorch_lightning/accelerators/ddp2_accelerator.py
@@ -16,6 +16,8 @@

import torch
import torch.distributed as torch_distrib
from pytorch_lightning.core.lightning import LightningModule

from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.distributed.dist import LightningDistributed
@@ -191,14 +193,14 @@ def ddp_train(self, process_idx, mp_queue, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.

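
The accelerator changes in this and the following files replace the quoted `"LightningModule"` annotations with the class itself and add the corresponding import. The likely reason (an inference, not stated in the diff) is pyflakes' undefined-name check F821, which also resolves names used inside string annotations. A condensed, standalone sketch of the pattern:

```python
from typing import List

from torch.nn.parallel import DistributedDataParallel

# Without this import, pyflakes reports F821 for the annotation below,
# even when it is spelled as the string "LightningModule".
from pytorch_lightning.core.lightning import LightningModule


def configure_ddp(model: LightningModule, device_ids: List[int]) -> DistributedDataParallel:
    # Once the name is imported, the quotes around the annotation are unnecessary.
    return DistributedDataParallel(model, device_ids=device_ids)
```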
6 changes: 3 additions & 3 deletions pytorch_lightning/accelerators/ddp_accelerator.py
@@ -20,7 +20,7 @@
from time import sleep
from typing import Optional
import numpy as np

from pytorch_lightning.core.lightning import LightningModule

from pytorch_lightning import _logger as log
from pytorch_lightning.utilities.distributed import find_free_network_port
@@ -284,14 +284,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.

5 changes: 3 additions & 2 deletions pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py
@@ -15,6 +15,7 @@
import torch
import torch.distributed as torch_distrib
import torch.distributed as dist
from pytorch_lightning.core.lightning import LightningModule

from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning import _logger as log
@@ -177,14 +178,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.

5 changes: 3 additions & 2 deletions pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py
@@ -18,6 +18,7 @@
import torch.distributed as torch_distrib
import torch.distributed as dist
import torch.multiprocessing as mp
from pytorch_lightning.core.lightning import LightningModule

from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator
@@ -210,14 +211,14 @@ def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):
mp_queue.put(results)

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.

@@ -15,6 +15,7 @@
import torch
import torch.distributed as torch_distrib
import torch.distributed as dist
from pytorch_lightning.core.lightning import LightningModule

from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning import _logger as log
@@ -176,14 +177,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.

5 changes: 3 additions & 2 deletions pytorch_lightning/accelerators/ddp_slurm_accelerator.py
@@ -15,6 +15,7 @@
import torch
import torch.distributed as torch_distrib
import torch.distributed as dist
from pytorch_lightning.core.lightning import LightningModule

from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning import _logger as log
@@ -182,14 +183,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.

5 changes: 3 additions & 2 deletions pytorch_lightning/accelerators/ddp_spawn_accelerator.py
@@ -18,6 +18,7 @@
import torch.multiprocessing as mp
import torch.distributed as torch_distrib
import torch.distributed as dist
from pytorch_lightning.core.lightning import LightningModule

from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator
@@ -237,14 +238,14 @@ def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):
mp_queue.put(last_path)

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.

@@ -15,6 +15,7 @@
import torch
import torch.distributed as torch_distrib
import torch.distributed as dist
from pytorch_lightning.core.lightning import LightningModule

from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning import _logger as log
@@ -179,14 +180,14 @@ def ddp_train(self, process_idx, model):
return results

def configure_ddp(
self, model: "LightningModule", device_ids: List[int]
self, model: LightningModule, device_ids: List[int]
) -> DistributedDataParallel:
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return model

def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
def configure_sync_batchnorm(self, model: LightningModule) -> LightningModule:
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.

2 changes: 1 addition & 1 deletion pytorch_lightning/accelerators/horovod_accelerator.py
@@ -152,7 +152,7 @@ def test_step(self, args):
return output

def backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs):
super().backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
super().backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
optimizer.synchronize()

def on_train_epoch_end(self, outputs):
2 changes: 0 additions & 2 deletions pytorch_lightning/callbacks/early_stopping.py
@@ -35,8 +35,6 @@
torch_inf = torch.tensor(np.Inf)




class EarlyStopping(Callback):
r"""
Monitor a validation metric and stop training when it stops improving.
2 changes: 1 addition & 1 deletion pytorch_lightning/core/lightning.py
@@ -233,7 +233,7 @@ def log(
m = f'on_step=True cannot be used on {self._current_fx_name} method'
raise MisconfigurationException(m)

if 'epoch_end' in self._current_fx_name and on_epoch == False:
if 'epoch_end' in self._current_fx_name and on_epoch is False:
m = f'on_epoch cannot be False when called from the {self._current_fx_name} method'
raise MisconfigurationException(m)

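
The `on_epoch == False` comparison above is rewritten as an identity check, which is what pycodestyle's E712 rule expects; a small illustration with a hypothetical flag:

```python
on_epoch = False

# flake8 (pycodestyle E712) flags equality comparisons against booleans:
#     if on_epoch == False: ...
# The identity form used in the diff passes, and `if not on_epoch:` would too.
if on_epoch is False:
    print("on_epoch was explicitly disabled")
```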
6 changes: 4 additions & 2 deletions pytorch_lightning/plugins/apex.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple

from pytorch_lightning.core.lightning import LightningModule
from torch.optim.optimizer import Optimizer
from pytorch_lightning.utilities.distributed import rank_zero_warn
from pytorch_lightning.utilities import AMPType
@@ -65,10 +67,10 @@ def backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs):
def configure_apex(
self,
amp: object,
model: "LightningModule",
model: LightningModule,
optimizers: List[Optimizer],
amp_level: str,
) -> Tuple["LightningModule", List[Optimizer]]:
) -> Tuple[LightningModule, List[Optimizer]]:
r"""
Override to init AMP your own way.
Must return a model and list of optimizers.
4 changes: 1 addition & 3 deletions tests/backends/test_accelerator_connector.py
@@ -310,9 +310,7 @@ def on_fit_start(self, trainer, pl_module):
@mock.patch('torch.cuda.device_count', return_value=0)
def test_custom_accelerator(tmpdir):
class Accel(Accelerator):
def init_ddp_connection(
self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True
) -> None:
def init_ddp_connection(self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True) -> None:
pass

class CB(Callback):
2 changes: 1 addition & 1 deletion tests/loggers/test_mlflow.py
@@ -121,7 +121,7 @@ def test_mlflow_log_dir(client, mlflow, tmpdir):
def test_mlflow_logger_dirs_creation(tmpdir):
""" Test that the logger creates the folders and files in the right place. """
if not importlib.util.find_spec('mlflow'):
pytest.xfail(f"test for explicit file creation requires mlflow dependency to be installed.")
pytest.xfail("test for explicit file creation requires mlflow dependency to be installed.")

assert not os.listdir(tmpdir)
logger = MLFlowLogger('test', save_dir=tmpdir)
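
The stray `f` prefix is dropped above because the string contains no placeholders, which pyflakes reports as F541. A tiny sketch with made-up strings:

```python
name = "mlflow"

plain = "test requires the mlflow dependency to be installed."   # no f-prefix needed
needed = f"missing optional dependency: {name}"                  # placeholder justifies the f-string

# An f-string without any placeholder, as in the removed line, triggers F541.
print(plain, needed)
```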
6 changes: 3 additions & 3 deletions tests/metrics/classification/test_accuracy.py
@@ -93,9 +93,9 @@ def test_accuracy_invalid_shape():
(_multiclass_prob_inputs.preds, _multiclass_prob_inputs.target, _sk_accuracy_multiclass_prob),
(_multiclass_inputs.preds, _multiclass_inputs.target, _sk_accuracy_multiclass),
(
_multidim_multiclass_prob_inputs.preds,
_multidim_multiclass_prob_inputs.target,
_sk_accuracy_multidim_multiclass_prob,
_multidim_multiclass_prob_inputs.preds,
_multidim_multiclass_prob_inputs.target,
_sk_accuracy_multidim_multiclass_prob,
),
(_multidim_multiclass_inputs.preds, _multidim_multiclass_inputs.target, _sk_accuracy_multidim_multiclass),
],
20 changes: 10 additions & 10 deletions tests/metrics/classification/test_f_beta.py
@@ -90,18 +90,18 @@ def _sk_fbeta_multidim_multiclass(preds, target, average='micro', beta=1.0):
(_multiclass_prob_inputs.preds, _multiclass_prob_inputs.target, _sk_fbeta_multiclass_prob, NUM_CLASSES, False),
(_multiclass_inputs.preds, _multiclass_inputs.target, _sk_fbeta_multiclass, NUM_CLASSES, False),
(
_multidim_multiclass_prob_inputs.preds,
_multidim_multiclass_prob_inputs.target,
_sk_fbeta_multidim_multiclass_prob,
NUM_CLASSES,
False,
_multidim_multiclass_prob_inputs.preds,
_multidim_multiclass_prob_inputs.target,
_sk_fbeta_multidim_multiclass_prob,
NUM_CLASSES,
False,
),
(
_multidim_multiclass_inputs.preds,
_multidim_multiclass_inputs.target,
_sk_fbeta_multidim_multiclass,
NUM_CLASSES,
False,
_multidim_multiclass_inputs.preds,
_multidim_multiclass_inputs.target,
_sk_fbeta_multidim_multiclass,
NUM_CLASSES,
False,
),
],
)
20 changes: 10 additions & 10 deletions tests/metrics/classification/test_precision_recall.py
@@ -90,18 +90,18 @@ def _sk_prec_recall_multidim_multiclass(preds, target, sk_fn=precision_score, av
(_multiclass_prob_inputs.preds, _multiclass_prob_inputs.target, _sk_prec_recall_multiclass_prob, NUM_CLASSES, False),
(_multiclass_inputs.preds, _multiclass_inputs.target, _sk_prec_recall_multiclass, NUM_CLASSES, False),
(
_multidim_multiclass_prob_inputs.preds,
_multidim_multiclass_prob_inputs.target,
_sk_prec_recall_multidim_multiclass_prob,
NUM_CLASSES,
False,
_multidim_multiclass_prob_inputs.preds,
_multidim_multiclass_prob_inputs.target,
_sk_prec_recall_multidim_multiclass_prob,
NUM_CLASSES,
False,
),
(
_multidim_multiclass_inputs.preds,
_multidim_multiclass_inputs.target,
_sk_prec_recall_multidim_multiclass,
NUM_CLASSES,
False,
_multidim_multiclass_inputs.preds,
_multidim_multiclass_inputs.target,
_sk_prec_recall_multidim_multiclass,
NUM_CLASSES,
False,
),
],
)
1 change: 0 additions & 1 deletion tests/metrics/test_metric.py
@@ -72,7 +72,6 @@ def test_add_state_persistent():
assert "b" not in a.state_dict()



def test_reset():
class A(Dummy):
pass
2 changes: 1 addition & 1 deletion tests/models/test_hparams.py
@@ -552,7 +552,7 @@ def test_args(tmpdir):
trainer.fit(model)

raw_checkpoint_path = _raw_checkpoint_path(trainer)
with pytest.raises(TypeError, match="__init__\(\) got an unexpected keyword argument 'test'"):
with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'test'"):
SubClassVarArgs.load_from_checkpoint(raw_checkpoint_path)


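
The raw-string prefix added above addresses pycodestyle's W605 warning: `\(` is not a valid escape sequence in a plain string literal, even though it currently passes through unchanged to the regex engine. A quick check of the fixed pattern:

```python
import re

# With the r"" prefix the backslashes stay literal, so the parentheses are
# escaped for the regex engine and flake8 no longer reports W605.
pattern = r"__init__\(\) got an unexpected keyword argument 'test'"
assert re.search(pattern, "TypeError: __init__() got an unexpected keyword argument 'test'")
```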