add isort support with black profile #4136

Merged · 22 commits · Aug 19, 2022

Changes from 20 commits
2 changes: 1 addition & 1 deletion .circleci/config.yml
@@ -1009,7 +1009,7 @@ workflows:
- tox-base:
name: "code-check"
python_version_minor: 8
toxenv: "protocheck,generatecheck,codecovcheck,mypy,mypy-report,pyupgrade,black,flake8,docstrings"
toxenv: "protocheck,generatecheck,codecovcheck,mypy,mypy-report,pyupgrade,black,isort-check,flake8,docstrings"
#
# Unit tests with pytest on Linux, using real wandb server
#
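Note: the "code-check" job above now runs an isort-check tox environment alongside black and flake8. The tox environment itself lives in tox.ini, which is not part of this diff; as a rough, hypothetical stand-in you could reproduce the check locally by running isort in check-only mode from the repository root, where it picks up the [tool.isort] settings added to pyproject.toml below:

```python
# Hypothetical local stand-in for the CI "isort-check" step (not the actual
# tox environment from tox.ini): run isort in check mode over the repo and
# exit non-zero if any file would be re-sorted. Assumes isort>=5 is installed.
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "-m", "isort", "--check-only", "--diff", "."],
    check=False,
)
sys.exit(result.returncode)
```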
14 changes: 9 additions & 5 deletions pyproject.toml
@@ -2,20 +2,24 @@
line-length = 88
include = '\.pyi?$'
exclude = '''
wandb/vendor/
| wandb/old/
wandb/vendor/
| wandb/bin/
| wandb/proto/
| wandb/proto_check/
| wandb/sdk/launch/deploys/
| wandb/run*
| wandb/offline-run*
| tests/fixtures/
| tests/logs/
| tests/notebooks/
| __pycache__
| .pyc
'''

[tool.isort]
profile = "black"
skip = ["wandb/__init__.py"]
extend_skip_glob = [
"wandb/vendor/**",
"wandb/proto/*.py",
"wandb/proto_check/*.py",
"tests/**",
"tools/**",
]
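For readers unfamiliar with it, isort's black profile matches Black's formatting conventions (88-character lines, parenthesized multi-line imports, trailing commas) while keeping isort's default ordering within each section: plain import statements first, then from-imports, with imported names alphabetized. A minimal sketch using isort's Python API (assuming isort>=5; illustrative only, not part of this PR) previews the kind of rewrite seen throughout the test files below:

```python
# Illustrative sketch (not part of this PR): preview how isort's "black"
# profile reorders a third-party import block. Assumes isort>=5 is installed
# and that isort classifies both packages as third-party here (which is what
# tests/.isort.cfg pins down for the test tree, see below).
import isort

before = (
    "from catboost import CatBoostClassifier, datasets, Pool\n"
    "import wandb\n"
    "from wandb.catboost import log_summary, WandbCallback\n"
)

print(isort.code(before, profile="black"))
# Plain imports come first, then from-imports, with names alphabetized:
#   import wandb
#   from catboost import CatBoostClassifier, Pool, datasets
#   from wandb.catboost import WandbCallback, log_summary
```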
3 changes: 3 additions & 0 deletions tests/.isort.cfg
@@ -0,0 +1,3 @@
[settings]
profile=black
known_third_party=wandb
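The per-directory config matters because, when run from inside this repository, isort would presumably detect wandb as a first-party package and split `import wandb` into its own section in every test file; pinning known_third_party=wandb keeps it in the third-party block, which is the layout all of the hunks below show. A small illustrative check (assuming isort>=5; not part of this PR):

```python
# Illustrative only (not part of this PR), assuming isort>=5: ask isort where
# it would place the wandb module, then sort a typical test-style import block
# with the same settings as tests/.isort.cfg.
import isort

# Placement depends on context; inside the wandb source tree this can come
# back as "FIRSTPARTY", which is what known_third_party=wandb overrides.
print(isort.place_module("wandb"))

block = "import pytest\nimport wandb\nimport numpy as np\n"
print(isort.code(block, profile="black", known_third_party=["wandb"]))
# -> import numpy as np
#    import pytest
#    import wandb
```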
4 changes: 2 additions & 2 deletions tests/functional_tests/t0_main/catboost/t1_regression.py
@@ -1,9 +1,9 @@
#!/usr/bin/env python
"""Test CatBoost integration."""

from catboost import CatBoostClassifier, datasets, Pool
import wandb
from wandb.catboost import log_summary, WandbCallback
from catboost import CatBoostClassifier, Pool, datasets
from wandb.catboost import WandbCallback, log_summary

train_df, _ = datasets.msrank_10k()
X, Y = train_df[train_df.columns[1:]], train_df[train_df.columns[0]]
1 change: 0 additions & 1 deletion tests/functional_tests/t0_main/console/test_tqdm_nested.py
@@ -4,7 +4,6 @@
import tqdm
import wandb


run = wandb.init()
wandb.log(dict(this=2))
print("before progress")
1 change: 0 additions & 1 deletion tests/functional_tests/t0_main/debug/t3_sentry_userproc.py
@@ -3,7 +3,6 @@

import wandb


with mock.patch(
"wandb.sdk.wandb_init._WandbInit.init", mock.Mock(side_effect=Exception("injected"))
):
3 changes: 1 addition & 2 deletions tests/functional_tests/t0_main/fastai/t1_v1.py
@@ -1,8 +1,7 @@
from fastai.vision import * # noqa: F403
import wandb
from fastai.vision import * # noqa: F403
from wandb.fastai import WandbCallback


wandb.init()

path = untar_data(URLs.MNIST_SAMPLE) # noqa: F405
1 change: 0 additions & 1 deletion tests/functional_tests/t0_main/imports/10-batch10.py
@@ -53,7 +53,6 @@
import wandb
import zenml # noqa: F401


run = wandb.init()
wandb.log(dict(loss=1))
run.finish()
1 change: 0 additions & 1 deletion tests/functional_tests/t0_main/jax/01-log-bfloat16.py
@@ -4,7 +4,6 @@
import jax.numpy as jnp
import wandb


if __name__ == "__main__":
run = wandb.init()
m1 = jnp.array(1.0, dtype=jnp.float32)
@@ -3,7 +3,6 @@
import wandb
from wandb.keras import WandbCallback


np.random.seed(42)
x = np.random.randint(255, size=(100, 28, 28, 1))
y = np.random.randint(10, size=(100,))
@@ -2,7 +2,6 @@
import tensorflow as tf
import wandb


dftrain = pd.read_csv("https://storage.googleapis.com/tf-datasets/titanic/train.csv")
y_train = dftrain.pop("survived")
dftrain = dftrain[["sex", "class", "age", "fare", "n_siblings_spouses", "parch"]]
@@ -1,7 +1,7 @@
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
import wandb
from tensorflow.keras.callbacks import TensorBoard
from wandb.keras import WandbCallback

wandb.tensorboard.patch(root_logdir="logs")
16 changes: 9 additions & 7 deletions tests/functional_tests/t0_main/kfp/kfp-pipeline-helper.py
@@ -3,8 +3,8 @@
from typing import NamedTuple

import kfp
from kfp import components
import kfp.dsl as dsl
from kfp import components
from kubernetes.client.models import V1EnvVar
from wandb_probe import wandb_probe_package

@@ -30,11 +30,12 @@ def preprocess_data(
mlpipeline_ui_metadata_path: components.OutputPath(),
seed: int = 1337,
):
import json

import numpy as np
import wandb
from sklearn import datasets
from sklearn.model_selection import train_test_split
import wandb
import json

def add_wandb_visualization(run, mlpipeline_ui_metadata_path):
"""NOTE: To use this, you must modify your component to have an output called `mlpipeline_ui_metadata_path` AND call `wandb.init` yourself inside that component.
@@ -91,11 +92,12 @@ def train_model(
model_path: components.OutputPath("sklearn_model"), # noqa: F821
mlpipeline_ui_metadata_path: components.OutputPath(),
):
import json

import joblib
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import wandb
import json
from sklearn.ensemble import RandomForestClassifier

def add_wandb_visualization(run, mlpipeline_ui_metadata_path):
"""NOTE: To use this, you must modify your component to have an output called `mlpipeline_ui_metadata_path` AND call `wandb.init` yourself inside that component.
@@ -148,14 +150,14 @@ def test_model(
) -> NamedTuple(
"Output", [("accuracy", float), ("precision", float), ("recall", float)]
):
import json
from collections import namedtuple

import joblib
import numpy as np
import wandb
from sklearn.ensemble import RandomForestClassifier # noqa: F401
from sklearn.metrics import accuracy_score, precision_score, recall_score
import wandb
import json

def add_wandb_visualization(run, mlpipeline_ui_metadata_path):
"""NOTE: To use this, you must modify your component to have an output called `mlpipeline_ui_metadata_path` AND call `wandb.init` yourself inside that component.
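Worth noting in the hunks above: isort also rewrites the import blocks nested inside the KFP component functions, moving the standard-library import (json) ahead of the third-party imports and separating the two sections with a blank line. A rough top-level illustration of that section grouping (assuming isort>=5; not part of this PR):

```python
# Illustrative sketch (not part of this PR): isort groups stdlib imports ahead
# of third-party imports and separates the sections with a blank line, which
# is the reshuffling applied inside preprocess_data/train_model/test_model above.
import isort

block = (
    "import numpy as np\n"
    "import wandb\n"
    "from sklearn.model_selection import train_test_split\n"
    "import json\n"
)

print(isort.code(block, profile="black", known_third_party=["wandb"]))
# -> import json
#
#    import numpy as np
#    import wandb
#    from sklearn.model_selection import train_test_split
```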
4 changes: 2 additions & 2 deletions tests/functional_tests/t0_main/kfp/kfp-pipeline-pytorch.py
@@ -2,10 +2,10 @@
import random

import kfp
from kfp import components
import kfp.dsl as dsl
from kubernetes.client.models import V1EnvVar
import wandb
from kfp import components
from kubernetes.client.models import V1EnvVar
from wandb.integration.kfp import wandb_log
from wandb_probe import wandb_probe_package

2 changes: 1 addition & 1 deletion tests/functional_tests/t0_main/kfp/kfp-pipeline-simple.py
@@ -2,8 +2,8 @@
import random

import kfp
from kfp import components
import kfp.dsl as dsl
from kfp import components
from kubernetes.client.models import V1EnvVar
from wandb.integration.kfp import wandb_log
from wandb_probe import wandb_probe_package
2 changes: 1 addition & 1 deletion tests/functional_tests/t0_main/kfp/kfp-pipeline-sklearn.py
@@ -3,8 +3,8 @@
from typing import NamedTuple

import kfp
from kfp import components
import kfp.dsl as dsl
from kfp import components
from kubernetes.client.models import V1EnvVar
from wandb.integration.kfp import wandb_log
from wandb_probe import wandb_probe_package
2 changes: 1 addition & 1 deletion tests/functional_tests/t0_main/lightning/pl_base.py
@@ -1,5 +1,5 @@
from pytorch_lightning import LightningModule
import torch
from pytorch_lightning import LightningModule
from torch.utils.data import Dataset


@@ -1,8 +1,8 @@
#!/usr/bin/env python
import os

from pl_base import BoringModel, RandomDataset
import pytorch_lightning as pl
from pl_base import BoringModel, RandomDataset
from pytorch_lightning.loggers import WandbLogger
from torch.utils.data import DataLoader

2 changes: 1 addition & 1 deletion tests/functional_tests/t0_main/lightning/train_gpu_ddp.py
@@ -3,11 +3,11 @@
import os
import pathlib

import wandb
from pl_base import BoringModel, RandomDataset
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger
from torch.utils.data import DataLoader
import wandb


def main():
2 changes: 1 addition & 1 deletion tests/functional_tests/t0_main/lightning/train_tpu_ddp.py
@@ -2,11 +2,11 @@

import os

import wandb
from pl_base import BoringModel, RandomDataset
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger
from torch.utils.data import DataLoader
import wandb


def main():
1 change: 0 additions & 1 deletion tests/functional_tests/t0_main/login/t1_timeout.py
@@ -5,7 +5,6 @@

import wandb


timeout = 4
slop = 0.50
tm_start = time.time()
@@ -5,7 +5,6 @@
import wandb
import wandb.errors


if __name__ == "__main__":
# api_key starts with "local", but base_url points to cloud
with pytest.raises(wandb.errors.UsageError) as e:
1 change: 0 additions & 1 deletion tests/functional_tests/t0_main/magic/t1_mnist_convnet.py
@@ -23,7 +23,6 @@
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

from wandb import magic # noqa: F401

# Make sure this is reproducible
@@ -5,13 +5,12 @@
import os

import pandas as pd
from metaflow import FlowSpec, Parameter, step
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from wandb.integration.metaflow import wandb_log

from metaflow import FlowSpec, Parameter, step

os.environ["WANDB_SILENT"] = "true"
os.environ["METAFLOW_USER"] = "test_user"

@@ -5,13 +5,12 @@
import os

import pandas as pd
from metaflow import FlowSpec, Parameter, step
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from wandb.integration.metaflow import wandb_log

from metaflow import FlowSpec, Parameter, step

os.environ["WANDB_SILENT"] = "true"
os.environ["METAFLOW_USER"] = "test_user"

@@ -5,13 +5,12 @@
import os

import pandas as pd
from metaflow import FlowSpec, Parameter, step
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from wandb.integration.metaflow import wandb_log

from metaflow import FlowSpec, Parameter, step

os.environ["WANDB_SILENT"] = "true"
os.environ["METAFLOW_USER"] = "test_user"

5 changes: 2 additions & 3 deletions tests/functional_tests/t0_main/metaflow/wandb-foreach-flow.py
@@ -5,16 +5,15 @@
import os

import pandas as pd
from metaflow import FlowSpec, Parameter, step
from sklearn.ensemble import ( # noqa: F401
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestClassifier,
)
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from wandb.integration.metaflow import wandb_log

from metaflow import FlowSpec, Parameter, step

os.environ["WANDB_SILENT"] = "true"
os.environ["METAFLOW_USER"] = "test_user"

5 changes: 2 additions & 3 deletions tests/functional_tests/t0_main/metaflow/wandb-pytorch-flow.py
@@ -9,13 +9,12 @@
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import wandb
from metaflow import FlowSpec, Parameter, step
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from wandb.integration.metaflow import wandb_log

import wandb
from metaflow import FlowSpec, Parameter, step

os.environ["WANDB_SILENT"] = "true"
os.environ["METAFLOW_USER"] = "test_user"

@@ -3,9 +3,9 @@
example usage of `run.log` with user provide step greater than the internal step"""


from contextlib import redirect_stderr
import io
import multiprocessing as mp
from contextlib import redirect_stderr

import wandb
import yea
@@ -2,9 +2,9 @@
"""Test parent and child processes sharing a run. Compare to a run in a single process.
example usage of `run.log` with user provide step less than the internal step"""

from contextlib import redirect_stderr
import io
import multiprocessing as mp
from contextlib import redirect_stderr

import wandb
import yea
@@ -10,7 +10,6 @@
import tensorflow as tf
import wandb


parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", type=str, help="Where to store tensorboard files")
args = parser.parse_args()
@@ -2,8 +2,8 @@
"""Simple example of using ThreadPoolExecutor with service.
This example is base on issue https://wandb.atlassian.net/browse/WB-8733
"""
from concurrent.futures import ThreadPoolExecutor
import multiprocessing as mp
from concurrent.futures import ThreadPoolExecutor

import wandb
import yea