STY Uses black's target_version >= 3.7 #20294

Merged
2 commits merged on Jun 17, 2021
1 change: 1 addition & 0 deletions pyproject.toml
@@ -19,6 +19,7 @@ requires = [

[tool.black]
line-length = 88
+target_version = ['py37', 'py38', 'py39']
Member: Is there a way to get that from setup.py metadata?

Member: I think the idea is for pyproject.toml to be static, to avoid all the issues that come with dynamic behavior in setup.py. It is indeed slightly less than ideal to have to change this for each Python version, but if you read psf/black#751, in the end, as long as py37 is in there, the rest has no effect: there are no changes in formatting for Python 3.7+.
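To make the trailing-comma behavior concrete, here is a minimal, hypothetical sketch (not code from this PR; the setup() arguments are stand-ins, and the expected output is an inference from psf/black#751). Black only adds a trailing comma after the last argument of an exploded call or signature when every targeted interpreter can parse it: a trailing comma after **kwargs in a call requires Python 3.5+, and one after * or ** entries in a def requires Python 3.6+. Without an explicit target_version, black autodetects targets from the source and stays conservative, which is why pinning py37 changes the formatting below.

import black

# A call like the one this PR touches in setup.py, shortened so black
# has to explode it across several lines.
src = 'setup(name="example", python_requires=">=3.7", **extra_setuptools_args)\n'

# No target_versions given: black guesses the oldest plausible targets
# and leaves the comma after **extra_setuptools_args out.
conservative = black.format_str(src, mode=black.Mode(line_length=40))

# Pinning py37 tells black the trailing comma after **kwargs is safe.
pinned = black.format_str(
    src,
    mode=black.Mode(line_length=40, target_versions={black.TargetVersion.PY37}),
)

print(conservative)  # last argument: **extra_setuptools_args
print(pinned)        # last argument: **extra_setuptools_args,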

exclude = '''
/(
\.eggs # exclude a few common directories in the
2 changes: 1 addition & 1 deletion setup.py
@@ -280,7 +280,7 @@ def setup_package():
python_requires=">=3.7",
install_requires=min_deps.tag_to_packages["install"],
package_data={"": ["*.pxd"]},
-**extra_setuptools_args
+**extra_setuptools_args,
)

commands = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
4 changes: 2 additions & 2 deletions sklearn/cluster/_affinity_propagation.py
@@ -42,7 +42,7 @@ def affinity_propagation(
copy=True,
verbose=False,
return_n_iter=False,
-random_state=None
+random_state=None,
):
"""Perform Affinity Propagation Clustering of data.

@@ -385,7 +385,7 @@ def __init__(
preference=None,
affinity="euclidean",
verbose=False,
-random_state=None
+random_state=None,
):

self.damping = damping
6 changes: 3 additions & 3 deletions sklearn/cluster/_agglomerative.py
@@ -853,7 +853,7 @@ def __init__(
compute_full_tree="auto",
linkage="ward",
distance_threshold=None,
-compute_distances=False
+compute_distances=False,
):
self.n_clusters = n_clusters
self.distance_threshold = distance_threshold
@@ -953,7 +953,7 @@ def fit(self, X, y=None):
connectivity=connectivity,
n_clusters=n_clusters,
return_distance=return_distance,
-**kwargs
+**kwargs,
)
(self.children_, self.n_connected_components_, self.n_leaves_, parents) = out[
:4
@@ -1141,7 +1141,7 @@ def __init__(
linkage="ward",
pooling_func=np.mean,
distance_threshold=None,
-compute_distances=False
+compute_distances=False,
):
super().__init__(
n_clusters=n_clusters,
4 changes: 2 additions & 2 deletions sklearn/cluster/_bicluster.py
@@ -309,7 +309,7 @@ def __init__(
mini_batch=False,
init="k-means++",
n_init=10,
-random_state=None
+random_state=None,
):
super().__init__(
n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
@@ -461,7 +461,7 @@ def __init__(
mini_batch=False,
init="k-means++",
n_init=10,
-random_state=None
+random_state=None,
):
super().__init__(
n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
2 changes: 1 addition & 1 deletion sklearn/cluster/_birch.py
@@ -463,7 +463,7 @@ def __init__(
branching_factor=50,
n_clusters=3,
compute_labels=True,
-copy=True
+copy=True,
):
self.threshold = threshold
self.branching_factor = branching_factor
4 changes: 2 additions & 2 deletions sklearn/cluster/_dbscan.py
@@ -31,7 +31,7 @@ def dbscan(
leaf_size=30,
p=2,
sample_weight=None,
-n_jobs=None
+n_jobs=None,
):
"""Perform DBSCAN clustering from vector array or distance matrix.

@@ -301,7 +301,7 @@ def __init__(
algorithm="auto",
leaf_size=30,
p=None,
-n_jobs=None
+n_jobs=None,
):
self.eps = eps
self.min_samples = min_samples
4 changes: 2 additions & 2 deletions sklearn/cluster/_mean_shift.py
@@ -116,7 +116,7 @@ def mean_shift(
min_bin_freq=1,
cluster_all=True,
max_iter=300,
-n_jobs=None
+n_jobs=None,
):
"""Perform mean shift clustering of data using a flat kernel.

@@ -380,7 +380,7 @@ def __init__(
min_bin_freq=1,
cluster_all=True,
n_jobs=None,
-max_iter=300
+max_iter=300,
):
self.bandwidth = bandwidth
self.seeds = seeds
2 changes: 1 addition & 1 deletion sklearn/compose/_target.py
@@ -122,7 +122,7 @@ def __init__(
transformer=None,
func=None,
inverse_func=None,
-check_inverse=True
+check_inverse=True,
):
self.regressor = regressor
self.transformer = transformer
2 changes: 1 addition & 1 deletion sklearn/covariance/_elliptic_envelope.py
@@ -132,7 +132,7 @@ def __init__(
assume_centered=False,
support_fraction=None,
contamination=0.1,
-random_state=None
+random_state=None,
):
super().__init__(
store_precision=store_precision,
6 changes: 3 additions & 3 deletions sklearn/covariance/_graph_lasso.py
@@ -87,7 +87,7 @@ def graphical_lasso(
verbose=False,
return_costs=False,
eps=np.finfo(np.float64).eps,
-return_n_iter=False
+return_n_iter=False,
):
"""l1-penalized covariance estimator

@@ -408,7 +408,7 @@ def __init__(
enet_tol=1e-4,
max_iter=100,
verbose=False,
-assume_centered=False
+assume_centered=False,
):
super().__init__(assume_centered=assume_centered)
self.alpha = alpha
@@ -758,7 +758,7 @@ def __init__(
mode="cd",
n_jobs=None,
verbose=False,
-assume_centered=False
+assume_centered=False,
):
super().__init__(
mode=mode,
2 changes: 1 addition & 1 deletion sklearn/covariance/_robust_covariance.py
@@ -687,7 +687,7 @@ def __init__(
store_precision=True,
assume_centered=False,
support_fraction=None,
-random_state=None
+random_state=None,
):
self.store_precision = store_precision
self.assume_centered = assume_centered
2 changes: 1 addition & 1 deletion sklearn/datasets/_base.py
@@ -93,7 +93,7 @@ def load_files(
shuffle=True,
encoding=None,
decode_error="strict",
-random_state=0
+random_state=0,
):
"""Load text files with categories as subfolder names.

4 changes: 2 additions & 2 deletions sklearn/datasets/_lfw.py
@@ -232,7 +232,7 @@ def fetch_lfw_people(
color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True,
-return_X_y=False
+return_X_y=False,
):
"""Load the Labeled Faces in the Wild (LFW) people dataset \
(classification).
@@ -413,7 +413,7 @@ def fetch_lfw_pairs(
resize=0.5,
color=False,
slice_=(slice(70, 195), slice(78, 172)),
-download_if_missing=True
+download_if_missing=True,
):
"""Load the Labeled Faces in the Wild (LFW) pairs dataset (classification).

2 changes: 1 addition & 1 deletion sklearn/datasets/_olivetti_faces.py
@@ -41,7 +41,7 @@ def fetch_olivetti_faces(
shuffle=False,
random_state=0,
download_if_missing=True,
-return_X_y=False
+return_X_y=False,
):
"""Load the Olivetti faces data-set from AT&T (classification).

4 changes: 2 additions & 2 deletions sklearn/datasets/_openml.py
@@ -551,7 +551,7 @@ def _download_data_to_bunch(
data_columns: List[int],
target_columns: List,
shape: Optional[Tuple[int, int]],
-md5_checksum: str
+md5_checksum: str,
):
"""Download OpenML ARFF and convert to Bunch of data"""
# NB: this function is long in order to handle retry for any failure
@@ -724,7 +724,7 @@ def fetch_openml(
target_column: Optional[Union[str, List]] = "default-target",
cache: bool = True,
return_X_y: bool = False,
-as_frame: Union[str, bool] = "auto"
+as_frame: Union[str, bool] = "auto",
):
"""Fetch dataset from openml by name or dataset id.

2 changes: 1 addition & 1 deletion sklearn/datasets/_rcv1.py
@@ -92,7 +92,7 @@ def fetch_rcv1(
download_if_missing=True,
random_state=None,
shuffle=False,
-return_X_y=False
+return_X_y=False,
):
"""Load the RCV1 multilabel dataset (classification).

4 changes: 2 additions & 2 deletions sklearn/datasets/_svmlight_format_io.py
@@ -48,7 +48,7 @@ def load_svmlight_file(
zero_based="auto",
query_id=False,
offset=0,
-length=-1
+length=-1,
):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix

@@ -225,7 +225,7 @@ def load_svmlight_files(
zero_based="auto",
query_id=False,
offset=0,
-length=-1
+length=-1,
):
"""Load dataset from multiple files in SVMlight format

2 changes: 1 addition & 1 deletion sklearn/decomposition/_factor_analysis.py
@@ -164,7 +164,7 @@ def __init__(
svd_method="randomized",
iterated_power=3,
rotation=None,
-random_state=0
+random_state=0,
):
self.n_components = n_components
self.copy = copy
4 changes: 2 additions & 2 deletions sklearn/decomposition/_fastica.py
@@ -161,7 +161,7 @@ def fastica(
random_state=None,
return_X_mean=False,
compute_sources=True,
-return_n_iter=False
+return_n_iter=False,
):
"""Perform Fast Independent Component Analysis.

@@ -426,7 +426,7 @@ def __init__(
max_iter=200,
tol=1e-4,
w_init=None,
-random_state=None
+random_state=None,
):
super().__init__()
if max_iter < 1:
2 changes: 1 addition & 1 deletion sklearn/decomposition/_lda.py
@@ -325,7 +325,7 @@ def __init__(
max_doc_update_iter=100,
n_jobs=None,
verbose=0,
-random_state=None
+random_state=None,
):
self.n_components = n_components
self.doc_topic_prior = doc_topic_prior
4 changes: 2 additions & 2 deletions sklearn/decomposition/_nmf.py
@@ -918,7 +918,7 @@ def non_negative_factorization(
regularization=None,
random_state=None,
verbose=0,
-shuffle=False
+shuffle=False,
):
"""Compute Non-negative Matrix Factorization (NMF).

@@ -1292,7 +1292,7 @@ def __init__(
l1_ratio=0.0,
verbose=0,
shuffle=False,
-regularization="both"
+regularization="both",
):
self.n_components = n_components
self.init = init
2 changes: 1 addition & 1 deletion sklearn/decomposition/_pca.py
@@ -345,7 +345,7 @@ def __init__(
svd_solver="auto",
tol=0.0,
iterated_power="auto",
-random_state=None
+random_state=None,
):
self.n_components = n_components
self.copy = copy
4 changes: 2 additions & 2 deletions sklearn/decomposition/_sparse_pca.py
@@ -129,7 +129,7 @@ def __init__(
U_init=None,
V_init=None,
verbose=False,
-random_state=None
+random_state=None,
):
self.n_components = n_components
self.alpha = alpha
@@ -342,7 +342,7 @@ def __init__(
shuffle=True,
n_jobs=None,
method="lars",
-random_state=None
+random_state=None,
):
super().__init__(
n_components=n_components,
2 changes: 1 addition & 1 deletion sklearn/decomposition/_truncated_svd.py
@@ -134,7 +134,7 @@ def __init__(
algorithm="randomized",
n_iter=5,
random_state=None,
-tol=0.0
+tol=0.0,
):
self.algorithm = algorithm
self.n_components = n_components
6 changes: 3 additions & 3 deletions sklearn/ensemble/_bagging.py
@@ -220,7 +220,7 @@ def __init__(
warm_start=False,
n_jobs=None,
random_state=None,
-verbose=0
+verbose=0,
):
super().__init__(base_estimator=base_estimator, n_estimators=n_estimators)

@@ -648,7 +648,7 @@ def __init__(
warm_start=False,
n_jobs=None,
random_state=None,
-verbose=0
+verbose=0,
):

super().__init__(
@@ -1062,7 +1062,7 @@ def __init__(
warm_start=False,
n_jobs=None,
random_state=None,
-verbose=0
+verbose=0,
):
super().__init__(
base_estimator,