diff --git a/.appveyor.yml b/.appveyor.yml
index e51feb91867..2876aac4dfb 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -1,4 +1,4 @@
-version: 3.3.3.{build}
+version: 3.3.4.{build}

 image: Visual Studio 2015
 platform: x64
diff --git a/.ci/setup.sh b/.ci/setup.sh
index c1e39b67d1a..3f095e0ffbb 100755
--- a/.ci/setup.sh
+++ b/.ci/setup.sh
@@ -7,6 +7,7 @@ if [[ $OS_NAME == "macos" ]]; then
             sudo xcode-select -s /Applications/Xcode_10.3.app/Contents/Developer || exit -1
         fi
     else  # gcc
+        sudo xcode-select -s /Applications/Xcode_14.1.app/Contents/Developer || exit -1
         if [[ $TASK != "mpi" ]]; then
             brew install gcc
         fi
diff --git a/.ci/test.sh b/.ci/test.sh
index 6439f4c660f..40eec5fa518 100755
--- a/.ci/test.sh
+++ b/.ci/test.sh
@@ -118,26 +118,26 @@ if [[ $TASK == "swig" ]]; then
     exit 0
 fi

-# temporary fix for https://github.com/microsoft/LightGBM/issues/5390
-if [[ $PYTHON_VERSION == "3.7" ]]; then
-    DEPENDENCIES="dask distributed"
-else
-    DEPENDENCIES="dask=2022.7.0 distributed=2022.7.0 scipy<1.9"
+# hack around https://github.com/microsoft/LightGBM/pull/5619#issuecomment-1341935203 just to produce
+# a releasable artifact on Ubuntu 14.04
+ARCH=$(uname -m)
+PACKAGE_CONSTRAINTS="dask-core distributed pandas numpy scikit-learn scipy"
+if [[ $OS_NAME == "linux" ]] && [[ $COMPILER == "gcc" ]] && [[ $ARCH != "aarch64" ]]; then
+    if [[ $TASK == "bdist" ]] || [[ $TASK == "regular" ]] || [[ $TASK == "mpi" ]]; then
+        PACKAGE_CONSTRAINTS="dask-core<=2022.7.1 distributed<=2022.7.1 libstdcxx-ng<12.0 numpy<=1.20.0 pandas<=1.4.1 scikit-learn<=1.1.0 scipy<=1.8.0"
+    fi
 fi

 # re-including python=version[build=*cpython] to ensure that conda doesn't fall back to pypy
 conda install -q -y -n $CONDA_ENV \
     cloudpickle \
-    ${DEPENDENCIES} \
     joblib \
     matplotlib \
-    numpy \
-    pandas \
     psutil \
     pytest \
+    ${PACKAGE_CONSTRAINTS} \
     "python=$PYTHON_VERSION[build=*cpython]" \
-    python-graphviz \
-    scikit-learn || exit -1
+    python-graphviz || exit -1

 if [[ $OS_NAME == "macos" ]] && [[ $COMPILER == "clang" ]]; then
     # fix "OMP: Error #15: Initializing libiomp5.dylib, but found libomp.dylib already initialized." (OpenMP library conflict due to conda's MKL)
diff --git a/.ci/test_r_package.sh b/.ci/test_r_package.sh
index c15b5c59df7..0db88453ab7 100755
--- a/.ci/test_r_package.sh
+++ b/.ci/test_r_package.sh
@@ -21,9 +21,9 @@ if [[ "${R_MAJOR_VERSION}" == "3" ]]; then
     export R_LINUX_VERSION="3.6.3-1bionic"
     export R_APT_REPO="bionic-cran35/"
 elif [[ "${R_MAJOR_VERSION}" == "4" ]]; then
-    export R_MAC_VERSION=4.2.1
+    export R_MAC_VERSION=4.2.2
     export R_MAC_PKG_URL=${CRAN_MIRROR}/bin/macosx/base/R-${R_MAC_VERSION}.pkg
-    export R_LINUX_VERSION="4.2.1-1.2004.0"
+    export R_LINUX_VERSION="4.2.2-1.2004.0"
     export R_APT_REPO="focal-cran40/"
 else
     echo "Unrecognized R version: ${R_VERSION}"
@@ -76,7 +76,7 @@ if [[ $OS_NAME == "macos" ]]; then
     brew install --cask basictex || exit -1
     export PATH="/Library/TeX/texbin:$PATH"
     sudo tlmgr --verify-repo=none update --self || exit -1
-    sudo tlmgr --verify-repo=none install inconsolata helvetic || exit -1
+    sudo tlmgr --verify-repo=none install inconsolata helvetic rsfs || exit -1

     curl -sL ${R_MAC_PKG_URL} -o R.pkg || exit -1
     sudo installer \
@@ -163,10 +163,11 @@ elif [[ $R_BUILD_TYPE == "cran" ]]; then
         || (cat ${RCHK_LOG_FILE} && exit -1)
     cat ${RCHK_LOG_FILE}

-    # the exception below is from R itself and not LightGBM:
+    # the exceptions below are from R itself and not LightGBM:
     # https://github.com/kalibera/rchk/issues/22#issuecomment-656036156
     exit $(
         cat ${RCHK_LOG_FILE} \
+            | grep -v "in function RunGenCollect" \
             | grep -v "in function strptime_internal" \
             | grep --count -E '\[PB\]|ERROR'
     )
diff --git a/.ci/test_r_package_windows.ps1 b/.ci/test_r_package_windows.ps1
index 2005ad5adee..e4d20de50b9 100644
--- a/.ci/test_r_package_windows.ps1
+++ b/.ci/test_r_package_windows.ps1
@@ -80,7 +80,7 @@ if ($env:R_MAJOR_VERSION -eq "3") {
   $env:RTOOLS_BIN = "$RTOOLS_INSTALL_PATH\usr\bin"
   $env:RTOOLS_MINGW_BIN = "$RTOOLS_INSTALL_PATH\x86_64-w64-mingw32.static.posix\bin"
   $env:RTOOLS_EXE_FILE = "rtools42-5253-5107.exe"
-  $env:R_WINDOWS_VERSION = "4.2.1"
+  $env:R_WINDOWS_VERSION = "4.2.2"
 } else {
   Write-Output "[ERROR] Unrecognized R version: $env:R_VERSION"
   Check-Output $false
diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml
index b8ae029079d..a8d69ddcaa3 100644
--- a/.github/workflows/cuda.yml
+++ b/.github/workflows/cuda.yml
@@ -37,17 +37,17 @@ jobs:
             compiler: gcc
             python_version: "3.10"
             cuda_version: "9.0"
-          # task: cuda
-          # - method: source
-          #   compiler: gcc
-          #   python_version: "3.8"
-          #   cuda_version: "11.7.1"
-          #   task: cuda_exp
-          # - method: pip
-          #   compiler: clang
-          #   python_version: "3.9"
-          #   cuda_version: "10.0"
-          #   task: cuda_exp
+            task: cuda
+          - method: source
+            compiler: gcc
+            python_version: "3.8"
+            cuda_version: "11.7.1"
+            task: cuda_exp
+          - method: pip
+            compiler: clang
+            python_version: "3.9"
+            cuda_version: "10.0"
+            task: cuda_exp
     steps:
       - name: Setup or update software on host machine
         run: |
diff --git a/.github/workflows/r_configure.yml b/.github/workflows/r_configure.yml
index f989c272d2c..78287c9da08 100644
--- a/.github/workflows/r_configure.yml
+++ b/.github/workflows/r_configure.yml
@@ -8,7 +8,7 @@ jobs:
   r-configure:
     name: r-configure
     timeout-minutes: 60
-    runs-on: ubuntu-latest
+    runs-on: 'ubuntu-20.04'
     container: "ubuntu:20.04"
     steps:
       - name: Install essential software before checkout
diff --git a/.github/workflows/r_package.yml b/.github/workflows/r_package.yml
index 29c203d160f..9db9f8ba4e0 100644
--- a/.github/workflows/r_package.yml
+++ b/.github/workflows/r_package.yml
@@ -33,22 +33,22 @@ jobs:
           ################
           # CMake builds #
           ################
-          - os: ubuntu-latest
+          - os: ubuntu-20.04
            task: r-package
            compiler: gcc
            r_version: 3.6
            build_type: cmake
-          - os: ubuntu-latest
+          - os: ubuntu-20.04
            task: r-package
            compiler: gcc
            r_version: 4.2
            build_type: cmake
-          - os: ubuntu-latest
+          - os: ubuntu-20.04
            task: r-package
            compiler: clang
            r_version: 3.6
            build_type: cmake
-          - os: ubuntu-latest
+          - os: ubuntu-20.04
            task: r-package
            compiler: clang
            r_version: 4.2
@@ -114,7 +114,7 @@ jobs:
            toolchain: MSYS
            r_version: 4.2
            build_type: cran
-          - os: ubuntu-latest
+          - os: ubuntu-20.04
            task: r-package
            compiler: gcc
            r_version: 4.2
@@ -127,7 +127,7 @@ jobs:
           ################
           # Other checks #
           ################
-          - os: ubuntu-latest
+          - os: ubuntu-20.04
            task: r-rchk
            compiler: gcc
            r_version: 4.2
@@ -151,7 +151,7 @@ jobs:
           CTAN_MIRROR: https://ctan.math.illinois.edu/systems/win32/miktex
           TINYTEX_INSTALLER: TinyTeX
       - name: Setup and run tests on Linux and macOS
-        if: matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest'
+        if: matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-20.04'
         shell: bash
         run: |
           export TASK="${{ matrix.task }}"
@@ -159,7 +159,7 @@ jobs:
           export GITHUB_ACTIONS="true"
           if [[ "${{ matrix.os }}" == "macOS-latest" ]]; then
             export OS_NAME="macos"
-          elif [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then
+          elif [[ "${{ matrix.os }}" == "ubuntu-20.04" ]]; then
             export OS_NAME="linux"
           fi
           export BUILD_DIRECTORY="$GITHUB_WORKSPACE"
@@ -181,9 +181,9 @@ jobs:
           $env:TASK = "${{ matrix.task }}"
           & "$env:GITHUB_WORKSPACE/.ci/test_windows.ps1"
   test-r-sanitizers:
-    name: r-sanitizers (ubuntu-latest, R-devel, ${{ matrix.compiler }} ASAN/UBSAN)
+    name: r-sanitizers (ubuntu-20.04, R-devel, ${{ matrix.compiler }} ASAN/UBSAN)
     timeout-minutes: 60
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     container: wch1/r-debug
     strategy:
       fail-fast: false
@@ -219,7 +219,7 @@ jobs:
   test-r-debian-clang:
     name: r-package (debian, R-devel, clang)
     timeout-minutes: 60
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     container: rhub/debian-clang-devel
     steps:
       - name: Install Git before checkout
@@ -248,7 +248,7 @@ jobs:
           fi
   all-successful:
     # https://github.community/t/is-it-possible-to-require-all-github-actions-tasks-to-pass-without-enumerating-them/117957/4?u=graingert
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     needs: [test, test-r-sanitizers, test-r-debian-clang]
     steps:
       - name: Note that all tests succeeded
diff --git a/.github/workflows/r_valgrind.yml b/.github/workflows/r_valgrind.yml
index 00d74f5c8f3..bc6f343dca0 100644
--- a/.github/workflows/r_valgrind.yml
+++ b/.github/workflows/r_valgrind.yml
@@ -8,7 +8,7 @@ jobs:
   test-r-valgrind:
     name: r-package (ubuntu-latest, R-devel, valgrind)
     timeout-minutes: 300
-    runs-on: ubuntu-latest
+    runs-on: 'ubuntu-20.04'
     container: wch1/r-debug
     env:
       SECRETS_WORKFLOW: ${{ secrets.WORKFLOW }}
diff --git a/.vsts-ci.yml b/.vsts-ci.yml
index d80af79ce22..a702dbfe336 100644
--- a/.vsts-ci.yml
+++ b/.vsts-ci.yml
@@ -40,7 +40,7 @@ jobs:
     matrix:
       regular:
         TASK: regular
-        PYTHON_VERSION: '3.9'
+        PYTHON_VERSION: '3.8'
       sdist:
         TASK: sdist
         PYTHON_VERSION: '3.7'
@@ -150,7 +150,7 @@ jobs:
     OS_NAME: 'linux'
     PRODUCES_ARTIFACTS: 'true'
   pool:
-    vmImage: ubuntu-latest
+    vmImage: 'ubuntu-20.04'
   timeoutInMinutes: 180
   strategy:
     matrix:
@@ -297,7 +297,7 @@ jobs:
   ###########################################
   condition: not(startsWith(variables['Build.SourceBranch'], 'refs/pull/'))
   pool:
-    vmImage: 'ubuntu-latest'
+    vmImage: 'ubuntu-20.04'
   container: rbase
   steps:
   - script: |
@@ -328,7 +328,7 @@ jobs:
     - R_artifact
   condition: and(succeeded(), not(startsWith(variables['Build.SourceBranch'], 'refs/pull/')))
   pool:
-    vmImage: 'ubuntu-latest'
+    vmImage: 'ubuntu-20.04'
   steps:
   # Create archives with complete source code included (with git submodules)
   - task: ArchiveFiles@2
diff --git a/R-package/configure b/R-package/configure
index 2b4951755c2..01bad5e9cd8 100755
--- a/R-package/configure
+++ b/R-package/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for lightgbm 3.3.3.
+# Generated by GNU Autoconf 2.69 for lightgbm 3.3.4.
 #
 #
 # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -576,8 +576,8 @@ MAKEFLAGS=

 # Identity of this package.
 PACKAGE_NAME='lightgbm'
 PACKAGE_TARNAME='lightgbm'
-PACKAGE_VERSION='3.3.3'
-PACKAGE_STRING='lightgbm 3.3.3'
+PACKAGE_VERSION='3.3.4'
+PACKAGE_STRING='lightgbm 3.3.4'
 PACKAGE_BUGREPORT=''
 PACKAGE_URL=''
@@ -1182,7 +1182,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures lightgbm 3.3.3 to adapt to many kinds of systems.
+\`configure' configures lightgbm 3.3.4 to adapt to many kinds of systems.

 Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1244,7 +1244,7 @@ fi

 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of lightgbm 3.3.3:";;
+     short | recursive ) echo "Configuration of lightgbm 3.3.4:";;
    esac
   cat <<\_ACEOF

@@ -1311,7 +1311,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-lightgbm configure 3.3.3
+lightgbm configure 3.3.4
 generated by GNU Autoconf 2.69

 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1328,7 +1328,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.

-It was created by lightgbm $as_me 3.3.3, which was
+It was created by lightgbm $as_me 3.3.4, which was
 generated by GNU Autoconf 2.69.  Invocation command line was

   $ $0 $@
@@ -2419,7 +2419,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by lightgbm $as_me 3.3.3, which was
+This file was extended by lightgbm $as_me 3.3.4, which was
 generated by GNU Autoconf 2.69.  Invocation command line was

   CONFIG_FILES    = $CONFIG_FILES
@@ -2472,7 +2472,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-lightgbm config.status 3.3.3
+lightgbm config.status 3.3.4
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"

diff --git a/VERSION.txt b/VERSION.txt
index 619b5376684..a0891f563f3 100644
--- a/VERSION.txt
+++ b/VERSION.txt
@@ -1 +1 @@
-3.3.3
+3.3.4
diff --git a/include/LightGBM/utils/log.h b/include/LightGBM/utils/log.h
index 6eb02fbf3a7..7cab31c15b0 100644
--- a/include/LightGBM/utils/log.h
+++ b/include/LightGBM/utils/log.h
@@ -109,12 +109,13 @@ class Log {
   }
   static void Fatal(const char *format, ...) {
     va_list val;
-    char str_buf[1024];
+    const size_t kBufSize = 1024;
+    char str_buf[kBufSize];
     va_start(val, format);
 #ifdef _MSC_VER
-    vsprintf_s(str_buf, format, val);
+    vsnprintf_s(str_buf, kBufSize, format, val);
 #else
-    vsprintf(str_buf, format, val);
+    vsnprintf(str_buf, kBufSize, format, val);
 #endif
     va_end(val);
diff --git a/tests/python_package_test/test_dask.py b/tests/python_package_test/test_dask.py
index afcbc11a77a..8e8acd61b5d 100644
--- a/tests/python_package_test/test_dask.py
+++ b/tests/python_package_test/test_dask.py
@@ -1436,6 +1436,7 @@ def test_network_params_not_required_but_respected_if_given(task, listen_port, c

 @pytest.mark.parametrize('task', tasks)
 def test_machines_should_be_used_if_provided(task, cluster):
+    pytest.skip("skipping due to timeout issues discussed in https://github.com/microsoft/LightGBM/issues/5390")
     with Client(cluster) as client:
         _, _, _, _, dX, dy, _, dg = _create_data(
             objective=task,
diff --git a/tests/python_package_test/test_engine.py b/tests/python_package_test/test_engine.py
index eaf7244fe15..119b3f6f14e 100644
--- a/tests/python_package_test/test_engine.py
+++ b/tests/python_package_test/test_engine.py
@@ -17,7 +17,7 @@

 import lightgbm as lgb

-from .utils import load_boston, load_breast_cancer, load_digits, load_iris
+from .utils import load_breast_cancer, load_digits, load_iris

 decreasing_generator = itertools.count(0, -1)

@@ -99,6 +99,7 @@ def test_rf():


 def test_regression():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
     params = {
@@ -643,6 +644,7 @@ def test_early_stopping():


 def test_continue_train():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
     params = {
@@ -671,6 +673,7 @@ def test_continue_train():


 def test_continue_train_reused_dataset():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     params = {
         'objective': 'regression',
@@ -685,6 +688,7 @@ def test_continue_train_reused_dataset():


 def test_continue_train_dart():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
     params = {
@@ -733,6 +737,7 @@ def test_continue_train_multiclass():


 def test_cv():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X_train, y_train = load_boston(return_X_y=True)
     params = {'verbose': -1}
     lgb_train = lgb.Dataset(X_train, y_train)
@@ -837,6 +842,7 @@ def test_cvbooster():


 def test_feature_name():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X_train, y_train = load_boston(return_X_y=True)
     params = {'verbose': -1}
     lgb_train = lgb.Dataset(X_train, y_train)
@@ -866,6 +872,7 @@ def test_feature_name_with_non_ascii():


 def test_save_load_copy_pickle():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     def train_and_predict(init_model=None, return_model=False):
         X, y = load_boston(return_X_y=True)
         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
@@ -1496,6 +1503,7 @@ def test_refit():


 def test_mape_rf():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     params = {
         'boosting_type': 'rf',
@@ -1514,6 +1522,7 @@


 def test_mape_dart():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     params = {
         'boosting_type': 'dart',
@@ -2052,6 +2061,7 @@ def test_default_objective_and_metric():

 @pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason='not enough RAM')
 def test_model_size():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     data = lgb.Dataset(X, y)
     bst = lgb.train({'verbose': -1}, data, num_boost_round=2)
@@ -2079,6 +2089,7 @@ def test_model_size():


 def test_get_split_value_histogram():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     lgb_train = lgb.Dataset(X, y, categorical_feature=[2])
     gbm = lgb.train({'verbose': -1}, lgb_train, num_boost_round=20)
@@ -2159,6 +2170,7 @@ def test_get_split_value_histogram():


 def test_early_stopping_for_only_first_metric():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     def metrics_combination_train_regression(valid_sets, metric_list, assumed_iteration,
                                               first_metric_only, feval=None):
@@ -2465,6 +2477,7 @@ def test_dataset_params_with_reference():


 def test_extra_trees():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     # check extra trees increases regularization
     X, y = load_boston(return_X_y=True)
     lgb_x = lgb.Dataset(X, label=y)
@@ -2484,6 +2497,7 @@ def test_extra_trees():


 def test_path_smoothing():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     # check path smoothing increases regularization
     X, y = load_boston(return_X_y=True)
     lgb_x = lgb.Dataset(X, label=y)
@@ -2554,6 +2568,7 @@ def _imptcs_to_numpy(X, impcts_dict):


 def test_interaction_constraints():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     num_features = X.shape[1]
     train_data = lgb.Dataset(X, label=y)
@@ -2709,6 +2724,7 @@ def test_linear_single_leaf():


 def test_predict_with_start_iteration():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     def inner_test(X, y, params, early_stopping_rounds):
         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
         train_data = lgb.Dataset(X_train, label=y_train)
diff --git a/tests/python_package_test/test_sklearn.py b/tests/python_package_test/test_sklearn.py
index 6b1ac8a9f3d..d107fa60d1a 100644
--- a/tests/python_package_test/test_sklearn.py
+++ b/tests/python_package_test/test_sklearn.py
@@ -18,7 +18,7 @@

 import lightgbm as lgb

-from .utils import load_boston, load_breast_cancer, load_digits, load_iris, load_linnerud, make_ranking
+from .utils import load_breast_cancer, load_digits, load_iris, load_linnerud, make_ranking

 sk_version = parse_version(sk_version)
 if sk_version < parse_version("0.23"):
@@ -91,6 +91,7 @@ def test_binary():


 def test_regression():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
     gbm = lgb.LGBMRegressor(n_estimators=50, silent=True)
@@ -157,6 +158,7 @@ def test_eval_at_aliases():


 def test_regression_with_custom_objective():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
     gbm = lgb.LGBMRegressor(n_estimators=50, silent=True, objective=objective_ls)
@@ -180,6 +182,7 @@ def test_binary_classification_with_custom_objective():


 def test_dart():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
     gbm = lgb.LGBMRegressor(boosting_type='dart', n_estimators=50)
@@ -217,6 +220,7 @@ def test_stacking_classifier():

 # sklearn <0.23 does not have a stacking regressor and n_features_in_ property
 @pytest.mark.skipif(sk_version < parse_version('0.23'), reason='scikit-learn version is less than 0.23')
 def test_stacking_regressor():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     from sklearn.ensemble import StackingRegressor

     X, y = load_boston(return_X_y=True)
@@ -384,14 +388,6 @@ def test_regressor_chain():


 def test_clone_and_property():
-    X, y = load_boston(return_X_y=True)
-    gbm = lgb.LGBMRegressor(n_estimators=10, silent=True)
-    gbm.fit(X, y, verbose=False)
-
-    gbm_clone = clone(gbm)
-    assert isinstance(gbm.booster_, lgb.Booster)
-    assert isinstance(gbm.feature_importances_, np.ndarray)
-
     X, y = load_digits(n_class=2, return_X_y=True)
     clf = lgb.LGBMClassifier(n_estimators=10, silent=True)
     clf.fit(X, y, verbose=False)
@@ -402,6 +398,7 @@ def test_clone_and_property():


 def test_joblib():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
     gbm = lgb.LGBMRegressor(n_estimators=10, objective=custom_asymmetric_obj,
@@ -644,6 +641,7 @@ def test_predict():


 def test_evaluate_train_set():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
     gbm = lgb.LGBMRegressor(n_estimators=10, silent=True)
@@ -658,6 +656,7 @@ def test_evaluate_train_set():


 def test_metrics():
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     X, y = load_boston(return_X_y=True)
     params = {'n_estimators': 2, 'verbose': -1}
     params_fit = {'X': X, 'y': y, 'eval_set': (X, y), 'verbose': False}
@@ -989,7 +988,7 @@ def test_nan_handle():


 def test_first_metric_only():
-
+    pytest.skip("load_boston() was removed in scikit-learn 1.2.0")
     def fit_and_check(eval_set_names, metric_names, assumed_iteration, first_metric_only):
         params['first_metric_only'] = first_metric_only
         gbm = lgb.LGBMRegressor(**params).fit(**params_fit)
@@ -1204,7 +1203,7 @@ def test_parameters_default_constructible(estimator):
     check_parameters_default_constructible(name, Estimator)


-@pytest.mark.parametrize('task', ['classification', 'ranking', 'regression'])
+@pytest.mark.parametrize('task', ['classification', 'ranking'])
 def test_training_succeeds_when_data_is_dataframe_and_label_is_column_array(task):
     pd = pytest.importorskip("pandas")
     if task == 'ranking':
@@ -1214,9 +1213,6 @@ def test_training_succeeds_when_data_is_dataframe_and_label_is_column_array(task
     elif task == 'classification':
         X, y = load_iris(return_X_y=True)
         model_factory = lgb.LGBMClassifier
-    elif task == 'regression':
-        X, y = load_boston(return_X_y=True)
-        model_factory = lgb.LGBMRegressor
     X = pd.DataFrame(X)
     y_col_array = y.reshape(-1, 1)
     params = {
diff --git a/tests/python_package_test/utils.py b/tests/python_package_test/utils.py
index 320b8e204aa..7d37bf7c6bb 100644
--- a/tests/python_package_test/utils.py
+++ b/tests/python_package_test/utils.py
@@ -6,11 +6,6 @@
 from sklearn.utils import check_random_state


-@lru_cache(maxsize=None)
-def load_boston(**kwargs):
-    return sklearn.datasets.load_boston(**kwargs)
-
-
 @lru_cache(maxsize=None)
 def load_breast_cancer(**kwargs):
     return sklearn.datasets.load_breast_cancer(**kwargs)