diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 7a8d54d4e9f9..000000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,451 +0,0 @@ -#!/usr/bin/groovy -// -*- mode: groovy -*- -// Jenkins pipeline -// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ - -// Command to run command inside a docker container -dockerRun = 'tests/ci_build/ci_build.sh' - -// Which CUDA version to use when building reference distribution wheel -ref_cuda_ver = '11.0.3' - -import groovy.transform.Field - -@Field -def commit_id // necessary to pass a variable from one stage to another - -pipeline { - // Each stage specify its own agent - agent none - - environment { - DOCKER_CACHE_ECR_ID = '492475357299' - DOCKER_CACHE_ECR_REGION = 'us-west-2' - } - - // Setup common job properties - options { - ansiColor('xterm') - timestamps() - timeout(time: 240, unit: 'MINUTES') - buildDiscarder(logRotator(numToKeepStr: '10')) - preserveStashes() - } - - // Build stages - stages { - stage('Jenkins Linux: Initialize') { - agent { label 'job_initializer' } - steps { - script { - def buildNumber = env.BUILD_NUMBER as int - if (buildNumber > 1) milestone(buildNumber - 1) - milestone(buildNumber) - - checkoutSrcs() - commit_id = "${GIT_COMMIT}" - } - sh 'python3 tests/jenkins_get_approval.py' - stash name: 'srcs' - deleteDir() - } - } - stage('Jenkins Linux: Build') { - agent none - steps { - script { - parallel ([ - 'clang-tidy': { ClangTidy() }, - 'build-cpu': { BuildCPU() }, - 'build-cpu-arm64': { BuildCPUARM64() }, - 'build-cpu-rabit-mock': { BuildCPUMock() }, - // Build reference, distribution-ready Python wheel with CUDA 11.0 - // using CentOS 7 image - 'build-gpu-cuda11.0': { BuildCUDA(cuda_version: '11.0.3', build_rmm: true) }, - 'build-gpu-rpkg': { BuildRPackageWithCUDA(cuda_version: '11.0.3') }, - 'build-jvm-packages-gpu-cuda11.0': { BuildJVMPackagesWithCUDA(spark_version: '3.0.1', cuda_version: '11.0.3') }, - 'build-jvm-packages': { BuildJVMPackages(spark_version: '3.0.1') }, - 'build-jvm-doc': { BuildJVMDoc() } - ]) - } - } - } - stage('Jenkins Linux: Test') { - agent none - steps { - script { - parallel ([ - 'test-python-cpu': { TestPythonCPU() }, - 'test-python-cpu-arm64': { TestPythonCPUARM64() }, - // artifact_cuda_version doesn't apply to RMM tests; RMM tests will always match CUDA version between artifact and host env - 'test-python-gpu-cuda11.0': { TestPythonGPU(artifact_cuda_version: '11.0.3', host_cuda_version: '11.0.3', test_rmm: true) }, - 'test-python-mgpu-cuda11.0': { TestPythonGPU(artifact_cuda_version: '11.0.3', host_cuda_version: '11.0.3', multi_gpu: true, test_rmm: true) }, - 'test-cpp-gpu-cuda11.0': { TestCppGPU(artifact_cuda_version: '11.0.3', host_cuda_version: '11.0.3', test_rmm: true) }, - 'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '3.0.0') } - ]) - } - } - } - stage('Jenkins Linux: Deploy') { - agent none - steps { - script { - parallel ([ - 'deploy-jvm-packages': { DeployJVMPackages(spark_version: '3.0.0') } - ]) - } - } - } - } -} - -// check out source code from git -def checkoutSrcs() { - retry(5) { - try { - timeout(time: 2, unit: 'MINUTES') { - checkout scm - sh 'git submodule update --init' - } - } catch (exc) { - deleteDir() - error "Failed to fetch source codes" - } - } -} - -def ClangTidy() { - node('linux && cpu_build') { - unstash name: 'srcs' - echo "Running clang-tidy job..." 
- def container_type = "clang_tidy" - def docker_binary = "docker" - def dockerArgs = "--build-arg CUDA_VERSION_ARG=11.0.3" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py --cuda-archs 75 - """ - deleteDir() - } -} - -def BuildCPU() { - node('linux && cpu_build') { - unstash name: 'srcs' - echo "Build CPU" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} rm -fv dmlc-core/include/dmlc/build_config_default.h - # This step is not necessary, but here we include it, to ensure that DMLC_CORE_USE_CMAKE flag is correctly propagated - # We want to make sure that we use the configured header build/dmlc/build_config.h instead of include/dmlc/build_config_default.h. - # See discussion at https://github.com/dmlc/xgboost/issues/5510 - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DPLUGIN_DENSE_PARSER=ON - ${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --extra-verbose" - """ - // Sanitizer test - def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \ - -DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --exclude-regex AllTestsInDMLCUnitTests --extra-verbose" - """ - - stash name: 'xgboost_cli', includes: 'xgboost' - deleteDir() - } -} - -def BuildCPUARM64() { - node('linux && arm64') { - unstash name: 'srcs' - echo "Build CPU ARM64" - def container_type = "aarch64" - def docker_binary = "docker" - def wheel_tag = "manylinux2014_aarch64" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh --conda-env=aarch64_test -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOL=ON - ${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --extra-verbose" - ${dockerRun} ${container_type} ${docker_binary} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal" - ${dockerRun} ${container_type} ${docker_binary} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} ${wheel_tag} - ${dockerRun} ${container_type} ${docker_binary} bash -c "auditwheel repair --plat ${wheel_tag} python-package/dist/*.whl && python tests/ci_build/rename_whl.py wheelhouse/*.whl ${commit_id} ${wheel_tag}" - mv -v wheelhouse/*.whl python-package/dist/ - # Make sure that libgomp.so is vendored in the wheel - ${dockerRun} ${container_type} ${docker_binary} bash -c "unzip -l python-package/dist/*.whl | grep libgomp || exit -1" - """ - echo 'Stashing Python wheel...' - stash name: "xgboost_whl_arm64_cpu", includes: 'python-package/dist/*.whl' - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - echo 'Uploading Python wheel...' 
- sh """ - python3 -m awscli s3 cp python-package/dist/*.whl s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - stash name: 'xgboost_cli_arm64', includes: 'xgboost' - deleteDir() - } -} - -def BuildCPUMock() { - node('linux && cpu_build') { - unstash name: 'srcs' - echo "Build CPU with rabit mock" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_mock_cmake.sh - """ - echo 'Stashing rabit C++ test executable (xgboost)...' - stash name: 'xgboost_rabit_tests', includes: 'xgboost' - deleteDir() - } -} - -def BuildCUDA(args) { - node('linux && cpu_build') { - unstash name: 'srcs' - echo "Build with CUDA ${args.cuda_version}" - def container_type = "gpu_build_centos7" - def docker_binary = "docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}" - def arch_flag = "" - if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) { - arch_flag = "-DGPU_COMPUTE_VER=75" - } - def wheel_tag = "manylinux2014_x86_64" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/prune_libnccl.sh - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DUSE_OPENMP=ON -DHIDE_CXX_SYMBOLS=ON -DUSE_NCCL_LIB_PATH=ON -DNCCL_INCLUDE_DIR=/usr/include -DNCCL_LIBRARY=/workspace/libnccl_static.a ${arch_flag} - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal" - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} ${wheel_tag} - """ - if (args.cuda_version == ref_cuda_ver) { - sh """ - ${dockerRun} auditwheel_x86_64 ${docker_binary} auditwheel repair --plat ${wheel_tag} python-package/dist/*.whl - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py wheelhouse/*.whl ${commit_id} ${wheel_tag} - mv -v wheelhouse/*.whl python-package/dist/ - # Make sure that libgomp.so is vendored in the wheel - ${dockerRun} auditwheel_x86_64 ${docker_binary} bash -c "unzip -l python-package/dist/*.whl | grep libgomp || exit -1" - """ - } - echo 'Stashing Python wheel...' - stash name: "xgboost_whl_cuda${args.cuda_version}", includes: 'python-package/dist/*.whl' - if (args.cuda_version == ref_cuda_ver && (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release'))) { - echo 'Uploading Python wheel...' - sh """ - python3 -m awscli s3 cp python-package/dist/*.whl s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - echo 'Stashing C++ test executable (testxgboost)...' 
- stash name: "xgboost_cpp_tests_cuda${args.cuda_version}", includes: 'build/testxgboost' - if (args.build_rmm) { - echo "Build with CUDA ${args.cuda_version} and RMM" - container_type = "rmm" - docker_binary = "docker" - docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}" - sh """ - rm -rf build/ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh --conda-env=gpu_test -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DBUILD_WITH_CUDA_CUB=ON ${arch_flag} - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal" - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2014_x86_64 - """ - echo 'Stashing Python wheel...' - stash name: "xgboost_whl_rmm_cuda${args.cuda_version}", includes: 'python-package/dist/*.whl' - echo 'Stashing C++ test executable (testxgboost)...' - stash name: "xgboost_cpp_tests_rmm_cuda${args.cuda_version}", includes: 'build/testxgboost' - } - deleteDir() - } -} - -def BuildRPackageWithCUDA(args) { - node('linux && cpu_build') { - unstash name: 'srcs' - def container_type = 'gpu_build_r_centos7' - def docker_binary = "docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}" - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_r_pkg_with_cuda.sh ${commit_id} - """ - echo 'Uploading R tarball...' - sh """ - python3 -m awscli s3 cp xgboost_r_gpu_linux_*.tar.gz s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - deleteDir() - } -} - -def BuildJVMPackagesWithCUDA(args) { - node('linux && mgpu') { - unstash name: 'srcs' - echo "Build XGBoost4J-Spark with Spark ${args.spark_version}, CUDA ${args.cuda_version}" - def container_type = "jvm_gpu_build" - def docker_binary = "nvidia-docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}" - def arch_flag = "" - if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) { - arch_flag = "-DGPU_COMPUTE_VER=75" - } - // Use only 4 CPU cores - def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'" - sh """ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_jvm_packages.sh ${args.spark_version} -Duse.cuda=ON $arch_flag - """ - echo "Stashing XGBoost4J JAR with CUDA ${args.cuda_version} ..." - stash name: 'xgboost4j_jar_gpu', includes: "jvm-packages/xgboost4j-gpu/target/*.jar,jvm-packages/xgboost4j-spark-gpu/target/*.jar" - deleteDir() - } -} - -def BuildJVMPackages(args) { - node('linux && cpu') { - unstash name: 'srcs' - echo "Build XGBoost4J-Spark with Spark ${args.spark_version}" - def container_type = "jvm" - def docker_binary = "docker" - // Use only 4 CPU cores - def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'" - sh """ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_packages.sh ${args.spark_version} - """ - echo 'Stashing XGBoost4J JAR...' - stash name: 'xgboost4j_jar', includes: "jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar" - deleteDir() - } -} - -def BuildJVMDoc() { - node('linux && cpu') { - unstash name: 'srcs' - echo "Building JVM doc..." 
- def container_type = "jvm" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME} - """ - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - echo 'Uploading doc...' - sh """ - python3 -m awscli s3 cp jvm-packages/${BRANCH_NAME}.tar.bz2 s3://xgboost-docs/${BRANCH_NAME}.tar.bz2 --acl public-read --no-progress - """ - } - deleteDir() - } -} - -def TestPythonCPU() { - node('linux && cpu') { - unstash name: "xgboost_whl_cuda${ref_cuda_ver}" - unstash name: 'srcs' - unstash name: 'xgboost_cli' - echo "Test Python CPU" - def container_type = "cpu" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu - """ - deleteDir() - } -} - -def TestPythonCPUARM64() { - node('linux && arm64') { - unstash name: "xgboost_whl_arm64_cpu" - unstash name: 'srcs' - unstash name: 'xgboost_cli_arm64' - echo "Test Python CPU ARM64" - def container_type = "aarch64" - def docker_binary = "docker" - sh """ - ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-arm64 - """ - deleteDir() - } -} - -def TestPythonGPU(args) { - def nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu' - def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver - node(nodeReq) { - unstash name: "xgboost_whl_cuda${artifact_cuda_version}" - unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}" - unstash name: 'srcs' - echo "Test Python GPU: CUDA ${args.host_cuda_version}" - def container_type = "gpu" - def docker_binary = "nvidia-docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}" - def mgpu_indicator = (args.multi_gpu) ? 'mgpu' : 'gpu' - // Allocate extra space in /dev/shm to enable NCCL - def docker_extra_params = (args.multi_gpu) ? 
"CI_DOCKER_EXTRA_PARAMS_INIT='--shm-size=4g'" : '' - sh "${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh ${mgpu_indicator}" - if (args.test_rmm) { - sh "rm -rfv build/ python-package/dist/" - unstash name: "xgboost_whl_rmm_cuda${args.host_cuda_version}" - unstash name: "xgboost_cpp_tests_rmm_cuda${args.host_cuda_version}" - sh "${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh ${mgpu_indicator} --use-rmm-pool" - } - deleteDir() - } -} - -def TestCppGPU(args) { - def nodeReq = 'linux && mgpu' - def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver - node(nodeReq) { - unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}" - unstash name: 'srcs' - echo "Test C++, CUDA ${args.host_cuda_version}, rmm: ${args.test_rmm}" - def container_type = "gpu" - def docker_binary = "nvidia-docker" - def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}" - sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost" - if (args.test_rmm) { - sh "rm -rfv build/" - unstash name: "xgboost_cpp_tests_rmm_cuda${args.host_cuda_version}" - echo "Test C++, CUDA ${args.host_cuda_version} with RMM" - container_type = "rmm" - docker_binary = "nvidia-docker" - docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}" - sh """ - ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "source activate gpu_test && build/testxgboost --use-rmm-pool" - """ - } - deleteDir() - } -} - -def CrossTestJVMwithJDK(args) { - node('linux && cpu') { - unstash name: 'xgboost4j_jar' - unstash name: 'srcs' - if (args.spark_version != null) { - echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}, Spark ${args.spark_version}" - } else { - echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}" - } - def container_type = "jvm_cross" - def docker_binary = "docker" - def spark_arg = (args.spark_version != null) ? "--build-arg SPARK_VERSION=${args.spark_version}" : "" - def docker_args = "--build-arg JDK_VERSION=${args.jdk_version} ${spark_arg}" - // Run integration tests only when spark_version is given - def docker_extra_params = (args.spark_version != null) ? "CI_DOCKER_EXTRA_PARAMS_INIT='-e RUN_INTEGRATION_TEST=1'" : "" - sh """ - ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_jvm_cross.sh - """ - deleteDir() - } -} - -def DeployJVMPackages(args) { - node('linux && cpu') { - unstash name: 'srcs' - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - echo 'Deploying to xgboost-maven-repo S3 repo...' 
- sh """ - ${dockerRun} jvm_gpu_build docker --build-arg CUDA_VERSION_ARG=11.0.3 tests/ci_build/deploy_jvm_packages.sh ${args.spark_version} - """ - } - deleteDir() - } -} diff --git a/Jenkinsfile-win64 b/Jenkinsfile-win64 deleted file mode 100644 index 38841bcdfa2d..000000000000 --- a/Jenkinsfile-win64 +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/groovy -// -*- mode: groovy -*- - -/* Jenkins pipeline for Windows AMD64 target */ - -import groovy.transform.Field - -@Field -def commit_id // necessary to pass a variable from one stage to another - -pipeline { - agent none - - // Setup common job properties - options { - timestamps() - timeout(time: 240, unit: 'MINUTES') - buildDiscarder(logRotator(numToKeepStr: '10')) - preserveStashes() - } - - // Build stages - stages { - stage('Jenkins Win64: Initialize') { - agent { label 'job_initializer' } - steps { - script { - def buildNumber = env.BUILD_NUMBER as int - if (buildNumber > 1) milestone(buildNumber - 1) - milestone(buildNumber) - checkoutSrcs() - commit_id = "${GIT_COMMIT}" - } - sh 'python3 tests/jenkins_get_approval.py' - stash name: 'srcs' - deleteDir() - } - } - stage('Jenkins Win64: Build') { - agent none - steps { - script { - parallel ([ - 'build-win64-cuda11.0': { BuildWin64() }, - 'build-rpkg-win64-cuda11.0': { BuildRPackageWithCUDAWin64() } - ]) - } - } - } - stage('Jenkins Win64: Test') { - agent none - steps { - script { - parallel ([ - 'test-win64-cuda11.0': { TestWin64() }, - ]) - } - } - } - } -} - -// check out source code from git -def checkoutSrcs() { - retry(5) { - try { - timeout(time: 2, unit: 'MINUTES') { - checkout scm - sh 'git submodule update --init' - } - } catch (exc) { - deleteDir() - error "Failed to fetch source codes" - } - } -} - -def BuildWin64() { - node('win64 && cuda11_unified') { - deleteDir() - unstash name: 'srcs' - echo "Building XGBoost for Windows AMD64 target..." - bat "nvcc --version" - def arch_flag = "" - if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) { - arch_flag = "-DGPU_COMPUTE_VER=75" - } - bat """ - mkdir build - cd build - cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON ${arch_flag} -DCMAKE_UNITY_BUILD=ON - """ - bat """ - cd build - "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe" xgboost.sln /m /p:Configuration=Release /nodeReuse:false - """ - bat """ - cd python-package - conda activate && python setup.py bdist_wheel --universal && for /R %%i in (dist\\*.whl) DO python ../tests/ci_build/rename_whl.py "%%i" ${commit_id} win_amd64 - """ - echo "Insert vcomp140.dll (OpenMP runtime) into the wheel..." - bat """ - cd python-package\\dist - COPY /B ..\\..\\tests\\ci_build\\insert_vcomp140.py - conda activate && python insert_vcomp140.py *.whl - """ - echo 'Stashing Python wheel...' - stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl' - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - echo 'Uploading Python wheel...' - bat """ - cd python-package - conda activate && for /R %%i in (dist\\*.whl) DO python -m awscli s3 cp "%%i" s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - echo 'Stashing C++ test executable (testxgboost)...' 
- stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe' - stash name: 'xgboost_cli', includes: 'xgboost.exe' - deleteDir() - } -} - -def BuildRPackageWithCUDAWin64() { - node('win64 && cuda11_unified') { - deleteDir() - unstash name: 'srcs' - bat "nvcc --version" - if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) { - bat """ - bash tests/ci_build/build_r_pkg_with_cuda_win64.sh ${commit_id} - """ - echo 'Uploading R tarball...' - bat """ - conda activate && for /R %%i in (xgboost_r_gpu_win64_*.tar.gz) DO python -m awscli s3 cp "%%i" s3://xgboost-nightly-builds/${BRANCH_NAME}/ --acl public-read --no-progress - """ - } - deleteDir() - } -} - -def TestWin64() { - node('win64 && cuda11_unified') { - deleteDir() - unstash name: 'srcs' - unstash name: 'xgboost_whl' - unstash name: 'xgboost_cli' - unstash name: 'xgboost_cpp_tests' - echo "Test Win64" - bat "nvcc --version" - echo "Running C++ tests..." - bat "build\\testxgboost.exe" - echo "Installing Python dependencies..." - def env_name = 'win64_' + UUID.randomUUID().toString().replaceAll('-', '') - bat "conda activate && mamba env create -n ${env_name} --file=tests/ci_build/conda_env/win64_test.yml" - echo "Installing Python wheel..." - bat """ - conda activate ${env_name} && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i" - """ - echo "Running Python tests..." - bat "conda activate ${env_name} && python -X faulthandler -m pytest -v -s -rxXs --fulltrace tests\\python" - bat """ - conda activate ${env_name} && python -X faulthandler -m pytest -v -s -rxXs --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu - """ - bat "conda env remove --name ${env_name}" - deleteDir() - } -} diff --git a/doc/contrib/donate.rst b/doc/contrib/donate.rst index 6571fef5febd..cc373d2b85cf 100644 --- a/doc/contrib/donate.rst +++ b/doc/contrib/donate.rst @@ -13,9 +13,9 @@ DMLC/XGBoost has grown from a research project incubated in academia to one of t A robust and efficient **continuous integration (CI)** infrastructure is one of the most critical solutions to address the above challenge. A CI service will monitor an open-source repository and run a suite of integration tests for every incoming contribution. This way, the CI ensures that every proposed change in the codebase is compatible with existing functionalities. Furthermore, XGBoost can enable more thorough tests with a powerful CI infrastructure to cover cases which are closer to the production environment. -There are several CI services available free to open source projects, such as Travis CI and AppVeyor. The XGBoost project already utilizes Travis and AppVeyor. However, the XGBoost project has needs that these free services do not adequately address. In particular, the limited usage quota of resources such as CPU and memory leaves XGBoost developers unable to bring "too-intensive" tests. In addition, they do not offer test machines with GPUs for testing XGBoost-GPU code base which has been attracting more and more interest across many organizations. Consequently, the XGBoost project self-hosts a cloud server with Jenkins software installed: https://xgboost-ci.net/. +There are several CI services available free to open source projects, such as Travis CI and AppVeyor. The XGBoost project already utilizes GitHub Actions. However, the XGBoost project has needs that these free services do not adequately address. 
In particular, the limited usage quota of resources such as CPU and memory leaves XGBoost developers unable to bring "too-intensive" tests. In addition, they do not offer test machines with GPUs for testing XGBoost-GPU code base which has been attracting more and more interest across many organizations. Consequently, the XGBoost project uses a cloud-hosted test farm. We use `BuildKite <https://buildkite.com/>`_ to organize CI pipelines. -The self-hosted Jenkins CI server has recurring operating expenses. It utilizes a leading cloud provider (AWS) to accommodate variable workload. The master node serving the web interface is available 24/7, to accommodate contributions from people around the globe. In addition, the master node launches slave nodes on demand, to run the test suite on incoming contributions. To save cost, the slave nodes are terminated when they are no longer needed. +The cloud-hosted test farm has recurring operating expenses. It utilizes a leading cloud provider (AWS) to accommodate variable workload. BuildKite launches worker machines on AWS on demand, to run the test suite on incoming contributions. To save cost, the worker machines are terminated when they are no longer needed. To help defray the hosting cost, the XGBoost project seeks donations from third parties. @@ -29,14 +29,14 @@ The Project Management Committee (PMC) of the XGBoost project appointed `Open So All expenses incurred for hosting CI will be submitted to the fiscal host with receipts. Only the expenses in the following categories will be approved for reimbursement: -* Cloud exprenses for the Jenkins CI server (https://xgboost-ci.net) +* Cloud expenses for the cloud test farm (https://buildkite.com/xgboost) * Cost of domain https://xgboost-ci.net -* Meetup.com account for XGBoost project +* Monthly cost of using BuildKite * Hosting cost of the User Forum (https://discuss.xgboost.ai) -Administration of Jenkins CI server ------------------------------------ -The PMC shall appoint committer(s) to administer the Jenkins CI server on their behalf. The current administrators are as follows: +Administration of cloud CI infrastructure +----------------------------------------- +The PMC shall appoint committer(s) to administer the cloud CI infrastructure on their behalf. The current administrators are as follows: * Primary administrator: `Hyunsu Cho `_ * Secondary administrator: `Jiaming Yuan `_ diff --git a/python-package/xgboost/dask.py b/python-package/xgboost/dask.py index 75eeba875fee..9a74d0143681 100644 --- a/python-package/xgboost/dask.py +++ b/python-package/xgboost/dask.py @@ -726,10 +726,9 @@ def _create_quantile_dmatrix( if parts is None: msg = f"worker {worker.address} has an empty DMatrix."
LOGGER.warning(msg) - import cupy d = QuantileDMatrix( - cupy.zeros((0, 0)), + numpy.empty((0, 0)), feature_names=feature_names, feature_types=feature_types, max_bin=max_bin, @@ -1544,15 +1543,21 @@ def inplace_predict( # pylint: disable=unused-argument async def _async_wrap_evaluation_matrices( - client: Optional["distributed.Client"], **kwargs: Any + client: Optional["distributed.Client"], + tree_method: Optional[str], + max_bin: Optional[int], + **kwargs: Any, ) -> Tuple[DaskDMatrix, Optional[List[Tuple[DaskDMatrix, str]]]]: """A switch function for async environment.""" - def _inner(**kwargs: Any) -> DaskDMatrix: - m = DaskDMatrix(client=client, **kwargs) - return m + def _dispatch(ref: Optional[DaskDMatrix], **kwargs: Any) -> DaskDMatrix: + if tree_method in ("hist", "gpu_hist"): + return DaskQuantileDMatrix( + client=client, ref=ref, max_bin=max_bin, **kwargs + ) + return DaskDMatrix(client=client, **kwargs) - train_dmatrix, evals = _wrap_evaluation_matrices(create_dmatrix=_inner, **kwargs) + train_dmatrix, evals = _wrap_evaluation_matrices(create_dmatrix=_dispatch, **kwargs) train_dmatrix = await train_dmatrix if evals is None: return train_dmatrix, evals @@ -1756,6 +1761,8 @@ async def _fit_async( params = self.get_xgb_params() dtrain, evals = await _async_wrap_evaluation_matrices( client=self.client, + tree_method=self.tree_method, + max_bin=self.max_bin, X=X, y=y, group=None, @@ -1851,6 +1858,8 @@ async def _fit_async( params = self.get_xgb_params() dtrain, evals = await _async_wrap_evaluation_matrices( self.client, + tree_method=self.tree_method, + max_bin=self.max_bin, X=X, y=y, group=None, @@ -2057,6 +2066,8 @@ async def _fit_async( params = self.get_xgb_params() dtrain, evals = await _async_wrap_evaluation_matrices( self.client, + tree_method=self.tree_method, + max_bin=self.max_bin, X=X, y=y, group=None, diff --git a/python-package/xgboost/sklearn.py b/python-package/xgboost/sklearn.py index 98ad43af64ad..d820ccc01f87 100644 --- a/python-package/xgboost/sklearn.py +++ b/python-package/xgboost/sklearn.py @@ -38,6 +38,7 @@ Booster, DMatrix, Metric, + QuantileDMatrix, XGBoostError, _convert_ntree_limit, _deprecate_positional_args, @@ -430,7 +431,8 @@ def _wrap_evaluation_matrices( enable_categorical: bool, feature_types: Optional[FeatureTypes], ) -> Tuple[Any, List[Tuple[Any, str]]]: - """Convert array_like evaluation matrices into DMatrix. Perform validation on the way.""" + """Convert array_like evaluation matrices into DMatrix. Perform validation on the + way.""" train_dmatrix = create_dmatrix( data=X, label=y, @@ -442,6 +444,7 @@ def _wrap_evaluation_matrices( missing=missing, enable_categorical=enable_categorical, feature_types=feature_types, + ref=None, ) n_validation = 0 if eval_set is None else len(eval_set) @@ -491,6 +494,7 @@ def validate_or_none(meta: Optional[Sequence], name: str) -> Sequence: missing=missing, enable_categorical=enable_categorical, feature_types=feature_types, + ref=train_dmatrix, ) evals.append(m) nevals = len(evals) @@ -904,6 +908,17 @@ def _duplicated(parameter: str) -> None: return model, metric, params, early_stopping_rounds, callbacks + def _create_dmatrix(self, ref: Optional[DMatrix], **kwargs: Any) -> DMatrix: + # Use `QuantileDMatrix` to save memory. 
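+        # QuantileDMatrix keeps only the quantized (binned) representation that the
+        # hist/gpu_hist tree methods train on; inputs it cannot handle fall back to a
+        # regular DMatrix through the TypeError path below.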
+ if self.tree_method in ("hist", "gpu_hist"): + try: + return QuantileDMatrix( + **kwargs, ref=ref, nthread=self.n_jobs, max_bin=self.max_bin + ) + except TypeError: # `QuantileDMatrix` supports lesser types than DMatrix + pass + return DMatrix(**kwargs, nthread=self.n_jobs) + def _set_evaluation_result(self, evals_result: TrainingCallback.EvalsLog) -> None: if evals_result: self.evals_result_ = cast(Dict[str, Dict[str, List[float]]], evals_result) @@ -996,7 +1011,7 @@ def fit( base_margin_eval_set=base_margin_eval_set, eval_group=None, eval_qid=None, - create_dmatrix=lambda **kwargs: DMatrix(nthread=self.n_jobs, **kwargs), + create_dmatrix=self._create_dmatrix, enable_categorical=self.enable_categorical, feature_types=self.feature_types, ) @@ -1479,7 +1494,7 @@ def fit( base_margin_eval_set=base_margin_eval_set, eval_group=None, eval_qid=None, - create_dmatrix=lambda **kwargs: DMatrix(nthread=self.n_jobs, **kwargs), + create_dmatrix=self._create_dmatrix, enable_categorical=self.enable_categorical, feature_types=self.feature_types, ) @@ -1930,7 +1945,7 @@ def fit( base_margin_eval_set=base_margin_eval_set, eval_group=eval_group, eval_qid=eval_qid, - create_dmatrix=lambda **kwargs: DMatrix(nthread=self.n_jobs, **kwargs), + create_dmatrix=self._create_dmatrix, enable_categorical=self.enable_categorical, feature_types=self.feature_types, ) diff --git a/src/data/iterative_dmatrix.cc b/src/data/iterative_dmatrix.cc index f108c746ba09..30583a9439bc 100644 --- a/src/data/iterative_dmatrix.cc +++ b/src/data/iterative_dmatrix.cc @@ -7,6 +7,7 @@ #include "../common/column_matrix.h" #include "../common/hist_util.h" +#include "../tree/param.h" // FIXME(jiamingy): Find a better way to share this parameter. #include "gradient_index.h" #include "proxy_dmatrix.h" #include "simple_batch_iterator.h" @@ -14,6 +15,38 @@ namespace xgboost { namespace data { +IterativeDMatrix::IterativeDMatrix(DataIterHandle iter_handle, DMatrixHandle proxy, + std::shared_ptr ref, DataIterResetCallback* reset, + XGDMatrixCallbackNext* next, float missing, int nthread, + bst_bin_t max_bin) + : proxy_{proxy}, reset_{reset}, next_{next} { + // fetch the first batch + auto iter = + DataIterProxy{iter_handle, reset_, next_}; + iter.Reset(); + bool valid = iter.Next(); + CHECK(valid) << "Iterative DMatrix must have at least 1 batch."; + + auto d = MakeProxy(proxy_)->DeviceIdx(); + + StringView msg{"All batch should be on the same device."}; + if (batch_param_.gpu_id != Context::kCpuId) { + CHECK_EQ(d, batch_param_.gpu_id) << msg; + } + + batch_param_ = BatchParam{d, max_bin}; + // hardcoded parameter. + batch_param_.sparse_thresh = tree::TrainParam::DftSparseThreshold(); + + ctx_.UpdateAllowUnknown( + Args{{"nthread", std::to_string(nthread)}, {"gpu_id", std::to_string(d)}}); + if (ctx_.IsCPU()) { + this->InitFromCPU(iter_handle, missing, ref); + } else { + this->InitFromCUDA(iter_handle, missing, ref); + } +} + void GetCutsFromRef(std::shared_ptr ref_, bst_feature_t n_features, BatchParam p, common::HistogramCuts* p_cuts) { CHECK(ref_); @@ -199,6 +232,7 @@ void IterativeDMatrix::InitFromCPU(DataIterHandle iter_handle, float missing, if (n_batches == 1) { this->info_ = std::move(proxy->Info()); this->info_.num_nonzero_ = nnz; + this->info_.num_col_ = n_features; // proxy might be empty. 
CHECK_EQ(proxy->Info().labels.Size(), 0); } } @@ -210,6 +244,10 @@ BatchSet IterativeDMatrix::GetGradientIndex(BatchParam const& ghist_ = std::make_shared(&ctx_, Info(), *ellpack_, param); } + if (param.sparse_thresh != tree::TrainParam::DftSparseThreshold()) { + LOG(WARNING) << "`sparse_threshold` can not be changed when `QuantileDMatrix` is used instead " + "of `DMatrix`."; + } auto begin_iter = BatchIterator(new SimpleBatchIteratorImpl(ghist_)); return BatchSet(begin_iter); diff --git a/src/data/iterative_dmatrix.cu b/src/data/iterative_dmatrix.cu index 901662852a15..ceb470a5c7e5 100644 --- a/src/data/iterative_dmatrix.cu +++ b/src/data/iterative_dmatrix.cu @@ -173,8 +173,15 @@ BatchSet IterativeDMatrix::GetEllpackBatches(BatchParam const& para } if (!ellpack_ && ghist_) { ellpack_.reset(new EllpackPage()); - this->ctx_.gpu_id = param.gpu_id; - this->Info().feature_types.SetDevice(param.gpu_id); + // Evaluation QuantileDMatrix initialized from CPU data might not have the correct GPU + // ID. + if (this->ctx_.IsCPU()) { + this->ctx_.gpu_id = param.gpu_id; + } + if (this->ctx_.IsCPU()) { + this->ctx_.gpu_id = dh::CurrentDevice(); + } + this->Info().feature_types.SetDevice(this->ctx_.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&ctx_, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } diff --git a/src/data/iterative_dmatrix.h b/src/data/iterative_dmatrix.h index 7a8e5188c921..30358bb819ca 100644 --- a/src/data/iterative_dmatrix.h +++ b/src/data/iterative_dmatrix.h @@ -75,30 +75,7 @@ class IterativeDMatrix : public DMatrix { explicit IterativeDMatrix(DataIterHandle iter_handle, DMatrixHandle proxy, std::shared_ptr ref, DataIterResetCallback *reset, XGDMatrixCallbackNext *next, float missing, int nthread, - bst_bin_t max_bin) - : proxy_{proxy}, reset_{reset}, next_{next} { - // fetch the first batch - auto iter = - DataIterProxy{iter_handle, reset_, next_}; - iter.Reset(); - bool valid = iter.Next(); - CHECK(valid) << "Iterative DMatrix must have at least 1 batch."; - - auto d = MakeProxy(proxy_)->DeviceIdx(); - if (batch_param_.gpu_id != Context::kCpuId) { - CHECK_EQ(d, batch_param_.gpu_id) << "All batch should be on the same device."; - } - batch_param_ = BatchParam{d, max_bin}; - batch_param_.sparse_thresh = 0.2; // default from TrainParam - - ctx_.UpdateAllowUnknown( - Args{{"nthread", std::to_string(nthread)}, {"gpu_id", std::to_string(d)}}); - if (ctx_.IsCPU()) { - this->InitFromCPU(iter_handle, missing, ref); - } else { - this->InitFromCUDA(iter_handle, missing, ref); - } - } + bst_bin_t max_bin); ~IterativeDMatrix() override = default; bool EllpackExists() const override { return static_cast(ellpack_); } diff --git a/src/tree/param.h b/src/tree/param.h index 7930dde8975b..3f5e4ec7bc71 100644 --- a/src/tree/param.h +++ b/src/tree/param.h @@ -78,7 +78,9 @@ struct TrainParam : public XGBoostParameter { // ------ From CPU quantile histogram -------. // percentage threshold for treating a feature as sparse // e.g. 0.2 indicates a feature with fewer than 20% nonzeros is considered sparse - double sparse_threshold; + static constexpr double DftSparseThreshold() { return 0.2; } + + double sparse_threshold{DftSparseThreshold()}; // declare the parameters DMLC_DECLARE_PARAMETER(TrainParam) { @@ -182,7 +184,9 @@ struct TrainParam : public XGBoostParameter { "See tutorial for more information"); // ------ From cpu quantile histogram -------. 
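+    // The default is exposed through DftSparseThreshold() above so that
+    // IterativeDMatrix (QuantileDMatrix) can reuse the same value.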
- DMLC_DECLARE_FIELD(sparse_threshold).set_range(0, 1.0).set_default(0.2) + DMLC_DECLARE_FIELD(sparse_threshold) + .set_range(0, 1.0) + .set_default(DftSparseThreshold()) .describe("percentage threshold for treating a feature as sparse"); // add alias of parameters diff --git a/tests/jenkins_get_approval.py b/tests/jenkins_get_approval.py deleted file mode 100644 index 4a68722d9435..000000000000 --- a/tests/jenkins_get_approval.py +++ /dev/null @@ -1,26 +0,0 @@ -import boto3 -import json - -lambda_client = boto3.client('lambda', region_name='us-west-2') - -# Source code for the Lambda function is available at https://github.com/hcho3/xgboost-devops -r = lambda_client.invoke( - FunctionName='XGBoostCICostWatcher', - InvocationType='RequestResponse', - Payload='{}'.encode('utf-8') -) - -payload = r['Payload'].read().decode('utf-8') -if 'FunctionError' in r: - msg = 'Error when invoking the Lambda function. Stack trace:\n' - error = json.loads(payload) - msg += f" {error['errorType']}: {error['errorMessage']}\n" - for trace in error['stackTrace']: - for line in trace.split('\n'): - msg += f' {line}\n' - raise RuntimeError(msg) -response = json.loads(payload) -if response['approved']: - print(f"Testing approved. Reason: {response['reason']}") -else: - raise RuntimeError(f"Testing rejected. Reason: {response['reason']}") diff --git a/tests/python-gpu/test_gpu_data_iterator.py b/tests/python-gpu/test_gpu_data_iterator.py index f4eaab15e9c3..3a31c93b05cf 100644 --- a/tests/python-gpu/test_gpu_data_iterator.py +++ b/tests/python-gpu/test_gpu_data_iterator.py @@ -19,7 +19,7 @@ def test_gpu_single_batch() -> None: @given( strategies.integers(0, 1024), strategies.integers(1, 7), - strategies.integers(0, 13), + strategies.integers(0, 8), strategies.booleans(), ) @settings(deadline=None, print_blob=True) diff --git a/tests/python/test_data_iterator.py b/tests/python/test_data_iterator.py index 5e0e4686002f..f4d424b839eb 100644 --- a/tests/python/test_data_iterator.py +++ b/tests/python/test_data_iterator.py @@ -92,6 +92,12 @@ def run_data_iterator( assert non_increasing(results_from_it["Train"]["rmse"]) X, y, w = it.as_arrays() + if use_cupy: + _y = y.get() + else: + _y = y + np.testing.assert_allclose(Xy.get_label(), _y) + Xy = xgb.DMatrix(X, y, weight=w) assert Xy.num_row() == n_samples_per_batch * n_batches assert Xy.num_col() == n_features diff --git a/tests/python/testing.py b/tests/python/testing.py index 28afb30b6b8b..eb84605a27ac 100644 --- a/tests/python/testing.py +++ b/tests/python/testing.py @@ -12,6 +12,7 @@ import pytest import gc import xgboost as xgb +from xgboost.core import ArrayLike import numpy as np from scipy import sparse import platform @@ -212,13 +213,16 @@ def reset(self) -> None: def as_arrays( self, - ) -> Tuple[Union[np.ndarray, sparse.csr_matrix], np.ndarray, np.ndarray]: + ) -> Tuple[Union[np.ndarray, sparse.csr_matrix], ArrayLike, ArrayLike]: if isinstance(self.X[0], sparse.csr_matrix): X = sparse.vstack(self.X, format="csr") else: X = np.concatenate(self.X, axis=0) y = np.concatenate(self.y, axis=0) - w = np.concatenate(self.w, axis=0) + if self.w: + w = np.concatenate(self.w, axis=0) + else: + w = None return X, y, w diff --git a/tests/travis/run_test.sh b/tests/travis/run_test.sh deleted file mode 100755 index 4baf983e586a..000000000000 --- a/tests/travis/run_test.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash - -source $HOME/miniconda/bin/activate - -if [ ${TASK} == "python_sdist_test" ]; then - set -e - - conda activate python3 - python --version - cmake --version 
- - make pippack - python -m pip install xgboost-*.tar.gz -v --user - python -c 'import xgboost' || exit -1 -fi - -if [ ${TASK} == "python_test" ]; then - if grep -n -R '<<<.*>>>\(.*\)' src include | grep --invert "NOLINT"; then - echo 'Do not use raw CUDA execution configuration syntax with <<>>.' \ - 'try `dh::LaunchKernel`' - exit -1 - fi - - set -e - - - # Build binary wheel - if [ ${TRAVIS_CPU_ARCH} == "arm64" ]; then - # Build manylinux2014 wheel on ARM64 - tests/ci_build/ci_build.sh aarch64 docker tests/ci_build/build_via_cmake.sh --conda-env=aarch64_test - tests/ci_build/ci_build.sh aarch64 docker bash -c "cd build && ctest --extra-verbose" - tests/ci_build/ci_build.sh aarch64 docker bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal" - TAG=manylinux2014_aarch64 - tests/ci_build/ci_build.sh aarch64 docker python tests/ci_build/rename_whl.py python-package/dist/*.whl ${TRAVIS_COMMIT} ${TAG} - tests/ci_build/ci_build.sh aarch64 docker auditwheel repair --plat ${TAG} python-package/dist/*.whl - mv -v wheelhouse/*.whl python-package/dist/ - # Make sure that libgomp.so is vendored in the wheel - unzip -l python-package/dist/*.whl | grep libgomp || exit -1 - else - rm -rf build - mkdir build && cd build - conda activate python3 - cmake --version - cmake .. -DUSE_OPENMP=ON -DCMAKE_VERBOSE_MAKEFILE=ON - make -j$(nproc) - cd ../python-package - python setup.py bdist_wheel - cd .. - TAG=macosx_10_14_x86_64.macosx_10_15_x86_64.macosx_11_0_x86_64 - python tests/ci_build/rename_whl.py python-package/dist/*.whl ${TRAVIS_COMMIT} ${TAG} - fi - - # Run unit tests - echo "------------------------------" - if [ ${TRAVIS_CPU_ARCH} == "arm64" ]; then - tests/ci_build/ci_build.sh aarch64 docker \ - bash -c "source activate aarch64_test && python -m pip install ./python-package/dist/xgboost-*-py3-none-${TAG}.whl && python -m pytest -v -s -rxXs --durations=0 --fulltrace tests/python/test_basic.py tests/python/test_basic_models.py tests/python/test_model_compatibility.py --cov=python-package/xgboost" - else - conda env create -n cpu_test --file=tests/ci_build/conda_env/macos_cpu_test.yml - conda activate cpu_test - python -m pip install ./python-package/dist/xgboost-*-py3-none-${TAG}.whl - conda --version - python --version - python -m pytest -v -s -rxXs --durations=0 --fulltrace tests/python --cov=python-package/xgboost || exit -1 - fi - conda activate python3 - codecov - - # Deploy binary wheel to S3 - if [ "${TRAVIS_PULL_REQUEST}" != "false" ] - then - S3_DEST="s3://xgboost-nightly-builds/PR-${TRAVIS_PULL_REQUEST}/" - else - if [ "${TRAVIS_BRANCH}" == "master" ] - then - S3_DEST="s3://xgboost-nightly-builds/" - elif [ -z "${TRAVIS_TAG}" ] - then - S3_DEST="s3://xgboost-nightly-builds/${TRAVIS_BRANCH}/" - fi - fi - python -m awscli s3 cp python-package/dist/*.whl "${S3_DEST}" --acl public-read || true -fi - -if [ ${TASK} == "java_test" ]; then - export RABIT_MOCK=ON - conda activate python3 - cd jvm-packages - mvn -q clean install -DskipTests -Dmaven.test.skip - mvn -q test -fi - -if [ ${TASK} == "s390x_test" ]; then - set -e - python3 -m pip install --user pytest hypothesis cmake - - # Build and run C++ tests - rm -rf build - mkdir build && cd build - cmake .. -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -GNinja - time ninja -v - ./testxgboost - - # Run model compatibility tests - cd .. 
- PYTHONPATH=./python-package python3 -m pytest --fulltrace -v -rxXs tests/python/test_basic.py -fi diff --git a/tests/travis/setup.sh b/tests/travis/setup.sh deleted file mode 100755 index 405266e171a0..000000000000 --- a/tests/travis/setup.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -if [ ${TASK} == "python_test" ] || [ ${TASK} == "python_sdist_test" ]; then - if [ ${TRAVIS_OS_NAME} == "osx" ]; then - wget --no-verbose -O conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh - elif [ ${TRAVIS_CPU_ARCH} == "arm64" ]; then - wget --no-verbose -O conda.sh https://github.com/conda-forge/miniforge/releases/download/4.8.2-1/Miniforge3-4.8.2-1-Linux-aarch64.sh - else - wget --no-verbose -O conda.sh https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh - fi - bash conda.sh -b -p $HOME/miniconda - source $HOME/miniconda/bin/activate - hash -r - conda config --set always_yes yes --set changeps1 no - conda update -q conda - # Useful for debugging any issues with conda - conda info -a - conda create -n python3 python=3.7 cmake numpy scipy codecov - conda activate python3 - python -m pip install awscli -fi - -if [ ${TASK} == "s390x_test" ] && [ ${TRAVIS_CPU_ARCH} == "s390x" ]; then - sudo apt-get update - sudo apt-get install -y --no-install-recommends tar unzip wget git build-essential ninja-build \ - time python3 python3-pip python3-numpy python3-scipy python3-sklearn r-base -fi diff --git a/tests/travis/travis_after_failure.sh b/tests/travis/travis_after_failure.sh deleted file mode 100755 index 553cc979e37e..000000000000 --- a/tests/travis/travis_after_failure.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -if [ ${TASK} == "r_test" ]; then - cat xgboost/xgboost.Rcheck/*.log - echo "--------------------------" - cat xgboost/xgboost.Rcheck/*.out -fi diff --git a/tests/travis/travis_before_cache.sh b/tests/travis/travis_before_cache.sh deleted file mode 100755 index 6789ae08ef43..000000000000 --- a/tests/travis/travis_before_cache.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -# do nothing for now -ls -alLR ${CACHE_PREFIX} \ No newline at end of file diff --git a/tests/travis/travis_setup_env.sh b/tests/travis/travis_setup_env.sh deleted file mode 100644 index 7f4af313e138..000000000000 --- a/tests/travis/travis_setup_env.sh +++ /dev/null @@ -1,40 +0,0 @@ -# script to be sourced in travis yml -# setup all enviroment variables - -export CACHE_PREFIX=${HOME}/.cache/usr -export PATH=${HOME}/.local/bin:${PATH} -export PATH=${PATH}:${CACHE_PREFIX}/bin -export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:${CACHE_PREFIX}/include -export C_INCLUDE_PATH=${C_INCLUDE_PATH}:${CACHE_PREFIX}/include -export LIBRARY_PATH=${LIBRARY_PATH}:${CACHE_PREFIX}/lib -export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CACHE_PREFIX}/lib -export DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}:${CACHE_PREFIX}/lib - -alias make="make -j4" - -# setup the cache prefix folder -if [ ! -d ${HOME}/.cache ]; then - mkdir ${HOME}/.cache -fi - -if [ ! -d ${CACHE_PREFIX} ]; then - mkdir ${CACHE_PREFIX} -fi -if [ ! -d ${CACHE_PREFIX}/include ]; then - mkdir ${CACHE_PREFIX}/include -fi -if [ ! -d ${CACHE_PREFIX}/lib ]; then - mkdir ${CACHE_PREFIX}/lib -fi -if [ ! -d ${CACHE_PREFIX}/bin ]; then - mkdir ${CACHE_PREFIX}/bin -fi - -# setup CUDA path if NVCC_PREFIX exists -if [ ! 
-z "$NVCC_PREFIX" ]; then - export PATH=${PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/bin - export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/include - export C_INCLUDE_PATH=${C_INCLUDE_PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/include - export LIBRARY_PATH=${LIBRARY_PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/lib64:${NVCC_PREFIX}/usr/lib/x86_64-linux-gnu - export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${NVCC_PREFIX}/usr/local/cuda-7.5/lib64:${NVCC_PREFIX}/usr/lib/x86_64-linux-gnu -fi