diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..c6198fb1 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,8 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. +# +# For syntax help see: +# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax + + +/samples/**/*.py @telpirion @sirtorry @googleapis/python-samples-owners \ No newline at end of file diff --git a/.github/snippet-bot.yml b/.github/snippet-bot.yml index 8b137891..e69de29b 100644 --- a/.github/snippet-bot.yml +++ b/.github/snippet-bot.yml @@ -1 +0,0 @@ - diff --git a/.gitignore b/.gitignore index b87e1ed5..b9daa52f 100644 --- a/.gitignore +++ b/.gitignore @@ -46,6 +46,7 @@ pip-log.txt # Built documentation docs/_build bigquery/docs/generated +docs.metadata # Virtual environment env/ @@ -57,4 +58,4 @@ system_tests/local_test_setup # Make sure a generated file isn't accidentally committed. pylintrc -pylintrc.test \ No newline at end of file +pylintrc.test diff --git a/.kokoro/build.sh b/.kokoro/build.sh index c46b1e63..6c43fa50 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -36,4 +36,10 @@ python3.6 -m pip uninstall --yes --quiet nox-automation python3.6 -m pip install --upgrade --quiet nox python3.6 -m nox --version -python3.6 -m nox +# If NOX_SESSION is set, it only runs the specified session, +# otherwise run all the sessions. +if [[ -n "${NOX_SESSION:-}" ]]; then + python3.6 -m nox -s "${NOX_SESSION:-}" +else + python3.6 -m nox +fi diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile new file mode 100644 index 00000000..412b0b56 --- /dev/null +++ b/.kokoro/docker/docs/Dockerfile @@ -0,0 +1,98 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ubuntu:20.04 + +ENV DEBIAN_FRONTEND noninteractive + +# Ensure local Python is preferred over distribution Python. +ENV PATH /usr/local/bin:$PATH + +# Install dependencies. +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + apt-transport-https \ + build-essential \ + ca-certificates \ + curl \ + dirmngr \ + git \ + gpg-agent \ + graphviz \ + libbz2-dev \ + libdb5.3-dev \ + libexpat1-dev \ + libffi-dev \ + liblzma-dev \ + libreadline-dev \ + libsnappy-dev \ + libssl-dev \ + libsqlite3-dev \ + portaudio19-dev \ + redis-server \ + software-properties-common \ + ssh \ + sudo \ + tcl \ + tcl-dev \ + tk \ + tk-dev \ + uuid-dev \ + wget \ + zlib1g-dev \ + && add-apt-repository universe \ + && apt-get update \ + && apt-get -y install jq \ + && apt-get clean autoclean \ + && apt-get autoremove -y \ + && rm -rf /var/lib/apt/lists/* \ + && rm -f /var/cache/apt/archives/*.deb + + +COPY fetch_gpg_keys.sh /tmp +# Install the desired versions of Python. 
+RUN set -ex \ + && export GNUPGHOME="$(mktemp -d)" \ + && echo "disable-ipv6" >> "${GNUPGHOME}/dirmngr.conf" \ + && /tmp/fetch_gpg_keys.sh \ + && for PYTHON_VERSION in 3.7.8 3.8.5; do \ + wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \ + && wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \ + && gpg --batch --verify python-${PYTHON_VERSION}.tar.xz.asc python-${PYTHON_VERSION}.tar.xz \ + && rm -r python-${PYTHON_VERSION}.tar.xz.asc \ + && mkdir -p /usr/src/python-${PYTHON_VERSION} \ + && tar -xJC /usr/src/python-${PYTHON_VERSION} --strip-components=1 -f python-${PYTHON_VERSION}.tar.xz \ + && rm python-${PYTHON_VERSION}.tar.xz \ + && cd /usr/src/python-${PYTHON_VERSION} \ + && ./configure \ + --enable-shared \ + # This works only on Python 2.7 and throws a warning on every other + # version, but seems otherwise harmless. + --enable-unicode=ucs4 \ + --with-system-ffi \ + --without-ensurepip \ + && make -j$(nproc) \ + && make install \ + && ldconfig \ + ; done \ + && rm -rf "${GNUPGHOME}" \ + && rm -rf /usr/src/python* \ + && rm -rf ~/.cache/ + +RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ + && python3.7 /tmp/get-pip.py \ + && python3.8 /tmp/get-pip.py \ + && rm /tmp/get-pip.py + +CMD ["python3.7"] diff --git a/.kokoro/docker/docs/fetch_gpg_keys.sh b/.kokoro/docker/docs/fetch_gpg_keys.sh new file mode 100755 index 00000000..d653dd86 --- /dev/null +++ b/.kokoro/docker/docs/fetch_gpg_keys.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A script to fetch gpg keys with retry. +# Avoid jinja parsing the file. +# + +function retry { + if [[ "${#}" -le 1 ]]; then + echo "Usage: ${0} retry_count commands.." + exit 1 + fi + local retries=${1} + local command="${@:2}" + until [[ "${retries}" -le 0 ]]; do + $command && return 0 + if [[ $? -ne 0 ]]; then + echo "command failed, retrying" + ((retries--)) + fi + done + return 1 +} + +# 3.6.9, 3.7.5 (Ned Deily) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D + +# 3.8.0 (Łukasz Langa) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + E3FF2839C048B25C084DEBE9B26995E310250568 + +# diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg index b283fb1c..b4c987d2 100644 --- a/.kokoro/docs/common.cfg +++ b/.kokoro/docs/common.cfg @@ -11,12 +11,12 @@ action { gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. -build_file: "python-vision/.kokoro/trampoline.sh" +build_file: "python-vision/.kokoro/trampoline_v2.sh" # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" + value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs" } env_vars: { key: "TRAMPOLINE_BUILD_FILE" @@ -28,6 +28,23 @@ env_vars: { value: "docs-staging" } +env_vars: { + key: "V2_STAGING_BUCKET" + value: "docs-staging-v2-staging" +} + +# It will upload the docker image after successful builds. 
+env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "true" +} + +# It will always build the docker image. +env_vars: { + key: "TRAMPOLINE_DOCKERFILE" + value: ".kokoro/docker/docs/Dockerfile" +} + # Fetch the token needed for reporting release status to GitHub before_action { fetch_keystore { diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg new file mode 100644 index 00000000..11181078 --- /dev/null +++ b/.kokoro/docs/docs-presubmit.cfg @@ -0,0 +1,17 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "STAGING_BUCKET" + value: "gcloud-python-test" +} + +env_vars: { + key: "V2_STAGING_BUCKET" + value: "gcloud-python-test" +} + +# We only upload the image in the main `docs` build. +env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "false" +} diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index 237f0e87..8acb14e8 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -18,26 +18,16 @@ set -eo pipefail # Disable buffering, so that the logs stream through. 
export PYTHONUNBUFFERED=1 -cd github/python-vision - -# Remove old nox -python3.6 -m pip uninstall --yes --quiet nox-automation +export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3.6 -m pip install --upgrade --quiet nox -python3.6 -m nox --version +python3 -m pip install --user --upgrade --quiet nox +python3 -m nox --version # build docs nox -s docs -python3 -m pip install gcp-docuploader - -# install a json parser -sudo apt-get update -sudo apt-get -y install software-properties-common -sudo add-apt-repository universe -sudo apt-get update -sudo apt-get -y install jq +python3 -m pip install --user gcp-docuploader # create metadata python3 -m docuploader create-metadata \ @@ -52,4 +42,23 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket docs-staging +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" + + +# docfx yaml files +nox -s docfx + +# create metadata. 
+python3 -m docuploader create-metadata \ + --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ + --version=$(python3 setup.py --version) \ + --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ + --distribution-name=$(python3 setup.py --name) \ + --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ + --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ + --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) + +cat docs.metadata + +# upload docs +python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh new file mode 100755 index 00000000..719bcd5b --- /dev/null +++ b/.kokoro/trampoline_v2.sh @@ -0,0 +1,487 @@ +#!/usr/bin/env bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# trampoline_v2.sh +# +# This script does 3 things. +# +# 1. Prepare the Docker image for the test +# 2. Run the Docker with appropriate flags to run the test +# 3. Upload the newly built Docker image +# +# in a way that is somewhat compatible with trampoline_v1. +# +# To run this script, first download few files from gcs to /dev/shm. +# (/dev/shm is passed into the container as KOKORO_GFILE_DIR). 
+# +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm +# +# Then run the script. +# .kokoro/trampoline_v2.sh +# +# These environment variables are required: +# TRAMPOLINE_IMAGE: The docker image to use. +# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile. +# +# You can optionally change these environment variables: +# TRAMPOLINE_IMAGE_UPLOAD: +# (true|false): Whether to upload the Docker image after the +# successful builds. +# TRAMPOLINE_BUILD_FILE: The script to run in the docker container. +# TRAMPOLINE_WORKSPACE: The workspace path in the docker container. +# Defaults to /workspace. +# Potentially there are some repo specific envvars in .trampolinerc in +# the project root. + + +set -euo pipefail + +TRAMPOLINE_VERSION="2.0.5" + +if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then + readonly IO_COLOR_RED="$(tput setaf 1)" + readonly IO_COLOR_GREEN="$(tput setaf 2)" + readonly IO_COLOR_YELLOW="$(tput setaf 3)" + readonly IO_COLOR_RESET="$(tput sgr0)" +else + readonly IO_COLOR_RED="" + readonly IO_COLOR_GREEN="" + readonly IO_COLOR_YELLOW="" + readonly IO_COLOR_RESET="" +fi + +function function_exists { + [ $(LC_ALL=C type -t $1)"" == "function" ] +} + +# Logs a message using the given color. The first argument must be one +# of the IO_COLOR_* variables defined above, such as +# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the +# given color. The log message will also have an RFC-3339 timestamp +# prepended (in UTC). You can disable the color output by setting +# TERM=vt100. 
+function log_impl() { + local color="$1" + shift + local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")" + echo "================================================================" + echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}" + echo "================================================================" +} + +# Logs the given message with normal coloring and a timestamp. +function log() { + log_impl "${IO_COLOR_RESET}" "$@" +} + +# Logs the given message in green with a timestamp. +function log_green() { + log_impl "${IO_COLOR_GREEN}" "$@" +} + +# Logs the given message in yellow with a timestamp. +function log_yellow() { + log_impl "${IO_COLOR_YELLOW}" "$@" +} + +# Logs the given message in red with a timestamp. +function log_red() { + log_impl "${IO_COLOR_RED}" "$@" +} + +readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX) +readonly tmphome="${tmpdir}/h" +mkdir -p "${tmphome}" + +function cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +RUNNING_IN_CI="${RUNNING_IN_CI:-false}" + +# The workspace in the container, defaults to /workspace. +TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}" + +pass_down_envvars=( + # TRAMPOLINE_V2 variables. + # Tells scripts whether they are running as part of CI or not. + "RUNNING_IN_CI" + # Indicates which CI system we're in. + "TRAMPOLINE_CI" + # Indicates the version of the script. + "TRAMPOLINE_VERSION" +) + +log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}" + +# Detect which CI systems we're in. If we're in any of the CI systems +# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be +# the name of the CI system. Both envvars will be passing down to the +# container for telling which CI system we're in. +if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then + # descriptive env var for indicating it's on CI. + RUNNING_IN_CI="true" + TRAMPOLINE_CI="kokoro" + if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then + if [[ ! 
-f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then + log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting." + exit 1 + fi + # This service account will be activated later. + TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" + else + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + gcloud auth list + fi + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet + fi + pass_down_envvars+=( + # KOKORO dynamic variables. + "KOKORO_BUILD_NUMBER" + "KOKORO_BUILD_ID" + "KOKORO_JOB_NAME" + "KOKORO_GIT_COMMIT" + "KOKORO_GITHUB_COMMIT" + "KOKORO_GITHUB_PULL_REQUEST_NUMBER" + "KOKORO_GITHUB_PULL_REQUEST_COMMIT" + # For Build Cop Bot + "KOKORO_GITHUB_COMMIT_URL" + "KOKORO_GITHUB_PULL_REQUEST_URL" + ) +elif [[ "${TRAVIS:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="travis" + pass_down_envvars+=( + "TRAVIS_BRANCH" + "TRAVIS_BUILD_ID" + "TRAVIS_BUILD_NUMBER" + "TRAVIS_BUILD_WEB_URL" + "TRAVIS_COMMIT" + "TRAVIS_COMMIT_MESSAGE" + "TRAVIS_COMMIT_RANGE" + "TRAVIS_JOB_NAME" + "TRAVIS_JOB_NUMBER" + "TRAVIS_JOB_WEB_URL" + "TRAVIS_PULL_REQUEST" + "TRAVIS_PULL_REQUEST_BRANCH" + "TRAVIS_PULL_REQUEST_SHA" + "TRAVIS_PULL_REQUEST_SLUG" + "TRAVIS_REPO_SLUG" + "TRAVIS_SECURE_ENV_VARS" + "TRAVIS_TAG" + ) +elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="github-workflow" + pass_down_envvars+=( + "GITHUB_WORKFLOW" + "GITHUB_RUN_ID" + "GITHUB_RUN_NUMBER" + "GITHUB_ACTION" + "GITHUB_ACTIONS" + "GITHUB_ACTOR" + "GITHUB_REPOSITORY" + "GITHUB_EVENT_NAME" + "GITHUB_EVENT_PATH" + "GITHUB_SHA" + "GITHUB_REF" + "GITHUB_HEAD_REF" + "GITHUB_BASE_REF" + ) +elif [[ "${CIRCLECI:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="circleci" + pass_down_envvars+=( + "CIRCLE_BRANCH" + "CIRCLE_BUILD_NUM" + "CIRCLE_BUILD_URL" + "CIRCLE_COMPARE_URL" + 
"CIRCLE_JOB" + "CIRCLE_NODE_INDEX" + "CIRCLE_NODE_TOTAL" + "CIRCLE_PREVIOUS_BUILD_NUM" + "CIRCLE_PROJECT_REPONAME" + "CIRCLE_PROJECT_USERNAME" + "CIRCLE_REPOSITORY_URL" + "CIRCLE_SHA1" + "CIRCLE_STAGE" + "CIRCLE_USERNAME" + "CIRCLE_WORKFLOW_ID" + "CIRCLE_WORKFLOW_JOB_ID" + "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS" + "CIRCLE_WORKFLOW_WORKSPACE_ID" + ) +fi + +# Configure the service account for pulling the docker image. +function repo_root() { + local dir="$1" + while [[ ! -d "${dir}/.git" ]]; do + dir="$(dirname "$dir")" + done + echo "${dir}" +} + +# Detect the project root. In CI builds, we assume the script is in +# the git tree and traverse from there, otherwise, traverse from `pwd` +# to find `.git` directory. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + PROGRAM_PATH="$(realpath "$0")" + PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")" + PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")" +else + PROJECT_ROOT="$(repo_root $(pwd))" +fi + +log_yellow "Changing to the project root: ${PROJECT_ROOT}." +cd "${PROJECT_ROOT}" + +# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need +# to use this environment variable in `PROJECT_ROOT`. +if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then + + mkdir -p "${tmpdir}/gcloud" + gcloud_config_dir="${tmpdir}/gcloud" + + log_yellow "Using isolated gcloud config: ${gcloud_config_dir}." + export CLOUDSDK_CONFIG="${gcloud_config_dir}" + + log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication." + gcloud auth activate-service-account \ + --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}" + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet +fi + +required_envvars=( + # The basic trampoline configurations. + "TRAMPOLINE_IMAGE" + "TRAMPOLINE_BUILD_FILE" +) + +if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then + source "${PROJECT_ROOT}/.trampolinerc" +fi + +log_yellow "Checking environment variables." 
+for e in "${required_envvars[@]}" +do + if [[ -z "${!e:-}" ]]; then + log "Missing ${e} env var. Aborting." + exit 1 + fi +done + +# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1 +# script: e.g. "github/repo-name/.kokoro/run_tests.sh" +TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}" +log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}" + +# ignore error on docker operations and test execution +set +e + +log_yellow "Preparing Docker image." +# We only download the docker image in CI builds. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + # Download the docker image specified by `TRAMPOLINE_IMAGE` + + # We may want to add --max-concurrent-downloads flag. + + log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}." + if docker pull "${TRAMPOLINE_IMAGE}"; then + log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="true" + else + log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="false" + fi +else + # For local run, check if we have the image. + if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then + has_image="true" + else + has_image="false" + fi +fi + + +# The default user for a Docker container has uid 0 (root). To avoid +# creating root-owned files in the build directory we tell docker to +# use the current user ID. +user_uid="$(id -u)" +user_gid="$(id -g)" +user_name="$(id -un)" + +# To allow docker in docker, we add the user to the docker group in +# the host os. +docker_gid=$(cut -d: -f3 < <(getent group docker)) + +update_cache="false" +if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then + # Build the Docker image from the source. 
+ context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}") + docker_build_flags=( + "-f" "${TRAMPOLINE_DOCKERFILE}" + "-t" "${TRAMPOLINE_IMAGE}" + "--build-arg" "UID=${user_uid}" + "--build-arg" "USERNAME=${user_name}" + ) + if [[ "${has_image}" == "true" ]]; then + docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}") + fi + + log_yellow "Start building the docker image." + if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then + echo "docker build" "${docker_build_flags[@]}" "${context_dir}" + fi + + # ON CI systems, we want to suppress docker build logs, only + # output the logs when it fails. + if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + if docker build "${docker_build_flags[@]}" "${context_dir}" \ + > "${tmpdir}/docker_build.log" 2>&1; then + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + cat "${tmpdir}/docker_build.log" + fi + + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + log_yellow "Dumping the build logs:" + cat "${tmpdir}/docker_build.log" + exit 1 + fi + else + if docker build "${docker_build_flags[@]}" "${context_dir}"; then + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + exit 1 + fi + fi +else + if [[ "${has_image}" != "true" ]]; then + log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting." + exit 1 + fi +fi + +# We use an array for the flags so they are easier to document. +docker_flags=( + # Remove the container after it exists. + "--rm" + + # Use the host network. + "--network=host" + + # Run in priviledged mode. We are not using docker for sandboxing or + # isolation, just for packaging our dev tools. + "--privileged" + + # Run the docker script with the user id. Because the docker image gets to + # write in ${PWD} you typically want this to be your user id. + # To allow docker in docker, we need to use docker gid on the host. 
+ "--user" "${user_uid}:${docker_gid}" + + # Pass down the USER. + "--env" "USER=${user_name}" + + # Mount the project directory inside the Docker container. + "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}" + "--workdir" "${TRAMPOLINE_WORKSPACE}" + "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}" + + # Mount the temporary home directory. + "--volume" "${tmphome}:/h" + "--env" "HOME=/h" + + # Allow docker in docker. + "--volume" "/var/run/docker.sock:/var/run/docker.sock" + + # Mount the /tmp so that docker in docker can mount the files + # there correctly. + "--volume" "/tmp:/tmp" + # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR + # TODO(tmatsuo): This part is not portable. + "--env" "TRAMPOLINE_SECRET_DIR=/secrets" + "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile" + "--env" "KOKORO_GFILE_DIR=/secrets/gfile" + "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore" + "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore" +) + +# Add an option for nicer output if the build gets a tty. +if [[ -t 0 ]]; then + docker_flags+=("-it") +fi + +# Passing down env vars +for e in "${pass_down_envvars[@]}" +do + if [[ -n "${!e:-}" ]]; then + docker_flags+=("--env" "${e}=${!e}") + fi +done + +# If arguments are given, all arguments will become the commands run +# in the container, otherwise run TRAMPOLINE_BUILD_FILE. +if [[ $# -ge 1 ]]; then + log_yellow "Running the given commands '" "${@:1}" "' in the container." + readonly commands=("${@:1}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" +else + log_yellow "Running the tests in a Docker container." 
+ docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" +fi + + +test_retval=$? + +if [[ ${test_retval} -eq 0 ]]; then + log_green "Build finished with ${test_retval}" +else + log_red "Build finished with ${test_retval}" +fi + +# Only upload it when the test passes. +if [[ "${update_cache}" == "true" ]] && \ + [[ $test_retval == 0 ]] && \ + [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then + log_yellow "Uploading the Docker image." + if docker push "${TRAMPOLINE_IMAGE}"; then + log_green "Finished uploading the Docker image." + else + log_red "Failed uploading the Docker image." + fi + # Call trampoline_after_upload_hook if it's defined. + if function_exists trampoline_after_upload_hook; then + trampoline_after_upload_hook + fi + +fi + +exit "${test_retval}" diff --git a/.trampolinerc b/.trampolinerc new file mode 100644 index 00000000..995ee291 --- /dev/null +++ b/.trampolinerc @@ -0,0 +1,51 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Template for .trampolinerc + +# Add required env vars here. +required_envvars+=( + "STAGING_BUCKET" + "V2_STAGING_BUCKET" +) + +# Add env vars which are passed down into the container here. +pass_down_envvars+=( + "STAGING_BUCKET" + "V2_STAGING_BUCKET" +) + +# Prevent unintentional override on the default image. 
+if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \ + [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image." + exit 1 +fi + +# Define the default value if it makes sense. +if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then + TRAMPOLINE_IMAGE_UPLOAD="" +fi + +if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + TRAMPOLINE_IMAGE="" +fi + +if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then + TRAMPOLINE_DOCKERFILE="" +fi + +if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then + TRAMPOLINE_BUILD_FILE="" +fi diff --git a/docs/conf.py b/docs/conf.py index aa411bea..1d281a4e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,12 +20,16 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) +# For plugins that can not read conf.py. +# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85 +sys.path.insert(0, os.path.abspath(".")) + __version__ = "" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" +needs_sphinx = "1.5.5" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -90,7 +94,12 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ["_build"] +exclude_patterns = [ + "_build", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/snippets/README.rst", +] # The reST default role (used for this markup: `text`) to use for all # documents. 
diff --git a/noxfile.py b/noxfile.py index d7260c64..a6a01730 100644 --- a/noxfile.py +++ b/noxfile.py @@ -100,6 +100,10 @@ def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") + + # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. + if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": + session.skip("RUN_SYSTEM_TESTS is set to false, skipping") # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") @@ -158,3 +162,38 @@ def docs(session): os.path.join("docs", ""), os.path.join("docs", "_build", "html", ""), ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docfx(session): + """Build the docfx yaml files for this library.""" + + session.install("-e", ".") + # sphinx-docfx-yaml supports up to sphinx version 1.5.5. 
+ # https://github.com/docascode/sphinx-docfx-yaml/issues/97 + session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-T", # show full traceback on exception + "-N", # no colors + "-D", + ( + "extensions=sphinx.ext.autodoc," + "sphinx.ext.autosummary," + "docfx_yaml.extension," + "sphinx.ext.intersphinx," + "sphinx.ext.coverage," + "sphinx.ext.napoleon," + "sphinx.ext.todo," + "sphinx.ext.viewcode," + "recommonmark" + ), + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/samples/AUTHORING_GUIDE.md b/samples/AUTHORING_GUIDE.md new file mode 100644 index 00000000..55c97b32 --- /dev/null +++ b/samples/AUTHORING_GUIDE.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md \ No newline at end of file diff --git a/samples/CONTRIBUTING.md b/samples/CONTRIBUTING.md new file mode 100644 index 00000000..34c882b6 --- /dev/null +++ b/samples/CONTRIBUTING.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md \ No newline at end of file diff --git a/samples/snippets/crop_hints/.gitignore b/samples/snippets/crop_hints/.gitignore new file mode 100644 index 00000000..69e00386 --- /dev/null +++ b/samples/snippets/crop_hints/.gitignore @@ -0,0 +1,2 @@ +output-crop.jpg +output-hint.jpg diff --git a/samples/snippets/crop_hints/README.rst b/samples/snippets/crop_hints/README.rst new file mode 100644 index 00000000..4ca8652f --- /dev/null +++ b/samples/snippets/crop_hints/README.rst @@ -0,0 +1,111 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/crop_hints/README.rst + + +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. 
code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Crop Hints Tutorial ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/crop_hints/crop_hints.py,vision/cloud-client/crop_hints/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python crop_hints.py + + usage: crop_hints.py [-h] image_file mode + + positional arguments: + image_file The image you'd like to crop. + mode Set to "crop" or "draw". + + optional arguments: + -h, --help show this help message and exit + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/crop_hints/README.rst.in b/samples/snippets/crop_hints/README.rst.in new file mode 100644 index 00000000..113d2771 --- /dev/null +++ b/samples/snippets/crop_hints/README.rst.in @@ -0,0 +1,30 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Crop Hints Tutorial + file: crop_hints.py + show_help: True + +cloud_client_library: true + +folder: vision/cloud-client/crop_hints \ No newline at end of file diff --git a/samples/snippets/crop_hints/crop_hints.py b/samples/snippets/crop_hints/crop_hints.py new file mode 100644 index 00000000..611eb564 --- /dev/null +++ b/samples/snippets/crop_hints/crop_hints.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python + +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Outputs a cropped image or an image highlighting crop regions on an image. + +Examples: + python crop_hints.py resources/cropme.jpg draw + python crop_hints.py resources/cropme.jpg crop +""" +# [START vision_crop_hints_tutorial] +# [START vision_crop_hints_tutorial_imports] +import argparse +import io + +from google.cloud import vision +from google.cloud.vision import types +from PIL import Image, ImageDraw +# [END vision_crop_hints_tutorial_imports] + + +def get_crop_hint(path): + # [START vision_crop_hints_tutorial_get_crop_hints] + """Detect crop hints on a single image and return the first result.""" + client = vision.ImageAnnotatorClient() + + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = types.Image(content=content) + + crop_hints_params = types.CropHintsParams(aspect_ratios=[1.77]) + image_context = types.ImageContext(crop_hints_params=crop_hints_params) + + response = client.crop_hints(image=image, image_context=image_context) + hints = response.crop_hints_annotation.crop_hints + + # Get bounds for the first crop hint using an aspect ratio of 1.77. 
+ vertices = hints[0].bounding_poly.vertices + # [END vision_crop_hints_tutorial_get_crop_hints] + + return vertices + + +def draw_hint(image_file): + """Draw a border around the image using the hints in the vector list.""" + # [START vision_crop_hints_tutorial_draw_crop_hints] + vects = get_crop_hint(image_file) + + im = Image.open(image_file) + draw = ImageDraw.Draw(im) + draw.polygon([ + vects[0].x, vects[0].y, + vects[1].x, vects[1].y, + vects[2].x, vects[2].y, + vects[3].x, vects[3].y], None, 'red') + im.save('output-hint.jpg', 'JPEG') + print('Saved new image to output-hint.jpg') + # [END vision_crop_hints_tutorial_draw_crop_hints] + + +def crop_to_hint(image_file): + """Crop the image using the hints in the vector list.""" + # [START vision_crop_hints_tutorial_crop_to_hints] + vects = get_crop_hint(image_file) + + im = Image.open(image_file) + im2 = im.crop([vects[0].x, vects[0].y, + vects[2].x - 1, vects[2].y - 1]) + im2.save('output-crop.jpg', 'JPEG') + print('Saved new image to output-crop.jpg') + # [END vision_crop_hints_tutorial_crop_to_hints] + + +if __name__ == '__main__': + # [START vision_crop_hints_tutorial_run_application] + parser = argparse.ArgumentParser() + parser.add_argument('image_file', help='The image you\'d like to crop.') + parser.add_argument('mode', help='Set to "crop" or "draw".') + args = parser.parse_args() + + if args.mode == 'crop': + crop_to_hint(args.image_file) + elif args.mode == 'draw': + draw_hint(args.image_file) + # [END vision_crop_hints_tutorial_run_application] +# [END vision_crop_hints_tutorial] diff --git a/samples/snippets/crop_hints/crop_hints_test.py b/samples/snippets/crop_hints/crop_hints_test.py new file mode 100644 index 00000000..2ba900f4 --- /dev/null +++ b/samples/snippets/crop_hints/crop_hints_test.py @@ -0,0 +1,37 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import crop_hints


def test_crop(capsys):
    """Checks the output image for cropping the image is created."""
    file_name = os.path.join(
        os.path.dirname(__file__),
        'resources/cropme.jpg')
    crop_hints.crop_to_hint(file_name)
    out, _ = capsys.readouterr()
    # Assert on the captured stdout instead of leaving `out` unused
    # (the dead capture was flagged by flake8 as F841).
    assert 'Saved new image to output-crop.jpg' in out
    assert os.path.isfile('output-crop.jpg')


def test_draw(capsys):
    """Checks the output image for drawing the crop hint is created."""
    file_name = os.path.join(
        os.path.dirname(__file__),
        'resources/cropme.jpg')
    crop_hints.draw_hint(file_name)
    out, _ = capsys.readouterr()
    # Same fix as test_crop: use the captured output in an assertion.
    assert 'Saved new image to output-hint.jpg' in out
    assert os.path.isfile('output-hint.jpg')
+ +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. 
+ ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." 
+ ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/crop_hints/requirements-test.txt b/samples/snippets/crop_hints/requirements-test.txt new file mode 100644 index 00000000..7e460c8c --- /dev/null +++ b/samples/snippets/crop_hints/requirements-test.txt @@ -0,0 +1 @@ +pytest==6.0.1 diff --git a/samples/snippets/crop_hints/requirements.txt b/samples/snippets/crop_hints/requirements.txt new file mode 100644 index 00000000..79ed641a --- /dev/null +++ b/samples/snippets/crop_hints/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==1.0.0 +pillow==7.2.0 diff --git a/samples/snippets/crop_hints/resources/cropme.jpg b/samples/snippets/crop_hints/resources/cropme.jpg new file mode 100644 index 00000000..50145895 Binary files /dev/null and b/samples/snippets/crop_hints/resources/cropme.jpg differ diff --git a/samples/snippets/detect/README.rst b/samples/snippets/detect/README.rst new file mode 100644 index 00000000..a06e286b --- /dev/null +++ b/samples/snippets/detect/README.rst @@ -0,0 +1,261 @@ + +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/detect/README.rst + + +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + + +Setup +------------------------------------------------------------------------------- + + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + + + + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 3.6+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. 
code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + + + + + + +Samples +------------------------------------------------------------------------------- + + +Detect ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/detect/detect.py,vision/cloud-client/detect/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python detect.py + + + usage: detect.py [-h] + {faces,faces-uri,labels,labels-uri,landmarks,landmarks-uri,text,text-uri,logos,logos-uri,safe-search,safe-search-uri,properties,properties-uri,web,web-uri,web-geo,web-geo-uri,crophints,crophints-uri,document,document-uri,ocr-uri,object-localization,object-localization-uri} + ... + + This application demonstrates how to perform basic operations with the + Google Cloud Vision API. + + Example Usage: + python detect.py text ./resources/wakeupcat.jpg + python detect.py labels ./resources/landmark.jpg + python detect.py web ./resources/landmark.jpg + python detect.py web-uri http://wheresgus.com/dog.JPG + python detect.py web-geo ./resources/city.jpg + python detect.py faces-uri gs://your-bucket/file.jpg + python detect.py ocr-uri gs://python-docs-samples-tests/HodgeConj.pdf gs://BUCKET_NAME/PREFIX/ + python detect.py object-localization ./resources/puppies.jpg + python detect.py object-localization-uri gs://... + + For more information, the documentation at + https://cloud.google.com/vision/docs. 
+ + positional arguments: + {faces,faces-uri,labels,labels-uri,landmarks,landmarks-uri,text,text-uri,logos,logos-uri,safe-search,safe-search-uri,properties,properties-uri,web,web-uri,web-geo,web-geo-uri,crophints,crophints-uri,document,document-uri,ocr-uri,object-localization,object-localization-uri} + faces Detects faces in an image. + faces-uri Detects faces in the file located in Google Cloud + Storage or the web. + labels Detects labels in the file. + labels-uri Detects labels in the file located in Google Cloud + Storage or on the Web. + landmarks Detects landmarks in the file. + landmarks-uri Detects landmarks in the file located in Google Cloud + Storage or on the Web. + text Detects text in the file. + text-uri Detects text in the file located in Google Cloud + Storage or on the Web. + logos Detects logos in the file. + logos-uri Detects logos in the file located in Google Cloud + Storage or on the Web. + safe-search Detects unsafe features in the file. + safe-search-uri Detects unsafe features in the file located in Google + Cloud Storage or on the Web. + properties Detects image properties in the file. + properties-uri Detects image properties in the file located in Google + Cloud Storage or on the Web. + web Detects web annotations given an image. + web-uri Detects web annotations in the file located in Google + Cloud Storage. + web-geo Detects web annotations given an image, using the + geotag metadata in the image to detect web entities. + web-geo-uri Detects web annotations given an image in the file + located in Google Cloud Storage., using the geotag + metadata in the image to detect web entities. + crophints Detects crop hints in an image. + crophints-uri Detects crop hints in the file located in Google Cloud + Storage. + document Detects document features in an image. + document-uri Detects document features in the file located in + Google Cloud Storage. 
+ ocr-uri OCR with PDF/TIFF as source files on GCS + object-localization + OCR with PDF/TIFF as source files on GCS + object-localization-uri + OCR with PDF/TIFF as source files on GCS + + optional arguments: + -h, --help show this help message and exit + + + + + +Beta Detect ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/detect/beta_snippets.py,vision/cloud-client/detect/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python beta_snippets.py + + + usage: beta_snippets.py [-h] + {object-localization,object-localization-uri,handwritten-ocr,handwritten-ocr-uri,batch-annotate-files,batch-annotate-files-uri,batch-annotate-images-uri} + ... + + Google Cloud Vision API Python Beta Snippets + + Example Usage: + python beta_snippets.py -h + python beta_snippets.py object-localization INPUT_IMAGE + python beta_snippets.py object-localization-uri gs://... + python beta_snippets.py handwritten-ocr INPUT_IMAGE + python beta_snippets.py handwritten-ocr-uri gs://... + python beta_snippets.py batch-annotate-files INPUT_PDF + python beta_snippets.py batch-annotate-files-uri gs://... + python beta_snippets.py batch-annotate-images-uri gs://... gs://... + + For more information, the documentation at + https://cloud.google.com/vision/docs. + + positional arguments: + {object-localization,object-localization-uri,handwritten-ocr,handwritten-ocr-uri,batch-annotate-files,batch-annotate-files-uri,batch-annotate-images-uri} + object-localization + Localize objects in the local image. Args: path: The + path to the local file. + object-localization-uri + Localize objects in the image on Google Cloud Storage + Args: uri: The path to the file in Google Cloud + Storage (gs://...) 
+ handwritten-ocr Detects handwritten characters in a local image. Args: + path: The path to the local file. + handwritten-ocr-uri + Detects handwritten characters in the file located in + Google Cloud Storage. Args: uri: The path to the file + in Google Cloud Storage (gs://...) + batch-annotate-files + Detects document features in a PDF/TIFF/GIF file. + While your PDF file may have several pages, this API + can process up to 5 pages only. Args: path: The path + to the local file. + batch-annotate-files-uri + Detects document features in a PDF/TIFF/GIF file. + While your PDF file may have several pages, this API + can process up to 5 pages only. Args: uri: The path to + the file in Google Cloud Storage (gs://...) + batch-annotate-images-uri + Batch annotation of images on Google Cloud Storage + asynchronously. Args: input_image_uri: The path to the + image in Google Cloud Storage (gs://...) output_uri: + The path to the output path in Google Cloud Storage + (gs://...) + + optional arguments: + -h, --help show this help message and exit + + + + + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/samples/snippets/detect/README.rst.in b/samples/snippets/detect/README.rst.in new file mode 100644 index 00000000..0d105411 --- /dev/null +++ b/samples/snippets/detect/README.rst.in @@ -0,0 +1,33 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Detect + file: detect.py + show_help: True +- name: Beta Detect + file: beta_snippets.py + show_help: True + +cloud_client_library: true + +folder: vision/cloud-client/detect \ No newline at end of file diff --git a/samples/snippets/detect/beta_snippets.py b/samples/snippets/detect/beta_snippets.py new file mode 100644 index 00000000..d5beb402 --- /dev/null +++ b/samples/snippets/detect/beta_snippets.py @@ -0,0 +1,413 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Google Cloud Vision API Python Beta Snippets + +Example Usage: +python beta_snippets.py -h +python beta_snippets.py object-localization INPUT_IMAGE +python beta_snippets.py object-localization-uri gs://... +python beta_snippets.py handwritten-ocr INPUT_IMAGE +python beta_snippets.py handwritten-ocr-uri gs://... +python beta_snippets.py batch-annotate-files INPUT_PDF +python beta_snippets.py batch-annotate-files-uri gs://... +python beta_snippets.py batch-annotate-images-uri gs://... gs://... + + +For more information, the documentation at +https://cloud.google.com/vision/docs. +""" + +import argparse +import io + + +# [START vision_localize_objects_beta] +def localize_objects(path): + """Localize objects in the local image. + + Args: + path: The path to the local file. + """ + from google.cloud import vision_v1p3beta1 as vision + client = vision.ImageAnnotatorClient() + + with open(path, 'rb') as image_file: + content = image_file.read() + image = vision.types.Image(content=content) + + objects = client.object_localization( + image=image).localized_object_annotations + + print('Number of objects found: {}'.format(len(objects))) + for object_ in objects: + print('\n{} (confidence: {})'.format(object_.name, object_.score)) + print('Normalized bounding polygon vertices: ') + for vertex in object_.bounding_poly.normalized_vertices: + print(' - ({}, {})'.format(vertex.x, vertex.y)) +# [END vision_localize_objects_beta] + + +# [START vision_localize_objects_gcs_beta] +def localize_objects_uri(uri): + """Localize objects in the image on Google Cloud Storage + + Args: + uri: The path to the file in Google Cloud Storage (gs://...) 
+ """ + from google.cloud import vision_v1p3beta1 as vision + client = vision.ImageAnnotatorClient() + + image = vision.types.Image() + image.source.image_uri = uri + + objects = client.object_localization( + image=image).localized_object_annotations + + print('Number of objects found: {}'.format(len(objects))) + for object_ in objects: + print('\n{} (confidence: {})'.format(object_.name, object_.score)) + print('Normalized bounding polygon vertices: ') + for vertex in object_.bounding_poly.normalized_vertices: + print(' - ({}, {})'.format(vertex.x, vertex.y)) +# [END vision_localize_objects_gcs_beta] + + +# [START vision_handwritten_ocr_beta] +def detect_handwritten_ocr(path): + """Detects handwritten characters in a local image. + + Args: + path: The path to the local file. + """ + from google.cloud import vision_v1p3beta1 as vision + client = vision.ImageAnnotatorClient() + + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + # Language hint codes for handwritten OCR: + # en-t-i0-handwrit, mul-Latn-t-i0-handwrit + # Note: Use only one language hint code per request for handwritten OCR. 
+ image_context = vision.types.ImageContext( + language_hints=['en-t-i0-handwrit']) + + response = client.document_text_detection(image=image, + image_context=image_context) + + print('Full Text: {}'.format(response.full_text_annotation.text)) + for page in response.full_text_annotation.pages: + for block in page.blocks: + print('\nBlock confidence: {}\n'.format(block.confidence)) + + for paragraph in block.paragraphs: + print('Paragraph confidence: {}'.format( + paragraph.confidence)) + + for word in paragraph.words: + word_text = ''.join([ + symbol.text for symbol in word.symbols + ]) + print('Word text: {} (confidence: {})'.format( + word_text, word.confidence)) + + for symbol in word.symbols: + print('\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_handwritten_ocr_beta] + + +# [START vision_handwritten_ocr_gcs_beta] +def detect_handwritten_ocr_uri(uri): + """Detects handwritten characters in the file located in Google Cloud + Storage. + + Args: + uri: The path to the file in Google Cloud Storage (gs://...) + """ + from google.cloud import vision_v1p3beta1 as vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + # Language hint codes for handwritten OCR: + # en-t-i0-handwrit, mul-Latn-t-i0-handwrit + # Note: Use only one language hint code per request for handwritten OCR. 
+ image_context = vision.types.ImageContext( + language_hints=['en-t-i0-handwrit']) + + response = client.document_text_detection(image=image, + image_context=image_context) + + print('Full Text: {}'.format(response.full_text_annotation.text)) + for page in response.full_text_annotation.pages: + for block in page.blocks: + print('\nBlock confidence: {}\n'.format(block.confidence)) + + for paragraph in block.paragraphs: + print('Paragraph confidence: {}'.format( + paragraph.confidence)) + + for word in paragraph.words: + word_text = ''.join([ + symbol.text for symbol in word.symbols + ]) + print('Word text: {} (confidence: {})'.format( + word_text, word.confidence)) + + for symbol in word.symbols: + print('\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_handwritten_ocr_gcs_beta] + + +# [START vision_batch_annotate_files_beta] +def detect_batch_annotate_files(path): + """Detects document features in a PDF/TIFF/GIF file. + + While your PDF file may have several pages, + this API can process up to 5 pages only. + + Args: + path: The path to the local file. + """ + from google.cloud import vision_v1p4beta1 as vision + client = vision.ImageAnnotatorClient() + + with open(path, 'rb') as pdf_file: + content = pdf_file.read() + + # Other supported mime_types: image/tiff' or 'image/gif' + mime_type = 'application/pdf' + input_config = vision.types.InputConfig( + content=content, mime_type=mime_type) + + feature = vision.types.Feature( + type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION) + # Annotate the first two pages and the last one (max 5 pages) + # First page starts at 1, and not 0. Last page is -1. 
+    pages = [1, 2, -1]
+
+    request = vision.types.AnnotateFileRequest(
+        input_config=input_config,
+        features=[feature],
+        pages=pages)
+
+    response = client.batch_annotate_files(requests=[request])
+
+    for image_response in response.responses[0].responses:
+        for page in image_response.full_text_annotation.pages:
+            for block in page.blocks:
+                print(u'\nBlock confidence: {}\n'.format(block.confidence))
+                for par in block.paragraphs:
+                    print(u'\tParagraph confidence: {}'.format(par.confidence))
+                    for word in par.words:
+                        symbol_texts = [symbol.text for symbol in word.symbols]
+                        word_text = ''.join(symbol_texts)
+                        print(u'\t\tWord text: {} (confidence: {})'.format(
+                            word_text, word.confidence))
+                        for symbol in word.symbols:
+                            print(u'\t\t\tSymbol: {} (confidence: {})'.format(
+                                symbol.text, symbol.confidence))
+# [END vision_batch_annotate_files_beta]
+
+
+# [START vision_batch_annotate_files_gcs_beta]
+def detect_batch_annotate_files_uri(gcs_uri):
+    """Detects document features in a PDF/TIFF/GIF file.
+
+    While your PDF file may have several pages,
+    this API can process up to 5 pages only.
+
+    Args:
+    gcs_uri: The path to the file in Google Cloud Storage (gs://...)
+    """
+    from google.cloud import vision_v1p4beta1 as vision
+    client = vision.ImageAnnotatorClient()
+
+    # Other supported mime_types: 'image/tiff' or 'image/gif'
+    mime_type = 'application/pdf'
+    input_config = vision.types.InputConfig(
+        gcs_source=vision.types.GcsSource(uri=gcs_uri), mime_type=mime_type)
+
+    feature = vision.types.Feature(
+        type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
+    # Annotate the first two pages and the last one (max 5 pages)
+    # First page starts at 1, and not 0. Last page is -1.
+    pages = [1, 2, -1]
+
+    request = vision.types.AnnotateFileRequest(
+        input_config=input_config,
+        features=[feature],
+        pages=pages)
+
+    response = client.batch_annotate_files(requests=[request])
+
+    for image_response in response.responses[0].responses:
+        for page in image_response.full_text_annotation.pages:
+            for block in page.blocks:
+                print(u'\nBlock confidence: {}\n'.format(block.confidence))
+                for par in block.paragraphs:
+                    print(u'\tParagraph confidence: {}'.format(par.confidence))
+                    for word in par.words:
+                        symbol_texts = [symbol.text for symbol in word.symbols]
+                        word_text = ''.join(symbol_texts)
+                        print(u'\t\tWord text: {} (confidence: {})'.format(
+                            word_text, word.confidence))
+                        for symbol in word.symbols:
+                            print(u'\t\t\tSymbol: {} (confidence: {})'.format(
+                                symbol.text, symbol.confidence))
+# [END vision_batch_annotate_files_gcs_beta]
+
+
+# [START vision_async_batch_annotate_images_beta]
+def async_batch_annotate_images_uri(input_image_uri, output_uri):
+    """Batch annotation of images on Google Cloud Storage asynchronously.
+
+    Args:
+    input_image_uri: The path to the image in Google Cloud Storage (gs://...)
+    output_uri: The path to the output location in Google Cloud Storage (gs://...)
+ """ + import re + + from google.cloud import storage + from google.protobuf import json_format + from google.cloud import vision_v1p4beta1 as vision + client = vision.ImageAnnotatorClient() + + # Construct the request for the image(s) to be annotated: + image_source = vision.types.ImageSource(image_uri=input_image_uri) + image = vision.types.Image(source=image_source) + features = [ + vision.types.Feature(type=vision.enums.Feature.Type.LABEL_DETECTION), + vision.types.Feature(type=vision.enums.Feature.Type.TEXT_DETECTION), + vision.types.Feature(type=vision.enums.Feature.Type.IMAGE_PROPERTIES), + ] + requests = [ + vision.types.AnnotateImageRequest(image=image, features=features), + ] + + gcs_destination = vision.types.GcsDestination(uri=output_uri) + output_config = vision.types.OutputConfig( + gcs_destination=gcs_destination, batch_size=2) + + operation = client.async_batch_annotate_images( + requests=requests, output_config=output_config) + + print('Waiting for the operation to finish.') + operation.result(timeout=10000) + + # Once the request has completed and the output has been + # written to Google Cloud Storage, we can list all the output files. + storage_client = storage.Client() + + match = re.match(r'gs://([^/]+)/(.+)', output_uri) + bucket_name = match.group(1) + prefix = match.group(2) + + bucket = storage_client.get_bucket(bucket_name) + + # Lists objects with the given prefix. + blob_list = list(bucket.list_blobs(prefix=prefix)) + print('Output files:') + for blob in blob_list: + print(blob.name) + + # Processes the first output file from Google Cloud Storage. + # Since we specified batch_size=2, the first response contains + # annotations for the first two annotate image requests. + output = blob_list[0] + + json_string = output.download_as_string() + response = json_format.Parse(json_string, + vision.types.BatchAnnotateImagesResponse()) + + # Prints the actual response for the first annotate image request. 
+ print(u'The annotation response for the first request: {}'.format( + response.responses[0])) +# [END vision_async_batch_annotate_images_beta] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + + object_parser = subparsers.add_parser( + 'object-localization', help=localize_objects.__doc__) + object_parser.add_argument('path') + + object_uri_parser = subparsers.add_parser( + 'object-localization-uri', help=localize_objects_uri.__doc__) + object_uri_parser.add_argument('uri') + + handwritten_parser = subparsers.add_parser( + 'handwritten-ocr', help=detect_handwritten_ocr.__doc__) + handwritten_parser.add_argument('path') + + handwritten_uri_parser = subparsers.add_parser( + 'handwritten-ocr-uri', help=detect_handwritten_ocr_uri.__doc__) + handwritten_uri_parser.add_argument('uri') + + batch_annotate_parser = subparsers.add_parser( + 'batch-annotate-files', help=detect_batch_annotate_files.__doc__) + batch_annotate_parser.add_argument('path') + + batch_annotate_uri_parser = subparsers.add_parser( + 'batch-annotate-files-uri', + help=detect_batch_annotate_files_uri.__doc__) + batch_annotate_uri_parser.add_argument('uri') + + batch_annotate__image_uri_parser = subparsers.add_parser( + 'batch-annotate-images-uri', + help=async_batch_annotate_images_uri.__doc__) + batch_annotate__image_uri_parser.add_argument('uri') + batch_annotate__image_uri_parser.add_argument('output') + + args = parser.parse_args() + + if 'uri' in args.command: + if 'object-localization-uri' in args.command: + localize_objects_uri(args.uri) + elif 'handwritten-ocr-uri' in args.command: + detect_handwritten_ocr_uri(args.uri) + elif 'batch-annotate-files-uri' in args.command: + detect_batch_annotate_files_uri(args.uri) + elif 'batch-annotate-images-uri' in args.command: + async_batch_annotate_images_uri(args.uri, args.output) + else: + if 
'object-localization' in args.command: + localize_objects(args.path) + elif 'handwritten-ocr' in args.command: + detect_handwritten_ocr(args.path) + elif 'batch-annotate-files' in args.command: + detect_batch_annotate_files(args.path) diff --git a/samples/snippets/detect/beta_snippets_test.py b/samples/snippets/detect/beta_snippets_test.py new file mode 100644 index 00000000..4a5a4663 --- /dev/null +++ b/samples/snippets/detect/beta_snippets_test.py @@ -0,0 +1,91 @@ +# Copyright 2018 Google LLC All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import uuid + +import beta_snippets + +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +GCS_ROOT = 'gs://cloud-samples-data/vision/' + +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +OUTPUT_PREFIX = 'TEST_OUTPUT_{}'.format(uuid.uuid4()) +GCS_DESTINATION_URI = 'gs://{}/{}/'.format(BUCKET, OUTPUT_PREFIX) + + +def test_localize_objects(capsys): + path = os.path.join(RESOURCES, 'puppies.jpg') + + beta_snippets.localize_objects(path) + + out, _ = capsys.readouterr() + assert 'Dog' in out + + +def test_localize_objects_uri(capsys): + uri = GCS_ROOT + 'puppies.jpg' + + beta_snippets.localize_objects_uri(uri) + + out, _ = capsys.readouterr() + assert 'Dog' in out + + +def test_handwritten_ocr(capsys): + path = os.path.join(RESOURCES, 'handwritten.jpg') + + beta_snippets.detect_handwritten_ocr(path) + + out, _ = capsys.readouterr() + assert 'Cloud Vision API' in out + + +def test_handwritten_ocr_uri(capsys): + uri = GCS_ROOT + 'handwritten.jpg' + + beta_snippets.detect_handwritten_ocr_uri(uri) + + out, _ = capsys.readouterr() + assert 'Cloud Vision API' in out + + +def test_detect_batch_annotate_files(capsys): + file_name = os.path.join(RESOURCES, 'kafka.pdf') + beta_snippets.detect_batch_annotate_files(file_name) + out, _ = capsys.readouterr() + assert 'Symbol: a' in out + assert 'Word text: evenings' in out + + +def test_detect_batch_annotate_files_uri(capsys): + gcs_uri = GCS_ROOT + 'document_understanding/kafka.pdf' + beta_snippets.detect_batch_annotate_files_uri(gcs_uri) + out, _ = capsys.readouterr() + assert 'Symbol' in out + assert 'Word text' in out + + +def test_async_batch_annotate_images(capsys): + gcs_uri = GCS_ROOT + 'landmark/eiffel_tower.jpg' + beta_snippets.async_batch_annotate_images_uri(gcs_uri, GCS_DESTINATION_URI) + out, _ = capsys.readouterr() + assert 'description: "Tower"' in out + + from google.cloud import storage + storage_client = storage.Client() + bucket = storage_client.get_bucket(BUCKET) + if 
len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) > 0: + for blob in bucket.list_blobs(prefix=OUTPUT_PREFIX): + blob.delete() diff --git a/samples/snippets/detect/detect.py b/samples/snippets/detect/detect.py new file mode 100644 index 00000000..c2731bba --- /dev/null +++ b/samples/snippets/detect/detect.py @@ -0,0 +1,1103 @@ +#!/usr/bin/env python + +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations with the +Google Cloud Vision API. + +Example Usage: +python detect.py text ./resources/wakeupcat.jpg +python detect.py labels ./resources/landmark.jpg +python detect.py web ./resources/landmark.jpg +python detect.py web-uri http://wheresgus.com/dog.JPG +python detect.py web-geo ./resources/city.jpg +python detect.py faces-uri gs://your-bucket/file.jpg +python detect.py ocr-uri gs://python-docs-samples-tests/HodgeConj.pdf \ +gs://BUCKET_NAME/PREFIX/ +python detect.py object-localization ./resources/puppies.jpg +python detect.py object-localization-uri gs://... + +For more information, the documentation at +https://cloud.google.com/vision/docs. 
+""" + +import argparse + + +# [START vision_face_detection] +def detect_faces(path): + """Detects faces in an image.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_face_detection] + # [START vision_python_migration_image_file] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + # [END vision_python_migration_image_file] + + response = client.face_detection(image=image) + faces = response.face_annotations + + # Names of likelihood from google.cloud.vision.enums + likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE', + 'LIKELY', 'VERY_LIKELY') + print('Faces:') + + for face in faces: + print('anger: {}'.format(likelihood_name[face.anger_likelihood])) + print('joy: {}'.format(likelihood_name[face.joy_likelihood])) + print('surprise: {}'.format(likelihood_name[face.surprise_likelihood])) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in face.bounding_poly.vertices]) + + print('face bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_face_detection] +# [END vision_face_detection] + + +# [START vision_face_detection_gcs] +def detect_faces_uri(uri): + """Detects faces in the file located in Google Cloud Storage or the web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + # [START vision_python_migration_image_uri] + image = vision.types.Image() + image.source.image_uri = uri + # [END vision_python_migration_image_uri] + + response = client.face_detection(image=image) + faces = response.face_annotations + + # Names of likelihood from google.cloud.vision.enums + likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE', + 'LIKELY', 'VERY_LIKELY') + 
print('Faces:') + + for face in faces: + print('anger: {}'.format(likelihood_name[face.anger_likelihood])) + print('joy: {}'.format(likelihood_name[face.joy_likelihood])) + print('surprise: {}'.format(likelihood_name[face.surprise_likelihood])) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in face.bounding_poly.vertices]) + + print('face bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_face_detection_gcs] + + +# [START vision_label_detection] +def detect_labels(path): + """Detects labels in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_label_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + response = client.label_detection(image=image) + labels = response.label_annotations + print('Labels:') + + for label in labels: + print(label.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_label_detection] +# [END vision_label_detection] + + +# [START vision_label_detection_gcs] +def detect_labels_uri(uri): + """Detects labels in the file located in Google Cloud Storage or on the + Web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + response = client.label_detection(image=image) + labels = response.label_annotations + print('Labels:') + + for label in labels: + print(label.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 
'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_label_detection_gcs] + + +# [START vision_landmark_detection] +def detect_landmarks(path): + """Detects landmarks in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_landmark_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + response = client.landmark_detection(image=image) + landmarks = response.landmark_annotations + print('Landmarks:') + + for landmark in landmarks: + print(landmark.description) + for location in landmark.locations: + lat_lng = location.lat_lng + print('Latitude {}'.format(lat_lng.latitude)) + print('Longitude {}'.format(lat_lng.longitude)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_landmark_detection] +# [END vision_landmark_detection] + + +# [START vision_landmark_detection_gcs] +def detect_landmarks_uri(uri): + """Detects landmarks in the file located in Google Cloud Storage or on the + Web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + response = client.landmark_detection(image=image) + landmarks = response.landmark_annotations + print('Landmarks:') + + for landmark in landmarks: + print(landmark.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_landmark_detection_gcs] + + +# [START vision_logo_detection] +def detect_logos(path): + """Detects logos in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # 
[START vision_python_migration_logo_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + response = client.logo_detection(image=image) + logos = response.logo_annotations + print('Logos:') + + for logo in logos: + print(logo.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_logo_detection] +# [END vision_logo_detection] + + +# [START vision_logo_detection_gcs] +def detect_logos_uri(uri): + """Detects logos in the file located in Google Cloud Storage or on the Web. + """ + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + response = client.logo_detection(image=image) + logos = response.logo_annotations + print('Logos:') + + for logo in logos: + print(logo.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_logo_detection_gcs] + + +# [START vision_safe_search_detection] +def detect_safe_search(path): + """Detects unsafe features in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_safe_search_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + response = client.safe_search_detection(image=image) + safe = response.safe_search_annotation + + # Names of likelihood from google.cloud.vision.enums + likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE', + 'LIKELY', 'VERY_LIKELY') + print('Safe search:') + + print('adult: {}'.format(likelihood_name[safe.adult])) + print('medical: 
{}'.format(likelihood_name[safe.medical])) + print('spoofed: {}'.format(likelihood_name[safe.spoof])) + print('violence: {}'.format(likelihood_name[safe.violence])) + print('racy: {}'.format(likelihood_name[safe.racy])) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_safe_search_detection] +# [END vision_safe_search_detection] + + +# [START vision_safe_search_detection_gcs] +def detect_safe_search_uri(uri): + """Detects unsafe features in the file located in Google Cloud Storage or + on the Web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + response = client.safe_search_detection(image=image) + safe = response.safe_search_annotation + + # Names of likelihood from google.cloud.vision.enums + likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE', + 'LIKELY', 'VERY_LIKELY') + print('Safe search:') + + print('adult: {}'.format(likelihood_name[safe.adult])) + print('medical: {}'.format(likelihood_name[safe.medical])) + print('spoofed: {}'.format(likelihood_name[safe.spoof])) + print('violence: {}'.format(likelihood_name[safe.violence])) + print('racy: {}'.format(likelihood_name[safe.racy])) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_safe_search_detection_gcs] + + +# [START vision_text_detection] +def detect_text(path): + """Detects text in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_text_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + response = 
client.text_detection(image=image) + texts = response.text_annotations + print('Texts:') + + for text in texts: + print('\n"{}"'.format(text.description)) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in text.bounding_poly.vertices]) + + print('bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_text_detection] +# [END vision_text_detection] + + +# [START vision_text_detection_gcs] +def detect_text_uri(uri): + """Detects text in the file located in Google Cloud Storage or on the Web. + """ + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + response = client.text_detection(image=image) + texts = response.text_annotations + print('Texts:') + + for text in texts: + print('\n"{}"'.format(text.description)) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in text.bounding_poly.vertices]) + + print('bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_text_detection_gcs] + + +# [START vision_image_property_detection] +def detect_properties(path): + """Detects image properties in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_image_properties] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + response = client.image_properties(image=image) + props = response.image_properties_annotation + print('Properties:') + + for color in props.dominant_colors.colors: + print('fraction: 
{}'.format(color.pixel_fraction)) + print('\tr: {}'.format(color.color.red)) + print('\tg: {}'.format(color.color.green)) + print('\tb: {}'.format(color.color.blue)) + print('\ta: {}'.format(color.color.alpha)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_image_properties] +# [END vision_image_property_detection] + + +# [START vision_image_property_detection_gcs] +def detect_properties_uri(uri): + """Detects image properties in the file located in Google Cloud Storage or + on the Web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + response = client.image_properties(image=image) + props = response.image_properties_annotation + print('Properties:') + + for color in props.dominant_colors.colors: + print('frac: {}'.format(color.pixel_fraction)) + print('\tr: {}'.format(color.color.red)) + print('\tg: {}'.format(color.color.green)) + print('\tb: {}'.format(color.color.blue)) + print('\ta: {}'.format(color.color.alpha)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_image_property_detection_gcs] + + +# [START vision_web_detection] +def detect_web(path): + """Detects web annotations given an image.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_web_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + response = client.web_detection(image=image) + annotations = response.web_detection + + if annotations.best_guess_labels: + for label in annotations.best_guess_labels: + print('\nBest guess label: 
{}'.format(label.label)) + + if annotations.pages_with_matching_images: + print('\n{} Pages with matching images found:'.format( + len(annotations.pages_with_matching_images))) + + for page in annotations.pages_with_matching_images: + print('\n\tPage url : {}'.format(page.url)) + + if page.full_matching_images: + print('\t{} Full Matches found: '.format( + len(page.full_matching_images))) + + for image in page.full_matching_images: + print('\t\tImage url : {}'.format(image.url)) + + if page.partial_matching_images: + print('\t{} Partial Matches found: '.format( + len(page.partial_matching_images))) + + for image in page.partial_matching_images: + print('\t\tImage url : {}'.format(image.url)) + + if annotations.web_entities: + print('\n{} Web entities found: '.format( + len(annotations.web_entities))) + + for entity in annotations.web_entities: + print('\n\tScore : {}'.format(entity.score)) + print(u'\tDescription: {}'.format(entity.description)) + + if annotations.visually_similar_images: + print('\n{} visually similar images found:\n'.format( + len(annotations.visually_similar_images))) + + for image in annotations.visually_similar_images: + print('\tImage url : {}'.format(image.url)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_web_detection] +# [END vision_web_detection] + + +# [START vision_web_detection_gcs] +def detect_web_uri(uri): + """Detects web annotations in the file located in Google Cloud Storage.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + response = client.web_detection(image=image) + annotations = response.web_detection + + if annotations.best_guess_labels: + for label in annotations.best_guess_labels: + print('\nBest guess label: {}'.format(label.label)) + + if 
annotations.pages_with_matching_images: + print('\n{} Pages with matching images found:'.format( + len(annotations.pages_with_matching_images))) + + for page in annotations.pages_with_matching_images: + print('\n\tPage url : {}'.format(page.url)) + + if page.full_matching_images: + print('\t{} Full Matches found: '.format( + len(page.full_matching_images))) + + for image in page.full_matching_images: + print('\t\tImage url : {}'.format(image.url)) + + if page.partial_matching_images: + print('\t{} Partial Matches found: '.format( + len(page.partial_matching_images))) + + for image in page.partial_matching_images: + print('\t\tImage url : {}'.format(image.url)) + + if annotations.web_entities: + print('\n{} Web entities found: '.format( + len(annotations.web_entities))) + + for entity in annotations.web_entities: + print('\n\tScore : {}'.format(entity.score)) + print(u'\tDescription: {}'.format(entity.description)) + + if annotations.visually_similar_images: + print('\n{} visually similar images found:\n'.format( + len(annotations.visually_similar_images))) + + for image in annotations.visually_similar_images: + print('\tImage url : {}'.format(image.url)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_web_detection_gcs] + + +# [START vision_web_detection_include_geo] +def web_entities_include_geo_results(path): + """Detects web annotations given an image, using the geotag metadata + in the image to detect web entities.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + web_detection_params = vision.types.WebDetectionParams( + include_geo_results=True) + image_context = vision.types.ImageContext( + web_detection_params=web_detection_params) + + response = 
client.web_detection(image=image, image_context=image_context) + + for entity in response.web_detection.web_entities: + print('\n\tScore : {}'.format(entity.score)) + print(u'\tDescription: {}'.format(entity.description)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_web_detection_include_geo] + + +# [START vision_web_detection_include_geo_gcs] +def web_entities_include_geo_results_uri(uri): + """Detects web annotations given an image in the file located in + Google Cloud Storage., using the geotag metadata in the image to + detect web entities.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + + image = vision.types.Image() + image.source.image_uri = uri + + web_detection_params = vision.types.WebDetectionParams( + include_geo_results=True) + image_context = vision.types.ImageContext( + web_detection_params=web_detection_params) + + response = client.web_detection(image=image, image_context=image_context) + + for entity in response.web_detection.web_entities: + print('\n\tScore : {}'.format(entity.score)) + print(u'\tDescription: {}'.format(entity.description)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_web_detection_include_geo_gcs] + + +# [START vision_crop_hint_detection] +def detect_crop_hints(path): + """Detects crop hints in an image.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_crop_hints] + with io.open(path, 'rb') as image_file: + content = image_file.read() + image = vision.types.Image(content=content) + + crop_hints_params = vision.types.CropHintsParams(aspect_ratios=[1.77]) + image_context = vision.types.ImageContext( + 
crop_hints_params=crop_hints_params) + + response = client.crop_hints(image=image, image_context=image_context) + hints = response.crop_hints_annotation.crop_hints + + for n, hint in enumerate(hints): + print('\nCrop Hint: {}'.format(n)) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in hint.bounding_poly.vertices]) + + print('bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_crop_hints] +# [END vision_crop_hint_detection] + + +# [START vision_crop_hint_detection_gcs] +def detect_crop_hints_uri(uri): + """Detects crop hints in the file located in Google Cloud Storage.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + crop_hints_params = vision.types.CropHintsParams(aspect_ratios=[1.77]) + image_context = vision.types.ImageContext( + crop_hints_params=crop_hints_params) + + response = client.crop_hints(image=image, image_context=image_context) + hints = response.crop_hints_annotation.crop_hints + + for n, hint in enumerate(hints): + print('\nCrop Hint: {}'.format(n)) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in hint.bounding_poly.vertices]) + + print('bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_crop_hint_detection_gcs] + + +# [START vision_fulltext_detection] +def detect_document(path): + """Detects document features in an image.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_document_text_detection] + with io.open(path, 'rb') as image_file: + content = 
image_file.read() + + image = vision.types.Image(content=content) + + response = client.document_text_detection(image=image) + + for page in response.full_text_annotation.pages: + for block in page.blocks: + print('\nBlock confidence: {}\n'.format(block.confidence)) + + for paragraph in block.paragraphs: + print('Paragraph confidence: {}'.format( + paragraph.confidence)) + + for word in paragraph.words: + word_text = ''.join([ + symbol.text for symbol in word.symbols + ]) + print('Word text: {} (confidence: {})'.format( + word_text, word.confidence)) + + for symbol in word.symbols: + print('\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_document_text_detection] +# [END vision_fulltext_detection] + + +# [START vision_fulltext_detection_gcs] +def detect_document_uri(uri): + """Detects document features in the file located in Google Cloud + Storage.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.types.Image() + image.source.image_uri = uri + + response = client.document_text_detection(image=image) + + for page in response.full_text_annotation.pages: + for block in page.blocks: + print('\nBlock confidence: {}\n'.format(block.confidence)) + + for paragraph in block.paragraphs: + print('Paragraph confidence: {}'.format( + paragraph.confidence)) + + for word in paragraph.words: + word_text = ''.join([ + symbol.text for symbol in word.symbols + ]) + print('Word text: {} (confidence: {})'.format( + word_text, word.confidence)) + + for symbol in word.symbols: + print('\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 
'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_fulltext_detection_gcs] + + +# [START vision_text_detection_pdf_gcs] +def async_detect_document(gcs_source_uri, gcs_destination_uri): + """OCR with PDF/TIFF as source files on GCS""" + import re + from google.cloud import vision + from google.cloud import storage + from google.protobuf import json_format + # Supported mime_types are: 'application/pdf' and 'image/tiff' + mime_type = 'application/pdf' + + # How many pages should be grouped into each json output file. + batch_size = 2 + + client = vision.ImageAnnotatorClient() + + feature = vision.types.Feature( + type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION) + + gcs_source = vision.types.GcsSource(uri=gcs_source_uri) + input_config = vision.types.InputConfig( + gcs_source=gcs_source, mime_type=mime_type) + + gcs_destination = vision.types.GcsDestination(uri=gcs_destination_uri) + output_config = vision.types.OutputConfig( + gcs_destination=gcs_destination, batch_size=batch_size) + + async_request = vision.types.AsyncAnnotateFileRequest( + features=[feature], input_config=input_config, + output_config=output_config) + + operation = client.async_batch_annotate_files( + requests=[async_request]) + + print('Waiting for the operation to finish.') + operation.result(timeout=420) + + # Once the request has completed and the output has been + # written to GCS, we can list all the output files. + storage_client = storage.Client() + + match = re.match(r'gs://([^/]+)/(.+)', gcs_destination_uri) + bucket_name = match.group(1) + prefix = match.group(2) + + bucket = storage_client.get_bucket(bucket_name) + + # List objects with the given prefix. + blob_list = list(bucket.list_blobs(prefix=prefix)) + print('Output files:') + for blob in blob_list: + print(blob.name) + + # Process the first output file from GCS. + # Since we specified batch_size=2, the first response contains + # the first two pages of the input file. 
+ output = blob_list[0] + + json_string = output.download_as_string() + response = json_format.Parse( + json_string, vision.types.AnnotateFileResponse()) + + # The actual response for the first page of the input file. + first_page_response = response.responses[0] + annotation = first_page_response.full_text_annotation + + # Here we print the full text from the first page. + # The response contains more information: + # annotation/pages/blocks/paragraphs/words/symbols + # including confidence scores and bounding boxes + print(u'Full text:\n{}'.format( + annotation.text)) +# [END vision_text_detection_pdf_gcs] + + +# [START vision_localize_objects] +def localize_objects(path): + """Localize objects in the local image. + + Args: + path: The path to the local file. + """ + from google.cloud import vision + client = vision.ImageAnnotatorClient() + + with open(path, 'rb') as image_file: + content = image_file.read() + image = vision.types.Image(content=content) + + objects = client.object_localization( + image=image).localized_object_annotations + + print('Number of objects found: {}'.format(len(objects))) + for object_ in objects: + print('\n{} (confidence: {})'.format(object_.name, object_.score)) + print('Normalized bounding polygon vertices: ') + for vertex in object_.bounding_poly.normalized_vertices: + print(' - ({}, {})'.format(vertex.x, vertex.y)) +# [END vision_localize_objects] + + +# [START vision_localize_objects_gcs] +def localize_objects_uri(uri): + """Localize objects in the image on Google Cloud Storage + + Args: + uri: The path to the file in Google Cloud Storage (gs://...) 
+ """ + from google.cloud import vision + client = vision.ImageAnnotatorClient() + + image = vision.types.Image() + image.source.image_uri = uri + + objects = client.object_localization( + image=image).localized_object_annotations + + print('Number of objects found: {}'.format(len(objects))) + for object_ in objects: + print('\n{} (confidence: {})'.format(object_.name, object_.score)) + print('Normalized bounding polygon vertices: ') + for vertex in object_.bounding_poly.normalized_vertices: + print(' - ({}, {})'.format(vertex.x, vertex.y)) +# [END vision_localize_objects_gcs] + + +def run_local(args): + if args.command == 'faces': + detect_faces(args.path) + elif args.command == 'labels': + detect_labels(args.path) + elif args.command == 'landmarks': + detect_landmarks(args.path) + elif args.command == 'text': + detect_text(args.path) + elif args.command == 'logos': + detect_logos(args.path) + elif args.command == 'safe-search': + detect_safe_search(args.path) + elif args.command == 'properties': + detect_properties(args.path) + elif args.command == 'web': + detect_web(args.path) + elif args.command == 'crophints': + detect_crop_hints(args.path) + elif args.command == 'document': + detect_document(args.path) + elif args.command == 'web-geo': + web_entities_include_geo_results(args.path) + elif args.command == 'object-localization': + localize_objects(args.path) + + +def run_uri(args): + if args.command == 'text-uri': + detect_text_uri(args.uri) + elif args.command == 'faces-uri': + detect_faces_uri(args.uri) + elif args.command == 'labels-uri': + detect_labels_uri(args.uri) + elif args.command == 'landmarks-uri': + detect_landmarks_uri(args.uri) + elif args.command == 'logos-uri': + detect_logos_uri(args.uri) + elif args.command == 'safe-search-uri': + detect_safe_search_uri(args.uri) + elif args.command == 'properties-uri': + detect_properties_uri(args.uri) + elif args.command == 'web-uri': + detect_web_uri(args.uri) + elif args.command == 'crophints-uri': + 
detect_crop_hints_uri(args.uri) + elif args.command == 'document-uri': + detect_document_uri(args.uri) + elif args.command == 'web-geo-uri': + web_entities_include_geo_results_uri(args.uri) + elif args.command == 'ocr-uri': + async_detect_document(args.uri, args.destination_uri) + elif args.command == 'object-localization-uri': + localize_objects_uri(args.uri) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + + detect_faces_parser = subparsers.add_parser( + 'faces', help=detect_faces.__doc__) + detect_faces_parser.add_argument('path') + + faces_file_parser = subparsers.add_parser( + 'faces-uri', help=detect_faces_uri.__doc__) + faces_file_parser.add_argument('uri') + + detect_labels_parser = subparsers.add_parser( + 'labels', help=detect_labels.__doc__) + detect_labels_parser.add_argument('path') + + labels_file_parser = subparsers.add_parser( + 'labels-uri', help=detect_labels_uri.__doc__) + labels_file_parser.add_argument('uri') + + detect_landmarks_parser = subparsers.add_parser( + 'landmarks', help=detect_landmarks.__doc__) + detect_landmarks_parser.add_argument('path') + + landmark_file_parser = subparsers.add_parser( + 'landmarks-uri', help=detect_landmarks_uri.__doc__) + landmark_file_parser.add_argument('uri') + + detect_text_parser = subparsers.add_parser( + 'text', help=detect_text.__doc__) + detect_text_parser.add_argument('path') + + text_file_parser = subparsers.add_parser( + 'text-uri', help=detect_text_uri.__doc__) + text_file_parser.add_argument('uri') + + detect_logos_parser = subparsers.add_parser( + 'logos', help=detect_logos.__doc__) + detect_logos_parser.add_argument('path') + + logos_file_parser = subparsers.add_parser( + 'logos-uri', help=detect_logos_uri.__doc__) + logos_file_parser.add_argument('uri') + + safe_search_parser = subparsers.add_parser( + 'safe-search', 
help=detect_safe_search.__doc__) + safe_search_parser.add_argument('path') + + safe_search_file_parser = subparsers.add_parser( + 'safe-search-uri', + help=detect_safe_search_uri.__doc__) + safe_search_file_parser.add_argument('uri') + + properties_parser = subparsers.add_parser( + 'properties', help=detect_properties.__doc__) + properties_parser.add_argument('path') + + properties_file_parser = subparsers.add_parser( + 'properties-uri', + help=detect_properties_uri.__doc__) + properties_file_parser.add_argument('uri') + + # 1.1 Vision features + web_parser = subparsers.add_parser( + 'web', help=detect_web.__doc__) + web_parser.add_argument('path') + + web_uri_parser = subparsers.add_parser( + 'web-uri', + help=detect_web_uri.__doc__) + web_uri_parser.add_argument('uri') + + web_geo_parser = subparsers.add_parser( + 'web-geo', help=web_entities_include_geo_results.__doc__) + web_geo_parser.add_argument('path') + + web_geo_uri_parser = subparsers.add_parser( + 'web-geo-uri', + help=web_entities_include_geo_results_uri.__doc__) + web_geo_uri_parser.add_argument('uri') + + crop_hints_parser = subparsers.add_parser( + 'crophints', help=detect_crop_hints.__doc__) + crop_hints_parser.add_argument('path') + + crop_hints_uri_parser = subparsers.add_parser( + 'crophints-uri', help=detect_crop_hints_uri.__doc__) + crop_hints_uri_parser.add_argument('uri') + + document_parser = subparsers.add_parser( + 'document', help=detect_document.__doc__) + document_parser.add_argument('path') + + document_uri_parser = subparsers.add_parser( + 'document-uri', help=detect_document_uri.__doc__) + document_uri_parser.add_argument('uri') + + ocr_uri_parser = subparsers.add_parser( + 'ocr-uri', help=async_detect_document.__doc__) + ocr_uri_parser.add_argument('uri') + ocr_uri_parser.add_argument('destination_uri') + + object_localization_parser = subparsers.add_parser( + 'object-localization', help=localize_objects.__doc__) + object_localization_parser.add_argument('path') + + 
object_localization_uri_parser = subparsers.add_parser( + 'object-localization-uri', help=localize_objects_uri.__doc__) + object_localization_uri_parser.add_argument('uri') + + args = parser.parse_args() + + if 'uri' in args.command: + run_uri(args) + else: + run_local(args) diff --git a/samples/snippets/detect/detect_test.py b/samples/snippets/detect/detect_test.py new file mode 100644 index 00000000..a618769d --- /dev/null +++ b/samples/snippets/detect/detect_test.py @@ -0,0 +1,283 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import uuid + +import backoff +from google.cloud import storage + +import detect + +ASSET_BUCKET = "cloud-samples-data" + +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +OUTPUT_PREFIX = 'TEST_OUTPUT_{}'.format(uuid.uuid4()) +GCS_SOURCE_URI = 'gs://{}/HodgeConj.pdf'.format(BUCKET) +GCS_DESTINATION_URI = 'gs://{}/{}/'.format(BUCKET, OUTPUT_PREFIX) + + +def test_labels(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/wakeupcat.jpg') + detect.detect_labels(file_name) + out, _ = capsys.readouterr() + assert 'Labels' in out + + +def test_labels_uri(capsys): + file_name = 'gs://{}/vision/label/wakeupcat.jpg'.format(ASSET_BUCKET) + detect.detect_labels_uri(file_name) + out, _ = capsys.readouterr() + assert 'Labels' in out + + +def test_landmarks(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/landmark.jpg') + detect.detect_landmarks(file_name) + out, _ = capsys.readouterr() + assert 'palace' in out.lower() + + +def test_landmarks_uri(capsys): + file_name = 'gs://{}/vision/landmark/pofa.jpg'.format(ASSET_BUCKET) + detect.detect_landmarks_uri(file_name) + out, _ = capsys.readouterr() + assert 'palace' in out.lower() + + +def test_faces(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/face_no_surprise.jpg') + detect.detect_faces(file_name) + out, _ = capsys.readouterr() + assert 'face bound' in out + + +def test_faces_uri(capsys): + file_name = 'gs://{}/vision/face/face_no_surprise.jpg'.format(ASSET_BUCKET) + detect.detect_faces_uri(file_name) + out, _ = capsys.readouterr() + assert 'face bound' in out + + +def test_logos(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/logos.png') + detect.detect_logos(file_name) + out, _ = capsys.readouterr() + assert 'google' in out.lower() + + +def test_logos_uri(capsys): + file_name = 'gs://{}/vision/logo/logo_google.png'.format(ASSET_BUCKET) + detect.detect_logos_uri(file_name) + out, _ = 
capsys.readouterr() + assert 'google' in out.lower() + + +def test_safe_search(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/wakeupcat.jpg') + detect.detect_safe_search(file_name) + out, _ = capsys.readouterr() + assert 'VERY_LIKELY' in out + assert 'racy: ' in out + + +def test_safe_search_uri(capsys): + file_name = 'gs://{}/vision/label/wakeupcat.jpg'.format(ASSET_BUCKET) + detect.detect_safe_search_uri(file_name) + out, _ = capsys.readouterr() + assert 'VERY_LIKELY' in out + assert 'racy: ' in out + + +def test_detect_text(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/text.jpg') + detect.detect_text(file_name) + out, _ = capsys.readouterr() + assert '37%' in out + + +def test_detect_text_uri(capsys): + file_name = 'gs://{}/vision/text/screen.jpg'.format(ASSET_BUCKET) + detect.detect_text_uri(file_name) + out, _ = capsys.readouterr() + assert '37%' in out + + +def test_detect_properties(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/landmark.jpg') + detect.detect_properties(file_name) + out, _ = capsys.readouterr() + assert 'frac' in out + + +def test_detect_properties_uri(capsys): + file_name = 'gs://{}/vision/landmark/pofa.jpg'.format(ASSET_BUCKET) + detect.detect_properties_uri(file_name) + out, _ = capsys.readouterr() + assert 'frac' in out + + +def only_sample_error(e): + """A callback for giving up upon Exceptions. + + Giving up upon any Exceptions other than the ones that sample code + throws at the end of the function. 
+ """ + return 'https://cloud.google.com/apis/design/errors' not in str(e) + + +# Vision 1.1 tests +def test_detect_web(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/landmark.jpg') + + @backoff.on_exception( + backoff.expo, Exception, max_time=60, giveup=only_sample_error) + def run_sample(): + detect.detect_web(file_name) + + run_sample() + out, _ = capsys.readouterr() + assert 'best guess label: palace of fine arts' in out.lower() + + +def test_detect_web_uri(capsys): + file_name = 'gs://{}/vision/landmark/pofa.jpg'.format(ASSET_BUCKET) + + @backoff.on_exception( + backoff.expo, Exception, max_time=60, giveup=only_sample_error) + def run_sample(): + detect.detect_web_uri(file_name) + + run_sample() + out, _ = capsys.readouterr() + assert 'best guess label: palace of fine arts' in out.lower() + + +def test_detect_web_with_geo(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/city.jpg') + + @backoff.on_exception( + backoff.expo, Exception, max_time=60, giveup=only_sample_error) + def run_sample(): + detect.web_entities_include_geo_results(file_name) + + run_sample() + out, _ = capsys.readouterr() + out = out.lower() + assert 'description' in out + + +def test_detect_web_with_geo_uri(capsys): + file_name = 'gs://{}/vision/web/city.jpg'.format(ASSET_BUCKET) + + @backoff.on_exception( + backoff.expo, Exception, max_time=60, giveup=only_sample_error) + def run_sample(): + detect.web_entities_include_geo_results_uri(file_name) + + run_sample() + out, _ = capsys.readouterr() + out = out.lower() + assert 'description' in out + + +def test_detect_document(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/text.jpg') + detect.detect_document(file_name) + out, _ = capsys.readouterr() + assert 'class' in out + + +def test_detect_document_uri(capsys): + file_name = 'gs://{}/vision/text/screen.jpg'.format(ASSET_BUCKET) + detect.detect_document_uri(file_name) + out, _ = capsys.readouterr() 
+ assert 'class' in out + + +def test_detect_crop_hints(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/wakeupcat.jpg') + detect.detect_crop_hints(file_name) + out, _ = capsys.readouterr() + assert 'bounds: ' in out + + +def test_detect_crop_hints_uri(capsys): + file_name = 'gs://{}/vision/label/wakeupcat.jpg'.format(ASSET_BUCKET) + detect.detect_crop_hints_uri(file_name) + out, _ = capsys.readouterr() + assert 'bounds: ' in out + + +def test_async_detect_document(capsys): + storage_client = storage.Client() + bucket = storage_client.get_bucket(BUCKET) + if len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) > 0: + for blob in bucket.list_blobs(prefix=OUTPUT_PREFIX): + blob.delete() + + assert len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) == 0 + + uri = 'gs://{}/vision/document/custom_0773375000_title_only.pdf'.format( + ASSET_BUCKET) + detect.async_detect_document( + gcs_source_uri=uri, + gcs_destination_uri=GCS_DESTINATION_URI) + out, _ = capsys.readouterr() + + assert 'OIL, GAS AND MINERAL LEASE' in out + assert len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) > 0 + + for blob in bucket.list_blobs(prefix=OUTPUT_PREFIX): + blob.delete() + + assert len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) == 0 + + +def test_localize_objects(capsys): + detect.localize_objects('resources/puppies.jpg') + + out, _ = capsys.readouterr() + assert 'dog' in out.lower() + + +def test_localize_objects_uri(capsys): + uri = 'gs://cloud-samples-data/vision/puppies.jpg' + + detect.localize_objects_uri(uri) + + out, _ = capsys.readouterr() + assert 'dog' in out.lower() diff --git a/samples/snippets/detect/noxfile.py b/samples/snippets/detect/noxfile.py new file mode 100644 index 00000000..ba55d7ce --- /dev/null +++ b/samples/snippets/detect/noxfile.py @@ -0,0 +1,224 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. 
+ sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/detect/requirements-test.txt b/samples/snippets/detect/requirements-test.txt new file mode 100644 index 00000000..11251b00 --- /dev/null +++ b/samples/snippets/detect/requirements-test.txt @@ -0,0 +1,3 @@ +backoff==1.10.0 +pytest==6.0.1 +flaky==3.7.0 diff --git a/samples/snippets/detect/requirements.txt b/samples/snippets/detect/requirements.txt new file mode 100644 index 00000000..168d85df --- /dev/null +++ b/samples/snippets/detect/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==1.0.0 +google-cloud-storage==1.31.0 diff --git a/samples/snippets/detect/resources/city.jpg 
b/samples/snippets/detect/resources/city.jpg new file mode 100644 index 00000000..b14282e7 Binary files /dev/null and b/samples/snippets/detect/resources/city.jpg differ diff --git a/samples/snippets/detect/resources/cloud-samples-data/vision/document/custom_0773375000_title_only.pdf b/samples/snippets/detect/resources/cloud-samples-data/vision/document/custom_0773375000_title_only.pdf new file mode 100644 index 00000000..514ca6b1 Binary files /dev/null and b/samples/snippets/detect/resources/cloud-samples-data/vision/document/custom_0773375000_title_only.pdf differ diff --git a/samples/snippets/detect/resources/duck_and_truck.jpg b/samples/snippets/detect/resources/duck_and_truck.jpg new file mode 100644 index 00000000..5c560fe7 Binary files /dev/null and b/samples/snippets/detect/resources/duck_and_truck.jpg differ diff --git a/samples/snippets/detect/resources/face_no_surprise.jpg b/samples/snippets/detect/resources/face_no_surprise.jpg new file mode 100644 index 00000000..0e2894ad Binary files /dev/null and b/samples/snippets/detect/resources/face_no_surprise.jpg differ diff --git a/samples/snippets/detect/resources/handwritten.jpg b/samples/snippets/detect/resources/handwritten.jpg new file mode 100644 index 00000000..50a9575b Binary files /dev/null and b/samples/snippets/detect/resources/handwritten.jpg differ diff --git a/samples/snippets/detect/resources/kafka.pdf b/samples/snippets/detect/resources/kafka.pdf new file mode 100644 index 00000000..ffa2e2fa Binary files /dev/null and b/samples/snippets/detect/resources/kafka.pdf differ diff --git a/samples/snippets/detect/resources/landmark.jpg b/samples/snippets/detect/resources/landmark.jpg new file mode 100644 index 00000000..41c3d0fc Binary files /dev/null and b/samples/snippets/detect/resources/landmark.jpg differ diff --git a/samples/snippets/detect/resources/logos.png b/samples/snippets/detect/resources/logos.png new file mode 100644 index 00000000..5538eaed Binary files /dev/null and 
b/samples/snippets/detect/resources/logos.png differ diff --git a/samples/snippets/detect/resources/puppies.jpg b/samples/snippets/detect/resources/puppies.jpg new file mode 100644 index 00000000..1bfbbc9c Binary files /dev/null and b/samples/snippets/detect/resources/puppies.jpg differ diff --git a/samples/snippets/detect/resources/text.jpg b/samples/snippets/detect/resources/text.jpg new file mode 100644 index 00000000..3b17d55d Binary files /dev/null and b/samples/snippets/detect/resources/text.jpg differ diff --git a/samples/snippets/detect/resources/wakeupcat.jpg b/samples/snippets/detect/resources/wakeupcat.jpg new file mode 100644 index 00000000..139cf461 Binary files /dev/null and b/samples/snippets/detect/resources/wakeupcat.jpg differ diff --git a/samples/snippets/detect/set_endpoint.py b/samples/snippets/detect/set_endpoint.py new file mode 100644 index 00000000..44361436 --- /dev/null +++ b/samples/snippets/detect/set_endpoint.py @@ -0,0 +1,48 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +def set_endpoint(): + """Change your endpoint""" + # [START vision_set_endpoint] + from google.cloud import vision + + client_options = {'api_endpoint': 'eu-vision.googleapis.com'} + + client = vision.ImageAnnotatorClient(client_options=client_options) + # [END vision_set_endpoint] + image_source = vision.types.ImageSource( + image_uri='gs://cloud-samples-data/vision/text/screen.jpg') + image = vision.types.Image(source=image_source) + + response = client.text_detection(image=image) + + print('Texts:') + for text in response.text_annotations: + print('{}'.format(text.description)) + + vertices = ['({},{})'.format(vertex.x, vertex.y) + for vertex in text.bounding_poly.vertices] + + print('bounds: {}\n'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + + +if __name__ == '__main__': + set_endpoint() diff --git a/samples/snippets/detect/set_endpoint_test.py b/samples/snippets/detect/set_endpoint_test.py new file mode 100644 index 00000000..37bd590c --- /dev/null +++ b/samples/snippets/detect/set_endpoint_test.py @@ -0,0 +1,23 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# [START vision_async_batch_annotate_images]

from google.cloud import vision_v1
from google.cloud.vision_v1 import enums


def sample_async_batch_annotate_images(
    input_image_uri="gs://cloud-samples-data/vision/label/wakeupcat.jpg",
    output_uri="gs://your-bucket/prefix/",
):
    """Perform async batch image annotation."""
    client = vision_v1.ImageAnnotatorClient()

    # One element of `requests` per image; each element names the image
    # source and the feature types to run against it.
    requests = [
        {
            "image": {"source": {"image_uri": input_image_uri}},
            "features": [
                {"type": enums.Feature.Type.LABEL_DETECTION},
                {"type": enums.Feature.Type.IMAGE_PROPERTIES},
            ],
        }
    ]

    # `batch_size` caps how many responses are written into each JSON
    # output file under the destination prefix.
    output_config = {
        "gcs_destination": {"uri": output_uri},
        "batch_size": 2,
    }

    operation = client.async_batch_annotate_images(requests, output_config)

    print("Waiting for operation to complete...")
    response = operation.result(90)

    # The output is written to GCS with the provided output_uri as prefix.
    gcs_output_uri = response.output_config.gcs_destination.uri
    print("Output written to GCS with prefix: {}".format(gcs_output_uri))


# [END vision_async_batch_annotate_images]
import os
import uuid

from google.cloud import storage
import pytest

import vision_async_batch_annotate_images

RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
GCS_ROOT = "gs://cloud-samples-data/vision/"

BUCKET = os.environ["CLOUD_STORAGE_BUCKET"]
OUTPUT_PREFIX = "TEST_OUTPUT_{}".format(uuid.uuid4())
GCS_DESTINATION_URI = "gs://{}/{}/".format(BUCKET, OUTPUT_PREFIX)


@pytest.fixture()
def storage_client():
    yield storage.Client()


@pytest.fixture()
def bucket(storage_client):
    """Yields the test bucket, purging stale output before and after."""
    bucket = storage_client.get_bucket(BUCKET)

    def _purge_output():
        for blob in bucket.list_blobs(prefix=OUTPUT_PREFIX):
            blob.delete()

    # Best-effort cleanup of leftovers from earlier runs; failures here
    # must not abort the test.
    try:
        _purge_output()
    except Exception:
        pass

    yield bucket

    _purge_output()


@pytest.mark.flaky(max_runs=3, min_passes=1)
def test_sample_asyn_batch_annotate_images(storage_client, bucket, capsys):
    input_image_uri = os.path.join(GCS_ROOT, "label/wakeupcat.jpg")

    vision_async_batch_annotate_images.sample_async_batch_annotate_images(
        input_image_uri=input_image_uri, output_uri=GCS_DESTINATION_URI
    )

    out, _ = capsys.readouterr()

    assert "Output written to GCS" in out
    assert len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) > 0
# [START vision_batch_annotate_files]

import io

from google.cloud import vision_v1
from google.cloud.vision_v1 import enums


def sample_batch_annotate_files(file_path="path/to/your/document.pdf"):
    """Perform batch file annotation."""
    client = vision_v1.ImageAnnotatorClient()

    # Supported mime_type: application/pdf, image/tiff, image/gif
    with io.open(file_path, "rb") as f:
        pdf_bytes = f.read()
    input_config = {"mime_type": "application/pdf", "content": pdf_bytes}

    # The service can process up to 5 pages per document file; request the
    # first, second, and last page (-1 indexes from the end).
    requests = [
        {
            "input_config": input_config,
            "features": [{"type": enums.Feature.Type.DOCUMENT_TEXT_DETECTION}],
            "pages": [1, 2, -1],
        }
    ]

    response = client.batch_annotate_files(requests)
    for image_response in response.responses[0].responses:
        print(u"Full text: {}".format(image_response.full_text_annotation.text))
        # Walk the page -> block -> paragraph -> word -> symbol hierarchy,
        # reporting the confidence at each level.
        for page in image_response.full_text_annotation.pages:
            for block in page.blocks:
                print(u"\nBlock confidence: {}".format(block.confidence))
                for paragraph in block.paragraphs:
                    print(u"\tParagraph confidence: {}".format(paragraph.confidence))
                    for word in paragraph.words:
                        print(u"\t\tWord confidence: {}".format(word.confidence))
                        for symbol in word.symbols:
                            print(
                                u"\t\t\tSymbol: {}, (confidence: {})".format(
                                    symbol.text, symbol.confidence
                                )
                            )


# [END vision_batch_annotate_files]
# [START vision_batch_annotate_files_gcs]

from google.cloud import vision_v1
from google.cloud.vision_v1 import enums


def sample_batch_annotate_files(
    storage_uri="gs://cloud-samples-data/vision/document_understanding/kafka.pdf",
):
    """Perform batch file annotation."""
    client = vision_v1.ImageAnnotatorClient()

    # The document is read directly from Cloud Storage; only PDF, TIFF
    # and GIF mime types are supported for file annotation.
    input_config = {
        "gcs_source": {"uri": storage_uri},
        "mime_type": "application/pdf",
    }

    # The service can process up to 5 pages per document file; request the
    # first, second, and last page (-1 indexes from the end).
    requests = [
        {
            "input_config": input_config,
            "features": [{"type": enums.Feature.Type.DOCUMENT_TEXT_DETECTION}],
            "pages": [1, 2, -1],
        }
    ]

    response = client.batch_annotate_files(requests)
    for image_response in response.responses[0].responses:
        print(u"Full text: {}".format(image_response.full_text_annotation.text))
        # Walk the page -> block -> paragraph -> word -> symbol hierarchy,
        # reporting the confidence at each level.
        for page in image_response.full_text_annotation.pages:
            for block in page.blocks:
                print(u"\nBlock confidence: {}".format(block.confidence))
                for paragraph in block.paragraphs:
                    print(u"\tParagraph confidence: {}".format(paragraph.confidence))
                    for word in paragraph.words:
                        print(u"\t\tWord confidence: {}".format(word.confidence))
                        for symbol in word.symbols:
                            print(
                                u"\t\t\tSymbol: {}, (confidence: {})".format(
                                    symbol.text, symbol.confidence
                                )
                            )


# [END vision_batch_annotate_files_gcs]
import os

import vision_batch_annotate_files_gcs

GCS_ROOT = "gs://cloud-samples-data/vision/"


def test_sample_batch_annotate_files_gcs(capsys):
    """Annotates a GCS-hosted PDF and checks the printed hierarchy."""
    gcs_uri = os.path.join(GCS_ROOT, "document_understanding/kafka.pdf")

    vision_batch_annotate_files_gcs.sample_batch_annotate_files(storage_uri=gcs_uri)

    captured, _ = capsys.readouterr()

    assert "Full text" in captured
    assert "Block confidence" in captured
import os

import vision_batch_annotate_files

RESOURCES = os.path.join(os.path.dirname(__file__), "resources")


def test_sample_batch_annotate_files(capsys):
    """Annotates a local PDF and checks the printed hierarchy."""
    pdf_path = os.path.join(RESOURCES, "kafka.pdf")

    vision_batch_annotate_files.sample_batch_annotate_files(file_path=pdf_path)

    captured, _ = capsys.readouterr()

    assert "Full text" in captured
    assert "Block confidence" in captured
_Google Cloud Vision API: https://cloud.google.com/vision/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Document Text tutorial ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/document_text/doctext.py,vision/cloud-client/document_text/README.rst + + + + +To run this sample: + +.. 
code-block:: bash + + $ python doctext.py + + usage: doctext.py [-h] [-out_file OUT_FILE] detect_file + + positional arguments: + detect_file The image for text detection. + + optional arguments: + -h, --help show this help message and exit + -out_file OUT_FILE Optional output file + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/document_text/README.rst.in b/samples/snippets/document_text/README.rst.in new file mode 100644 index 00000000..4746e327 --- /dev/null +++ b/samples/snippets/document_text/README.rst.in @@ -0,0 +1,30 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. 
_migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Document Text tutorial + file: doctext.py + show_help: True + +cloud_client_library: true + +folder: vision/cloud-client/document_text \ No newline at end of file diff --git a/samples/snippets/document_text/doctext.py b/samples/snippets/document_text/doctext.py new file mode 100644 index 00000000..29d82d6b --- /dev/null +++ b/samples/snippets/document_text/doctext.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python + +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Outlines document text given an image. 
+ +Example: + python doctext.py resources/text_menu.jpg +""" +# [START vision_document_text_tutorial] +# [START vision_document_text_tutorial_imports] +import argparse +from enum import Enum +import io + +from google.cloud import vision +from google.cloud.vision import types +from PIL import Image, ImageDraw +# [END vision_document_text_tutorial_imports] + + +class FeatureType(Enum): + PAGE = 1 + BLOCK = 2 + PARA = 3 + WORD = 4 + SYMBOL = 5 + + +def draw_boxes(image, bounds, color): + """Draw a border around the image using the hints in the vector list.""" + draw = ImageDraw.Draw(image) + + for bound in bounds: + draw.polygon([ + bound.vertices[0].x, bound.vertices[0].y, + bound.vertices[1].x, bound.vertices[1].y, + bound.vertices[2].x, bound.vertices[2].y, + bound.vertices[3].x, bound.vertices[3].y], None, color) + return image + + +def get_document_bounds(image_file, feature): + # [START vision_document_text_tutorial_detect_bounds] + """Returns document bounds given an image.""" + client = vision.ImageAnnotatorClient() + + bounds = [] + + with io.open(image_file, 'rb') as image_file: + content = image_file.read() + + image = types.Image(content=content) + + response = client.document_text_detection(image=image) + document = response.full_text_annotation + + # Collect specified feature bounds by enumerating all document features + for page in document.pages: + for block in page.blocks: + for paragraph in block.paragraphs: + for word in paragraph.words: + for symbol in word.symbols: + if (feature == FeatureType.SYMBOL): + bounds.append(symbol.bounding_box) + + if (feature == FeatureType.WORD): + bounds.append(word.bounding_box) + + if (feature == FeatureType.PARA): + bounds.append(paragraph.bounding_box) + + if (feature == FeatureType.BLOCK): + bounds.append(block.bounding_box) + + # The list `bounds` contains the coordinates of the bounding boxes. 
+ # [END vision_document_text_tutorial_detect_bounds] + return bounds + + +def render_doc_text(filein, fileout): + image = Image.open(filein) + bounds = get_document_bounds(filein, FeatureType.BLOCK) + draw_boxes(image, bounds, 'blue') + bounds = get_document_bounds(filein, FeatureType.PARA) + draw_boxes(image, bounds, 'red') + bounds = get_document_bounds(filein, FeatureType.WORD) + draw_boxes(image, bounds, 'yellow') + + if fileout != 0: + image.save(fileout) + else: + image.show() + + +if __name__ == '__main__': + # [START vision_document_text_tutorial_run_application] + parser = argparse.ArgumentParser() + parser.add_argument('detect_file', help='The image for text detection.') + parser.add_argument('-out_file', help='Optional output file', default=0) + args = parser.parse_args() + + render_doc_text(args.detect_file, args.out_file) + # [END vision_document_text_tutorial_run_application] +# [END vision_document_text_tutorial] diff --git a/samples/snippets/document_text/doctext_test.py b/samples/snippets/document_text/doctext_test.py new file mode 100644 index 00000000..cb881e31 --- /dev/null +++ b/samples/snippets/document_text/doctext_test.py @@ -0,0 +1,24 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os

import doctext


def test_text(capsys):
    """Checks the output image for drawing the crop hint is created."""
    doctext.render_doc_text('resources/text_menu.jpg', 'output-text.jpg')
    capsys.readouterr()
    assert os.path.isfile('output-text.jpg')
Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. 
+ """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/document_text/requirements-test.txt b/samples/snippets/document_text/requirements-test.txt new file mode 100644 index 00000000..7e460c8c --- /dev/null +++ b/samples/snippets/document_text/requirements-test.txt @@ -0,0 +1 @@ +pytest==6.0.1 diff --git a/samples/snippets/document_text/requirements.txt b/samples/snippets/document_text/requirements.txt new file mode 100644 index 00000000..79ed641a --- /dev/null +++ b/samples/snippets/document_text/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==1.0.0 +pillow==7.2.0 diff --git a/samples/snippets/document_text/resources/text_menu.jpg 
b/samples/snippets/document_text/resources/text_menu.jpg new file mode 100644 index 00000000..caa678b3 Binary files /dev/null and b/samples/snippets/document_text/resources/text_menu.jpg differ diff --git a/samples/snippets/face_detection/.gitignore b/samples/snippets/face_detection/.gitignore new file mode 100644 index 00000000..01f02dff --- /dev/null +++ b/samples/snippets/face_detection/.gitignore @@ -0,0 +1 @@ +out.jpg diff --git a/samples/snippets/face_detection/README.rst b/samples/snippets/face_detection/README.rst new file mode 100644 index 00000000..b04a344e --- /dev/null +++ b/samples/snippets/face_detection/README.rst @@ -0,0 +1,101 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/face_detection/README.rst + + +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + +This sample demonstrates how to use the Cloud Vision API to do face detection. + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. 
Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Face detection ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/face_detection/faces.py,vision/cloud-client/face_detection/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python faces.py + + usage: faces.py [-h] [--out OUTPUT] [--max-results MAX_RESULTS] input_image + + Detects faces in the given image. + + positional arguments: + input_image the image you'd like to detect faces in. + + optional arguments: + -h, --help show this help message and exit + --out OUTPUT the name of the output file. 
+ --max-results MAX_RESULTS + the max results of face detection. + + + + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/face_detection/README.rst.in b/samples/snippets/face_detection/README.rst.in new file mode 100644 index 00000000..422cec1d --- /dev/null +++ b/samples/snippets/face_detection/README.rst.in @@ -0,0 +1,31 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + +description: > + This sample demonstrates how to use the Cloud Vision API to do face detection. + +setup: +- auth +- install_deps + +samples: +- name: Face detection + file: faces.py + show_help: true + +folder: vision/cloud-client/face_detection \ No newline at end of file diff --git a/samples/snippets/face_detection/faces.py b/samples/snippets/face_detection/faces.py new file mode 100755 index 00000000..7b95fa1e --- /dev/null +++ b/samples/snippets/face_detection/faces.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python + +# Copyright 2015 Google, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Draws squares around detected faces in the given image.""" + +import argparse + +# [START vision_face_detection_tutorial_imports] +from google.cloud import vision +from google.cloud.vision import types +from PIL import Image, ImageDraw +# [END vision_face_detection_tutorial_imports] + + +# [START vision_face_detection_tutorial_send_request] +def detect_face(face_file, max_results=4): + """Uses the Vision API to detect faces in the given file. + + Args: + face_file: A file-like object containing an image with faces. + + Returns: + An array of Face objects with information about the picture. + """ + # [START vision_face_detection_tutorial_client] + client = vision.ImageAnnotatorClient() + # [END vision_face_detection_tutorial_client] + + content = face_file.read() + image = types.Image(content=content) + + return client.face_detection( + image=image, max_results=max_results).face_annotations +# [END vision_face_detection_tutorial_send_request] + + +# [START vision_face_detection_tutorial_process_response] +def highlight_faces(image, faces, output_filename): + """Draws a polygon around the faces, then saves to output_filename. + + Args: + image: a file containing the image with the faces. + faces: a list of faces found in the file. This should be in the format + returned by the Vision API. + output_filename: the name of the image file to be created, where the + faces have polygons drawn around them. 
+ """ + im = Image.open(image) + draw = ImageDraw.Draw(im) + # Sepecify the font-family and the font-size + for face in faces: + box = [(vertex.x, vertex.y) + for vertex in face.bounding_poly.vertices] + draw.line(box + [box[0]], width=5, fill='#00ff00') + # Place the confidence value/score of the detected faces above the + # detection box in the output image + draw.text(((face.bounding_poly.vertices)[0].x, + (face.bounding_poly.vertices)[0].y - 30), + str(format(face.detection_confidence, '.3f')) + '%', + fill='#FF0000') + im.save(output_filename) +# [END vision_face_detection_tutorial_process_response] + + +# [START vision_face_detection_tutorial_run_application] +def main(input_filename, output_filename, max_results): + with open(input_filename, 'rb') as image: + faces = detect_face(image, max_results) + print('Found {} face{}'.format( + len(faces), '' if len(faces) == 1 else 's')) + + print('Writing to file {}'.format(output_filename)) + # Reset the file pointer, so we can read the file again + image.seek(0) + highlight_faces(image, faces, output_filename) +# [END vision_face_detection_tutorial_run_application] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Detects faces in the given image.') + parser.add_argument( + 'input_image', help='the image you\'d like to detect faces in.') + parser.add_argument( + '--out', dest='output', default='out.jpg', + help='the name of the output file.') + parser.add_argument( + '--max-results', dest='max_results', default=4, + help='the max results of face detection.') + args = parser.parse_args() + + main(args.input_image, args.output, args.max_results) diff --git a/samples/snippets/face_detection/faces_test.py b/samples/snippets/face_detection/faces_test.py new file mode 100644 index 00000000..cca63c20 --- /dev/null +++ b/samples/snippets/face_detection/faces_test.py @@ -0,0 +1,39 @@ +# Copyright 2016, Google, Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from PIL import Image + +from faces import main + +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') + + +def test_main(tmpdir): + out_file = os.path.join(tmpdir.dirname, 'face-output.jpg') + in_file = os.path.join(RESOURCES, 'face-input.jpg') + + # Make sure there isn't already a green box + im = Image.open(in_file) + pixels = im.getdata() + greens = sum(1 for (r, g, b) in pixels if r == 0 and g == 255 and b == 0) + assert greens < 1 + + main(in_file, out_file, 10) + + # Make sure there now is some green drawn + im = Image.open(out_file) + pixels = im.getdata() + greens = sum(1 for (r, g, b) in pixels if r == 0 and g == 255 and b == 0) + assert greens > 10 diff --git a/samples/snippets/face_detection/noxfile.py b/samples/snippets/face_detection/noxfile.py new file mode 100644 index 00000000..ba55d7ce --- /dev/null +++ b/samples/snippets/face_detection/noxfile.py @@ -0,0 +1,224 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. 
+ ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." 
+ ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/face_detection/requirements-test.txt b/samples/snippets/face_detection/requirements-test.txt new file mode 100644 index 00000000..7e460c8c --- /dev/null +++ b/samples/snippets/face_detection/requirements-test.txt @@ -0,0 +1 @@ +pytest==6.0.1 diff --git a/samples/snippets/face_detection/requirements.txt b/samples/snippets/face_detection/requirements.txt new file mode 100644 index 00000000..79ed641a --- /dev/null +++ b/samples/snippets/face_detection/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==1.0.0 +pillow==7.2.0 diff --git a/samples/snippets/face_detection/resources/face-input.jpg b/samples/snippets/face_detection/resources/face-input.jpg new file mode 100644 index 00000000..c0ee5580 Binary files /dev/null and b/samples/snippets/face_detection/resources/face-input.jpg differ diff --git a/samples/snippets/product_search/create_product_set_test.py b/samples/snippets/product_search/create_product_set_test.py new file mode 100644 index 00000000..5dc5190c --- /dev/null +++ b/samples/snippets/product_search/create_product_set_test.py @@ -0,0 +1,45 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import pytest + +from product_set_management import ( + create_product_set, delete_product_set, list_product_sets) + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_SET_DISPLAY_NAME = 'fake_product_set_display_name_for_testing' +PRODUCT_SET_ID = 'test_{}'.format(uuid.uuid4()) + + +@pytest.fixture(scope="function", autouse=True) +def teardown(): + yield + + # tear down + delete_product_set(PROJECT_ID, LOCATION, PRODUCT_SET_ID) + + +def test_create_product_set(capsys): + create_product_set( + PROJECT_ID, LOCATION, PRODUCT_SET_ID, + PRODUCT_SET_DISPLAY_NAME) + list_product_sets(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_SET_ID in out diff --git a/samples/snippets/product_search/create_product_test.py b/samples/snippets/product_search/create_product_test.py new file mode 100644 index 00000000..a4e55f14 --- /dev/null +++ b/samples/snippets/product_search/create_product_test.py @@ -0,0 +1,45 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import pytest + +from product_management import create_product, delete_product, list_products + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_DISPLAY_NAME = 'fake_product_display_name_for_testing' +PRODUCT_CATEGORY = 'homegoods' +PRODUCT_ID = 'test_{}'.format(uuid.uuid4()) + + +@pytest.fixture(scope="function", autouse=True) +def teardown(): + yield + + # tear down + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID) + + +def test_create_product(capsys): + create_product( + PROJECT_ID, LOCATION, PRODUCT_ID, + PRODUCT_DISPLAY_NAME, PRODUCT_CATEGORY) + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID in out diff --git a/samples/snippets/product_search/import_product_sets.py b/samples/snippets/product_search/import_product_sets.py new file mode 100755 index 00000000..e2d03714 --- /dev/null +++ b/samples/snippets/product_search/import_product_sets.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python + +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform import product sets operations +on Product set in Cloud Vision Product Search. 
+ +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_tutorial_import] +from google.cloud import vision +# [END vision_product_search_tutorial_import] + + +# [START vision_product_search_import_product_images] +def import_product_sets(project_id, location, gcs_uri): + """Import images of different products in the product set. + Args: + project_id: Id of the project. + location: A compute region name. + gcs_uri: Google Cloud Storage URI. + Target files must be in Product Search CSV format. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = client.location_path( + project=project_id, location=location) + + # Set the input configuration along with Google Cloud Storage URI + gcs_source = vision.types.ImportProductSetsGcsSource( + csv_file_uri=gcs_uri) + input_config = vision.types.ImportProductSetsInputConfig( + gcs_source=gcs_source) + + # Import the product sets from the input URI. + response = client.import_product_sets( + parent=location_path, input_config=input_config) + + print('Processing operation name: {}'.format(response.operation.name)) + # synchronous check of operation status + result = response.result() + print('Processing done.') + + for i, status in enumerate(result.statuses): + print('Status of processing line {} of the csv: {}'.format( + i, status)) + # Check the status of reference image + # `0` is the code for OK in google.rpc.Code. 
+ if status.code == 0: + reference_image = result.reference_images[i] + print(reference_image) + else: + print('Status code not OK: {}'.format(status.message)) +# [END vision_product_search_import_product_images] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + parser.add_argument( + '--project_id', + help='Project id. Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + + import_product_sets_parser = subparsers.add_parser( + 'import_product_sets', help=import_product_sets.__doc__) + import_product_sets_parser.add_argument('gcs_uri') + + args = parser.parse_args() + + if args.command == 'import_product_sets': + import_product_sets(args.project_id, args.location, args.gcs_uri) diff --git a/samples/snippets/product_search/import_product_sets_test.py b/samples/snippets/product_search/import_product_sets_test.py new file mode 100644 index 00000000..b4faf8e5 --- /dev/null +++ b/samples/snippets/product_search/import_product_sets_test.py @@ -0,0 +1,79 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import uuid + +from google.cloud import storage +import pytest + +from import_product_sets import import_product_sets +from product_in_product_set_management import list_products_in_product_set +from product_management import delete_product, list_products +from product_set_management import delete_product_set, list_product_sets +from reference_image_management import list_reference_images + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +FILENAME = uuid.uuid4() +GCS_URI = 'gs://{}/vision/{}.csv'.format(PROJECT_ID, FILENAME) +PRODUCT_SET_DISPLAY_NAME = 'fake_product_set_display_name_for_testing' +PRODUCT_SET_ID = 'test_{}'.format(uuid.uuid4()) +PRODUCT_ID_1 = 'test_{}'.format(uuid.uuid4()) +IMAGE_URI_1 = 'shoes_1.jpg' + + +@pytest.fixture(scope="function", autouse=True) +def setup_teardown(): + # Create the product set csv file locally and upload it to GCS + # This is so that there is a unique product set ID for all python version + # tests. + client = storage.Client(project=PROJECT_ID) + bucket = client.get_bucket(PROJECT_ID) + blob = storage.Blob("vision/{}.csv".format(FILENAME), bucket) + blob.upload_from_string( + '"gs://cloud-samples-data/vision/product_search/shoes_1.jpg",' + + '"{}",'.format(IMAGE_URI_1) + + '"{}",'.format(PRODUCT_SET_ID) + + '"{}",'.format(PRODUCT_ID_1) + + '"apparel",,"style=womens","0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9"') + + yield + + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID_1) + delete_product_set(PROJECT_ID, LOCATION, PRODUCT_SET_ID) + # Delete the created file + blob.delete(client) + + +def test_import_product_sets(capsys): + import_product_sets(PROJECT_ID, LOCATION, GCS_URI) + + list_product_sets(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_SET_ID in out + + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID_1 in out + + list_products_in_product_set(PROJECT_ID, LOCATION, PRODUCT_SET_ID) + out, _ = capsys.readouterr() + assert 
PRODUCT_ID_1 in out + + list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID_1) + out, _ = capsys.readouterr() + assert IMAGE_URI_1 in out diff --git a/samples/snippets/product_search/noxfile.py b/samples/snippets/product_search/noxfile.py new file mode 100644 index 00000000..ba55d7ce --- /dev/null +++ b/samples/snippets/product_search/noxfile.py @@ -0,0 +1,224 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. 
+ 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/product_search/product_in_product_set_management.py b/samples/snippets/product_search/product_in_product_set_management.py new file mode 100755 index 00000000..ad16ab7e --- /dev/null +++ b/samples/snippets/product_search/product_in_product_set_management.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python + +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This application demonstrates how to manage the products in a product
set in Cloud Vision Product Search: adding, listing, removing and purging
products.

For more information, see the tutorial page at
https://cloud.google.com/vision/product-search/docs/
"""

import argparse

# [START vision_product_search_add_product_to_product_set]
# [START vision_product_search_remove_product_from_product_set]
# [START vision_product_search_purge_products_in_product_set]
from google.cloud import vision

# [END vision_product_search_add_product_to_product_set]
# [END vision_product_search_remove_product_from_product_set]
# [END vision_product_search_purge_products_in_product_set]


# [START vision_product_search_add_product_to_product_set]
def add_product_to_product_set(
        project_id, location, product_id, product_set_id):
    """Add a product to a product set.
    Args:
        project_id: Id of the project.
        location: A compute region name.
        product_id: Id of the product.
        product_set_id: Id of the product set.
    """
    client = vision.ProductSearchClient()

    # Get the full path of the product set.
    product_set_path = client.product_set_path(
        project=project_id, location=location,
        product_set=product_set_id)

    # Get the full path of the product.
    product_path = client.product_path(
        project=project_id, location=location, product=product_id)

    # Add the product to the product set.
    client.add_product_to_product_set(
        name=product_set_path, product=product_path)
    print('Product added to product set.')
# [END vision_product_search_add_product_to_product_set]


# [START vision_product_search_list_products_in_product_set]
def list_products_in_product_set(
        project_id, location, product_set_id):
    """List all products in a product set.
    Args:
        project_id: Id of the project.
        location: A compute region name.
        product_set_id: Id of the product set.
    """
    client = vision.ProductSearchClient()

    # Get the full path of the product set.
    product_set_path = client.product_set_path(
        project=project_id, location=location,
        product_set=product_set_id)

    # List all the products available in the product set.
    products = client.list_products_in_product_set(name=product_set_path)

    # Display the product information.
    for product in products:
        print('Product name: {}'.format(product.name))
        print('Product id: {}'.format(product.name.split('/')[-1]))
        print('Product display name: {}'.format(product.display_name))
        print('Product description: {}'.format(product.description))
        print('Product category: {}'.format(product.product_category))
        print('Product labels: {}'.format(product.product_labels))
# [END vision_product_search_list_products_in_product_set]


# [START vision_product_search_remove_product_from_product_set]
def remove_product_from_product_set(
        project_id, location, product_id, product_set_id):
    """Remove a product from a product set.
    Args:
        project_id: Id of the project.
        location: A compute region name.
        product_id: Id of the product.
        product_set_id: Id of the product set.
    """
    client = vision.ProductSearchClient()

    # Get the full path of the product set.
    product_set_path = client.product_set_path(
        project=project_id, location=location,
        product_set=product_set_id)

    # Get the full path of the product.
    product_path = client.product_path(
        project=project_id, location=location, product=product_id)

    # Remove the product from the product set.
    client.remove_product_from_product_set(
        name=product_set_path, product=product_path)
    print('Product removed from product set.')
# [END vision_product_search_remove_product_from_product_set]


# [START vision_product_search_purge_products_in_product_set]
def purge_products_in_product_set(
        project_id, location, product_set_id, force):
    """Delete all products in a product set.
    Args:
        project_id: Id of the project.
        location: A compute region name.
        product_set_id: Id of the product set.
        force: Perform the purge only when force is set to True.
    """
    client = vision.ProductSearchClient()

    parent = client.location_path(
        project=project_id, location=location)

    product_set_purge_config = vision.types.ProductSetPurgeConfig(
        product_set_id=product_set_id)

    # The purge operation is async.
    operation = client.purge_products(
        parent=parent,
        product_set_purge_config=product_set_purge_config,
        # The operation is irreversible and removes multiple products.
        # The user is required to pass in force=True to actually perform the
        # purge.
        # If force is not set to True, the service raises an exception.
        force=force)

    operation.result(timeout=300)

    print('Deleted products in product set.')
# [END vision_product_search_purge_products_in_product_set]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(dest='command')
    parser.add_argument(
        '--project_id',
        help='Project id.  Required',
        required=True)
    parser.add_argument(
        '--location',
        help='Compute region name',
        default='us-west1')

    add_product_to_product_set_parser = subparsers.add_parser(
        'add_product_to_product_set', help=add_product_to_product_set.__doc__)
    add_product_to_product_set_parser.add_argument('product_id')
    add_product_to_product_set_parser.add_argument('product_set_id')

    list_products_in_product_set_parser = subparsers.add_parser(
        'list_products_in_product_set',
        help=list_products_in_product_set.__doc__)
    list_products_in_product_set_parser.add_argument('product_set_id')

    remove_product_from_product_set_parser = subparsers.add_parser(
        'remove_product_from_product_set',
        help=remove_product_from_product_set.__doc__)
    remove_product_from_product_set_parser.add_argument('product_id')
    remove_product_from_product_set_parser.add_argument('product_set_id')

    purge_products_in_product_set_parser = subparsers.add_parser(
        'purge_products_in_product_set',
        help=purge_products_in_product_set.__doc__)
    purge_products_in_product_set_parser.add_argument('product_set_id')
    purge_products_in_product_set_parser.add_argument(
        '--force', action='store_true')

    args = parser.parse_args()

    if args.command == 'add_product_to_product_set':
        add_product_to_product_set(
            args.project_id, args.location, args.product_id,
            args.product_set_id)
    elif args.command == 'list_products_in_product_set':
        list_products_in_product_set(
            args.project_id, args.location, args.product_set_id)
    elif args.command == 'remove_product_from_product_set':
        remove_product_from_product_set(
            args.project_id, args.location, args.product_id,
            args.product_set_id)
    elif args.command == 'purge_products_in_product_set':
        purge_products_in_product_set(
            args.project_id, args.location, args.product_set_id, args.force)
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import uuid

import pytest

from product_in_product_set_management import (
    add_product_to_product_set, list_products_in_product_set,
    purge_products_in_product_set, remove_product_from_product_set)
from product_management import create_product, delete_product, list_products
from product_set_management import (
    create_product_set, delete_product_set)


PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT')
LOCATION = 'us-west1'

PRODUCT_SET_DISPLAY_NAME = 'fake_product_set_display_name_for_testing'

PRODUCT_DISPLAY_NAME = 'fake_product_display_name_for_testing'
PRODUCT_CATEGORY = 'homegoods'


@pytest.fixture(scope="function")
def test_resources():
    """Create a throwaway product set and product; delete both afterwards."""
    product_set_id = f'test_set_{uuid.uuid4()}'
    product_id = f'test_product_{uuid.uuid4()}'

    create_product_set(
        PROJECT_ID, LOCATION, product_set_id, PRODUCT_SET_DISPLAY_NAME)
    create_product(
        PROJECT_ID, LOCATION, product_id,
        PRODUCT_DISPLAY_NAME, PRODUCT_CATEGORY)

    yield product_set_id, product_id

    # Clean up regardless of the test outcome.
    delete_product(PROJECT_ID, LOCATION, product_id)
    delete_product_set(PROJECT_ID, LOCATION, product_set_id)


def test_add_product_to_product_set(capsys, test_resources):
    product_set_id, product_id = test_resources

    add_product_to_product_set(
        PROJECT_ID, LOCATION, product_id, product_set_id)
    list_products_in_product_set(PROJECT_ID, LOCATION, product_set_id)

    captured, _ = capsys.readouterr()
    assert 'Product id: {}'.format(product_id) in captured


def test_remove_product_from_product_set(capsys, test_resources):
    product_set_id, product_id = test_resources
    expected_line = 'Product id: {}'.format(product_id)

    # The product must be present before it can be removed.
    add_product_to_product_set(
        PROJECT_ID, LOCATION, product_id, product_set_id)
    list_products_in_product_set(PROJECT_ID, LOCATION, product_set_id)
    captured, _ = capsys.readouterr()
    assert expected_line in captured

    remove_product_from_product_set(
        PROJECT_ID, LOCATION, product_id, product_set_id)
    list_products_in_product_set(PROJECT_ID, LOCATION, product_set_id)
    captured, _ = capsys.readouterr()
    assert expected_line not in captured


def test_purge_products_in_product_set(capsys, test_resources):
    product_set_id, product_id = test_resources
    expected_line = 'Product id: {}'.format(product_id)

    # The product must exist before the purge.
    add_product_to_product_set(
        PROJECT_ID, LOCATION, product_id, product_set_id)
    list_products(PROJECT_ID, LOCATION)
    captured, _ = capsys.readouterr()
    assert expected_line in captured

    purge_products_in_product_set(
        PROJECT_ID, LOCATION, product_set_id, force=True)

    list_products(PROJECT_ID, LOCATION)
    captured, _ = capsys.readouterr()
    assert expected_line not in captured


#!/usr/bin/env python

# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations on Product +in Cloud Vision Product Search. + +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_create_product] +# [START vision_product_search_delete_product] +# [START vision_product_search_list_products] +# [START vision_product_search_get_product] +# [START vision_product_search_update_product_labels] +# [START vision_product_search_purge_orphan_products] +from google.cloud import vision + +# [END vision_product_search_create_product] +# [END vision_product_search_delete_product] +# [END vision_product_search_list_products] +# [END vision_product_search_get_product] +# [END vision_product_search_update_product_labels] +# [END vision_product_search_purge_orphan_products] + + +# [START vision_product_search_create_product] +def create_product( + project_id, location, product_id, product_display_name, + product_category): + """Create one product. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + product_display_name: Display name of the product. + product_category: Category of the product. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = client.location_path(project=project_id, location=location) + + # Create a product with the product specification in the region. + # Set product display name and product category. 
+ product = vision.types.Product( + display_name=product_display_name, + product_category=product_category) + + # The response is the product with the `name` field populated. + response = client.create_product( + parent=location_path, + product=product, + product_id=product_id) + + # Display the product information. + print('Product name: {}'.format(response.name)) +# [END vision_product_search_create_product] + + +# [START vision_product_search_list_products] +def list_products(project_id, location): + """List all products. + Args: + project_id: Id of the project. + location: A compute region name. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = client.location_path(project=project_id, location=location) + + # List all the products available in the region. + products = client.list_products(parent=location_path) + + # Display the product information. + for product in products: + print('Product name: {}'.format(product.name)) + print('Product id: {}'.format(product.name.split('/')[-1])) + print('Product display name: {}'.format(product.display_name)) + print('Product description: {}'.format(product.description)) + print('Product category: {}'.format(product.product_category)) + print('Product labels: {}\n'.format(product.product_labels)) +# [END vision_product_search_list_products] + + +# [START vision_product_search_get_product] +def get_product(project_id, location, product_id): + """Get information about a product. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Get complete detail of the product. + product = client.get_product(name=product_path) + + # Display the product information. 
+ print('Product name: {}'.format(product.name)) + print('Product id: {}'.format(product.name.split('/')[-1])) + print('Product display name: {}'.format(product.display_name)) + print('Product description: {}'.format(product.description)) + print('Product category: {}'.format(product.product_category)) + print('Product labels: {}'.format(product.product_labels)) +# [END vision_product_search_get_product] + + +# [START vision_product_search_update_product_labels] +def update_product_labels( + project_id, location, product_id, key, value): + """Update the product labels. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + key: The key of the label. + value: The value of the label. + """ + client = vision.ProductSearchClient() + + # Get the name of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Set product name, product label and product display name. + # Multiple labels are also supported. + key_value = vision.types.Product.KeyValue(key=key, value=value) + product = vision.types.Product( + name=product_path, + product_labels=[key_value]) + + # Updating only the product_labels field here. + update_mask = vision.types.FieldMask(paths=['product_labels']) + + # This overwrites the product_labels. + updated_product = client.update_product( + product=product, update_mask=update_mask) + + # Display the updated product information. + print('Product name: {}'.format(updated_product.name)) + print('Updated product labels: {}'.format(product.product_labels)) +# [END vision_product_search_update_product_labels] + + +# [START vision_product_search_delete_product] +def delete_product(project_id, location, product_id): + """Delete the product and all its reference images. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. 
+ """ + client = vision.ProductSearchClient() + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Delete a product. + client.delete_product(name=product_path) + print('Product deleted.') +# [END vision_product_search_delete_product] + + +# [START vision_product_search_purge_orphan_products] +def purge_orphan_products(project_id, location, force): + """Delete all products not in any product sets. + Args: + project_id: Id of the project. + location: A compute region name. + """ + client = vision.ProductSearchClient() + + parent = client.location_path( + project=project_id, location=location) + + # The purge operation is async. + operation = client.purge_products( + parent=parent, + delete_orphan_products=True, + # The operation is irreversible and removes multiple products. + # The user is required to pass in force=True to actually perform the + # purge. + # If force is not set to True, the service raises an exception. + force=force) + + operation.result(timeout=300) + + print('Orphan products deleted.') +# [END vision_product_search_purge_orphan_products] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument( + '--project_id', + help='Project id. 
Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + + subparsers = parser.add_subparsers(dest='command') + + create_product_parser = subparsers.add_parser( + 'create_product', help=create_product.__doc__) + create_product_parser.add_argument('product_id') + create_product_parser.add_argument('product_display_name') + create_product_parser.add_argument('product_category') + + list_products_parser = subparsers.add_parser( + 'list_products', help=list_products.__doc__) + + get_product_parser = subparsers.add_parser( + 'get_product', help=get_product.__doc__) + get_product_parser.add_argument('product_id') + + update_product_labels_parser = subparsers.add_parser( + 'update_product_labels', help=update_product_labels.__doc__) + update_product_labels_parser.add_argument('product_id') + update_product_labels_parser.add_argument('key') + update_product_labels_parser.add_argument('value') + + delete_product_parser = subparsers.add_parser( + 'delete_product', help=delete_product.__doc__) + delete_product_parser.add_argument('product_id') + + purge_orphan_products_parser = subparsers.add_parser( + 'purge_orphan_products', help=purge_orphan_products.__doc__) + purge_orphan_products_parser.add_argument('--force', action='store_true') + + args = parser.parse_args() + + if args.command == 'create_product': + create_product( + args.project_id, args.location, args.product_id, + args.product_display_name, args.product_category) + elif args.command == 'list_products': + list_products(args.project_id, args.location) + elif args.command == 'get_product': + get_product(args.project_id, args.location, args.product_id) + elif args.command == 'update_product_labels': + update_product_labels( + args.project_id, args.location, args.product_id, + args.key, args.value) + elif args.command == 'delete_product': + delete_product(args.project_id, args.location, args.product_id) + elif args.command == 'purge_orphan_products': + 
purge_orphan_products(args.project_id, args.location, args.force) diff --git a/samples/snippets/product_search/product_management_test.py b/samples/snippets/product_search/product_management_test.py new file mode 100644 index 00000000..a0f4dcc6 --- /dev/null +++ b/samples/snippets/product_search/product_management_test.py @@ -0,0 +1,76 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import pytest + +from product_management import ( + create_product, delete_product, list_products, + purge_orphan_products, update_product_labels) + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_DISPLAY_NAME = 'fake_product_display_name_for_testing' +PRODUCT_CATEGORY = 'homegoods' +PRODUCT_ID = 'test_{}'.format(uuid.uuid4()) +KEY = 'fake_key_for_testing' +VALUE = 'fake_value_for_testing' + + +@pytest.fixture(scope="function", autouse=True) +def setup_teardown(): + # set up + create_product( + PROJECT_ID, LOCATION, PRODUCT_ID, + PRODUCT_DISPLAY_NAME, PRODUCT_CATEGORY) + + yield None + + # tear down + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID) + + +def test_delete_product(capsys): + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID in out + + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID) + + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID not in out + + +def 
test_update_product_labels(capsys): + update_product_labels(PROJECT_ID, LOCATION, PRODUCT_ID, KEY, VALUE) + out, _ = capsys.readouterr() + assert KEY in out + assert VALUE in out + + +def test_purge_orphan_products(capsys): + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID in out + + purge_orphan_products(PROJECT_ID, LOCATION, force=True) + + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID not in out diff --git a/samples/snippets/product_search/product_search.py b/samples/snippets/product_search/product_search.py new file mode 100755 index 00000000..89c8f168 --- /dev/null +++ b/samples/snippets/product_search/product_search.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python + +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This tutorial demonstrates how users query the product set with their +own images and find the products similer to the image using the Cloud +Vision Product Search API. 
+ +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_get_similar_products] +# [START vision_product_search_get_similar_products_gcs] +from google.cloud import vision + +# [END vision_product_search_get_similar_products] +# [END vision_product_search_get_similar_products_gcs] + + +# [START vision_product_search_get_similar_products] +def get_similar_products_file( + project_id, location, product_set_id, product_category, + file_path, filter): + """Search similar products to image. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + product_category: Category of the product. + file_path: Local file path of the image to be searched. + filter: Condition to be applied on the labels. + Example for filter: (color = red OR color = blue) AND style = kids + It will search on all products with the following labels: + color:red AND style:kids + color:blue AND style:kids + """ + # product_search_client is needed only for its helper methods. + product_search_client = vision.ProductSearchClient() + image_annotator_client = vision.ImageAnnotatorClient() + + # Read the image as a stream of bytes. + with open(file_path, 'rb') as image_file: + content = image_file.read() + + # Create annotate image request along with product search feature. + image = vision.types.Image(content=content) + + # product search specific parameters + product_set_path = product_search_client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + product_search_params = vision.types.ProductSearchParams( + product_set=product_set_path, + product_categories=[product_category], + filter=filter) + image_context = vision.types.ImageContext( + product_search_params=product_search_params) + + # Search products similar to the image. 
+ response = image_annotator_client.product_search( + image, image_context=image_context) + + index_time = response.product_search_results.index_time + print('Product set index time:') + print(' seconds: {}'.format(index_time.seconds)) + print(' nanos: {}\n'.format(index_time.nanos)) + + results = response.product_search_results.results + + print('Search results:') + for result in results: + product = result.product + + print('Score(Confidence): {}'.format(result.score)) + print('Image name: {}'.format(result.image)) + + print('Product name: {}'.format(product.name)) + print('Product display name: {}'.format( + product.display_name)) + print('Product description: {}\n'.format(product.description)) + print('Product labels: {}\n'.format(product.product_labels)) +# [END vision_product_search_get_similar_products] + + +# [START vision_product_search_get_similar_products_gcs] +def get_similar_products_uri( + project_id, location, product_set_id, product_category, + image_uri, filter): + """Search similar products to image. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + product_category: Category of the product. + image_uri: Cloud Storage location of image to be searched. + filter: Condition to be applied on the labels. + Example for filter: (color = red OR color = blue) AND style = kids + It will search on all products with the following labels: + color:red AND style:kids + color:blue AND style:kids + """ + # product_search_client is needed only for its helper methods. + product_search_client = vision.ProductSearchClient() + image_annotator_client = vision.ImageAnnotatorClient() + + # Create annotate image request along with product search feature. 
+ image_source = vision.types.ImageSource(image_uri=image_uri) + image = vision.types.Image(source=image_source) + + # product search specific parameters + product_set_path = product_search_client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + product_search_params = vision.types.ProductSearchParams( + product_set=product_set_path, + product_categories=[product_category], + filter=filter) + image_context = vision.types.ImageContext( + product_search_params=product_search_params) + + # Search products similar to the image. + response = image_annotator_client.product_search( + image, image_context=image_context) + + index_time = response.product_search_results.index_time + print('Product set index time:') + print(' seconds: {}'.format(index_time.seconds)) + print(' nanos: {}\n'.format(index_time.nanos)) + + results = response.product_search_results.results + + print('Search results:') + for result in results: + product = result.product + + print('Score(Confidence): {}'.format(result.score)) + print('Image name: {}'.format(result.image)) + + print('Product name: {}'.format(product.name)) + print('Product display name: {}'.format( + product.display_name)) + print('Product description: {}\n'.format(product.description)) + print('Product labels: {}\n'.format(product.product_labels)) +# [END vision_product_search_get_similar_products_gcs] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + parser.add_argument( + '--project_id', + help='Project id. 
Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + parser.add_argument('--product_set_id') + parser.add_argument('--product_category') + parser.add_argument('--filter', default='') + + get_similar_products_file_parser = subparsers.add_parser( + 'get_similar_products_file', help=get_similar_products_file.__doc__) + get_similar_products_file_parser.add_argument('--file_path') + + get_similar_products_uri_parser = subparsers.add_parser( + 'get_similar_products_uri', help=get_similar_products_uri.__doc__) + get_similar_products_uri_parser.add_argument('--image_uri') + + args = parser.parse_args() + + if args.command == 'get_similar_products_file': + get_similar_products_file( + args.project_id, args.location, args.product_set_id, + args.product_category, args.file_path, args.filter) + elif args.command == 'get_similar_products_uri': + get_similar_products_uri( + args.project_id, args.location, args.product_set_id, + args.product_category, args.image_uri, args.filter) diff --git a/samples/snippets/product_search/product_search_test.py b/samples/snippets/product_search/product_search_test.py new file mode 100644 index 00000000..8f862fc2 --- /dev/null +++ b/samples/snippets/product_search/product_search_test.py @@ -0,0 +1,69 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import os

import pytest

from product_search import get_similar_products_file, get_similar_products_uri


PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT')
LOCATION = 'us-west1'

PRODUCT_SET_ID = 'indexed_product_set_id_for_testing'
PRODUCT_CATEGORY = 'apparel'
PRODUCT_ID_1 = 'indexed_product_id_for_testing_1'
PRODUCT_ID_2 = 'indexed_product_id_for_testing_2'

FILE_PATH_1 = 'resources/shoes_1.jpg'
IMAGE_URI_1 = 'gs://cloud-samples-data/vision/product_search/shoes_1.jpg'
FILTER = 'style=womens'


@pytest.mark.flaky(max_runs=5, min_passes=1)
def test_get_similar_products_file(capsys):
    # Without a filter, both indexed products should match.
    get_similar_products_file(
        PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, FILE_PATH_1,
        '')
    captured, _ = capsys.readouterr()
    assert PRODUCT_ID_1 in captured
    assert PRODUCT_ID_2 in captured


def test_get_similar_products_uri(capsys):
    # Without a filter, both indexed products should match.
    get_similar_products_uri(
        PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, IMAGE_URI_1,
        '')
    captured, _ = capsys.readouterr()
    assert PRODUCT_ID_1 in captured
    assert PRODUCT_ID_2 in captured


def test_get_similar_products_file_with_filter(capsys):
    # The label filter should exclude the second product.
    get_similar_products_file(
        PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, FILE_PATH_1,
        FILTER)
    captured, _ = capsys.readouterr()
    assert PRODUCT_ID_1 in captured
    assert PRODUCT_ID_2 not in captured


def test_get_similar_products_uri_with_filter(capsys):
    # The label filter should exclude the second product.
    get_similar_products_uri(
        PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, IMAGE_URI_1,
        FILTER)
    captured, _ = capsys.readouterr()
    assert PRODUCT_ID_1 in captured
    assert PRODUCT_ID_2 not in captured


#!/usr/bin/env python

# Copyright 2018 Google Inc. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform operations +on Product set in Cloud Vision Product Search. + +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_delete_product_set] +# [START vision_product_search_list_product_sets] +# [START vision_product_search_get_product_set] +# [START vision_product_search_create_product_set] +from google.cloud import vision + +# [END vision_product_search_delete_product_set] +# [END vision_product_search_list_product_sets] +# [END vision_product_search_get_product_set] +# [END vision_product_search_create_product_set] + + +# [START vision_product_search_create_product_set] +def create_product_set( + project_id, location, product_set_id, product_set_display_name): + """Create a product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + product_set_display_name: Display name of the product set. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = client.location_path( + project=project_id, location=location) + + # Create a product set with the product set specification in the region. + product_set = vision.types.ProductSet( + display_name=product_set_display_name) + + # The response is the product set with `name` populated. 
+ response = client.create_product_set( + parent=location_path, + product_set=product_set, + product_set_id=product_set_id) + + # Display the product set information. + print('Product set name: {}'.format(response.name)) +# [END vision_product_search_create_product_set] + + +# [START vision_product_search_list_product_sets] +def list_product_sets(project_id, location): + """List all product sets. + Args: + project_id: Id of the project. + location: A compute region name. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = client.location_path( + project=project_id, location=location) + + # List all the product sets available in the region. + product_sets = client.list_product_sets(parent=location_path) + + # Display the product set information. + for product_set in product_sets: + print('Product set name: {}'.format(product_set.name)) + print('Product set id: {}'.format(product_set.name.split('/')[-1])) + print('Product set display name: {}'.format(product_set.display_name)) + print('Product set index time:') + print(' seconds: {}'.format(product_set.index_time.seconds)) + print(' nanos: {}\n'.format(product_set.index_time.nanos)) +# [END vision_product_search_list_product_sets] + + +# [START vision_product_search_get_product_set] +def get_product_set(project_id, location, product_set_id): + """Get info about the product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product set. + product_set_path = client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + + # Get complete detail of the product set. + product_set = client.get_product_set(name=product_set_path) + + # Display the product set information. 
+ print('Product set name: {}'.format(product_set.name)) + print('Product set id: {}'.format(product_set.name.split('/')[-1])) + print('Product set display name: {}'.format(product_set.display_name)) + print('Product set index time:') + print(' seconds: {}'.format(product_set.index_time.seconds)) + print(' nanos: {}'.format(product_set.index_time.nanos)) +# [END vision_product_search_get_product_set] + + +# [START vision_product_search_delete_product_set] +def delete_product_set(project_id, location, product_set_id): + """Delete a product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product set. + product_set_path = client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + + # Delete the product set. + client.delete_product_set(name=product_set_path) + print('Product set deleted.') +# [END vision_product_search_delete_product_set] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + parser.add_argument( + '--project_id', + help='Project id. 
Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + + create_product_set_parser = subparsers.add_parser( + 'create_product_set', help=create_product_set.__doc__) + create_product_set_parser.add_argument('product_set_id') + create_product_set_parser.add_argument('product_set_display_name') + + list_product_sets_parser = subparsers.add_parser( + 'list_product_sets', help=list_product_sets.__doc__) + + get_product_set_parser = subparsers.add_parser( + 'get_product_set', help=get_product_set.__doc__) + get_product_set_parser.add_argument('product_set_id') + + delete_product_set_parser = subparsers.add_parser( + 'delete_product_set', help=delete_product_set.__doc__) + delete_product_set_parser.add_argument('product_set_id') + + args = parser.parse_args() + + if args.command == 'create_product_set': + create_product_set( + args.project_id, args.location, args.product_set_id, + args.product_set_display_name) + elif args.command == 'list_product_sets': + list_product_sets(args.project_id, args.location) + elif args.command == 'get_product_set': + get_product_set(args.project_id, args.location, args.product_set_id) + elif args.command == 'delete_product_set': + delete_product_set( + args.project_id, args.location, args.product_set_id) diff --git a/samples/snippets/product_search/product_set_management_test.py b/samples/snippets/product_search/product_set_management_test.py new file mode 100644 index 00000000..9d8248de --- /dev/null +++ b/samples/snippets/product_search/product_set_management_test.py @@ -0,0 +1,47 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import pytest + +from product_set_management import ( + create_product_set, delete_product_set, list_product_sets) + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_SET_DISPLAY_NAME = 'fake_product_set_display_name_for_testing' +PRODUCT_SET_ID = 'test_{}'.format(uuid.uuid4()) + + +@pytest.fixture(scope="function", autouse=True) +def setup(): + # set up + create_product_set( + PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_SET_DISPLAY_NAME) + + +def test_delete_product_set(capsys): + list_product_sets(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_SET_ID in out + + delete_product_set(PROJECT_ID, LOCATION, PRODUCT_SET_ID) + + list_product_sets(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_SET_ID not in out diff --git a/samples/snippets/product_search/reference_image_management.py b/samples/snippets/product_search/reference_image_management.py new file mode 100755 index 00000000..7e546b7e --- /dev/null +++ b/samples/snippets/product_search/reference_image_management.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python + +# Copyright 2018 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations on reference +images in Cloud Vision Product Search. + +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_create_reference_image] +# [START vision_product_search_delete_reference_image] +# [START vision_product_search_list_reference_images] +# [START vision_product_search_get_reference_image] +from google.cloud import vision + +# [END vision_product_search_create_reference_image] +# [END vision_product_search_delete_reference_image] +# [END vision_product_search_list_reference_images] +# [END vision_product_search_get_reference_image] + + +# [START vision_product_search_create_reference_image] +def create_reference_image( + project_id, location, product_id, reference_image_id, gcs_uri): + """Create a reference image. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + reference_image_id: Id of the reference image. + gcs_uri: Google Cloud Storage path of the input image. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Create a reference image. + reference_image = vision.types.ReferenceImage(uri=gcs_uri) + + # The response is the reference image with `name` populated. 
+ image = client.create_reference_image( + parent=product_path, + reference_image=reference_image, + reference_image_id=reference_image_id) + + # Display the reference image information. + print('Reference image name: {}'.format(image.name)) + print('Reference image uri: {}'.format(image.uri)) +# [END vision_product_search_create_reference_image] + + +# [START vision_product_search_list_reference_images] +def list_reference_images( + project_id, location, product_id): + """List all images in a product. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # List all the reference images available in the product. + reference_images = client.list_reference_images(parent=product_path) + + # Display the reference image information. + for image in reference_images: + print('Reference image name: {}'.format(image.name)) + print('Reference image id: {}'.format(image.name.split('/')[-1])) + print('Reference image uri: {}'.format(image.uri)) + print('Reference image bounding polygons: {}'.format( + image.bounding_polys)) +# [END vision_product_search_list_reference_images] + + +# [START vision_product_search_get_reference_image] +def get_reference_image( + project_id, location, product_id, reference_image_id): + """Get info about a reference image. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + reference_image_id: Id of the reference image. + """ + client = vision.ProductSearchClient() + + # Get the full path of the reference image. + reference_image_path = client.reference_image_path( + project=project_id, location=location, product=product_id, + reference_image=reference_image_id) + + # Get complete detail of the reference image. 
+ image = client.get_reference_image(name=reference_image_path) + + # Display the reference image information. + print('Reference image name: {}'.format(image.name)) + print('Reference image id: {}'.format(image.name.split('/')[-1])) + print('Reference image uri: {}'.format(image.uri)) + print('Reference image bounding polygons: {}'.format(image.bounding_polys)) +# [END vision_product_search_get_reference_image] + + +# [START vision_product_search_delete_reference_image] +def delete_reference_image( + project_id, location, product_id, reference_image_id): + """Delete a reference image. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + reference_image_id: Id of the reference image. + """ + client = vision.ProductSearchClient() + + # Get the full path of the reference image. + reference_image_path = client.reference_image_path( + project=project_id, location=location, product=product_id, + reference_image=reference_image_id) + + # Delete the reference image. + client.delete_reference_image(name=reference_image_path) + print('Reference image deleted from product.') +# [END vision_product_search_delete_reference_image] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + parser.add_argument( + '--project_id', + help='Project id. 
Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + + create_reference_image_parser = subparsers.add_parser( + 'create_reference_image', help=create_reference_image.__doc__) + create_reference_image_parser.add_argument('product_id') + create_reference_image_parser.add_argument('reference_image_id') + create_reference_image_parser.add_argument('gcs_uri') + + list_reference_images_parser = subparsers.add_parser( + 'list_reference_images', + help=list_reference_images.__doc__) + list_reference_images_parser.add_argument('product_id') + + get_reference_image_parser = subparsers.add_parser( + 'get_reference_image', help=get_reference_image.__doc__) + get_reference_image_parser.add_argument('product_id') + get_reference_image_parser.add_argument('reference_image_id') + + delete_reference_image_parser = subparsers.add_parser( + 'delete_reference_image', help=delete_reference_image.__doc__) + delete_reference_image_parser.add_argument('product_id') + delete_reference_image_parser.add_argument('reference_image_id') + + args = parser.parse_args() + + if args.command == 'create_reference_image': + create_reference_image( + args.project_id, args.location, args.product_id, + args.reference_image_id, args.gcs_uri) + elif args.command == 'list_reference_images': + list_reference_images( + args.project_id, args.location, args.product_id) + elif args.command == 'get_reference_image': + get_reference_image( + args.project_id, args.location, args.product_id, + args.reference_image_id) + elif args.command == 'delete_reference_image': + delete_reference_image( + args.project_id, args.location, args.product_id, + args.reference_image_id) diff --git a/samples/snippets/product_search/reference_image_management_test.py b/samples/snippets/product_search/reference_image_management_test.py new file mode 100644 index 00000000..5e6f8ac4 --- /dev/null +++ b/samples/snippets/product_search/reference_image_management_test.py @@ 
-0,0 +1,70 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import pytest + +from product_management import create_product, delete_product +from reference_image_management import ( + create_reference_image, delete_reference_image, list_reference_images) + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_DISPLAY_NAME = 'fake_product_display_name_for_testing' +PRODUCT_CATEGORY = 'homegoods' +PRODUCT_ID = 'test_{}'.format(uuid.uuid4()) + +REFERENCE_IMAGE_ID = 'fake_reference_image_id_for_testing' +GCS_URI = 'gs://cloud-samples-data/vision/product_search/shoes_1.jpg' + + +@pytest.fixture(scope="function", autouse=True) +def setup_teardown(): + # set up + create_product( + PROJECT_ID, LOCATION, PRODUCT_ID, + PRODUCT_DISPLAY_NAME, PRODUCT_CATEGORY) + + yield None + + # tear down + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID) + + +def test_create_reference_image(capsys): + create_reference_image( + PROJECT_ID, LOCATION, PRODUCT_ID, REFERENCE_IMAGE_ID, + GCS_URI) + list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID) + out, _ = capsys.readouterr() + assert REFERENCE_IMAGE_ID in out + + +def test_delete_reference_image(capsys): + create_reference_image( + PROJECT_ID, LOCATION, PRODUCT_ID, REFERENCE_IMAGE_ID, + GCS_URI) + list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID) + out, _ = capsys.readouterr() + assert REFERENCE_IMAGE_ID in out + + 
delete_reference_image( + PROJECT_ID, LOCATION, PRODUCT_ID, REFERENCE_IMAGE_ID) + list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID) + out, _ = capsys.readouterr() + assert REFERENCE_IMAGE_ID not in out diff --git a/samples/snippets/product_search/requirements-test.txt b/samples/snippets/product_search/requirements-test.txt new file mode 100644 index 00000000..95614446 --- /dev/null +++ b/samples/snippets/product_search/requirements-test.txt @@ -0,0 +1,2 @@ +pytest==6.0.1 +flaky==3.7.0 \ No newline at end of file diff --git a/samples/snippets/product_search/requirements.txt b/samples/snippets/product_search/requirements.txt new file mode 100644 index 00000000..3e50f36f --- /dev/null +++ b/samples/snippets/product_search/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==1.0.0 +google-cloud-storage==1.31.0 \ No newline at end of file diff --git a/samples/snippets/product_search/resources/indexed_product_sets.csv b/samples/snippets/product_search/resources/indexed_product_sets.csv new file mode 100644 index 00000000..329ac216 --- /dev/null +++ b/samples/snippets/product_search/resources/indexed_product_sets.csv @@ -0,0 +1,2 @@ +"gs://cloud-samples-data/vision/product_search/shoes_1.jpg","indexed_product_set_id_for_testing","indexed_product_id_for_testing_1","apparel","style=womens","0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9" +"gs://cloud-samples-data/vision/product_search/shoes_2.jpg","indexed_product_set_id_for_testing","indexed_product_id_for_testing_2","apparel",, \ No newline at end of file diff --git a/samples/snippets/product_search/resources/product_sets.csv b/samples/snippets/product_search/resources/product_sets.csv new file mode 100644 index 00000000..68657eed --- /dev/null +++ b/samples/snippets/product_search/resources/product_sets.csv @@ -0,0 +1,2 @@ +"gs://cloud-samples-data/vision/product_search/shoes_1.jpg","fake_product_set_id_for_testing","fake_product_id_for_testing_1","apparel","style=womens","0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9" 
+"gs://cloud-samples-data/vision/product_search/shoes_2.jpg","fake_product_set_id_for_testing","fake_product_id_for_testing_2","apparel",, \ No newline at end of file diff --git a/samples/snippets/product_search/resources/shoes_1.jpg b/samples/snippets/product_search/resources/shoes_1.jpg new file mode 100644 index 00000000..78318eef Binary files /dev/null and b/samples/snippets/product_search/resources/shoes_1.jpg differ diff --git a/samples/snippets/product_search/resources/shoes_2.jpg b/samples/snippets/product_search/resources/shoes_2.jpg new file mode 100644 index 00000000..cdfa80dd Binary files /dev/null and b/samples/snippets/product_search/resources/shoes_2.jpg differ diff --git a/samples/snippets/quickstart/README.rst b/samples/snippets/quickstart/README.rst new file mode 100644 index 00000000..aa4be034 --- /dev/null +++ b/samples/snippets/quickstart/README.rst @@ -0,0 +1,101 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/quickstart/README.rst + + +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + + + +.. 
_Google Cloud Vision API: https://cloud.google.com/vision/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Quickstart ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/quickstart/quickstart.py,vision/cloud-client/quickstart/README.rst + + + + +To run this sample: + +.. 
code-block:: bash + + $ python quickstart.py + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/samples/snippets/quickstart/README.rst.in b/samples/snippets/quickstart/README.rst.in new file mode 100644 index 00000000..bd650a6c --- /dev/null +++ b/samples/snippets/quickstart/README.rst.in @@ -0,0 +1,29 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. 
_migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Quickstart + file: quickstart.py + +cloud_client_library: true + +folder: vision/cloud-client/quickstart \ No newline at end of file diff --git a/samples/snippets/quickstart/noxfile.py b/samples/snippets/quickstart/noxfile.py new file mode 100644 index 00000000..ba55d7ce --- /dev/null +++ b/samples/snippets/quickstart/noxfile.py @@ -0,0 +1,224 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. 
 Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to test samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to ensure that import order is + properly checked. 
+ """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/quickstart/quickstart.py b/samples/snippets/quickstart/quickstart.py new file mode 100644 index 00000000..a8c7a5f2 --- /dev/null +++ b/samples/snippets/quickstart/quickstart.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def run_quickstart(): + # [START vision_quickstart] + import io + import os + + # Imports the Google Cloud client library + # [START vision_python_migration_import] + from google.cloud import vision + from google.cloud.vision import types + # [END vision_python_migration_import] + + # Instantiates a client + # [START vision_python_migration_client] + client = vision.ImageAnnotatorClient() + # [END vision_python_migration_client] + + # The name of the image file to annotate + file_name = os.path.abspath('resources/wakeupcat.jpg') + + # Loads the image into memory + with io.open(file_name, 'rb') as image_file: + content = image_file.read() + + image = types.Image(content=content) + + # Performs label detection on the image file + response = client.label_detection(image=image) + labels = response.label_annotations + + print('Labels:') + for label in labels: + print(label.description) + # [END vision_quickstart] + + +if __name__ == '__main__': + run_quickstart() diff --git a/samples/snippets/quickstart/quickstart_test.py b/samples/snippets/quickstart/quickstart_test.py new file mode 100644 index 00000000..d483d413 --- /dev/null +++ b/samples/snippets/quickstart/quickstart_test.py @@ -0,0 +1,21 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import quickstart + + +def test_quickstart(capsys): + quickstart.run_quickstart() + out, _ = capsys.readouterr() + assert 'Labels' in out diff --git a/samples/snippets/quickstart/requirements-test.txt b/samples/snippets/quickstart/requirements-test.txt new file mode 100644 index 00000000..7e460c8c --- /dev/null +++ b/samples/snippets/quickstart/requirements-test.txt @@ -0,0 +1 @@ +pytest==6.0.1 diff --git a/samples/snippets/quickstart/requirements.txt b/samples/snippets/quickstart/requirements.txt new file mode 100644 index 00000000..7000a096 --- /dev/null +++ b/samples/snippets/quickstart/requirements.txt @@ -0,0 +1 @@ +google-cloud-vision==1.0.0 diff --git a/samples/snippets/quickstart/resources/wakeupcat.jpg b/samples/snippets/quickstart/resources/wakeupcat.jpg new file mode 100644 index 00000000..139cf461 Binary files /dev/null and b/samples/snippets/quickstart/resources/wakeupcat.jpg differ diff --git a/samples/snippets/web/README.rst b/samples/snippets/web/README.rst new file mode 100644 index 00000000..fe3e900b --- /dev/null +++ b/samples/snippets/web/README.rst @@ -0,0 +1,136 @@ + +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/web/README.rst + + +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + + +Setup +------------------------------------------------------------------------------- + + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + + + + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 3.6+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. 
code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + + + + + + +Samples +------------------------------------------------------------------------------- + + +Web ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/web/web_detect.py,vision/cloud-client/web/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python web_detect.py + + + usage: web_detect.py [-h] image_url + + Demonstrates web detection using the Google Cloud Vision API. + + Example usage: + python web_detect.py https://goo.gl/X4qcB6 + python web_detect.py ../detect/resources/landmark.jpg + python web_detect.py gs://your-bucket/image.png + + positional arguments: + image_url The image to detect, can be web URI, Google Cloud Storage, or + path to local file. + + optional arguments: + -h, --help show this help message and exit + + + + + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/samples/snippets/web/README.rst.in b/samples/snippets/web/README.rst.in new file mode 100644 index 00000000..8b8533b5 --- /dev/null +++ b/samples/snippets/web/README.rst.in @@ -0,0 +1,30 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Web + file: web_detect.py + show_help: True + +cloud_client_library: true + +folder: vision/cloud-client/web \ No newline at end of file diff --git a/samples/snippets/web/noxfile.py b/samples/snippets/web/noxfile.py new file mode 100644 index 00000000..ba55d7ce --- /dev/null +++ b/samples/snippets/web/noxfile.py @@ -0,0 +1,224 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. 
+    ret.update(TEST_CONFIG['envs'])
+    return ret
+
+
+# DO NOT EDIT - automatically generated.
+# All versions used to test samples.
+ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+
+# Any default versions that should be ignored.
+IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+
+TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
+
+INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+#
+# Style Checks
+#
+
+
+def _determine_local_import_names(start_dir):
+    """Determines all import names that should be considered "local".
+
+    This is used when running the linter to ensure that import order is
+    properly checked.
+    """
+    file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]
+    return [
+        basename
+        for basename, extension in file_ext_pairs
+        if extension == ".py"
+        or os.path.isdir(os.path.join(start_dir, basename))
+        and basename not in ("__pycache__")
+    ]
+
+
+# Linting with flake8.
+#
+# We ignore the following rules:
+# E203: whitespace before ‘:’
+# E266: too many leading ‘#’ for block comment
+# E501: line too long
+# I202: Additional newline in a section of imports
+#
+# We also need to specify the rules which are ignored by default:
+# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
+FLAKE8_COMMON_ARGS = [
+    "--show-source",
+    "--builtin=gettext",
+    "--max-complexity=20",
+    "--import-order-style=google",
+    "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
+    "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
+    "--max-line-length=88",
+]
+
+
+@nox.session
+def lint(session):
+    session.install("flake8", "flake8-import-order")
+
+    local_names = _determine_local_import_names(".")
+    args = FLAKE8_COMMON_ARGS + [
+        "--application-import-names",
+        ",".join(local_names),
+        "."
+ ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/web/requirements-test.txt b/samples/snippets/web/requirements-test.txt new file mode 100644 index 00000000..f8230db6 --- /dev/null +++ b/samples/snippets/web/requirements-test.txt @@ -0,0 +1,2 @@ +flaky==3.7.0 +pytest==6.0.1 diff --git a/samples/snippets/web/requirements.txt b/samples/snippets/web/requirements.txt new file mode 100644 index 00000000..7000a096 --- /dev/null +++ b/samples/snippets/web/requirements.txt @@ -0,0 +1 @@ +google-cloud-vision==1.0.0 diff --git a/samples/snippets/web/web_detect.py b/samples/snippets/web/web_detect.py new file mode 100644 index 00000000..6cdfa256 --- /dev/null +++ b/samples/snippets/web/web_detect.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python + +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Demonstrates web detection using the Google Cloud Vision API. + +Example usage: + python web_detect.py https://goo.gl/X4qcB6 + python web_detect.py ../detect/resources/landmark.jpg + python web_detect.py gs://your-bucket/image.png +""" +# [START vision_web_detection_tutorial] +# [START vision_web_detection_tutorial_imports] +import argparse +import io + +from google.cloud import vision +from google.cloud.vision import types +# [END vision_web_detection_tutorial_imports] + + +def annotate(path): + """Returns web annotations given the path to an image.""" + # [START vision_web_detection_tutorial_annotate] + client = vision.ImageAnnotatorClient() + + if path.startswith('http') or path.startswith('gs:'): + image = types.Image() + image.source.image_uri = path + + else: + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = types.Image(content=content) + + web_detection = client.web_detection(image=image).web_detection + # [END vision_web_detection_tutorial_annotate] + + return web_detection + + +def report(annotations): + """Prints detected features in the provided web annotations.""" + # [START vision_web_detection_tutorial_print_annotations] + if annotations.pages_with_matching_images: + print('\n{} Pages with matching images retrieved'.format( + len(annotations.pages_with_matching_images))) + + for page in annotations.pages_with_matching_images: + print('Url : {}'.format(page.url)) + + if annotations.full_matching_images: + print('\n{} Full Matches found: '.format( + len(annotations.full_matching_images))) + + for image in annotations.full_matching_images: + print('Url : {}'.format(image.url)) + + if annotations.partial_matching_images: + print('\n{} Partial Matches found: '.format( + len(annotations.partial_matching_images))) + + for image in annotations.partial_matching_images: + print('Url : {}'.format(image.url)) + + if 
annotations.web_entities: + print('\n{} Web entities found: '.format( + len(annotations.web_entities))) + + for entity in annotations.web_entities: + print('Score : {}'.format(entity.score)) + print('Description: {}'.format(entity.description)) + # [END vision_web_detection_tutorial_print_annotations] + + +if __name__ == '__main__': + # [START vision_web_detection_tutorial_run_application] + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + path_help = str('The image to detect, can be web URI, ' + 'Google Cloud Storage, or path to local file.') + parser.add_argument('image_url', help=path_help) + args = parser.parse_args() + + report(annotate(args.image_url)) + # [END vision_web_detection_tutorial_run_application] +# [END vision_web_detection_tutorial] diff --git a/samples/snippets/web/web_detect_test.py b/samples/snippets/web/web_detect_test.py new file mode 100644 index 00000000..83f8c84f --- /dev/null +++ b/samples/snippets/web/web_detect_test.py @@ -0,0 +1,35 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +import web_detect + +ASSET_BUCKET = "cloud-samples-data" + + +def test_detect_file(capsys): + file_name = ('../detect/resources/landmark.jpg') + web_detect.report(web_detect.annotate(file_name)) + out, _ = capsys.readouterr() + assert 'description' in out.lower() + + +@pytest.mark.flaky(max_runs=3, min_passes=1) +def test_detect_web_gsuri(capsys): + file_name = ('gs://{}/vision/landmark/pofa.jpg'.format( + ASSET_BUCKET)) + web_detect.report(web_detect.annotate(file_name)) + out, _ = capsys.readouterr() + assert 'description:' in out.lower() diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh index ff599eb2..21f6d2a2 100755 --- a/scripts/decrypt-secrets.sh +++ b/scripts/decrypt-secrets.sh @@ -20,14 +20,27 @@ ROOT=$( dirname "$DIR" ) # Work from the project root. cd $ROOT +# Prevent it from overriding files. +# We recommend that sample authors use their own service account files and cloud project. +# In that case, they are supposed to prepare these files by themselves. +if [[ -f "testing/test-env.sh" ]] || \ + [[ -f "testing/service-account.json" ]] || \ + [[ -f "testing/client-secrets.json" ]]; then + echo "One or more target files exist, aborting." + exit 1 +fi + # Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. 
PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + --project="${PROJECT_ID}" \ > testing/test-env.sh gcloud secrets versions access latest \ --secret="python-docs-samples-service-account" \ + --project="${PROJECT_ID}" \ > testing/service-account.json gcloud secrets versions access latest \ --secret="python-docs-samples-client-secrets" \ - > testing/client-secrets.json \ No newline at end of file + --project="${PROJECT_ID}" \ + > testing/client-secrets.json diff --git a/synth.metadata b/synth.metadata index 900dbd2b..18ef3568 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,22 +4,21 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-vision.git", - "sha": "39c16522f7bc97544c361f8e14dbc9a2a5d4c0e4" + "sha": "7b56da8f133d23a3f177fc28d6df53c942929f78" } }, { "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "b882b8e6bfcd708042ff00f7adc67ce750817dd0", - "internalRef": "318028816" + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "b15c0c042cdea746fc19856527d8baf947c3c220" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "303271797a360f8a439203413f13a160f2f5b3b4" + "sha": "b15c0c042cdea746fc19856527d8baf947c3c220" } } ], diff --git a/synth.py b/synth.py index b98c7b33..2440c5c9 100644 --- a/synth.py +++ b/synth.py @@ -16,6 +16,7 @@ import synthtool as s from synthtool import gcp +from synthtool.languages import python gapic = gcp.GAPICBazel() common = gcp.CommonTemplates() @@ -85,9 +86,15 @@ # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library( - cov_level=99, system_test_external_dependencies=["google-cloud-storage"] + samples=True, cov_level=99, system_test_external_dependencies=["google-cloud-storage"] ) 
s.move(templated_files) + +# ---------------------------------------------------------------------------- +# Samples templates +# ---------------------------------------------------------------------------- +python.py_samples(skip_readmes=True) + # TODO(busunkim): Use latest sphinx after microgenerator transition s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"')