Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Cleanup: remove TEST_MODE setting #645

Merged
Merged 6 commits into the base branch on Oct 18, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
26 changes: 16 additions & 10 deletions recipe_scrapers/goustojson.py
@@ -1,9 +1,7 @@
# mypy: disallow_untyped_defs=False
import json
import requests

from recipe_scrapers.settings import settings

from ._abstract import AbstractScraper
from ._abstract import AbstractScraper, HEADERS
from ._utils import get_minutes, get_yields, normalize_string, url_path_to_dict


Expand All @@ -13,14 +11,22 @@ class GoustoJson(AbstractScraper):
Let's see if it stands the test of time and reevaluate.
"""

def __init__(self, url, *args, **kwargs):
if not settings.TEST_MODE: # pragma: no cover
recipe_slug = url_path_to_dict(url).get("path").split("/")[-1]
url = f"https://production-api.gousto.co.uk/cmsreadbroker/v1/recipe/{recipe_slug}"

def __init__(self, url, proxies=None, timeout=None, *args, **kwargs):
    """Scrape a Gousto recipe via its JSON API instead of the HTML page.

    The public recipe URL is passed to the parent scraper unchanged; the
    actual recipe content is then fetched from Gousto's cmsreadbroker API
    using the recipe slug taken from the URL path.

    :param url: public recipe URL, e.g. https://www.gousto.co.uk/cookbook/recipes/<slug>
    :param proxies: optional mapping passed through to requests.get
    :param timeout: optional timeout passed through to requests.get
    """
    super().__init__(url=url, *args, **kwargs)

    # Derive the recipe slug from the last path segment.  Strip a trailing
    # slash first so ".../recipes/<slug>/" does not produce an empty slug
    # (and therefore a malformed API URL).
    path = url_path_to_dict(url).get("path")
    recipe_slug = path.rstrip("/").split("/")[-1]
    data_url = (
        f"https://production-api.gousto.co.uk/cmsreadbroker/v1/recipe/{recipe_slug}"
    )

    recipe_json = requests.get(
        data_url,
        headers=HEADERS,
        proxies=proxies,
        timeout=timeout,
    ).json()

    # NOTE(review): assumes the API response always carries a "data" object
    # with an "entry" payload — .get("data") returning None would raise
    # AttributeError on the next line; verify against the API contract.
    self.page_data = recipe_json.get("data")
    self.data = self.page_data.get("entry")

@classmethod
Expand Down
3 changes: 0 additions & 3 deletions recipe_scrapers/settings/default.py
Expand Up @@ -37,9 +37,6 @@
}


TEST_MODE = False


# logging.DEBUG # 10
# logging.INFO # 20
# logging.WARNING # 30
Expand Down
23 changes: 13 additions & 10 deletions recipe_scrapers/woolworths.py
@@ -1,23 +1,26 @@
# mypy: disallow_untyped_defs=False
import json
import requests

from recipe_scrapers.settings import settings

from ._abstract import AbstractScraper
from ._abstract import AbstractScraper, HEADERS
from ._schemaorg import SchemaOrg
from ._utils import url_path_to_dict


class Woolworths(AbstractScraper):
def __init__(self, url, *args, **kwargs):
if not settings.TEST_MODE: # pragma: no cover
target = url_path_to_dict(url)["path"].split("/")[-1]
url = f"https://foodhub.woolworths.com.au/content/woolworths-foodhub/en/{target}.model.json"

def __init__(self, url, proxies=None, timeout=None, *args, **kwargs):
super().__init__(url=url, *args, **kwargs)

target = url_path_to_dict(url)["path"].split("/")[-1]
data_url = f"https://foodhub.woolworths.com.au/content/woolworths-foodhub/en/{target}.model.json"

self.page_data = (
json.loads(self.page_data)
requests.get(
data_url,
headers=HEADERS,
proxies=proxies,
timeout=timeout,
)
.json()
.get(":items")
.get("root")
.get(":items")
Expand Down
1 change: 0 additions & 1 deletion tests/test_data/test_settings_module/test_settings.py
@@ -1,3 +1,2 @@
SUPPRESS_EXCEPTIONS = True
TEST_MODE = True
# LOG_LEVEL = 20
8 changes: 6 additions & 2 deletions tests/test_goustojson.py
@@ -1,12 +1,16 @@
from responses import GET
from recipe_scrapers.goustojson import GoustoJson
from tests import ScraperTest


class TestGoustoScraper(ScraperTest):

scraper_class = GoustoJson
test_file_name = "gousto"
test_file_extension = "testjson"

@property
def expected_requests(self):
    """Yield (method, url, fixture) triples the test harness should mock.

    Both the public recipe page and the backing cmsreadbroker API call are
    served from the same local JSON fixture.
    """
    page_url = "https://www.gousto.co.uk/cookbook/recipes/malaysian-style-coconut-meat-free-chicken-pickled-cucumber"
    api_url = "https://production-api.gousto.co.uk/cmsreadbroker/v1/recipe/malaysian-style-coconut-meat-free-chicken-pickled-cucumber"
    fixture = "tests/test_data/gousto.testjson"
    yield GET, page_url, fixture
    yield GET, api_url, fixture

def test_host(self):
    """host() reports the consumer-facing domain, not the API hostname."""
    expected = "gousto.co.uk"
    actual = self.harvester_class.host()
    self.assertEqual(expected, actual)
Expand Down
6 changes: 6 additions & 0 deletions tests/test_woolworths.py
@@ -1,3 +1,4 @@
from responses import GET
from recipe_scrapers.woolworths import Woolworths
from tests import ScraperTest

Expand All @@ -6,6 +7,11 @@ class TestWoolworthsScraper(ScraperTest):

scraper_class = Woolworths

@property
def expected_requests(self):
    """Yield (method, url, fixture) triples the test harness should mock.

    Both the public recipe page and the foodhub model.json API call are
    served from the same local fixture.
    """
    page_url = "https://www.woolworths.com.au/shop/recipes/asparagus-salad-with-lemon-vinaigrette"
    api_url = "https://foodhub.woolworths.com.au/content/woolworths-foodhub/en/asparagus-salad-with-lemon-vinaigrette.model.json"
    fixture = "tests/test_data/woolworths.testhtml"
    yield GET, page_url, fixture
    yield GET, api_url, fixture

def test_host(self):
    """host() reports the consumer-facing domain, not the API hostname."""
    expected = "woolworths.com.au"
    actual = self.harvester_class.host()
    self.assertEqual(expected, actual)

Expand Down