bring parser inline with json_import branch
@@ -68,7 +68,7 @@ def get_recipe_from_source(text, url, space):
         'servings': '',
         'prepTime': '',
         'cookTime': ''
     }
     recipe_tree = []
     parse_list = []
     html_data = []
@@ -77,6 +77,9 @@ def get_recipe_from_source(text, url, space):
 
     try:
         parse_list.append(remove_graph(json.loads(text)))
+        if not url and 'url' in parse_list[0]:
+            url = parse_list[0]['url']
+        scrape = text_scraper("<script type='application/ld+json'>" + text + "</script>", url=url)
 
     except JSONDecodeError:
         soup = BeautifulSoup(text, "html.parser")
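The try-branch now treats raw ld+json input the same way as markup: the bare JSON text is wrapped in a synthetic script tag so the scraper's schema.org path can consume it. A minimal sketch of that round trip (illustrative only; text_scraper is the project's own helper and is not reproduced here):

    import json

    from bs4 import BeautifulSoup

    raw = '{"@type": "Recipe", "name": "Goulash", "url": "https://example.org/goulash"}'
    # Wrap the bare JSON exactly as the diff does, then show that an HTML
    # parser can hand the payload back to schema.org extraction machinery.
    html = "<script type='application/ld+json'>" + raw + "</script>"
    soup = BeautifulSoup(html, "html.parser")
    recovered = json.loads(soup.find('script', type='application/ld+json').string)
    assert recovered['url'] == 'https://example.org/goulash'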
@@ -84,6 +87,8 @@ def get_recipe_from_source(text, url, space):
         images += get_images_from_source(soup, url)
         for el in soup.find_all('script', type='application/ld+json'):
             el = remove_graph(el)
+            if not url and 'url' in el:
+                url = el['url']
             if type(el) == list:
                 for le in el:
                     parse_list.append(le)
@@ -96,15 +101,6 @@ def get_recipe_from_source(text, url, space):
                         parse_list.append(le)
             elif type(el) == dict:
                 parse_list.append(el)
-
-    # if a url was not provided, try to find one in the first document
-    if not url and len(parse_list) > 0:
-        if 'url' in parse_list[0]:
-            url = parse_list[0]['url']
-
-    if type(text) == dict:
-        scrape = text_scraper("<script type='application/ld+json'>" + text + "</script>", url=url)
-    elif type(text) == str:
-        scrape = text_scraper(text, url=url)
+        scrape = text_scraper(text, url=url)
 
     recipe_json = helper.get_from_scraper(scrape, space)
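Net effect of the hunks above: the post-hoc URL fix-up and the type(text) dispatch are gone, and each branch now resolves the scraper and the fallback URL itself. A sketch of the per-document URL fallback both branches use (the example data is hypothetical):

    # Each ld+json document found in a page may carry its own 'url'; the
    # first one seen backfills a missing source URL, later ones are ignored.
    docs = [{'@type': 'Recipe'}, {'@type': 'Recipe', 'url': 'https://example.org/soup'}]
    url = None
    for el in docs:
        if not url and 'url' in el:
            url = el['url']
    print(url)  # https://example.org/soup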
@@ -6,6 +6,7 @@ from isodate.isoerror import ISO8601Error
 from cookbook.helper.ingredient_parser import parse as parse_single_ingredient
 from cookbook.models import Keyword
 from django.utils.dateparse import parse_duration
+from recipe_scrapers._schemaorg import SchemaOrgException
 from recipe_scrapers._utils import get_minutes, normalize_string
 
 
@@ -13,7 +14,10 @@ def get_from_scraper(scrape, space):
     # converting the scrape_me object to the existing json format based on ld+json
 
     recipe_json = {}
-    recipe_json['name'] = scrape.title()
+    try:
+        recipe_json['name'] = scrape.title()
+    except TypeError:
+        recipe_json['name'] = ''
 
     try:
         description = scrape.schema.data.get("description") or ''
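The title lookup is now guarded because scrape.title() can raise a TypeError on pages without a usable title, presumably when the underlying value comes back as None. The pattern, extracted as a sketch (safe_title is a hypothetical name, not part of the codebase):

    def safe_title(scrape):
        # Mirror of the guard above: fall back to '' instead of letting a
        # None-valued title propagate a TypeError to the caller.
        try:
            return scrape.title()
        except TypeError:
            return ''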
@@ -21,7 +25,7 @@ def get_from_scraper(scrape, space):
     except AttributeError:
         description = ''
 
-    recipe_json['description'] = normalize_string(description)
+    recipe_json['description'] = parse_description(description)
 
     try:
         servings = scrape.yields()
@@ -40,7 +44,7 @@ def get_from_scraper(scrape, space):
 
     try:
         recipe_json['image'] = parse_image(scrape.image()) or ''
-    except (AttributeError, TypeError):
+    except (AttributeError, TypeError, SchemaOrgException):
         recipe_json['image'] = ''
 
     keywords = []
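SchemaOrgException is what recipe_scrapers raises when the schema.org data it was asked for is missing, so image extraction on schema-poor pages previously escaped this except clause. A sketch of the broadened guard (safe_image is a hypothetical helper for illustration; parse_image, the project's own cleaner, is omitted):

    from recipe_scrapers._schemaorg import SchemaOrgException

    def safe_image(scrape):
        try:
            return scrape.image() or ''
        except (AttributeError, TypeError, SchemaOrgException):
            # missing attribute, None-typed value, or schema.org data
            # that simply has no image
            return ''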
@@ -181,6 +185,14 @@ def parse_ingredients(ingredients):
     return ingredients
 
 
+def parse_description(description):
+    description = re.sub(r'\n\s*\n', '\n\n', description)
+    description = re.sub(' +', ' ', description)
+    description = re.sub('</p>', '\n', description)
+    description = re.sub('<[^<]+?>', '', description)
+    return normalize_string(description)
+
+
 def parse_instructions(instructions):
     instruction_text = ''
 
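The practical gain of parse_description over the bare normalize_string call it replaces is that HTML markup in scraped descriptions is stripped rather than carried through as literal text; the whitespace passes tidy the string before normalization. Running the new helper over a typical scraped snippet (a sketch; the printed result assumes normalize_string from recipe_scrapers._utils collapses the remaining whitespace into single spaces):

    import re

    from recipe_scrapers._utils import normalize_string

    def parse_description(description):
        description = re.sub(r'\n\s*\n', '\n\n', description)  # squeeze blank-line runs
        description = re.sub(' +', ' ', description)            # collapse repeated spaces
        description = re.sub('</p>', '\n', description)         # paragraph ends become newlines
        description = re.sub('<[^<]+?>', '', description)       # strip remaining tags
        return normalize_string(description)

    print(parse_description("<p>A  hearty   stew.</p>\n\n\n<p>Serve <b>hot</b>.</p>"))
    # -> A hearty stew. Serve hot.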