Bug fix: correctly detect the recipe URL during import when none is provided

This commit is contained in:
smilerz
2021-04-06 10:57:13 -05:00
parent 393aba1f31
commit a84c41e29f
5 changed files with 23 additions and 20 deletions

View File

@@ -76,6 +76,9 @@ def get_recipe_from_source(text, url, space):
text = normalize_string(text)
try:
parse_list.append(remove_graph(json.loads(text)))
if not url and 'url' in parse_list[0]:
url = parse_list[0]['url']
scrape = text_scraper("<script type='application/ld+json'>" + text + "</script>", url=url)
except JSONDecodeError:
soup = BeautifulSoup(text, "html.parser")
@@ -83,6 +86,8 @@ def get_recipe_from_source(text, url, space):
images += get_images_from_source(soup, url)
for el in soup.find_all('script', type='application/ld+json'):
el = remove_graph(el)
if not url and 'url' in el:
url = el['url']
if type(el) == list:
for le in el:
parse_list.append(le)
@@ -95,15 +100,6 @@ def get_recipe_from_source(text, url, space):
parse_list.append(le)
elif type(el) == dict:
parse_list.append(el)
# if a url was not provided, try to find one in the first document
if not url and len(parse_list) > 0:
if 'url' in parse_list[0]:
url = parse_list[0]['url']
if type(text) == dict:
scrape = text_scraper("<script type='application/ld+json'>" + text + "</script>", url=url)
elif type(text) == str:
scrape = text_scraper(text, url=url)
recipe_json = helper.get_from_scraper(scrape, space)

View File

@@ -39,10 +39,9 @@ def get_from_scraper(scrape, space):
pass
try:
recipe_json['image'] = parse_image(scrape.image())
recipe_json['image'] = parse_image(scrape.image()) or ''
except (AttributeError, TypeError):
recipe_json['image'] = ''
pass
keywords = []
try:
@@ -283,11 +282,11 @@ def parse_keywords(keyword_json, space):
# keywords as list
for kw in keyword_json:
kw = normalize_string(kw)
if k := Keyword.objects.filter(name=kw, space=space).first():
if len(k) != 0:
if len(kw) != 0:
if k := Keyword.objects.filter(name=kw, space=space).first():
keywords.append({'id': str(k.id), 'text': str(k)})
else:
keywords.append({'id': random.randrange(1111111, 9999999, 1), 'text': kw})
else:
keywords.append({'id': random.randrange(1111111, 9999999, 1), 'text': kw})
return keywords

View File

@@ -30,7 +30,7 @@ def text_scraper(text, url=None):
url=None
):
self.wild_mode = False
self.exception_handling = _exception_handling
self.exception_handling = None
self.meta_http_equiv = False
self.soup = BeautifulSoup(page_data, "html.parser")
self.url = url