This repository has been archived by the owner on Jul 5, 2024. It is now read-only.

Commit

Fix check-album status (not being awaited)
Jules-WinnfieldX committed Mar 8, 2024
1 parent da18089 commit 49dd6ba
Showing 6 changed files with 8 additions and 11 deletions.
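
A minimal sketch of the bug this commit fixes, assuming check_album_results is an async method that returns True when a link already appears in prior results (the names check_album_results and handle_file come from the diff; the Crawler class and main scaffolding below are illustrative only). Calling an async method without await returns a coroutine object, which is always truthy, so "if not self.check_album_results(...)" never enters the branch, handle_file is silently skipped, and Python emits a "coroutine was never awaited" RuntimeWarning:

import asyncio

class Crawler:
    async def check_album_results(self, link: str, results: set) -> bool:
        # Stand-in lookup: True means the file was already handled.
        return link in results

    async def handle_file(self, link: str) -> None:
        print(f"downloading {link}")

async def main() -> None:
    crawler = Crawler()
    results = {"https://example.com/seen.jpg"}
    link = "https://example.com/new.jpg"

    # Buggy call (pre-commit): the un-awaited coroutine object is truthy,
    # so "not ..." is always False and the file is never downloaded.
    if not crawler.check_album_results(link, results):  # RuntimeWarning: never awaited
        await crawler.handle_file(link)

    # Fixed call (this commit): await the coroutine so the real boolean is tested.
    if not await crawler.check_album_results(link, results):
        await crawler.handle_file(link)

asyncio.run(main())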
cyberdrop_dl/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -1 +1 @@
-__version__ = "5.1.94"
+__version__ = "5.1.95"

cyberdrop_dl/scraper/crawlers/bunkrr_crawler.py (7 changes: 2 additions & 5 deletions)
@@ -86,13 +86,10 @@ async def album(self, scrape_item: ScrapeItem) -> None:
 
                 if "no-image" in src.name:
                     raise FileNotFoundError("No image found, reverting to parent")
 
-                album_check = await self.check_album_results(src, results)
-                if album_check:
-                    continue
-
                 filename, ext = await get_filename_and_ext(src.name)
-                await self.handle_file(src, new_scrape_item, filename, ext)
+                if not await self.check_album_results(src, results):
+                    await self.handle_file(src, new_scrape_item, filename, ext)
             except FileNotFoundError:
                 self.manager.task_group.create_task(self.run(ScrapeItem(link, scrape_item.parent_title, True, album_id, date)))

cyberdrop_dl/scraper/crawlers/erome_crawler.py (4 changes: 2 additions & 2 deletions)
@@ -72,11 +72,11 @@ async def album(self, scrape_item: ScrapeItem) -> None:
         for image in images:
             link = URL(image['data-src'])
             filename, ext = await get_filename_and_ext(link.name)
-            if not self.check_album_results(link, results):
+            if not await self.check_album_results(link, results):
                 await self.handle_file(link, scrape_item, filename, ext)
 
         for video in vidoes:
             link = URL(video['src'])
             filename, ext = await get_filename_and_ext(link.name)
-            if not self.check_album_results(link, results):
+            if not await self.check_album_results(link, results):
                 await self.handle_file(link, scrape_item, filename, ext)

cyberdrop_dl/scraper/crawlers/jpgchurch_crawler.py (2 changes: 1 addition & 1 deletion)
@@ -96,7 +96,7 @@ async def album(self, scrape_item: ScrapeItem) -> None:
         for link in links:
             link = URL(link.get('src'))
             new_scrape_item = await self.create_scrape_item(scrape_item, link, title, True, album_id)
-            if not self.check_album_results(link, results):
+            if not await self.check_album_results(link, results):
                 await self.handle_direct_link(new_scrape_item)
 
         link_next = soup.select_one('a[data-pagination=next]')

cyberdrop_dl/scraper/crawlers/pixeldrain_crawler.py (2 changes: 1 addition & 1 deletion)
@@ -50,7 +50,7 @@ async def folder(self, scrape_item: ScrapeItem) -> None:
             date = await self.parse_datetime(file['date_upload'].replace("T", " ").split(".")[0].strip("Z"))
             filename, ext = await get_filename_and_ext(file['name'])
             new_scrape_item = await self.create_scrape_item(scrape_item, link, title, True, None, date)
-            if not self.check_album_results(link, results):
+            if not await self.check_album_results(link, results):
                 await self.handle_file(link, new_scrape_item, filename, ext)
 
     @error_handling_wrapper

pyproject.toml (2 changes: 1 addition & 1 deletion)
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cyberdrop-dl"
-version = "5.1.94"
+version = "5.1.95"
 description = "Bulk downloader for multiple file hosts"
 authors = ["Jules Winnfield <[email protected]>"]
 readme = "README.md"

0 comments on commit 49dd6ba
