From a20607fab21932bcfa9294edfa560bbb068f00af Mon Sep 17 00:00:00 2001
From: Jules-WinnfieldX
Date: Tue, 5 Dec 2023 20:11:10 -0700
Subject: [PATCH] Fix gofile not properly utilizing wrapper format.

---
 cyberdrop_dl/__init__.py                        | 2 +-
 cyberdrop_dl/scraper/crawlers/gofile_crawler.py | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/cyberdrop_dl/__init__.py b/cyberdrop_dl/__init__.py
index 0b6d04a4a..ff4f483ae 100644
--- a/cyberdrop_dl/__init__.py
+++ b/cyberdrop_dl/__init__.py
@@ -1 +1 @@
-__version__ = "5.0.51"
+__version__ = "5.0.52"
diff --git a/cyberdrop_dl/scraper/crawlers/gofile_crawler.py b/cyberdrop_dl/scraper/crawlers/gofile_crawler.py
index 1e873db37..c0d0af409 100644
--- a/cyberdrop_dl/scraper/crawlers/gofile_crawler.py
+++ b/cyberdrop_dl/scraper/crawlers/gofile_crawler.py
@@ -35,7 +35,7 @@ async def fetch(self, scrape_item: ScrapeItem) -> None:
         task_id = await self.scraping_progress.add_task(scrape_item.url)
 
         await self.get_token(self.client)
-        await self.get_website_token(self.client)
+        await self.get_website_token(self.js_address, self.client)
 
         await self.album(scrape_item)
 
@@ -57,7 +57,7 @@ async def album(self, scrape_item: ScrapeItem) -> None:
             if e.status == http.HTTPStatus.UNAUTHORIZED:
                 self.websiteToken = ""
                 self.manager.cache_manager.remove("gofile_website_token")
-                await self.get_website_token(self.client)
+                await self.get_website_token(self.js_address, self.client)
                 params["websiteToken"] = self.websiteToken
         async with self.request_limiter:
             JSON_Resp = await self.client.get_json(self.domain, self.api_address / "getContent", params)
@@ -110,7 +110,7 @@ async def get_token(self, session: ScraperClient) -> None:
         raise ScrapeFailure(403, "Couldn't generate GoFile token")
 
     @error_handling_wrapper
-    async def get_website_token(self, session: ScraperClient) -> None:
+    async def get_website_token(self, js_address: URL, session: ScraperClient) -> None:
         """Creates an anon gofile account to use."""
         if self.websiteToken:
             return
@@ -121,7 +121,7 @@ async def get_website_token(self, session: ScraperClient) -> None:
             return
 
         async with self.request_limiter:
-            text = await session.get_text(self.domain, self.js_address)
+            text = await session.get_text(self.domain, js_address)
             text = str(text)
             self.websiteToken = re.search(r'fetchData\.websiteToken\s*=\s*"(.*?)"', text).group(1)
             if not self.websiteToken:
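
Why the signature change: the subject line says gofile was not "utilizing wrapper format", and every edited call site now passes a URL as the first argument after `self` to the `@error_handling_wrapper`-decorated coroutine. The patch does not show the wrapper's body, so the following is a minimal sketch under one assumption (this is not the real cyberdrop_dl implementation): the wrapper reports failures against the first positional argument after `self`.

from functools import wraps
from typing import Any, Callable, Coroutine


def error_handling_wrapper(func: Callable[..., Coroutine[Any, Any, None]]):
    """Sketch of the assumed decorator contract.

    Assumption: the argument immediately after `self` is the URL/item
    the wrapper logs when the decorated coroutine raises.
    """
    @wraps(func)
    async def wrapper(self: Any, link: Any, *args: Any, **kwargs: Any) -> None:
        try:
            await func(self, link, *args, **kwargs)
        except Exception as exc:
            # `link` comes from the call site; this is why get_website_token
            # now takes js_address explicitly instead of reading self.js_address.
            print(f"Scrape failed for {link}: {exc}")

    return wrapper

Under that contract, the pre-patch call `get_website_token(self.client)` would hand the wrapper a ScraperClient where it expects the failing URL, while the patched `get_website_token(self.js_address, self.client)` gives it the actual gofile JS address to report.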