Commit d888a1e: Leech To Telegram Added
Signed-off-by: anas <[email protected]>
anasty17 committed Sep 21, 2021
1 parent c3df77f commit d888a1e
Showing 21 changed files with 706 additions and 139 deletions.
19 changes: 15 additions & 4 deletions bot/__init__.py
@@ -115,6 +115,8 @@ def get_client() -> qba.TorrentsAPIMixIn:
# Stores list of users and chats the bot is authorized to use in
AUTHORIZED_CHATS = set()
SUDO_USERS = set()
AS_DOC_USERS = set()
AS_MEDIA_USERS = set()
if os.path.exists('authorized_chats.txt'):
with open('authorized_chats.txt', 'r+') as f:
lines = f.readlines()
@@ -191,11 +193,15 @@ def get_client() -> qba.TorrentsAPIMixIn:
telegraph_token = telegraph.get_access_token()

 try:
-    STATUS_LIMIT = getConfig('STATUS_LIMIT')
-    if len(STATUS_LIMIT) == 0:
+    TG_SPLIT_SIZE = int(getConfig('TG_SPLIT_SIZE'))
+    if len(f'TG_SPLIT_SIZE') == 0 or TG_SPLIT_SIZE > 2097152000:
         raise KeyError
+except KeyError:
+    TG_SPLIT_SIZE = 2097152000
+try:
+    STATUS_LIMIT = int(getConfig('STATUS_LIMIT'))
+    if len(f'STATUS_LIMIT') == 0:
+        raise KeyError
-    else:
-        STATUS_LIMIT = int(getConfig('STATUS_LIMIT'))
 except KeyError:
     STATUS_LIMIT = None
 try:
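Editorial note: the added checks len(f'TG_SPLIT_SIZE') == 0 and len(f'STATUS_LIMIT') == 0 measure the literal key name, which is never empty, so the empty-value guard can never fire; an unset value instead surfaces as an uncaught ValueError from int(). A minimal sketch of the likely intent, assuming getConfig() returns the raw string:

    try:
        tg_split_size = getConfig('TG_SPLIT_SIZE')
        if len(tg_split_size) == 0 or int(tg_split_size) > 2097152000:
            raise KeyError
        TG_SPLIT_SIZE = int(tg_split_size)
    except (KeyError, ValueError):
        TG_SPLIT_SIZE = 2097152000  # 2000 MiB, just under Telegram's 2 GiB bot upload cap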
@@ -330,6 +336,11 @@ def get_client() -> qba.TorrentsAPIMixIn:
IS_VPS = IS_VPS.lower() == 'true'
except KeyError:
IS_VPS = False
try:
AS_DOCUMENT = getConfig('AS_DOCUMENT')
AS_DOCUMENT = AS_DOCUMENT.lower() == 'true'
except KeyError:
AS_DOCUMENT = False
try:
RECURSIVE_SEARCH = getConfig('RECURSIVE_SEARCH')
RECURSIVE_SEARCH = RECURSIVE_SEARCH.lower() == 'true'
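Editorial note: AS_DOCUMENT sets the bot-wide default for the new leech mode (upload as document rather than streamable media), while the AS_DOC_USERS and AS_MEDIA_USERS sets added above appear to hold the per-user overrides managed through the new leech_settings module imported below.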
2 changes: 1 addition & 1 deletion bot/__main__.py
@@ -17,7 +17,7 @@
from .helper.ext_utils.bot_utils import get_readable_file_size, get_readable_time
from .helper.telegram_helper.filters import CustomFilters
from bot.helper.telegram_helper import button_build
-from .modules import authorize, list, cancel_mirror, mirror_status, mirror, clone, watch, shell, eval, torrent_search, delete, speedtest, count
+from .modules import authorize, list, cancel_mirror, mirror_status, mirror, clone, watch, shell, eval, torrent_search, delete, speedtest, count, leech_settings


def stats(update, context):
86 changes: 36 additions & 50 deletions bot/helper/ext_utils/bot_utils.py
@@ -29,6 +29,7 @@ class MirrorStatus:
STATUS_PAUSE = "Paused...⭕️"
STATUS_ARCHIVING = "Archiving...🔐"
STATUS_EXTRACTING = "Extracting...📂"
STATUS_SPLITTING = "Splitting...✂️"


PROGRESS_MAX_SIZE = 100 // 8
@@ -54,7 +55,6 @@ def __setInterval(self):
def cancel(self):
self.stopEvent.set()


def get_readable_file_size(size_in_bytes) -> str:
if size_in_bytes is None:
return '0B'
@@ -67,7 +67,6 @@ def get_readable_file_size(size_in_bytes) -> str:
except IndexError:
return 'File too large'


def getDownloadByGid(gid):
with download_dict_lock:
for dl in download_dict.values():
@@ -77,13 +76,13 @@ def getDownloadByGid(gid):
not in [
MirrorStatus.STATUS_ARCHIVING,
MirrorStatus.STATUS_EXTRACTING,
MirrorStatus.STATUS_SPLITTING,
]
and dl.gid() == gid
):
return dl
return None


def getAllDownload():
with download_dict_lock:
for dlDetails in download_dict.values():
@@ -93,6 +92,7 @@ def getAllDownload():
not in [
MirrorStatus.STATUS_ARCHIVING,
MirrorStatus.STATUS_EXTRACTING,
MirrorStatus.STATUS_SPLITTING,
MirrorStatus.STATUS_CLONING,
MirrorStatus.STATUS_UPLOADING,
]
@@ -101,7 +101,6 @@
return dlDetails
return None


def get_progress_bar_string(status):
completed = status.processed_bytes() / 8
total = status.size_raw() / 8
@@ -116,64 +115,58 @@ def get_progress_bar_string(status):
p_str = f"[{p_str}]"
return p_str


 def get_readable_message():
     with download_dict_lock:
         msg = ""
-        INDEX = 0
+        start = 0
         if STATUS_LIMIT is not None:
             dick_no = len(download_dict)
             global pages
             pages = math.ceil(dick_no/STATUS_LIMIT)
             if PAGE_NO > pages and pages != 0:
-                globals()['COUNT'] -= STATUS_LIMIT
                 globals()['PAGE_NO'] -= 1
-        for download in list(download_dict.values()):
-            INDEX += 1
-            if INDEX > COUNT:
-                msg += f"<b>Filename:</b> <code>{download.name()}</code>"
-                msg += f"\n<b>Status:</b> <i>{download.status()}</i>"
-                if download.status() not in [
-                    MirrorStatus.STATUS_ARCHIVING,
-                    MirrorStatus.STATUS_EXTRACTING,
-                ]:
-                    msg += f"\n<code>{get_progress_bar_string(download)} {download.progress()}</code>"
-                    if download.status() == MirrorStatus.STATUS_CLONING:
-                        msg += f"\n<b>Cloned:</b> <code>{get_readable_file_size(download.processed_bytes())}</code> of <code>{download.size()}</code>"
-                    elif download.status() == MirrorStatus.STATUS_UPLOADING:
-                        msg += f"\n<b>Uploaded:</b> <code>{get_readable_file_size(download.processed_bytes())}</code> of <code>{download.size()}</code>"
-                    else:
-                        msg += f"\n<b>Downloaded:</b> <code>{get_readable_file_size(download.processed_bytes())}</code> of <code>{download.size()}</code>"
-                    msg += f"\n<b>Speed:</b> <code>{download.speed()}</code>" \
-                           f", <b>ETA:</b> <code>{download.eta()}</code> "
-                    # if hasattr(download, 'is_torrent'):
-                    try:
-                        msg += f"\n<b>Seeders:</b> <code>{download.aria_download().num_seeders}</code>" \
-                               f" | <b>Peers:</b> <code>{download.aria_download().connections}</code>"
-                    except:
-                        pass
-                    try:
-                        msg += f"\n<b>Seeders:</b> <code>{download.torrent_info().num_seeds}</code>" \
-                               f" | <b>Leechers:</b> <code>{download.torrent_info().num_leechs}</code>"
-                    except:
-                        pass
-                msg += f"\n<b>To Stop:</b> <code>/{BotCommands.CancelMirror} {download.gid()}</code>"
-                msg += "\n\n"
-                if STATUS_LIMIT is not None and INDEX >= COUNT + STATUS_LIMIT:
-                    break
+            start = COUNT
+        for index, download in enumerate(list(download_dict.values())[start:], start=1):
+            msg += f"<b>Filename:</b> <code>{download.name()}</code>"
+            msg += f"\n<b>Status:</b> <i>{download.status()}</i>"
+            if download.status() not in [
+                MirrorStatus.STATUS_ARCHIVING,
+                MirrorStatus.STATUS_EXTRACTING,
+                MirrorStatus.STATUS_SPLITTING,
+            ]:
+                msg += f"\n<code>{get_progress_bar_string(download)}</code> {download.progress()}"
+                if download.status() == MirrorStatus.STATUS_CLONING:
+                    msg += f"\n<b>Cloned:</b> {get_readable_file_size(download.processed_bytes())} of {download.size()}"
+                elif download.status() == MirrorStatus.STATUS_UPLOADING:
+                    msg += f"\n<b>Uploaded:</b> {get_readable_file_size(download.processed_bytes())} of {download.size()}"
+                else:
+                    msg += f"\n<b>Downloaded:</b> {get_readable_file_size(download.processed_bytes())} of {download.size()}"
+                msg += f"\n<b>Speed:</b> {download.speed()} <b>ETA:</b> {download.eta()}"
+                try:
+                    msg += f"\n<b>Seeders:</b> {download.aria_download().num_seeders}" \
+                           f" | <b>Peers:</b> {download.aria_download().connections}"
+                except:
+                    pass
+                try:
+                    msg += f"\n<b>Seeders:</b> {download.torrent_info().num_seeds}" \
+                           f" | <b>Leechers:</b> {download.torrent_info().num_leechs}"
+                except:
+                    pass
+            msg += f"\n<b>To Stop:</b> <code>/{BotCommands.CancelMirror} {download.gid()}</code>"
+            msg += "\n\n"
+            if STATUS_LIMIT is not None and index == STATUS_LIMIT:
+                break
         if STATUS_LIMIT is not None:
-            if INDEX > COUNT + STATUS_LIMIT:
-                return None, None
             if dick_no > STATUS_LIMIT:
-                msg += f"<b>Page:</b> <code>{PAGE_NO}/{pages}</code> | <b>Tasks:</b> <code>{dick_no}</code>\n"
+                msg += f"<b>Page:</b> {PAGE_NO}/{pages} | <b>Tasks:</b> {dick_no}\n"
                 buttons = button_build.ButtonMaker()
                 buttons.sbutton("Previous", "pre")
                 buttons.sbutton("Next", "nex")
                 button = InlineKeyboardMarkup(buttons.build_menu(2))
                 return msg, button
         return msg, ""
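Editorial illustration of the new paging math, assuming STATUS_LIMIT = 4 and that COUNT = (PAGE_NO - 1) * STATUS_LIMIT, as maintained by the flip() callback below:

    # 10 tasks -> pages = math.ceil(10 / 4) = 3
    # On PAGE_NO = 2, start = COUNT = 4, so the slice [start:] yields tasks 5..10
    # and the loop breaks once index == STATUS_LIMIT, rendering tasks 5..8.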


def flip(update, context):
query = update.callback_query
query.answer()
@@ -194,7 +187,6 @@ def flip(update, context):
PAGE_NO -= 1
message_utils.update_all_messages()


def check_limit(size, limit, tar_unzip_limit=None, is_tar_ext=False):
LOGGER.info('Checking File/Folder Size...')
if is_tar_ext and tar_unzip_limit is not None:
@@ -227,20 +219,16 @@ def get_readable_time(seconds: int) -> str:
result += f'{seconds}s'
return result


def is_url(url: str):
url = re.findall(URL_REGEX, url)
return bool(url)


def is_gdrive_link(url: str):
return "drive.google.com" in url


def is_mega_link(url: str):
return "mega.nz" in url or "mega.co.nz" in url


def get_mega_link_type(url: str):
if "folder" in url:
return "folder"
Expand All @@ -250,12 +238,10 @@ def get_mega_link_type(url: str):
return "folder"
return "file"


def is_magnet(url: str):
magnet = re.findall(MAGNET_REGEX, url)
return bool(magnet)


def new_thread(fn):
"""To use as decorator to make a function call threaded.
Needs import
70 changes: 61 additions & 9 deletions bot/helper/ext_utils/fs_utils.py
@@ -1,26 +1,35 @@
 import sys
-from bot import aria2, LOGGER, DOWNLOAD_DIR, get_client
 import shutil
 import os
 import pathlib
 import magic
 import tarfile
 import subprocess
+import time
 
+from PIL import Image
+from hachoir.parser import createParser
+from hachoir.metadata import extractMetadata
+from fsplit.filesplit import Filesplit
 
 from .exceptions import NotSupportedExtractionArchive
+from bot import aria2, LOGGER, DOWNLOAD_DIR, get_client, TG_SPLIT_SIZE
 
+VIDEO_SUFFIXES = ("M4V", "MP4", "MOV", "FLV", "WMV", "3GP", "MPG", "WEBM", "MKV", "AVI")
 
+fs = Filesplit()

def clean_download(path: str):
if os.path.exists(path):
LOGGER.info(f"Cleaning Download: {path}")
shutil.rmtree(path)


def start_cleanup():
try:
shutil.rmtree(DOWNLOAD_DIR)
except FileNotFoundError:
pass


def clean_all():
aria2.remove_all(True)
get_client().torrents_delete(torrent_hashes="all", delete_files=True)
@@ -30,7 +39,6 @@ def clean_all():
except FileNotFoundError:
pass


def exit_clean_up(signal, frame):
try:
LOGGER.info("Please wait, while we clean up the downloads and stop running downloads")
@@ -40,7 +48,6 @@ def exit_clean_up(signal, frame):
LOGGER.warning("Force Exiting before the cleanup finishes!")
sys.exit(1)


def get_path_size(path):
if os.path.isfile(path):
return os.path.getsize(path)
@@ -51,7 +58,6 @@ def get_path_size(path):
total_size += os.path.getsize(abs_path)
return total_size


def tar(org_path):
tar_path = org_path + ".tar"
#path = pathlib.PurePath(org_path)
@@ -61,7 +67,6 @@ def tar(org_path):
tar.close()
return tar_path


def zip(name, path):
root_dir = os.path.dirname(path)
base_dir = os.path.basename(path.strip(os.sep))
@@ -70,7 +75,6 @@ def zip(name, path):
LOGGER.info(f"Zip: {zip_path}")
return zip_path


def get_base_name(orig_path: str):
if orig_path.endswith(".tar.bz2"):
return orig_path.replace(".tar.bz2", "")
@@ -149,9 +153,57 @@ def get_base_name(orig_path: str):
else:
raise NotSupportedExtractionArchive('File format not supported for extraction')


def get_mime_type(file_path):
mime = magic.Magic(mime=True)
mime_type = mime.from_file(file_path)
mime_type = mime_type or "text/plain"
return mime_type

def take_ss(video_file, duration):
des_dir = f"Thumbnails"
if not os.path.exists(des_dir):
os.mkdir(des_dir)
des_dir = os.path.join(des_dir, f"{time.time()}.jpg")
duration = int(duration) / 2
subprocess.run(["ffmpeg", "-hide_banner", "-loglevel", "error", "-ss", str(duration),
"-i", video_file, "-vframes", "1", des_dir])
Image.open(des_dir).convert("RGB").save(des_dir)
img = Image.open(des_dir)
w, h = img.size
img.resize((320, h))
img.save(des_dir, "JPEG")
if os.path.lexists(des_dir):
return des_dir, 320, h
else:
return None, 0, 0
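Editorial note: PIL's Image.resize() returns a new image instead of resizing in place, so the unassigned img.resize((320, h)) call above is a no-op and the saved thumbnail keeps its original width. A sketch of the likely intent:

    img = Image.open(des_dir)
    w, h = img.size
    img = img.resize((320, h))  # rebind the result; resize() does not mutate img
    img.save(des_dir, "JPEG")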

def split(path, size, split_size, start_time=0, i=1):
ftype = get_mime_type(path)
ftype = ftype.split("/")[0]
ftype = ftype.lower().strip()
out_dir = os.path.dirname(path)
base_name = os.path.basename(path)
if ftype == "video" or base_name.upper().endswith(VIDEO_SUFFIXES):
base_name, extension = os.path.splitext(path)
metadata = extractMetadata(createParser(path))
total_duration = metadata.get('duration').seconds - 5
while start_time < total_duration:
parted_name = "{}.part{}{}".format(str(base_name), str(i).zfill(2), str(extension))
out_path = os.path.join(out_dir, parted_name)
subprocess.run(["ffmpeg", "-hide_banner", "-loglevel", "error", "-i",
path, "-ss", str(start_time), "-fs", str(split_size),
"-strict", "-2", "-c", "copy", out_path])
out_size = get_path_size(out_path)
if out_size > TG_SPLIT_SIZE:
dif = out_size - TG_SPLIT_SIZE
split_size = split_size - dif
os.remove(out_path)
return split(path, size, split_size, start_time, i)
metadata = extractMetadata(createParser(out_path))
start_time = start_time + metadata.get('duration').seconds - 5
i = i + 1
else:
#subprocess.run(["split", "--numeric-suffixes=1", "--suffix-length=5", f"--bytes={split_size}", path, out_dir])
fs.split(file=path, split_size=split_size, output_dir=out_dir)
csv_path = os.path.join(out_dir, "fs_manifest.csv")
os.remove(csv_path)
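Editorial sketch of how split() is presumably called from the new leech upload path elsewhere in this commit: for video files, ffmpeg's -fs flag caps each part's size and the function retries with a smaller split_size whenever a part still overshoots TG_SPLIT_SIZE; for everything else, fsplit writes the parts alongside an fs_manifest.csv, which is removed afterwards.

    # Hypothetical caller, not part of this file:
    if get_path_size(up_path) > TG_SPLIT_SIZE:
        split(up_path, get_path_size(up_path), TG_SPLIT_SIZE)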
