mirror of https://github.com/yasirarism/MissKatyPyro.git

Test new genss and mediainfo

parent 5edabf637b
commit bc581d43eb
4 changed files with 474 additions and 0 deletions
@@ -33,6 +33,15 @@ def get_readable_time(seconds: int) -> str:
    return result


def get_readable_bitrate(bitrate_kbps):
    if bitrate_kbps > 10000:
        bitrate = str(round(bitrate_kbps / 1000, 2)) + ' ' + 'Mb/s'
    else:
        bitrate = str(round(bitrate_kbps, 2)) + ' ' + 'kb/s'

    return bitrate


def get_readable_time2(seconds: int) -> str:
    count = 0
    ping_time = ""
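For reference, a quick sketch of what the new helper returns on either side of its 10,000 kb/s threshold. The function is restated compactly here so the snippet runs on its own; the input values are illustrative.

```python
def get_readable_bitrate(bitrate_kbps):
    # Same behaviour as the helper in the hunk above.
    if bitrate_kbps > 10000:
        return str(round(bitrate_kbps / 1000, 2)) + ' ' + 'Mb/s'
    return str(round(bitrate_kbps, 2)) + ' ' + 'kb/s'

print(get_readable_bitrate(2222.2222))  # -> 2222.22 kb/s
print(get_readable_bitrate(20000))      # -> 20.0 Mb/s
```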
@@ -60,6 +60,16 @@ TOTAL PLUGINS: {len(ALL_MODULES)}
"""


def remove_N(seq):
    # Remove consecutive duplicate entries from the list in place.
    i = 1
    while i < len(seq):
        if seq[i] == seq[i - 1]:
            del seq[i]
            i -= 1
        else:
            i += 1


def get_random_string(length: int = 5):
    text_str = "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(length))
    return text_str.upper()
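remove_N mutates its argument in place, collapsing runs of equal neighbours into one; mediainfov2.py below uses it to squeeze out the duplicate blank lines left after clearing IsTruncated / FileExtension_Invalid rows. A quick usage sketch, assuming the remove_N defined above is in scope:

```python
lines = ["Video", "", "", "", "Audio"]
remove_N(lines)   # mutates the list in place
print(lines)      # ['Video', '', 'Audio'] — runs of equal neighbours collapse to one
```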
@@ -128,3 +138,8 @@ async def search_jw(movie_name: str, locale: str):
            m_t_ = m_t_[:-2].strip()
            break
    return m_t_


SUPPORTED_URL_REGEX = {
    r"(http|ftp|https):\/\/([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])": "ddl"
}
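Both new plugins dispatch on this mapping the same way; a minimal sketch of that dispatch (the example URL is made up):

```python
import re

SUPPORTED_URL_REGEX = {
    r"(http|ftp|https):\/\/([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])": "ddl"
}

url = "https://example.com/videos/sample.mkv"  # illustrative direct link
for pattern, kind in SUPPORTED_URL_REGEX.items():
    if re.search(pattern, url):
        print(kind)  # -> "ddl": handled by ddl_screenshot / ddl_mediainfo below
        break
else:
    print("unsupported link")
```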
286  misskaty/plugins/genssv2.py  Normal file
@@ -0,0 +1,286 @@
import asyncio
import datetime
import json
import os
import random
import re
import shlex
import shutil
import subprocess
import time
from urllib.parse import unquote

import requests
from pyrogram import filters
from pyrogram.errors import MessageNotModified
from requests_toolbelt import MultipartEncoder

from misskaty import app
from misskaty.helper import SUPPORTED_URL_REGEX, get_random_string
from misskaty.vars import COMMAND_HANDLER

async def slowpics_collection(message, file_name, path):
    """
    Uploads image(s) to https://slow.pics/ from a specified directory.
    """

    msg = await message.reply_text("Uploading generated screenshots to slow.pics...", quote=True)

    img_list = os.listdir(path)
    data = {
        "collectionName": f"{unquote(file_name)}",
        "hentai": "false",
        "optimizeImages": "false",
        "public": "false",
    }

    for i in range(0, len(img_list)):
        data[f"images[{i}].name"] = img_list[i]
        data[f"images[{i}].file"] = (
            img_list[i],
            open(f"{path}/{img_list[i]}", "rb"),
            "image/png",
        )

    with requests.Session() as client:
        # The initial GET sets the XSRF-TOKEN cookie used in the upload headers below.
        client.get("https://slow.pics/api/collection")
        files = MultipartEncoder(data)
        length = str(files.len)

        headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "en-US,en;q=0.5",
            "Content-Length": length,
            "Content-Type": files.content_type,
            "Origin": "https://slow.pics/",
            "Referer": "https://slow.pics/collection",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.88 Safari/537.36",
            "X-XSRF-TOKEN": client.cookies.get_dict()["XSRF-TOKEN"],
        }

        response = client.post("https://slow.pics/api/collection", data=files, headers=headers)
        await msg.edit(
            f"File Name: `{unquote(file_name)}`\n\nFrames: https://slow.pics/c/{response.text}",
            disable_web_page_preview=True)

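For reference, a minimal sketch of the multipart form slow.pics receives from slowpics_collection, using in-memory stand-ins for the PNGs on disk; the collection name and image bytes are illustrative.

```python
import io
from requests_toolbelt import MultipartEncoder

img_list = ["1.png", "2.png"]  # what os.listdir(path) would return
data = {
    "collectionName": "Example.Collection",
    "hentai": "false",
    "optimizeImages": "false",
    "public": "false",
}
for i, name in enumerate(img_list):
    data[f"images[{i}].name"] = name
    data[f"images[{i}].file"] = (name, io.BytesIO(b"\x89PNG..."), "image/png")

form = MultipartEncoder(data)
print(form.content_type)  # multipart/form-data; boundary=...
print(form.len)           # the Content-Length advertised in the upload headers
```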
async def generate_ss_from_file(
        message,
        replymsg,
        file_name,
        frame_count,
        file_duration
):
    """
    Generates screenshots from partially/fully downloaded files using ffmpeg.
    """

    await replymsg.edit(f"Generating **{frame_count}** screenshots from `{unquote(file_name)}`, please wait...")

    rand_str = get_random_string()
    os.makedirs(f"screenshot_{rand_str}")

    loop_count = frame_count
    while loop_count != 0:

        random_timestamp = random.uniform(1, file_duration)
        timestamp = str(datetime.timedelta(seconds=int(random_timestamp)))
        outputpath = f"screenshot_{rand_str}/{(frame_count - loop_count) + 1}.png"

        ffmpeg_command = f"mediaextract -y -ss {timestamp} -i '{file_name}' -vframes 1 {outputpath}"
        args = shlex.split(ffmpeg_command)

        shell = await asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE,
                                                     stderr=asyncio.subprocess.PIPE)

        stdout, stderr = await shell.communicate()
        result = str(stdout.decode().strip()) + str(stderr.decode().strip())

        # Retry this frame if the random offset fell past the end of a partially downloaded file.
        if "File ended prematurely" in result:
            loop_count += 1
        loop_count -= 1

    await replymsg.delete()
    await slowpics_collection(message, file_name, path=f"{os.getcwd()}/screenshot_{rand_str}")

    shutil.rmtree(f"screenshot_{rand_str}")
    os.remove(file_name)

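Each loop iteration boils down to one single-frame extraction at a random offset; a sketch of that step with illustrative values (the mediaextract binary name is taken from the plugin and is assumed to be its ffmpeg build):

```python
import datetime
import random
import shlex

file_duration = 5400.0    # seconds, as reported by mediainfo for the (partial) file
file_name = "sample.mkv"  # illustrative input
rand_str = "ABC12"        # illustrative value of get_random_string()

random_timestamp = random.uniform(1, file_duration)
timestamp = str(datetime.timedelta(seconds=int(random_timestamp)))  # e.g. "1:07:42"
outputpath = f"screenshot_{rand_str}/1.png"

cmd = f"mediaextract -y -ss {timestamp} -i '{file_name}' -vframes 1 {outputpath}"
print(shlex.split(cmd))  # the argv handed to asyncio.create_subprocess_exec
```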
async def generate_ss_from_link(
        message,
        replymsg,
        file_url,
        headers,
        file_name,
        frame_count,
        file_duration
):
    """
    Generates screenshots from direct download links using ffmpeg.
    """

    await replymsg.edit(f"Generating **{frame_count}** screenshots from `{unquote(file_name)}`, please wait...")

    rand_str = get_random_string()
    os.makedirs(f"screenshot_{rand_str}")

    loop_count = frame_count
    while loop_count != 0:
        random_timestamp = random.uniform(1, file_duration)
        timestamp = str(datetime.timedelta(seconds=int(random_timestamp)))
        outputpath = f"screenshot_{rand_str}/{(frame_count - loop_count) + 1}.png"

        ffmpeg_command = f"mediaextract -headers '{headers}' -y -ss {timestamp} -i {file_url} -vframes 1 {outputpath}"
        args = shlex.split(ffmpeg_command)

        shell = await asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE,
                                                     stderr=asyncio.subprocess.PIPE)

        _, __ = await shell.communicate()
        loop_count -= 1
        time.sleep(3)

    await replymsg.delete()
    await slowpics_collection(message, file_name, path=f"{os.getcwd()}/screenshot_{rand_str}")

    shutil.rmtree(f"screenshot_{rand_str}")

async def ddl_screenshot(message, frame_count, url):
    """
    Generates Screenshots from Direct Download links.
    """

    replymsg = await message.reply_text("Checking direct download URL....", quote=True)

    try:

        file_url = f"'{url}'"
        file_name = re.search(".+/(.+)", url).group(1)

        total_duration = subprocess.check_output(
            f"ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 {file_url}",
            shell=True).decode("utf-8")
        total_duration = float(total_duration.strip())

        headers = "user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4136.7 Safari/537.36"

        await generate_ss_from_link(
            message,
            replymsg,
            file_url,
            headers,
            file_name,
            frame_count,
            file_duration=float(total_duration))

    except MessageNotModified:
        pass
    except Exception:
        return await replymsg.edit(
            "Something went wrong with the given URL. Make sure it points to a downloadable video file that is not IP-locked and returns a proper response code without requiring extra headers.")

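The duration probe is a single ffprobe call; the same probe can be sketched without shell=True as below (the URL is illustrative and ffprobe must be on PATH):

```python
import subprocess

url = "https://example.com/videos/sample.mkv"  # illustrative direct link
out = subprocess.check_output(
    ["ffprobe", "-v", "error",
     "-show_entries", "format=duration",
     "-of", "default=noprint_wrappers=1:nokey=1",
     url]
).decode("utf-8")
total_duration = float(out.strip())  # seconds, fed to generate_ss_from_link()
```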
async def telegram_screenshot(client, message, frame_count):
    """
    Generates Screenshots from Telegram Video Files.
    """

    message = message.reply_to_message
    if message.text:
        return await message.reply_text("Reply to a proper video file to generate screenshots.", quote=True)

    elif message.media.value == "video":
        media = message.video

    elif message.media.value == "document":
        media = message.document

    else:
        return await message.reply_text("Can only generate screenshots from a video file....", quote=True)

    file_name = str(media.file_name)
    mime = media.mime_type
    size = media.file_size

    if message.media.value == "document" and "video" not in mime:
        return await message.reply_text("Can only generate screenshots from a video file....", quote=True)

    # Downloading partial file.
    replymsg = await message.reply_text("Downloading partial video file....", quote=True)

    if int(size) <= 200000000:
        await message.download(os.path.join(os.getcwd(), file_name))
        downloaded_percentage = 100  # (100% download)

    else:
        limit = ((25 * size) / 100) / 1000000
        async for chunk in client.stream_media(message, limit=int(limit)):
            with open(file_name, "ab") as file:
                file.write(chunk)

        downloaded_percentage = 25

    await replymsg.edit("Partial file downloaded....")
    # Partial file downloaded

    mediainfo_json = json.loads(subprocess.check_output(["mediainfo", file_name, "--Output=JSON"]).decode("utf-8"))
    total_duration = mediainfo_json["media"]["track"][0]["Duration"]

    if downloaded_percentage == 100:
        partial_file_duration = float(total_duration)
    else:
        partial_file_duration = (downloaded_percentage * float(total_duration)) / 100

    await generate_ss_from_file(
        message,
        replymsg,
        file_name,
        frame_count,
        file_duration=partial_file_duration)

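The partial-download bookkeeping is plain proportional math; a worked example with made-up numbers:

```python
size = 1_500_000_000                     # 1.5 GB file: over the 200 MB full-download cutoff
limit = ((25 * size) / 100) / 1000000    # 375.0 -> stream roughly the first 25% in ~1 MB chunks
total_duration = 5400.0                  # seconds, from the mediainfo JSON of the partial file
downloaded_percentage = 25
partial_file_duration = (downloaded_percentage * total_duration) / 100  # 1350.0 s usable for screenshots
print(limit, partial_file_duration)
```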
@app.on_message(filters.command("genss2", COMMAND_HANDLER))
async def screenshot(client, message):
    replied_message = message.reply_to_message
    if replied_message:
        try:
            user_input = message.text.split(None, 1)[1]
            frame_count = int(user_input.strip())
        except Exception:
            frame_count = 5

        if frame_count > 15:
            frame_count = 15
        return await telegram_screenshot(client, message, frame_count)

    if len(message.command) < 2:
        mediainfo_usage = "Generates video frame screenshots from Telegram files or direct download links."
        return await message.reply_text(mediainfo_usage, quote=True)

    user_input = message.text.split(None, 1)[1]
    if "|" in user_input:

        frame_count = user_input.split("|")[-1].strip()
        url = user_input.split("|")[0].strip()

        try:
            frame_count = int(frame_count)
        except Exception:
            frame_count = 5
        if frame_count > 15:
            frame_count = 15

    else:
        frame_count = 5
        url = user_input.split("|")[0].strip()

    for (key, value) in SUPPORTED_URL_REGEX.items():
        if bool(re.search(Rf"{key}", url)):
            if value == "ddl":
                return await ddl_screenshot(message, frame_count, url)
    return await message.reply_text("This type of link is not supported.", quote=True)

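Roughly how the handler parses a /genss2 argument of the form `<direct link> | <frames>` (the link is illustrative; min() stands in for the handler's explicit 15-frame cap):

```python
user_input = "https://example.com/videos/sample.mkv | 8"
url = user_input.split("|")[0].strip()
frame_count = min(int(user_input.split("|")[-1].strip()), 15)
print(url, frame_count)  # https://example.com/videos/sample.mkv 8
```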
164  misskaty/plugins/mediainfov2.py  Normal file
@@ -0,0 +1,164 @@
import json
import os
import re
import subprocess
from urllib.parse import unquote

import requests
from pyrogram import filters

from misskaty import app
from misskaty.helper import (SUPPORTED_URL_REGEX, get_readable_bitrate,
                             get_readable_file_size, post_to_telegraph,
                             remove_N)
from misskaty.vars import COMMAND_HANDLER

async def ddl_mediainfo(_, message, url):
    """
    Generates Mediainfo from a Direct Download Link.
    """

    try:
        filename = re.search(".+/(.+)", url).group(1)
        reply_msg = await message.reply_text("Generating Mediainfo, please wait..", quote=True)

        # Download only the first chunk (up to ~50 MB); enough for mediainfo to read the headers.
        with requests.get(url, stream=True) as r:
            with open(filename, 'wb') as f:
                for chunk in r.iter_content(50000000):
                    f.write(chunk)
                    break

        mediainfo = subprocess.check_output(['mediainfo', filename]).decode("utf-8")
        mediainfo_json = json.loads(subprocess.check_output(['mediainfo', filename, '--Output=JSON']).decode("utf-8"))

        filesize = requests.head(url).headers.get('content-length')

        lines = mediainfo.splitlines()
        for i in range(len(lines)):
            if 'Complete name' in lines[i]:
                lines[i] = re.sub(r": .+", ': ' + unquote(filename), lines[i])

            elif 'File size' in lines[i]:
                lines[i] = re.sub(r": .+", ': ' + get_readable_file_size(float(filesize)), lines[i])

            elif 'Overall bit rate' in lines[i] and 'Overall bit rate mode' not in lines[i]:
                duration = float(mediainfo_json['media']['track'][0]['Duration'])
                bitrate = get_readable_bitrate(float(filesize) * 8 / (duration * 1000))
                lines[i] = re.sub(r": .+", ': ' + bitrate, lines[i])

            elif 'IsTruncated' in lines[i] or 'FileExtension_Invalid' in lines[i]:
                lines[i] = ''

        with open(f'{filename}.txt', 'w') as f:
            f.write('\n'.join(lines))

        with open(f"{filename}.txt", "r+") as file:
            content = file.read()
            output = await post_to_telegraph(False, "MissKaty MediaInfo", content)

        await reply_msg.edit(f"**File Name :** `{unquote(filename)}`\n\n**Mediainfo :** {output}",
                             disable_web_page_preview=True)
        os.remove(f"{filename}.txt")
        os.remove(filename)

    except Exception:
        await reply_msg.delete()
        return await message.reply_text("Something went wrong while generating Mediainfo from the given URL.",
                                        quote=True)

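The rewritten "Overall bit rate" line is recomputed from the HTTP Content-Length and the duration mediainfo reports; a worked example with made-up numbers, matching the arithmetic above:

```python
filesize = 1_500_000_000   # bytes, from the Content-Length header (illustrative)
duration = 5400.0          # seconds, from mediainfo's JSON track[0]["Duration"]
bitrate_kbps = float(filesize) * 8 / (duration * 1000)
print(round(bitrate_kbps, 2))  # 2222.22 — get_readable_bitrate renders this as "2222.22 kb/s"
```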
async def telegram_mediainfo(client, message):
    """
    Generates Mediainfo from a Telegram File.
    """

    message = message.reply_to_message

    if message.text:
        return await message.reply_text("Reply to a proper media file for generating Mediainfo.", quote=True)

    elif message.media.value == 'video':
        media = message.video

    elif message.media.value == 'audio':
        media = message.audio

    elif message.media.value == 'document':
        media = message.document

    elif message.media.value == 'voice':
        media = message.voice

    else:
        return await message.reply_text("This type of media is not supported for generating Mediainfo.", quote=True)

    filename = str(media.file_name)
    mime = media.mime_type
    size = media.file_size

    reply_msg = await message.reply_text("Generating Mediainfo, please wait..", quote=True)

    if int(size) <= 50000000:
        await message.download(os.path.join(os.getcwd(), filename))

    else:
        # For larger files, the first few streamed chunks are enough for mediainfo.
        async for chunk in client.stream_media(message, limit=5):
            with open(filename, 'ab') as f:
                f.write(chunk)

    mediainfo = subprocess.check_output(['mediainfo', filename]).decode("utf-8")
    mediainfo_json = json.loads(subprocess.check_output(['mediainfo', filename, '--Output=JSON']).decode("utf-8"))
    readable_size = get_readable_file_size(size)

    try:
        lines = mediainfo.splitlines()
        for i in range(len(lines)):
            if 'File size' in lines[i]:
                lines[i] = re.sub(r": .+", ': ' + readable_size, lines[i])

            elif 'Overall bit rate' in lines[i] and 'Overall bit rate mode' not in lines[i]:

                duration = float(mediainfo_json['media']['track'][0]['Duration'])
                bitrate_kbps = (size * 8) / (duration * 1000)
                bitrate = get_readable_bitrate(bitrate_kbps)

                lines[i] = re.sub(r": .+", ': ' + bitrate, lines[i])

            elif 'IsTruncated' in lines[i] or 'FileExtension_Invalid' in lines[i]:
                lines[i] = ''

        remove_N(lines)
        with open(f'{filename}.txt', 'w') as f:
            f.write('\n'.join(lines))

        with open(f"{filename}.txt", "r+") as file:
            content = file.read()

        output = await post_to_telegraph(False, "MissKaty MediaInfo", content)

        await reply_msg.edit(f"**File Name :** `{filename}`\n\n**Mediainfo :** {output}", disable_web_page_preview=True)
        os.remove(f'{filename}.txt')
        os.remove(filename)

    except Exception:
        await reply_msg.delete()
        await message.reply_text("Something went wrong while generating Mediainfo of the replied Telegram file.", quote=True)

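The duration lookup reads mediainfo's JSON output; a minimal sketch with a trimmed, made-up sample of what `mediainfo <file> --Output=JSON` returns:

```python
import json

sample = '{"media": {"track": [{"@type": "General", "Duration": "5400.000"}]}}'  # trimmed, illustrative
mediainfo_json = json.loads(sample)
duration = float(mediainfo_json["media"]["track"][0]["Duration"])  # 5400.0 seconds
print(duration)
```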
@app.on_message(filters.command("mediainfo2", COMMAND_HANDLER))
async def mediainfo(client, message):
    mediainfo_usage = "**Generate mediainfo from Telegram files or direct download links. Reply to any Telegram file or just pass the link after the command.**"

    if message.reply_to_message:
        return await telegram_mediainfo(client, message)

    elif len(message.command) < 2:
        return await message.reply_text(mediainfo_usage, quote=True)

    user_url = message.text.split(None, 1)[1].split(" ")[0]
    for (key, value) in SUPPORTED_URL_REGEX.items():
        if bool(re.search(FR"{key}", user_url)):
            if value == "ddl":
                return await ddl_mediainfo(client, message, url=user_url)
    await message.reply_text("This type of URL is not supported.", quote=True)