mirror of https://github.com/yasirarism/MissKatyPyro.git
synced 2026-01-04 19:24:51 +00:00

Tes update

This commit is contained in:
parent 27ec6138e5
commit 2cd72cce50

5 changed files with 95 additions and 26 deletions
@@ -19,8 +19,8 @@ from misskaty import (
     UBOT_USERNAME,
 )
 from misskaty.plugins import ALL_MODULES
-from misskaty.helper import paginate_modules
-from misskaty.helper.tools import bot_sys_stats
+from misskaty.helper import paginate_modules, bot_sys_stats
+from misskaty.core.message_utils import *
 from database.users_chats_db import db
 from misskaty.vars import LOG_CHANNEL, SUDO
 from utils import temp, auto_clean

@@ -1,16 +1,44 @@
 import asyncio
 from logging import getLogger
+from pyrogram.errors import ChatWriteForbidden, MessageNotModified, FloodWait

 LOGGER = getLogger(__name__)

+# handler for TG function, so need write exception in every code

-async def kirimPesan(msg, text: str, reply_markup=None):
+async def kirimPesan(msg, text: str, disable_web_page_preview=True, reply_markup=None):
     try:
-        return await msg.reply(text, disable_web_page_preview=True)
+        return await msg.reply(text)
     except FloodWait as e:
         LOGGER.warning(str(e))
         await asyncio.sleep(e.value)
         return await kirimPesan(text)
+    except ChatWriteForbidden:
+        return await msg.leave()
     except Exception as e:
         LOGGER.error(str(e))


+async def editPesan(msg, text: str, disable_web_page_preview=True, reply_markup=None):
+    try:
+        return await msg.edit(text)
+    except FloodWait as e:
+        LOGGER.warning(str(e))
+        await asyncio.sleep(e.value)
+        return await editPesan(msg, text)
+    except MessageNotModified:
         return
+    except Exception as e:
+        LOGGER.error(str(e))
+
+
+async def hapusPesan(msg):
+    try:
+        return await msg.delete()
+    except FloodWait as e:
+        LOGGER.warning(str(e))
+        await asyncio.sleep(e.value)
+        return await hapusPesan(msg)
+    except Exception as e:
+        LOGGER.error(str(e))

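The new message_utils wrappers above centralise FloodWait, ChatWriteForbidden and MessageNotModified handling, so plugin code no longer needs its own try/except around every Telegram call. A minimal usage sketch follows; the greet() coroutine is hypothetical and not part of this commit, only the import path and the wrapper signatures come from the diff above.

# Hypothetical caller illustrating the wrappers introduced in this commit.
from misskaty.core.message_utils import kirimPesan, editPesan, hapusPesan


async def greet(msg):
    # kirimPesan replies, retrying after FloodWait and handling ChatWriteForbidden.
    sent = await kirimPesan(msg, "Hello!", disable_web_page_preview=True)
    if sent:
        # editPesan edits in place and silently ignores MessageNotModified.
        await editPesan(sent, "Hello again!")
        # hapusPesan deletes the message, retrying after FloodWait.
        await hapusPesan(sent)
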
@@ -1 +1,2 @@
 from .misc import paginate_modules
+from .tools import bot_sys_stats

@@ -26,7 +26,8 @@ __HELP__ = """
 /terbit21 [query <optional>] - Scrape website data from Terbit21. If without query will give latest movie list.
 /savefilm21 [query <optional>] - Scrape website data from Savefilm21. If without query will give latest movie list.
 /movieku [query <optional>] - Scrape website data from Movieku.cc
-/nodrakor [query] - Scrape website data from nodrakor
+/nodrakor [query] - Scrape website data from nodrakor.icu
+/zonafilm [query] - Scrape website data from zonafilm.icu
 /gomov [query <optional>] - Scrape website data from GoMov. If without query will give latest movie list.
 """

@@ -37,6 +38,56 @@ headers = {
 }


+@app.on_message(filters.command(["zonafilm"], COMMAND_HANDLER))
+@capture_err
+async def zonafilm(_, msg):
+    m = await msg.reply("**__⏳ Please wait, scraping data ...__**", True)
+    try:
+        title = msg.text.split(" ", 1)[1]
+    except IndexError:
+        title = ""
+    try:
+        html = await http.get(f"http://173.212.199.27/?s={title}", headers=headers)
+        text = BeautifulSoup(html.text, "lxml")
+        entry = text.find_all(class_="entry-header")
+        if "Nothing Found" in entry[0].text:
+            await m.delete()
+            if title != "":
+                await msg.reply(f"404 Not FOUND For: {title}", True)
+            else:
+                await msg.reply(f"404 Not FOUND!", True)
+            return
+        data = []
+        for i in entry:
+            genre = i.find(class_="gmr-movie-on").text
+            genre = f"{genre}" if genre != "" else "N/A"
+            judul = i.find(class_="entry-title").find("a").text
+            link = i.find(class_="entry-title").find("a").get("href")
+            data.append({"judul": judul, "link": link, "genre": genre})
+        if title != "":
+            head = f"<b>#Zonafilm Results For:</b> <code>{title}</code>\n\n"
+        else:
+            head = f"<b>#Zonafilm Latest:</b>\n🌀 Use /{msg.command[0]} [title] to start search with title.\n\n"
+        msgs = ""
+        await m.delete()
+        for c, i in enumerate(data, start=1):
+            msgs += f"<b>{c}. <a href='{i['link']}'>{i['judul']}</a></b>\n<b>Genre:</b> <code>{i['genre']}</code>\n<b>Extract:</b> <code>/{msg.command[0]}_scrap {i['link']}</code>\n\n"
+            if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
+                await msg.reply(
+                    head + msgs,
+                    True,
+                    disable_web_page_preview=True,
+                )
+                await asyncio.sleep(2)
+                msgs = ""
+        if msgs != "":
+            await msg.reply(head + msgs, True, disable_web_page_preview=True)
+    except Exception as e:
+        LOGGER.error(e)
+        await m.delete()
+        await msg.reply(f"ERROR: <code>{e}</code>", True)
+
+
 @app.on_message(filters.command(["nodrakor"], COMMAND_HANDLER))
 @capture_err
 async def nodrakor(_, msg):

@@ -197,7 +248,7 @@ async def movikucc(_, msg):
 @app.on_message(filters.command(["savefilm21"], COMMAND_HANDLER))
 @capture_err
 async def savefilm21(_, msg):
-    SITE = "http://185.99.135.215"
+    SITE = "https://185.99.135.215"
     try:
         title = msg.text.split(" ", 1)[1]
     except:

@@ -206,9 +257,7 @@ async def savefilm21(_, msg):
     data = []
     try:
         if title is not None:
-            html = await http.get(
-                f"{SITE}/?s={title}", headers=headers, follow_redirects=False
-            )
+            html = await http.get(f"{SITE}/?s={title}", headers=headers)
             bs4 = BeautifulSoup(html.text, "lxml")
             res = bs4.find_all(class_="entry-title")
             for i in res:

@@ -239,7 +288,7 @@ async def savefilm21(_, msg):
                 disable_web_page_preview=True,
             )
         else:
-            html = await http.get(SITE, headers=headers, follow_redirects=False)
+            html = await http.get(SITE, headers=headers)
             bs4 = BeautifulSoup(html.text, "lxml")
             res = bs4.find_all(class_="entry-title")
             for i in res:

@@ -549,7 +598,7 @@ async def savefilm21_scrap(_, message):
         "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
     }

-    html = await http.get(link, headers=headers, follow_redirects=False)
+    html = await http.get(link, headers=headers)
     soup = BeautifulSoup(html.text, "lxml")
     res = soup.find_all(class_="button button-shadow")
     res = "".join(f"{i.text}\n{i['href']}\n\n" for i in res)

@@ -573,7 +622,7 @@ async def nodrakor_scrap(_, message):
         "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
     }

-    html = await http.get(link, headers=headers, follow_redirects=False)
+    html = await http.get(link, headers=headers)
     soup = BeautifulSoup(html.text, "lxml")
     hasil = soup.find_all(class_="gmr-download-wrap clearfix")[0]
     await message.reply(f"<b>Hasil Scrap dari {link}</b>:\n{hasil}")

@@ -78,7 +78,7 @@ async def ceksub(_, m):
             [
                 InlineKeyboardButton(
                     f"0:{mapping}({lang}): {stream_type}: {stream_name}",
-                    f"streamextract_{mapping}_{stream_name}",
+                    f"streamextract_0:{mapping}_{stream_name}",
                 )
             ]
         )

@@ -119,7 +119,8 @@ async def convertsrt(c, m):
     )
     (await shell_exec(f"mediaextract -i '{dl}' '{filename}'.srt"))[0]
     await m.reply_document(
-        f"{filename}.srt", caption=f"<code>{filename}.srt</code>\n\nConverted by @{c.me.username}"
+        f"{filename}.srt",
+        caption=f"<code>{filename}.srt</code>\n\nConverted by @{c.me.username}",
     )
     await msg.delete()
     try:

@@ -148,24 +149,14 @@ async def stream_extract(bot, update):
         format = "mp3"
     elif codec == "eac3":
         format = "eac3"
-    elif codec == "subrip":
-        format = "srt"
-    elif codec == "ass":
-        format == "ass"
     else:
-        format = None
+        format = "srt"
-    if not format:
-        return await update.answer(
-            "⚠️ Unsupported format, try extract manual using ffmpeg"
-        )
     start_time = perf_counter()
     namafile = get_subname(link, format)
     LOGGER.info(
         f"ExtractSub: {namafile} by {update.from_user.first_name} [{update.from_user.id}]"
     )
-    extract = (await shell_exec(f"mediaextract -i {link} -map 0:{map} {namafile}"))[
-        0
-    ]
+    extract = (await shell_exec(f"mediaextract -i {link} -map {map} {namafile}"))[0]
     end_time = perf_counter()
     timelog = "{:.2f}".format(end_time - start_time) + " second"
     await update.message.reply_document(
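
Net effect of the stream_extract hunk: the explicit subrip/ass branches and the "Unsupported format" early return are gone, any codec not matched by an earlier branch now falls through to an .srt extension, and the shell command passes the callback-supplied {map} value straight to -map, since the button callback above now embeds the leading 0:. A standalone sketch of the resulting codec-to-extension choice follows; the helper name and the mp3 condition are assumptions, because the branches above the hunk are not shown.

# Sketch of the codec-to-extension selection after this change; pick_extension
# is a hypothetical name, and the "mp3" condition is assumed from the context line.
def pick_extension(codec: str) -> str:
    if codec == "mp3":
        return "mp3"
    elif codec == "eac3":
        return "eac3"
    # subrip, ass and any other codec now default to srt instead of
    # aborting with an "Unsupported format" answer.
    return "srt"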