New Minor Fix

yasirarism authored on 2023-05-09 21:46:32 +07:00; committed by GitHub
parent 821ab8f866
commit caac57c19e
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
14 changed files with 107 additions and 35 deletions

View file

@@ -13,6 +13,8 @@
"res_scrape": "<b>Scrape result from</b> <code>{link}</code>:\n\n{kl}",
"header_with_query": "<b>#{web} Results For:</b> <code>{kueri}</code>\n\n",
"header_no_query": "<b>#{web} Latest:</b>\n🌀 Use /{cmd} [title] to start search with title.\n\n",
"invalid_cmd_scrape": "Gunakan command /{cmd} <b>[link]</b> untuk scrap link download",
"invalid_cmd_scrape": "Use command /{cmd} <b>[link]</b> to scrape download link.",
"err_getweb": "ERROR: Failed getting data from web because {err}.",
"err_getapi": "ERROR: Failed getting data from API",
"unsupport_dl_btn": "Some result will not appear in extract button because unsupported link."
}
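These keys are consumed through the `strings(...)` lookup that `use_chat_lang` injects into each handler. A minimal sketch of how such a lookup could work, assuming the locale files live under a `locales/` directory (the path and loader shape are assumptions for illustration; MissKaty's actual decorator may differ):

import json

def make_strings(lang_code: str):
    # Assumed layout: one JSON table per language under locales/.
    with open(f"locales/{lang_code}.json", encoding="utf-8") as f:
        table = json.load(f)
    def strings(key: str) -> str:
        # Fall back to the raw key so a missing entry never crashes a handler.
        return table.get(key, key)
    return strings

strings = make_strings("en")
print(strings("err_getweb").format(err="timeout"))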

View file

@@ -1,5 +1,6 @@
{
"no_url": "Give A Url To Fetch Screenshot.",
"wait_str": "Capturing screenshot...",
"str_credit": "🌞 Screenshot generated using Puppeteer",
"ss_failed_str": "Failed To Take Screenshot. {err}"
}

View file

@@ -13,6 +13,8 @@
"res_scrape": "<b>Hasil Scrape dari</b> <code>{link}</code>:\n\n{kl}",
"header_with_query": "<b>Hasil Pencarian #{web} Untuk:</b> <code>{kueri}</code>\n\n",
"header_no_query": "<b>#{web} Terbaru:</b>\n🌀 Gunakan /{cmd} [judul] untuk memulai pencarian dengan judul.\n\n",
"invalid_cmd_scrape": "Gunakan perintah /{cmd} <b>[tautan]</b> untuk mengambil link unduhan.",
"invalid_cmd_scrape": "Gunakan perintah /{cmd} <b>[link]</b> untuk mengambil link unduhan.",
"err_getweb": "ERROR: Failed getting data from web because {err}.",
"err_getapi": "ERROR: Failed getting data from API",
"unsupport_dl_btn": "Beberapa hasil tidak akan muncul di tombol ekstrak karena tautan tidak didukung."
}

View file

@@ -1,5 +1,6 @@
{
"no_url": "Berikan url untuk mengambil tangkapan layar.",
"wait_str": "Mengambil tangkapan layar...",
"str_credit": "🌞 Screenshot dibuat dengan Puppeteer",
"ss_failed_str": "Gagal Mengambil Tangkapan Layar. ERROR: {err}"
}

View file

@@ -13,6 +13,8 @@
"res_scrape": "<b>Asil scrape saka</b> <kode>{link}</code>:\n\n{kl}",
"header_with_query": "<b>Asil Nggoleki #{web} Kanggo:</b> <code>{kueri}</code>\n\n",
"header_no_query": "<b>#{web} Paling anyar:</b>\n🌀 Gunakake /{cmd} [judhul] kanggo miwiti nggoleki kanthi judhul.\n\n",
"invalid_cmd_scrape": "Gunakake prentah /{cmd} <b>[pranala]</b> kanggo ngunduh pranala kethokan",
"invalid_cmd_scrape": "Gunakake prentah /{cmd} <b>[link]</b> kanggo ngunduh pranala kethokan",
"err_getweb": "ERROR: Gagal njupuk data saka web amarga {err}.",
"err_getapi": "ERROR: Gagal njupuk data saka API",
"unsupport_dl_btn": "Sawetara asil ora bakal katon ing tombol ekstrak amarga pranala ora didhukung."
}

View file

@@ -1,5 +1,6 @@
{
"no_url": "Wenehana url kanggo njupuk screenshot.",
"wait_str": "Lagi njupuk gambar layar...",
"str_credit": "🌞 Screenshot digawe nganggo Puppeteer",
"ss_failed_str": "Gagal njupuk gambar. ERROR: {err}"
}

View file

@@ -28,7 +28,7 @@ MOD_NOLOAD = ["subscene_dl"]
HELPABLE = {}
cleanmode = {}
botStartTime = time.time()
misskaty_version = "v2.04.30 - Stable"
misskaty_version = "v2.023.5.9 - Stable"
pymonclient = MongoClient(DATABASE_URI)
mongo = AsyncIOMotorClient(DATABASE_URI)

View file

@@ -65,7 +65,7 @@ async def reply_text(self: Message, text: str, as_raw: bool = False, del_in: int
if del_in == 0:
return msg
await asleep(del_in)
-return bool(await msg.delete())
+return bool(await msg.delete_msg())
except FloodWait as e:
await asleep(e.value)
return await reply_text(self, text, *args, **kwargs)
@@ -105,7 +105,7 @@ async def edit_text(self, text: str, del_in: int = 0, *args, **kwargs) -> Union[
if del_in == 0:
return msg
await asleep(del_in)
-return bool(await msg.delete())
+return bool(await msg.delete_msg())
except FloodWait as e:
LOGGER.warning(str(e))
await asleep(e.value)
@@ -116,7 +116,7 @@ async def edit_text(self, text: str, del_in: int = 0, *args, **kwargs) -> Union[
LOGGER.info(f"Leaving from {self.chat.title} [{self.chat.id}] because doesn't have admin permission.")
return await self.chat.leave()
except (MessageAuthorRequired, MessageIdInvalid):
-return await reply_text(text=text, *args, **kwargs)
+return await reply_text(self, text=text, *args, **kwargs)
async def edit_or_send_as_file(self, text: str, del_in: int = 0, as_raw: bool = False, *args, **kwargs) -> Union["Message", bool]:
@@ -164,7 +164,7 @@ async def edit_or_send_as_file(self, text: str, del_in: int = 0, as_raw: bool =
if del_in == 0:
return msg
await asleep(del_in)
-return bool(await msg.delete())
+return bool(await msg.delete_msg())
except (MessageTooLong, OSError):
return await reply_as_file(self, text=text, *args, **kwargs)
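The `delete()` → `delete_msg()` change above (and the added `self` in the `reply_text` fallback) relies on MissKaty's patched helpers bound onto pyrogram's `Message`. A minimal sketch of what such a `delete_msg` alias might look like, assuming it simply wraps `Message.delete` and reports failure instead of raising (the real patch may differ):

from pyrogram.errors import RPCError
from pyrogram.types import Message

async def delete_msg(self: Message) -> bool:
    # Hypothetical alias: delete quietly and return success as a bool,
    # rather than raising when the message is already gone.
    try:
        await self.delete()
        return True
    except RPCError:
        return False

Message.delete_msg = delete_msg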

View file

@@ -21,8 +21,9 @@ openai.api_key = OPENAI_API
async def chatbot(self: Client, ctx: Message, strings):
if len(ctx.command) == 1:
return await ctx.reply_msg(strings("no_question").format(cmd=ctx.command[0]), quote=True, del_in=5)
-is_in_gap, sleep_time = await check_time_gap(ctx.from_user.id or ctx.sender_chat.id)
-if is_in_gap and (ctx.from_user.id or ctx.sender_chat.id not in SUDO):
+uid = ctx.from_user.id if ctx.from_user else ctx.sender_chat.id
+is_in_gap, sleep_time = await check_time_gap(uid)
+if is_in_gap and (uid not in SUDO):
return await ctx.reply_msg(strings("dont_spam"), del_in=5)
openai.aiosession.set(ClientSession())
pertanyaan = ctx.input
@@ -41,9 +42,9 @@ async def chatbot(self: Client, ctx: Message, strings):
await asyncio.sleep(1.5)
num = 0
await msg.edit_msg(answer)
-await openai.aiosession.get().close()
except MessageTooLong:
answerlink = await post_to_telegraph(False, "MissKaty ChatBot ", html.escape(answer))
await msg.edit_msg(strings("answers_too_long").format(answerlink=answerlink), disable_web_page_preview=True)
except Exception as err:
await msg.edit_msg(f"ERROR: {str(err)}")
+await openai.aiosession.get().close()
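The rewritten gate fixes two bugs at once: `ctx.from_user` is `None` for anonymous and channel senders, so dereferencing `.id` unconditionally could raise, and the old condition fell into an operator-precedence trap. A small self-contained demonstration of the precedence issue:

SUDO = {111}
user_id, chat_id = 222, 333

# Old form: `not in` binds tighter than `or`, so this parses as
# `user_id or (chat_id not in SUDO)` and is truthy for every real user,
# meaning the SUDO exemption never actually tested the sender.
old = user_id or chat_id not in SUDO   # -> 222

# New form: resolve one id first, then do a single membership test.
uid = user_id if user_id else chat_id
new = uid not in SUDO                  # -> True

print(old, new)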

View file

@@ -166,7 +166,7 @@ async def shell(self: Client, ctx: Message, strings) -> "Message":
if not ctx.from_user.is_self:
await msg.delete_msg()
else:
await ctx.reply(strings("no_reply"), del_in=5)
await ctx.reply_msg(strings("no_reply"), del_in=5)
@app.on_message((filters.command(["ev", "run", "myeval"], COMMAND_HANDLER) | filters.regex(r"app.run\(\)$")) & filters.user(SUDO))

View file

@@ -85,7 +85,7 @@ async def download(client, message):
url = url.strip()
custom_file_name = custom_file_name.strip()
download_file_path = os.path.join("downloads/", custom_file_name)
-downloader = SmartDL(url, download_file_path, progress_bar=False)
+downloader = SmartDL(url, download_file_path, progress_bar=False, timeout=10)
downloader.start(blocking=False)
c_time = time.time()
while not downloader.isFinished():
@@ -159,7 +159,7 @@ async def fbdl(client, message):
url = resjson["result"]["links"]["hd"].replace("&amp;", "&")
except:
url = resjson["result"]["links"]["sd"].replace("&amp;", "&")
-obj = SmartDL(url, progress_bar=False)
+obj = SmartDL(url, progress_bar=False, timeout=10)
obj.start()
path = obj.get_dest()
await message.reply_video(path, caption=f"<code>{os.path.basename(path)}</code>\n\nUploaded for {message.from_user.mention} [<code>{message.from_user.id}</code>]", thumb="assets/thumb.jpg")
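Both `SmartDL` calls above now pass `timeout=10`, which bounds each connection attempt instead of letting a dead mirror hang the handler. A minimal standalone sketch of the same non-blocking pattern, with a placeholder URL:

from pySmartDL import SmartDL

url = "https://example.com/file.bin"  # placeholder
obj = SmartDL(url, "downloads/file.bin", progress_bar=False, timeout=10)
obj.start(blocking=False)  # returns immediately so progress can be reported
while not obj.isFinished():
    pass  # a real handler edits a Telegram progress message here
if obj.isSuccessful():
    print("saved to", obj.get_dest())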

View file

@@ -119,16 +119,18 @@ async def ceksub(self: Client, ctx: Message, strings):
@use_chat_lang()
async def convertsrt(self: Client, ctx: Message, strings):
reply = ctx.reply_to_message
-if not reply and reply.document and (reply.document.file_name.endswith(".vtt") or reply.document.file_name.endswith(".ass")):
+if not reply or not reply.document or not reply.document.file_name.endswith((".vtt", ".ass")):
return await ctx.reply_msg(strings("conv_sub_help").format(cmd=ctx.command[0]), del_in=5)
msg = await ctx.reply_msg(strings("convert_str"), quote=True)
-dl = await reply.download()
+if not os.path.exists("downloads"):
+os.makedirs("downloads")
+dl = await reply.download(file_name="downloads/")
filename = dl.split("/", 3)[3]
LOGGER.info(f"ConvertSub: {filename} by {ctx.from_user.first_name} [{ctx.from_user.id}]")
(await shell_exec(f"mediaextract -i '{dl}' '{filename}.srt'"))[0]
LOGGER.info(f"ConvertSub: {filename} by {ctx.from_user.first_name if ctx.from_user else ctx.sender_chat.title} [{ctx.from_user.id if ctx.from_user else ctx.sender_chat.id}]")
(await shell_exec(f"mediaextract -i '{dl}' 'downloads/{filename}.srt'"))[0]
c_time = time()
await ctx.reply_document(
f"{filename}.srt",
f"downloads/{filename}.srt",
caption=strings("capt_conv_sub").format(nf=filename, bot=self.me.username),
thumb="assets/thumb.jpg",
progress=progress_for_pyrogram,
@@ -137,7 +139,7 @@ async def convertsrt(self: Client, ctx: Message, strings):
await msg.delete_msg()
try:
os.remove(dl)
os.remove(f"{filename}.srt")
os.remove(f"downloads/{filename}.srt")
except:
pass
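`shell_exec` is the repo's own wrapper, and the `(...)[0]` indexing suggests it returns a tuple whose first element is the command output. A hedged sketch of such a helper under that assumption (not the actual MissKaty implementation):

import asyncio

async def shell_exec(cmd: str):
    # Run a shell command without blocking the event loop and return
    # (combined output, exit code).
    proc = await asyncio.create_subprocess_shell(
        cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
    )
    out, _ = await proc.communicate()
    return out.decode(errors="replace"), proc.returncode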

View file

@@ -39,6 +39,21 @@ LOGGER = logging.getLogger(__name__)
SCRAP_DICT = {}
data_kuso = {}
web = {
"yasirapi": "https://yasirapi.eu.org"
"pahe": "https://pahe.li",
"savefilm21": "https://savefilm21.store",
"melongmovie": "http://146.190.193.128",
"terbit21": "https://terbit21.art",
"lk21": "https://nonton.lk21official.wiki",
"gomov": "https://gomov.bio",
"movieku": "https://107.152.37.223",
"kusonime": "https://kusonime.com",
"lendrive": "https://lendrive.web.id",
"samehadaku": "https://samehadaku.cam",
"oplovers": "https://oploverz.top",
}
def split_arr(arr, size: 5):
arrs = []
@@ -53,7 +68,11 @@ def split_arr(arr, size: 5):
# Terbit21 GetData
async def getDataTerbit21(msg, kueri, CurrentPage, strings):
if not SCRAP_DICT.get(msg.id):
-terbitjson = (await http.get(f"https://yasirapi.eu.org/terbit21?q={kueri}")).json() if kueri else (await http.get("https://yasirapi.eu.org/terbit21")).json()
+try:
+terbitjson = (await http.get(f"{web['yasirapi']}/terbit21?q={kueri}")).json() if kueri else (await http.get(f"{web['yasirapi']}/terbit21")).json()
+except Exception:
+await msg.edit_msg(strings("err_getapi"))
+return None, None
if not terbitjson.get("result"):
await msg.edit_msg(strings("no_result"), del_in=5)
return None, None
@@ -79,7 +98,11 @@ async def getDataTerbit21(msg, kueri, CurrentPage, strings):
# LK21 GetData
async def getDatalk21(msg, kueri, CurrentPage, strings):
if not SCRAP_DICT.get(msg.id):
-lk21json = (await http.get(f"https://yasirapi.eu.org/lk21?q={kueri}")).json() if kueri else (await http.get("https://yasirapi.eu.org/lk21")).json()
+try:
+lk21json = (await http.get(f"{web['yasirapi']}/lk21?q={kueri}")).json() if kueri else (await http.get(f"{web['yasirapi']}/lk21")).json()
+except Exception:
+await msg.edit_msg(strings("err_getapi"))
+return None, None
if not lk21json.get("result"):
await msg.edit_msg(strings("no_result"), del_in=5)
return None, None
@@ -105,7 +128,11 @@ async def getDatalk21(msg, kueri, CurrentPage, strings):
# Pahe GetData
async def getDataPahe(msg, kueri, CurrentPage, strings):
if not SCRAP_DICT.get(msg.id):
-pahejson = (await http.get(f"https://yasirapi.eu.org/pahe?q={kueri}")).json()
+try:
+pahejson = (await http.get(f"{web['yasirapi']}/pahe?q={kueri}")).json()
+except Exception:
+await msg.edit_msg(strings("err_getapi"))
+return None, None
if not pahejson.get("result"):
await msg.edit_msg(strings("no_result"), del_in=5)
return None, None
@@ -128,7 +155,11 @@ async def getDataPahe(msg, kueri, CurrentPage, strings):
async def getDataKuso(msg, kueri, CurrentPage, user, strings):
if not SCRAP_DICT.get(msg.id):
kusodata = []
data = await http.get(f"https://kusonime.com/?s={kueri}", headers=headers, follow_redirects=True)
try:
data = await http.get(f"{web['kusonime']}/?s={kueri}", headers=headers, follow_redirects=True)
except Exception as err:
await msg.edit_msg(strings("err_getweb").format(err=err))
return None, None
res = BeautifulSoup(data, "lxml").find_all("h2", {"class": "episodeye"})
for i in res:
ress = i.find_all("a")[0]
@@ -163,7 +194,11 @@ async def getDataKuso(msg, kueri, CurrentPage, user, strings):
async def getDataMovieku(msg, kueri, CurrentPage, strings):
if not SCRAP_DICT.get(msg.id):
moviekudata = []
data = await http.get(f"https://107.152.37.223/?s={kueri}", headers=headers, follow_redirects=True)
try:
data = await http.get(f"{web['movieku']}/?s={kueri}", headers=headers, follow_redirects=True)
except Exception as err:
await msg.edit_msg(strings("err_getweb").format(err=err))
return None, None
r = BeautifulSoup(data, "lxml")
res = r.find_all(class_="bx")
for i in res:
@@ -194,7 +229,11 @@ async def getDataMovieku(msg, kueri, CurrentPage, strings):
async def getDataSavefilm21(msg, kueri, CurrentPage, user, strings):
if not SCRAP_DICT.get(msg.id):
sfdata = []
data = await http.get(f"https://savefilm21.pro/?s={kueri}", headers=headers, follow_redirects=True)
try:
data = await http.get(f"{web['savefilm21']}/?s={kueri}", headers=headers, follow_redirects=True)
except Exception as err:
await msg.edit_msg(strings("err_getweb").format(err=err))
return None, None
text = BeautifulSoup(data, "lxml")
entry = text.find_all(class_="entry-header")
if "Tidak Ditemukan" in entry[0].text:
@@ -228,7 +267,11 @@ async def getDataSavefilm21(msg, kueri, CurrentPage, user, strings):
# Lendrive GetData
async def getDataLendrive(msg, kueri, CurrentPage, user, strings):
if not SCRAP_DICT.get(msg.id):
data = await http.get(f"https://lendrive.web.id/?s={kueri}", headers=headers, follow_redirects=True)
try:
data = await http.get(f"{web['lendrive']}/?s={kueri}", headers=headers, follow_redirects=True)
except Exception as err:
await msg.edit_msg(strings("err_getweb").format(err=err))
return None, None
soup = BeautifulSoup(data, "lxml")
lenddata = []
for o in soup.find_all(class_="bsx"):
@@ -260,7 +303,11 @@ async def getDataLendrive(msg, kueri, CurrentPage, user, strings):
# MelongMovie GetData
async def getDataMelong(msg, kueri, CurrentPage, user, strings):
if not SCRAP_DICT.get(msg.id):
data = await http.get(f"https://melongmovie.info/?s={kueri}", headers=headers, follow_redirects=True)
try:
data = await http.get(f"{web['melongmovie']}/?s={kueri}", headers=headers, follow_redirects=True)
except Exception as err:
await msg.edit_msg(strings("err_getweb").format(err=err))
return None, None
bs4 = BeautifulSoup(data, "lxml")
melongdata = []
for res in bs4.select(".box"):
@@ -295,7 +342,11 @@ async def getDataMelong(msg, kueri, CurrentPage, user, strings):
# GoMov GetData
async def getDataGomov(msg, kueri, CurrentPage, user, strings):
if not SCRAP_DICT.get(msg.id):
-gomovv = await http.get(f"https://gomov.bio/?s={kueri}", headers=headers, follow_redirects=True)
+try:
+gomovv = await http.get(f"{web['gomov']}/?s={kueri}", headers=headers, follow_redirects=True)
+except Exception as err:
+await msg.edit_msg(strings("err_getweb").format(err=err))
+return None, None
text = BeautifulSoup(gomovv, "lxml")
entry = text.find_all(class_="entry-header")
if entry[0].text.strip() == "Nothing Found":
@@ -334,10 +385,14 @@ async def getDataGomov(msg, kueri, CurrentPage, user, strings):
async def getSame(msg, query, current_page, strings):
if not SCRAP_DICT.get(msg.id):
cfse = cfscrape.CloudflareScraper()
-if query:
-data = cfse.get(f"https://samehadaku.cam/?s={query}", headers=headers)
-else:
-data = cfse.get("https://samehadaku.cam/", headers=headers)
+try:
+if query:
+data = cfse.get(f"{web['samehadaku']}/?s={query}", headers=headers)
+else:
+data = cfse.get(web["samehadaku"], headers=headers)
+except Exception as err:
+await msg.edit_msg(strings("err_getweb").format(err=err))
+return None, None
res = BeautifulSoup(data.text, "lxml").find_all(class_="animposx")
sdata = []
for i in res:
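Every scraper above now repeats the same shape: try the fetch, and on failure report `err_getweb`/`err_getapi` and return `(None, None)`. A shared helper (hypothetical, not part of this commit) could express the pattern once, using the module's existing `http` client and `headers`:

async def fetch_page(msg, url, strings, **kwargs):
    # One place for the fetch-or-report pattern used by every getData* function.
    try:
        return await http.get(url, headers=headers, follow_redirects=True, **kwargs)
    except Exception as err:
        await msg.edit_msg(strings("err_getweb").format(err=err))
        return None

Each getData* function would then reduce to `data = await fetch_page(msg, f"{web['kusonime']}/?s={kueri}", strings)` followed by `if data is None: return None, None`.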

View file

@@ -1,7 +1,9 @@
+import os
from asyncio import gather
from pyrogram import filters, Client
from pyrogram.types import Message
+from pySmartDL import SmartDL
from misskaty import app
from misskaty.core.decorator.errors import capture_err
@@ -23,11 +25,14 @@ async def take_ss(self: Client, ctx: Message, strings):
if len(ctx.command) == 1:
return await ctx.reply_msg(strings("no_url"), del_in=6)
url = ctx.command[1] if ctx.command[1].startswith("http") else f"https://{ctx.command[1]}"
filename = f"webSS_{ctx.from_user.id}.png"
download_file_path = os.path.join("downloads/", f"webSS_{ctx.from_user.id}.png")
msg = await ctx.reply_msg(strings("wait_str"))
try:
url = f"https://webss.yasirapi.eu.org/api?url={url}&width=1280&height=720"
-await gather(*[ctx.reply_document(url, file_name=filename), ctx.reply_photo(url)])
+downloader = SmartDL(url, download_file_path, progress_bar=False, timeout=10)
+downloader.start(blocking=True)
+await gather(*[ctx.reply_document(download_file_path), ctx.reply_photo(download_file_path, caption=strings("str_credit"))])
await msg.delete_msg()
+os.remove(download_file_path)
except Exception as e:
await msg.edit_msg(strings("ss_failed_str").format(err=str(e)))