From ba1a5e253d61ed63f7678398fae56c17d7735036 Mon Sep 17 00:00:00 2001
From: yasir
Date: Wed, 4 Jan 2023 21:09:52 +0700
Subject: [PATCH] Fix Scraper

---
 misskaty/plugins/admin.py         |  9 ++++---
 misskaty/plugins/dev.py           |  9 ++++---
 misskaty/plugins/inline_search.py |  9 ++++---
 misskaty/plugins/scrapwebsite.py  | 44 +++++++++++++++----------------
 misskaty/plugins/sub_extractor.py |  2 +-
 5 files changed, 40 insertions(+), 33 deletions(-)

diff --git a/misskaty/plugins/admin.py b/misskaty/plugins/admin.py
index 7e059e94..ee00834e 100644
--- a/misskaty/plugins/admin.py
+++ b/misskaty/plugins/admin.py
@@ -363,9 +363,12 @@ async def deleteFunc(_, message):
     filters.command(["promote", "fullpromote"], COMMAND_HANDLER) & ~filters.private
 )
 @adminsOnly("can_promote_members")
-async def promoteFunc(_, message):
-    user_id = await extract_user(message)
-    umention = (await app.get_users(user_id)).mention
+async def promoteFunc(client, message):
+    try:
+        user_id = await extract_user(message)
+        umention = (await app.get_users(user_id)).mention
+    except Exception:
+        return await message.reply("⚠️ Invalid userid/username")
     if not user_id:
         return await message.reply_text("I can't find that user.")
     bot = await app.get_chat_member(message.chat.id, client.me.id)
diff --git a/misskaty/plugins/dev.py b/misskaty/plugins/dev.py
index 29de83a8..de832e6b 100644
--- a/misskaty/plugins/dev.py
+++ b/misskaty/plugins/dev.py
@@ -77,7 +77,7 @@ async def shell(_, m):
 
 @app.on_message(filters.command(["ev", "run"], COMMAND_HANDLER) & filters.user(SUDO))
 @app.on_edited_message(filters.command(["ev", "run"]) & filters.user(SUDO))
-async def evaluation_cmd_t(_, m):
+async def evaluation_cmd_t(c, m):
     status_message = await m.reply("__Processing eval pyrogram...__")
     try:
         cmd = m.text.split(" ", maxsplit=1)[1]
@@ -126,13 +126,14 @@ async def evaluation_cmd_t(_, m):
     await status_message.edit(final_output, parse_mode=enums.ParseMode.MARKDOWN)
 
 
-async def aexec(code, client, message):
+async def aexec(code, c, m):
     exec(
-        "async def __aexec(client, message): "
+        "async def __aexec(c, m): "
         + "\n p = print"
+        + "\n replied = m.reply_to_message"
         + "".join(f"\n {l_}" for l_ in code.split("\n"))
     )
-    return await locals()["__aexec"](client, message)
+    return await locals()["__aexec"](c, m)
 
 
 async def shell_exec(code, treat=True):
diff --git a/misskaty/plugins/inline_search.py b/misskaty/plugins/inline_search.py
index f21de3be..e6cf7868 100644
--- a/misskaty/plugins/inline_search.py
+++ b/misskaty/plugins/inline_search.py
@@ -159,15 +159,18 @@ async def inline_menu(_, inline_query: InlineQuery):
     namanya = (
         f"{diaa.first_name} {diaa.last_name}" if diaa.last_name else diaa.first_name
     )
-    msg = f"🏷 Name: {namanya}\n🆔 ID: {diaa.id}\n"
+    msg = f"🏷 Name: {namanya}\n🆔 ID: {diaa.id}\n"
     if diaa.username:
         msg += f"🌐 Username: @{diaa.username}\n"
-    msg += f"🕰 User Status: {diaa.status}\n"
+    if diaa.status:
+        msg += f"🕰 User Status: {diaa.status}\n"
     if diaa.dc_id:
         msg += f"🌏 DC: {diaa.dc_id}\n"
     msg += f"✨ Premium: {diaa.is_premium}\n"
     msg += f"⭐️ Verified: {diaa.is_verified}\n"
-    msg += f"🇮🇩 Language: {diaa.language_code}"
+    msg += f"🤖 Bot: {diaa.is_bot}\n"
+    if diaa.language_code:
+        msg += f"🇮🇩 Language: {diaa.language_code}"
     results = [
         InlineQueryResultArticle(
             title=f"Get information of {diaa.id}",
diff --git a/misskaty/plugins/scrapwebsite.py b/misskaty/plugins/scrapwebsite.py
index 68903aef..440a6135 100644
--- a/misskaty/plugins/scrapwebsite.py
+++ b/misskaty/plugins/scrapwebsite.py
@@ -102,8 +102,8 @@ async def movikucc(_, msg):
     data = []
     if len(msg.command) == 1:
         try:
-            html = await ambil_source(f"https://107.152.37.223/")
-            r = BeautifulSoup(html, "lxml")
+            html = await http.get("https://107.152.37.223/")
+            r = BeautifulSoup(html.text, "lxml")
             res = r.find_all(class_="bx")
             for i in res:
                 judul = i.find_all("a")[0]["title"]
@@ -115,7 +115,7 @@ async def movikucc(_, msg):
         await m.delete()
         head = f"#Movieku Latest:\n--> Use /{msg.command[0]} [title] to start search with title.\n\n"
         msgs = ""
-        for c, i in enumerate(data[:15], start=1):
+        for c, i in enumerate(data, start=1):
             msgs += f"{c}. {i['judul']}\nExtract: /{msg.command[0]}_scrap {i['link']}\n\n"
             if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                 await msg.reply(
@@ -138,8 +138,8 @@ async def movikucc(_, msg):
     else:
         title = msg.text.split(" ", 1)[1]
         try:
-            html = await ambil_source(f"https://107.152.37.223/?s={title}")
-            r = BeautifulSoup(html, "lxml")
+            html = await http.get(f"https://107.152.37.223/?s={title}")
+            r = BeautifulSoup(html.text, "lxml")
             res = r.find_all(class_="bx")
             for i in res:
                 judul = i.find_all("a")[0]["title"]
@@ -151,7 +151,7 @@ async def movikucc(_, msg):
         await m.delete()
         head = f"#Movieku Results For: {title}\n\n"
         msgs = ""
-        for c, i in enumerate(data[:15], start=1):
+        for c, i in enumerate(data, start=1):
             msgs += f"{c}. {i['judul']}\nExtract: /{msg.command[0]}_scrap {i['link']}\n\n"
             if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                 await msg.reply(
@@ -197,11 +197,11 @@ async def savefilm21(_, msg):
             data.append({"judul": judul, "link": link})
         if not data:
             await m.delete()
-            return await msg.reply("Result 404 Not found!", True)
+            return await msg.reply("404 Result not found!", True)
         await m.delete()
         head = f"#SaveFilm21 Results For: {title}\n\n"
         msgs = ""
-        for c, i in enumerate(data[:15], start=1):
+        for c, i in enumerate(data, start=1):
             msgs += f"{c}. {i['judul']}\nExtract: /{msg.command[0]}_scrap {i['link']}\n\n"
             if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                 await msg.reply(
@@ -229,7 +229,7 @@ async def savefilm21(_, msg):
         await m.delete()
         head = f"#SaveFilm21 Latest:\n--> Use /{msg.command[0]} [title] to start search with title.\n\n"
         msgs = ""
-        for c, i in enumerate(data[:15], start=1):
+        for c, i in enumerate(data, start=1):
             msgs += f"{c}. {i['judul']}\nExtract: /{savefilm21_scrap} {i['link']}\n\n"
             if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                 await msg.reply(
@@ -276,11 +276,11 @@ async def melongmovie(_, msg):
             data.append({"judul": title, "link": url, "quality": quality})
         if not data:
             await m.delete()
-            return await msg.reply("404 Not found!", True)
+            return await msg.reply("404 Result not found!", True)
         await m.delete()
         head = f"#MelongMovie Results For: {judul}\n\n"
         msgs = ""
-        for c, i in enumerate(data[:15], start=1):
+        for c, i in enumerate(data, start=1):
             msgs += f"{c}. {i['judul']}\nQuality: {i['quality']}\nExtract: /{msg.command[0]}_scrap {i['link']}\n\n"
             if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                 await msg.reply(head + msgs, True, disable_web_page_preview=True)
@@ -307,11 +307,11 @@ async def melongmovie(_, msg):
             data.append({"judul": title, "link": url, "quality": quality})
         if not data:
             await m.delete()
-            return await msg.reply("404 Not found!", True)
+            return await msg.reply("404 Result not found!", True)
         await m.delete()
         head = f"#MelongMovie Latest:\n--> Use /{msg.command[0]} [title] to start search with title.\n\n"
         msgs = ""
-        for c, i in enumerate(data[:15], start=1):
+        for c, i in enumerate(data, start=1):
             msgs += f"{c}. {i['judul']}\nQuality: {i['quality']}\nExtract: /{msg.command[0]}_scrap {i['link']}\n\n"
             if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                 await msg.reply(head + msgs, True, disable_web_page_preview=True)
@@ -335,7 +335,7 @@ async def pahe_scrap(_, msg):
     res = api.json()
     if not res["result"]:
         await m.delete()
-        return await m.reply("Result 404 Not found!", True)
+        return await msg.reply("404 Result not found!", True)
     head = (
         f"#Pahe Results For: {title}\n\n"
         if title
@@ -375,7 +375,7 @@ async def terbit21_scrap(_, msg):
         await m.delete()
         head = f"#Terbit21 Latest:\n--> Use /{msg.command[0]} [title] to start search with title.\n\n"
         msgs = ""
-        for c, i in enumerate(res["result"][:15], start=1):
+        for c, i in enumerate(res["result"], start=1):
             msgs += f"{c}. {i['judul']}\nCategory: {i['kategori']}\n--> Download\n\n"
             if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                 await msg.reply(head + msgs, True, disable_web_page_preview=True)
@@ -390,7 +390,7 @@ async def terbit21_scrap(_, msg):
     else:
         try:
             title = msg.text.split(" ", 1)[1]
-            r = await http.get("https://yasirapi.eu.org/terbit21?q={title}")
+            r = await http.get(f"https://yasirapi.eu.org/terbit21?q={title}")
             res = r.json()
             if not res["result"]:
                 await m.delete()
@@ -398,7 +398,7 @@ async def terbit21_scrap(_, msg):
             await m.delete()
             head = f"#Terbit21 Results For: {title}\n\n"
             msgs = ""
-            for c, i in enumerate(res["result"][:15], start=1):
+            for c, i in enumerate(res["result"], start=1):
                 msgs += f"{c}. {i['judul']}\nCategory: {i['kategori']}\n--> Download\n\n"
                 if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                     await msg.reply(head + msgs, True, disable_web_page_preview=True)
@@ -429,7 +429,7 @@ async def lk21_scrap(_, msg):
         await m.delete()
         head = f"#Layarkaca21 Latest:\n--> Use /{msg.command[0]} [title] to start search with title.\n\n"
         msgs = ""
-        for c, i in enumerate(res["result"][:15], start=1):
+        for c, i in enumerate(res["result"], start=1):
             msgs += f"{c}. {i['judul']}\nCategory: {i['kategori']}\n--> Download\n\n"
             if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                 await msg.reply(head + msgs, True, disable_web_page_preview=True)
@@ -444,7 +444,7 @@ async def lk21_scrap(_, msg):
     else:
         try:
             title = msg.text.split(" ", 1)[1]
-            r = await http.get("https://yasirapi.eu.org/lk21?q={title}")
+            r = await http.get(f"https://yasirapi.eu.org/lk21?q={title}")
            res = r.json()
             if res.get("detail", None):
                 await m.delete()
@@ -455,7 +455,7 @@ async def lk21_scrap(_, msg):
             await m.delete()
             head = f"#Layarkaca21 Results For: {title}\n\n"
             msgs = ""
-            for c, i in enumerate(res["result"][:15], start=1):
+            for c, i in enumerate(res["result"], start=1):
                 msgs += f"{c}. {i['judul']}\nCategory: {i['kategori']}\n--> Download\n\n"
                 if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                     await msg.reply(head + msgs, True, disable_web_page_preview=True)
@@ -501,7 +501,7 @@ async def gomov_scrap(_, msg):
         head = f"#Gomov Latest:\n--> Use /{msg.command[0]} [title] to start search with title.\n\n"
         msgs = ""
         await m.delete()
-        for c, i in enumerate(data[:15], start=1):
+        for c, i in enumerate(data, start=1):
             msgs += f"{c}. {i['judul']}\nGenre: {i['genre']}\nExtract: /{msg.command[0]}_scrap {i['link']}\n\n"
             if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
                 await msg.reply(
@@ -532,7 +532,7 @@ async def savefilm21_scrap(_, message):
         soup = BeautifulSoup(html.text, "lxml")
         res = soup.find_all(class_="button button-shadow")
         res = "".join(f"{i.text}\n{i['href']}\n\n" for i in res)
-        await message.reply(f"Hasil Scrap dari {link}:\n\n{res}")
+        await message.reply(f"Hasil Scrap dari {link}:\n\n{res}", disable_web_page_preview=True)
     except IndexError:
         return await message.reply(
             f"Gunakan command /{message.command[0]} [link] untuk scrap link download"
diff --git a/misskaty/plugins/sub_extractor.py b/misskaty/plugins/sub_extractor.py
index 617899f0..66788e25 100644
--- a/misskaty/plugins/sub_extractor.py
+++ b/misskaty/plugins/sub_extractor.py
@@ -119,7 +119,7 @@ async def convertsrt(c, m):
     )
     (await shell_exec(f"mediaextract -i '{dl}' '{filename}'.srt"))[0]
     await m.reply_document(
-        f"{filename}.srt", caption=f"{filename}.srt\n\nConverted by @{c.me.username}"
+        f"{filename}.srt", caption=f"{filename}.srt\n\nConverted by @{c.me.username}"
     )
     await msg.delete()
     try: