diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index f2d90cbd..00000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "python.linting.enabled": true
-}
\ No newline at end of file
diff --git a/misskaty/plugins/grup_tools.py b/misskaty/plugins/grup_tools.py
index 01df6462..92414a83 100644
--- a/misskaty/plugins/grup_tools.py
+++ b/misskaty/plugins/grup_tools.py
@@ -45,7 +45,9 @@ def draw_multiple_line_text(image, text, font, text_start_height):
lines = textwrap.wrap(text, width=50)
for line in lines:
line_width, line_height = font.getsize(line)
- draw.text(((image_width - line_width) / 2, y_text), line, font=font, fill="black")
+ draw.text(
+ ((image_width - line_width) / 2, y_text), line, font=font, fill="black"
+ )
y_text += line_height
@@ -55,8 +57,12 @@ def welcomepic(pic, user, chat, count, id):
background = background.resize((1024, 500), Image.ANTIALIAS)
pfp = Image.open(pic).convert("RGBA")
pfp = circle(pfp)
- pfp = pfp.resize((265, 265)) # Resizes the Profilepicture so it fits perfectly in the circle
- font = ImageFont.truetype("Calistoga-Regular.ttf", 37) # <- Text Font of the Member Count. Change the text size for your preference
+ pfp = pfp.resize(
+ (265, 265)
+ ) # Resizes the Profilepicture so it fits perfectly in the circle
+ font = ImageFont.truetype(
+ "Calistoga-Regular.ttf", 37
+ ) # <- Text Font of the Member Count. Change the text size for your preference
member_text = f"User#{count}, Selamat Datang {user}" # <- Text under the Profilepicture with the Membercount
draw_multiple_line_text(background, member_text, font, 395)
draw_multiple_line_text(background, chat, font, 47)
@@ -67,15 +73,23 @@ def welcomepic(pic, user, chat, count, id):
size=20,
align="right",
)
- background.paste(pfp, (379, 123), pfp) # Pastes the Profilepicture on the Background Image
- background.save(f"downloads/welcome#{id}.png") # Saves the finished Image in the folder with the filename
+ background.paste(
+ pfp, (379, 123), pfp
+ ) # Pastes the Profilepicture on the Background Image
+ background.save(
+ f"downloads/welcome#{id}.png"
+ ) # Saves the finished Image in the folder with the filename
return f"downloads/welcome#{id}.png"
@app.on_chat_member_updated(filters.group & filters.chat(-1001128045651))
@capture_err
async def member_has_joined(c: app, member: ChatMemberUpdated):
- if not member.new_chat_member or member.new_chat_member.status in {"banned", "left", "restricted"} or member.old_chat_member:
+ if (
+ not member.new_chat_member
+ or member.new_chat_member.status in {"banned", "left", "restricted"}
+ or member.old_chat_member
+ ):
return
user = member.new_chat_member.user if member.new_chat_member else member.from_user
if user.id in SUDO:
@@ -94,15 +108,21 @@ async def member_has_joined(c: app, member: ChatMemberUpdated):
pass
mention = f"{user.first_name}"
joined_date = datetime.fromtimestamp(time.time()).strftime("%Y.%m.%d %H:%M:%S")
- first_name = f"{user.first_name} {user.last_name}" if user.last_name else user.first_name
+ first_name = (
+ f"{user.first_name} {user.last_name}" if user.last_name else user.first_name
+ )
id = user.id
dc = user.dc_id or "Member tanpa PP"
count = await app.get_chat_members_count(member.chat.id)
try:
- pic = await app.download_media(user.photo.big_file_id, file_name=f"pp{user.id}.png")
+ pic = await app.download_media(
+ user.photo.big_file_id, file_name=f"pp{user.id}.png"
+ )
except AttributeError:
pic = "img/profilepic.png"
- welcomeimg = await welcomepic(pic, user.first_name, member.chat.title, count, user.id)
+ welcomeimg = await welcomepic(
+ pic, user.first_name, member.chat.title, count, user.id
+ )
temp.MELCOW[f"welcome-{member.chat.id}"] = await c.send_photo(
member.chat.id,
photo=welcomeimg,
@@ -111,18 +131,30 @@ async def member_has_joined(c: app, member: ChatMemberUpdated):
userspammer = ""
# Spamwatch Detection
try:
- headers = {"Authorization": "Bearer XvfzE4AUNXkzCy0DnIVpFDlxZi79lt6EnwKgBj8Quuzms0OSdHvf1k6zSeyzZ_lz"}
- apispamwatch = (await http.get(f"https://api.spamwat.ch/banlist/{user.id}", headers=headers)).json()
+ headers = {
+ "Authorization": "Bearer XvfzE4AUNXkzCy0DnIVpFDlxZi79lt6EnwKgBj8Quuzms0OSdHvf1k6zSeyzZ_lz"
+ }
+ apispamwatch = (
+ await http.get(
+ f"https://api.spamwat.ch/banlist/{user.id}", headers=headers
+ )
+ ).json()
if not apispamwatch.get("error"):
- await app.ban_chat_member(member.chat.id, user.id, datetime.now() + timedelta(seconds=30))
+ await app.ban_chat_member(
+ member.chat.id, user.id, datetime.now() + timedelta(seconds=30)
+ )
userspammer += f"#SpamWatch Federation Ban\nUser {mention} [{user.id}] has been kicked because {apispamwatch.get('reason')}.\n"
except Exception as err:
LOGGER.error(f"ERROR in Spamwatch Detection. {err}")
# Combot API Detection
try:
- apicombot = (await http.get(f"https://api.cas.chat/check?user_id={user.id}")).json()
+ apicombot = (
+ await http.get(f"https://api.cas.chat/check?user_id={user.id}")
+ ).json()
if apicombot.get("ok") == "true":
- await app.ban_chat_member(member.chat.id, user.id, datetime.now() + timedelta(seconds=30))
+ await app.ban_chat_member(
+ member.chat.id, user.id, datetime.now() + timedelta(seconds=30)
+ )
userspammer += f"#CAS Federation Ban\nUser {mention} [{user.id}] detected as spambot and has been kicked. Powered by Combot AntiSpam."
except Exception as err:
LOGGER.error(f"ERROR in Combot API Detection. {err}")
@@ -151,7 +183,9 @@ async def save_group(bot, message):
await db.add_chat(message.chat.id, message.chat.title)
if message.chat.id in temp.BANNED_CHATS:
# Inspired from a boat of a banana tree
- buttons = [[InlineKeyboardButton("Support", url=f"https://t.me/{SUPPORT_CHAT}")]]
+ buttons = [
+ [InlineKeyboardButton("Support", url=f"https://t.me/{SUPPORT_CHAT}")]
+ ]
reply_markup = InlineKeyboardMarkup(buttons)
k = await message.reply(
text="CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..",
@@ -166,7 +200,9 @@ async def save_group(bot, message):
return
buttons = [
[
- InlineKeyboardButton("ℹ️ Help", url=f"https://t.me/{temp.U_NAME}?start=help"),
+ InlineKeyboardButton(
+ "ℹ️ Help", url=f"https://t.me/{temp.U_NAME}?start=help"
+ ),
InlineKeyboardButton("📢 Updates", url="https://t.me/YasirPediaChannel"),
]
]
@@ -179,10 +215,14 @@ async def save_group(bot, message):
for u in message.new_chat_members:
count = await app.get_chat_members_count(message.chat.id)
try:
- pic = await app.download_media(u.photo.big_file_id, file_name=f"pp{u.id}.png")
+ pic = await app.download_media(
+ u.photo.big_file_id, file_name=f"pp{u.id}.png"
+ )
except AttributeError:
pic = "img/profilepic.png"
- welcomeimg = await welcomepic(pic, u.first_name, message.chat.title, count, u.id)
+ welcomeimg = await welcomepic(
+ pic, u.first_name, message.chat.title, count, u.id
+ )
if (temp.MELCOW).get(f"welcome-{message.chat.id}") is not None:
try:
await (temp.MELCOW[f"welcome-{message.chat.id}"]).delete()
@@ -213,7 +253,9 @@ async def leave_a_chat(bot, message):
except:
chat = chat
try:
- buttons = [[InlineKeyboardButton("Support", url=f"https://t.me/{SUPPORT_CHAT}")]]
+ buttons = [
+ [InlineKeyboardButton("Support", url=f"https://t.me/{SUPPORT_CHAT}")]
+ ]
reply_markup = InlineKeyboardMarkup(buttons)
await bot.send_message(
chat_id=chat,
@@ -245,12 +287,16 @@ async def disable_chat(bot, message):
if not cha_t:
return await message.reply("Chat Not Found In DB")
if cha_t["is_disabled"]:
- return await message.reply(f"This chat is already disabled:\nReason- {cha_t['reason']} ")
+ return await message.reply(
+ f"This chat is already disabled:\nReason- {cha_t['reason']} "
+ )
await db.disable_chat(chat_, reason)
temp.BANNED_CHATS.append(chat_)
await message.reply("Chat Succesfully Disabled")
try:
- buttons = [[InlineKeyboardButton("Support", url=f"https://t.me/{SUPPORT_CHAT}")]]
+ buttons = [
+ [InlineKeyboardButton("Support", url=f"https://t.me/{SUPPORT_CHAT}")]
+ ]
reply_markup = InlineKeyboardMarkup(buttons)
await bot.send_message(
chat_id=chat_,
@@ -295,7 +341,9 @@ async def gen_invite(bot, message):
try:
link = await bot.create_chat_invite_link(chat)
except ChatAdminRequired:
- return await message.reply("Invite Link Generation Failed, Iam Not Having Sufficient Rights")
+ return await message.reply(
+ "Invite Link Generation Failed, Iam Not Having Sufficient Rights"
+ )
except Exception as e:
return await message.reply(f"Error {e}")
await message.reply(f"Here is your Invite Link {link.invite_link}")
@@ -308,11 +356,15 @@ async def adminlist(_, message):
return await message.reply("Perintah ini hanya untuk grup")
try:
administrators = []
- async for m in app.get_chat_members(message.chat.id, filter=enums.ChatMembersFilter.ADMINISTRATORS):
+ async for m in app.get_chat_members(
+ message.chat.id, filter=enums.ChatMembersFilter.ADMINISTRATORS
+ ):
administrators.append(f"{m.user.first_name}")
res = "".join(f"~ {i}\n" for i in administrators)
- return await message.reply(f"Daftar Admin di {message.chat.title} ({message.chat.id}):\n~ {res}")
+ return await message.reply(
+ f"Daftar Admin di {message.chat.title} ({message.chat.id}):\n~ {res}"
+ )
except Exception as e:
await message.reply(f"ERROR: {str(e)}")
@@ -324,14 +376,17 @@ async def kickme(_, message):
if len(message.text.split()) >= 2:
reason = message.text.split(None, 1)[1]
try:
- await message.ban_member(message.from_user.id)
+ await message.chat.ban_member(message.from_user.id)
txt = f"Pengguna {message.from_user.mention} menendang dirinya sendiri. Mungkin dia sedang frustasi 😕"
txt += f"\nAlasan: {reason}" if reason else ""
await message.reply_text(txt)
- await message.unban_member(message.from_user.id)
+ await message.chat.unban_member(message.from_user.id)
except RPCError as ef:
- await message.reply_text(f"Sepertinya ada error, silahkan report ke owner saya. \nERROR: {str(ef)}")
- return
+ await message.reply_text(
+ f"Sepertinya ada error, silahkan report ke owner saya. \nERROR: {str(ef)}"
+ )
+ except Exception as err:
+ await message.reply(f"ERROR: {err}")
@app.on_message(filters.command("users") & filters.user(SUDO))
diff --git a/misskaty/plugins/inline_search.py b/misskaty/plugins/inline_search.py
index 63e8d0a6..97cf380a 100644
--- a/misskaty/plugins/inline_search.py
+++ b/misskaty/plugins/inline_search.py
@@ -1,6 +1,6 @@
import json, traceback
from sys import version as pyver, platform
-from misskaty import app, user, BOT_USERNAME
+from misskaty import app, user
from motor import version as mongover
from misskaty.plugins.misc_tools import get_content
from pyrogram import __version__ as pyrover
@@ -38,11 +38,16 @@ PRVT_MSGS = {}
async def inline_menu(_, inline_query: InlineQuery):
if inline_query.query.strip().lower().strip() == "":
buttons = InlineKeyboard(row_width=2)
- buttons.add(*[(InlineKeyboardButton(text=i, switch_inline_query_current_chat=i)) for i in keywords_list])
+ buttons.add(
+ *[
+ (InlineKeyboardButton(text=i, switch_inline_query_current_chat=i))
+ for i in keywords_list
+ ]
+ )
btn = InlineKeyboard(row_width=2)
- bot_state = "Alive" if await app.get_me() else "Dead"
- ubot_state = "Alive" if await user.get_me() else "Dead"
+ bot_state = "Dead" if not await app.get_me() else "Alive"
+ ubot_state = "Dead" if not await user.get_me() else "Alive"
btn.add(
InlineKeyboardButton("Stats", callback_data="stats_callback"),
InlineKeyboardButton("Go Inline!", switch_inline_query_current_chat=""),
@@ -62,21 +67,27 @@ async def inline_menu(_, inline_query: InlineQuery):
InlineQueryResultArticle(
title="Inline Commands",
description="Help Related To Inline Usage.",
- input_message_content=InputTextMessageContent("Click A Button To Get Started."),
+ input_message_content=InputTextMessageContent(
+ "Click A Button To Get Started."
+ ),
thumb_url="https://hamker.me/cy00x5x.png",
reply_markup=buttons,
),
InlineQueryResultArticle(
- title="Github Repo",
- description="Github Repo of This Bot.",
- input_message_content=InputTextMessageContent(f"Github Repo @{BOT_USERNAME}\n\nhttps://github.com/yasirarism/MissKatyPyro"),
+ title="Github Dev",
+ description="Github Owner of Bot.",
+ input_message_content=InputTextMessageContent(
+ "https://github.com/yasirarism"
+ ),
thumb_url="https://hamker.me/gjc9fo3.png",
),
InlineQueryResultArticle(
title="Alive",
description="Check Bot's Stats",
thumb_url="https://yt3.ggpht.com/ytc/AMLnZu-zbtIsllERaGYY8Aecww3uWUASPMjLUUEt7ecu=s900-c-k-c0x00ffffff-no-rj",
- input_message_content=InputTextMessageContent(msg, disable_web_page_preview=True),
+ input_message_content=InputTextMessageContent(
+ msg, disable_web_page_preview=True
+ ),
reply_markup=btn,
),
]
@@ -89,8 +100,13 @@ async def inline_menu(_, inline_query: InlineQuery):
switch_pm_parameter="inline",
)
judul = inline_query.query.split(None, 1)[1].strip()
- headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " "Chrome/61.0.3163.100 Safari/537.36"}
- search_results = await http.get(f"https://www.google.com/search?q={judul}&num=20", headers=headers)
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
+ "Chrome/61.0.3163.100 Safari/537.36"
+ }
+ search_results = await http.get(
+ f"https://www.google.com/search?q={judul}&num=20", headers=headers
+ )
soup = BeautifulSoup(search_results.text, "lxml")
data = []
for result in soup.select(".tF2Cxc"):
@@ -113,7 +129,9 @@ async def inline_menu(_, inline_query: InlineQuery):
url=link,
description=snippet,
thumb_url="https://te.legra.ph/file/ed8ea62ae636793000bb4.jpg",
- reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(text="Open Website", url=link)]]),
+ reply_markup=InlineKeyboardMarkup(
+ [[InlineKeyboardButton(text="Open Website", url=link)]]
+ ),
)
)
await inline_query.answer(
@@ -134,7 +152,7 @@ async def inline_menu(_, inline_query: InlineQuery):
_id = inline_query.query.split()[1]
msg = inline_query.query.split(None, 2)[2].strip()
- if not msg or not msg.endswith(":"):
+ if not (msg and msg.endswith(":")):
inline_query.stop_propagation()
try:
@@ -151,7 +169,11 @@ async def inline_menu(_, inline_query: InlineQuery):
)
prvte_msg = InlineKeyboardMarkup(
[
- [InlineKeyboardButton("Show Message 🔐", callback_data=f"prvtmsg({inline_query.id})")],
+ [
+ InlineKeyboardButton(
+ "Show Message 🔐", callback_data=f"prvtmsg({inline_query.id})"
+ )
+ ],
[
InlineKeyboardButton(
"Destroy☠️ this msg",
@@ -160,9 +182,14 @@ async def inline_menu(_, inline_query: InlineQuery):
],
]
)
- mention = f"@{penerima.username}" if penerima.username else f"{penerima.first_name}"
-
- msg_c = f"🔒 A private message to {mention} [{penerima.id}], "
+    mention = (
+        f"@{penerima.username}" if penerima.username else f"{penerima.first_name}"
+    )
+    msg_c = f"🔒 A private message to {mention} [{penerima.id}], "
msg_c += "Only he/she can open it."
results = [
InlineQueryResultArticle(
@@ -182,7 +209,9 @@ async def inline_menu(_, inline_query: InlineQuery):
switch_pm_parameter="inline",
)
query = inline_query.query.split(None, 1)[1].strip()
- search_results = await http.get(f"https://api.github.com/search/repositories?q={query}")
+ search_results = await http.get(
+ f"https://api.github.com/search/repositories?q={query}"
+ )
srch_results = json.loads(search_results.text)
item = srch_results.get("items")
data = []
@@ -205,7 +234,9 @@ async def inline_menu(_, inline_query: InlineQuery):
url=link,
description=deskripsi,
thumb_url="https://github.githubassets.com/images/modules/logos_page/GitHub-Mark.png",
- reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(text="Open Github Link", url=link)]]),
+ reply_markup=InlineKeyboardMarkup(
+ [[InlineKeyboardButton(text="Open Github Link", url=link)]]
+ ),
)
)
await inline_query.answer(
@@ -224,7 +255,9 @@ async def inline_menu(_, inline_query: InlineQuery):
switch_pm_parameter="inline",
)
query = inline_query.query.split(None, 1)[1].strip()
- search_results = await http.get(f"https://api.hayo.my.id/api/pypi?package={query}")
+ search_results = await http.get(
+ f"https://api.hayo.my.id/api/pypi?package={query}"
+ )
srch_results = json.loads(search_results.text)
data = []
for sraeo in srch_results:
@@ -245,7 +278,9 @@ async def inline_menu(_, inline_query: InlineQuery):
url=link,
description=deskripsi,
thumb_url="https://raw.githubusercontent.com/github/explore/666de02829613e0244e9441b114edb85781e972c/topics/pip/pip.png",
- reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(text="Open Link", url=link)]]),
+ reply_markup=InlineKeyboardMarkup(
+ [[InlineKeyboardButton(text="Open Link", url=link)]]
+ ),
)
)
await inline_query.answer(
@@ -264,7 +299,9 @@ async def inline_menu(_, inline_query: InlineQuery):
switch_pm_parameter="inline",
)
judul = inline_query.query.split(None, 1)[1].strip()
- search_results = await http.get(f"https://api.abir-hasan.tk/youtube?query={judul}")
+ search_results = await http.get(
+ f"https://api.abir-hasan.tk/youtube?query={judul}"
+ )
srch_results = json.loads(search_results.text)
asroe = srch_results.get("results")
oorse = []
@@ -276,7 +313,9 @@ async def inline_menu(_, inline_query: InlineQuery):
durasi = sraeo.get("accessibility").get("duration")
publishTime = sraeo.get("publishedTime")
try:
- deskripsi = "".join(f"{i['text']} " for i in sraeo.get("descriptionSnippet"))
+ deskripsi = "".join(
+ f"{i['text']} " for i in sraeo.get("descriptionSnippet")
+ )
except:
deskripsi = "-"
message_text = f"{title}\n"
@@ -295,7 +334,9 @@ async def inline_menu(_, inline_query: InlineQuery):
url=link,
description=deskripsi,
thumb_url=thumb,
- reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(text="Watch Video 📹", url=link)]]),
+ reply_markup=InlineKeyboardMarkup(
+ [[InlineKeyboardButton(text="Watch Video 📹", url=link)]]
+ ),
)
)
await inline_query.answer(
@@ -314,7 +355,9 @@ async def inline_menu(_, inline_query: InlineQuery):
switch_pm_parameter="inline",
)
movie_name = inline_query.query.split(None, 1)[1].strip()
- search_results = await http.get(f"https://yasirapi.eu.org/imdb-search?q={movie_name}")
+ search_results = await http.get(
+ f"https://yasirapi.eu.org/imdb-search?q={movie_name}"
+ )
res = json.loads(search_results.text).get("result")
oorse = []
for midb in res:
@@ -323,7 +366,11 @@ async def inline_menu(_, inline_query: InlineQuery):
stars = midb.get("s", "")
imdb_url = f"https://imdb.com/title/{midb.get('id')}"
year = f"({midb.get('y')})" if midb.get("y") else ""
- image_url = midb.get("i").get("imageUrl").replace(".jpg", "._V1_UX360.jpg") if midb.get("i") else "https://te.legra.ph/file/e263d10ff4f4426a7c664.jpg"
+ image_url = (
+ midb.get("i").get("imageUrl").replace(".jpg", "._V1_UX360.jpg")
+ if midb.get("i")
+ else "https://telegra.ph/file/270955ef0d1a8a16831a9.jpg"
+ )
caption = f"🎬"
caption += f"{title} {year}"
oorse.append(
@@ -360,15 +407,15 @@ async def prvt_msg(_, c_q):
msg_id = str(c_q.matches[0].group(1))
if msg_id not in PRVT_MSGS:
- await c_q.answer("Message now outdated !", show_alert=True)
+ await c_q.answer("message now outdated !", show_alert=True)
return
user_id, flname, sender_id, msg = PRVT_MSGS[msg_id]
     if c_q.from_user.id in [user_id, sender_id]:
await c_q.answer(msg, show_alert=True)
else:
- await c_q.answer(f"Only {flname} can see this Private Msg!", show_alert=True)
+ await c_q.answer(f"only {flname} can see this Private Msg!", show_alert=True)
@app.on_callback_query(filters.regex(r"destroy\((.+)\)"))
@@ -376,17 +423,17 @@ async def destroy_msg(_, c_q):
msg_id = str(c_q.matches[0].group(1))
if msg_id not in PRVT_MSGS:
- await c_q.answer("Message now outdated !", show_alert=True)
+ await c_q.answer("message now outdated !", show_alert=True)
return
user_id, flname, sender_id, msg = PRVT_MSGS[msg_id]
     if c_q.from_user.id in [user_id, sender_id]:
del PRVT_MSGS[msg_id]
by = "receiver" if c_q.from_user.id == user_id else "sender"
await c_q.edit_message_text(f"This secret message is ☠️destroyed☠️ by msg {by}")
else:
- await c_q.answer(f"Only {flname} can see this Private Msg!", show_alert=True)
+ await c_q.answer(f"only {flname} can see this Private Msg!", show_alert=True)
@app.on_callback_query(filters.regex("^imdbinl_"))
@@ -398,46 +445,82 @@ async def imdb_inl(_, query):
url = f"https://www.imdb.com/title/{movie}/"
resp = await get_content(url)
sop = BeautifulSoup(resp, "lxml")
- r_json = json.loads(sop.find("script", attrs={"type": "application/ld+json"}).contents[0])
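+    # Parse the JSON-LD metadata blob that IMDb embeds in each title page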
+ r_json = json.loads(
+ sop.find("script", attrs={"type": "application/ld+json"}).contents[0]
+ )
res_str = ""
type = f"{r_json['@type']}" if r_json.get("@type") else ""
if r_json.get("name"):
try:
- tahun = sop.select('ul[data-testid="hero-title-block__metadata"]')[0].find(class_="sc-8c396aa2-2 itZqyK").text
+ tahun = (
+ sop.select('ul[data-testid="hero-title-block__metadata"]')[0]
+ .find(class_="sc-8c396aa2-2 itZqyK")
+ .text
+ )
except:
tahun = "-"
res_str += f"📹 Judul: {r_json['name']} [{tahun}] ({type})\n"
if r_json.get("alternateName"):
- res_str += f"📢 AKA: {r_json.get('alternateName')}\n\n"
+ res_str += (
+ f"📢 AKA: {r_json.get('alternateName')}\n\n"
+ )
else:
res_str += "\n"
if sop.select('li[data-testid="title-techspec_runtime"]'):
- durasi = sop.select('li[data-testid="title-techspec_runtime"]')[0].find(class_="ipc-metadata-list-item__content-container").text
+ durasi = (
+ sop.select('li[data-testid="title-techspec_runtime"]')[0]
+ .find(class_="ipc-metadata-list-item__content-container")
+ .text
+ )
res_str += f"Durasi: {GoogleTranslator('auto', 'id').translate(durasi)}\n"
if r_json.get("contentRating"):
res_str += f"Kategori: {r_json['contentRating']} \n"
if r_json.get("aggregateRating"):
res_str += f"Peringkat: {r_json['aggregateRating']['ratingValue']}⭐️ dari {r_json['aggregateRating']['ratingCount']} pengguna \n"
if sop.select('li[data-testid="title-details-releasedate"]'):
- rilis = sop.select('li[data-testid="title-details-releasedate"]')[0].find(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link").text
- rilis_url = sop.select('li[data-testid="title-details-releasedate"]')[0].find(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link")["href"]
+ rilis = (
+ sop.select('li[data-testid="title-details-releasedate"]')[0]
+ .find(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )
+ .text
+ )
+ rilis_url = sop.select('li[data-testid="title-details-releasedate"]')[
+ 0
+ ].find(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )[
+ "href"
+ ]
res_str += f"Rilis: {rilis}\n"
if r_json.get("genre"):
- genre = "".join(f"{GENRES_EMOJI[i]} #{i.replace('-', '_').replace(' ', '_')}, " if i in GENRES_EMOJI else f"#{i.replace('-', '_').replace(' ', '_')}, " for i in r_json["genre"])
-
+ genre = ""
+ for i in r_json["genre"]:
+ if i in GENRES_EMOJI:
+ genre += f"{GENRES_EMOJI[i]} #{i.replace('-', '_').replace(' ', '_')}, "
+ else:
+ genre += f"#{i.replace('-', '_').replace(' ', '_')}, "
genre = genre[:-2]
res_str += f"Genre: {genre}\n"
if sop.select('li[data-testid="title-details-origin"]'):
country = "".join(
f"{demoji(country.text)} #{country.text.replace(' ', '_').replace('-', '_')}, "
- for country in sop.select('li[data-testid="title-details-origin"]')[0].findAll(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link")
+ for country in sop.select('li[data-testid="title-details-origin"]')[
+ 0
+ ].findAll(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )
)
country = country[:-2]
res_str += f"Negara: {country}\n"
if sop.select('li[data-testid="title-details-languages"]'):
language = "".join(
f"#{lang.text.replace(' ', '_').replace('-', '_')}, "
- for lang in sop.select('li[data-testid="title-details-languages"]')[0].findAll(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link")
+ for lang in sop.select('li[data-testid="title-details-languages"]')[
+ 0
+ ].findAll(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )
)
language = language[:-2]
res_str += f"Bahasa: {language}\n"
@@ -468,7 +551,9 @@ async def imdb_inl(_, query):
actors = actors[:-2]
res_str += f"Pemeran: {actors}\n\n"
if r_json.get("description"):
- summary = GoogleTranslator("auto", "id").translate(r_json.get("description"))
+ summary = GoogleTranslator("auto", "id").translate(
+ r_json.get("description")
+ )
res_str += f"📜 Plot: {summary}\n\n"
if r_json.get("keywords"):
keywords = r_json["keywords"].split(",")
@@ -479,11 +564,15 @@ async def imdb_inl(_, query):
key_ = key_[:-2]
res_str += f"🔥 Kata Kunci: {key_} \n"
if sop.select('li[data-testid="award_information"]'):
- awards = sop.select('li[data-testid="award_information"]')[0].find(class_="ipc-metadata-list-item__list-content-item").text
+ awards = (
+ sop.select('li[data-testid="award_information"]')[0]
+ .find(class_="ipc-metadata-list-item__list-content-item")
+ .text
+ )
res_str += f"🏆 Penghargaan: {GoogleTranslator('auto', 'id').translate(awards)}\n\n"
else:
res_str += "\n"
- res_str += f"©️ IMDb by @{BOT_USERNAME}"
+ res_str += "©️ IMDb by @MissKatyRoBot"
if r_json.get("trailer"):
trailer_url = r_json["trailer"]["url"]
markup = InlineKeyboardMarkup(
diff --git a/misskaty/plugins/misc_tools.py b/misskaty/plugins/misc_tools.py
index 9c219b3e..1240541e 100644
--- a/misskaty/plugins/misc_tools.py
+++ b/misskaty/plugins/misc_tools.py
@@ -3,6 +3,7 @@ import aiohttp
from bs4 import BeautifulSoup
import json
import traceback
+import requests
from pyrogram import Client, filters
from deep_translator import GoogleTranslator
from gtts import gTTS
@@ -14,19 +15,24 @@ from pyrogram.errors import (
WebpageMediaEmpty,
MessageTooLong,
)
-from misskaty.vars import COMMAND_HANDLER
+from info import COMMAND_HANDLER
from utils import extract_user, get_file_id, demoji
import time
from datetime import datetime
-from logging import getLogger
from pykeyboard import InlineKeyboard
-from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
+from pyrogram.types import (
+ InlineKeyboardMarkup,
+ InlineKeyboardButton,
+ CallbackQuery,
+ InputMediaPhoto,
+)
from misskaty.core.decorator.errors import capture_err
from misskaty.helper.tools import rentry, GENRES_EMOJI
-from misskaty.helper.http import http
-from misskaty import app, BOT_USERNAME
+from misskaty import app
+import logging
-LOGGER = getLogger(__name__)
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.ERROR)
__MODULE__ = "Misc"
__HELP__ = """
@@ -52,18 +58,26 @@ def remove_html_tags(text):
async def stackoverflow(client, message):
if len(message.command) == 1:
return await message.reply("Give a query to search in StackOverflow!")
- r = (await http.get(f"https://api.stackexchange.com/2.3/search/excerpts?order=asc&sort=relevance&q={message.command[1]}&accepted=True&migrated=False¬ice=False&wiki=False&site=stackoverflow")).json()
+ r = (
+ requests.get(
+ f"https://api.stackexchange.com/2.3/search/excerpts?order=asc&sort=relevance&q={message.command[1]}&accepted=True&migrated=False¬ice=False&wiki=False&site=stackoverflow"
+ )
+ ).json()
hasil = ""
for count, data in enumerate(r["items"], start=1):
question = data["question_id"]
title = data["title"]
- snippet = remove_html_tags(data["excerpt"])[:80].replace("\n", "").replace(" ", "") if len(remove_html_tags(data["excerpt"])) > 80 else remove_html_tags(data["excerpt"]).replace("\n", "").replace(" ", "")
+ snippet = (
+ remove_html_tags(data["excerpt"])[:80].replace("\n", "").replace(" ", "")
+ if len(remove_html_tags(data["excerpt"])) > 80
+ else remove_html_tags(data["excerpt"]).replace("\n", "").replace(" ", "")
+ )
hasil += f"{count}. {title}\n{snippet}\n"
try:
await message.reply(hasil)
except MessageTooLong:
url = await rentry(hasil)
- await r.edit(f"Your text pasted to rentry because has long text:\n{url}")
+ await msg.edit(f"Your text pasted to rentry because has long text:\n{url}")
except Exception as e:
await message.reply(e)
@@ -76,8 +90,11 @@ async def gsearch(client, message):
query = message.text.split(" ", maxsplit=1)[1]
msg = await message.reply_text(f"**Googling** for `{query}` ...")
try:
- headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " "Chrome/61.0.3163.100 Safari/537.36"}
- html = await http.get(
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
+ "Chrome/61.0.3163.100 Safari/537.36"
+ }
+ html = requests.get(
f"https://www.google.com/search?q={query}&gl=id&hl=id&num=17",
headers=headers,
)
@@ -105,12 +122,14 @@ async def gsearch(client, message):
arr = json.dumps(data, indent=2, ensure_ascii=False)
parse = json.loads(arr)
total = len(parse)
- res = "".join(f"{i['title']}\n{i['snippet']}\n\n" for i in parse)
+ res = "".join(
+ f"{i['title']}\n{i['snippet']}\n\n" for i in parse
+ )
except Exception:
exc = traceback.format_exc()
return await msg.edit(exc)
await msg.edit(
- text=f"Ada {total} Hasil Pencarian dari {query}:\n{res}Scraped by @{BOT_USERNAME}",
+ text=f"Ada {total} Hasil Pencarian dari {query}:\n{res}Scraped by @MissKatyRoBot",
disable_web_page_preview=True,
)
@@ -118,30 +137,45 @@ async def gsearch(client, message):
@app.on_message(filters.command(["tr", "trans", "translate"], COMMAND_HANDLER))
@capture_err
async def translate(client, message):
- if message.reply_to_message and (message.reply_to_message.text or message.reply_to_message.caption):
- target_lang = "id" if len(message.command) == 1 else message.text.split()[1]
+ if message.reply_to_message and (
+ message.reply_to_message.text or message.reply_to_message.caption
+ ):
+ if len(message.command) == 1:
+ target_lang = "id"
+ else:
+ target_lang = message.text.split()[1]
text = message.reply_to_message.text or message.reply_to_message.caption
else:
         if len(message.command) < 3:
return await message.reply_text(
"Berikan Kode bahasa yang valid.\n[Available options](https://telegra.ph/Lang-Codes-11-08).\nUsage: /tr en",
)
target_lang = message.text.split(None, 2)[1]
text = message.text.split(None, 2)[2]
msg = await message.reply("Menerjemahkan...")
- my_translator = GoogleTranslator(source='auto', target=target_lang)
try:
- result = my_translator.translate(text=text)
- return await msg.edit(f"Translation using source = {my_translator.source} and target = {my_translator.target}\n\n-> {result}")
+        tekstr = (
+            requests.get(
+                f"https://script.google.com/macros/s/AKfycbyhNk6uVgrtJLEFRUT6y5B2pxETQugCZ9pKvu01-bE1gKkDRsw/exec?q={text}&target={target_lang}"
+            )
+        ).json()["text"]
+ except Exception as err:
+ return await msg.edit(f"Error: {str(err)}")
+ try:
+ await msg.edit(f"{tekstr}")
except MessageTooLong:
- url = await rentry(result)
- return await msg.edit(f"Your translated text pasted to rentry because has long text:\n{url}")
+        url = await rentry(tekstr)
+        await msg.edit(
+            f"Your translated text was pasted to rentry because it is too long:\n{url}"
+        )
@app.on_message(filters.command(["tts"], COMMAND_HANDLER))
@capture_err
async def tts(_, message):
- if message.reply_to_message and (message.reply_to_message.text or message.reply_to_message.caption):
+ if message.reply_to_message and (
+ message.reply_to_message.text or message.reply_to_message.caption
+ ):
if len(message.text.split()) == 1:
target_lang = "id"
else:
@@ -170,7 +204,9 @@ async def tts(_, message):
pass
-@app.on_message(filters.command(["tosticker"], COMMAND_HANDLER))
+@app.on_message(
+ filters.command(["tosticker", "tosticker@MissKatyRoBot"], COMMAND_HANDLER)
+)
@capture_err
async def tostick(client, message):
try:
@@ -186,26 +222,30 @@ async def tostick(client, message):
await message.reply_text(str(e))
-@app.on_message(filters.command(["toimage"], COMMAND_HANDLER))
+@app.on_message(filters.command(["toimage", "toimage@MissKatyRoBot"], COMMAND_HANDLER))
@capture_err
async def topho(client, message):
try:
if not message.reply_to_message or not message.reply_to_message.sticker:
return await message.reply_text("Reply ke sticker untuk mengubah ke foto")
if message.reply_to_message.sticker.is_animated:
- return await message.reply_text("Ini sticker animasi, command ini hanya untuk sticker biasa.")
+ return await message.reply_text(
+ "Ini sticker animasi, command ini hanya untuk sticker biasa."
+ )
photo = await client.download_media(
message.reply_to_message.sticker.file_id,
f"tostick_{message.from_user.id}.jpg",
)
- await message.reply_photo(photo=photo, caption=f"Sticker -> Image\n@{BOT_USERNAME}")
+ await message.reply_photo(
+ photo=photo, caption="Sticker -> Image\n@MissKatyRoBot"
+ )
os.remove(photo)
except Exception as e:
await message.reply_text(str(e))
-@app.on_message(filters.command(["id"], COMMAND_HANDLER))
+@app.on_message(filters.command(["id", "id@MissKatyRoBot"], COMMAND_HANDLER))
async def showid(client, message):
chat_type = message.chat.type
if chat_type == "private":
@@ -231,14 +271,20 @@ async def showid(client, message):
)
file_info = get_file_id(message.reply_to_message)
else:
- _id += "➲ User ID: " f"{message.from_user.id if message.from_user else 'Anonymous'}\n"
+ _id += (
+ "➲ User ID: "
+ f"{message.from_user.id if message.from_user else 'Anonymous'}\n"
+ )
file_info = get_file_id(message)
if file_info:
- _id += f"{file_info.message_type}: " f"{file_info.file_id}\n"
+ _id += (
+ f"{file_info.message_type}: "
+ f"{file_info.file_id}\n"
+ )
await message.reply_text(_id, quote=True)
-@app.on_message(filters.command(["info"], COMMAND_HANDLER))
+@app.on_message(filters.command(["info", "info@MissKatyRoBot"], COMMAND_HANDLER))
async def who_is(client, message):
# https://github.com/SpEcHiDe/PyroGramBot/blob/master/pyrobot/plugins/admemes/whois.py#L19
status_message = await message.reply_text("`Fetching user info...`")
@@ -265,13 +311,23 @@ async def who_is(client, message):
if message.chat.type in (("supergroup", "channel")):
try:
chat_member_p = await message.chat.get_member(from_user.id)
- joined_date = datetime.fromtimestamp(chat_member_p.joined_date or time.time()).strftime("%Y.%m.%d %H:%M:%S")
- message_out_str += "➲Joined this Chat on: " f"{joined_date}" "\n"
+ joined_date = datetime.fromtimestamp(
+ chat_member_p.joined_date or time.time()
+ ).strftime("%Y.%m.%d %H:%M:%S")
+            message_out_str += "➲Joined this Chat on: " f"{joined_date}" "\n"
except UserNotParticipant:
pass
if chat_photo := from_user.photo:
local_user_photo = await client.download_media(message=chat_photo.big_file_id)
- buttons = [[InlineKeyboardButton("🔐 Close", callback_data="close_data")]]
+ buttons = [
+ [
+ InlineKeyboardButton(
+ "🔐 Close", callback_data=f"close#{message.from_user.id}"
+ )
+ ]
+ ]
reply_markup = InlineKeyboardMarkup(buttons)
await message.reply_photo(
photo=local_user_photo,
@@ -282,7 +338,13 @@ async def who_is(client, message):
)
os.remove(local_user_photo)
else:
- buttons = [[InlineKeyboardButton("🔐 Close", callback_data="close_data")]]
+ buttons = [
+ [
+ InlineKeyboardButton(
+ "🔐 Close", callback_data=f"close#{message.from_user.id}"
+ )
+ ]
+ ]
reply_markup = InlineKeyboardMarkup(buttons)
await message.reply_text(
text=message_out_str,
@@ -293,7 +355,17 @@ async def who_is(client, message):
await status_message.delete()
-headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.1.17 (KHTML, like Gecko) Version/7.1 Safari/537.85.10"}
+@app.on_callback_query(filters.regex("^close"))
+async def close_callback(bot: Client, query: CallbackQuery):
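+    # Callback data has the form "close#<user_id>"; only that user may close the message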
+ i, userid = query.data.split("#")
+ if query.from_user.id != int(userid):
+ return await query.answer("⚠️ Access Denied!", True)
+ await query.message.delete()
+
+
+headers = {
+ "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.1.17 (KHTML, like Gecko) Version/7.1 Safari/537.85.10"
+}
async def get_content(url):
@@ -344,18 +416,24 @@ async def mdl_callback(bot: Client, query: CallbackQuery):
await query.message.edit_text("Permintaan kamu sedang diproses.. ")
result = ""
try:
- res = (await http.get(f"https://kuryana.vercel.app/id/{slug}")).json()
+ res = requests.get(f"https://kuryana.vercel.app/id/{slug}").json()
result += f"Title: {res['data']['title']}\n"
- result += f"AKA: {res['data']['others']['also_known_as']}\n\n"
+ result += (
+ f"AKA: {res['data']['others']['also_known_as']}\n\n"
+ )
result += f"Rating: {res['data']['details']['score']}\n"
result += f"Content Rating: {res['data']['details']['content_rating']}\n"
result += f"Type: {res['data']['details']['type']}\n"
- result += f"Country: {res['data']['details']['country']}\n"
+ result += (
+ f"Country: {res['data']['details']['country']}\n"
+ )
if res["data"]["details"]["type"] == "Movie":
result += f"Release Date: {res['data']['details']['release_date']}\n"
elif res["data"]["details"]["type"] == "Drama":
result += f"Episode: {res['data']['details']['episodes']}\n"
- result += f"Aired: {res['data']['details']['aired']}\n"
+ result += (
+ f"Aired: {res['data']['details']['aired']}\n"
+ )
try:
result += f"Aired on: {res['data']['details']['aired_on']}\n"
except:
@@ -364,11 +442,17 @@ async def mdl_callback(bot: Client, query: CallbackQuery):
result += f"Original Network: {res['data']['details']['original_network']}\n"
except:
pass
- result += f"Duration: {res['data']['details']['duration']}\n"
- result += f"Genre: {res['data']['others']['genres']}\n\n"
+ result += (
+ f"Duration: {res['data']['details']['duration']}\n"
+ )
+ result += (
+ f"Genre: {res['data']['others']['genres']}\n\n"
+ )
result += f"Synopsis: {res['data']['synopsis']}\n"
result += f"Tags: {res['data']['others']['tags']}\n"
- btn = InlineKeyboardMarkup([[InlineKeyboardButton("🎬 Open MyDramaList", url=res["data"]["link"])]])
+ btn = InlineKeyboardMarkup(
+ [[InlineKeyboardButton("🎬 Open MyDramaList", url=res["data"]["link"])]]
+ )
await query.message.edit_text(result, reply_markup=btn)
except Exception as e:
await query.message.edit_text(f"ERROR:\n{e}")
@@ -382,18 +466,26 @@ async def mdl_callback(bot: Client, query: CallbackQuery):
async def imdb1_search(client, message):
BTN = []
if message.sender_chat:
- return await message.reply("Mohon maaf fitur tidak tersedia untuk akun channel, harap ganti ke akun biasa..")
+ return await message.reply(
+ "Mohon maaf fitur tidak tersedia untuk akun channel, harap ganti ke akun biasa.."
+ )
if len(message.command) == 1:
- return await message.reply("Berikan aku nama series atau movie yang ingin dicari. 🤷🏻♂️", quote=True)
+ return await message.reply(
+ "Berikan aku nama series atau movie yang ingin dicari. 🤷🏻♂️", quote=True
+ )
r, judul = message.text.split(None, 1)
- k = await message.reply("🔎 Sedang mencari di Database IMDB..", quote=True)
+ k = await message.reply_photo(
+ "AgACAgIAAxkBAAEDNDdjp-jrxffihNHv2UiQ63B2p3txagACL8QxG0hfQEkmUDEHspsSUgAIAQADAgADeAAHHgQ",
+ caption="🔎 Sedang mencari di Database IMDB..",
+ quote=True,
+ )
msg = ""
buttons = InlineKeyboard(row_width=4)
try:
r = await get_content(f"https://yasirapi.eu.org/imdb-search?q={judul}")
res = json.loads(r).get("result")
if not res:
- return await k.edit("Tidak ada hasil ditemukan.. 😕")
+ return await k.edit_caption("Tidak ada hasil ditemukan.. 😕")
msg += f"Ditemukan {len(res)} query dari {judul} ~ {message.from_user.mention}\n\n"
for count, movie in enumerate(res, start=1):
title = movie.get("l")
@@ -401,30 +493,46 @@ async def imdb1_search(client, message):
type = movie.get("q").replace("feature", "movie").capitalize()
movieID = re.findall(r"tt(\d+)", movie.get("id"))[0]
msg += f"{count}. {title} {year} ~ {type}\n"
- BTN.append(InlineKeyboardButton(text=count, callback_data=f"imdbid#{message.from_user.id}#{movieID}"))
+ BTN.append(
+ InlineKeyboardButton(
+ text=count, callback_data=f"imdbid#{message.from_user.id}#{movieID}"
+ )
+ )
+ BTN.append(
+ InlineKeyboardButton(
+ text="❌ Close", callback_data=f"close#{message.from_user.id}"
+ )
+ )
buttons.add(*BTN)
- await k.edit(msg, reply_markup=buttons)
+ await k.edit_caption(msg, reply_markup=buttons)
except Exception as err:
- await k.edit(f"Ooppss, gagal mendapatkan daftar judul di IMDb.\n\nERROR: {err}")
+        await k.edit_caption(
+            f"Oops, gagal mendapatkan daftar judul di IMDb.\n\nERROR: {err}"
+        )
@app.on_callback_query(filters.regex("^imdbid"))
async def imdbcb_backup(bot: Client, query: CallbackQuery):
- usr = query.message.reply_to_message
i, userid, movie = query.data.split("#")
if query.from_user.id != int(userid):
return await query.answer("⚠️ Akses Ditolak!", True)
try:
- await query.message.edit_text("Permintaan kamu sedang diproses.. ")
+ await query.message.edit_caption("Permintaan kamu sedang diproses.. ")
url = f"https://www.imdb.com/title/tt{movie}/"
resp = await get_content(url)
sop = BeautifulSoup(resp, "lxml")
- r_json = json.loads(sop.find("script", attrs={"type": "application/ld+json"}).contents[0])
+ r_json = json.loads(
+ sop.find("script", attrs={"type": "application/ld+json"}).contents[0]
+ )
res_str = ""
type = f"{r_json['@type']}" if r_json.get("@type") else ""
if r_json.get("name"):
try:
- tahun = sop.select('ul[data-testid="hero-title-block__metadata"]')[0].find(class_="sc-8c396aa2-2 itZqyK").text
+ tahun = (
+ sop.select('ul[data-testid="hero-title-block__metadata"]')[0]
+ .find(class_="sc-8c396aa2-2 itZqyK")
+ .text
+ )
except:
tahun = "-"
res_str += f"📹 Judul: {r_json['name']} [{tahun}] ({type})\n"
@@ -433,31 +541,64 @@ async def imdbcb_backup(bot: Client, query: CallbackQuery):
else:
res_str += "\n"
if sop.select('li[data-testid="title-techspec_runtime"]'):
- durasi = sop.select('li[data-testid="title-techspec_runtime"]')[0].find(class_="ipc-metadata-list-item__content-container").text
+ durasi = (
+ sop.select('li[data-testid="title-techspec_runtime"]')[0]
+ .find(class_="ipc-metadata-list-item__content-container")
+ .text
+ )
res_str += f"Durasi: {GoogleTranslator('auto', 'id').translate(durasi)}\n"
if r_json.get("contentRating"):
res_str += f"Kategori: {r_json['contentRating']} \n"
if r_json.get("aggregateRating"):
res_str += f"Peringkat: {r_json['aggregateRating']['ratingValue']}⭐️ dari {r_json['aggregateRating']['ratingCount']} pengguna \n"
if sop.select('li[data-testid="title-details-releasedate"]'):
- rilis = sop.select('li[data-testid="title-details-releasedate"]')[0].find(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link").text
- rilis_url = sop.select('li[data-testid="title-details-releasedate"]')[0].find(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link")["href"]
- res_str += f"Rilis: {rilis}\n"
+ rilis = (
+ sop.select('li[data-testid="title-details-releasedate"]')[0]
+ .find(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )
+ .text
+ )
+ rilis_url = sop.select('li[data-testid="title-details-releasedate"]')[
+ 0
+ ].find(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )[
+ "href"
+ ]
+            res_str += f"Rilis: {rilis}\n"
if r_json.get("genre"):
- genre = "".join(f"{GENRES_EMOJI[i]} #{i.replace('-', '_').replace(' ', '_')}, " if i in GENRES_EMOJI else f"#{i.replace('-', '_').replace(' ', '_')}, " for i in r_json["genre"])
-
+ genre = ""
+ for i in r_json["genre"]:
+ if i in GENRES_EMOJI:
+ genre += (
+ f"{GENRES_EMOJI[i]} #{i.replace('-', '_').replace(' ', '_')}, "
+ )
+ else:
+ genre += f"#{i.replace('-', '_').replace(' ', '_')}, "
genre = genre[:-2]
res_str += f"Genre: {genre}\n"
if sop.select('li[data-testid="title-details-origin"]'):
country = "".join(
f"{demoji(country.text)} #{country.text.replace(' ', '_').replace('-', '_')}, "
- for country in sop.select('li[data-testid="title-details-origin"]')[0].findAll(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link")
+ for country in sop.select('li[data-testid="title-details-origin"]')[
+ 0
+ ].findAll(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )
)
country = country[:-2]
res_str += f"Negara: {country}\n"
if sop.select('li[data-testid="title-details-languages"]'):
language = "".join(
- f"#{lang.text.replace(' ', '_').replace('-', '_')}, " for lang in sop.select('li[data-testid="title-details-languages"]')[0].findAll(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link")
+ f"#{lang.text.replace(' ', '_').replace('-', '_')}, "
+ for lang in sop.select('li[data-testid="title-details-languages"]')[
+ 0
+ ].findAll(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )
)
language = language[:-2]
res_str += f"Bahasa: {language}\n"
@@ -488,7 +629,9 @@ async def imdbcb_backup(bot: Client, query: CallbackQuery):
actors = actors[:-2]
res_str += f"Pemeran: {actors}\n\n"
if r_json.get("description"):
- summary = GoogleTranslator("auto", "id").translate(r_json.get("description"))
+ summary = GoogleTranslator("auto", "id").translate(
+ r_json.get("description")
+ )
res_str += f"📜 Plot: {summary}\n\n"
if r_json.get("keywords"):
keywords = r_json["keywords"].split(",")
@@ -499,56 +642,56 @@ async def imdbcb_backup(bot: Client, query: CallbackQuery):
key_ = key_[:-2]
res_str += f"🔥 Kata Kunci: {key_} \n"
if sop.select('li[data-testid="award_information"]'):
- awards = sop.select('li[data-testid="award_information"]')[0].find(class_="ipc-metadata-list-item__list-content-item").text
+ awards = (
+ sop.select('li[data-testid="award_information"]')[0]
+ .find(class_="ipc-metadata-list-item__list-content-item")
+ .text
+ )
res_str += f"🏆 Penghargaan: {GoogleTranslator('auto', 'id').translate(awards)}\n\n"
else:
res_str += "\n"
- res_str += f"©️ IMDb by @{BOT_USERNAME}"
+ res_str += "©️ IMDb by @MissKatyRoBot"
if r_json.get("trailer"):
trailer_url = r_json["trailer"]["url"]
markup = InlineKeyboardMarkup(
[
[
- InlineKeyboardButton("🎬 Open IMDB", url=f"https://www.imdb.com{r_json['url']}"),
+ InlineKeyboardButton(
+ "🎬 Open IMDB", url=f"https://www.imdb.com{r_json['url']}"
+ ),
InlineKeyboardButton("▶️ Trailer", url=trailer_url),
]
]
)
else:
- markup = InlineKeyboardMarkup([[InlineKeyboardButton("🎬 Open IMDB", url=f"https://www.imdb.com{r_json['url']}")]])
+ markup = InlineKeyboardMarkup(
+ [
+ [
+ InlineKeyboardButton(
+ "🎬 Open IMDB", url=f"https://www.imdb.com{r_json['url']}"
+ )
+ ]
+ ]
+ )
if thumb := r_json.get("image"):
try:
- await query.message.reply_photo(
- photo=thumb,
- quote=True,
- caption=res_str,
- reply_to_message_id=usr.id,
- reply_markup=markup,
+ await query.message.edit_media(
+ InputMediaPhoto(thumb, caption=res_str), reply_markup=markup
)
except (MediaEmpty, PhotoInvalidDimensions, WebpageMediaEmpty):
poster = thumb.replace(".jpg", "._V1_UX360.jpg")
- await query.message.reply_photo(
- photo=poster,
- caption=res_str,
- reply_to_message_id=usr.id,
- reply_markup=markup,
+ await query.message.edit_media(
+ InputMediaPhoto(poster, caption=res_str), reply_markup=markup
)
except Exception:
- await query.message.reply(
- res_str,
- reply_markup=markup,
- disable_web_page_preview=False,
- reply_to_message_id=usr.id,
- )
- await query.message.delete()
+ await query.message.edit_caption(res_str, reply_markup=markup)
else:
- await query.message.edit(res_str, reply_markup=markup, disable_web_page_preview=False)
- await query.answer()
+ await query.message.edit_caption(res_str, reply_markup=markup)
except MessageNotModified:
pass
except Exception:
exc = traceback.format_exc()
- await query.message.edit_text(f"ERROR:\n{exc}")
+ await query.message.edit_caption(f"ERROR:\n{exc}")
# IMDB Versi English
@@ -564,14 +707,18 @@ async def imdb_en_search(client, message):
quote=True,
)
r, title = message.text.split(None, 1)
- k = await message.reply("Searching Movie/Series in IMDB Database.. 😴", quote=True)
+ k = await message.reply_photo(
+ "AgACAgIAAxkBAAEDNDdjp-jrxffihNHv2UiQ63B2p3txagACL8QxG0hfQEkmUDEHspsSUgAIAQADAgADeAAHHgQ",
+ caption="Searching Movie/Series in IMDB Database.. 😴",
+ quote=True,
+ )
msg = ""
buttons = InlineKeyboard(row_width=4)
try:
r = await get_content(f"https://yasirapi.eu.org/imdb-search?q={title}")
res = json.loads(r).get("result")
if not res:
- return await k.edit("Sad, No Result.. 😕")
+ return await k.edit_caption("Sad, No Result.. 😕")
msg = f"Found {len(res)} result from {title} ~ {message.from_user.mention}\n\n"
for count, movie in enumerate(res, start=1):
titles = movie.get("l")
@@ -579,31 +726,47 @@ async def imdb_en_search(client, message):
type = movie.get("qid").replace("feature", "movie").capitalize()
movieID = re.findall(r"tt(\d+)", movie.get("id"))[0]
msg += f"{count}. {titles} {year} ~ {type}\n"
- BTN.append(InlineKeyboardButton(text=count, callback_data=f"imdben#{message.from_user.id}#{movieID}"))
+ BTN.append(
+ InlineKeyboardButton(
+ text=count, callback_data=f"imdben#{message.from_user.id}#{movieID}"
+ )
+ )
+ BTN.append(
+ InlineKeyboardButton(
+ text="❌ Close", callback_data=f"close#{message.from_user.id}"
+ )
+ )
buttons.add(*BTN)
- await k.edit(msg, reply_markup=buttons)
+ await k.edit_caption(msg, reply_markup=buttons)
except Exception as err:
- await k.edit(f"Ooppss, failed get movie list from IMDb.\n\nERROR: {err}")
+        await k.edit_caption(
+            f"Oops, failed to get the movie list from IMDb.\n\nERROR: {err}"
+        )
@app.on_callback_query(filters.regex("^imdben"))
@capture_err
async def imdb_en_callback(bot: Client, query: CallbackQuery):
- usr = query.message.reply_to_message
i, userid, movie = query.data.split("#")
if query.from_user.id != int(userid):
return await query.answer("⚠️ Access Denied!", True)
- await query.message.edit_text("⏳ Processing your request..")
+ await query.message.edit_caption("⏳ Processing your request..")
try:
url = f"https://www.imdb.com/title/tt{movie}/"
resp = await get_content(url)
sop = BeautifulSoup(resp, "lxml")
- r_json = json.loads(sop.find("script", attrs={"type": "application/ld+json"}).contents[0])
+ r_json = json.loads(
+ sop.find("script", attrs={"type": "application/ld+json"}).contents[0]
+ )
res_str = ""
type = f"{r_json['@type']}" if r_json.get("@type") else ""
if r_json.get("name"):
try:
- tahun = sop.select('ul[data-testid="hero-title-block__metadata"]')[0].find(class_="sc-8c396aa2-2 itZqyK").text
+ tahun = (
+ sop.select('ul[data-testid="hero-title-block__metadata"]')[0]
+ .find(class_="sc-8c396aa2-2 itZqyK")
+ .text
+ )
except:
tahun = "-"
res_str += f"📹 Title: {r_json['name']} [{tahun}] ({type})\n"
@@ -612,31 +775,62 @@ async def imdb_en_callback(bot: Client, query: CallbackQuery):
else:
res_str += "\n"
if sop.select('li[data-testid="title-techspec_runtime"]'):
- durasi = sop.select('li[data-testid="title-techspec_runtime"]')[0].find(class_="ipc-metadata-list-item__content-container").text
+ durasi = (
+ sop.select('li[data-testid="title-techspec_runtime"]')[0]
+ .find(class_="ipc-metadata-list-item__content-container")
+ .text
+ )
res_str += f"Duration: {durasi}\n"
if r_json.get("contentRating"):
res_str += f"Category: {r_json['contentRating']} \n"
if r_json.get("aggregateRating"):
res_str += f"Rating: {r_json['aggregateRating']['ratingValue']}⭐️ from {r_json['aggregateRating']['ratingCount']} user \n"
if sop.select('li[data-testid="title-details-releasedate"]'):
- rilis = sop.select('li[data-testid="title-details-releasedate"]')[0].find(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link").text
- rilis_url = sop.select('li[data-testid="title-details-releasedate"]')[0].find(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link")["href"]
+ rilis = (
+ sop.select('li[data-testid="title-details-releasedate"]')[0]
+ .find(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )
+ .text
+ )
+ rilis_url = sop.select('li[data-testid="title-details-releasedate"]')[
+ 0
+ ].find(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )[
+ "href"
+ ]
res_str += f"Release Data: {rilis}\n"
if r_json.get("genre"):
- genre = "".join(f"{GENRES_EMOJI[i]} #{i.replace('-', '_').replace(' ', '_')}, " if i in GENRES_EMOJI else f"#{i.replace('-', '_').replace(' ', '_')}, " for i in r_json["genre"])
-
+ genre = ""
+ for i in r_json["genre"]:
+ if i in GENRES_EMOJI:
+ genre += (
+ f"{GENRES_EMOJI[i]} #{i.replace('-', '_').replace(' ', '_')}, "
+ )
+ else:
+ genre += f"#{i.replace('-', '_').replace(' ', '_')}, "
genre = genre[:-2]
res_str += f"Genre: {genre}\n"
if sop.select('li[data-testid="title-details-origin"]'):
country = "".join(
f"{demoji(country.text)} #{country.text.replace(' ', '_').replace('-', '_')}, "
- for country in sop.select('li[data-testid="title-details-origin"]')[0].findAll(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link")
+ for country in sop.select('li[data-testid="title-details-origin"]')[
+ 0
+ ].findAll(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )
)
country = country[:-2]
res_str += f"Country: {country}\n"
if sop.select('li[data-testid="title-details-languages"]'):
language = "".join(
- f"#{lang.text.replace(' ', '_').replace('-', '_')}, " for lang in sop.select('li[data-testid="title-details-languages"]')[0].findAll(class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link")
+ f"#{lang.text.replace(' ', '_').replace('-', '_')}, "
+ for lang in sop.select('li[data-testid="title-details-languages"]')[
+ 0
+ ].findAll(
+ class_="ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link"
+ )
)
language = language[:-2]
res_str += f"Language: {language}\n"
@@ -677,51 +871,51 @@ async def imdb_en_callback(bot: Client, query: CallbackQuery):
key_ = key_[:-2]
res_str += f"🔥 Keywords: {key_} \n"
if sop.select('li[data-testid="award_information"]'):
- awards = sop.select('li[data-testid="award_information"]')[0].find(class_="ipc-metadata-list-item__list-content-item").text
+ awards = (
+ sop.select('li[data-testid="award_information"]')[0]
+ .find(class_="ipc-metadata-list-item__list-content-item")
+ .text
+ )
res_str += f"🏆 Awards: {awards}\n\n"
else:
res_str += "\n"
- res_str += f"©️ IMDb by @{BOT_USERNAME}"
+ res_str += "©️ IMDb by @MissKatyRoBot"
if r_json.get("trailer"):
trailer_url = r_json["trailer"]["url"]
markup = InlineKeyboardMarkup(
[
[
- InlineKeyboardButton("🎬 Open IMDB", url=f"https://www.imdb.com{r_json['url']}"),
+ InlineKeyboardButton(
+ "🎬 Open IMDB", url=f"https://www.imdb.com{r_json['url']}"
+ ),
InlineKeyboardButton("▶️ Trailer", url=trailer_url),
]
]
)
else:
- markup = InlineKeyboardMarkup([[InlineKeyboardButton("🎬 Open IMDB", url=f"https://www.imdb.com{r_json['url']}")]])
+ markup = InlineKeyboardMarkup(
+ [
+ [
+ InlineKeyboardButton(
+ "🎬 Open IMDB", url=f"https://www.imdb.com{r_json['url']}"
+ )
+ ]
+ ]
+ )
if thumb := r_json.get("image"):
try:
- await query.message.reply_photo(
- photo=thumb,
- quote=True,
- caption=res_str,
- reply_to_message_id=usr.id,
- reply_markup=markup,
+ await query.message.edit_media(
+ InputMediaPhoto(thumb, caption=res_str), reply_markup=markup
)
except (MediaEmpty, PhotoInvalidDimensions, WebpageMediaEmpty):
poster = thumb.replace(".jpg", "._V1_UX360.jpg")
- await query.message.reply_photo(
- photo=poster,
- caption=res_str,
- reply_to_message_id=usr.id,
- reply_markup=markup,
+ await query.message.edit_media(
+ InputMediaPhoto(poster, caption=res_str), reply_markup=markup
)
except Exception:
- await query.message.reply(
- res_str,
- reply_markup=markup,
- disable_web_page_preview=False,
- reply_to_message_id=usr.id,
- )
- await query.message.delete()
+ await query.message.edit_caption(res_str, reply_markup=markup)
else:
- await query.message.edit(res_str, reply_markup=markup, disable_web_page_preview=False)
- await query.answer()
+ await query.message.edit_caption(res_str, reply_markup=markup)
except Exception:
exc = traceback.format_exc()
- await query.message.edit_text(f"ERROR:\n{exc}")
+ await query.message.edit_caption(f"ERROR:\n{exc}")
diff --git a/misskaty/plugins/scrapwebsite.py b/misskaty/plugins/scrapwebsite.py
index 205cab70..68fd3b2a 100644
--- a/misskaty/plugins/scrapwebsite.py
+++ b/misskaty/plugins/scrapwebsite.py
@@ -1,32 +1,24 @@
-"""
- * @author yasir
- * @date 2022-12-01 09:12:27
- * @lastModified 2022-12-01 09:32:31
- * @projectName MissKatyPyro
- * Copyright @YasirPedia All rights reserved
-"""
-
# This plugin to scrape from melongmovie, and lk21
from bs4 import BeautifulSoup
+import aiohttp
import re
+import requests
import traceback
-from misskaty import app, BOT_USERNAME
+from misskaty import app
from pyrogram import filters
from pyrogram.errors import MessageTooLong
-from misskaty.vars import COMMAND_HANDLER
+from info import COMMAND_HANDLER
from misskaty.core.decorator.errors import capture_err
from misskaty.helper.tools import rentry
-from misskaty.helper.http import http
__MODULE__ = "WebScraper"
__HELP__ = """
/melongmovie - Scrape website data from MelongMovie Web. Without a query, it lists the latest movies.
-/lk21 [query ] - Scrape website data from LayarKaca21. If without query will give latest movie list.
-/pahe [query ] - Scrape website data from Pahe.li. If without query will give latest post list.
-/terbit21 [query ] - Scrape website data from Terbit21. If without query will give latest movie list.
-/savefilm21 [query ] - Scrape website data from Savefilm21. If without query will give latest movie list.
-/movieku [query ] - Scrape website data from Movieku.cc
-/gomov [query ] - Scrape website data from GoMov. If without query will give latest movie list.
+/lk21 [query] - Scrape website data from LayarKaca21. Without a query, it lists the latest movies.
+/terbit21 [query] - Scrape website data from Terbit21. Without a query, it lists the latest movies.
+/savefilm21 [query] - Scrape website data from Savefilm21. Without a query, it lists the latest movies.
+/movieku [query] - Scrape website data from Movieku.cc.
+/gomov [query] - Scrape website data from GoMov. Without a query, it lists the latest movies.
"""
@@ -41,8 +33,15 @@ async def nodrakor(_, message):
msg = await message.reply("Sedang proses scrap, mohon tunggu..")
try:
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
- html = await http.get(f"https://109.234.34.246/?s={judul}", headers=headers)
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
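+        # requests is synchronous, so this call blocks the event loop while it
+        # runs; verify=False skips TLS verification (urllib3 will emit an
+        # InsecureRequestWarning), presumably because the raw-IP host has no
+        # certificate matching its address.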
+ html = requests.get(
+ f"https://109.234.34.246/?s={judul}",
+ headers=headers,
+ allow_redirects=False,
+ verify=False,
+ )
soup = BeautifulSoup(html.text, "lxml")
res = soup.find_all(class_="content-thumbnail text-center")
data = []
@@ -53,7 +52,9 @@ async def nodrakor(_, message):
if not data:
return await msg.edit("Oops, data film tidak ditemukan.")
res = "".join(f"{i['judul']}\n{i['link']}\n\n" for i in data)
- await msg.edit(f"Hasil Pencarian di Nodrakor:\n{res}\nScraped by @{BOT_USERNAME}")
+ await msg.edit(
+ f"Hasil Pencarian di Nodrakor:\n{res}\nScraped by @MissKatyRoBot"
+ )
except Exception as e:
await msg.edit(f"ERROR: {str(e)}")
@@ -62,15 +63,18 @@ async def nodrakor(_, message):
@app.on_message(filters.command(["ngefilm21"], COMMAND_HANDLER))
@capture_err
async def ngefilm21(_, message):
- if len(message.command) == 1:
- return await message.reply("Masukkan query yang akan dicari..!!")
- title = message.text.split(" ", maxsplit=1)[1]
+ try:
+ title = message.text.split(" ", maxsplit=1)[1]
+ except IndexError:
+ title = ""
msg = await message.reply("Sedang proses scrap, mohon tunggu..")
try:
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
- html = await http.get(f"http://185.237.253.209/search?q={title}", headers=headers)
+ html = requests.get(f"http://185.237.253.209/search?q={title}", headers=headers)
soup = BeautifulSoup(html.text, "lxml")
res = soup.find_all("h2")
data = []
@@ -88,21 +92,30 @@ async def ngefilm21(_, message):
await msg.edit(f"ERROR: {str(e)}")
-# Scrape Web From Movieku.CC
@app.on_message(filters.command(["movieku"], COMMAND_HANDLER))
@capture_err
async def movikucc(_, message):
- if len(message.command) == 1:
- return await message.reply("Masukkan query yang akan dicari..!!")
- judul = message.text.split(" ", maxsplit=1)[1]
+ try:
+ judul = message.text.split(" ", maxsplit=1)[1]
+ except IndexError:
+ judul = ""
+
msg = await message.reply("Sedang proses scrap, mohon tunggu..")
try:
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
- html = await http.get(f"https://107.152.39.187/?s={judul}", headers=headers)
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
+
+ html = requests.get(f"https://107.152.39.187/?s={judul}", headers=headers)
soup = BeautifulSoup(html.text, "lxml")
data = soup.find_all(class_="bx")
- res = "".join(f"Judul: {i.find_all('a')[0]['title']}\nLink: {i.find_all('a')[0]['href']}\n\n" for i in data)
- await msg.edit(f"Hasil Scrap di Movieku.cc:\n{res} ⚠️ Gunakan command /movieku_scrap [link] untuk mengambil link download (hanya untuk movie).")
+ res = "".join(
+ f"Judul: {i.find_all('a')[0]['title']}\nLink: {i.find_all('a')[0]['href']}\n\n"
+ for i in data
+ )
+ await msg.edit(
+ f"Hasil Scrap di Movieku.cc:\n{res} ⚠️ Gunakan command /movieku_scrap [link] untuk mengambil link download (hanya untuk movie)."
+ )
except Exception as e:
await msg.edit(f"ERROR: {str(e)}")
@@ -114,11 +127,16 @@ async def savefilm21(_, message):
judul = message.text.split(" ", maxsplit=1)[1]
except IndexError:
judul = ""
+
msg = await message.reply("Sedang proses scrap, mohon tunggu..")
try:
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
- html = await http.get(f"http://185.99.135.215/?s={judul}", headers=headers, follow_redirects=False)
+ html = requests.get(
+ f"http://38.242.196.210/?s={judul}", headers=headers, allow_redirects=False
+ )
soup = BeautifulSoup(html.text, "lxml")
res = soup.find_all(class_="entry-title")
data = []
@@ -129,8 +147,12 @@ async def savefilm21(_, message):
data.append({"judul": judul, "link": link})
if not data:
return await msg.edit("Oops, data film tidak ditemukan")
- res = "".join(f"Judul: {i['judul']}\nLink: {i['link']}\n\n" for i in data)
- await msg.edit(f"Hasil Scrap {judul} dari Savefilm21:\n{res}\n\n⚠️ Gunakan /savefilm21_scrap [link] untuk mengambil link downloadnya.")
+ res = "".join(
+ f"Judul: {i['judul']}\nLink: {i['link']}\n\n" for i in data
+ )
+ await msg.edit(
+ f"Hasil Scrap {judul} dari Savefilm21:\n{res}\n\n⚠️ Gunakan /savefilm21_scrap [link] untuk mengambil link downloadnya."
+ )
except Exception as e:
await msg.edit(f"ERROR: {str(e)}")
@@ -145,9 +167,11 @@ async def melongmovie(_, message):
msg = await message.reply("Sedang proses scrap, mohon tunggu..")
try:
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
- html = await http.get(f"http://167.99.31.48/?s={judul}", headers=headers)
+ html = requests.get(f"http://167.99.31.48/?s={judul}", headers=headers)
soup = BeautifulSoup(html.text, "lxml")
data = []
for res in soup.select(".box"):
@@ -161,100 +185,107 @@ async def melongmovie(_, message):
data.append({"judul": title, "link": url, "kualitas": kualitas})
if not data:
return await msg.edit("Oops, data film tidak ditemukan di melongmovie")
- res = "".join(f"Judul: {i['judul']}\nKualitas: {i['kualitas']}\nLink: {i['link']}\n\n" for i in data)
+ res = "".join(
+ f"Judul: {i['judul']}\nKualitas: {i['kualitas']}\nLink: {i['link']}\n\n"
+ for i in data
+ )
# return await message.reply(json.dumps(data, indent=2, ensure_ascii=False))
return await msg.edit(res)
except Exception as e:
await msg.edit(f"ERROR: {str(e)}")
-@app.on_message(filters.command(["pahe"], COMMAND_HANDLER))
-@capture_err
-async def pahe_scrap(_, message):
- judul = message.text.split(" ", maxsplit=1)[1] if len(message.command) > 1 else ""
- pesan = await message.reply("Please wait, scraping data..")
- r = await http.get(f"https://yasirapi.eu.org/pahe?q={judul}")
- res = r.json()
- if not res["result"]:
- return await pesan.edit("Yahh, no result found.")
- data = "".join(f"**{count}. {i['judul']}**\n{i['link']}\n\n" for count, i in enumerate(res["result"], start=1))
- try:
- await pesan.edit(
- f"**Daftar rilis movie terbaru di web Pahe**:\n{data}",
- disable_web_page_preview=True,
- )
- except MessageTooLong:
- msg = await rentry(data)
- await pesan.edit(f"Karena hasil scrape terlalu panjang, maka hasil scrape di post ke rentry.\n\n{msg}")
-
-
@app.on_message(filters.command(["terbit21"], COMMAND_HANDLER))
@capture_err
async def terbit21_scrap(_, message):
if len(message.command) == 1:
- r = await http.get("https://yasirapi.eu.org/terbit21")
- res = r.json()
- data = "".join(f"**Judul: {i['judul']}**\n`{i['kategori']}`\n{i['link']}\n**Download:** [Klik Disini]({i['dl']})\n\n" for i in res["result"])
+ async with aiohttp.ClientSession() as session:
+ r = await session.get(f"https://yasirapi.eu.org/terbit21")
+ res = await r.json()
+ data = "".join(
+ f"**Judul: {i['judul']}**\n`{i['kategori']}`\n{i['link']}\n**Download:** [Klik Disini]({i['dl']})\n\n"
+ for i in res["result"]
+ )
+ try:
+ return await message.reply(
+ f"**Daftar rilis movie terbaru di web Terbit21**:\n{data}",
+ disable_web_page_preview=True,
+ )
+ except MessageTooLong:
+ msg = await rentry(data)
+ return await message.reply(
+ f"Karena hasil scrape terlalu panjang, maka hasil scrape di post ke rentry.\n\n{msg}"
+ )
+ judul = message.text.split(" ", maxsplit=1)[1]
+ msg = await message.reply(f"Mencari film di Terbit21 dg keyword {judul}..")
+ async with aiohttp.ClientSession() as session:
+ r = await session.get(f"https://yasirapi.eu.org/terbit21?q={judul}")
+ res = await r.json()
+ data = "".join(
+ f"**Judul: {i['judul']}**\n`{i['kategori']}`\n{i['link']}\n**Download:** [Klik Disini]({i['dl']})\n\n"
+ for i in res["result"]
+ )
+ if not res["result"]:
+ return await msg.edit("Yahh, ga ada hasil ditemukan")
try:
- return await message.reply(
- f"**Daftar rilis movie terbaru di web Terbit21**:\n{data}",
+ await msg.edit(
+ f"Hasil pencarian query {judul} di lk21:\n{data}",
disable_web_page_preview=True,
)
except MessageTooLong:
- msg = await rentry(data)
- return await message.reply(f"Karena hasil scrape terlalu panjang, maka hasil scrape di post ke rentry.\n\n{msg}")
- judul = message.text.split(" ", maxsplit=1)[1]
- msg = await message.reply(f"Mencari film di Terbit21 dg keyword {judul}..")
- r = await http.get(f"https://yasirapi.eu.org/terbit21?q={judul}")
- res = r.json()
- data = "".join(f"**Judul: {i['judul']}**\n`{i['kategori']}`\n{i['link']}\n**Download:** [Klik Disini]({i['dl']})\n\n" for i in res["result"])
- if not res["result"]:
- return await msg.edit("Yahh, ga ada hasil ditemukan")
- try:
- await msg.edit(
- f"Hasil pencarian query {judul} di lk21:\n{data}",
- disable_web_page_preview=True,
- )
- except MessageTooLong:
- pesan = await rentry(data)
- await msg.edit(f"Karena hasil scrape terlalu panjang, maka hasil scrape di post ke rentry.\n\n{pesan}")
+ pesan = await rentry(data)
+ await msg.edit(
+ f"Karena hasil scrape terlalu panjang, maka hasil scrape di post ke rentry.\n\n{pesan}"
+ )
@app.on_message(filters.command(["lk21"], COMMAND_HANDLER))
@capture_err
async def lk21_scrap(_, message):
if len(message.command) == 1:
- msg = await message.reply("Mendapatkan daftar post film terbaru di lk21")
- r = await http.get("https://yasirapi.eu.org/lk21")
- res = r.json()
+ msg = await message.reply(f"Mendapatkan daftar post film terbaru di lk21")
+ async with aiohttp.ClientSession() as session:
+ r = await session.get(f"https://yasirapi.eu.org/lk21")
+ res = await r.json()
+ if res.get("detail", None):
+ return await msg.edit(f"ERROR: {res['detail']}")
+ data = "".join(
+ f"**Judul: {i['judul']}**\n`{i['kategori']}`\n{i['link']}\n**Download:** [Klik Disini]({i['dl']})\n\n"
+ for i in res["result"]
+ )
+ try:
+ return await msg.edit(
+ f"**Daftar rilis movie terbaru di web LK21**:\n{data}",
+ disable_web_page_preview=True,
+ )
+ except MessageTooLong:
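+            # rentry() is assumed to post the text to rentry.co and return the paste URL.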
+            pesan = await rentry(data)
+            return await msg.edit(
+                f"Karena hasil scrape terlalu panjang, maka hasil scrape di post ke rentry.\n\n{pesan}"
+            )
+ judul = message.text.split(" ", maxsplit=1)[1]
+ msg = await message.reply(f"Mencari film di lk21 dg keyword {judul}..")
+ async with aiohttp.ClientSession() as session:
+ r = await session.get(f"https://yasirapi.eu.org/lk21?q={judul}")
+ res = await r.json()
if res.get("detail", None):
return await msg.edit(f"ERROR: {res['detail']}")
- data = "".join(f"**Judul: {i['judul']}**\n`{i['kategori']}`\n{i['link']}\n**Download:** [Klik Disini]({i['dl']})\n\n" for i in res["result"])
+ data = "".join(
+ f"**Judul: {i['judul']}**\n`{i['kategori']}`\n{i['link']}\n**Download:** [Klik Disini]({i['dl']})\n\n"
+ for i in res["result"]
+ )
+ if not res["result"]:
+ return await msg.edit("Yahh, ga ada hasil ditemukan")
try:
- return await msg.edit(
- f"**Daftar rilis movie terbaru di web LK21**:\n{data}",
+ await msg.edit(
+ f"Hasil pencarian query {judul} di lk21:\n{data}",
disable_web_page_preview=True,
)
except MessageTooLong:
- msg = await rentry(data)
- await msg.edit(f"Karena hasil scrape terlalu panjang, maka hasil scrape di post ke rentry.\n\n{msg}")
- judul = message.text.split(" ", maxsplit=1)[1]
- msg = await message.reply(f"Mencari film di lk21 dg keyword {judul}..")
- r = await http.get(f"https://yasirapi.eu.org/lk21?q={judul}")
- res = r.json()
- if res.get("detail", None):
- return await msg.edit(f"ERROR: {res['detail']}")
- data = "".join(f"**Judul: {i['judul']}**\n`{i['kategori']}`\n{i['link']}\n**Download:** [Klik Disini]({i['dl']})\n\n" for i in res["result"])
- if not res["result"]:
- return await msg.edit("Yahh, ga ada hasil ditemukan")
- try:
- await msg.edit(
- f"Hasil pencarian query {judul} di lk21:\n{data}",
- disable_web_page_preview=True,
- )
- except MessageTooLong:
- pesan = await rentry(data)
- return await msg.edit(f"Karena hasil scrape terlalu panjang, maka hasil scrape di post ke rentry.\n\n{pesan}")
+ pesan = await rentry(data)
+ return await msg.edit(
+ f"Karena hasil scrape terlalu panjang, maka hasil scrape di post ke rentry.\n\n{pesan}"
+ )
@app.on_message(filters.command(["gomov"], COMMAND_HANDLER))
@@ -265,22 +296,30 @@ async def gomov_scrap(_, message):
except IndexError:
judul = ""
- msg = await message.reply("Scraping GoMov Website..")
+ msg = await message.reply(f"Scraping GoMov Website..")
try:
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
- html = await http.get(f"https://185.173.38.216/?s={judul}", headers=headers)
+ html = requests.get(f"https://185.173.38.216/?s={judul}", headers=headers)
soup = BeautifulSoup(html.text, "lxml")
- entry = soup.find_all(class_="entry-title")
+ entry = soup.find_all(class_="entry-header")
DATA = []
for i in entry:
- judul = i.find_all("a")[0].text
- link = i.find_all("a")[0]["href"]
- DATA.append({"judul": judul, "link": link})
+ genre = i.find(class_="gmr-movie-on").text
+ judul = i.find(class_="entry-title").find("a").text
+ link = i.find(class_="entry-title").find("a").get("href")
+ DATA.append({"judul": judul, "link": link, "genre": genre})
if not DATA:
return await msg.edit("Oops, data film tidak ditemukan di GoMov")
- res = "".join(f"Judul: {i['judul']}\n{i['link']}\n\n" for i in DATA)
- await msg.edit(f"Hasil Pencarian di website GoMov:\n{res}\nScraped by @{BOT_USERNAME}")
+ res = "".join(
+ f"{num}. {i['judul']}\n{i['genre']}\n{i['link']}\n\n"
+ for num, i in enumerate(DATA, start=1)
+ )
+ await msg.edit(
+ f"Hasil Pencarian di website GoMov:\n{res}\nScraped by @MissKatyRoBot"
+ )
except Exception:
exc = traceback.format_exc()
await msg.edit(f"ERROR: {exc}")
@@ -291,15 +330,19 @@ async def gomov_scrap(_, message):
async def savefilm21_scrap(_, message):
try:
link = message.text.split(" ", maxsplit=1)[1]
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
- html = await http.get(link, headers=headers, follow_redirects=False)
+ html = requests.get(link, headers=headers, allow_redirects=False)
soup = BeautifulSoup(html.text, "lxml")
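+        # Each <a class="button button-shadow"> on the page appears to be one
+        # download-host link.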
res = soup.find_all(class_="button button-shadow")
res = "".join(f"{i.text}\n{i['href']}\n\n" for i in res)
await message.reply(f"Hasil Scrap dari {link}:\n\n{res}")
except IndexError:
- return await message.reply("Gunakan command /savefilm21_scrap [link] untuk scrap link download")
+ return await message.reply(
+ "Gunakan command /savefilm21_scrap [link] untuk scrap link download"
+ )
except Exception as e:
await message.reply(f"ERROR: {str(e)}")
@@ -309,27 +352,32 @@ async def savefilm21_scrap(_, message):
async def nodrakor_scrap(_, message):
try:
link = message.text.split(" ", maxsplit=1)[1]
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
- html = await http.get(link, headers=headers, follow_redirects=False)
+ html = requests.get(link, headers=headers, allow_redirects=False, verify=False)
soup = BeautifulSoup(html.text, "lxml")
hasil = soup.find_all(class_="gmr-download-wrap clearfix")[0]
await message.reply(f"Hasil Scrap dari {link}:\n{hasil}")
except IndexError:
- return await message.reply("Gunakan command /nodrakor_scrap [link] untuk scrap link download")
+ return await message.reply(
+ "Gunakan command /nodrakor_scrap [link] untuk scrap link download"
+ )
except Exception as e:
await message.reply(f"ERROR: {str(e)}")
-# Scrape Link Download Movieku.CC
@app.on_message(filters.command(["movieku_scrap"], COMMAND_HANDLER))
@capture_err
async def muviku_scrap(_, message):
try:
link = message.text.split(" ", maxsplit=1)[1]
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
- html = await http.get(link, headers=headers)
+ html = requests.get(link, headers=headers)
soup = BeautifulSoup(html.text, "lxml")
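+        # Movieku appears to group each quality's download links inside a
+        # <div class="smokeurl"> block.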
res = soup.find_all(class_="smokeurl")
data = []
@@ -344,19 +392,26 @@ async def muviku_scrap(_, message):
res = "".join(f"Host: {i['kualitas']}\n{i['link']}\n\n" for i in data)
await message.reply(res)
except IndexError:
- return await message.reply("Gunakan command /movieku_scrap [link] untuk scrap link download")
+ return await message.reply(
+ "Gunakan command /movieku_scrap [link] untuk scrap link download"
+ )
except Exception as e:
await message.reply(f"ERROR: {str(e)}")
-@app.on_message(filters.command(["melong"], COMMAND_HANDLER) & filters.user([617426792, 1985689491, 1172699512, 2024984460]))
+@app.on_message(
+ filters.command(["melong"], COMMAND_HANDLER)
+ & filters.user([617426792, 1985689491, 1172699512, 2024984460])
+)
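+# /melong is restricted to the hardcoded allow-list of Telegram user IDs above.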
@capture_err
async def melong_scrap(_, message):
try:
link = message.text.split(" ", maxsplit=1)[1]
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
- html = await http.get(link, headers=headers)
+ html = requests.get(link, headers=headers)
soup = BeautifulSoup(html.text, "lxml")
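+        # findAll(text=...) matches bare text nodes; newer BeautifulSoup spells
+        # these find_all(string=...), though the legacy forms still work.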
for ep in soup.findAll(text=re.compile(r"(?i)episode\s+\d+|LINK DOWNLOAD")):
hardsub = ep.findPrevious("div")
@@ -364,4 +419,30 @@ async def melong_scrap(_, message):
rep = f"{hardsub}\n{softsub}"
await message.reply(rep)
except IndexError:
- await message.reply("Gunakan command /melong [link] untuk scrap link download")
+ await message.reply(
+ "Gunakan command /melong [link] untuk scrap link download"
+ )
+
+
+@app.on_message(filters.command(["gomov_scrap"], COMMAND_HANDLER))
+@capture_err
+async def gomov_dl(_, message):
+ try:
+ link = message.text.split(" ", maxsplit=1)[1]
+ headers = {
+ "User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
+ }
+
+ html = requests.get(link, headers=headers, verify=False)
+ soup = BeautifulSoup(html.text, "lxml")
+ entry = soup.find(class_="gmr-download-wrap clearfix")
+ hasil = soup.find(class_="title-download").text
+ for i in entry.find(class_="list-inline gmr-download-list clearfix"):
+ title = i.find("a").text
+ link = i.find("a")["href"]
+ hasil += f"\n{title}\n{link}\n"
+ await message.reply(hasil)
+ except IndexError:
+ await message.reply(
+ "Gunakan command /melong [link] untuk scrap link download"
+ )