diff --git a/misskaty/plugins/scrapwebsite.py b/misskaty/plugins/scrapwebsite.py
index d24fd7b6..cff86d06 100644
--- a/misskaty/plugins/scrapwebsite.py
+++ b/misskaty/plugins/scrapwebsite.py
@@ -387,267 +387,4 @@ async def savefilm21(_, msg):
except Exception as e:
await m.delete()
LOGGER.error(e)
- await msg.reply(f"ERROR: {e}", True)
-
-
-@app.on_message(filters.command(["melongmovie"], COMMAND_HANDLER))
-@capture_err
-async def melongmovie(_, msg):
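- """Search MelongMovie by title, or list the latest posts when no title is given."""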
- SITE = "http://167.99.31.48"
- try:
- judul = msg.text.split(" ", 1)[1]
- except IndexError:
- judul = None
- data = []
- m = await msg.reply("**__⏳ Please wait, scraping data...__**", True)
- if judul is not None:
- try:
- html = await http.get(f"{SITE}/?s={judul}", headers=headers)
- bs4 = BeautifulSoup(html.text, "lxml")
- for res in bs4.select(".box"):
- dd = res.select("a")
- url = dd[0]["href"]
- title = dd[0]["title"]
- try:
- quality = dd[0].find(class_="quality").text
- except AttributeError:
- quality = "N/A"
- data.append({"judul": title, "link": url, "quality": quality})
- if not data:
- await m.delete()
- return await msg.reply("404 Result not FOUND!", True)
- await m.delete()
- head = f"#MelongMovie Results For: {judul}\n\n"
- msgs = ""
- for c, i in enumerate(data, start=1):
- msgs += f"{c}. {i['judul']}\nQuality: {i['quality']}\nExtract: /{msg.command[0]}_scrap {i['link']}\n\n"
- if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
- await msg.reply(
- head + msgs,
- True,
- disable_web_page_preview=True,
- reply_markup=InlineKeyboardMarkup(
- [
- [
- InlineKeyboardButton(
- text="ā Close",
- callback_data=f"close#{msg.from_user.id}",
- )
- ]
- ]
- ),
- )
- await asyncio.sleep(2)
- msgs = ""
- if msgs != "":
- await msg.reply(
- head + msgs,
- True,
- disable_web_page_preview=True,
- reply_markup=InlineKeyboardMarkup(
- [
- [
- InlineKeyboardButton(
- text="ā Close",
- callback_data=f"close#{msg.from_user.id}",
- )
- ]
- ]
- ),
- )
- except Exception as e:
- await m.delete()
- LOGGER.error(e)
- await msg.reply(str(e), True)
- else:
- try:
- html = await http.get(SITE, headers=headers)
- bs4 = BeautifulSoup(html.text, "lxml")
- for res in bs4.select(".box"):
- dd = res.select("a")
- url = dd[0]["href"]
- title = dd[0]["title"]
- try:
- quality = dd[0].find(class_="quality").text
- except AttributeError:
- quality = "N/A"
- data.append({"judul": title, "link": url, "quality": quality})
- if not data:
- await m.delete()
- return await msg.reply("404 Result not FOUND!", True)
- await m.delete()
- head = f"#MelongMovie Latest:\nš Use /{msg.command[0]} [title] to start search with title.\n\n"
- msgs = ""
- for c, i in enumerate(data, start=1):
- msgs += f"{c}. {i['judul']}\nQuality: {i['quality']}\nExtract: /{msg.command[0]}_scrap {i['link']}\n\n"
- if len(head.encode("utf-8") + msgs.encode("utf-8")) >= 4000:
- await msg.reply(
- head + msgs,
- True,
- disable_web_page_preview=True,
- reply_markup=InlineKeyboardMarkup(
- [
- [
- InlineKeyboardButton(
- text="ā Close",
- callback_data=f"close#{msg.from_user.id}",
- )
- ]
- ]
- ),
- )
- await asyncio.sleep(2)
- msgs = ""
- if msgs != "":
- await msg.reply(
- head + msgs,
- True,
- disable_web_page_preview=True,
- reply_markup=InlineKeyboardMarkup(
- [
- [
- InlineKeyboardButton(
- text="ā Close",
- callback_data=f"close#{msg.from_user.id}",
- )
- ]
- ]
- ),
- )
- except Exception as e:
- await m.delete()
- LOGGER.error(e)
- await msg.reply(str(e), True)
-
-
-@app.on_message(filters.command(["savefilm21_scrap"], COMMAND_HANDLER))
-@capture_err
-async def savefilm21_scrap(_, message):
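- """Scrape the download links from a SaveFilm21 post URL."""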
- try:
- link = message.text.split(" ", maxsplit=1)[1]
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
-
- html = await http.get(link, headers=headers)
- soup = BeautifulSoup(html.text, "lxml")
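- # Download links on SaveFilm21 pages are rendered as "button button-shadow" anchors.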
- res = soup.find_all(class_="button button-shadow")
- res = "".join(f"{i.text}\n{i['href']}\n\n" for i in res)
- await message.reply(
- f"Hasil Scrap dari {link}:\n\n{res}",
- disable_web_page_preview=True,
- reply_markup=InlineKeyboardMarkup(
- [
- [
- InlineKeyboardButton(
- text="ā Close",
- callback_data=f"close#{message.from_user.id}",
- )
- ]
- ]
- ),
- )
- except IndexError:
- return await message.reply(f"Gunakan command /{message.command[0]} [link] untuk scrap link download")
- except Exception as e:
- await message.reply(f"ERROR: {str(e)}")
-
-
-@app.on_message(filters.command(["nodrakor_scrap"], COMMAND_HANDLER))
-@capture_err
-async def nodrakor_scrap(_, message):
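- """Scrape the download section from a NoDrakor post URL."""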
- try:
- link = message.text.split(" ", maxsplit=1)[1]
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
-
- html = await http.get(link, headers=headers)
- soup = BeautifulSoup(html.text, "lxml")
- hasil = soup.find_all(class_="gmr-download-wrap clearfix")[0]
- await message.reply(f"Hasil Scrap dari {link}:\n{hasil}")
- except IndexError:
- return await message.reply(f"Gunakan command /{message.command[0]} [link] untuk scrap link download")
- except Exception as e:
- await message.reply(f"ERROR: {str(e)}")
-
-
-# Scrape download links from Movieku.CC
-@app.on_message(filters.command(["movieku_scrap"], COMMAND_HANDLER))
-@capture_err
-async def muviku_scrap(_, message):
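- """Scrape the per-host download links from a Movieku.cc post URL."""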
- try:
- link = message.text.split(" ", maxsplit=1)[1]
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
-
- html = await http.get(link, headers=headers)
- soup = BeautifulSoup(html.text, "lxml")
- res = soup.find_all(class_="smokeurl")
- data = []
- # Collect every host/quality link from each "smokeurl" block.
- for i in res:
- for a in i.find_all("a"):
- data.append({"link": a["href"], "kualitas": a.text})
- if not data:
- return await message.reply("Oops, data film tidak ditemukan.")
- res = "".join(f"Host: {i['kualitas']}\n{i['link']}\n\n" for i in data)
- await message.reply(res)
- except IndexError:
- return await message.reply(f"Gunakan command /{message.command[0]} [link] untuk scrap link download")
- except Exception as e:
- await message.reply(f"ERROR: {str(e)}")
-
-
-@app.on_message(filters.command(["melongmovie_scrap"], COMMAND_HANDLER))
-@capture_err
-async def melong_scrap(_, message):
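- """Scrape the hardsub/softsub download blocks from a MelongMovie post URL."""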
- try:
- link = message.text.split(" ", maxsplit=1)[1]
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
-
- html = await http.get(link, headers=headers)
- soup = BeautifulSoup(html.text, "lxml")
- for ep in soup.find_all(string=re.compile(r"(?i)episode\s+\d+|LINK DOWNLOAD")):
- hardsub = ep.find_previous("div")
- softsub = ep.find_next("div")
- rep = f"{hardsub}\n{softsub}"
- await message.reply(rep)
- except IndexError:
- await message.reply(f"Gunakan command /{message.command[0]} [link] untuk scrap link download")
-
-
-@app.on_message(filters.command(["gomov_scrap", "zonafilm_scrap"], COMMAND_HANDLER))
-@capture_err
-async def gomov_zonafilm_dl(_, message):
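- """Scrape the download list from a GoMov or ZonaFilm post URL."""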
- try:
- link = message.text.split(" ", maxsplit=1)[1]
- headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
-
- html = await http.get(link, headers=headers)
- soup = BeautifulSoup(html.text, "lxml")
- entry = soup.find(class_="gmr-download-wrap clearfix")
- hasil = soup.find(class_="title-download").text
- # Iterate the <li> items explicitly; looping over the Tag itself also yields bare text nodes.
- for li in entry.find(class_="list-inline gmr-download-list clearfix").find_all("li"):
- a = li.find("a")
- hasil += f"\n{a.text}\n{a['href']}\n"
- await message.reply(
- hasil,
- reply_markup=InlineKeyboardMarkup(
- [
- [
- InlineKeyboardButton(
- text="ā Close",
- callback_data=f"close#{message.from_user.id}",
- )
- ]
- ]
- ),
- )
- except IndexError:
- await message.reply(f"Gunakan command /{message.command[0]} [link] untuk scrap link download")
+ await msg.reply(f"ERROR: {e}", True)
\ No newline at end of file