diff --git a/misskaty/plugins/filter_request.py b/misskaty/plugins/filter_request.py
index 4b27384d..09be59c3 100644
--- a/misskaty/plugins/filter_request.py
+++ b/misskaty/plugins/filter_request.py
@@ -117,6 +117,8 @@ async def clear_reqdict():
try:
os.rmdir("downloads")
os.remove("MissKatyLogs.txt")
+ open("MissKatyLogs.txt", "w")
except:
pass
diff --git a/misskaty/plugins/imdb_search.py b/misskaty/plugins/imdb_search.py
index 8614eab0..fdc01b09 100644
--- a/misskaty/plugins/imdb_search.py
+++ b/misskaty/plugins/imdb_search.py
@@ -4,6 +4,8 @@ import re
from bs4 import BeautifulSoup
from urllib.parse import quote_plus
+
+import soupsieve  # explicit import so the soupsieve backend used by BeautifulSoup's .select() is a visible dependency
from utils import demoji
from deep_translator import GoogleTranslator
from pykeyboard import InlineButton, InlineKeyboard
@@ -297,12 +299,9 @@ async def imdb_id_callback(_, query):
ott = await search_jw(r_json.get("name"), "ID")
typee = r_json.get("@type", "")
res_str = ""
- if judul := r_json.get("name"):
- try:
- tahun = sop.select('ul[data-testid="hero-title-block__metadata"]')[0].find("span").text
- except:
- tahun = "N/A"
- res_str += f"📹 Judul: {judul} [{tahun}] ({typee})\n"
+ tahun = match[0] if (match := re.findall(r"\d{4}", sop.title.text)) else "N/A"
+ res_str += f"📹 Judul: {r_json.get('name')} [{tahun}] ({typee})\n"
if aka := r_json.get("alternateName"):
res_str += f"📢 AKA: {aka}\n\n"
else:
@@ -418,12 +417,9 @@ async def imdb_en_callback(bot, query):
ott = await search_jw(r_json.get("name"), "US")
typee = r_json.get("@type", "")
res_str = ""
- if judul := r_json.get("name"):
- try:
- tahun = sop.select('ul[data-testid="hero-title-block__metadata"]')[0].find("span").text
- except:
- tahun = "N/A"
- res_str += f"📹 Judul: {judul} [{tahun}] ({typee})\n"
+ tahun = match[0] if (match := re.findall(r"\d{4}", sop.title.text)) else "N/A"
+ res_str += f"📹 Judul: {r_json.get('name')} [{tahun}] ({typee})\n"
if aka := r_json.get("alternateName"):
res_str += f"📢 AKA: {aka}\n\n"
else:
diff --git a/misskaty/plugins/web_scraper.py b/misskaty/plugins/web_scraper.py
index 44d51cc4..beff8516 100644
--- a/misskaty/plugins/web_scraper.py
+++ b/misskaty/plugins/web_scraper.py
@@ -133,7 +133,8 @@ async def getDataKuso(msg, kueri, CurrentPage, user):
if not SCRAP_DICT.get(msg.id):
kusodata = []
data = await http.get(f"https://kusonime.com/?s={kueri}", headers=headers)
- res = BeautifulSoup(data.text, "lxml").find_all("h2", {"class": "episodeye"})
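+ # assumes the http helper now returns the page body directly, so .text is no longer needed (same change in the hunks below)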
+ res = BeautifulSoup(data, "lxml").find_all("h2", {"class": "episodeye"})
for i in res:
ress = i.find_all("a")[0]
title = ress.text
@@ -168,7 +169,7 @@ async def getDataMovieku(msg, kueri, CurrentPage):
if not SCRAP_DICT.get(msg.id):
moviekudata = []
data = await http.get(f"https://107.152.37.223/?s={kueri}", headers=headers)
- r = BeautifulSoup(data.text, "lxml")
+ r = BeautifulSoup(data, "lxml")
res = r.find_all(class_="bx")
for i in res:
judul = i.find_all("a")[0]["title"]
@@ -199,7 +200,7 @@ async def getDataSavefilm21(msg, kueri, CurrentPage, user):
if not SCRAP_DICT.get(msg.id):
sfdata = []
data = await http.get(f"https://45.136.197.138/?s={kueri}", headers=headers)
- text = BeautifulSoup(data.text, "lxml")
+ text = BeautifulSoup(data, "lxml")
entry = text.find_all(class_="entry-header")
if "Tidak Ditemukan" in entry[0].text:
if not kueri:
@@ -233,7 +234,7 @@ async def getDataSavefilm21(msg, kueri, CurrentPage, user):
async def getDataLendrive(msg, kueri, CurrentPage, user):
if not SCRAP_DICT.get(msg.id):
data = await http.get(f"https://lendrive.web.id/?s={kueri}", headers=headers)
- soup = BeautifulSoup(data.text, "lxml")
+ soup = BeautifulSoup(data, "lxml")
lenddata = []
for o in soup.find_all(class_="bsx"):
title = o.find("a")["title"]
@@ -265,7 +266,7 @@ async def getDataLendrive(msg, kueri, CurrentPage, user):
async def getDataMelong(msg, kueri, CurrentPage, user):
if not SCRAP_DICT.get(msg.id):
data = await http.get(f"http://167.99.31.48/?s={kueri}", headers=headers)
- bs4 = BeautifulSoup(data.text, "lxml")
+ bs4 = BeautifulSoup(data, "lxml")
melongdata = []
for res in bs4.select(".box"):
dd = res.select("a")
@@ -300,7 +301,7 @@ async def getDataMelong(msg, kueri, CurrentPage, user):
async def getDataGomov(msg, kueri, CurrentPage, user):
if not SCRAP_DICT.get(msg.id):
gomovv = await http.get(f"https://185.173.38.216/?s={kueri}", headers=headers)
- text = BeautifulSoup(gomovv.text, "lxml")
+ text = BeautifulSoup(gomovv, "lxml")
entry = text.find_all(class_="entry-header")
if entry[0].text.strip() == "Nothing Found":
if not kueri: