Mirror of https://github.com/yasirarism/MissKatyPyro.git
Synced 2026-01-02 02:44:50 +00:00

Commit 2678070c1c ("Fix"), parent f6ca5de9e3
3 changed files with 13 additions and 18 deletions
@@ -117,6 +117,7 @@ async def clear_reqdict():
     try:
         os.rmdir("downloads")
         os.remove("MissKatyLogs.txt")
+        open("MissKatyLogs.txt", "w")
     except:
         pass
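With this change the cleanup still removes MissKatyLogs.txt, but the log file is immediately recreated empty, so later log writes do not hit a missing file. A minimal sketch of the same truncate-instead-of-delete pattern (the helper name below is illustrative, not from the repo):

    import os
    from contextlib import suppress

    LOG_FILE = "MissKatyLogs.txt"

    def reset_logs() -> None:
        # Remove the downloads dir if it is empty; ignore failures, mirroring
        # the broad try/except around the original cleanup code.
        with suppress(OSError):
            os.rmdir("downloads")
        # Recreate/truncate the log file instead of leaving it deleted, so
        # subsequent appends to the log always find a file to write to.
        with open(LOG_FILE, "w"):
            pass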
@@ -4,6 +4,8 @@ import re
 from bs4 import BeautifulSoup
 from urllib.parse import quote_plus

 import soupsieve
 from utils import demoji
 from deep_translator import GoogleTranslator
 from pykeyboard import InlineButton, InlineKeyboard
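Of these imports, deep_translator's GoogleTranslator is the less common one; it wraps Google Translate behind a small synchronous call. A quick illustration of that library API, assuming nothing about how this plugin actually invokes it (the target language and sample text are made up):

    from deep_translator import GoogleTranslator

    # source="auto" lets the backend detect the input language; the target
    # code "id" (Indonesian) is only an example here.
    text = "A retired hitman is pulled back in for one last job."
    print(GoogleTranslator(source="auto", target="id").translate(text))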
@@ -297,12 +299,8 @@ async def imdb_id_callback(_, query):
         ott = await search_jw(r_json.get("name"), "ID")
         typee = r_json.get("@type", "")
         res_str = ""
-        if judul := r_json.get("name"):
-            try:
-                tahun = sop.select('ul[data-testid="hero-title-block__metadata"]')[0].find("span").text
-            except:
-                tahun = "N/A"
-            res_str += f"<b>📹 Judul:</b> <a href='{url}'>{judul} [{tahun}]</a> (<code>{typee}</code>)\n"
+        tahun = re.findall("\d{4}", sop.title.text) if re.findall("\d{4}", sop.title.text) else "N/A"
+        res_str += f"<b>📹 Judul:</b> <a href='{url}'>{r_json.get('name')} [{tahun}]</a> (<code>{typee}</code>)\n"
         if aka := r_json.get("alternateName"):
             res_str += f"<b>📢 AKA:</b> <code>{aka}</code>\n\n"
         else:
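Both IMDb callbacks drop the sop.select('ul[data-testid="hero-title-block__metadata"]') lookup in favour of pulling the release year from the page <title> with a regex, falling back to "N/A" when no four-digit run is present. A self-contained sketch of that extraction, using a made-up title string in place of the fetched page (bs4 and lxml assumed installed, as in the project):

    import re
    from bs4 import BeautifulSoup

    html = "<html><head><title>Pengabdi Setan (2017) - IMDb</title></head></html>"
    sop = BeautifulSoup(html, "lxml")

    # Equivalent to the committed conditional: keep findall()'s list when it is
    # non-empty, otherwise fall back to "N/A". Note that tahun stays a list
    # here, exactly as in the committed line.
    tahun = re.findall(r"\d{4}", sop.title.text) or "N/A"
    print(tahun)  # -> ['2017']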
@@ -418,12 +416,8 @@ async def imdb_en_callback(bot, query):
         ott = await search_jw(r_json.get("name"), "US")
         typee = r_json.get("@type", "")
         res_str = ""
-        if judul := r_json.get("name"):
-            try:
-                tahun = sop.select('ul[data-testid="hero-title-block__metadata"]')[0].find("span").text
-            except:
-                tahun = "N/A"
-            res_str += f"<b>📹 Judul:</b> <a href='{url}'>{judul} [{tahun}]</a> (<code>{typee}</code>)\n"
+        tahun = re.findall("\d{4}", sop.title.text) if re.findall("\d{4}", sop.title.text) else "N/A"
+        res_str += f"<b>📹 Judul:</b> <a href='{url}'>{r_json.get('name')} [{tahun}]</a> (<code>{typee}</code>)\n"
         if aka := r_json.get("alternateName"):
             res_str += f"<b>📢 AKA:</b> <code>{aka}</code>\n\n"
         else:
@@ -133,7 +133,7 @@ async def getDataKuso(msg, kueri, CurrentPage, user):
     if not SCRAP_DICT.get(msg.id):
         kusodata = []
         data = await http.get(f"https://kusonime.com/?s={kueri}", headers=headers)
-        res = BeautifulSoup(data.text, "lxml").find_all("h2", {"class": "episodeye"})
+        res = BeautifulSoup(data, "lxml").find_all("h2", {"class": "episodeye"})
         for i in res:
             ress = i.find_all("a")[0]
             title = ress.text
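This and the remaining scraper hunks make the same one-line change: the object returned by the project's http helper is handed to BeautifulSoup directly instead of through its .text attribute, which suggests the helper now returns the response body itself. A hedged, standalone sketch of the Kusonime lookup with httpx standing in for that helper; the explicit .text below is an assumption of this sketch, not the project's wrapper:

    import asyncio

    import httpx
    from bs4 import BeautifulSoup

    async def kuso_titles(query: str) -> list[str]:
        # Fetch the search page; with a plain httpx client the body has to be
        # pulled out explicitly with .text before parsing.
        async with httpx.AsyncClient() as client:
            resp = await client.get("https://kusonime.com/", params={"s": query})
        soup = BeautifulSoup(resp.text, "lxml")
        # Same traversal as getDataKuso: each hit is an <h2 class="episodeye">
        # wrapping an <a> whose text is the release title.
        return [a.text for h2 in soup.find_all("h2", {"class": "episodeye"}) if (a := h2.find("a"))]

    if __name__ == "__main__":
        print(asyncio.run(kuso_titles("one piece")))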
@@ -168,7 +168,7 @@ async def getDataMovieku(msg, kueri, CurrentPage):
     if not SCRAP_DICT.get(msg.id):
         moviekudata = []
         data = await http.get(f"https://107.152.37.223/?s={kueri}", headers=headers)
-        r = BeautifulSoup(data.text, "lxml")
+        r = BeautifulSoup(data, "lxml")
         res = r.find_all(class_="bx")
         for i in res:
             judul = i.find_all("a")[0]["title"]
@@ -199,7 +199,7 @@ async def getDataSavefilm21(msg, kueri, CurrentPage, user):
     if not SCRAP_DICT.get(msg.id):
         sfdata = []
         data = await http.get(f"https://45.136.197.138/?s={kueri}", headers=headers)
-        text = BeautifulSoup(data.text, "lxml")
+        text = BeautifulSoup(data, "lxml")
         entry = text.find_all(class_="entry-header")
         if "Tidak Ditemukan" in entry[0].text:
             if not kueri:
@@ -233,7 +233,7 @@ async def getDataSavefilm21(msg, kueri, CurrentPage, user):
 async def getDataLendrive(msg, kueri, CurrentPage, user):
     if not SCRAP_DICT.get(msg.id):
         data = await http.get(f"https://lendrive.web.id/?s={kueri}", headers=headers)
-        soup = BeautifulSoup(data.text, "lxml")
+        soup = BeautifulSoup(data, "lxml")
         lenddata = []
         for o in soup.find_all(class_="bsx"):
             title = o.find("a")["title"]
@@ -265,7 +265,7 @@ async def getDataLendrive(msg, kueri, CurrentPage, user):
 async def getDataMelong(msg, kueri, CurrentPage, user):
     if not SCRAP_DICT.get(msg.id):
         data = await http.get(f"http://167.99.31.48/?s={kueri}", headers=headers)
-        bs4 = BeautifulSoup(data.text, "lxml")
+        bs4 = BeautifulSoup(data, "lxml")
         melongdata = []
         for res in bs4.select(".box"):
             dd = res.select("a")
@@ -300,7 +300,7 @@ async def getDataMelong(msg, kueri, CurrentPage, user):
 async def getDataGomov(msg, kueri, CurrentPage, user):
     if not SCRAP_DICT.get(msg.id):
         gomovv = await http.get(f"https://185.173.38.216/?s={kueri}", headers=headers)
-        text = BeautifulSoup(gomovv.text, "lxml")
+        text = BeautifulSoup(gomovv, "lxml")
         entry = text.find_all(class_="entry-header")
         if entry[0].text.strip() == "Nothing Found":
             if not kueri: