diff --git a/misskaty/plugins/web_scraper.py b/misskaty/plugins/web_scraper.py
index 275d4c78..81b060c9 100644
--- a/misskaty/plugins/web_scraper.py
+++ b/misskaty/plugins/web_scraper.py
@@ -129,7 +129,7 @@ async def getDataPahe(msg, kueri, CurrentPage):
 async def getDataKuso(msg, kueri, CurrentPage, user):
     if not SCRAP_DICT.get(msg.id):
         kusodata = []
-        data = await http.get(f'https://kusonime.com/?s={kueri}', headers=headers, verify=False)
+        data = await http.get(f'https://kusonime.com/?s={kueri}', headers=headers)
         res = BeautifulSoup(data.text, "lxml").find_all("h2", {"class": "episodeye"})
         for i in res:
             ress = i.find_all("a")[0]
@@ -168,7 +168,7 @@ async def getDataKuso(msg, kueri, CurrentPage, user):
 async def getDataMovieku(msg, kueri, CurrentPage):
     if not SCRAP_DICT.get(msg.id):
         moviekudata = []
-        data = await http.get(f'https://107.152.37.223/?s={kueri}', headers=headers, verify=False)
+        data = await http.get(f'https://107.152.37.223/?s={kueri}', headers=headers)
         r = BeautifulSoup(data.text, "lxml")
         res = r.find_all(class_="bx")
         for i in res:
@@ -199,7 +199,7 @@ async def getDataMovieku(msg, kueri, CurrentPage):
 async def getDataSavefilm21(msg, kueri, CurrentPage, user):
     if not SCRAP_DICT.get(msg.id):
         sfdata = []
-        data = await http.get(f'https://185.99.135.215/?s={kueri}', headers=headers, verify=False)
+        data = await http.get(f'https://185.99.135.215/?s={kueri}', headers=headers)
         text = BeautifulSoup(data.text, "lxml")
         entry = text.find_all(class_="entry-header")
         if "Tidak Ditemukan" in entry[0].text:
@@ -921,4 +921,4 @@ async def lendrive_dl(_, callback_query):
             kl += "".join(f"[ {a.text} ]\n" for a in i.findAll("a"))
         await editPesan(callback_query.message, f"Scrape result from {link}:\n\n{kl}", reply_markup=keyboard)
     except Exception as err:
-        await editPesan(callback_query.message, f"ERROR: {err}", reply_markup=keyboard)
\ No newline at end of file
+        await editPesan(callback_query.message, f"ERROR: {err}", reply_markup=keyboard)