diff --git a/misskaty/__init__.py b/misskaty/__init__.py
index b7ce33ba..5663e088 100644
--- a/misskaty/__init__.py
+++ b/misskaty/__init__.py
@@ -28,7 +28,7 @@ MOD_NOLOAD = ["subscene_dl"]
 HELPABLE = {}
 cleanmode = {}
 botStartTime = time.time()
-misskaty_version = "v2.023.5.29 - Stable"
+misskaty_version = "v2.3 - Stable"
 pymonclient = MongoClient(DATABASE_URI)
diff --git a/misskaty/plugins/genss.py b/misskaty/plugins/genss.py
index 2feba339..2c424add 100644
--- a/misskaty/plugins/genss.py
+++ b/misskaty/plugins/genss.py
@@ -54,7 +54,7 @@ async def genss(self: Client, ctx: Message, strings):
     try:
         downloader.start(blocking=False)
     except Exception as err:
-        return await ctx.edit(str(err))
+        return await pesan.edit(str(err))
     c_time = time.time()
     while not downloader.isFinished():
         total_length = downloader.filesize or None
diff --git a/misskaty/plugins/inline_search.py b/misskaty/plugins/inline_search.py
index 54d5b920..b9a424b6 100644
--- a/misskaty/plugins/inline_search.py
+++ b/misskaty/plugins/inline_search.py
@@ -203,11 +203,11 @@ async def inline_menu(_, inline_query: InlineQuery):
         search_results = await http.get(f"https://www.google.com/search?q={judul}&num=20", headers=headers)
         soup = BeautifulSoup(search_results.text, "lxml")
         data = []
-        for result in soup.select(".tF2Cxc"):
-            title = result.select_one(".DKV0Md").text
-            link = result.select_one(".yuRUbf a")["href"]
+        for result in soup.find_all("div", class_="kvH3mc BToiNc UK95Uc"):
+            link = result.find("div", class_="yuRUbf").find("a").get("href")
+            title = result.find("div", class_="yuRUbf").find("h3").get_text()
             try:
-                snippet = result.select_one("#rso .lyLwlc").text
+                snippet = result.find("div", class_="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf").get_text()
             except:
                 snippet = "-"
             message_text = f"{title}\n"
diff --git a/misskaty/plugins/misc_tools.py b/misskaty/plugins/misc_tools.py
index f00f5f69..3e4c7360 100644
--- a/misskaty/plugins/misc_tools.py
+++ b/misskaty/plugins/misc_tools.py
@@ -124,11 +124,11 @@ async def gsearch(client, message):
     # collect data
     data = []
-    for result in soup.select(".tF2Cxc"):
-        title = result.select_one(".DKV0Md").text
-        link = result.select_one(".yuRUbf a")["href"]
+    for result in soup.find_all("div", class_="kvH3mc BToiNc UK95Uc"):
+        link = result.find("div", class_="yuRUbf").find("a").get("href")
+        title = result.find("div", class_="yuRUbf").find("h3").get_text()
         try:
-            snippet = result.select_one("#rso .lyLwlc").text
+            snippet = result.find("div", class_="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf").get_text()
         except:
             snippet = "-"
diff --git a/misskaty/plugins/web_scraper.py b/misskaty/plugins/web_scraper.py
index 51892bba..706c56b0 100644
--- a/misskaty/plugins/web_scraper.py
+++ b/misskaty/plugins/web_scraper.py
@@ -8,6 +8,7 @@ import re
 import logging
 import traceback
 import cloudscraper
+from cachetools import TTLCache
 from database import dbname
 from bs4 import BeautifulSoup
 from pykeyboard import InlineKeyboard, InlineButton
@@ -38,8 +39,8 @@ __HELP__ = """
 headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
 LOGGER = logging.getLogger(__name__)
-SCRAP_DICT = {}
-data_kuso = {}
+SCRAP_DICT = TTLCache(maxsize=1000, ttl=1800)
+data_kuso = TTLCache(maxsize=1000, ttl=1800)
 webdb = dbname.web
 web = {
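Note on the `web_scraper.py` change: replacing the plain module-level dicts with `cachetools.TTLCache` gives the cached scrape data automatic expiry instead of letting it accumulate for the lifetime of the bot. A minimal sketch of the behaviour relied on here, using a shorter TTL and a hypothetical cache key for illustration:

```python
import time

from cachetools import TTLCache

# Behaves like a dict, but entries expire `ttl` seconds after insertion and the
# least recently used entries are evicted once `maxsize` is reached.
scrap_cache = TTLCache(maxsize=1000, ttl=2)

scrap_cache["query:inception"] = ["result 1", "result 2"]  # hypothetical key/value
print(scrap_cache.get("query:inception"))  # ['result 1', 'result 2']

time.sleep(3)                              # wait past the 2-second TTL
print(scrap_cache.get("query:inception"))  # None, the entry has expired
```

With `maxsize=1000, ttl=1800` as in the diff, an entry in `SCRAP_DICT` or `data_kuso` stays readable for 30 minutes after it is stored and is then dropped automatically.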