Fix Google Search

yasirarism authored 2023-06-12 04:43:09 +00:00, committed via GitHub
parent ba5e961457
commit 952b218e29
5 changed files with 13 additions and 12 deletions

@@ -28,7 +28,7 @@ MOD_NOLOAD = ["subscene_dl"]
 HELPABLE = {}
 cleanmode = {}
 botStartTime = time.time()
-misskaty_version = "v2.023.5.29 - Stable"
+misskaty_version = "v2.3 - Stable"
 pymonclient = MongoClient(DATABASE_URI)

@@ -54,7 +54,7 @@ async def genss(self: Client, ctx: Message, strings):
     try:
         downloader.start(blocking=False)
     except Exception as err:
-        return await ctx.edit(str(err))
+        return await pesan.edit(str(err))
     c_time = time.time()
     while not downloader.isFinished():
         total_length = downloader.filesize or None
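
The context lines above match pySmartDL's non-blocking download API (start(blocking=False), isFinished(), and the filesize attribute), so the progress loop being edited here is presumably built on that library. A minimal sketch of the pattern, with the URL and destination as placeholder assumptions:

import time
from pySmartDL import SmartDL

# Hypothetical URL and destination; only the progress-loop pattern mirrors the hunk.
downloader = SmartDL("https://example.com/file.bin", "/tmp/", progress_bar=False)
downloader.start(blocking=False)  # returns immediately; the download runs in worker threads

c_time = time.time()
while not downloader.isFinished():
    total_length = downloader.filesize or None  # None until/unless the server reports a size
    downloaded = downloader.get_dl_size()
    print(f"{downloaded}/{total_length} bytes, {downloader.get_speed(human=True)}")
    time.sleep(3)  # throttle status updates (e.g. before editing a Telegram message)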

@@ -203,11 +203,11 @@ async def inline_menu(_, inline_query: InlineQuery):
     search_results = await http.get(f"https://www.google.com/search?q={judul}&num=20", headers=headers)
     soup = BeautifulSoup(search_results.text, "lxml")
     data = []
-    for result in soup.select(".tF2Cxc"):
-        title = result.select_one(".DKV0Md").text
-        link = result.select_one(".yuRUbf a")["href"]
+    for result in soup.find_all("div", class_="kvH3mc BToiNc UK95Uc"):
+        link = result.find("div", class_="yuRUbf").find("a").get("href")
+        title = result.find("div", class_="yuRUbf").find("h3").get_text()
         try:
-            snippet = result.select_one("#rso .lyLwlc").text
+            snippet = result.find("div", class_="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf").get_text()
         except:
             snippet = "-"
         message_text = f"<a href='{link}'>{title}</a>\n"
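
This hunk, and the identical one in gsearch below, swaps the old CSS-selector lookups (.tF2Cxc, .DKV0Md, .yuRUbf a) for find_all()/find() calls against Google's newer result classes. A self-contained sketch of the new parsing logic run against canned HTML; the markup is a simplified, hypothetical stand-in for a real results page, and since Google rotates these obfuscated class names frequently, selectors like these need periodic fixes of exactly this kind:

from bs4 import BeautifulSoup

# Hypothetical, trimmed-down result markup using the class names from the hunk.
html = """
<div class="kvH3mc BToiNc UK95Uc">
  <div class="yuRUbf"><a href="https://example.com"><h3>Example title</h3></a></div>
  <div class="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf">Example snippet text.</div>
</div>
"""

soup = BeautifulSoup(html, "lxml")
data = []
for result in soup.find_all("div", class_="kvH3mc BToiNc UK95Uc"):
    link = result.find("div", class_="yuRUbf").find("a").get("href")
    title = result.find("div", class_="yuRUbf").find("h3").get_text()
    try:
        # The diff uses a bare except; AttributeError is what actually fires
        # when find() returns None because a result has no snippet block.
        snippet = result.find("div", class_="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf").get_text()
    except AttributeError:
        snippet = "-"
    data.append({"title": title, "link": link, "snippet": snippet})

print(data)  # [{'title': 'Example title', 'link': 'https://example.com', 'snippet': '...'}]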

@@ -124,11 +124,11 @@ async def gsearch(client, message):
     # collect data
     data = []
-    for result in soup.select(".tF2Cxc"):
-        title = result.select_one(".DKV0Md").text
-        link = result.select_one(".yuRUbf a")["href"]
+    for result in soup.find_all("div", class_="kvH3mc BToiNc UK95Uc"):
+        link = result.find("div", class_="yuRUbf").find("a").get("href")
+        title = result.find("div", class_="yuRUbf").find("h3").get_text()
         try:
-            snippet = result.select_one("#rso .lyLwlc").text
+            snippet = result.find("div", class_="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf").get_text()
         except:
             snippet = "-"

@@ -8,6 +8,7 @@ import re
 import logging
 import traceback
 import cloudscraper
+from cachetools import TTLCache
 from database import dbname
 from bs4 import BeautifulSoup
 from pykeyboard import InlineKeyboard, InlineButton
@@ -38,8 +39,8 @@ __HELP__ = """
 headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
 LOGGER = logging.getLogger(__name__)
-SCRAP_DICT = {}
-data_kuso = {}
+SCRAP_DICT = TTLCache(maxsize=1000, ttl=1800)
+data_kuso = TTLCache(maxsize=1000, ttl=1800)
 webdb = dbname.web
 web = {
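
Replacing the plain dicts with cachetools.TTLCache bounds both scrape caches at 1000 entries and expires each entry after 1800 seconds (30 minutes), while keeping the ordinary mapping interface, so the rest of the module can keep reading and writing SCRAP_DICT and data_kuso unchanged. A minimal sketch of that drop-in behavior (keys and values here are illustrative only):

from cachetools import TTLCache

SCRAP_DICT = TTLCache(maxsize=1000, ttl=1800)  # entries live at most 30 minutes

SCRAP_DICT["query:example"] = ["result 1", "result 2"]  # plain dict-style write
print(SCRAP_DICT["query:example"])    # ['result 1', 'result 2'] while fresh
print("query:example" in SCRAP_DICT)  # True until the 1800 s TTL elapses

# Past the TTL, or once the least-recently-used entry is evicted at maxsize,
# the key behaves as if it were never stored:
# "query:example" in SCRAP_DICT -> False, and SCRAP_DICT.get(...) -> None.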