Mirror of https://github.com/yasirarism/MissKatyPyro.git (synced 2025-12-29 17:44:50 +00:00)

Fix Google Search

parent ba5e961457
commit 952b218e29
5 changed files with 13 additions and 12 deletions
@@ -28,7 +28,7 @@ MOD_NOLOAD = ["subscene_dl"]
 HELPABLE = {}
 cleanmode = {}
 botStartTime = time.time()
-misskaty_version = "v2.023.5.29 - Stable"
+misskaty_version = "v2.3 - Stable"
 
 pymonclient = MongoClient(DATABASE_URI)
 
@@ -54,7 +54,7 @@ async def genss(self: Client, ctx: Message, strings):
     try:
         downloader.start(blocking=False)
     except Exception as err:
-        return await ctx.edit(str(err))
+        return await pesan.edit(str(err))
     c_time = time.time()
     while not downloader.isFinished():
         total_length = downloader.filesize or None
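Note: the start(blocking=False) / isFinished() / filesize calls in this hunk match the pySmartDL download API, though the import sits outside the hunk, so that attribution is an assumption. The fix itself only redirects the error report from ctx (the user's message) to pesan, presumably the bot's own status message. A minimal sketch of the non-blocking progress-loop pattern, assuming pySmartDL and a hypothetical URL:

    # Sketch of the non-blocking download loop used in genss,
    # assuming pySmartDL (the diff itself does not name the library).
    import time
    from pySmartDL import SmartDL

    downloader = SmartDL("https://example.com/file.bin", progress_bar=False)  # hypothetical URL
    try:
        downloader.start(blocking=False)  # returns immediately; download runs in a worker thread
    except Exception as err:
        raise SystemExit(str(err))  # the handler reports this via pesan.edit() instead

    c_time = time.time()
    while not downloader.isFinished():
        total_length = downloader.filesize or None   # None until the size is known
        done = downloader.get_dl_size()              # bytes downloaded so far
        print(f"{done}/{total_length} bytes, {time.time() - c_time:.0f}s elapsed")
        time.sleep(2)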
@@ -203,11 +203,11 @@ async def inline_menu(_, inline_query: InlineQuery):
     search_results = await http.get(f"https://www.google.com/search?q={judul}&num=20", headers=headers)
     soup = BeautifulSoup(search_results.text, "lxml")
     data = []
-    for result in soup.select(".tF2Cxc"):
-        title = result.select_one(".DKV0Md").text
-        link = result.select_one(".yuRUbf a")["href"]
+    for result in soup.find_all("div", class_="kvH3mc BToiNc UK95Uc"):
+        link = result.find("div", class_="yuRUbf").find("a").get("href")
+        title = result.find("div", class_="yuRUbf").find("h3").get_text()
         try:
-            snippet = result.select_one("#rso .lyLwlc").text
+            snippet = result.find("div", class_="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf").get_text()
         except:
             snippet = "-"
         message_text = f"<a href='{link}'>{title}</a>\n"
@@ -124,11 +124,11 @@ async def gsearch(client, message):
     # collect data
     data = []
 
-    for result in soup.select(".tF2Cxc"):
-        title = result.select_one(".DKV0Md").text
-        link = result.select_one(".yuRUbf a")["href"]
+    for result in soup.find_all("div", class_="kvH3mc BToiNc UK95Uc"):
+        link = result.find("div", class_="yuRUbf").find("a").get("href")
+        title = result.find("div", class_="yuRUbf").find("h3").get_text()
         try:
-            snippet = result.select_one("#rso .lyLwlc").text
+            snippet = result.find("div", class_="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf").get_text()
         except:
             snippet = "-"
 
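The two hunks above make the same change, once in the inline handler and once in the /gsearch command: Google's result markup no longer matches the old .tF2Cxc / .DKV0Md CSS selectors, so both loops switch to class-based find_all / find lookups, with the bare except keeping snippet-less results from crashing the loop. A minimal sketch of the new parsing against a hand-written fragment (the HTML below is illustrative, not a real Google response, and these class names change whenever Google revs its markup):

    # Sketch of the new find_all/find parsing against a fragment that
    # mimics the structure the diff targets; real responses differ.
    from bs4 import BeautifulSoup

    html = """
    <div class="kvH3mc BToiNc UK95Uc">
      <div class="yuRUbf"><a href="https://example.com"><h3>Example title</h3></a></div>
      <div class="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf">Example snippet text.</div>
    </div>
    """
    soup = BeautifulSoup(html, "lxml")
    for result in soup.find_all("div", class_="kvH3mc BToiNc UK95Uc"):
        link = result.find("div", class_="yuRUbf").find("a").get("href")
        title = result.find("div", class_="yuRUbf").find("h3").get_text()
        try:
            snippet = result.find("div", class_="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf").get_text()
        except AttributeError:  # find() returned None: no snippet div in this result
            snippet = "-"
        print(title, link, snippet)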
@@ -8,6 +8,7 @@ import re
 import logging
 import traceback
 import cloudscraper
+from cachetools import TTLCache
 from database import dbname
 from bs4 import BeautifulSoup
 from pykeyboard import InlineKeyboard, InlineButton
@@ -38,8 +39,8 @@ __HELP__ = """
 headers = {"User-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"}
 
 LOGGER = logging.getLogger(__name__)
-SCRAP_DICT = {}
-data_kuso = {}
+SCRAP_DICT = TTLCache(maxsize=1000, ttl=1800)
+data_kuso = TTLCache(maxsize=1000, ttl=1800)
 webdb = dbname.web
 
 web = {
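Replacing the plain SCRAP_DICT / data_kuso dicts with cachetools.TTLCache (imported in the hunk above) bounds the caches at 1000 entries and expires each entry 1800 seconds after insertion, instead of letting scraped results accumulate for the life of the process. A minimal sketch of the expiry behavior, with the ttl shortened purely for demonstration:

    # Sketch of TTLCache expiry, the behavior the diff switches to.
    import time
    from cachetools import TTLCache

    cache = TTLCache(maxsize=1000, ttl=2)  # demo ttl; the diff uses ttl=1800
    cache["query"] = ["result1", "result2"]
    print(cache.get("query"))  # ['result1', 'result2'] while fresh
    time.sleep(3)
    print(cache.get("query"))  # None: the entry expired after ttl seconds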