Changed cfscrape to cloudscraper

Mirror of https://github.com/yasirarism/MissKatyPyro.git
Commit 511f33260e (parent 4b6458fff9)
5 changed files with 11 additions and 11 deletions
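The change is a near drop-in swap: cloudscraper exposes the same create_scraper() factory as cfscrape and returns a requests.Session-compatible object, so each call site below only needs its import and constructor updated. A minimal sketch of the equivalence, assuming cloudscraper is installed (the URL is illustrative):

# Before: import cfscrape; scraper = cfscrape.create_scraper()
import cloudscraper

scraper = cloudscraper.create_scraper()    # requests.Session-like object
resp = scraper.get("https://example.com")  # illustrative URL
print(resp.status_code)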
@@ -1,9 +1,9 @@
-import cfscrape
+import cloudscraper
 from bs4 import BeautifulSoup


 async def down_page(url):
-    f = cfscrape.create_scraper()
+    f = cloudscraper.create_scraper()
     resp = f.get(url).text
     soup = BeautifulSoup(resp, "lxml")
     maindiv = soup.body.find("div", class_="subtitle").find("div", class_="top left")
@@ -7,7 +7,7 @@ import html
 import pickle
 import json
 import traceback
-import cfscrape
+import cloudscraper
 import aiohttp
 from datetime import datetime
 from shutil import disk_usage
@@ -212,7 +212,7 @@ async def cmd_eval(self: Client, ctx: Message, strings) -> Optional[str]:
         "re": re,
         "os": os,
         "asyncio": asyncio,
-        "cfscrape": cfscrape,
+        "cloudscraper": cloudscraper,
         "json": json,
         "aiohttp": aiohttp,
         "print": _print,
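For context, the hunk above updates the namespace dict that the bot's eval command hands to evaluated snippets: only names listed in that dict are visible to user code, so the key has to follow the import rename. A minimal sketch of the pattern with illustrative names (not the plugin's actual helper):

import asyncio
import json

import cloudscraper

# Names listed here become visible to code run through the eval command.
env = {
    "asyncio": asyncio,
    "cloudscraper": cloudscraper,
    "json": json,
}

# Evaluated snippets can now reference cloudscraper directly:
exec("scraper = cloudscraper.create_scraper(); print(type(scraper).__name__)", env)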
@@ -2,7 +2,7 @@ import asyncio
 import logging
 import os

-import cfscrape
+import cloudscraper
 from bs4 import BeautifulSoup
 from pykeyboard import InlineButton, InlineKeyboard
 from pyrogram import Client, filters
@@ -24,7 +24,7 @@ SUB_DL_DICT = {}
 async def getTitleSub(msg, kueri, CurrentPage, user):
     if not SUB_TITLE_DICT.get(msg.id):
         sdata = []
-        scraper = cfscrape.create_scraper()
+        scraper = cloudscraper.create_scraper()
         param = {"query": kueri}
         r = scraper.post("https://subscene.com/subtitles/searchbytitle", data=param).text
         soup = BeautifulSoup(r, "lxml")
@@ -61,7 +61,7 @@ async def getTitleSub(msg, kueri, CurrentPage, user):
 async def getListSub(msg, link, CurrentPage, user):
     if not SUB_DL_DICT.get(msg.id):
         sdata = []
-        scraper = cfscrape.create_scraper()
+        scraper = cloudscraper.create_scraper()
         kuki = {"LanguageFilter": "13,44,50"}  # Filter to English, Malay, and Indonesian subtitles only
         r = scraper.get(link, cookies=kuki).text
         soup = BeautifulSoup(r, "lxml")
@@ -194,7 +194,7 @@ async def dlsub_callback(self: Client, callback_query: CallbackQuery):
         await callback_query.answer("Invalid callback data, please send CMD again..")
         await asyncio.sleep(3)
         return await callback_query.message.delete_msg()
-    scraper = cfscrape.create_scraper()
+    scraper = cloudscraper.create_scraper()
     res = await down_page(link)
     dl = scraper.get(res.get("download_url"))
     f = open(f"{title}.zip", mode="wb").write(dl.content)
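A side note on the last context line above: f = open(f"{title}.zip", mode="wb").write(dl.content) binds the byte count returned by write() and leaves closing the file to the garbage collector. A context manager is the idiomatic form; a sketch (not part of this commit), reusing title and dl from the hunk:

# Idiomatic rewrite of the download line; closes the file deterministically.
with open(f"{title}.zip", mode="wb") as f:
    f.write(dl.content)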
@@ -6,7 +6,7 @@
 """
 import re
 import logging
-import cfscrape
+import cloudscraper
 from bs4 import BeautifulSoup
 from pykeyboard import InlineKeyboard, InlineButton
 from pyrogram import filters, Client
@@ -384,7 +384,7 @@ async def getDataGomov(msg, kueri, CurrentPage, user, strings):
 # getData samehada
 async def getSame(msg, query, current_page, strings):
     if not SCRAP_DICT.get(msg.id):
-        cfse = cfscrape.CloudflareScraper()
+        cfse = cloudscraper.create_scraper()
     try:
         if query:
             data = cfse.get(f"{web['samehadaku']}/?s={query}", headers=headers)
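This is the only call site that instantiated cfscrape.CloudflareScraper() directly instead of going through the factory; the commit normalizes it to cloudscraper.create_scraper(), which is cloudscraper's documented entry point and also accepts tuning options such as a browser profile. A sketch of that option (shown as a possibility, not something this commit adds):

import cloudscraper

# create_scraper() returns a configured CloudScraper session; the browser
# profile below is optional and only illustrates the available tuning.
cfse = cloudscraper.create_scraper(
    browser={"browser": "chrome", "platform": "windows", "mobile": False}
)
resp = cfse.get("https://example.com")  # illustrative URL
print(resp.status_code)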
@@ -26,5 +26,5 @@ deep-translator
 telethon
 pyrate_limiter
 cachetools
-cfscrape
+cloudscraper
 openai