Mirror of https://github.com/yasirarism/MissKatyPyro.git, synced 2025-12-29 09:44:50 +00:00
Add httpx exception handling for HTTP errors
Signed-off-by: Yasir Aris M <github@yasirdev.my.id>
This commit is contained in:
parent 37f3ceee1e
commit 0fd013b906
5 changed files with 124 additions and 65 deletions
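In short, every outbound fetch.get(...) call touched by this commit is wrapped in a try block, validated with raise_for_status(), and any failure is reported to the user by catching httpx.HTTPError. A minimal standalone sketch of that pattern, assuming a plain httpx.AsyncClient in place of the project's shared fetch client (get_json is an illustrative helper, not a function from the repository):

import asyncio

import httpx

# Sketch only: "fetch" stands in for the bot's shared httpx client.
fetch = httpx.AsyncClient(follow_redirects=True, timeout=10)

async def get_json(url: str):
    try:
        r = await fetch.get(url)
        r.raise_for_status()  # turn 4xx/5xx responses into httpx.HTTPStatusError
    except httpx.HTTPError as exc:  # base class for transport and status errors
        print(f"ERROR: Failed to fetch data from {exc.request.url} - {exc}")
        return None
    return r.json()  # only decode the body once the request is known to be good

async def main():
    data = await get_json("https://yasirapi.eu.org/terbit21")
    if data:
        print(len(data.get("result", [])))
    await fetch.aclose()

asyncio.run(main())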
@@ -42,7 +42,7 @@ MOD_NOLOAD = ["subscene_dl"]
 HELPABLE = {}
 cleanmode = {}
 botStartTime = time.time()
-misskaty_version = "v2.11.2 - Stable"
+misskaty_version = "v2.11.3 - Stable"
 
 uvloop.install()
 faulthandler_enable()
@@ -53,7 +53,7 @@ async def handle_error(
     f_errname = f"crash_{tgl_now.strftime('%d %B %Y')}.txt"
     LOGGER.error(traceback.format_exc())
     with open(f_errname, "w+", encoding="utf-8") as log:
-        log.write(f"✍️ message: {m.text or m.caption}\n\n{traceback.format_exc()}")
+        log.write(f"✍️ Message: {m.text or m.caption}\n👱♂️ User: {m.from_user.id if m.from_user else m.sender_chat.id}\n\n{traceback.format_exc()}")
         log.close()
     if isinstance(m, pyrogram.types.Message):
         with contextlib.suppress(Exception):
@@ -470,7 +470,7 @@ async def demote(client, message, strings):
         umention = (await app.get_users(user_id)).mention
         await message.reply_text(f"Demoted! {umention}")
     except ChatAdminRequired:
-        await message.reply()
+        await message.reply("Please give permission to demote members..")
 
 
 # Pin Messages
@@ -2,6 +2,7 @@
 # * @date 2023-06-21 22:12:27
 # * @projectName MissKatyPyro
 # * Copyright ©YasirPedia All rights reserved
+import httpx
 import json
 import logging
 import re
@@ -138,6 +139,7 @@ async def imdb_search_id(kueri, message):
         r = await fetch.get(
             f"https://v3.sg.media-imdb.com/suggestion/titles/x/{quote_plus(kueri)}.json"
         )
+        r.raise_for_status()
         res = r.json().get("d")
         if not res:
             return await k.edit_caption(
@@ -175,6 +177,8 @@ async def imdb_search_id(kueri, message):
             )
         buttons.add(*BTN)
         await k.edit_caption(msg, reply_markup=buttons)
+    except httpx.HTTPError as exc:
+        await k.edit_caption(f"HTTP Exception for IMDB Search - <code>{exc}</code>", disable_web_page_preview=True)
     except (MessageIdInvalid, MessageNotModified):
         pass
     except Exception as err:
@@ -196,6 +200,7 @@ async def imdb_search_en(kueri, message):
         r = await fetch.get(
             f"https://v3.sg.media-imdb.com/suggestion/titles/x/{quote_plus(kueri)}.json"
         )
+        r.raise_for_status()
         res = r.json().get("d")
         if not res:
             return await k.edit_caption(
@@ -233,6 +238,8 @@ async def imdb_search_en(kueri, message):
             )
         buttons.add(*BTN)
         await k.edit_caption(msg, reply_markup=buttons)
+    except httpx.HTTPError as exc:
+        await k.edit_caption(f"HTTP Exception for IMDB Search - <code>{exc}</code>", disable_web_page_preview=True)
     except (MessageIdInvalid, MessageNotModified):
         pass
     except Exception as err:
@@ -265,6 +272,7 @@ async def imdbcari(_, query: CallbackQuery):
         r = await fetch.get(
             f"https://v3.sg.media-imdb.com/suggestion/titles/x/{quote_plus(kueri)}.json"
         )
+        r.raise_for_status()
         res = r.json().get("d")
         if not res:
             return await query.message.edit_caption(
@@ -297,6 +305,8 @@ async def imdbcari(_, query: CallbackQuery):
             )
         buttons.add(*BTN)
         await query.message.edit_caption(msg, reply_markup=buttons)
+    except httpx.HTTPError as exc:
+        await query.message.edit_caption(f"HTTP Exception for IMDB Search - <code>{exc}</code>", disable_web_page_preview=True)
     except (MessageIdInvalid, MessageNotModified):
         pass
     except Exception as err:
@@ -318,6 +328,7 @@ async def imdbcari(_, query: CallbackQuery):
         r = await fetch.get(
             f"https://v3.sg.media-imdb.com/suggestion/titles/x/{quote_plus(kueri)}.json"
         )
+        r.raise_for_status()
         res = r.json().get("d")
         if not res:
             return await query.message.edit_caption(
@@ -350,6 +361,8 @@ async def imdbcari(_, query: CallbackQuery):
             )
         buttons.add(*BTN)
         await query.message.edit_caption(msg, reply_markup=buttons)
+    except httpx.HTTPError as exc:
+        await query.message.edit_caption(f"HTTP Exception for IMDB Search - <code>{exc}</code>", disable_web_page_preview=True)
     except (MessageIdInvalid, MessageNotModified):
         pass
     except Exception as err:
@@ -367,6 +380,7 @@ async def imdb_id_callback(self: Client, query: CallbackQuery):
         await query.message.edit_caption("⏳ Permintaan kamu sedang diproses.. ")
         imdb_url = f"https://www.imdb.com/title/tt{movie}/"
         resp = await fetch.get(imdb_url)
+        resp.raise_for_status()
         sop = BeautifulSoup(resp, "lxml")
         r_json = json.loads(
             sop.find("script", attrs={"type": "application/ld+json"}).contents[0]
@@ -510,6 +524,8 @@ async def imdb_id_callback(self: Client, query: CallbackQuery):
         await query.message.edit_caption(
             res_str, parse_mode=enums.ParseMode.HTML, reply_markup=markup
         )
+    except httpx.HTTPError as exc:
+        await query.message.edit_caption(f"HTTP Exception for IMDB Search - <code>{exc}</code>", disable_web_page_preview=True)
     except AttributeError:
         await query.message.edit_caption("Maaf, gagal mendapatkan info data dari IMDB.")
     except (MessageNotModified, MessageIdInvalid):
@@ -525,6 +541,7 @@ async def imdb_en_callback(self: Client, query: CallbackQuery):
         await query.message.edit_caption("<i>⏳ Getting IMDb source..</i>")
         imdb_url = f"https://www.imdb.com/title/tt{movie}/"
         resp = await fetch.get(imdb_url)
+        resp.raise_for_status()
         sop = BeautifulSoup(resp, "lxml")
         r_json = json.loads(
             sop.find("script", attrs={"type": "application/ld+json"}).contents[0]
@@ -670,6 +687,8 @@ async def imdb_en_callback(self: Client, query: CallbackQuery):
         await query.message.edit_caption(
             res_str, parse_mode=enums.ParseMode.HTML, reply_markup=markup
         )
+    except httpx.HTTPError as exc:
+        await query.message.edit_caption(f"HTTP Exception for IMDB Search - <code>{exc}</code>", disable_web_page_preview=True)
     except AttributeError:
         await query.message.edit_caption("Sorry, failed getting data from IMDB.")
     except (MessageNotModified, MessageIdInvalid):
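All of the IMDb handlers above share the same flow: query the public suggestion endpoint, call raise_for_status(), then read the "d" list from the JSON body. A standalone sketch of that flow, assuming the endpoint and response shape shown in the diff (search_imdb is an illustrative name, not the plugin's function):

import asyncio
from urllib.parse import quote_plus

import httpx

async def search_imdb(kueri: str):
    # Illustrative helper mirroring imdb_search_id / imdb_search_en.
    async with httpx.AsyncClient(timeout=10) as client:
        try:
            r = await client.get(
                f"https://v3.sg.media-imdb.com/suggestion/titles/x/{quote_plus(kueri)}.json"
            )
            r.raise_for_status()
        except httpx.HTTPError as exc:
            print(f"HTTP Exception for IMDB Search - {exc}")
            return []
        return r.json().get("d") or []  # "d" carries the list of title suggestions

async def main():
    for title in await search_imdb("oppenheimer"):
        print(title.get("id"))

asyncio.run(main())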
@@ -4,6 +4,7 @@
 * @projectName MissKatyPyro
 * Copyright @YasirPedia All rights reserved
 """
+import httpx
 import logging
 import re
 import traceback
@@ -71,19 +72,20 @@ def split_arr(arr, size: 5):
 async def getDataTerbit21(msg, kueri, CurrentPage, strings):
     if not SCRAP_DICT.get(msg.id):
         try:
-            terbitjson = (
-                (await fetch.get(f"{web['yasirapi']}/terbit21?q={kueri}")).json()
-                if kueri
-                else (await fetch.get("https://yasirapi.eu.org/terbit21")).json()
-            )
-        except:
-            await msg.edit_msg(strings("err_getapi"))
+            if kueri:
+                terbitjson = await fetch.get(f"{web['yasirapi']}/terbit21?q={kueri}")
+            else:
+                terbitjson = await fetch.get(f"{web['yasirapi']}/terbit21")
+            terbitjson.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"ERROR: Failed to fetch data from {exc.request.url} - <code>{exc}</code>")
             return None, None
-        if not terbitjson.get("result"):
+        res = terbitjson.json()
+        if not res.get("result"):
             await msg.edit_msg(strings("no_result"), del_in=5)
             return None, None
         SCRAP_DICT.add(
-            msg.id, [split_arr(terbitjson["result"], 6), kueri], timeout=1800
+            msg.id, [split_arr(res["result"], 6), kueri], timeout=1800
         )
         try:
             index = int(CurrentPage - 1)
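Note the ordering in the rewritten getDataTerbit21 (and the lk21/pahe variants that follow): the response is validated with raise_for_status() inside the try block, and .json() is only called afterwards, so a failed request can no longer reach the JSON decode or the "result" lookup. A condensed sketch of just that control flow, with illustrative names:

import httpx

async def fetch_results(client: httpx.AsyncClient, base: str, kueri: str | None):
    # Illustrative condensation of the new getDataTerbit21 fetch block.
    try:
        url = f"{base}/terbit21?q={kueri}" if kueri else f"{base}/terbit21"
        r = await client.get(url)
        r.raise_for_status()  # bail out before touching the body on 4xx/5xx
    except httpx.HTTPError as exc:
        return None, f"ERROR: Failed to fetch data from {exc.request.url} - {exc}"
    res = r.json()  # only reached for successful responses
    return res.get("result"), None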
@@ -113,18 +115,19 @@ async def getDataTerbit21(msg, kueri, CurrentPage, strings):
 async def getDatalk21(msg, kueri, CurrentPage, strings):
     if not SCRAP_DICT.get(msg.id):
         try:
-            lk21json = (
-                (await fetch.get(f"{web['yasirapi']}/lk21?q={kueri}")).json()
-                if kueri
-                else (await fetch.get("https://yasirapi.eu.org/lk21")).json()
-            )
-        except:
-            await msg.edit_msg(strings("err_getapi"))
+            if kueri:
+                lk21json = await fetch.get(f"{web['yasirapi']}/lk21?q={kueri}")
+            else:
+                lk21json = await fetch.get(f"{web['yasirapi']}/lk21")
+            lk21json.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"ERROR: Failed to fetch data from {exc.request.url} - <code>{exc}</code>")
             return None, None
-        if not lk21json.get("result"):
+        res = lk21json.json()
+        if not res.get("result"):
             await msg.edit_msg(strings("no_result"), del_in=5)
             return None, None
-        SCRAP_DICT.add(msg.id, [split_arr(lk21json["result"], 6), kueri], timeout=1800)
+        SCRAP_DICT.add(msg.id, [split_arr(res["result"], 6), kueri], timeout=1800)
         try:
             index = int(CurrentPage - 1)
             PageLen = len(SCRAP_DICT[msg.id][0])
@@ -153,14 +156,19 @@ async def getDatalk21(msg, kueri, CurrentPage, strings):
 async def getDataPahe(msg, kueri, CurrentPage, strings):
     if not SCRAP_DICT.get(msg.id):
         try:
-            pahejson = (await fetch.get(f"{web['yasirapi']}/pahe?q={kueri}")).json()
-        except:
-            await msg.edit_msg(strings("err_getapi"))
+            if kueri:
+                pahejson = await fetch.get(f"{web['yasirapi']}/pahe?q={kueri}")
+            else:
+                pahejson = await fetch.get(f"{web['yasirapi']}/pahe")
+            pahejson.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"ERROR: Failed to fetch data from {exc.request.url} - <code>{exc}</code>")
             return None, None
-        if not pahejson.get("result"):
+        res = pahejson.json()
+        if not res.get("result"):
             await msg.edit_msg(strings("no_result"), del_in=5)
             return None, None
-        SCRAP_DICT.add(msg.id, [split_arr(pahejson["result"], 6), kueri], timeout=1800)
+        SCRAP_DICT.add(msg.id, [split_arr(res["result"], 6), kueri], timeout=1800)
         try:
             index = int(CurrentPage - 1)
             PageLen = len(SCRAP_DICT[msg.id][0])
@@ -185,11 +193,13 @@ async def getDataPahe(msg, kueri, CurrentPage, strings):
 async def getDataKuso(msg, kueri, CurrentPage, user, strings):
     if not SCRAP_DICT.get(msg.id):
         kusodata = []
-        data = await fetch.get(
-            f"{web['kusonime']}/?s={kueri}", follow_redirects=True
-        )
-        if data.status_code != 200:
-            await msg.edit_msg(strings("err_getweb").format(err=err))
+        try:
+            data = await fetch.get(
+                f"{web['kusonime']}/?s={kueri}", follow_redirects=True
+            )
+            data.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"ERROR: Failed to fetch data from {exc.request.url} - <code>{exc}</code>", disable_web_page_preview=True)
             return None, 0, None, None
         res = BeautifulSoup(data, "lxml").find_all("h2", {"class": "episodeye"})
         for i in res:
@@ -237,11 +247,13 @@ async def getDataKuso(msg, kueri, CurrentPage, user, strings):
 async def getDataMovieku(msg, kueri, CurrentPage, strings):
     if not SCRAP_DICT.get(msg.id):
         moviekudata = []
-        data = await fetch.get(
-            f"{web['movieku']}/?s={kueri}", follow_redirects=True
-        )
-        if data.status_code != 200:
-            await msg.edit_msg(strings("err_getweb").format(err=err))
+        try:
+            data = await fetch.get(
+                f"{web['movieku']}/?s={kueri}", follow_redirects=True
+            )
+            data.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"ERROR: Failed to fetch data from {exc.request.url} - <code>{exc}</code>")
             return None, None
         r = BeautifulSoup(data, "lxml")
         res = r.find_all(class_="bx")
@@ -277,11 +289,13 @@ async def getDataMovieku(msg, kueri, CurrentPage, strings):
 async def getDataNodrakor(msg, kueri, CurrentPage, user, strings):
     if not SCRAP_DICT.get(msg.id):
         nodrakordata = []
-        data = await fetch.get(
-            f"{web['nodrakor']}/?s={kueri}", follow_redirects=True,
-        )
-        if data.status_code != 200:
-            await msg.edit_msg(strings("err_getweb").format(err=err))
+        try:
+            data = await fetch.get(
+                f"{web['nodrakor']}/?s={kueri}", follow_redirects=True,
+            )
+            data.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"HTTP Exception for {exc.request.url} - <code>{exc}</code>", disable_web_page_preview=True)
             return None, 0, None
         text = BeautifulSoup(data, "lxml")
         entry = text.find_all(class_="entry-header")
@@ -327,11 +341,13 @@ async def getDataNodrakor(msg, kueri, CurrentPage, user, strings):
 async def getDataSavefilm21(msg, kueri, CurrentPage, user, strings):
     if not SCRAP_DICT.get(msg.id):
         sfdata = []
-        data = await fetch.get(
-            f"{web['savefilm21']}/?s={kueri}", follow_redirects=True,
-        )
-        if data.status_code != 200:
-            await msg.edit_msg(strings("err_getweb").format(err=err))
+        try:
+            data = await fetch.get(
+                f"{web['savefilm21']}/?s={kueri}", follow_redirects=True,
+            )
+            data.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"HTTP Exception for {exc.request.url} - <code>{exc}</code>", disable_web_page_preview=True)
             return None, 0, None
         text = BeautifulSoup(data, "lxml")
         entry = text.find_all(class_="entry-header")
@@ -376,14 +392,16 @@ async def getDataSavefilm21(msg, kueri, CurrentPage, user, strings):
 # Lendrive GetData
 async def getDataLendrive(msg, kueri, CurrentPage, user, strings):
     if not SCRAP_DICT.get(msg.id):
-        if kueri:
-            data = await fetch.get(
-                f"{web['lendrive']}/?s={kueri}", follow_redirects=True,
-            )
-        else:
-            data = await fetch.get(web["lendrive"], follow_redirects=True)
-        if data.status_code != 200:
-            await msg.edit_msg(strings("err_getweb").format(err=err))
+        try:
+            if kueri:
+                data = await fetch.get(
+                    f"{web['lendrive']}/?s={kueri}", follow_redirects=True,
+                )
+            else:
+                data = await fetch.get(web["lendrive"], follow_redirects=True)
+            data.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"ERROR: Failed to fetch data from {exc.request.url} - <code>{exc}</code>", disable_web_page_preview=True)
             return None, 0, None
         res = BeautifulSoup(data, "lxml")
         lenddata = []
@@ -434,11 +452,13 @@ async def getDataLendrive(msg, kueri, CurrentPage, user, strings):
 # MelongMovie GetData
 async def getDataMelong(msg, kueri, CurrentPage, user, strings):
     if not SCRAP_DICT.get(msg.id):
-        data = await fetch.get(
-            f"{web['melongmovie']}/?s={kueri}", follow_redirects=True,
-        )
-        if data.status_code != 200:
-            await msg.edit_msg(strings("err_getweb").format(err=err))
+        try:
+            data = await fetch.get(
+                f"{web['melongmovie']}/?s={kueri}", follow_redirects=True,
+            )
+            data.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"HTTP Exception for {exc.request.url} - <code>{exc}</code>", disable_web_page_preview=True)
             return None, 0, None
         bs4 = BeautifulSoup(data, "lxml")
         melongdata = []
@@ -482,11 +502,13 @@ async def getDataMelong(msg, kueri, CurrentPage, user, strings):
 # GoMov GetData
 async def getDataGomov(msg, kueri, CurrentPage, user, strings):
     if not SCRAP_DICT.get(msg.id):
-        gomovv = await fetch.get(
-            f"{web['gomov']}/?s={kueri}", follow_redirects=True
-        )
-        if gomovv.status_code != 200:
-            await msg.edit_msg(strings("err_getweb").format(err=err))
+        try:
+            gomovv = await fetch.get(
+                f"{web['gomov']}/?s={kueri}", follow_redirects=True
+            )
+            gomovv.raise_for_status()
+        except httpx.HTTPError as exc:
+            await msg.edit_msg(f"ERROR: Failed to fetch data from {exc.request.url} - <code>{exc}</code>", disable_web_page_preview=True)
             return None, 0, None
         text = BeautifulSoup(gomovv, "lxml")
         entry = text.find_all(class_="entry-header")
@@ -542,7 +564,7 @@ async def getSame(msg, query, current_page, strings):
         else:
             data = cfse.get(web["samehadaku"])
         if data.status_code != 200:
-            await msg.edit_msg(strings("err_getweb").format(err=err))
+            await msg.edit_msg(strings("err_getweb").format(err=data.status_code))
             return None, None
         res = BeautifulSoup(data.text, "lxml").find_all(class_="animposx")
         sdata = []
@@ -1348,12 +1370,15 @@ async def savefilm21_scrap(_, callback_query, strings):
     )
     try:
         html = await fetch.get(link)
+        html.raise_for_status()
         soup = BeautifulSoup(html.text, "lxml")
         res = soup.find_all(class_="button button-shadow")
         res = "".join(f"{i.text}\n{i['href']}\n\n" for i in res)
         await callback_query.message.edit_msg(
             strings("res_scrape").format(link=link, kl=res), reply_markup=keyboard
         )
+    except httpx.HTTPError as exc:
+        await callback_query.message.edit_msg(f"HTTP Exception for {exc.request.url} - <code>{exc}</code>", reply_markup=keyboard)
     except Exception as err:
         await callback_query.message.edit_msg(f"ERROR: {err}", reply_markup=keyboard)
 
@@ -1384,6 +1409,7 @@ async def nodrakorddl_scrap(_, callback_query, strings):
     )
     try:
         html = await fetch.get(link)
+        html.raise_for_status()
         soup = BeautifulSoup(html.text, "lxml")
         if "/tv/" in link:
             result = soup.find("div", {"entry-content entry-content-single"}).find_all("p")
@@ -1404,6 +1430,8 @@ async def nodrakorddl_scrap(_, callback_query, strings):
         await callback_query.message.edit_msg(
             strings("res_scrape").format(link=link, kl=res), reply_markup=keyboard
         )
+    except httpx.HTTPError as exc:
+        await callback_query.message.edit_msg(f"HTTP Exception for {exc.request.url} - <code>{exc}</code>", reply_markup=keyboard)
     except Exception as err:
         await callback_query.message.edit_msg(f"ERROR: {err}", reply_markup=keyboard)
 
@@ -1415,6 +1443,7 @@ async def muviku_scrap(_, message, strings):
     try:
         link = message.text.split(maxsplit=1)[1]
         html = await fetch.get(link)
+        html.raise_for_status()
         soup = BeautifulSoup(html.text, "lxml")
         res = soup.find_all(class_="smokeurl")
         data = []
@@ -1432,6 +1461,8 @@ async def muviku_scrap(_, message, strings):
         return await message.reply(
             strings("invalid_cmd_scrape").format(cmd=message.command[0])
         )
+    except httpx.HTTPError as exc:
+        await message.reply(f"HTTP Exception for {exc.request.url} - <code>{exc}</code>")
     except Exception as e:
         await message.reply(f"ERROR: {str(e)}")
 
@@ -1462,6 +1493,7 @@ async def melong_scrap(_, callback_query, strings):
     )
     try:
         html = await fetch.get(link)
+        html.raise_for_status()
         soup = BeautifulSoup(html.text, "lxml")
         rep = ""
         for ep in soup.findAll(text=re.compile(r"(?i)episode\s+\d+|LINK DOWNLOAD")):
@@ -1471,6 +1503,8 @@ async def melong_scrap(_, callback_query, strings):
         await callback_query.message.edit_msg(
             strings("res_scrape").format(link=link, kl=rep), reply_markup=keyboard
         )
+    except httpx.HTTPError as exc:
+        await callback_query.message.edit_msg(f"HTTP Exception for {exc.request.url} - <code>{exc}</code>", reply_markup=keyboard)
     except Exception as err:
         await callback_query.message.edit_msg(f"ERROR: {err}", reply_markup=keyboard)
 
@@ -1501,6 +1535,7 @@ async def gomov_dl(_, callback_query, strings):
     )
     try:
         html = await fetch.get(link)
+        html.raise_for_status()
         soup = BeautifulSoup(html.text, "lxml")
         entry = soup.find(class_="gmr-download-wrap clearfix")
         hasil = soup.find(class_="title-download").text
@@ -1511,6 +1546,8 @@ async def gomov_dl(_, callback_query, strings):
         await callback_query.message.edit_msg(
             strings("res_scrape").format(link=link, kl=hasil), reply_markup=keyboard
         )
+    except httpx.HTTPError as exc:
+        await callback_query.message.edit_msg(f"HTTP Exception for {exc.request.url} - <code>{exc}</code>", reply_markup=keyboard)
     except Exception as err:
         await callback_query.message.edit_msg(f"ERROR: {err}", reply_markup=keyboard)
 
@@ -1538,6 +1575,7 @@ async def lendrive_dl(_, callback_query, strings):
     )
     try:
         hmm = await fetch.get(link)
+        hmm.raise_for_status()
         q = BeautifulSoup(hmm.text, "lxml")
         j = q.findAll("div", class_="soraurlx")
         kl = ""
@@ -1551,5 +1589,7 @@ async def lendrive_dl(_, callback_query, strings):
         await callback_query.message.edit_msg(
             strings("res_scrape").format(link=link, kl=kl), reply_markup=keyboard
         )
+    except httpx.HTTPError as exc:
+        await callback_query.message.edit_msg(f"HTTP Exception for {exc.request.url} - <code>{exc}</code>", reply_markup=keyboard)
     except Exception as err:
         await callback_query.message.edit_msg(f"ERROR: {err}", reply_markup=keyboard)
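The scrape callbacks above (savefilm21_scrap, nodrakorddl_scrap, muviku_scrap, melong_scrap, gomov_dl, lendrive_dl) all gained the same guard: raise_for_status() right after fetch.get(link), with httpx.HTTPError reported back to the chat before any parsing happens. A minimal standalone sketch of that fetch-validate-parse shape, reusing the selector from savefilm21_scrap (scrape_links is an illustrative name):

import asyncio

import httpx
from bs4 import BeautifulSoup  # requires beautifulsoup4 and lxml

async def scrape_links(link: str) -> str:
    # Illustrative helper mirroring savefilm21_scrap: fetch, validate, then parse.
    async with httpx.AsyncClient(follow_redirects=True, timeout=10) as client:
        try:
            html = await client.get(link)
            html.raise_for_status()
        except httpx.HTTPError as exc:
            return f"HTTP Exception for {exc.request.url} - {exc}"
    soup = BeautifulSoup(html.text, "lxml")
    rows = soup.find_all(class_="button button-shadow")
    return "".join(f"{i.text}\n{i['href']}\n\n" for i in rows)

if __name__ == "__main__":
    print(asyncio.run(scrape_links("https://example.com/")))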