From 4e50e99ded8919acd3c15b69026eb7cbea10df6e Mon Sep 17 00:00:00 2001
From: Yasir Aris M
Date: Tue, 24 Sep 2024 08:14:15 +0700
Subject: [PATCH] Fixing

---
 misskaty/plugins/web_scraper.py | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/misskaty/plugins/web_scraper.py b/misskaty/plugins/web_scraper.py
index c6619183..83aa4485 100644
--- a/misskaty/plugins/web_scraper.py
+++ b/misskaty/plugins/web_scraper.py
@@ -1452,15 +1452,22 @@ async def muviku_scrap(_, message, strings):
         soup = BeautifulSoup(html.text, "lxml")
         res = soup.find_all(class_="smokeurl")
         data = []
-        for i in res:
-            for b in range(len(i.find_all("a"))):
-                link = i.find_all("a")[b]["href"]
-                kualitas = i.find_all("a")[b].text
-                # print(f"{kualitas}\n{link
-                data.append({"link": link, "kualitas": kualitas})
+        for div in res:
+            paragraphs = div.find_all('p')
+            for p in paragraphs:
+                resolution = p.find('strong').text
+                links = p.find_all('a')
+                for link in links:
+                    href = link.get('href')
+                    title = link.text
+                    data.append({
+                        "resolusi": resolution,
+                        "link": href,
+                        "title": title,
+                    })
         if not data:
             return await message.reply(strings("no_result"))
-        res = "".join(f"Host: {i['kualitas']}\n\n" for i in data)
+        res = "".join(f"Host: {i['resolusi']} {i['title']}\n\n" for i in data)
         await message.reply_msg(res)
     except MessageTooLong:
         url = await post_to_telegraph(False, link, res.replace("\n", "<br>"))