File size: 1,572 Bytes
aff0a09
 
5ac7dcf
dc61d50
 
 
5ac7dcf
 
 
aff0a09
dc61d50
aff0a09
 
 
 
 
 
 
 
 
dc61d50
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import os
import shutil
import time
import httpx
from bs4 import BeautifulSoup


def get_file_name(prefix: str) -> str:
	"""Build a timestamped ``.mp4`` file name.

	The result is ``<prefix>_<YYYYmmdd-HHMMSS>.mp4`` using the current
	local time, so successive calls yield unique, sortable names.
	"""
	stamp = time.strftime('%Y%m%d-%H%M%S')
	return prefix + '_' + stamp + '.mp4'


def remove_content_from_dir(folder):
	"""Empty *folder* without removing the folder itself.

	Regular files and symlinks are unlinked; subdirectories are removed
	recursively. Deletion is best-effort: a failure on one entry is
	printed and the remaining entries are still processed.
	"""
	for entry in os.listdir(folder):
		target = os.path.join(folder, entry)
		try:
			if os.path.isfile(target) or os.path.islink(target):
				os.unlink(target)
			elif os.path.isdir(target):
				shutil.rmtree(target)
			# other entry types (e.g. FIFOs) are intentionally skipped
		except Exception as exc:
			print('Failed to delete %s. Reason: %s' % (target, exc))


async def rumble_link_scraper(rumble_link):
	"""Resolve a direct download URL for *rumble_link* via imgpanda's AJAX API.

	Posts the Rumble URL to the imgpanda downloader endpoint, parses the
	HTML fragment in the response, and returns the ``href`` of the last
	``<a>`` tag found (with JS-escape backslashes and quotes stripped).

	Returns ``None`` when the request fails, the response contains no
	``<a>`` tags, or the last tag has no ``href`` attribute; each failure
	is reported via ``print``.
	"""
	url = "https://imgpanda.com/wp-admin/admin-ajax.php"
	downloader_type = "Rumble"
	server_name = "Server2"

	headers = {
		"Content-Type": "application/x-www-form-urlencoded",
	}

	data = {
		"action": "ajax_call_custom",
		"SocialDownUrl": rumble_link,
		"downloaderType": downloader_type,
		"serverName": server_name,
	}

	async with httpx.AsyncClient() as client:
		response = await client.post(url, headers=headers, data=data)

	if response.status_code != 200:
		print(f"Error: {response.status_code}")
		return None

	# The endpoint wraps a JS-escaped HTML fragment between a leading and a
	# trailing token; drop those tokens before parsing the inner markup.
	inner_html_string = ' '.join(response.text.split(' ')[1:-1])
	inner_soup = BeautifulSoup(inner_html_string, 'html.parser')

	# all a-tags within the class "social-download-result"
	a_tags = inner_soup.select('a')
	# Guard BEFORE indexing: the original a_tags[-1] raised IndexError on an
	# empty result list, so its "no tag found" branch was unreachable.
	if not a_tags:
		print('No matching <a> tag found within the class "social-download-result"')
		return None

	href = a_tags[-1].get('href')
	if href is None:
		print('No matching <a> tag found within the class "social-download-result"')
		return None
	# Strip JS string-escaping artifacts from the embedded URL.
	return href.replace('\\', '').replace('"', '')