# components/pexels.py — updated by TDN-M (commit 51b5c1e, verified)
import requests
import shutil
import os
import re
# Searching for the videos
def search_pexels(keyword, api_key, orientation='portrait', size='medium', endpoint='videos', num_pages=50):
    """Query the Pexels search API and return the decoded JSON payload.

    Args:
        keyword: Search term. Passed via ``params`` so it is URL-encoded.
        api_key: Pexels API key, sent in the ``Authorization`` header.
        orientation: One of 'portrait', 'landscape', 'square'.
        size: One of 'medium', 'small', 'large'.
        endpoint: API endpoint under api.pexels.com (e.g. 'videos').
        num_pages: Results per page requested from the API.

    Returns:
        dict parsed from the JSON response on HTTP 200, otherwise None
        (the failing status code is printed).

    Raises:
        ValueError: If ``orientation`` or ``size`` is not an accepted value.
    """
    if orientation not in ['portrait', 'landscape', 'square']:
        raise ValueError("Error! orientation must be one of {'portrait', 'landscape', 'square'}")
    if size not in ['medium', 'small', 'large']:
        raise ValueError("Error! size must be one of ['medium', 'small', 'large']")
    base_url = 'https://api.pexels.com/'
    headers = {'Authorization': api_key}
    # Let requests build the query string: the original hand-built f-string
    # URL broke on keywords containing spaces or special characters.
    params = {
        'query': keyword,
        'per_page': num_pages,
        'orientation': orientation,
        'size': size,
    }
    response = requests.get(f'{base_url}{endpoint}/search', headers=headers, params=params)
    if response.status_code == 200:
        return response.json()
    print(f'Error: {response.status_code}')
    return None
# Video download function
def download_video(video_data, parent_path, height, width, links, index):
    """Download the first clip in *video_data* matching the requested size.

    Clips whose id already appears in *links* are skipped. The chosen
    rendition is streamed to ``parent_path/{index}_{clip id}.mp4``.

    Returns:
        The downloaded clip's id, or None when no new clip matched.
    """
    for clip in video_data['videos']:
        # Skip clips that were downloaded on an earlier call.
        if clip['id'] in links:
            continue
        renditions = [vf for vf in clip['video_files']
                      if vf['height'] == height and vf['width'] == width]
        for rendition in renditions:
            target = os.path.join(parent_path, f"{index}_{clip['id']}.mp4")
            # Stream the video to avoid memory issues
            with requests.get(rendition['link'], stream=True) as resp:
                resp.raise_for_status()
                with open(target, 'wb') as out:
                    for chunk in resp.iter_content(chunk_size=8192):
                        out.write(chunk)
            print(f"Successfully saved video in {target}")
            return clip['id']
    return None
# Generate videos without LLM dependency
def generate_videos(product, api_key, orientation, height, width):
    """Create a directory of Pexels clips for *product*, one per stock sentence.

    Recreates a directory named after the product (spaces replaced with
    underscores), extracts keywords from five templated sentences, searches
    Pexels for each, and downloads one matching video per sentence.

    Args:
        product: Product name used in the sentences and the directory name.
        api_key: Pexels API key.
        orientation: Video orientation; lower-cased before the search.
        height: Required video height in pixels (exact match).
        width: Required video width in pixels (exact match).

    Returns:
        (directory_name, sentences). On failure the error is printed and the
        same tuple is still returned; the directory may be incomplete.
    """
    prod = product.strip().replace(" ", "_")
    links = []
    # Define static sentences based on the product.
    # Built BEFORE the try-block: the original defined this inside the try,
    # so an early failure (e.g. os.mkdir) made the final `return` raise
    # UnboundLocalError on `sentences`.
    sentences = [
        f"Introducing {product}, the ultimate solution for your needs.",
        f"Experience the power of {product} today.",
        f"Why choose {product}? Discover its amazing features.",
        f"Transform your life with {product}.",
        f"Get ready to enjoy {product} like never before."
    ]
    try:
        # Create directory with the product's name (fresh on every run)
        if os.path.exists(prod):
            shutil.rmtree(prod)
        os.mkdir(prod)
        # Generate video for every sentence
        print("Keywords:")
        for i, sentence in enumerate(sentences):
            keyword = extract_keywords(sentence)  # Replace with a simple keyword extraction function
            print(f"{i+1}: {keyword}")
            data = search_pexels(keyword, api_key, orientation.lower())
            if data and 'videos' in data:
                link = download_video(data, prod, height, width, links, i)
                if link:
                    links.append(link)
        print("Success! Videos have been generated.")
    except Exception as e:
        print("Error! Failed generating videos.")
        print(e)
    return prod, sentences
# Simple keyword extraction function (replace this with a more advanced method if needed)
def extract_keywords(sentence):
    """Naive keyword extraction: the first three word tokens, space-joined.

    Tokens are the ``\\b\\w+\\b`` matches in *sentence*, so punctuation is
    dropped (e.g. "needs." yields "needs").
    """
    tokens = re.findall(r'\b\w+\b', sentence)
    head = tokens[:3]
    return " ".join(head)