File size: 3,599 Bytes
53b9cd0
51b5c1e
 
 
53b9cd0
 
51b5c1e
 
 
 
 
53b9cd0
51b5c1e
 
 
53b9cd0
51b5c1e
 
 
 
 
 
 
53b9cd0
 
51b5c1e
 
 
c21b473
d5681b3
51b5c1e
 
 
 
 
 
 
 
 
 
 
 
 
 
53b9cd0
51b5c1e
 
27b206c
c21b473
53b9cd0
51b5c1e
53b9cd0
 
 
 
 
51b5c1e
 
 
 
 
 
 
 
 
53b9cd0
51b5c1e
 
 
 
53b9cd0
51b5c1e
 
 
 
 
 
 
 
 
53b9cd0
 
 
51b5c1e
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import requests
import shutil
import os
import re

# Searching for the videos
def search_pexels(keyword, api_key, orientation='portrait', size='medium', endpoint='videos', num_pages=50):
    """Query the Pexels search API and return the decoded JSON payload.

    Args:
        keyword: Search term; may contain spaces (extract_keywords joins
            words with spaces), so it is passed as a query parameter and
            URL-encoded by requests.
        api_key: Pexels API key, sent in the Authorization header.
        orientation: One of 'portrait', 'landscape', 'square'.
        size: One of 'medium', 'small', 'large'.
        endpoint: API path segment to search, e.g. 'videos'.
        num_pages: Value for the Pexels 'per_page' parameter (results per page).

    Returns:
        The parsed JSON response as a dict, or None on a non-200 status.

    Raises:
        ValueError: If orientation or size is not an accepted value.
    """
    if orientation not in ['portrait', 'landscape', 'square']:
        raise ValueError("Error! orientation must be one of {'portrait', 'landscape', 'square'}")
    if size not in ['medium', 'small', 'large']:
        raise ValueError("Error! size must be one of ['medium', 'small', 'large']")

    base_url = 'https://api.pexels.com/'
    headers = {'Authorization': api_key}
    url = f'{base_url}{endpoint}/search'
    # Build the query string via params= so keyword (which may contain
    # spaces and punctuation) is percent-encoded correctly, instead of
    # interpolating it raw into the URL.
    params = {
        'query': keyword,
        'per_page': num_pages,
        'orientation': orientation,
        'size': size,
    }

    # timeout prevents the call from hanging indefinitely on a stalled server.
    response = requests.get(url, headers=headers, params=params, timeout=30)
    if response.status_code == 200:
        data = response.json()
        return data
    else:
        print(f'Error: {response.status_code}')
        return None

# Video download function
def download_video(video_data, parent_path, height, width, links, index):
    """Download the first not-yet-downloaded video with an exact height x width match.

    Args:
        video_data: Pexels search payload; must contain a 'videos' list.
        parent_path: Directory the .mp4 file is written into.
        height: Required video file height in pixels (exact match).
        width: Required video file width in pixels (exact match).
        links: IDs of already-downloaded videos; those videos are skipped.
        index: Numeric prefix used in the output file name.

    Returns:
        The downloaded video's id, or None when no suitable file was found.

    Raises:
        requests.HTTPError: If the download request returns an error status.
    """
    for video in video_data['videos']:
        # Skip videos saved by a previous call (dedupe across sentences).
        if video['id'] in links:
            continue

        for video_file in video['video_files']:
            if video_file['height'] == height and video_file['width'] == width:
                video_url = video_file['link']
                file_path = os.path.join(parent_path, f"{index}_{video['id']}.mp4")

                # Stream the body in chunks to avoid holding the whole video
                # in memory; timeout guards against a stalled connection.
                with requests.get(video_url, stream=True, timeout=60) as r:
                    r.raise_for_status()
                    with open(file_path, 'wb') as f:
                        for chunk in r.iter_content(chunk_size=8192):
                            f.write(chunk)
                print(f"Successfully saved video in {file_path}")
                return video['id']
    return None

# Generate videos without LLM dependency
def generate_videos(product, api_key, orientation, height, width):
    """Download one stock video per templated marketing sentence for *product*.

    Creates (recreating if present) a directory named after the product,
    builds five static sentences, extracts keywords from each, searches
    Pexels, and downloads one matching video per sentence.

    Args:
        product: Product name; whitespace-stripped and spaces replaced with
            underscores to form the output directory name.
        api_key: Pexels API key.
        orientation: Video orientation; lowercased before the search call.
        height: Required video height in pixels (exact match).
        width: Required video width in pixels (exact match).

    Returns:
        Tuple of (directory name, list of generated sentences). The sentence
        list is empty if an error occurred before the sentences were built.
    """
    prod = product.strip().replace(" ", "_")
    links = []
    # Initialized before the try block so the return statement below cannot
    # raise UnboundLocalError when directory setup fails early.
    sentences = []

    try:
        # Create directory with the product's name (wipe any stale copy first)
        if os.path.exists(prod):
            shutil.rmtree(prod)
        os.mkdir(prod)

        # Define static sentences based on the product
        sentences = [
            f"Introducing {product}, the ultimate solution for your needs.",
            f"Experience the power of {product} today.",
            f"Why choose {product}? Discover its amazing features.",
            f"Transform your life with {product}.",
            f"Get ready to enjoy {product} like never before."
        ]

        # Generate video for every sentence
        print("Keywords:")
        for i, sentence in enumerate(sentences):
            keyword = extract_keywords(sentence)  # Replace with a simple keyword extraction function
            print(f"{i+1}: {keyword}")

            data = search_pexels(keyword, api_key, orientation.lower())
            if data and 'videos' in data:
                link = download_video(data, prod, height, width, links, i)
                if link:
                    links.append(link)

        print("Success! Videos have been generated.")
    except Exception as e:
        # Best-effort by design: report the failure and still return what we have.
        print("Error! Failed generating videos.")
        print(e)

    return prod, sentences

# Simple keyword extraction function (replace this with a more advanced method if needed)
def extract_keywords(sentence):
    """Return the first three word tokens of *sentence*, space-separated."""
    word_pattern = re.compile(r'\b\w+\b')
    # Strips punctuation by matching word characters only, then keeps the
    # leading three tokens as the search phrase.
    first_three = word_pattern.findall(sentence)[:3]
    return " ".join(first_three)