TDN-M committed on
Commit
51b5c1e
·
verified ·
1 Parent(s): e75687d

Update components/pexels.py

Browse files
Files changed (1) hide show
  1. components/pexels.py +66 -58
components/pexels.py CHANGED
@@ -1,81 +1,89 @@
1
  import requests
2
- import shutil,os,re
 
 
3
 
4
# Searching for the videos
def search_pexels(keyword, api_key, orientation='potrait', size='medium', endpoint='videos', num_pages=50):
    """Query the Pexels search API and return the decoded JSON payload.

    Parameters:
        keyword: search term sent as the API query.
        api_key: Pexels API key, sent in the Authorization header.
        orientation: 'portrait', 'landscape' or 'square'. The legacy
            misspelling 'potrait' (the historical default) is still
            accepted for backward compatibility and normalized.
        size: one of 'small', 'medium', 'large'.
        endpoint: API endpoint, e.g. 'videos'.
        num_pages: value for the API's per_page parameter.

    Returns:
        The parsed JSON response on HTTP 200, otherwise None.

    Raises:
        ValueError: when orientation or size is not an accepted value.
    """
    # Bug fix: the old default 'potrait' was forwarded verbatim to the API,
    # which only understands 'portrait'. Normalize it before validating.
    if orientation == 'potrait':
        orientation = 'portrait'

    if orientation not in ['portrait', 'landscape', 'square']:
        raise ValueError("Error! orientation must be one of {'square', 'landscape', 'portrait'}")

    if size not in ['medium', 'small', 'large']:
        raise ValueError("Error! size must be one of ['medium', 'small', 'large']")

    base_url = 'https://api.pexels.com/'
    headers = {
        'Authorization': f'{api_key}'
    }

    url = f'{base_url}{endpoint}/search?query={keyword}&per_page={num_pages}&orientation={orientation}&size={size}'

    response = requests.get(url, headers=headers)

    # Check if request was successful (status code 200)
    if response.status_code == 200:
        data = response.json()
        return data
    else:
        print(f'Error: {response.status_code}')
        # Explicit None so callers can distinguish failure deliberately.
        return None
30
 
 
 
 
 
 
 
 
31
 
32
# Video download function
def download_video(data, parent_path, height, width, links, i):
    """Download the first not-yet-saved video whose file matches the
    requested height/width.

    Parameters:
        data: Pexels search response (dict with a 'videos' list).
        parent_path: directory where the .mp4 file is written.
        height, width: exact dimensions the video file must have.
        links: ids of videos already downloaded; these are skipped.
        i: sequence index used as the filename prefix.

    Returns:
        The id of the downloaded video, or None when nothing matched.
    """
    for x in data['videos']:
        if x['id'] in links:
            continue

        vid = x['video_files']
        for v in vid:
            if v['height'] == height and v['width'] == width:
                file_path = os.path.join(parent_path, str(i) + '_' + str(v['id'])) + '.mp4'
                # Bug fix: stream to disk instead of buffering the entire
                # video in memory via .content; also use the conventional
                # 'wb' mode spelling.
                with requests.get(v['link'], stream=True) as r:
                    r.raise_for_status()
                    with open(file_path, 'wb') as f:
                        for chunk in r.iter_content(chunk_size=8192):
                            f.write(chunk)
                print("Successfully saved video in", file_path)
                return x['id']
    # No unseen video matched the requested dimensions.
    return None
45
-
 
 
 
 
 
 
46
 
47
# Utilizing the LLMs to find the relevant videos
def generate_videos(product, api_key, orientation, height, width, llm_chain=None, sum_llm_chain=None):
    """Generate product videos: split LLM output into sentences, derive a
    keyword per sentence, and download one matching Pexels clip each.

    Parameters:
        product: product name; also used (underscored) as the output dir.
        api_key: Pexels API key passed through to search_pexels.
        orientation: video orientation, lowercased before the API call.
        height, width: exact dimensions forwarded to download_video.
        llm_chain: chain whose .run() turns the product into sentences.
        sum_llm_chain: chain whose .run() summarizes a sentence to a keyword.

    Returns:
        (prod, sentences): the output directory name and the sentence list
        (empty when generation failed before sentences were produced).
    """
    prod = product.strip().replace(" ", "_")
    links = []
    # Bug fix: initialize before the try-block. Previously, a failure in
    # llm_chain.run left `sentences` unbound and the final return raised
    # NameError instead of reporting the handled error.
    sentences = []
    try:
        # Split the paragraph by sentences
        raw = llm_chain.run(product.strip())
        print('Sentence :', raw)

        # Split on numbered-list markers ("1.", "2.", ...) emitted by the
        # LLM; very short fragments are noise and are dropped.
        sentences = [x.strip() for x in re.split(r'\d+\.', raw) if len(x) > 6]

        # Create directory with the product's name
        if os.path.exists(prod):
            shutil.rmtree(prod)
        os.mkdir(prod)

        # Generate video for every sentence
        print("Keyword :")
        for i, s in enumerate(sentences):
            keyword = sum_llm_chain.run(s)
            print(i + 1, ":", keyword)
            data = search_pexels(keyword, api_key, orientation.lower())
            link = download_video(data, prod, height, width, links, i)
            links.append(link)

        print("Success! videos has been generated")
    except Exception as e:
        print("Error! Failed generating videos")
        print(e)

    return prod, sentences
81
-
 
 
 
 
 
 
1
  import requests
2
+ import shutil
3
+ import os
4
+ import re
5
 
6
# Searching for the videos
def search_pexels(keyword, api_key, orientation='portrait', size='medium', endpoint='videos', num_pages=50):
    """Query the Pexels search API and return the decoded JSON payload.

    Parameters:
        keyword: search term sent as the API 'query' parameter.
        api_key: Pexels API key, sent in the Authorization header.
        orientation: 'portrait', 'landscape' or 'square'.
        size: one of 'small', 'medium', 'large'.
        endpoint: API endpoint, e.g. 'videos'.
        num_pages: value for the API's per_page parameter.

    Returns:
        The parsed JSON response on HTTP 200, otherwise None.

    Raises:
        ValueError: when orientation or size is not an accepted value.
    """
    if orientation not in ['portrait', 'landscape', 'square']:
        raise ValueError("Error! orientation must be one of {'portrait', 'landscape', 'square'}")
    if size not in ['medium', 'small', 'large']:
        raise ValueError("Error! size must be one of ['medium', 'small', 'large']")

    base_url = 'https://api.pexels.com/'
    headers = {'Authorization': api_key}
    # Bug fix: let requests build the query string so the keyword is
    # URL-encoded — interpolating it into an f-string produced malformed
    # URLs for keywords containing spaces or special characters.
    params = {
        'query': keyword,
        'per_page': num_pages,
        'orientation': orientation,
        'size': size,
    }

    response = requests.get(f'{base_url}{endpoint}/search', headers=headers, params=params)
    if response.status_code == 200:
        data = response.json()
        return data
    else:
        print(f'Error: {response.status_code}')
        return None
24
 
25
# Video download function
def download_video(video_data, parent_path, height, width, links, index):
    """Save the first unseen video whose file exactly matches the requested
    height and width.

    Parameters:
        video_data: Pexels search response (dict with a 'videos' list).
        parent_path: directory where the .mp4 file is written.
        height, width: exact dimensions the video file must have.
        links: ids of already-downloaded videos; these are skipped.
        index: sequence number used as the filename prefix.

    Returns:
        The id of the saved video, or None when nothing matched.
    """
    for clip in video_data['videos']:
        clip_id = clip['id']
        if clip_id in links:
            continue  # already fetched for an earlier sentence

        for candidate in clip['video_files']:
            if candidate['height'] != height or candidate['width'] != width:
                continue
            destination = os.path.join(parent_path, f"{index}_{clip_id}.mp4")
            # Stream the video to avoid memory issues
            with requests.get(candidate['link'], stream=True) as response:
                response.raise_for_status()
                with open(destination, 'wb') as out_file:
                    for piece in response.iter_content(chunk_size=8192):
                        out_file.write(piece)
            print(f"Successfully saved video in {destination}")
            return clip_id
    return None
45
 
46
# Generate videos without LLM dependency
def generate_videos(product, api_key, orientation, height, width):
    """Generate product videos from a fixed set of marketing sentences:
    derive a keyword per sentence and download one matching Pexels clip.

    Parameters:
        product: product name; also used (underscored) as the output dir.
        api_key: Pexels API key passed through to search_pexels.
        orientation: video orientation, lowercased before the API call.
        height, width: exact dimensions forwarded to download_video.

    Returns:
        (prod, sentences): the output directory name and the static
        sentence list the videos were generated from.
    """
    prod = product.strip().replace(" ", "_")
    links = []
    # Define static sentences based on the product. Bug fix: built BEFORE
    # the try-block — previously a failure in rmtree/mkdir left `sentences`
    # unbound, so the final return raised NameError instead of reporting
    # the handled error.
    sentences = [
        f"Introducing {product}, the ultimate solution for your needs.",
        f"Experience the power of {product} today.",
        f"Why choose {product}? Discover its amazing features.",
        f"Transform your life with {product}.",
        f"Get ready to enjoy {product} like never before."
    ]

    try:
        # Create directory with the product's name
        if os.path.exists(prod):
            shutil.rmtree(prod)
        os.mkdir(prod)

        # Generate video for every sentence
        print("Keywords:")
        for i, sentence in enumerate(sentences):
            keyword = extract_keywords(sentence)  # Replace with a simple keyword extraction function
            print(f"{i+1}: {keyword}")

            data = search_pexels(keyword, api_key, orientation.lower())
            if data and 'videos' in data:
                link = download_video(data, prod, height, width, links, i)
                if link:
                    links.append(link)

        print("Success! Videos have been generated.")
    except Exception as e:
        print("Error! Failed generating videos.")
        print(e)

    return prod, sentences
84
+
85
# Simple keyword extraction function (replace this with a more advanced method if needed)
def extract_keywords(sentence):
    """Return a short search phrase: the first three word tokens of the
    given sentence, joined by single spaces."""
    # Keep only word-character runs (drops punctuation), then take the
    # leading three tokens as the keyword phrase.
    leading = re.findall(r'\b\w+\b', sentence)[:3]
    return " ".join(leading)