Docfile committed on
Commit
130bd89
·
verified ·
1 Parent(s): 6b13538

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -2
app.py CHANGED
@@ -6,6 +6,25 @@ import os
6
  token=os.environ.get("TOKEN")
7
  genai.configure(api_key=token)
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  def describe_video(pp,video_file):
10
  try:
11
 
@@ -27,8 +46,8 @@ def describe_video(pp,video_file):
27
  prompt = pp
28
 
29
  # Set the model to Gemini 1.5 Pro.
30
- model = genai.GenerativeModel(model_name="models/gemini-1.5-flash-latest")
31
-
32
  # Make the LLM request.
33
  print("Making LLM inference request...")
34
  response = model.generate_content(
 
6
  token=os.environ.get("TOKEN")
7
  genai.configure(api_key=token)
8
 
9
+ safety_settings = [
10
+ {
11
+ "category": "HARM_CATEGORY_HARASSMENT",
12
+ "threshold": "BLOCK_NONE",
13
+ },
14
+ {
15
+ "category": "HARM_CATEGORY_HATE_SPEECH",
16
+ "threshold": "BLOCK_NONE",
17
+ },
18
+ {
19
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
20
+ "threshold": "BLOCK_NONE",
21
+ },
22
+ {
23
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
24
+ "threshold": "BLOCK_NONE",
25
+ },
26
+ ]
27
+
28
  def describe_video(pp,video_file):
29
  try:
30
 
 
46
  prompt = pp
47
 
48
  # Set the model to Gemini 1.5 Pro.
49
+ model = genai.GenerativeModel(model_name="models/gemini-1.5-pro-latest",safety_settings=safety_settings,
50
+ generation_config=generation_config,)
51
  # Make the LLM request.
52
  print("Making LLM inference request...")
53
  response = model.generate_content(