ctn8176 committed on
Commit 2aece5c · verified · 1 Parent(s): 1592e82

Update app.py

Files changed (1)
  1. app.py +11 -39
app.py CHANGED

@@ -43,28 +43,14 @@ def get_movie_info(movie_title):
             year = details_data.get("release_date", "Unknown Year")[:4]
             genre = ", ".join(genre["name"] for genre in details_data.get("genres", []))
             tmdb_link = f"https://www.themoviedb.org/movie/{movie_id}"
-            # poster_path = details_data.get("poster_path")
-
-            # Convert poster_path to a complete image URL
-            # image_url = f"https://image.tmdb.org/t/p/w500{poster_path}" if poster_path else ""
-
-            return {
-                "title": title,
-                "year": year,
-                "genre": genre,
-                "tmdb_link": tmdb_link,
-            }
+
+            return f"Title: {title}, Year: {year}, Genre: {genre}\nFind more info here: {tmdb_link}"
 
         else:
-            return {"error": "Movie not found"}
+            return "Movie not found", ""
 
     except Exception as e:
-        return {"error": f"Error: {e}"}
-
-def process_image(movie_info):
-    # Process the image, return image_url
-    # For now, let's just return a placeholder
-    return "https://via.placeholder.com/150"
+        return f"Error: {e}", ""
 
 def generate_response(prompt):
     input_text_template = (
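
Review note: after this hunk, the success path of get_movie_info returns a plain string, while both failure branches still return two-element tuples, ("Movie not found", "") and (f"Error: {e}", ""), left over from the removed image plumbing, so a failed lookup will interpolate as a tuple further down. For context, here is a minimal sketch of how the full function plausibly looks, assuming the standard TMDB v3 search and details endpoints and a module-level TMDB_API_KEY; every name outside the hunk is a guess:

import requests

TMDB_API_KEY = "..."  # assumption: the real app loads this from config or env

def get_movie_info(movie_title):
    try:
        # Resolve the title to a movie id via the v3 search endpoint.
        search_data = requests.get(
            "https://api.themoviedb.org/3/search/movie",
            params={"api_key": TMDB_API_KEY, "query": movie_title},
            timeout=10,
        ).json()
        results = search_data.get("results", [])
        if results:
            movie_id = results[0]["id"]
            # Fetch full details for the top hit.
            details_data = requests.get(
                f"https://api.themoviedb.org/3/movie/{movie_id}",
                params={"api_key": TMDB_API_KEY},
                timeout=10,
            ).json()
            title = details_data.get("title", "Unknown Title")
            year = details_data.get("release_date", "Unknown Year")[:4]
            genre = ", ".join(g["name"] for g in details_data.get("genres", []))
            tmdb_link = f"https://www.themoviedb.org/movie/{movie_id}"
            return f"Title: {title}, Year: {year}, Genre: {genre}\nFind more info here: {tmdb_link}"
        else:
            return "Movie not found", ""  # still a tuple, unlike the success path
    except Exception as e:
        return f"Error: {e}", ""  # still a tuple, unlike the success path

Returning a single string from all three branches would keep the downstream formatting uniform.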
@@ -77,23 +63,14 @@ def generate_response(prompt):
     # Call the get_movie_info function to enrich the response
     movie_info = get_movie_info(prompt)
 
-    if "error" in movie_info:
-        return f"Error: {movie_info['error']}", None
-
-    # Process the image separately
-    image_url = process_image(movie_info)
-
     # Concatenate the movie info with the input template
-    input_text_template += (
-        f" Movie Info: Title: {movie_info['title']}, Year: {movie_info['year']}, "
-        f"Genre: {movie_info['genre']}\nFind more info here: {movie_info['tmdb_link']}"
-    )
+    input_text_template += f" Movie Info: {movie_info}"
 
     model_inputs = tokenizer(input_text_template, return_tensors="pt").to(device)
 
     gen_conf = {
         "top_k": 20,
-        "max_length": 20,
+        "max_length": 20,  # shortened to limit prediction length; the model loops and the output is not coherent
         "temperature": 0.6,
         "do_sample": True,
         "eos_token_id": tokenizer.eos_token_id,
@@ -103,19 +80,14 @@ def generate_response(prompt):
 
     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
 
-    return (
-        f"Movie Info: {movie_info['title']}, {movie_info['year']}, {movie_info['genre']}\n\n" \
-        f"Generated Response: {generated_text[:100]}",  # Truncate to 100 characters
-        image_url
-    )
+    return f"Movie Info:\n{movie_info}\n\nGenerated Response:\n{generated_text}\n"
 
 # Define chat function for gr.ChatInterface
 def chat_function(message, history):
-    response_text, response_image = generate_response(message)
-    history.append([message, response_text])
-    history.append([message, response_image])  # Separate history for image
-    return response_text, response_image
+    response = generate_response(message)
+    history.append([message, response])
+    return response
 
 # Create Gradio Chat Interface
 chat_interface = gr.ChatInterface(chat_function)
-chat_interface.launch(share=True)
+chat_interface.launch(share=True)
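
One more observation on chat_function: gr.ChatInterface manages the conversation history itself and appends the returned reply, so the manual history.append is redundant (though harmless). A minimal sketch of the equivalent wiring, assuming the same generate_response:

import gradio as gr

def chat_function(message, history):
    # ChatInterface tracks history internally; just return the reply.
    return generate_response(message)

chat_interface = gr.ChatInterface(chat_function)
chat_interface.launch(share=True)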
 