ctn8176 committed
Commit a7f4470 · verified · 1 Parent(s): 7b6b4a2

Update app.py

Files changed (1)
  1. app.py +13 -41
app.py CHANGED
@@ -1,7 +1,7 @@
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
-import requests
+from datasets import load_dataset
 
 model_name = "Writer/palmyra-small"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -9,47 +9,18 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
 
 def get_movie_info(movie_title):
-    api_key = "20e959f0f28e6b3e3de49c50f358538a"
-    search_url = f"https://api.themoviedb.org/3/search/movie"
-
-    # Make a search query to TMDb
-    params = {
-        "api_key": api_key,
-        "query": movie_title,
-        "language": "en-US",
-        "page": 1,
-    }
-
-    try:
-        search_response = requests.get(search_url, params=params)
-        search_data = search_response.json()
-
-        # Check if any results are found
-        if search_data.get("results"):
-            movie_id = search_data["results"][0]["id"]
-
-            # Fetch detailed information using the movie ID
-            details_url = f"https://api.themoviedb.org/3/movie/{movie_id}"
-            details_params = {
-                "api_key": api_key,
-                "language": "en-US",
-            }
-
-            details_response = requests.get(details_url, params=details_params)
-            details_data = details_response.json()
-
-            # Extract relevant information
-            title = details_data.get("title", "Unknown Title")
-            year = details_data.get("release_date", "Unknown Year")[:4]
-            genre = ", ".join(genre["name"] for genre in details_data.get("genres", []))
-
-            return f"Title: {title}, Year: {year}, Genre: {genre}"
-
-        else:
-            return "Movie not found"
-
-    except Exception as e:
-        return f"Error: {e}"
+    # Load the IMDb dataset
+    imdb = load_dataset("imdb")
+
+    # Search for the movie in the IMDb dataset
+    results = imdb['title'].filter(lambda x: movie_title.lower() in x.lower())
+
+    # Check if any results are found
+    if len(results) > 0:
+        movie = results[0]
+        return f"Title: {movie['title']}, Year: {movie['year']}, Genre: {', '.join(movie['genre'])}"
+    else:
+        return "Movie not found"
 
 def generate_response(prompt):
     input_text_template = (
@@ -59,7 +30,7 @@ def generate_response(prompt):
         "ASSISTANT:"
     )
 
-    # Call the get_movie_info function to enrich the response
+    # Call the get_movie_info function
     movie_info = get_movie_info(prompt)
 
     # Concatenate the movie info with the input template
@@ -83,3 +54,4 @@ def generate_response(prompt):
 iface = gr.Interface(fn=generate_response, inputs="text", outputs="text", live=True)
 iface.launch()
 
+
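
For reference, a minimal runnable sketch of a dataset-backed lookup in the spirit of the new get_movie_info, assuming the Hugging Face "imdb" dataset as served by load_dataset (its splits expose "text" and "label" review columns rather than title, year, or genre fields); the helper name, the train-split choice, and the returned summary string are illustrative and not part of this commit:

from datasets import load_dataset

def get_movie_info_from_reviews(movie_title):
    # Hypothetical helper: the "imdb" dataset holds labelled reviews,
    # so a local lookup can only match the title inside review text.
    reviews = load_dataset("imdb", split="train")

    # Case-insensitive substring match over the "text" column.
    matches = reviews.filter(lambda example: movie_title.lower() in example["text"].lower())

    if len(matches) > 0:
        snippet = matches[0]["text"][:200]
        return f"Found {len(matches)} IMDb reviews mentioning '{movie_title}'. Example: {snippet}"
    return "Movie not found"

Because filtering a full split on every request is slow, loading the dataset once at module level (next to the model) and reusing it would keep a live Gradio interface responsive.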
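
The generate_response hunks only change a comment, and the code that tokenizes the enriched prompt sits outside the diff context. A minimal sketch of that enrich-then-generate pattern, reusing the module-level tokenizer, model, and device shown above; the template wording and the generation parameters here are assumptions, not the file's actual values:

def generate_response_sketch(prompt):
    movie_info = get_movie_info(prompt)

    # Illustrative template; the real template text is not shown in this diff.
    input_text = f"Movie info: {movie_info}\nUSER: {prompt}\nASSISTANT:"

    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    output_ids = model.generate(**inputs, max_new_tokens=100, pad_token_id=tokenizer.eos_token_id)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)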