Update app.py
app.py CHANGED

@@ -54,10 +54,10 @@ def get_movie_info(movie_title):
 
 def generate_response(prompt):
     input_text_template = (
-        "
-        "
-        f"USER: {prompt} "
-        "
+        "Hi! I am a gen AI bot powered by the Writer/palmyra-small model. "
+        "I am here to give helpful, detailed, and polite answers to your movie inquiries. "
+        f"USER: {prompt} (e.g., Oppenheimer, La La Land, Moonlight)"
+        "Writer AI:"
     )
 
     # Call the get_movie_info function to enrich the response
@@ -80,7 +80,7 @@ def generate_response(prompt):
 
     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
 
-    return f"Movie Info:\n{movie_info}\n\
+    return f"Movie Info:\n{movie_info}\n\n Writer AI Generated Response:\n{generated_text}\n"
 
 # Define chat function for gr.ChatInterface
 def chat_function(message, history):
@@ -90,4 +90,4 @@ def chat_function(message, history):
 
 # Create Gradio Chat Interface
 chat_interface = gr.ChatInterface(chat_function)
-chat_interface.launch(share=True)
+chat_interface.launch(share=True, label='Try something like: Oppenheimer, La La Land, Moonlight')
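For context, a minimal self-contained sketch of how the changed lines presumably fit into app.py is shown below. Only the prompt template, the return string, and the gr.ChatInterface wiring come from this diff; the model loading, generation settings, and the get_movie_info stub are assumptions for illustration, and the sketch keeps a plain launch(share=True) call.

# Minimal sketch, assuming the app loads Writer/palmyra-small via Hugging Face
# transformers and exposes generate_response through gr.ChatInterface.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Writer/palmyra-small")
model = AutoModelForCausalLM.from_pretrained("Writer/palmyra-small")

def get_movie_info(movie_title):
    # Placeholder: the real app presumably queries a movie API here.
    return f"(no external data available for {movie_title})"

def generate_response(prompt):
    # Prompt template as introduced by this commit.
    input_text_template = (
        "Hi! I am a gen AI bot powered by the Writer/palmyra-small model. "
        "I am here to give helpful, detailed, and polite answers to your movie inquiries. "
        f"USER: {prompt} (e.g., Oppenheimer, La La Land, Moonlight)"
        "Writer AI:"
    )

    # Call the get_movie_info function to enrich the response
    movie_info = get_movie_info(prompt)

    # Generation settings here are assumptions, not the repository's actual code.
    inputs = tokenizer(input_text_template, return_tensors="pt")
    output = model.generate(**inputs, max_new_tokens=100)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

    return f"Movie Info:\n{movie_info}\n\n Writer AI Generated Response:\n{generated_text}\n"

# Define chat function for gr.ChatInterface
def chat_function(message, history):
    return generate_response(message)

# Create Gradio Chat Interface
chat_interface = gr.ChatInterface(chat_function)
chat_interface.launch(share=True)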