Guhanselvam committed
Commit 3336ef9 · verified · 1 Parent(s): 1a1b05e

Update frontend.py

Files changed (1):
  1. frontend.py +5 -6
frontend.py CHANGED
@@ -2,14 +2,13 @@
 import requests
 import gradio as gr
 
-# Define the API URL
-API_URL = "http://localhost:8000/chat/"  # URL for your FastAPI backend
+# Define the API URL for FastAPI
+API_URL = "http://localhost:8000/chat/"  # Ensure this matches your FastAPI server
 
 def get_response(user_message):
     try:
         response = requests.post(API_URL, json={"message": user_message})
-        response_data = response.json()
-        return response_data.get("response", "Error: Unable to get response.")
+        return response.json().get("response", "Error: Unable to get response.")
     except Exception as e:
         return f"Error: {str(e)}"
 
@@ -18,8 +17,8 @@ iface = gr.Interface(
     fn=get_response,  # Function to call for generating responses
     inputs=gr.Textbox(label="Your Question", placeholder="Enter your message here..."),
     outputs=gr.Textbox(label="Chatbot Response"),
-    title="Chatbot with Hugging Face Transformers",
-    description="Chat with an AI assistant that uses Hugging Face Transformers to generate responses."
+    title="Chatbot with LLaMA API",
+    description="Chat with an AI assistant that uses the LLaMA model to generate responses."
 )
 
 # Launch the Gradio interface
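
For context, the updated frontend posts JSON of the form {"message": ...} to /chat/ and reads a "response" field from the JSON reply. The sketch below shows a minimal FastAPI backend shaped to match that contract; it is an assumption for illustration only (the actual backend and its LLaMA integration are not part of this commit), and generate_reply is a hypothetical placeholder for the real model call.

# Minimal sketch of a FastAPI backend matching what frontend.py expects.
# Assumption: not the project's actual backend; generate_reply stands in
# for the real LLaMA (or other model) call.
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class ChatRequest(BaseModel):
    message: str  # matches the {"message": ...} payload sent by get_response()

def generate_reply(message: str) -> str:
    # Placeholder logic; replace with the actual model inference.
    return f"You said: {message}"

@app.post("/chat/")
def chat(request: ChatRequest):
    # The frontend reads the "response" key from this JSON body.
    return {"response": generate_reply(request.message)}

If this were saved as main.py, running `uvicorn main:app --port 8000` would serve it at the URL in API_URL, after which the Gradio interface can be launched as usual.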