# Gradio frontend for a FastAPI-served LLM.
# (Header reconstructed: the original lines were Hugging Face file-viewer
# page residue — status labels, file size, commit hashes, line numbers —
# which is not valid Python and broke the file.)
import gradio as gr
import requests
# Define the FastAPI API URL
# NOTE(review): 0.0.0.0 is a *bind* address, not a connect address — this
# presumably resolves locally, but clients usually target 127.0.0.1 or a
# hostname; confirm against the deployment setup.
API_URL = "http://0.0.0.0:8000/llm_api" # Ensure this matches your FastAPI server URL
def query_api(question):
    """Send *question* to the FastAPI backend and return the model's answer.

    Parameters
    ----------
    question : str
        The user's question, forwarded as JSON (``{"question": ...}``) to
        the ``/llm_api`` endpoint.

    Returns
    -------
    str
        The model's response on success, or a human-readable error string
        when the request fails or the server returns a non-200 status.
    """
    try:
        # A timeout is essential here: without one, an unresponsive backend
        # would hang this call (and the Gradio UI) indefinitely.
        response = requests.post(
            API_URL, json={"question": question}, timeout=30
        )
        # Check response status and return the appropriate response.
        if response.status_code == 200:
            return response.json().get("response", "No response from the model.")
        return f"Error: Unable to fetch response. Status Code: {response.status_code}"
    except requests.exceptions.RequestException as e:
        # Covers connection errors, timeouts, and other transport failures.
        return f"Request failed: {e}"
# Build the Gradio UI: a single text input wired to the backend query helper.
question_box = gr.Textbox(label="Ask a Question")

iface = gr.Interface(
    fn=query_api,
    inputs=question_box,
    outputs="text",
    title="Chatbot Interface",
    description="Ask any question and get responses from the LLaMA model.",
)
def _main():
    """Launch the Gradio app, exiting cleanly on Ctrl-C."""
    try:
        iface.launch()
    except KeyboardInterrupt:
        print("Gradio interface stopped manually.")
    except Exception as e:
        print(f"An error occurred: {e}")


# Launch only when executed as a script, not when imported.
if __name__ == "__main__":
    _main()