# Earlier approach (disabled): load and launch the hosted model demo directly.
# import gradio as gr
# gr.load("models/mistralai/Mistral-7B-Instruct-v0.3").launch()
import os
import gradio as gr
import requests
from dotenv import load_dotenv
import gradio_client as grc
# Load the environment variables from the .env file
load_dotenv()
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
headers = {"Authorization": f"Bearer {os.getenv('HFREAD')}"}
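# Note: HFREAD is assumed to be a Hugging Face access token with read scope,
# supplied via the .env file loaded above (or the Space's secrets).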
def query(payload):
    # Forward the payload to the Inference API and return the decoded JSON.
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
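# Illustrative call (response shape inferred from the handling in chatbot_response
# below, not verified against the live API):
#   query({"inputs": "Hello"})
#   -> [{"generated_text": "..."}]   on success
#   -> {"error": "..."}              e.g. while the model is still loading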
def chatbot_response(input_text):
    response = query({"inputs": input_text})
    # Extract the generated text from the response; the API may return either
    # a dict (e.g. an error message) or a list of generations.
    if isinstance(response, dict) and 'generated_text' in response:
        return response['generated_text']
    elif isinstance(response, list) and len(response) > 0 and 'generated_text' in response[0]:
        return response[0]['generated_text']
    return 'No response generated.'
# Gradio interface
def main():
    with gr.Blocks() as demo:
        gr.Markdown("Chatty")
        with gr.Row():
            input_box = gr.Textbox(label="Input Text", placeholder="Type your question here...", lines=2)
        with gr.Row():
            output_box = gr.Textbox(label="Response", placeholder="The response will appear here...", lines=5)
        submit_button = gr.Button("Submit")
        submit_button.click(fn=chatbot_response, inputs=input_box, outputs=output_box)
    # Launch the web UI locally if desired:
    # demo.launch()

# Deploy the existing Space as a Discord bot instead of launching the web UI here.
grc.Client("Z3ktrix/mistralai-Mistral-7B-Instruct-v0.3").deploy_discord(to_id="1247789500445233275")

# To run the Gradio web UI instead, uncomment:
# if __name__ == "__main__":
#     main()