# Hugging Face Space: Streamlit demo app (scraped page metadata removed).
# Original listing: Space status "Sleeping", file size 1,732 bytes.
import os
import streamlit as st
from dotenv import load_dotenv # Importing load_dotenv to load environment variables
from langchain import HuggingFaceHub
# Pull settings in from a local .env file so the API token can live
# outside the source tree.
load_dotenv()

# Hugging Face Hub token; None when the variable is not set.
HUGGINGFACE_API_TOKEN = os.environ.get("HUGGINGFACE_API_TOKEN")
# Function to return the response from the Hugging Face model
def load_answer(question):
    """Send *question* to the hosted Mistral model and return its reply.

    Parameters
    ----------
    question : str
        The user's prompt, forwarded verbatim to the model.

    Returns
    -------
    str
        The model's generated answer, or a human-readable ``"Error: ..."``
        string when anything fails (missing token, network error, etc.) —
        the caller renders this directly in the UI, so it never raises.
    """
    try:
        # LangChain wrapper around the hosted Hugging Face Inference API.
        llm = HuggingFaceHub(
            repo_id="mistralai/Mistral-7B-Instruct-v0.3",  # Hugging Face model repo
            huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,  # Pass your API token
            model_kwargs={"temperature": 0.1},  # Set a strictly positive temperature
        )
        # .predict() sends the prompt and returns the generated text.
        answer = llm.predict(question)
        return answer
    except Exception as e:
        # Boundary handler: surface the failure to the UI instead of crashing.
        return f"Error: {str(e)}"
# Streamlit App UI starts here
# Configure the browser tab title/icon, then render the page heading.
st.set_page_config(page_title="Hugging Face Demo", page_icon=":robot:")
st.header("Hugging Face Demo")
# Function to get user input
def get_text():
    """Render the chat input box and return whatever the user typed."""
    return st.text_input("You: ", key="input")
# Get user input from the text box.
user_input = get_text()

# Create a button for generating the response.
submit = st.button('Generate')

# If the generate button is clicked and user input is not empty,
# query the model and display the answer; otherwise warn about
# the empty prompt.
if submit and user_input:
    response = load_answer(user_input)
    st.subheader("Answer:")
    st.write(response)
elif submit:
    st.warning("Please enter a question.")  # Warning for empty input