import streamlit as st
import requests
import os

# Load the API key from Hugging Face Secrets (stored under the name "KEY")
HF_API_KEY = os.getenv("KEY")

# Hugging Face Inference API endpoint for Mistral-7B-Instruct
HF_MISTRAL_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"


# Call the Hugging Face Inference API and return the generated text (or an error message)
def chat_with_mistral_hf(prompt):
    if not HF_API_KEY:
        return "Error: API key not found. Please set it in Hugging Face Secrets."

    headers = {"Authorization": f"Bearer {HF_API_KEY}"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 200,      # the Inference API documents max_new_tokens, not max_length
            "temperature": 0.7,
            "return_full_text": False,  # return only the completion, not the echoed prompt
        },
    }

    try:
        response = requests.post(HF_MISTRAL_URL, json=payload, headers=headers, timeout=60)
    except requests.RequestException as exc:
        return f"Error: request failed ({exc})"

    if response.status_code == 200:
        return response.json()[0]["generated_text"]
    return f"Error {response.status_code}: {response.text}"


# Streamlit UI
st.set_page_config(page_title="ChatGSKD", layout="wide")

# Sidebar: chat title rename option
st.sidebar.header("Chat Settings")
chat_title = st.sidebar.text_input("Rename Chat:", "Temporary Chat")

# Initialize chat history if not present
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Display the chat title
st.title(chat_title)

# Show chat history
for chat in st.session_state.chat_history:
    with st.chat_message(chat["role"]):
        st.write(chat["content"])

# Input box at the bottom
user_input = st.text_area("Ask AI:", height=100, key="query", label_visibility="collapsed")

# Generate a response when the user submits a prompt
if st.button("Generate Response"):
    if user_input.strip():
        with st.spinner("Generating response..."):
            response = chat_with_mistral_hf(user_input)

        # Store the user query and the response in chat history
        st.session_state.chat_history.append({"role": "user", "content": user_input})
        st.session_state.chat_history.append({"role": "assistant", "content": response})

        # Rerun so the new messages appear in the history above
        st.rerun()
    else:
        st.warning("Please enter a prompt before clicking Generate Response.")