# GeminiLLM-QandA / app.py
# Hugging Face Space file (author: hruday96)
# Commit: "Create app.py" — 61cca64 (verified)
import streamlit as st
import requests
import os
# -----------------------------------------------------
# Streamlit Page Configuration
# -----------------------------------------------------
st.set_page_config(page_title="Ollama API Tester", layout="wide")
st.title("🧪 Ollama API Model Tester")
st.subheader("Test Ollama via API in Hugging Face Spaces.")
# -----------------------------------------------------
# API URL — override with the OLLAMA_API_URL environment
# variable (e.g. a Space secret); falls back to the same
# placeholder default, so existing behavior is unchanged.
# -----------------------------------------------------
OLLAMA_API_URL = os.environ.get(
    "OLLAMA_API_URL",
    "https://your-ollama-server.com/api/chat",  # Change this to your server
)
# -----------------------------------------------------
# User Input: Prompt (text area pre-filled with an example)
# -----------------------------------------------------
user_prompt = st.text_area("✍️ Enter your prompt:", "Explain quantum computing in simple terms.")
# -----------------------------------------------------
# Button to Generate Response
# -----------------------------------------------------
# On click: POST a single-turn, non-streaming chat request to the
# Ollama server and render the assistant's reply (or the error).
if st.button("🚀 Generate Response"):
    with st.spinner("Fetching Ollama response..."):
        # Prepare API payload for Ollama's /api/chat endpoint.
        payload = {
            "model": "mistral",  # Change model name if needed
            "messages": [{"role": "user", "content": user_prompt}],
        }
        try:
            # Send request to Ollama API. The timeout keeps the
            # spinner from hanging forever if the server is down.
            response = requests.post(OLLAMA_API_URL, json=payload, timeout=60)
            # Process Response
            if response.status_code == 200:
                # Ollama returns {"message": {"role": ..., "content": ...}, ...}
                response_text = response.json()["message"]["content"]
                st.markdown("### 🤖 Ollama API Response:")
                st.write(f"📝 {response_text}")
            else:
                # Error bodies are not guaranteed to be JSON — fall
                # back to the raw text instead of raising a decode error.
                try:
                    detail = response.json()
                except ValueError:
                    detail = response.text
                st.error(f"❌ Ollama API Error: {detail}")
        except Exception as e:
            # Catch-all UI boundary: connection errors, timeouts,
            # unexpected response shapes — surface them to the user.
            st.error(f"❌ Ollama Request Failed: {e}")