# NOTE(review): the three lines below ("Spaces: / Sleeping / Sleeping") are
# Hugging Face Spaces status text accidentally captured when this file was
# copied — commented out so the script remains valid Python.
# Spaces: Sleeping Sleeping
"""Streamlit UI for sending a single chat prompt to an Ollama server's API.

Run inside a Hugging Face Space (or locally via `streamlit run`). The target
server is taken from the OLLAMA_API_URL environment variable, falling back to
the placeholder below.
"""
import streamlit as st
import requests
import os

# -----------------------------------------------------
# Streamlit Page Configuration
# -----------------------------------------------------
# NOTE(review): emoji below were reconstructed from mojibake in the original
# source (e.g. "π§ͺ") — confirm they match the intended icons.
st.set_page_config(page_title="Ollama API Tester", layout="wide")
st.title("🧪 Ollama API Model Tester")
st.subheader("Test Ollama via API in Hugging Face Spaces.")

# -----------------------------------------------------
# API URL — overridable via the OLLAMA_API_URL env var (the `os` import was
# previously unused); the original hard-coded placeholder stays the default.
# -----------------------------------------------------
OLLAMA_API_URL = os.environ.get(
    "OLLAMA_API_URL", "https://your-ollama-server.com/api/chat"
)

# Request timeout in seconds — without one, requests.post() can hang forever
# on an unreachable server and the spinner never resolves.
REQUEST_TIMEOUT = 60

# -----------------------------------------------------
# User Input: Prompt
# -----------------------------------------------------
user_prompt = st.text_area("✍️ Enter your prompt:", "Explain quantum computing in simple terms.")

# -----------------------------------------------------
# Button to Generate Response
# -----------------------------------------------------
if st.button("🚀 Generate Response"):
    with st.spinner("Fetching Ollama response..."):
        try:
            # Prepare API payload. Ollama's /api/chat streams NDJSON by
            # default; "stream": False is required so the body is a single
            # JSON object that response.json() can parse.
            payload = {
                "model": "mistral",  # Change model name if needed
                "messages": [{"role": "user", "content": user_prompt}],
                "stream": False,
            }
            # Send request to Ollama API (bounded by REQUEST_TIMEOUT).
            response = requests.post(
                OLLAMA_API_URL, json=payload, timeout=REQUEST_TIMEOUT
            )

            # Process Response
            if response.status_code == 200:
                response_text = response.json()["message"]["content"]
                st.markdown("### 🤖 Ollama API Response:")
                st.write(f"📝 {response_text}")
            else:
                # Error bodies are not guaranteed to be JSON (proxies often
                # return HTML) — fall back to the raw text.
                try:
                    detail = response.json()
                except ValueError:
                    detail = response.text
                st.error(f"❌ Ollama API Error: {detail}")
        except Exception as e:
            # Covers connection errors, timeouts, and malformed success bodies.
            st.error(f"❌ Ollama Request Failed: {e}")