import streamlit as st
import requests
import os
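# Note: the Space's requirements.txt is assumed to list at least
# streamlit and requests for this app to start.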
# -----------------------------------------------------
# Streamlit Page Configuration
# -----------------------------------------------------
st.set_page_config(page_title="Ollama API Tester", layout="wide")
st.title("🧪 Ollama API Model Tester")
st.subheader("Test Ollama via API in Hugging Face Spaces.")
# -----------------------------------------------------
# API Key and URL (Replace with Your Ollama Server)
# -----------------------------------------------------
# Prefer setting OLLAMA_API_URL as a Space secret / environment variable;
# otherwise change the fallback below to point at your server.
OLLAMA_API_URL = os.getenv("OLLAMA_API_URL", "https://your-ollama-server.com/api/chat")
# -----------------------------------------------------
# User Input: Prompt
# -----------------------------------------------------
user_prompt = st.text_area("✍️ Enter your prompt:", "Explain quantum computing in simple terms.")
# -----------------------------------------------------
# Button to Generate Response
# -----------------------------------------------------
if st.button("🚀 Generate Response"):
    with st.spinner("Fetching Ollama response..."):
        try:
            # Prepare API payload (streaming disabled so the reply
            # arrives as a single JSON object we can parse below)
            payload = {
                "model": "mistral",  # Change model name if needed
                "stream": False,
                "messages": [{"role": "user", "content": user_prompt}]
            }
            # Send request to Ollama API
            response = requests.post(OLLAMA_API_URL, json=payload, timeout=120)
            # Process response; a non-streaming /api/chat reply looks like:
            # {"model": "...", "message": {"role": "assistant", "content": "..."}, "done": true}
            if response.status_code == 200:
                response_text = response.json()["message"]["content"]
                st.markdown("### 🤖 Ollama API Response:")
                st.write(f"📝 {response_text}")
            else:
                st.error(f"❌ Ollama API Error: {response.json()}")
        except Exception as e:
            st.error(f"❌ Ollama Request Failed: {e}")
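# -----------------------------------------------------
# Usage Notes (assumed setup, adjust to your environment)
# -----------------------------------------------------
# Run the app locally:
#   streamlit run app.py
# Sanity-check the Ollama endpoint directly (non-streaming):
#   curl https://your-ollama-server.com/api/chat \
#     -d '{"model": "mistral", "stream": false,
#          "messages": [{"role": "user", "content": "Hello"}]}'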