Commit · 4b35062
1 Parent(s): 4ffa89d

✨ Added main router and integrated ollama & codelama endpoints

Files changed:
- .gitignore                 +11 -0
- api/endpoints/codelama.py   +7 -13
- api/endpoints/ollama.py     +6 -26
- api/main.py                  +8 -0
- app/app.py                   +4 -4
- start.sh                    +23 -0
.gitignore
CHANGED

@@ -0,0 +1,11 @@
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+*.db
+*.sqlite
+.env
+*.env
+*.log
+venv/
+qvenv/
api/endpoints/codelama.py
CHANGED

@@ -1,16 +1,10 @@
-
-import subprocess  # Import subprocess module to run system commands
-import ollama
+# api/endpoints/codelama.py
 
-
+from fastapi import APIRouter, HTTPException
 
-
-def read_root():
-    return {"message": "Welcome to the Quantum-API/codelama"}
+router = APIRouter()
 
-
-
-
-
-    result = subprocess.run(["ollama", "run", "codellama:latest"], capture_output=True, text=True)
-    return {"result": result.stdout}
+@router.post("/")
+async def run_codelama(payload: dict):
+    # Placeholder logic
+    return {"status": "success", "received_input": payload}
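The new placeholder endpoint can be exercised end to end once the app is running. A minimal client sketch, assuming the /run-codelama prefix registered in api/main.py below and the port 7860 from start.sh (the payload keys are hypothetical, since the handler echoes any JSON dict back):

import requests

# The full route is POST /run-codelama/ because the handler is mounted at @router.post("/").
resp = requests.post(
    "http://localhost:7860/run-codelama/",
    json={"code": "print('hello')"},  # hypothetical payload; any JSON object works
)
print(resp.json())  # {'status': 'success', 'received_input': {'code': "print('hello')"}}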
api/endpoints/ollama.py
CHANGED

@@ -1,29 +1,9 @@
-from fastapi import
-from pydantic import BaseModel
-import ollama  # Make sure Ollama is installed and available
+from fastapi import APIRouter, Request
 
-app = FastAPI()
-
-# Define request model for the input data
-class UserInput(BaseModel):
-    question: str
-
-# Function to generate responses using Ollama
-def get_ollama_response(user_input: str) -> str:
-    try:
-        # Run Ollama model for generating a response
-        response = ollama.chat(model="llama", messages=[{"role": "user", "content": user_input}])
-        return response['text']  # Ensure you're extracting the response text from Ollama's response
-    except Exception as e:
-        return f"Error processing request: {str(e)}"
-
-# Create an API router
 router = APIRouter()
 
-@router.post("/
-async def ollama_response(
-
-
-
-# Include router into the FastAPI app
-app.include_router(router)
+@router.post("/")
+async def ollama_response(request: Request):
+    data = await request.json()
+    question = data.get("question", "No question provided.")
+    return {"response": f"Olama received: {question}"}
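Since the rewritten handler just echoes the question back, it can be tested without Ollama installed. A minimal sketch, assuming the /ollama-response prefix registered in api/main.py below:

import requests

# "question" is the key the handler reads from the JSON body;
# any other payload falls back to "No question provided."
resp = requests.post(
    "http://localhost:7860/ollama-response/",
    json={"question": "What is a qubit?"},
)
print(resp.json())  # {'response': 'Olama received: What is a qubit?'}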
api/main.py
ADDED

@@ -0,0 +1,8 @@
+from fastapi import FastAPI
+from api.endpoints import codelama, ollama
+
+app = FastAPI()
+
+# Register routers if you have them
+app.include_router(codelama.router, prefix="/run-codelama")
+app.include_router(ollama.router, prefix="/ollama-response")
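Note that the prefixes registered here (/run-codelama and /ollama-response) do not match the URLs the Streamlit client below targets (/codelama/run and /ollama/response), so the client's requests would 404 against this layout. A minimal smoke test of the wiring as committed, assuming httpx is installed for FastAPI's TestClient:

from fastapi.testclient import TestClient
from api.main import app

client = TestClient(app)

# Trailing slashes match @router.post("/") exactly, avoiding a 307 redirect.
assert client.post("/run-codelama/", json={"x": 1}).status_code == 200
assert client.post("/ollama-response/", json={"question": "ping"}).status_code == 200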
app/app.py
CHANGED

@@ -2,8 +2,8 @@ import streamlit as st
 import requests
 
 # URL of the FastAPI backend endpoint
-API_URL_OLAMA = "http://localhost:7860/ollama
-API_URL_CODELAMA = "http://localhost:7860/run
+API_URL_OLAMA = "http://localhost:7860/ollama/response"
+API_URL_CODELAMA = "http://localhost:7860/codelama/run"
 
 def main():
     st.title("Quantum-API Chat Interface with Olama and CodeLlama")
@@ -18,7 +18,7 @@ def main():
             # Display the response from Olama
             st.write(f"Olama says: {response.json()['response']}")
         else:
-            st.error("Error contacting Olama API.")
+            st.error(f"Error contacting Olama API: {response.status_code}")
 
     if st.button("Run Code with CodeLlama"):
         # Make a GET request to the FastAPI server for CodeLlama
@@ -27,7 +27,7 @@ def main():
             # Display the response from CodeLlama
             st.write(f"CodeLlama result: {response.json()['result']}")
         else:
-            st.error("Error contacting CodeLlama API.")
+            st.error(f"Error contacting CodeLlama API: {response.status_code}")
 
 if __name__ == "__main__":
     main()
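As committed, these constants still target /ollama/response and /codelama/run rather than the prefixes api/main.py registers. A hypothetical correction aligning the client with the router layout:

# Hypothetical fix; trailing slashes match the @router.post("/") handlers.
API_URL_OLAMA = "http://localhost:7860/ollama-response/"
API_URL_CODELAMA = "http://localhost:7860/run-codelama/"

The requests would also need to be POSTs with a JSON body, despite the "Make a GET request" comments, since both endpoints are registered as POST-only.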
start.sh
CHANGED

@@ -0,0 +1,23 @@
+#!/bin/bash
+
+echo "🚀 Activating Quantum-API Startup"
+
+# Start FastAPI (api.main:app) on port 7860
+echo "🌀 Starting FastAPI on port 7860..."
+uvicorn api.main:app --host 0.0.0.0 --port 7860 &> fastapi.log &
+
+# Wait 3 seconds
+sleep 3
+
+# Show FastAPI logs if it failed
+if ! pgrep -f "uvicorn api.main:app" > /dev/null; then
+    echo "❌ FastAPI failed to start. Logs:"
+    cat fastapi.log
+    exit 1
+else
+    echo "✅ FastAPI is running."
+fi
+
+# Start Streamlit app on port 8000
+echo "✨ Launching Streamlit on port 8000..."
+streamlit run app/app.py --server.port 8000
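Once start.sh is up, the FastAPI side can be verified independently of the Streamlit UI, for example via the interactive docs FastAPI serves by default:

import requests

# /docs is FastAPI's built-in Swagger UI; a 200 here means uvicorn came up on 7860.
assert requests.get("http://localhost:7860/docs").status_code == 200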