🚀 Add AutoExec AI full application files for Hugging Face Space
- .streamlit/secrets.toml +2 -0
- Dockerfile +12 -0
- README.md +34 -10
- agents/strategy_agent.py +2 -0
- app.py +58 -0
- app/__init__.py +0 -0
- app/main.py +5 -0
- app/routes/__init__.py +0 -0
- app/routes/loopagent.py +21 -0
- celery_worker.py +11 -0
- dashboard.py +12 -0
- langgraph_config.json +54 -0
- memory/database.py +26 -0
- requirements.txt +8 -0
.streamlit/secrets.toml
ADDED
@@ -0,0 +1,2 @@
+GEMINI_API_KEY = "your-gemini-api-key"
+OPENAI_API_KEY = "your-openai-api-key"
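These placeholder values are committed for local development only; on Hugging Face, secrets configured in the Space settings are exposed to a Docker Space as environment variables rather than through this file. A minimal sketch of reading the keys with an environment-variable fallback (the get_key helper is illustrative and not part of this commit):

```python
# Sketch (assumption): prefer real keys from Space secrets / environment
# variables, falling back to the placeholder values in .streamlit/secrets.toml.
import os

import streamlit as st

def get_key(name: str) -> str:
    return os.environ.get(name) or st.secrets.get(name, "")

GEMINI_API_KEY = get_key("GEMINI_API_KEY")
OPENAI_API_KEY = get_key("OPENAI_API_KEY")
```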
Dockerfile
ADDED
@@ -0,0 +1,12 @@
+FROM python:3.10
+
+WORKDIR /code
+
+COPY requirements.txt .
+RUN apt-get update && apt-get install -y redis-server && pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+
+EXPOSE 7860
+
+CMD service redis-server start && uvicorn app.main:app --host 0.0.0.0 --port 7860 & celery -A celery_worker worker --loglevel=info & streamlit run app.py --server.port 7861
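The CMD starts Redis, backgrounds uvicorn and the Celery worker with `&`, and leaves Streamlit in the foreground, so the container's lifetime effectively tracks Streamlit alone; if the API or the worker dies, the Space keeps running without them. A hedged sketch of a small supervisor script (a hypothetical start.py, not included in this commit; Redis would still be started beforehand, e.g. in the CMD) that waits on every process and propagates a failure exit code:

```python
# start.py — hypothetical supervisor sketch; the commit uses a shell CMD instead.
import subprocess
import sys

COMMANDS = [
    ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"],
    ["celery", "-A", "celery_worker", "worker", "--loglevel=info"],
    ["streamlit", "run", "app.py", "--server.port", "7861"],
]

procs = [subprocess.Popen(cmd) for cmd in COMMANDS]
# Wait for all processes; exit non-zero if any of them failed.
sys.exit(max(p.wait() for p in procs))
```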
README.md
CHANGED
@@ -1,10 +1,34 @@
-
-
-
-
-
-
-
-
-
-
+# 🚀 AutoExec AI: Full Stack AI Business Launcher (Streamlit + FastAPI + Celery + LangGraph)
+
+## Overview
+
+This Hugging Face Docker Space contains a unified AI-powered platform to generate and optimize digital businesses autonomously using LLMs.
+
+### What's Inside
+- Streamlit UI
+- FastAPI backend
+- Celery worker (auto-task scheduling)
+- Redis server
+- SQLite memory DB
+- LangGraph JSON config
+- Dashboard for agent logs
+
+## Running Locally
+
+```bash
+uvicorn app.main:app --reload
+streamlit run dashboard.py
+celery -A celery_worker worker --loglevel=info
+```
+
+## Hugging Face Space
+
+Upload the full repo to a **Docker Space** and Hugging Face will build and run it for you.
+
+Make sure to set secrets for:
+```
+GEMINI_API_KEY
+OPENAI_API_KEY
+```
+
+Streamlit runs on port 7861; FastAPI runs on port 7860.
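Once the Space (or the local stack) is up, the FastAPI side can be exercised directly; a small sketch hitting the LoopAgent route added in this commit (the localhost URL assumes the local Docker setup — substitute your Space's host):

```python
# Sketch: trigger one StrategyAgent run through the FastAPI backend.
import requests

resp = requests.get("http://localhost:7860/loopagent/run", timeout=30)
resp.raise_for_status()
print(resp.json())  # expected keys: status, agent, result, timestamp
```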
agents/strategy_agent.py
ADDED
@@ -0,0 +1,2 @@
+def run_strategy(niche, business_type):
+    return f"LangGraph: StrategyAgent generating ideas for {niche} in {business_type} niche."
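run_strategy is currently a stub that echoes its inputs rather than calling a model. A hedged sketch of how it could accept an injected LLM callable while keeping the echo as the default (the llm_call parameter is hypothetical and not part of this commit):

```python
# Hypothetical variant: delegate to an injected LLM callable when one is provided.
def run_strategy(niche, business_type, llm_call=None):
    if llm_call is None:
        return f"LangGraph: StrategyAgent generating ideas for {niche} in {business_type} niche."
    prompt = f"Propose a viable {business_type} business in the {niche} niche."
    return llm_call(prompt)
```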
app.py
ADDED
@@ -0,0 +1,58 @@
+import streamlit as st
+import requests
+import time
+
+# Gemini + GPT-4 fallback support
+GEMINI_API_KEY = st.secrets.get("GEMINI_API_KEY", "your-gemini-api-key")
+OPENAI_API_KEY = st.secrets.get("OPENAI_API_KEY", "your-openai-api-key")
+
+GEMINI_ENDPOINT = "https://generativelanguage.googleapis.com/v1/models/gemini-1.5-pro:generateContent"
+
+import openai
+openai.api_key = OPENAI_API_KEY
+
+def call_openai(prompt):
+    try:
+        response = openai.ChatCompletion.create(
+            model="gpt-4",
+            messages=[{"role": "user", "content": prompt}],
+            temperature=0.7
+        )
+        return response['choices'][0]['message']['content']
+    except Exception as e:
+        return f"OpenAI Error: {str(e)}"
+
+def call_gemini(prompt):
+    headers = {"Content-Type": "application/json"}
+    params = {"key": GEMINI_API_KEY}
+    payload = {"contents": [{"parts": [{"text": prompt}]}]}
+    response = requests.post(GEMINI_ENDPOINT, headers=headers, params=params, json=payload)
+    if response.status_code == 200:
+        return response.json()['candidates'][0]['content']['parts'][0]['text']
+    else:
+        return call_openai(prompt)
+
+st.set_page_config(page_title="AutoExec AI", layout="wide")
+st.title("🚀 AutoExec AI: Autonomous AI Business Launcher")
+
+st.sidebar.header("Business Setup")
+niche = st.sidebar.text_input("Niche (e.g. fitness, pets)")
+ad_budget = st.sidebar.slider("Ad Budget per Day", 5, 100, 10)
+business_type = st.sidebar.selectbox("Business Type", ["Dropshipping", "Print-on-Demand", "Newsletter", "Course"])
+platforms = st.sidebar.multiselect("E-Commerce Platforms", ["Shopify", "Gumroad", "WooCommerce"])
+run = st.sidebar.button("Launch")
+
+if run:
+    strategy = call_gemini(f"Give me a viral business idea in the {niche} niche using {business_type}")
+    st.subheader("📈 Strategy")
+    st.markdown(strategy)
+
+    copy = call_gemini(f"Write a product description and landing page for a {niche} {business_type}")
+    st.subheader("📝 Copy")
+    st.markdown(copy)
+
+    ads = call_gemini(f"Create a ${ad_budget}/day ad campaign for a {niche} product.")
+    st.subheader("💸 Ad Campaign")
+    st.markdown(ads)
+
+    st.success("Business Ready to Deploy")
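call_gemini treats any non-200 status as a cue to fall back to GPT-4, but the requests.post call has no timeout, and a raised network exception would surface directly in the UI. A hedged variant (an alternative sketch, not what this commit ships; it reuses GEMINI_ENDPOINT, GEMINI_API_KEY and call_openai from app.py) that bounds the request and falls back on any failure:

```python
# Sketch: same fallback behaviour as call_gemini, with a timeout and exception handling.
import requests

def call_gemini_safe(prompt: str) -> str:
    payload = {"contents": [{"parts": [{"text": prompt}]}]}
    try:
        resp = requests.post(
            GEMINI_ENDPOINT,
            params={"key": GEMINI_API_KEY},
            json=payload,
            timeout=30,
        )
        resp.raise_for_status()
        return resp.json()["candidates"][0]["content"]["parts"][0]["text"]
    except (requests.RequestException, KeyError, IndexError):
        return call_openai(prompt)
```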
app/__init__.py
ADDED
File without changes
app/main.py
ADDED
@@ -0,0 +1,5 @@
+from fastapi import FastAPI
+from app.routes.loopagent import router as loop_router
+
+app = FastAPI(title="AutoExec AI Backend")
+app.include_router(loop_router, prefix="/loopagent", tags=["LoopAgent"])
app/routes/__init__.py
ADDED
File without changes
app/routes/loopagent.py
ADDED
@@ -0,0 +1,21 @@
+from fastapi import APIRouter
+from datetime import datetime
+from agents.strategy_agent import run_strategy
+from memory.database import init_db, log_action
+
+router = APIRouter()
+
+@router.on_event("startup")
+def startup_event():
+    init_db()
+
+@router.get("/run")
+def run_loopagent():
+    result = run_strategy("fitness", "dropshipping")
+    log_action("StrategyAgent", "generate_business", result)
+    return {
+        "status": "Executed",
+        "agent": "StrategyAgent",
+        "result": result,
+        "timestamp": datetime.utcnow().isoformat()
+    }
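@router.on_event("startup") still works on current FastAPI releases, but event handlers are deprecated in favour of lifespan context managers. A hedged sketch of the equivalent wiring in app/main.py (an alternative, not part of this commit):

```python
# Sketch: run init_db() via a lifespan handler instead of a router startup event.
from contextlib import asynccontextmanager

from fastapi import FastAPI

from app.routes.loopagent import router as loop_router
from memory.database import init_db

@asynccontextmanager
async def lifespan(app: FastAPI):
    init_db()  # create the SQLite table before the first request
    yield

app = FastAPI(title="AutoExec AI Backend", lifespan=lifespan)
app.include_router(loop_router, prefix="/loopagent", tags=["LoopAgent"])
```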
celery_worker.py
ADDED
@@ -0,0 +1,11 @@
+from celery import Celery
+from agents.strategy_agent import run_strategy
+from memory.database import log_action
+
+app = Celery("autoexec_ai", broker="redis://localhost:6379/0")
+
+@app.task
+def scheduled_loop():
+    result = run_strategy("fitness", "dropshipping")
+    log_action("StrategyAgent", "scheduled_run", result)
+    return result
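The worker defines scheduled_loop, but nothing in this commit schedules or enqueues it (the Dockerfile starts a worker, not celery beat). A hedged sketch of a beat schedule entry that would run it hourly, assuming a beat process is also started:

```python
# Hypothetical addition to celery_worker.py: periodic trigger via celery beat.
app.conf.beat_schedule = {
    "hourly-strategy-loop": {
        "task": "celery_worker.scheduled_loop",
        "schedule": 3600.0,  # seconds
    },
}
```

Alternatively, a one-off run can be enqueued with scheduled_loop.delay() from any process that can reach the Redis broker.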
dashboard.py
ADDED
@@ -0,0 +1,12 @@
+import streamlit as st
+import sqlite3
+import pandas as pd
+
+st.title("🧠 Agent Memory Log Dashboard")
+
+conn = sqlite3.connect("memory.db")
+query = "SELECT * FROM agent_logs ORDER BY timestamp DESC"
+df = pd.read_sql(query, conn)
+conn.close()
+
+st.dataframe(df)
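dashboard.py queries agent_logs directly, so on a fresh container where no agent has written to memory.db yet the read fails because the table does not exist. A hedged guard sketch (an adjustment, not what this commit contains):

```python
# Sketch: ensure the schema exists before querying, and show a hint when empty.
import sqlite3

import pandas as pd
import streamlit as st

from memory.database import init_db

init_db()  # creates memory.db / agent_logs if they do not exist yet
conn = sqlite3.connect("memory.db")
df = pd.read_sql("SELECT * FROM agent_logs ORDER BY timestamp DESC", conn)
conn.close()

if df.empty:
    st.info("No agent runs logged yet.")
else:
    st.dataframe(df)
```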
langgraph_config.json
ADDED
@@ -0,0 +1,54 @@
+{
+  "nodes": {
+    "StrategyAgent": {
+      "type": "llm-tool",
+      "tool": "agents.strategy_agent.run_strategy",
+      "inputs": [
+        "niche",
+        "business_type"
+      ]
+    },
+    "CopyAgent": {
+      "type": "llm-tool",
+      "tool": "agents.copy_agent.generate_copy",
+      "inputs": [
+        "StrategyAgent"
+      ]
+    },
+    "AdAgent": {
+      "type": "llm-tool",
+      "tool": "agents.ad_agent.plan_ads",
+      "inputs": [
+        "StrategyAgent"
+      ]
+    },
+    "LoopAgent": {
+      "type": "rule-engine",
+      "tool": "agents.loop_agent.optimize",
+      "inputs": [
+        "AdAgent",
+        "CopyAgent"
+      ]
+    }
+  },
+  "edges": [
+    [
+      "StrategyAgent",
+      "CopyAgent"
+    ],
+    [
+      "StrategyAgent",
+      "AdAgent"
+    ],
+    [
+      "AdAgent",
+      "LoopAgent"
+    ],
+    [
+      "CopyAgent",
+      "LoopAgent"
+    ]
+  ],
+  "entry_point": "StrategyAgent",
+  "exit_point": "LoopAgent"
+}
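The graph wires four nodes, but only agents/strategy_agent.py ships in this commit; agents.copy_agent.generate_copy, agents.ad_agent.plan_ads and agents.loop_agent.optimize are referenced by import path only. A minimal stub sketch for one of them, purely illustrative of the expected shape:

```python
# agents/copy_agent.py — hypothetical stub matching the tool path in langgraph_config.json.
def generate_copy(strategy: str) -> str:
    # Takes the StrategyAgent output as input, per the "inputs" wiring above.
    return f"Landing-page copy based on strategy: {strategy}"
```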
memory/database.py
ADDED
@@ -0,0 +1,26 @@
+import sqlite3
+
+def init_db():
+    conn = sqlite3.connect("memory.db")
+    cursor = conn.cursor()
+    cursor.execute(
+        """CREATE TABLE IF NOT EXISTS agent_logs (
+            id INTEGER PRIMARY KEY AUTOINCREMENT,
+            agent TEXT,
+            action TEXT,
+            result TEXT,
+            timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
+        )"""
+    )
+    conn.commit()
+    conn.close()
+
+def log_action(agent, action, result):
+    conn = sqlite3.connect("memory.db")
+    cursor = conn.cursor()
+    cursor.execute(
+        "INSERT INTO agent_logs (agent, action, result) VALUES (?, ?, ?)",
+        (agent, action, result)
+    )
+    conn.commit()
+    conn.close()
requirements.txt
ADDED
@@ -0,0 +1,8 @@
+fastapi
+uvicorn
+celery
+redis
+streamlit
+requests
+openai==0.28
+pandas