import streamlit as st
import pandas as pd
import sqlite3
import os
import json
from pathlib import Path
from datetime import datetime, timezone
from crewai import Agent, Crew, Process, Task
from crewai_tools import tool
from langchain_groq import ChatGroq
from langchain.schema.output import LLMResult
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_community.tools.sql_database.tool import (
    InfoSQLDatabaseTool,
    ListSQLDatabaseTool,
    QuerySQLCheckerTool,
    QuerySQLDataBaseTool,
)
from langchain_community.utilities.sql_database import SQLDatabase
from datasets import load_dataset
import tempfile
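
# Read the Groq API key from Streamlit secrets (set GROQ_API_KEY in the Space settings).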
os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")
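
# Every prompt sent to the LLM and every completion it returns is appended
# to a JSONL file so calls can be inspected and audited later.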
class Event:
    """A single timestamped entry for the LLM log."""

    def __init__(self, event, text):
        self.event = event
        self.timestamp = datetime.now(timezone.utc).isoformat()
        self.text = text


class LLMCallbackHandler(BaseCallbackHandler):
    """Logs every prompt and completion to a JSONL file."""

    def __init__(self, log_path: Path):
        self.log_path = log_path

    def on_llm_start(self, serialized, prompts, **kwargs):
        # Reuse the Event helper so all log entries share the same shape and UTC timestamps.
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps(vars(Event("llm_start", prompts[0]))) + "\n")

    def on_llm_end(self, response: LLMResult, **kwargs):
        generation = response.generations[-1][-1].message.content
        with self.log_path.open("a", encoding="utf-8") as file:
            file.write(json.dumps(vars(Event("llm_end", generation))) + "\n")


llm = ChatGroq(
    temperature=0,
    model_name="mixtral-8x7b-32768",
    callbacks=[LLMCallbackHandler(Path("prompts.jsonl"))],
)
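
# Streamlit UI: pick a Hugging Face dataset, preview it, then query it in natural language.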
st.title("SQL-RAG using CrewAI")
st.write("Analyze and summarize Hugging Face datasets using natural language queries with SQL-based retrieval.")
default_dataset = "datascience/ds-salaries"
st.text("Example dataset: `datascience/ds-salaries` (You can enter your own dataset name)")
dataset_name = st.text_input("Enter Hugging Face dataset name:", value=default_dataset)

if dataset_name:
    with st.spinner("Loading dataset..."):
        try:
            dataset = load_dataset(dataset_name, split="train")
            df = pd.DataFrame(dataset)
            st.success(f"Dataset '{dataset_name}' loaded successfully!")
            st.write("Preview of the dataset:")
            st.dataframe(df.head())

            # Materialize the dataset as a SQLite table so the agents can query it with SQL.
            temp_dir = tempfile.TemporaryDirectory()
            db_path = os.path.join(temp_dir.name, "data.db")
            connection = sqlite3.connect(db_path)
            df.to_sql("data_table", connection, if_exists="replace", index=False)
            db = SQLDatabase.from_uri(f"sqlite:///{db_path}")
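
            # Wrap the LangChain SQL tools so the CrewAI agents can call them.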
@tool("list_tables")
def list_tables() -> str:
return ListSQLDatabaseTool(db=db).invoke("")
@tool("tables_schema")
def tables_schema(tables: str) -> str:
return InfoSQLDatabaseTool(db=db).invoke(tables)
@tool("execute_sql")
def execute_sql(sql_query: str) -> str:
return QuerySQLDataBaseTool(db=db).invoke(sql_query)
@tool("check_sql")
def check_sql(sql_query: str) -> str:
return QuerySQLCheckerTool(db=db, llm=llm).invoke({"query": sql_query})
            # backstory is a required Agent field in CrewAI; the texts below are
            # short illustrative descriptions.
            sql_dev = Agent(
                role="Database Developer",
                goal="Extract data from the database.",
                backstory="An experienced database engineer who writes safe, efficient SQL queries.",
                llm=llm,
                tools=[list_tables, tables_schema, execute_sql, check_sql],
                allow_delegation=False,
            )
            data_analyst = Agent(
                role="Data Analyst",
                goal="Analyze and provide insights.",
                backstory="A data analyst who turns raw query results into clear insights.",
                llm=llm,
                allow_delegation=False,
            )
            report_writer = Agent(
                role="Report Editor",
                goal="Summarize the analysis.",
                backstory="An editor who condenses analysis into concise, readable reports.",
                llm=llm,
                allow_delegation=False,
            )
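
            # Three sequential tasks: extract, analyze, summarize.
            # The {query} placeholder is filled in from the inputs passed to kickoff().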
            extract_data = Task(
                description="Extract data required for the query: {query}.",
                expected_output="Database result for the query",
                agent=sql_dev,
            )
            analyze_data = Task(
                description="Analyze the data for: {query}.",
                expected_output="Detailed analysis text",
                agent=data_analyst,
                context=[extract_data],
            )
            write_report = Task(
                description="Summarize the analysis into a short report.",
                expected_output="Markdown report",
                agent=report_writer,
                context=[analyze_data],
            )
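
            # Run the agents one after another; each task sees the previous task's output via context.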
            crew = Crew(
                agents=[sql_dev, data_analyst, report_writer],
                tasks=[extract_data, analyze_data, write_report],
                process=Process.sequential,
                verbose=2,
                memory=False,
            )

            query = st.text_input("Enter your query:", placeholder="e.g., 'How does salary vary by company size?'")
            if query:
                with st.spinner("Processing your query..."):
                    inputs = {"query": query}
                    result = crew.kickoff(inputs=inputs)
                    st.markdown("### Analysis Report:")
                    st.markdown(result)
                    # Remove the temporary SQLite database once the report is rendered.
                    temp_dir.cleanup()
        except Exception as e:
            # The try block covers both dataset loading and crew execution.
            st.error(f"Error: {e}")