import os
import tempfile

import streamlit as st
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from crewai import Agent, Task, Crew
from crewai_tools import LlamaIndexTool
from langchain_openai import ChatOpenAI

st.set_page_config(page_title="Financial Analyst App", layout="wide")
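
# API keys are expected to be available as environment variables before the app starts.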
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

st.title("Financial Analysis and Content Generation App")

if not GROQ_API_KEY or not TAVILY_API_KEY:
    st.warning("Please set the GROQ_API_KEY and TAVILY_API_KEY environment variables to proceed.")
    st.stop()

uploaded_file = st.file_uploader("Upload a PDF for Analysis", type="pdf")

if uploaded_file:
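    # Persist the upload to a temporary file on disk; the .pdf suffix lets
    # SimpleDirectoryReader pick its PDF parser.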
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
        tmp_file.write(uploaded_file.read())
        pdf_path = tmp_file.name

    st.success("PDF uploaded successfully!")

    st.subheader("Processing PDF...")
    reader = SimpleDirectoryReader(input_files=[pdf_path])
    docs = reader.load_data()
    st.write("Loaded document content: ", docs[0].text[:500])
    embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
    index = VectorStoreIndex.from_documents(docs, embed_model=embed_model)
    query_engine = index.as_query_engine(similarity_top_k=5)

    st.subheader("Setting Up Query Tool")
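
    # Expose the query engine to CrewAI as a tool the analyst agent can call.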
    query_tool = LlamaIndexTool.from_query_engine(
        query_engine,
        name="Financial Query Tool",
        description="Use this tool to look up insights from the uploaded document.",
    )

    st.success("Query Engine is ready!")
    chat_llm = ChatOpenAI(
        openai_api_base="https://api.groq.com/openai/v1",
        openai_api_key=GROQ_API_KEY,
        model="groq/llama-3.2-90b-text-preview",
        temperature=0,
        max_tokens=1000,
    )
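
    # Researcher agent: mines the uploaded document through the query tool.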
    researcher = Agent(
        role="Senior Financial Analyst",
        goal="Uncover insights about the document",
        backstory="You are an experienced analyst focused on extracting key financial insights.",
        verbose=True,
        allow_delegation=False,
        tools=[query_tool],
        llm=chat_llm,
    )
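
    # Writer agent: turns the analysis into a reader-friendly blog post.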
    writer = Agent(
        role="Tech Content Strategist",
        goal="Write an engaging blog post based on financial insights",
        backstory="You transform complex financial information into accessible and engaging narratives.",
        llm=chat_llm,
        verbose=True,
        allow_delegation=False,
    )

    task1 = Task(
        description="Conduct a comprehensive analysis of the uploaded document.",
        expected_output="Full analysis report in bullet points",
        agent=researcher,
    )

    task2 = Task(
        description=(
            "Using the analysis insights, create an engaging blog post that highlights "
            "key findings in a simple and accessible manner."
        ),
        expected_output="A well-structured blog post with at least 4 paragraphs.",
        agent=writer,
    )
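
    # With CrewAI's default sequential process, the writer's task runs after the
    # analysis task and can build on its output.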
    crew = Crew(
        agents=[researcher, writer],
        tasks=[task1, task2],
        verbose=True,
    )

    if st.button("Kickoff Analysis"):
        st.subheader("Running Analysis and Content Generation...")
        result = crew.kickoff()
        st.subheader("Generated Output:")
        st.write(result)
else:
    st.info("Please upload a PDF file to proceed.")