# app.py — Private Market Analyst AI (Gradio front end over the Groq API).
# Source: Hugging Face Space by SatyamSinghal, commit bc32962 ("Update app.py", 6.9 kB).
import os
import gradio as gr
import openai
from langdetect import detect
# Set up OpenAI API with your custom endpoint
openai.api_key = os.getenv("API_KEY")
openai.api_base = "https://api.groq.com/openai/v1"
# Import datasets from the Python files in your project
from company_profile import company_profile
from workforce import workforce
from financials import financials
from investors import investors
from products_services import products_services
from market_trends import market_trends
from partnerships_collaborations import partnerships_collaborations
from legal_compliance import legal_compliance
from customer_insights import customer_insights
from news_updates import news_updates
from social_media import social_media
from tech_stack import tech_stack
# Command handler for specific queries
# Glossary backing the "define <term>" command. Built once at import time
# instead of being reconstructed on every call.
_DEFINITIONS = {
    "market analysis": (
        "Market analysis is the process of evaluating a business's position in the market by assessing "
        "competitors, trends, and customer behavior to make informed decisions for growth and investment. "
        "Think of it as gathering the insights needed to craft a winning strategy. 📊"
    ),
    "financials": (
        "Financial analysis provides a snapshot of a company's fiscal health. It involves reviewing metrics like "
        "profit margins, revenues, and expenditures to assess if the business is sustainable and scalable. 💵"
    ),
    "investors": (
        "Investors are the capital partners that fuel innovation and expansion. They provide financial resources "
        "in exchange for equity or debt, aiming to generate a return on their investment. It's all about leveraging "
        "their capital to scale. 🏦"
    ),
}


def command_handler(user_input):
    """Handle special chat commands before normal query routing.

    Currently supports "define <term>" (case-insensitive): looks the term up
    in the module-level glossary.

    Args:
        user_input: Raw text entered by the user.

    Returns:
        The definition (or a polite fallback message) when the input is a
        "define" command; None otherwise, so the caller falls through to
        regular query handling.
    """
    if user_input.lower().startswith("define "):
        # Drop the 7-character "define " prefix to isolate the term.
        term = user_input[7:].strip()
        return _DEFINITIONS.get(
            term.lower(),
            "I don't have a definition for that term yet. Let’s dive into your query directly!",
        )
    return None
# Function to get the response from OpenAI with professionalism and energy
def get_groq_response(message, user_language):
    """Query the Groq-hosted LLM for a free-form market-analysis answer.

    Args:
        message: The user's question, sent as the user turn.
        user_language: Human-readable language name ("Hindi" or "English").
            Previously accepted but never used; now injected into the system
            prompt so the model answers in the user's detected language.

    Returns:
        The model's reply text, or a friendly error string if the API call
        fails for any reason (the chat UI should never crash on API errors).
    """
    try:
        response = openai.ChatCompletion.create(
            model="llama-3.1-70b-versatile",
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are a professional, energetic Private Market Analyst AI. Your task is to explain market trends, "
                        "company insights, and investment strategies in a clear, engaging, and authoritative manner. "
                        "While providing top-tier market analysis, keep the tone polished, insightful, and relevant to business leaders. "
                        "Always maintain professionalism while making the content informative and engaging. "
                        f"Respond in {user_language}."
                    )
                },
                {"role": "user", "content": message}
            ]
        )
        return response.choices[0].message["content"]
    except Exception as e:
        # Surface the failure in the chat instead of propagating it.
        return f"Oops, looks like something went wrong! Error: {str(e)}"
# Function to handle the interaction and queries
def market_analysis_agent(user_input, history=None):
    """Route a user message to a command, a canned dataset, or the LLM.

    Args:
        user_input: The user's message.
        history: Chat history as a list of (user, bot) tuples. Defaults to a
            fresh list per call (the original ``history=[]`` mutable default
            was shared across calls, leaking state between sessions).

    Returns:
        A ``(history, history)`` pair — the chatbot display and the Gradio
        state are the same list.
    """
    if history is None:
        history = []
    try:
        # Detect the language of the user's input once, up front.
        detected_language = detect(user_input)
        user_language = "Hindi" if detected_language == "hi" else "English"

        # Handle special commands like "Define [term]".
        command_response = command_handler(user_input)
        if command_response:
            history.append((user_input, command_response))
            return history, history

        # Keyword → dataset routing. Lowercase once instead of per-branch.
        lowered = user_input.lower()
        if "company" in lowered:
            response = company_profile
        elif "financials" in lowered:
            response = financials
        elif "investors" in lowered:
            response = investors
        elif "products" in lowered:
            response = products_services
        elif "news" in lowered or "updates" in lowered:
            # News is a structured dict; render it as readable text.
            response = format_response(news_updates)
        elif "legal" in lowered or "compliance" in lowered:
            response = legal_compliance
        elif any(k in lowered for k in ("social media", "instagram", "linkedin", "twitter")):
            response = social_media
        elif "workforce" in lowered:
            response = workforce
        # The following datasets were imported but previously unreachable;
        # new branches are placed after the originals so existing keyword
        # behavior is unchanged.
        elif "trend" in lowered:
            response = market_trends
        elif "partnership" in lowered or "collaboration" in lowered:
            response = partnerships_collaborations
        elif "customer" in lowered:
            response = customer_insights
        elif "tech stack" in lowered or "technology" in lowered:
            response = tech_stack
        else:
            # No dataset matched — fall back to a dynamic LLM answer.
            response = get_groq_response(user_input, user_language)

        # Append a professional, engaging sign-off.
        # NOTE(review): str hash() is salted per process (PYTHONHASHSEED),
        # so the chosen reply varies between runs — harmless here.
        cool_replies = [
            "That’s a great insight! Keep the questions coming. 🔍",
            "Excellent direction! Let’s explore that further. 📈",
            "Insightful! Let’s dive into the numbers. 📊",
            "You've got the right focus. Let's sharpen those strategies. 🧠",
            "You're on the right track. Let’s optimize that idea! 🔧"
        ]
        response = f"{response} {cool_replies[hash(user_input) % len(cool_replies)]}"

        history.append((user_input, response))
        return history, history
    except Exception as e:
        # Keep prior turns visible; the original error path discarded them.
        history.append((user_input, f"Oops, something went wrong: {str(e)}"))
        return history, history
# Function to format dataset responses
def format_response(data):
    """Render a dataset dict as human-readable text.

    Each key becomes a capitalized heading line, followed by one "- item"
    bullet line per element of its value list.
    """
    pieces = []
    for heading, items in data.items():
        pieces.append(f"{heading.capitalize()}:\n")
        pieces.extend(f"- {entry}\n" for entry in items)
    # Single join instead of repeated string concatenation.
    return "".join(pieces)
# Gradio Interface setup
# Welcome text shown above the chat box.
APP_DESCRIPTION = (
    "Welcome to your professional Private Market Analyst AI! 📊\n\n"
    "Ask me anything about market trends, company profiles, financial analysis, investors, and more! "
    "I’ll provide you with actionable insights, backed by data and clear explanations, to help you make informed decisions. "
    "Let’s break down the complexities of the market with precision and insight. 🔍"
)

# Wire the agent into a Gradio UI: a text box plus hidden conversation
# state in, a chatbot pane plus updated state out.
chat_interface = gr.Interface(
    fn=market_analysis_agent,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title="Private Market AI Agent",
    description=APP_DESCRIPTION,
    live=False,  # respond only on submit, not on every keystroke
)

# Start the web server only when run as a script, not when imported.
if __name__ == "__main__":
    chat_interface.launch()