# Provenance (converted from Hugging Face Spaces file-viewer residue so the
# file parses as Python):
#   Tobidx's picture
#   Update app.py
#   7f4bf04 verified
#   raw / history blame
#   4.57 kB
import os
import torch
import gradio as gr
from langchain import HuggingFaceHub
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from transformers import pipeline
# 2. Model Initialization
# Sentiment classifier used to auto-detect an email tone when the user
# leaves the optional "Tone" dropdown blank (see analyze_sentiment below).
# The model emits POS / NEU / NEG labels.
sentiment_analyzer = pipeline(
    "sentiment-analysis",
    model="finiteautomata/bertweet-base-sentiment-analysis",
    # Run on the first CUDA device when available, otherwise CPU (-1).
    device=0 if torch.cuda.is_available() else -1
)
# Initialize LLM
# Remote inference through the Hugging Face Hub (requires the
# HUGGINGFACEHUB_API_TOKEN env var). temperature=0.7 trades some
# determinism for more varied email wording.
llm = HuggingFaceHub(
    repo_id="deepseek-ai/deepseek-coder-33b-instruct",
    model_kwargs={"temperature": 0.7}
)
# 3. Templates
# Prompt for the email-generation chain. All four variables are supplied
# by generate_followup_email(); the template text is sent verbatim to the
# LLM, so its content must not be reformatted.
email_template = PromptTemplate(
    input_variables=["previous_interaction", "situation_type", "tone", "urgency"],
    template="""Based on these details, generate a professional follow-up email:
Previous Interaction: {previous_interaction}
Situation Type: {situation_type}
Tone: {tone}
Urgency Level: {urgency}
Generate a personalized email that:
1. Maintains {tone} tone
2. Addresses the specific situation
3. Provides clear next steps
4. Is appropriate for {urgency} urgency level
"""
)
# Plain format string (not a PromptTemplate): {email_text} is filled in
# with str.format() by generate_followup_email() and the result is passed
# straight to the LLM. The bracketed "[1-10]" spans are instructions for
# the model, not Python format fields.
scoring_template = """
Analyze this follow-up email carefully and provide scores on a scale of 1-10 for each category:
Email to analyze:
{email_text}
Please provide numerical scores and explanations in this exact format:
CLARITY SCORE: [1-10]
Explanation: [Why this score]
PROFESSIONALISM SCORE: [1-10]
Explanation: [Why this score]
ACTION ITEMS SCORE: [1-10]
Explanation: [Why this score]
PERSONALIZATION SCORE: [1-10]
Explanation: [Why this score]
OVERALL EFFECTIVENESS SCORE: [1-10]
Explanation: [Why this score]
IMPROVEMENT SUGGESTIONS:
1. [First suggestion]
2. [Second suggestion]
3. [Third suggestion]
"""
# 4. Create LangChain
# Wires the email prompt to the hosted LLM; invoked via .run() in
# generate_followup_email().
email_chain = LLMChain(llm=llm, prompt=email_template)
# 5. Helper Functions
def analyze_sentiment(text, analyzer=None):
    """Map the sentiment of *text* to a suggested email tone.

    Args:
        text: Free-form description of the previous customer interaction.
        analyzer: Optional sentiment pipeline — a callable returning a list
            of ``{'label': ...}`` dicts. Defaults to the module-level
            ``sentiment_analyzer``; injectable for testing.

    Returns:
        One of ``'Friendly'``, ``'Professional'`` or ``'Apologetic'``.
        Unknown labels and any analyzer failure fall back to
        ``'Professional'`` so tone detection can never break generation.
    """
    # Labels emitted by finiteautomata/bertweet-base-sentiment-analysis.
    sentiment_to_tone = {
        'POS': 'Friendly',
        'NEU': 'Professional',
        'NEG': 'Apologetic',
    }
    try:
        if analyzer is None:
            analyzer = sentiment_analyzer
        result = analyzer(text)[0]
        return sentiment_to_tone.get(result['label'], 'Professional')
    except Exception:
        # Best-effort: a failed model call must not block email generation.
        return 'Professional'
# 6. Main Generation Function
def generate_followup_email(previous_interaction, situation_type, tone, urgency,
                            chain=None, scorer=None, template=None):
    """Generate a follow-up email and a quality-score report.

    Args:
        previous_interaction: Description of the prior customer exchange.
        situation_type: Category chosen in the UI (e.g. "Service Issue").
        tone: Desired tone; when falsy it is auto-detected from
            *previous_interaction* via analyze_sentiment().
        urgency: "High" / "Medium" / "Low".
        chain: Optional LLMChain-like object exposing ``.run(dict)``;
            defaults to the module-level ``email_chain`` (injectable for tests).
        scorer: Optional ``callable(prompt) -> str`` used to score the
            generated email; defaults to the module-level ``llm``.
        template: Optional scoring format string with an ``{email_text}``
            field; defaults to the module-level ``scoring_template``.

    Returns:
        ``(email_text, score_text)``. On any failure the first element is
        an error message and the second is ``"Scoring unavailable"`` —
        the Gradio UI always receives two strings instead of a traceback.
    """
    try:
        # Resolve injectable collaborators to the module-level defaults.
        if chain is None:
            chain = email_chain
        if scorer is None:
            scorer = llm
        if template is None:
            template = scoring_template
        if not tone:
            # Tone dropdown left blank: infer it from the interaction text.
            tone = analyze_sentiment(previous_interaction)
        # Generate email
        email_result = chain.run({
            "previous_interaction": previous_interaction,
            "situation_type": situation_type,
            "tone": tone,
            "urgency": urgency,
        })
        # Generate score
        score_result = scorer(template.format(email_text=email_result))
        return email_result, score_result
    except Exception as e:
        return f"Error generating email: {str(e)}", "Scoring unavailable"
# 7. Gradio Interface
# Declarative UI: four inputs mapping positionally onto
# generate_followup_email(previous_interaction, situation_type, tone, urgency)
# and two output textboxes for its (email, score) return tuple.
demo = gr.Interface(
    fn=generate_followup_email,
    inputs=[
        gr.Textbox(
            label="Previous Interaction",
            lines=5,
            placeholder="Describe the previous interaction with the customer..."
        ),
        gr.Dropdown(
            label="Situation Type",
            choices=[
                "Complaint Resolution",
                "Service Issue",
                "Payment Dispute",
                "Product Query",
                "General Follow-up"
            ]
        ),
        gr.Dropdown(
            label="Tone (Optional - will be automatically detected if not specified)",
            choices=[
                # Empty first choice is meaningful: a falsy tone triggers
                # automatic detection via analyze_sentiment().
                "",
                "Professional",
                "Apologetic",
                "Friendly",
                "Formal",
                "Empathetic"
            ]
        ),
        gr.Dropdown(
            label="Urgency",
            choices=["High", "Medium", "Low"]
        )
    ],
    outputs=[
        gr.Textbox(label="Generated Email"),
        gr.Textbox(label="Email Score and Suggestions")
    ],
    title="Smart Sales Email Generator with Quality Scoring",
    description="Generate and evaluate follow-up emails based on previous interactions",
    # One-click demo rows; values match the input order above.
    examples=[
        [
            "Customer complained about slow website loading times and threatened to cancel subscription",
            "Complaint Resolution",
            "Apologetic",
            "High"
        ],
        [
            "Client requested information about premium features and pricing",
            "Product Query",
            "Professional",
            "Medium"
        ]
    ]
)
# 8. Launch App
# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()