import os
import re
from dotenv import load_dotenv
import torch
from transformers import RobertaForSequenceClassification, RobertaTokenizerFast, pipeline as text_pipeline
import gradio as gr
from openai import OpenAI

# Load environment variables from .env file
load_dotenv()

# Get API key from environment
API_KEY = os.getenv("API_KEY")

# Initialize OpenAI client
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=API_KEY
)
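
# Illustrative .env layout this script expects (the key name must match the
# os.getenv("API_KEY") call above; the value shown is a placeholder, not a real key):
#   API_KEY=<your NVIDIA API key>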

# Load classification model
def load_emotion_model(model_path):
    model = RobertaForSequenceClassification.from_pretrained(model_path)
    tokenizer = RobertaTokenizerFast.from_pretrained(model_path)
    return model, tokenizer
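
# Assumption: model_path points to a directory saved with save_pretrained(),
# i.e. containing config.json, the model weights (model.safetensors or
# pytorch_model.bin) and the tokenizer files.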

# Map prediction to readable labels
def map_to_labels(label):
    return "Happy/Positive Mindset" if label.lower() == "positive" else "Depressed/Negative Mindset"

# Classify mental state based on user input
def classify_emotion(user_input, model, tokenizer, device):
    nlp = text_pipeline("text-classification", model=model, tokenizer=tokenizer, device=device)
    result = nlp(user_input)
    return map_to_labels(result[0]['label'])
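
# The text-classification pipeline returns a list of dicts such as
# [{'label': 'positive', 'score': 0.98}] (illustrative); the exact label strings
# depend on the fine-tuned model's config, and map_to_labels treats anything
# other than "positive" as negative.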

# Analyze emotion using the LLM
def emotion_analysis(user_input):

    # Load the fine-tuned classifier and tokenizer.
    # Note: the model is reloaded on every call; for lower latency it could be
    # loaded once at module level and reused.
    model_path = "mentalhealth-roberta-base_nemotron_model"  # Replace with your model path
    model, tokenizer = load_emotion_model(model_path)
    device = 0 if torch.cuda.is_available() else -1  # GPU index 0 if available, else CPU

    # Step 1: Classify emotion
    predicted_emotion = classify_emotion(user_input, model, tokenizer, device)

    # Step 2: Generate LLM response
    prompt = f"""
    Task: You are a social psychologist specializing in Roy Baumeister's six-stage theory of emotional progression. Your task is to analyze emotional states based on user input while adhering to specific response boundaries.

    [Input Information]:
    **User Input**: "{user_input}"
    **Model Output**: "{predicted_emotion}"

    Specifics:
    1. Strictly respond only to questions or input related to mental health or emotional well-being. For unrelated input, reply with: "Not a valid question." 
       - Example: If the user asks about weather, sports, or other unrelated topics, respond with: "Not a valid question."
    2. Use the **User Input** as the primary source for determining the emotional state, while considering the **Model Output** ("happy" or "depressed") only as a secondary reference.
    3. Assign the user’s emotional state to one of Roy Baumeister’s six stages of emotional progression:
       - Stage 1: Falling short of expectations
       - Stage 2: Attributions to self
       - Stage 3: High self-awareness
       - Stage 4: Negative affect
       - Stage 5: Cognitive deconstruction
       - Stage 6: Disinhibition
    4. Provide specific recommendations for the assigned stage:
       - If the user is **depressed**, suggest stage-specific remedies to improve their emotional state.
       - If the user is **happy**, suggest strategies to maintain or enhance their happiness.
    5. Prioritize clarity, empathy, and practicality in your analysis and suggestions.

    [Response Rules]:
    - Do NOT attempt to provide an output if the input is not related to mental health.
    - Always analyze the user’s input independently, even if it conflicts with the model’s predicted output.

    [Desired Output Format]:
    Emotional Analysis:
    I'd say you're feeling: <Happy/Depressed>
    Emotional Stage: <Stage and brief reasoning>
    Suggested Remedies/Strategies: <Practical advice based on the stage>
    """

    try:
        completion = client.chat.completions.create(
            model="nvidia/nemotron-4-340b-instruct",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.2,
            top_p=0.7,
            max_tokens=512,
            stream=True
        )

        # Accumulate the streamed response; a chunk whose delta.content is None
        # (e.g. the final chunk) is expected and is simply skipped.
        response = ""
        for chunk in completion:
            delta = chunk.choices[0].delta.content
            if delta is not None:
                print(delta, end="")
                response += delta

    except Exception as e:
        response = f"An error occurred while processing the response: {e}"

    # Strip markdown emphasis markers before parsing the response
    response = response.replace("*", "")
    return response

def extract_analysis_details(analysis_text):
    # Extract the reported feeling (Happy/Depressed)
    feelings_match = re.search(r"I'd say you're feeling:\s*([^\n]+)", analysis_text)
    feelings = feelings_match.group(1).strip() if feelings_match else "Not Found"
    if feelings.lower() == "happy":
        feelings += " with Positive Mindset"
    elif feelings.lower() == "depressed":
        feelings += " with Negative Mindset"

    # Extract emotional stage
    stage_match = re.search(r"Emotional Stage:\s*([^\n.]+)", analysis_text)
    emotional_stage = stage_match.group(1).strip() if stage_match else "Not Found"

    # Match the remedies/strategies section header and capture from there to the end
    pattern = r"(Suggested Remedies/Strategies|Suggested Remedies|Suggested Strategies):.*"
    match = re.search(pattern, analysis_text, re.DOTALL)
    suggestions = match.group(0).strip() if match else "No matching section found."

    if feelings == "Not Found":
        feelings = "Not a valid question."
    return feelings, emotional_stage, suggestions
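
# Illustrative (hypothetical) LLM output that extract_analysis_details can parse:
#
#   Emotional Analysis:
#   I'd say you're feeling: Happy
#   Emotional Stage: Stage 1 - Falling short of expectations
#   Suggested Remedies/Strategies: Keep a gratitude journal and celebrate small wins.
#
# which would yield:
#   feelings        -> "Happy with Positive Mindset"
#   emotional_stage -> "Stage 1 - Falling short of expectations"
#   suggestions     -> "Suggested Remedies/Strategies: Keep a gratitude journal and celebrate small wins."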

# Gradio interface with input validation
def validate_and_run(user_input):
    if not user_input.strip():  # Check if the input is empty or just spaces
        return "Please provide valid input before submitting.", "Not Applicable", "Not Applicable"
    else:
        response = emotion_analysis(user_input)
        return extract_analysis_details(response)


# Gradio interface
iface = gr.Interface(
    fn=validate_and_run,
    inputs=gr.Textbox(
        label="How are you feeling today?",
        placeholder="Share your thoughts here...!"
    ),
    outputs=[
        gr.Textbox(label="Feelings"),
        gr.Textbox(label="Emotional Stage"),
        gr.Textbox(label="Suggested Strategies")
    ],
    title="Analyze your emotions and generate stage-specific psychological insights",
)

# Launch the app
if __name__ == "__main__":
    iface.launch()
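    # Optional: iface.launch(share=True) exposes a temporary public URL via Gradio.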