import streamlit as st

# set_page_config must be the first Streamlit command executed in the script.
st.set_page_config(page_title="Mental Health Bot", page_icon=":robot:")

import os
from itertools import zip_longest

import torch
import transformers
from transformers import AutoTokenizer

from langchain.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.agents import AgentType, initialize_agent, load_tools

# Replace with your own key; ideally supply it via the environment rather
# than hardcoding it in source.
os.environ["SERPAPI_API_KEY"] = "<your-serpapi-api-key>"

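# Build the local LLM once and cache it in st.session_state, so the expensive
# pipeline construction does not repeat on every Streamlit rerun.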
if "llm" not in st.session_state:
    model = "shaneperry0101/Health-Llama-3.2-1B"

    tokenizer = AutoTokenizer.from_pretrained(model)

    pipeline = transformers.pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        device_map="auto",
        max_length=1000,
    )

    llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={"temperature": 0})
    st.session_state["llm"] = llm
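
# Wrap the model in a LangChain ReAct agent with a single SerpAPI search
# tool; the agent fetches web context that is fed into the prompt below.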
if "agent" not in st.session_state:
    tools = load_tools(["serpapi"], llm=st.session_state["llm"])
    st.session_state["agent"] = initialize_agent(
        tools,
        st.session_state["llm"],
        agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
        handle_parsing_errors="Check your output and make sure it conforms! Gather as much information as you can",
    )


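# Build the prompt (web context + conversation so far + the new message)
# and run it through the local LLM.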
def get_response(history, user_message, temperature=0):
    DEFAULT_TEMPLATE = """You are a mental health expert who helps patients with mental
    health issues such as depression, stress, anxiety, etc. Give the patients helpful
    advice about their problems. All of the messages are from patients, and you advise
    them one to one. Useful information from the web is also provided below for you to
    draw on while formulating your response.
    Useful news information from Web:
    {web_knowledge}

    Current conversation:
    {history}
    Patient: {input}
    Your response:"""

    PROMPT = PromptTemplate(
        input_variables=["web_knowledge", "history", "input"], template=DEFAULT_TEMPLATE
    )

    conversation_with_summary = LLMChain(
        llm=st.session_state["llm"],
        prompt=PROMPT,
        verbose=False,
    )
    # Ask the search agent for background on the user's issue, then answer.
    web_knowledge = st.session_state["agent"].run(
        "Fetch details about the mental health issues regarding " + user_message
    )
    response = conversation_with_summary.predict(
        input=user_message, history=history, web_knowledge=web_knowledge
    )
    return response

# Flatten the chat transcript into a plain-text "input ... / output ..." log.
def get_history(history_list):
    history = ""
    for message in history_list:
        if message["role"] == "user":
            history = history + "input " + message["content"] + "\n"
        elif message["role"] == "assistant":
            history = history + "output " + message["content"] + "\n"

    return history

# Streamlit UI
st.title("Mental Health Chatbot")

def get_text():
    # An empty label triggers a Streamlit accessibility warning, so give the
    # input box a real label.
    input_text = st.text_area("How are you feeling today?", key="input")
    return input_text

if "past" not in st.session_state:
    st.session_state["past"] = []
if "generated" not in st.session_state:
    st.session_state["generated"] = []
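# "past" stores the user's messages and "generated" the bot's replies; they
# are interleaved below to rebuild the transcript on each rerun.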

user_input = get_text()

if user_input:
    user_history = list(st.session_state["past"])
    bot_history = list(st.session_state["generated"])

    combined_history = []
    for user_msg, bot_msg in zip_longest(user_history, bot_history):
        if user_msg is not None:
            combined_history.append({'role': 'user', 'content': user_msg})
        if bot_msg is not None:
            combined_history.append({'role': 'assistant', 'content': bot_msg})

    formatted_history = get_history(combined_history)

    output = get_response(formatted_history, user_input)

    st.session_state.past.append(user_input)
    st.session_state.generated.append(output)

if st.session_state["generated"]:
    for i in range(len(st.session_state["generated"])):
        st.text("User: " + st.session_state["past"][i])
        st.text("Assistant: " + st.session_state["generated"][i])
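
# To launch locally (assuming this file is saved as app.py):
#   streamlit run app.py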