# codebot / app.py — Hugging Face Space: Gradio mental-health chatbot
# (source exported from the Space file viewer; rev 6530946)
import os
import gradio as gr
from langchain_community.llms import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate
# Initialize the chatbot
# Hugging Face API token, read from the environment (set as a Space secret);
# None if unset — the endpoint call will then fail with an auth error.
HF_TOKEN = os.getenv("HF_TOKEN")
# Remote text-generation endpoint backed by Google's Gemma 1.1 7B
# instruction-tuned model. Low temperature (0.1), small top-k, and a mild
# repetition penalty keep replies focused and stable for a support chatbot.
llm = HuggingFaceEndpoint(
repo_id="google/gemma-1.1-7b-it",
task="text-generation",
max_new_tokens=512,
top_k=5,
temperature=0.1,
repetition_penalty=1.03,
huggingfacehub_api_token=HF_TOKEN
)
template = """
You are a Mental Health Chatbot, and your purpose is to provide supportive and non-judgmental guidance to users who are struggling with their mental health. Your goal is to help users identify their concerns, offer resources and coping strategies, and encourage them to seek professional help when needed.
User Context: {context}
Question: {question}
Please respond with a helpful and compassionate answer that addresses the user's concern. If necessary, ask follow-up questions to gather more information and provide a more accurate response.
Remember to prioritize the user's well-being and safety above all else. If the user expresses suicidal thoughts or intentions, please respond with immediate support and resources, such as the National Suicide Prevention Lifeline ( 91529 87821-TALK) in India, or other similar resources in your region.
Helpful Answer: """
QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"],template=template)
def predict(message, history):
    """Generate one chatbot reply for the Gradio ChatInterface.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list
        Prior conversation turns supplied by Gradio; interpolated verbatim
        into the prompt's {context} slot.

    Returns
    -------
    str
        The model's generated text, or a fallback apology when the endpoint
        returned no generations.
    """
    input_prompt = QA_CHAIN_PROMPT.format(question=message, context=history)
    result = llm.generate([input_prompt])
    # Guard BOTH levels of the nested generations list: `result.generations`
    # may be empty, and its first element may itself be an empty list — the
    # original single truthiness check on the outer list would then raise
    # IndexError on `generations[0][0]`. (Leftover debug print removed.)
    if result.generations and result.generations[0]:
        return result.generations[0][0].text
    return "I'm sorry, I couldn't generate a response for that input."
# Wire predict() into a Gradio chat UI and start the web server (blocks here).
gr.ChatInterface(predict).launch()