Phoenix21 committed (verified)
Commit 67c4c64 · Parent(s): 6662121

Update app.py

Files changed (1): app.py (+12 −15)
app.py CHANGED
@@ -1,7 +1,7 @@
 import os
 import logging
 import re
-from langchain.vectorstores import Chroma
+from langchain_community.vectorstores import Chroma  # Updated import
 from langchain_huggingface import HuggingFaceEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_groq import ChatGroq
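Note: the Chroma vector store is imported from langchain_community.vectorstores, where it lives after the LangChain package split. A minimal sketch of how the updated import feeds the store this pipeline configures; the embedding model name and sample text below are placeholders, not values taken from app.py:

```python
# Sketch only: embedding model name and sample text are placeholders, not app.py's values.
from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter

embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = splitter.create_documents(
    ["Box breathing is a 4-4-4-4 breathing exercise used to calm the nervous system."]
)

# With persist_directory set, Chroma 0.4.x writes to disk automatically,
# which is why the explicit vectorstore.persist() call is dropped in the next hunk.
vectorstore = Chroma.from_documents(
    documents=docs,
    embedding=embedding_model,
    persist_directory="/tmp/chroma_db",
)
```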
@@ -105,23 +105,19 @@ def create_rag_pipeline(file_paths, model, temperature, max_tokens):
         embedding=embedding_model,
         persist_directory="/tmp/chroma_db"
     )
-    vectorstore.persist()
+    # vectorstore.persist()  # Deprecated in Chroma 0.4.x
 
     retriever = vectorstore.as_retriever()
 
-    # Updated Prompt Template with Formatting Instructions
     custom_prompt_template = PromptTemplate(
         input_variables=["context", "question"],
         template="""
-You are an AI assistant specialized in daily wellness. Provide a concise, thorough, and stand-alone answer to the user's question based on the given context. Use Markdown formatting to enhance readability. For list-based answers, use numbered lists. Include relevant examples or schedules where beneficial. The final answer should be coherent, self-contained, and end with a complete sentence.
-
-**Context:**
+You are an AI assistant specialized in daily wellness. Provide a concise, thorough, and stand-alone answer to the user's question based on the given context. Include relevant examples or schedules where beneficial. **When listing steps or guidelines, format them as a numbered list with appropriate markdown formatting.** The final answer should be coherent, self-contained, and end with a complete sentence.
+Context:
 {context}
-
-**Question:**
+Question:
 {question}
-
-**Final Answer:**
+Final Answer:
 """
     )
 
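How custom_prompt_template is wired into the chain is outside this hunk. A plausible sketch, assuming the common RetrievalQA setup with ChatGroq; the real construction in app.py may differ, and retriever, model, temperature, and max_tokens refer to the locals/parameters of create_rag_pipeline shown in the hunk header:

```python
# Sketch only: assumes a standard RetrievalQA chain; app.py's actual wiring may differ.
from langchain.chains import RetrievalQA
from langchain_groq import ChatGroq

llm = ChatGroq(model=model, temperature=temperature, max_tokens=max_tokens)
rag_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",  # retrieved chunks are stuffed into the {context} slot of the prompt
    retriever=retriever,
    chain_type_kwargs={"prompt": custom_prompt_template},
)
```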
@@ -146,7 +142,9 @@ def answer_question(model, temperature, max_tokens, question):
         return "The system is currently unavailable. Please try again later."
     try:
         answer = rag_chain.run(question)
-        complete_answer = ensure_complete_sentences(answer)
+        # Remove or modify ensure_complete_sentences if necessary
+        # complete_answer = ensure_complete_sentences(answer)
+        complete_answer = answer
         return complete_answer
     except Exception as e_inner:
         logger.error(f"Error: {e_inner}")
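ensure_complete_sentences is bypassed here, and its definition is not part of the diff. For illustration, a hypothetical version of such a helper (not the one in app.py), using the re module already imported at the top of the file:

```python
import re

# Hypothetical helper, not app.py's implementation: trims the response back to its
# last sentence-ending punctuation so the answer never stops mid-sentence.
def ensure_complete_sentences(text: str) -> str:
    sentences = re.findall(r"[^.!?]*[.!?]", text, flags=re.DOTALL)
    return "".join(sentences).strip() if sentences else text.strip()

print(ensure_complete_sentences("Box breathing calms the body. It uses four counts"))
# -> "Box breathing calms the body."
```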
@@ -155,7 +153,6 @@ def answer_question(model, temperature, max_tokens, question):
 def gradio_interface(model, temperature, max_tokens, question):
     return answer_question(model, temperature, max_tokens, question)
 
-# Updated Gradio Interface to Render Markdown
 interface = gr.Interface(
     fn=gradio_interface,
     inputs=[
@@ -164,11 +161,11 @@ interface = gr.Interface(
         gr.Slider(label="Max Tokens", minimum=200, maximum=2048, step=1, value=max_tokens),
         gr.Textbox(label="Question", placeholder="e.g., What is box breathing and how does it help reduce anxiety?")
     ],
-    outputs=gr.outputs.Markdown(),  # Changed from "text" to Markdown
+    outputs=gr.Markdown(label="Answer"),  # Updated output component
     title="Daily Wellness AI",
-    description="Ask questions about daily wellness and receive a concise, complete, and well-formatted answer.",
+    description="Ask questions about daily wellness and receive a concise, complete answer.",
     examples=[
-        ["llama3-8b-8192", 0.7, 500, "What are 10 steps for yoga?"],
+        ["llama3-8b-8192", 0.7, 500, "What is box breathing and how does it help reduce anxiety?"],
         ["llama3-8b-8192", 0.6, 600, "Give me a weekly fitness schedule incorporating mindfulness exercises."]
     ],
     allow_flagging="never"
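The gr.outputs namespace was removed in recent Gradio releases, so the new code passes a gr.Markdown component directly as the output. A self-contained sketch of the resulting interface; the input components, default values, and placeholder answer function are stand-ins, since the real ones are defined elsewhere in app.py:

```python
# Sketch only: input components, defaults, and the answer function are stand-ins.
import gradio as gr

def gradio_interface(model, temperature, max_tokens, question):
    # Placeholder for the real RAG call; returns Markdown text.
    return f"**Question:** {question}\n\n1. Placeholder answer rendered as Markdown."

interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Model", value="llama3-8b-8192"),
        gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, step=0.05, value=0.7),
        gr.Slider(label="Max Tokens", minimum=200, maximum=2048, step=1, value=500),
        gr.Textbox(label="Question", placeholder="e.g., What is box breathing and how does it help reduce anxiety?"),
    ],
    outputs=gr.Markdown(label="Answer"),  # renders the Markdown-formatted answer
    title="Daily Wellness AI",
    allow_flagging="never",
)

if __name__ == "__main__":
    interface.launch()
```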
 