mindspark121 committed
Commit ef0b933 · verified · 1 Parent(s): 1ffb918

Update app.py

Files changed (1):
  1. app.py (+17 -21)
app.py CHANGED
@@ -35,33 +35,29 @@ class ChatRequest(BaseModel):
 class SummaryRequest(BaseModel):
     chat_history: list  # List of messages
 
+
+# Load Local LLM (Mistral or Llama)
+model_name = "mistralai/Mistral-7B-Instruct-v0.3"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+
+def generate_local_emotional_response(user_input, questions):
+    """Generate emotional responses locally using LLaMA/Mistral."""
+    prompt = f"User: {user_input}\n\nBased on this, respond in an empathetic way before asking each question:\n1. {questions[0]}\n2. {questions[1]}\n3. {questions[2]}"
+    inputs = tokenizer(prompt, return_tensors="pt")
+    output = model.generate(**inputs, max_length=200)
+    return tokenizer.decode(output[0], skip_special_tokens=True).split("\n")
+
 @app.post("/get_questions")
 def get_recommended_questions(request: ChatRequest):
-    """Retrieve the most relevant diagnostic questions with a conversational response."""
-
-    # Step 1: Encode the input message for FAISS search
     input_embedding = embedding_model.encode([request.message], convert_to_numpy=True)
     distances, indices = question_index.search(input_embedding, 3)
-
-    # Step 2: Retrieve the top 3 relevant questions
     retrieved_questions = [questions_df["Questions"].iloc[i] for i in indices[0]]
 
-    # Step 3: Define Dynamic Prompt Variations
-    empathetic_phrases = [
-        "I hear you, and I appreciate you sharing this. Let me ask:",
-        "That sounds challenging. Could you help me understand better by answering this:",
-        "I understand what you're going through. Here's something to think about:",
-        "Thank you for opening up. Let’s explore this further:",
-        "Your feelings are completely valid. Here's a question to help us understand more:"
-    ]
-
-    # Step 4: Generate Dynamic Responses
-    wrapped_responses = [
-        f"{random.choice(empathetic_phrases)} *{q}*"
-        for q in retrieved_questions
-    ]
-
-    return {"questions": wrapped_responses}
+    # Generate dynamic emotional responses locally
+    enhanced_responses = generate_local_emotional_response(request.message, retrieved_questions)
+
+    return {"questions": enhanced_responses}
 
 @app.post("/summarize_chat")
 def summarize_chat(request: SummaryRequest):
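
The context lines in this hunk use embedding_model, question_index, and questions_df, which are defined earlier in app.py, outside the diff. For orientation, here is a minimal sketch of what that retrieval setup typically looks like with sentence-transformers and FAISS; the CSV path and embedding model name below are assumptions, not part of the commit:

import faiss
import pandas as pd
from sentence_transformers import SentenceTransformer

questions_df = pd.read_csv("questions.csv")  # assumed path; needs a "Questions" column
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed model choice

# Embed every stored question once and index the vectors for nearest-neighbour search
question_embeddings = embedding_model.encode(
    questions_df["Questions"].tolist(), convert_to_numpy=True
)
question_index = faiss.IndexFlatL2(question_embeddings.shape[1])  # exact L2 search
question_index.add(question_embeddings)

With an index like this, question_index.search(input_embedding, 3) in the endpoint returns the positions of the three nearest stored questions, which the list comprehension maps back to their text.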
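
Two details of the added generation code are worth flagging. First, max_length=200 caps prompt and completion together, so a long user message plus three questions can leave the model little or no room to reply; max_new_tokens bounds only the generated tokens. Second, decoding output[0] in full means the returned lines include the echoed prompt. Below is a hedged variant that addresses both; the float16 and device settings are deployment assumptions, not part of the commit:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # roughly halves memory vs. float32 for 7B weights
    device_map="auto",          # requires the accelerate package
)

def generate_local_emotional_response(user_input, questions):
    prompt = (
        f"User: {user_input}\n\n"
        "Based on this, respond in an empathetic way before asking each question:\n"
        f"1. {questions[0]}\n2. {questions[1]}\n3. {questions[2]}"
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=200)  # bounds only new tokens
    generated = output[0][inputs["input_ids"].shape[1]:]   # drop the echoed prompt
    return tokenizer.decode(generated, skip_special_tokens=True).split("\n")

Serving a 7B model this way also assumes enough memory for the weights; in float32 they alone take roughly 28 GB.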
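
For reference, a hypothetical client call against the updated endpoint; the host and port are assumptions, while the message field matches the request.message attribute the endpoint reads from ChatRequest:

import requests

# Assumed local dev server; adjust the URL to the actual deployment
resp = requests.post(
    "http://localhost:8000/get_questions",
    json={"message": "I haven't been sleeping well and I feel anxious."},
)
print(resp.json()["questions"])  # lines generated by the local model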