Bey007 committed on
Commit 47d9e7e · verified · 1 Parent(s): ce9d087

Update app.py

Files changed (1)
app.py +39 -29
app.py CHANGED
@@ -8,48 +8,58 @@ import os
 # Load pretrained models
 tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
 model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
-# Load GPT-2 model and tokenizer for story generation
-gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2-medium")
-gpt2_model = AutoModelForCausalLM.from_pretrained("gpt2-medium")
+# Load Llama 2 model
+llama_model_name = "meta-llama/Llama-2-7b-hf"
+llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_name)
+llama_model = AutoModelForCausalLM.from_pretrained(llama_model_name)
 emotion_classifier = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion", return_all_scores=True)
 
-# Function to generate a comforting story using GPT-2
 def generate_story(theme):
     # A detailed prompt for generating a comforting story about the selected theme
     story_prompt = f"Write a comforting, detailed, and heartwarming story about {theme}. The story should include a character who faces a tough challenge, finds hope, and ultimately overcomes the situation with a positive resolution."
 
-    # Generate story using GPT-2
-    input_ids = gpt2_tokenizer.encode(story_prompt, return_tensors='pt')
+    # Generate story using Llama 2
+    input_ids = llama_tokenizer.encode(story_prompt, return_tensors='pt')
 
-    story_ids = gpt2_model.generate(
+    story_ids = llama_model.generate(
         input_ids,
-        max_length=500, # Generate longer stories
-        temperature=0.8, # Balanced creativity
+        max_length=600, # Generate longer stories
+        temperature=0.7, # Balanced creativity
         top_p=0.9,
-        repetition_penalty=1.2,
+        repetition_penalty=1.1,
         num_return_sequences=1
     )
 
     # Decode the generated text
-    story = gpt2_tokenizer.decode(story_ids[0], skip_special_tokens=True)
+    story = llama_tokenizer.decode(story_ids[0], skip_special_tokens=True)
     return story
 
 
+
 # Function to generate an empathetic response
+# Function to generate an empathetic response using Llama 2
 def generate_response(user_input):
-    response_prompt = f"You are a compassionate support bot. A user has shared: '{user_input}'. Respond with empathy and encouragement."
-    input_ids = tokenizer.encode(response_prompt, return_tensors='pt')
-    chat_history_ids = model.generate(
+    # Create a prompt for the Llama 2 model to generate an empathetic response
+    response_prompt = f"You are a compassionate support bot. A user has shared: '{user_input}'. Respond with empathy, understanding, and encouragement in a supportive manner."
+
+    # Tokenize the prompt
+    input_ids = llama_tokenizer.encode(response_prompt, return_tensors='pt')
+
+    # Generate the response using Llama 2
+    response_ids = llama_model.generate(
         input_ids,
-        max_length=300,
-        temperature=0.85,
-        top_k=50,
-        repetition_penalty=1.2,
+        max_length=300, # Generate a longer response
+        temperature=0.7, # Adjust temperature for coherent and empathetic responses
+        top_p=0.9, # Use top-p sampling for balanced creativity
+        repetition_penalty=1.1, # Avoid repetitive responses
         num_return_sequences=1
     )
-    response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
+
+    # Decode the generated text
+    response = llama_tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
     return response
 
+
 # Analyze user input for emotional tone
 def get_emotion(user_input):
     emotions = emotion_classifier(user_input)
@@ -83,14 +93,14 @@ with st.sidebar:
     tts.save(meditation_audio)
     st.audio(meditation_audio, format="audio/mp3")
 
-    # Sidebar for additional features
-    with st.sidebar:
-        st.header("📖 Short Comforting Story")
-        story_theme = st.selectbox("Choose a theme for your story:", ["courage", "healing", "hope"])
-        if st.button("Generate Story"):
-            with st.spinner("Generating your story..."):
-                story = generate_story(story_theme)
-                st.text_area("Here's your story:", story, height=300)
+    # Generating a comforting story
+    st.sidebar.header("📖 Short Comforting Story")
+    story_theme = st.selectbox("Choose a theme for your story:", ["courage", "healing", "hope"])
+    if st.sidebar.button("Generate Story"):
+        with st.spinner("Generating your story..."):
+            story = generate_story(story_theme)
+            st.text_area("Here's your story:", story, height=300)
+
 
 
 # User input section
@@ -103,12 +113,12 @@ if 'badges' not in st.session_state:
     st.session_state.badges = []
 
 if user_input:
-    emotion = get_emotion(user_input)
     with st.spinner("Thinking..."):
         response = generate_response(user_input)
 
-    st.session_state.previous_responses.append(response)
+    # Display the bot's response
     st.text_area("Bot's Response:", response, height=250)
+
 
     # Assign motivational badges
     if emotion in ["joy", "optimism"]:
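Note: unlike gpt2-medium, meta-llama/Llama-2-7b-hf is a gated checkpoint, so the plain from_pretrained calls added above will fail unless the Space has accepted the Llama 2 license and authenticates, and the 7B weights need far more memory than gpt2-medium. A minimal sketch of how the loading lines could be hardened, assuming an HF_TOKEN secret and half-precision loading (the token, torch_dtype, and device_map arguments are not part of this commit):

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

llama_model_name = "meta-llama/Llama-2-7b-hf"
hf_token = os.environ.get("HF_TOKEN")  # assumed secret holding a Hub token with Llama 2 access

llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_name, token=hf_token)
llama_model = AutoModelForCausalLM.from_pretrained(
    llama_model_name,
    token=hf_token,
    torch_dtype=torch.float16,  # half precision to roughly halve memory use
    device_map="auto",          # requires accelerate; places weights on available devices
)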
 
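Note: with transformers' defaults, generate() performs greedy decoding, so the temperature and top_p values introduced in this commit are ignored unless do_sample=True is passed, and Llama 2's tokenizer defines no pad token, which triggers a warning. A small sketch of the story-generation call with sampling enabled, assuming max_new_tokens in place of max_length so the prompt does not count against the 600-token budget:

story_ids = llama_model.generate(
    input_ids,
    do_sample=True,           # required for temperature/top_p to take effect
    max_new_tokens=600,       # assumed: budget for generated tokens only
    temperature=0.7,
    top_p=0.9,
    repetition_penalty=1.1,
    pad_token_id=llama_tokenizer.eos_token_id,  # silences the missing-pad-token warning
    num_return_sequences=1
)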