Sephfox committed
Commit b726416 · verified · 1 Parent(s): 3bde164

Update app.py

Files changed (1): app.py +12 -17
app.py CHANGED
@@ -6,16 +6,14 @@ import json
 import random
 import gradio as gr
 import torch
-from sklearn.ensemble import RandomForestClassifier
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import OneHotEncoder
 from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, pipeline
 from deap import base, creator, tools, algorithms
-import gc
 
 warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')
 
-# Initialize Example Emotions Dataset
+# Initialize Example Dataset (For Emotion Prediction)
 data = {
     'context': [
         'I am happy', 'I am sad', 'I am angry', 'I am excited', 'I am calm',
@@ -44,18 +42,10 @@ emotion_classes = pd.Categorical(df['emotion']).categories
 emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
 emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
 
-# Lazy loading for the fine-tuned language model (DialoGPT-medium)
-_finetuned_lm_tokenizer = None
-_finetuned_lm_model = None
-
-def get_finetuned_lm_model():
-    global _finetuned_lm_tokenizer, _finetuned_lm_model
-    if _finetuned_lm_tokenizer is None or _finetuned_lm_model is None:
-        model_name = "microsoft/DialoGPT-medium"
-        _finetuned_lm_tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left')
-        _finetuned_lm_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", low_cpu_mem_usage=True)
-        _finetuned_lm_tokenizer.pad_token = _finetuned_lm_tokenizer.eos_token
-    return _finetuned_lm_tokenizer, _finetuned_lm_model
+# Load pre-trained LLM model and tokenizer for response generation
+response_model_name = "microsoft/DialoGPT-medium"
+response_tokenizer = AutoTokenizer.from_pretrained(response_model_name)
+response_model = AutoModelForCausalLM.from_pretrained(response_model_name)
 
 # Enhanced Emotional States
 emotions = {
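
Note: the removed lazy loader also set padding_side='left' and pad_token = eos_token, which the new eager-loading lines drop, while generate_response below still reads tokenizer.pad_token_id as unchanged context. DialoGPT's GPT-2 tokenizer ships with no pad token, so if that tokenizer variable now refers to response_tokenizer, the attribute is None. A minimal sketch that reinstates the removed code's own settings:

    # Sketch: restore the pad-token setup the removed lazy loader performed,
    # so tokenizer.pad_token_id is not None when generate_response reads it.
    response_tokenizer = AutoTokenizer.from_pretrained(response_model_name, padding_side='left')
    response_tokenizer.pad_token = response_tokenizer.eos_token  # reuse EOS as PAD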
@@ -200,8 +190,13 @@ def generate_response(context):
     # Ensure pad_token_id is a tensor
     pad_token_id = torch.tensor(tokenizer.pad_token_id)
 
-    outputs = model.generate(inputs, max_length=500, num_return_sequences=1, pad_token_id=pad_token_id.item())
+    outputs = model.generate(inputs, max_length=500, num_return_sequences=1, pad_token_id=pad_token_id.item(), eos_token_id=tokenizer.eos_token_id)
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    # Ensure the response does not repeat the input
+    if context in response:
+        response = response.replace(context, '').strip()
+
     return response
 
 def handle_conversation(user_input):
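
Note: the added substring check only removes a verbatim echo; it can miss when tokenizer.decode renders the prompt with different whitespace. A minimal alternative sketch, assuming inputs holds the encoded prompt ids (shape (1, prompt_len)), is to decode only the newly generated tokens:

    # Decoder-only models return prompt + completion in outputs[0]; slice the
    # prompt off before decoding so the reply can never echo the input.
    prompt_len = inputs.shape[-1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()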
@@ -218,7 +213,7 @@ with gr.Blocks() as demo:
     user_input = gr.Textbox(label="User Input")
     response = gr.Textbox(label="Bot Response")
     submit = gr.Button("Submit")
-    submit.click(update_ui, user_input, response)
+    submit.click(update_ui, inputs=[user_input], outputs=[response])
 
 if __name__ == "__main__":
     demo.launch(share=True)
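
Note: the keyword call behaves the same as the old positional one (Button.click takes fn, inputs, outputs in that order) but makes the data flow explicit. A self-contained sketch of the wiring, with a hypothetical stand-in for the app's update_ui handler:

    import gradio as gr

    def update_ui(message):
        # Hypothetical stand-in; the real update_ui calls into the emotion
        # pipeline via handle_conversation.
        return f"echo: {message}"

    with gr.Blocks() as demo:
        user_input = gr.Textbox(label="User Input")
        response = gr.Textbox(label="Bot Response")
        submit = gr.Button("Submit")
        submit.click(update_ui, inputs=[user_input], outputs=[response])

    demo.launch()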