Sephfox committed
Commit 20e25d2 · verified · 1 Parent(s): 60ae9bc

Update app.py

Files changed (1)
  1. app.py +6 -10
app.py CHANGED
@@ -6,14 +6,11 @@ import json
 import random
 import gradio as gr
 import torch
- import torch.nn as nn
- import torch.optim as optim
- from torch.utils.data import DataLoader, Dataset
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import OneHotEncoder
- from deap import base, creator, tools, algorithms
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, AutoModelForSequenceClassification
+ from deap import base, creator, tools, algorithms
 import gc

 warnings.filterwarnings('ignore', category=FutureWarning, module='huggingface_hub.file_download')
@@ -72,7 +69,7 @@ emotions = {
 'determination': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
 'resentment': {'percentage': 10, 'motivation': 'negative', 'intensity': 0},
 'glory': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
- 'motivation': {'percentage': 'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
+ 'motivation': {'percentage': 10, 'motivation': 'positive', 'intensity': 0},
 'ideal_state': {'percentage': 100, 'motivation': 'balanced', 'intensity': 0},
 'fear': {'percentage': 10, 'motivation': 'defensive', 'intensity': 0},
 'surprise': {'percentage': 10, 'motivation': 'unexpected', 'intensity': 0},
@@ -163,7 +160,7 @@ def evolve_emotions():
 (lambda: 100,), n=1)
 toolbox.register("population", tools.initRepeat, list, toolbox.individual)
 toolbox.register("mate", tools.cxTwoPoint)
- toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2
+ toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
 toolbox.register("select", tools.selNSGA2)
 toolbox.register("evaluate", evaluate)

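Note: for context on the corrected toolbox.register("mutate", ...) line, here is a minimal, self-contained DEAP sketch wired the same way as the fixed hunk. It is not the repository's evolve_emotions(); the fitness weights, gene range, and evaluate() body below are placeholders.

import random
from deap import base, creator, tools

# Hypothetical three-objective fitness; app.py defines its own weights.
creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -1.0, 1.0))
creator.create("Individual", list, fitness=creator.FitnessMulti)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, 0, 100)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, n=20)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

def evaluate(individual):
    # Placeholder multi-objective score standing in for app.py's evaluate().
    return sum(individual), max(individual), min(individual)

toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)  # closing parenthesis restored by this commit
toolbox.register("select", tools.selNSGA2)
toolbox.register("evaluate", evaluate)

pop = toolbox.population(n=10)
for ind in pop:
    ind.fitness.values = toolbox.evaluate(ind)
offspring = toolbox.select(pop, k=len(pop))  # NSGA-II selection over the scored population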
@@ -263,8 +260,7 @@ def optimize_ai_model(emotion_history):

 encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
 X = encoder.fit_transform(np.array(contexts).reshape(-1, 1))
- y = np.array(pd.Categorical(emotions).
- codes)
+ y = np.array(pd.Categorical(emotions).codes)
 clf = RandomForestClassifier(n_estimators=100)
 clf.fit(X, y)
@@ -330,6 +326,6 @@ interface = gr.Interface(respond_to_user, inputs="textbox", outputs="textbox", t
 interface.launch()

 # Clean up memory usage
- del finetuned_lm_tokenizer
- del finetuned_lm_model
+ del _finetuned_lm_tokenizer
+ del _finetuned_lm_model
 gc.collect()
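Note: the renamed cleanup lines follow the usual free-the-model idiom. A hedged sketch of that idiom is below; the _finetuned_lm_* names are whatever app.py's loading code defines, and the CUDA call only matters when a GPU is in use.

import gc
import torch

def release_model(model, tokenizer):
    # Drop the last local references so the weights become collectable.
    del model, tokenizer
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # also return cached GPU blocks to the driver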
 