Spaces:
Running
Update app.py
Browse files
app.py
CHANGED
@@ -65,18 +65,21 @@ class MemoryEfficientNN(nn.Module):
         return self.layers(x.long())
 
 # Memory-efficient dataset
-class
-    def __init__(self,
-        self.
-        self.
-
-
-
-
-
-
-
-
+class MemoryEfficientNN(nn.Module):
+    def __init__(self, input_size, hidden_size, num_classes):
+        super(MemoryEfficientNN, self).__init__()
+        self.layers = nn.Sequential(
+            nn.Embedding(input_size, hidden_size),
+            nn.ReLU(),
+            nn.Dropout(0.2),
+            nn.Linear(hidden_size, hidden_size),
+            nn.ReLU(),
+            nn.Dropout(0.2),
+            nn.Linear(hidden_size, num_classes)
+        )
+
+    def forward(self, x):
+        return self.layers(x.long())
 X_train, X_test, y_train, y_test = train_test_split(contexts_encoded, emotions_target, test_size=0.2, random_state=42)
 input_size = X_train.shape[1]
 hidden_size = 64
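The block added in this hunk puts an Embedding layer first, so its output keeps a per-feature axis. A minimal standalone sketch, not part of the commit, rebuilds the same stack with assumed placeholder sizes (input_size=100, hidden_size=64, num_classes=6) just to show the shape it produces:

import torch
import torch.nn as nn

# Assumed placeholder sizes; the real values come from the app's data, not this diff.
input_size, hidden_size, num_classes = 100, 64, 6

layers = nn.Sequential(
    nn.Embedding(input_size, hidden_size),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(hidden_size, hidden_size),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(hidden_size, num_classes),
)

x = torch.randint(0, input_size, (1, input_size))  # integer inputs, as x.long() implies
print(layers(x).shape)  # torch.Size([1, 100, 6]); the Embedding keeps a trailing hidden axis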
@@ -269,6 +272,7 @@ def get_sentiment(text):
     result = sentiment_pipeline(text)[0]
     return f"Sentiment: {result['label']}, Score: {result['score']:.4f}"
 
+
 def process_input(text):
     try:
         normalized_text = normalize_context(text)
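sentiment_pipeline appears here only as context; its construction is not part of this diff. A hedged sketch using the transformers pipeline API (the default sentiment model is an assumption) would be:

from transformers import pipeline

# Assumed construction; the Space's actual pipeline setup is outside this diff.
sentiment_pipeline = pipeline("sentiment-analysis")

def get_sentiment(text):
    result = sentiment_pipeline(text)[0]  # first prediction dict: {'label': ..., 'score': ...}
    return f"Sentiment: {result['label']}, Score: {result['score']:.4f}"

print(get_sentiment("I love this Space!"))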
@@ -276,7 +280,8 @@ def process_input(text):
 
         rf_prediction = rf_model.predict(encoded_text)[0]
         isolation_score = isolation_forest.decision_function(encoded_text)[0]
-
+        nn_output = model(torch.LongTensor(encoded_text.toarray()).to(device))
+        nn_prediction = nn_output.argmax(dim=1).item()
 
         predicted_emotion = emotion_classes[rf_prediction]
         sentiment_score = isolation_score
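rf_model and isolation_forest are referenced here but trained elsewhere in app.py. A hedged sketch of comparable scikit-learn estimators fitted on placeholder data, only to illustrate the two calls in the context lines:

import numpy as np
from sklearn.ensemble import RandomForestClassifier, IsolationForest

# Placeholder data standing in for the app's encoded contexts and emotion labels.
X = np.random.randint(0, 50, size=(200, 16))
y = np.random.randint(0, 6, size=200)

rf_model = RandomForestClassifier(random_state=42).fit(X, y)
isolation_forest = IsolationForest(random_state=42).fit(X)

sample = X[:1]
rf_prediction = rf_model.predict(sample)[0]                      # predicted class index
isolation_score = isolation_forest.decision_function(sample)[0]  # higher means more "normal"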
@@ -299,7 +304,6 @@ def process_input(text):
         error_message = f"An error occurred: {str(e)}"
         print(error_message) # Logging the error
         return error_message, error_message, error_message, error_message
-
 iface = gr.Interface(
     fn=process_input,
     inputs="text",
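The gr.Interface call is cut off after inputs="text", so the remaining arguments are not shown. A hedged sketch of how such an interface is typically wired, with a placeholder handler and an assumed four-output spec matching the four values returned above:

import gradio as gr

def process_input(text):
    # Placeholder standing in for the app's real handler, which returns four values.
    return text, text, text, text

iface = gr.Interface(
    fn=process_input,
    inputs="text",
    outputs=["text", "text", "text", "text"],  # assumed; the outputs spec is not shown in the diff
)

if __name__ == "__main__":
    iface.launch()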