Update app.py
Browse files
app.py
CHANGED
@@ -2,8 +2,6 @@ import torch
|
|
2 |
import torch.nn as nn
|
3 |
import random
|
4 |
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
5 |
-
from textblob import TextBlob
|
6 |
-
import gradio as gr
|
7 |
import pickle
|
8 |
import numpy as np
|
9 |
import torch.nn.functional as F
|
@@ -40,18 +38,6 @@ def load_memory(filename='chat_memory.pkl'):
|
|
40 |
|
41 |
session_memory = load_memory()
|
42 |
|
43 |
-
# ---- Sentiment Analysis ----
|
44 |
-
def analyze_sentiment(text):
    """Return TextBlob's sentiment polarity for *text*.

    The score lies in [-1.0, 1.0]: -1 is fully negative, +1 fully positive.
    """
    return TextBlob(text).sentiment.polarity
|
47 |
-
|
48 |
-
def adjust_for_emotion(response, sentiment):
    """Prefix *response* with an empathetic lead-in based on sentiment polarity.

    Sentiment above 0.2 gets an upbeat wrapper, below -0.2 a consoling one;
    anything in between (inclusive of the thresholds) is returned unchanged.
    """
    if sentiment < -0.2:
        return f"I'm sorry to hear that: {response}. How can I assist you further?"
    if sentiment > 0.2:
        return f"That's wonderful! I'm glad you're feeling good: {response}"
    return response
|
54 |
-
|
55 |
# ---- Response Generation ----
|
56 |
def generate_response(prompt, max_length=512):
|
57 |
inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
|
@@ -76,33 +62,27 @@ def generate_response(prompt, max_length=512):
|
|
76 |
|
77 |
response = tokenizer.decode(output[0], skip_special_tokens=True)
|
78 |
|
79 |
-
# Split response into two parts
|
80 |
parts = response.split("\n", 1)
|
81 |
if len(parts) > 1:
|
82 |
-
before_indent =
|
83 |
-
after_indent =
|
84 |
-
|
85 |
else:
|
86 |
-
|
87 |
|
88 |
-
return
|
89 |
|
90 |
# ---- Interactive Chat Function ----
|
91 |
def advanced_agi_chat(user_input):
    """Log the user's message to session memory, then return an
    emotion-adjusted model reply.
    """
    # Record and persist the turn before generating a reply
    # (save_memory/session_memory are defined at module level).
    session_memory.append({"input": user_input})
    save_memory(session_memory)

    # Polarity of the user's message drives the empathetic wrapper below.
    sentiment_score = analyze_sentiment(user_input)

    prompt = f"User: {user_input}\nAutistic-Gertrude:"
    raw_reply = generate_response(prompt)

    return adjust_for_emotion(raw_reply, sentiment_score)
|
106 |
|
107 |
# ---- Gradio Interface ----
|
108 |
def chat_interface(user_input):
|
@@ -110,6 +90,8 @@ def chat_interface(user_input):
|
|
110 |
return response
|
111 |
|
112 |
# ---- Gradio App Setup ----
|
|
|
|
|
113 |
with gr.Blocks() as app:
|
114 |
gr.Markdown("# **Autistic Assistant vß Edition 2024 Ultra: Gertrude's Autistic Experience**")
|
115 |
|
|
|
2 |
import torch.nn as nn
|
3 |
import random
|
4 |
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
|
|
|
|
5 |
import pickle
|
6 |
import numpy as np
|
7 |
import torch.nn.functional as F
|
|
|
38 |
|
39 |
session_memory = load_memory()
|
40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
# ---- Response Generation ----
|
42 |
def generate_response(prompt, max_length=512):
|
43 |
inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
|
|
|
62 |
|
63 |
response = tokenizer.decode(output[0], skip_special_tokens=True)
|
64 |
|
65 |
+
# Split response into two parts, where the second indent is considered the "inner thoughts"
|
66 |
parts = response.split("\n", 1)
|
67 |
if len(parts) > 1:
|
68 |
+
before_indent = parts[0].strip()
|
69 |
+
after_indent = "Inner Thoughts: " + parts[1].strip()
|
70 |
+
final_response = before_indent + '\n' + after_indent
|
71 |
else:
|
72 |
+
final_response = response.strip()
|
73 |
|
74 |
+
return final_response
|
75 |
|
76 |
# ---- Interactive Chat Function ----
|
77 |
def advanced_agi_chat(user_input):
    """Append the user's message to persistent session memory and return
    the model-generated reply.
    """
    # Persist the turn first (save_memory/session_memory live at module level).
    session_memory.append({"input": user_input})
    save_memory(session_memory)

    # Build the dialogue prompt and hand it to the generator.
    return generate_response(f"User: {user_input}\nAutistic-Gertrude:")
|
|
|
|
|
|
|
86 |
|
87 |
# ---- Gradio Interface ----
|
88 |
def chat_interface(user_input):
|
|
|
90 |
return response
|
91 |
|
92 |
# ---- Gradio App Setup ----
|
93 |
+
import gradio as gr
|
94 |
+
|
95 |
with gr.Blocks() as app:
|
96 |
gr.Markdown("# **Autistic Assistant vß Edition 2024 Ultra: Gertrude's Autistic Experience**")
|
97 |
|