Update app.py
Browse files
app.py
CHANGED
@@ -40,7 +40,7 @@ def load_memory(filename='chat_memory.pkl'):
|
|
40 |
session_memory = load_memory()
|
41 |
|
42 |
# ---- Response Generation ----
|
43 |
-
def generate_response(prompt, max_length=35):
|
44 |
inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
|
45 |
input_ids = inputs['input_ids'].to(device)
|
46 |
attention_mask = inputs['attention_mask'].to(device)
|
@@ -67,7 +67,7 @@ def generate_response(prompt, max_length=35):
|
|
67 |
parts = response.split("\n", 1)
|
68 |
if len(parts) > 1:
|
69 |
before_indent = parts[0].strip()
|
70 |
-
after_indent = "vß
|
71 |
final_response = before_indent + '\n' + after_indent
|
72 |
else:
|
73 |
final_response = response.strip()
|
@@ -80,7 +80,7 @@ def advanced_agi_chat(user_input):
|
|
80 |
save_memory(session_memory)
|
81 |
|
82 |
# Generate the response based on the prompt
|
83 |
-
prompt = f"User: {user_input}\
|
84 |
response = generate_response(prompt)
|
85 |
|
86 |
return response
|
|
|
40 |
session_memory = load_memory()
|
41 |
|
42 |
# ---- Response Generation ----
|
43 |
+
def generate_response(prompt, max_length=25):
|
44 |
inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
|
45 |
input_ids = inputs['input_ids'].to(device)
|
46 |
attention_mask = inputs['attention_mask'].to(device)
|
|
|
67 |
parts = response.split("\n", 1)
|
68 |
if len(parts) > 1:
|
69 |
before_indent = parts[0].strip()
|
70 |
+
after_indent = "vß Gertrude" + parts[1].strip()
|
71 |
final_response = before_indent + '\n' + after_indent
|
72 |
else:
|
73 |
final_response = response.strip()
|
|
|
80 |
save_memory(session_memory)
|
81 |
|
82 |
# Generate the response based on the prompt
|
83 |
+
prompt = f"User: {user_input}\nResponse:"
|
84 |
response = generate_response(prompt)
|
85 |
|
86 |
return response
|