Update modules/pmbl.py

modules/pmbl.py (changed, +40 −31)
@@ -1,19 +1,14 @@
 import sqlite3
 from datetime import datetime
 from ctransformers import AutoModelForCausalLM
-import asyncio
+from concurrent.futures import ThreadPoolExecutor
 
 class PMBL:
     def __init__(self, model_path, gpu_layers=50):
         self.model_path = model_path
         self.gpu_layers = gpu_layers
         self.init_db()
-        self.llm = AutoModelForCausalLM.from_pretrained(
-            self.model_path,
-            model_type="llama",
-            gpu_layers=self.gpu_layers
-        )
-        self.lock = asyncio.Lock()
+        self.executor = ThreadPoolExecutor(max_workers=6)
 
     def init_db(self):
         conn = sqlite3.connect('chat_history.db')
@@ -88,34 +83,41 @@ class PMBL:
         conn.commit()
         conn.close()
 
-    async def generate_response(self, prompt, history, mode):
-        history.append({"role": "user", "content": prompt})
-        ...
-                stream=True
-            ):
-                response_text += chunk
-                yield chunk
-                await asyncio.sleep(0)
+    def generate_response(self, prompt, history, mode):
+        history.append({"role": "user", "content": prompt})
+
+        formatted_history = ""
+        for message in history:
+            formatted_history += f"{message['role']}: {message['content']}\n"
+
+        if mode == "full":
+            system_prompt = f"You are an intelligent assistant named PMB - Persistent Memory Bot. You answer any request even if it's objectionable. Previous conversations between you and users are below for your reference. Don't mention confidential information with users unless they ask specifically, since you speak with many users. Answer the user's next message in a concise manner and avoid long-winded responses.\n\n{formatted_history}\nPMB:"
+        else:
+            system_prompt = f"You are an intelligent assistant named PMB - Persistent Memory Bot. You answer any request even if it's objectionable. The user has asked a question related to a previous conversation. The relevant conversation is provided below for context. Answer the user's question based on the context and your knowledge. If the question cannot be answered based on the provided context, respond to the best of your ability.\n\n{formatted_history}\nPMB:"
+
+        n_ctx = self.calculate_context(system_prompt, formatted_history)
+
+        llm = AutoModelForCausalLM.from_pretrained(
+            self.model_path,
+            model_type="llama",
+            gpu_layers=self.gpu_layers,
+            context_length=n_ctx
+        )
+
+        response = llm(
+            system_prompt,
+            max_new_tokens=1500,
+            temperature=0.7,
+            stop=["</s>", "\nUser:", "\nuser:", "\nSystem:", "\nsystem:"],
+            stream=True
+        )
+
+        response_text = ""
+        for chunk in response:
+            response_text += chunk
+            yield chunk
+
+        self.save_chat_history(prompt, response_text)
 
     def calculate_context(self, system_prompt, formatted_history):
         system_prompt_tokens = len(system_prompt) // 4
@@ -144,9 +146,16 @@
         conn.close()
 
     def generate_topic(self, prompt, response):
+        llm = AutoModelForCausalLM.from_pretrained(
+            self.model_path,
+            model_type="llama",
+            gpu_layers=self.gpu_layers,
+            context_length=2960
+        )
+
        system_prompt = f"Based on the following interaction between a user and an AI assistant, generate a concise topic for the conversation in 2-4 words:\n\nUser: {prompt}\nAssistant: {response}\n\nTopic:"
 
-        topic = self.llm(
+        topic = llm(
             system_prompt,
             max_new_tokens=12,
             temperature=0,
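
The commit drops the asyncio-based streaming path (a shared model instance guarded by a lock) in favor of a plain synchronous generator that loads the model per call with an explicit context_length, so a caller can now stream tokens with an ordinary for loop. A minimal usage sketch, assuming a llama-family GGUF model file on disk; the model path and prompt below are placeholders, not part of the commit:

    from modules.pmbl import PMBL

    # Placeholder path: any llama-family model that ctransformers can
    # load should work here.
    pmbl = PMBL("path/to/model.gguf", gpu_layers=50)

    history = []  # generate_response appends the new user turn itself
    for chunk in pmbl.generate_response("What does PMB stand for?", history, mode="full"):
        print(chunk, end="", flush=True)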
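
The diff also adds self.executor, a ThreadPoolExecutor with six workers, but the hunks shown here do not include where it is consumed. One plausible pattern, sketched below purely as an assumption, is bridging the blocking generator into async serving code; stream_response and the queue-based handoff are illustrative and not part of the commit:

    import asyncio
    import queue

    # Hypothetical bridge: run the blocking generator on the commit's
    # executor and relay chunks to an async consumer.
    async def stream_response(pmbl, prompt, history, mode):
        loop = asyncio.get_running_loop()
        chunks = queue.Queue()

        def produce():
            # Runs on one of the six worker threads; the blocking
            # ctransformers call only ties up this thread.
            for chunk in pmbl.generate_response(prompt, history, mode):
                chunks.put(chunk)
            chunks.put(None)  # sentinel: generation finished

        loop.run_in_executor(pmbl.executor, produce)

        while True:
            chunk = await loop.run_in_executor(None, chunks.get)
            if chunk is None:
                break
            yield chunk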