Space status: Runtime error

chore: fix
Commit 642e116 · Parent(s): 3e64e0d

Files changed:
- main.py +1 -1
- src/agent/tools/conversation.py +32 -18
main.py
CHANGED

@@ -43,7 +43,7 @@ class LoggingDisabled:
 
 def main():
     app = Application.builder().token(
-        '6207542226:
+        '6207542226:AAEeWfZzrMcGTiCmUkQSp3oXkedQJnrEaXc',).build()
 
     run_agent(
         agent=GirlfriendGPT(
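Note that this change commits the full Telegram bot token to source; a token published in a public Space should be treated as leaked and regenerated via BotFather. The usual alternative, sketched below (not part of this commit; the TELEGRAM_BOT_TOKEN variable name is an assumption), reads the token from the environment:

    import os

    from telegram.ext import Application

    # Sketch only: load the bot token from the environment instead of
    # hardcoding it. TELEGRAM_BOT_TOKEN is an assumed name, not from the commit.
    app = Application.builder().token(os.environ["TELEGRAM_BOT_TOKEN"]).build()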
src/agent/tools/conversation.py
CHANGED

@@ -1,6 +1,6 @@
 import logging
 from telegram import Update
-from transformers import
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 import torch
 from telegram.ext import (
     CallbackContext,
@@ -16,25 +16,39 @@ Output: A text
 
 GET_CON = range(1)
 
-class Conversation():
-    tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
-    model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
-
-
 
-
-
-
-
-
-
-
+class Conversation():
+    tokenizer = AutoTokenizer.from_pretrained(
+        "microsoft/GODEL-v1_1-large-seq2seq")
+    model = AutoModelForSeq2SeqLM.from_pretrained(
+        "microsoft/GODEL-v1_1-large-seq2seq")
+
+    # async def talk(self, message: str):
+    #     logging.info(f"{message}")
+    #     chat_history_ids = torch.tensor([], dtype=torch.long)
+    #     new_user_input_ids = self.tokenizer.encode(message + self.tokenizer.eos_token, return_tensors='pt')
+    #     bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
+    #     chat_history_ids =self.model.generate(bot_input_ids, max_length=1000, pad_token_id=self.tokenizer.eos_token_id)
+    #     return "{}".format(self.tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
+
+    def generate(self, instruction, knowledge, dialog):
+        if knowledge != '':
+            knowledge = '[KNOWLEDGE] ' + knowledge
+        dialog = ' EOS '.join(dialog)
+        query = f"{instruction} [CONTEXT] {dialog} {knowledge}"
+        input_ids = self.tokenizer(f"{query}", return_tensors="pt").input_ids
+        outputs = self.model.generate(
+            input_ids, max_length=128,
+            min_length=8,
+            top_p=0.9,
+            do_sample=True,
+        )
+        output = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return output
 
-
     async def process_conversation(self, update: Update, context: CallbackContext) -> int:
         message = update.message.text
-
+        instruction = f'Instruction: given a dialog context, you need to response empathically.'
+        knowledge = ''
+        text = await self.generate(instruction, knowledge,message)
         await update.message.reply_text(f'{text}')
-
-
-
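For context, the added generate() method follows the usage pattern from the microsoft/GODEL-v1_1-large-seq2seq model card: the dialog is a list of turns joined with ' EOS ', optionally followed by a [KNOWLEDGE] segment. A standalone sketch of the same call path (the dialog turns here are invented examples, not from the commit):

    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    # Standalone sketch of the generation path added in this commit.
    # Model name is taken from the diff; the dialog turns are invented.
    tokenizer = AutoTokenizer.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")
    model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/GODEL-v1_1-large-seq2seq")

    instruction = "Instruction: given a dialog context, you need to response empathically."
    dialog = ["Does money buy happiness?", "Money buys many things, but maybe not happiness."]

    # GODEL query format: "<instruction> [CONTEXT] <turn> EOS <turn> ... [KNOWLEDGE] <knowledge>"
    query = f"{instruction} [CONTEXT] {' EOS '.join(dialog)} "
    input_ids = tokenizer(query, return_tensors="pt").input_ids
    outputs = model.generate(input_ids, max_length=128, min_length=8, top_p=0.9, do_sample=True)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))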
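Two issues remain visible in the final state of process_conversation, which may explain why the Space status above still shows a runtime error. First, generate() is a plain synchronous method, so awaiting its str result in 'text = await self.generate(...)' raises a TypeError at runtime. Second, passing message (a single string) as dialog makes ' EOS '.join() splice the separator between individual characters rather than between turns. A sketch of a corrected handler body (not part of this commit):

    # Sketch of a fix, not in the commit: call generate() without await, and
    # wrap the single user message in a list so ' EOS '.join() joins dialog
    # turns rather than individual characters.
    instruction = 'Instruction: given a dialog context, you need to response empathically.'
    text = self.generate(instruction, '', [message])
    await update.message.reply_text(text)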