Commit 636e658
Parent: a6278f1
Upload 6 files
Files changed:
- finnlp/large_language_models/openai/__pycache__/__init__.cpython-310.pyc  +0 -0
- finnlp/large_language_models/openai/__pycache__/openai_chat_agent.cpython-310.pyc  +0 -0
- finnlp/large_language_models/openai/__pycache__/openai_chat_agent_console_copy.cpython-310.pyc  +0 -0
- finnlp/large_language_models/openai/openai_chat_agent - Original.py  +51 -0
- finnlp/large_language_models/openai/openai_chat_agent.py  +54 -0
- finnlp/large_language_models/openai/openai_chat_agent_console_copy.py  +54 -0
finnlp/large_language_models/openai/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (185 Bytes).
finnlp/large_language_models/openai/__pycache__/openai_chat_agent.cpython-310.pyc
ADDED
Binary file (1.98 kB).
finnlp/large_language_models/openai/__pycache__/openai_chat_agent_console_copy.cpython-310.pyc
ADDED
Binary file (1.99 kB).
finnlp/large_language_models/openai/openai_chat_agent - Original.py
ADDED
@@ -0,0 +1,51 @@
+import openai
+
+class Openai_Chat_Agent:
+    def __init__(self,args):
+        assert "token" in args.keys()
+        openai.api_key = args["token"]
+
+        self.temperature = args["temperature"] if "temperature" in args.keys() else 1
+        self.top_p = args["top_p"] if "top_p" in args.keys() else 1
+        self.n = args["n"] if "n" in args.keys() else 1
+        self.max_tokens = args["max_tokens"] if "max_tokens" in args.keys() else None
+        self.presence_penalty = args["presence_penalty"] if "presence_penalty" in args.keys() else 0
+        self.frequency_penalty = args["frequency_penalty"] if "frequency_penalty" in args.keys() else 0
+
+        self.conversation_list = []
+        if "init_prompt" in args.keys():
+            self.conversation_list.append(
+                {"role":"system","content":args["init_prompt"]}
+            )
+
+    def get_single_response(self,prompt):
+        self.conversation_list.append({"role":"user","content":prompt})
+        response = openai.ChatCompletion.create(
+            model = "gpt-3.5-turbo",
+            messages = self.conversation_list,
+            temperature = self.temperature,
+            top_p = self.top_p,
+            n = self.n,
+            max_tokens = self.max_tokens,
+            presence_penalty = self.presence_penalty,
+            frequency_penalty = self.frequency_penalty,
+        )
+        answer = response.choices[0].message['content']
+        self.conversation_list.append({"role":"assistant","content":answer})
+        return answer
+
+    def show_conversation(self):
+        conversation_list = self.conversation_list
+        for msg in conversation_list:
+            content = msg['content']
+            content = content.replace(".",".\n")
+            if msg['role'] == 'user':
+                print(f"\U0001F47B: {content}\n")
+            elif msg['role'] == 'system':
+                print(f"\U0001F4BB: {content}\n")
+            else:
+                print(f"\U0001F916: {content}\n")
+
+    def get_multiple_response(self,prompts):
+        pass
+
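A minimal usage sketch of the class added above, assuming a valid OpenAI API key and the pre-1.0 `openai` Python SDK (whose `openai.ChatCompletion` interface this code calls); the key and prompts below are hypothetical placeholders, not part of the commit:

    # hypothetical usage, not part of this commit
    agent = Openai_Chat_Agent({
        "token": "sk-...",          # placeholder API key (assumption)
        "temperature": 0.7,
        "init_prompt": "You are a helpful financial assistant.",
    })
    print(agent.get_single_response("Summarize today's market sentiment."))
    agent.show_conversation()       # prints the transcript with role emojis
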
finnlp/large_language_models/openai/openai_chat_agent.py
ADDED
@@ -0,0 +1,54 @@
+import openai
+
+class Openai_Chat_Agent:
+    def __init__(self,args):
+        assert "token" in args.keys()
+        openai.api_key = args["token"]
+
+        self.temperature = args["temperature"] if "temperature" in args.keys() else 1
+        self.top_p = args["top_p"] if "top_p" in args.keys() else 1
+        self.n = args["n"] if "n" in args.keys() else 1
+        self.max_tokens = args["max_tokens"] if "max_tokens" in args.keys() else None
+        self.presence_penalty = args["presence_penalty"] if "presence_penalty" in args.keys() else 0
+        self.frequency_penalty = args["frequency_penalty"] if "frequency_penalty" in args.keys() else 0
+
+        self.conversation_list = []
+        if "init_prompt" in args.keys():
+            self.conversation_list.append(
+                {"role":"system","content":args["init_prompt"]}
+            )
+
+    def get_single_response(self,prompt):
+        self.conversation_list.append({"role":"user","content":prompt})
+        response = openai.ChatCompletion.create(
+            model = "gpt-3.5-turbo",
+            messages = self.conversation_list,
+            temperature = self.temperature,
+            top_p = self.top_p,
+            n = self.n,
+            max_tokens = self.max_tokens,
+            presence_penalty = self.presence_penalty,
+            frequency_penalty = self.frequency_penalty,
+        )
+        answer = response.choices[0].message['content']
+        self.conversation_list.append({"role":"assistant","content":answer})
+        return answer
+
+    def show_conversation(self):
+        conversation_list = self.conversation_list
+        res = ''
+        for msg in conversation_list:
+            content = msg['content']
+            content = content.replace(".",".\n")
+            if msg['role'] == 'user':
+                res = res + f"\U0001F47B: {content}\n"
+            elif msg['role'] == 'system':
+                res = res + f"\U0001F4BB: {content}\n"
+            else:
+                res = res + f"\U0001F916: {content}\n"
+
+        return res
+
+    def get_multiple_response(self,prompts):
+        pass
+
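The only functional change from the `- Original` variant above is in `show_conversation`: instead of printing each message, it accumulates the transcript into `res` and returns it, presumably so a UI can display the string. A sketch of the resulting call pattern (names and prompts hypothetical, not from the commit):

    agent = Openai_Chat_Agent({"token": "sk-..."})   # placeholder key (assumption)
    agent.get_single_response("What drove bond yields this week?")
    transcript = agent.show_conversation()           # returns the formatted history
    print(transcript)
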
finnlp/large_language_models/openai/openai_chat_agent_console_copy.py
ADDED
@@ -0,0 +1,54 @@
+import openai
+
+class Openai_Chat_Agent:
+    def __init__(self,args):
+        assert "token" in args.keys()
+        openai.api_key = args["token"]
+
+        self.temperature = args["temperature"] if "temperature" in args.keys() else 1
+        self.top_p = args["top_p"] if "top_p" in args.keys() else 1
+        self.n = args["n"] if "n" in args.keys() else 1
+        self.max_tokens = args["max_tokens"] if "max_tokens" in args.keys() else None
+        self.presence_penalty = args["presence_penalty"] if "presence_penalty" in args.keys() else 0
+        self.frequency_penalty = args["frequency_penalty"] if "frequency_penalty" in args.keys() else 0
+
+        self.conversation_list = []
+        if "init_prompt" in args.keys():
+            self.conversation_list.append(
+                {"role":"system","content":args["init_prompt"]}
+            )
+
+    def get_single_response(self,prompt):
+        self.conversation_list.append({"role":"user","content":prompt})
+        response = openai.ChatCompletion.create(
+            model = "gpt-3.5-turbo",
+            messages = self.conversation_list,
+            temperature = self.temperature,
+            top_p = self.top_p,
+            n = self.n,
+            max_tokens = self.max_tokens,
+            presence_penalty = self.presence_penalty,
+            frequency_penalty = self.frequency_penalty,
+        )
+        answer = response.choices[0].message['content']
+        self.conversation_list.append({"role":"assistant","content":answer})
+        return answer
+
+    def show_conversation(self):
+        conversation_list = self.conversation_list
+        res = ''
+        for msg in conversation_list:
+            content = msg['content']
+            content = content.replace(".",".\n")
+            if msg['role'] == 'user':
+                res = res + f"\U0001F47B: {content}\n"
+            elif msg['role'] == 'system':
+                res = res + f"\U0001F4BB: {content}\n"
+            else:
+                res = res + f"\U0001F916: {content}\n"
+
+        return res
+
+    def get_multiple_response(self,prompts):
+        pass
+
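This console copy is identical line-for-line to openai_chat_agent.py above. One compatibility note: all three modules call `openai.ChatCompletion.create`, which exists only in pre-1.0 releases of the `openai` SDK; under `openai>=1.0` the equivalent call, shown here as a sketch and not part of this commit, would be:

    from openai import OpenAI

    client = OpenAI(api_key="sk-...")  # placeholder key (assumption)
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)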