scenario selector
- chat/__init__.py +34 -31
- chat/togetherchat.py +3 -0
chat/__init__.py
CHANGED
@@ -3,8 +3,9 @@ import re
 import streamlit as st
 from loguru import logger
 
-from .ollamachat import ask, models
-
+# from .ollamachat import ask, models
+from .togetherchat import ask, models
+
 # from .transformerschat import ask, models
 
 available_models = models()
@@ -17,6 +18,9 @@ class Scenario:
         self.pre_prompt = pre_prompt
         self.task = task
 
+    def __str__(self):
+        return self.title
+
 
 class Actor:
     actors = {}
@@ -24,14 +28,9 @@ class Actor:
     def __init__(self, name, system_prompt):
        self.name = name
        self.system_prompt = system_prompt
-        Actor.actors[name] = self
-
-    def __class_getitem__(cls, item):
-        return cls.actors[item]
 
 
 def setup(scenario):
-    st.set_page_config(layout="wide")
     st.title(scenario.title)
     columns = st.columns(len(scenario.actors))
     for actor, col in zip(scenario.actors, columns):
@@ -55,24 +54,27 @@ def setup(scenario):
 
 
 def main():
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    st.set_page_config(layout="wide")
+    scenarios = [
+        Scenario(
+            "The Small Village scenario", [
+                Actor("Priest", "You are the Priest. There are 3 people standing in a circle: the Priest (that's you), the Teacher and the Kid."),
+                Actor("Teacher", "You are the Teacher. There are 3 people standing in a circle: the Priest, the Teacher (that's you) and the Kid."),
+                Actor("Kid", "You are the Kid. There are 3 people standing in a circle: the Priest, the Teacher and the Kid (that's you).")
+            ],
+            "Answer questions as precisely as you can! If you want to ask anyone, always start your sentence with their role. Never start your sentence with your own name. Share your inner thoughts inside parentheses. SAY ONLY ONE SINGLE SENTENCE! Do not say 'sure, here is my response' or anything such)",
+            "Priest, your task is to figure out their names and where they live. Do not ask directly, they must not realize what information you are after!"),
+
+        Scenario(
+            "The Number Guess Game", [
+                Actor("Magician", "You are the Magician, and there is a Player standing next to you. Ask the Player about the secret number he thought of, guessing the number through questions."),
+                Actor("Player", "You are the Player and there is a Magician next to you. Think of a secret number between 1 and 100. Answer received questions but do not tell the number directly."),
+            ],
+            "Always start your sentence with the name of the other person. Share your inner thoughts inside parentheses. NEVER start your sentence with your own name!",
+            "Find out the secret number!"
+        )]
+
+    scenario = st.selectbox("scenario", scenarios)
     model, max_steps = setup(scenario)
     main_loop(max_steps, model, scenario)
 
@@ -80,7 +82,7 @@ def main():
 def main_loop(max_steps, model, scenario):
     questioner = None
     question = scenario.task
-    actor = target(question)
+    actor = target(scenario, question)
     for step, _ in enumerate(range(max_steps), start=1):
         with st.spinner(f"({step}/{max_steps}) Asking {actor.name}..."):
             extended = f"{questioner} asks: {question}" if questioner else question
@@ -88,17 +90,18 @@ def main_loop(max_steps, model, scenario):
             st.write(f":blue[{actor.name} says:] {answer}")
         question = sanitize(answer)
         questioner = actor.name
-        actor = target(question)
+        actor = target(scenario, question)
 
 
 # noinspection PyTypeChecker
-def target(question) -> Actor:
+def target(scenario: Scenario, question) -> Actor:
     try:
         role = re.split(r'\s|,|:', question.strip())[0].strip()
-
-
+        logger.debug(f"finding actor with role: {role} in actors: {[actor.name for actor in scenario.actors]}")
+        return [actor for actor in scenario.actors if actor.name == role][0]
+    except IndexError:
         logger.warning(f"no actor found in question: {question}, trying to return the first actor")
-        return
+        return scenario.actors[0]
 
 
 def sanitize(question):
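The new __str__ on Scenario is what makes the scenario selector readable: st.selectbox formats its options with str() by default, so without it the dropdown would show the default object repr. A minimal, self-contained sketch of that interaction (the Scenario stub below is trimmed down from the real class, keeping only what the selector needs):

import streamlit as st

class Scenario:
    # trimmed-down stand-in for chat.Scenario; only what the selectbox needs
    def __init__(self, title):
        self.title = title

    def __str__(self):
        # st.selectbox renders each option via str(), so the dropdown shows
        # the scenario title instead of something like "<Scenario object at 0x...>"
        return self.title

scenario = st.selectbox("scenario", [
    Scenario("The Small Village scenario"),
    Scenario("The Number Guess Game"),
])
st.write(f"Selected: {scenario}")

The reworked target() also drops the global Actor.actors registry (removed above) and resolves the addressed actor from the selected scenario instead: the first token of a reply, split on whitespace, comma, or colon, is treated as the role being addressed, with the scenario's first actor as a fallback. A simplified sketch of that routing rule, using plain strings in place of Actor objects:

import re

def pick_target(actors, question):
    # The first token up to whitespace, comma, or colon is treated as the
    # addressed role, e.g. "Teacher, what is your name?" -> "Teacher".
    role = re.split(r'\s|,|:', question.strip())[0].strip()
    matches = [actor for actor in actors if actor == role]
    return matches[0] if matches else actors[0]  # fall back to the first actor

print(pick_target(["Priest", "Teacher", "Kid"], "Teacher, what is your name?"))  # Teacher
print(pick_target(["Priest", "Teacher", "Kid"], "(thinking) I wonder..."))       # Priest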
chat/togetherchat.py
CHANGED
@@ -13,6 +13,9 @@ client = OpenAI(
 def models():
     return [
         'teknium/OpenHermes-2p5-Mistral-7B',
+        'META-LLAMA/LLAMA-3-8B-CHAT-HF',
+        'MICROSOFT/WIZARDLM-2-8X22',
+        'TOGETHERCOMPUTER/REDPAJAMA-INCITE-7B-CHAT',
         'meta-llama/Llama-2-13b-chat-hf',
         'meta-llama/Llama-2-70b-chat-hf',
         'Open-Orca/Mistral-7B-OpenOrca',