kjozsa committed on
Commit
528174e
·
1 Parent(s): a1c827d

refactor to Scenario class

Browse files
Files changed (2) hide show
  1. chat/__init__.py +35 -23
  2. chat/togetherchat.py +1 -1
chat/__init__.py CHANGED
@@ -9,51 +9,63 @@ from .togetherchat import ask, models
9
  available_models = models()
10
 
11
 
 
 
 
 
 
 
 
12
  class Actor:
13
  actors = {}
14
 
15
- def __init__(self, role, model, system_prompt, pre_prompt):
16
- self.role = role
17
  self.model = model
18
  self.system_prompt = system_prompt
19
  self.pre_prompt = pre_prompt
20
- Actor.actors[role] = self
21
 
22
  def __class_getitem__(cls, item):
23
  return cls.actors[item]
24
 
25
 
26
- def setup(question):
27
- pp1 = pp2 = pp3 = "Answer questions as precisely as you can! If you want to ask anyone, always start your sentence with their role. Never start your sentence with your own name. Share your inner thoughts inside parentheses. SAY ONLY ONE SINGLE SENTENCE! Do not say 'sure, here is my response' or anything such)"
28
- priest = Actor("Priest", available_models[0], "You are the Priest. There are 3 people standing in a circle: the Priest (that's you), the Teacher and the Kid.", pp1)
29
- teacher = Actor("Teacher", available_models[0], "You are the Teacher. There are 3 people standing in a circle: the Priest, the Teacher (that's you) and the Kid.", pp2)
30
- kid = Actor("Kid", available_models[0], "You are the Kid. There are 3 people standing in a circle: the Priest, the Teacher and the Kid (that's you).", pp3)
31
  st.set_page_config(layout="wide")
32
- col1, col2, col3 = st.columns(3)
33
- for actor, col in [(priest, col1), (teacher, col2), (kid, col3)]:
 
34
  with col:
35
- role = actor.role
36
- st.title(role)
37
- actor.model = st.selectbox("model", available_models, key=f"{role}-model")
38
- actor.system_prompt = st.text_area("system-prompt", actor.system_prompt, key=f"{role}-sp")
39
- actor.pre_prompt = st.text_area("pre-prompt", actor.pre_prompt, key=f"{role}-pp")
40
  max_steps = st.slider("max-steps", min_value=1, max_value=10, value=6, key="max-steps")
41
- st.text_input("Priest's task", f"{question}")
42
- return question, max_steps
43
 
44
 
45
  def main():
46
- question, max_steps = setup("Priest, your task is to figure out their names and where they live. Do not ask directly, they must not realize what information you are after!")
47
- questioner = None
 
 
 
 
 
 
48
 
49
- actor = target(sanitize(question))
 
 
50
  for step, _ in enumerate(range(max_steps), start=1):
51
- with st.spinner(f"({step}/{max_steps}) Asking {actor.role}..."):
52
  extended = f"{questioner} asks: {question}" if questioner else question
53
  answer = ask(actor.model, actor.system_prompt, actor.pre_prompt, extended)
54
- st.write(f":blue[{actor.role} says:] {answer}")
55
  question = sanitize(answer)
56
- questioner = actor.role
57
  actor = target(question)
58
 
59
 
 
9
  available_models = models()
10
 
11
 
12
+ class Scenario:
13
+ def __init__(self, title, actors, task):
14
+ self.title = title
15
+ self.actors = actors
16
+ self.task = task
17
+
18
+
19
  class Actor:
20
  actors = {}
21
 
22
+ def __init__(self, name, model, system_prompt, pre_prompt):
23
+ self.name = name
24
  self.model = model
25
  self.system_prompt = system_prompt
26
  self.pre_prompt = pre_prompt
27
+ Actor.actors[name] = self
28
 
29
  def __class_getitem__(cls, item):
30
  return cls.actors[item]
31
 
32
 
33
+ def setup(scenario):
 
 
 
 
34
  st.set_page_config(layout="wide")
35
+ columns = st.columns(len(scenario.actors))
36
+
37
+ for actor, col in zip(scenario.actors, columns):
38
  with col:
39
+ name = actor.name
40
+ st.title(name)
41
+ actor.model = st.selectbox("model", available_models, key=f"{name}-model")
42
+ actor.system_prompt = st.text_area("system-prompt", actor.system_prompt, key=f"{name}-sp")
43
+ actor.pre_prompt = st.text_area("pre-prompt", actor.pre_prompt, key=f"{name}-pp")
44
  max_steps = st.slider("max-steps", min_value=1, max_value=10, value=6, key="max-steps")
45
+ st.text_input(f"{scenario.actors[0].name}'s task", f"{scenario.task}")
46
+ return max_steps
47
 
48
 
49
  def main():
50
+ pre_prompt = "Answer questions as precisely as you can! If you want to ask anyone, always start your sentence with their role. Never start your sentence with your own name. Share your inner thoughts inside parentheses. SAY ONLY ONE SINGLE SENTENCE! Do not say 'sure, here is my response' or anything such)"
51
+ scenario = Scenario("Priest-Teacher-Kid", [
52
+ Actor("Priest", available_models[0], "You are the Priest. There are 3 people standing in a circle: the Priest (that's you), the Teacher and the Kid.", pre_prompt),
53
+ Actor("Teacher", available_models[0], "You are the Teacher. There are 3 people standing in a circle: the Priest, the Teacher (that's you) and the Kid.", pre_prompt),
54
+ Actor("Kid", available_models[0], "You are the Kid. There are 3 people standing in a circle: the Priest, the Teacher and the Kid (that's you).", pre_prompt)
55
+ ], "Priest, your task is to figure out their names and where they live. Do not ask directly, they must not realize what information you are after!")
56
+
57
+ max_steps = setup(scenario)
58
 
59
+ questioner = None
60
+ question = scenario.task
61
+ actor = target(question)
62
  for step, _ in enumerate(range(max_steps), start=1):
63
+ with st.spinner(f"({step}/{max_steps}) Asking {actor.name}..."):
64
  extended = f"{questioner} asks: {question}" if questioner else question
65
  answer = ask(actor.model, actor.system_prompt, actor.pre_prompt, extended)
66
+ st.write(f":blue[{actor.name} says:] {answer}")
67
  question = sanitize(answer)
68
+ questioner = actor.name
69
  actor = target(question)
70
 
71
 
chat/togetherchat.py CHANGED
@@ -12,10 +12,10 @@ client = OpenAI(
12
 
13
  def models():
14
  return [
 
15
  'meta-llama/Llama-2-13b-chat-hf',
16
  'meta-llama/Llama-2-70b-chat-hf',
17
  'Open-Orca/Mistral-7B-OpenOrca',
18
- 'teknium/OpenHermes-2p5-Mistral-7B',
19
  'zero-one-ai/Yi-34B-Chat',
20
  ]
21
 
 
12
 
13
  def models():
14
  return [
15
+ 'teknium/OpenHermes-2p5-Mistral-7B',
16
  'meta-llama/Llama-2-13b-chat-hf',
17
  'meta-llama/Llama-2-70b-chat-hf',
18
  'Open-Orca/Mistral-7B-OpenOrca',
 
19
  'zero-one-ai/Yi-34B-Chat',
20
  ]
21