kjozsa committed on
Commit e3870b9
Parent: 332cf39

introduce temperature

chat/__init__.py CHANGED
@@ -5,7 +5,6 @@ from loguru import logger
 
 # from .ollamachat import ask, models
 from .togetherchat import ask, models
-
 # from .transformerschat import ask, models
 
 available_models = models()
@@ -44,13 +43,14 @@ def setup(scenario):
     col1, col2 = st.columns([1, 4])
     with col1:
         model = st.selectbox("model", available_models)
+        temperature = st.slider("temperature", min_value=0.0, max_value=1.0, value=0.7, key="temperature")
         max_steps = st.slider("max-steps", min_value=1, max_value=10, value=6, key="max-steps")
     with col2:
         st.text_area("pre-prompt", scenario.pre_prompt)
 
     st.divider()
     st.header("Outcome")
-    return model, max_steps
+    return model, max_steps, temperature
 
 
 def main():
@@ -75,18 +75,18 @@ def main():
     )]
 
     scenario = st.selectbox("scenario", scenarios)
-    model, max_steps = setup(scenario)
-    main_loop(max_steps, model, scenario)
+    model, max_steps, temperature = setup(scenario)
+    main_loop(max_steps, model, scenario, temperature)
 
 
-def main_loop(max_steps, model, scenario):
+def main_loop(max_steps, model, scenario, temperature):
     questioner = None
     question = scenario.task
     actor = target(scenario, question)
     for step, _ in enumerate(range(max_steps), start=1):
         with st.spinner(f"({step}/{max_steps}) Asking {actor.name}..."):
             extended = f"{questioner} asks: {question}" if questioner else question
-            answer = ask(model, actor.system_prompt, scenario.pre_prompt, extended)
+            answer = ask(model, actor.system_prompt, scenario.pre_prompt, extended, temperature=temperature)
             st.write(f":blue[{actor.name} says:] {answer}")
             question = sanitize(answer)
             questioner = actor.name
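
The new control mirrors the existing max-steps slider: because its bounds and default are floats, st.slider returns a float, and key="temperature" also mirrors the value into st.session_state. A minimal standalone sketch of that behavior (the st.write line is illustrative, not part of the app):

    import streamlit as st

    temperature = st.slider("temperature", min_value=0.0, max_value=1.0, value=0.7, key="temperature")
    st.write(f"sampling temperature: {temperature:.2f}")  # same value is also in st.session_state["temperature"]
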
chat/ollamachat.py CHANGED
@@ -6,13 +6,13 @@ def models():
     return sorted([x['model'] for x in ollama.list()['models']], key=lambda x: (not x.startswith("openhermes"), x))
 
 
-def ask(model, system_prompt, pre_prompt, question):
+def ask(model, system_prompt, pre_prompt, question, temperature=0.7):
     messages = [
         {'role': 'system', 'content': f"{system_prompt} {pre_prompt}", },
         {'role': 'user', 'content': f"{question}", },
     ]
     logger.debug(f"<< {model} << {question}")
-    response = ollama.chat(model=model, messages=messages)
+    response = ollama.chat(model=model, messages=messages, options={'temperature': temperature})
     answer = response['message']['content']
     logger.debug(f">> {model} >> {answer}")
     return answer
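
The Ollama Python client takes sampling parameters through its options mapping rather than as top-level keyword arguments, which is why the value is forwarded as options={'temperature': temperature}. A usage sketch of the updated helper; the model tag and prompts are illustrative and assume the model has already been pulled locally:

    from chat.ollamachat import ask

    answer = ask(
        model="openhermes",                      # example tag; any locally pulled Ollama model works
        system_prompt="Answer in one sentence.",
        pre_prompt="",
        question="Why do lower temperatures make replies more repeatable?",
        temperature=0.2,                         # forwarded as options={'temperature': 0.2}
    )
    print(answer)
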
chat/togetherchat.py CHANGED
@@ -23,14 +23,14 @@ def models():
     ]
 
 
-def ask(model, system_prompt, pre_prompt, question):
+def ask(model, system_prompt, pre_prompt, question, temperature=0.7):
     messages = [
         {'role': 'system', 'content': f"{system_prompt} {pre_prompt}"},
         {'role': 'user', 'content': f"{question}"},
     ]
     logger.debug(f"<< {model} << {question}")
 
-    chat_completion = client.chat.completions.create(messages=messages, model=model)
+    chat_completion = client.chat.completions.create(messages=messages, model=model, temperature=temperature)
     response = chat_completion.choices[0]
     answer = response.message.content
     logger.debug(f">> {model} >> {answer}")
chat/transformerschat.py CHANGED
@@ -33,7 +33,7 @@ def load():
 model, tokenizer = load()
 
 
-def ask(_, system_prompt, pre_prompt, question):
+def ask(_, system_prompt, pre_prompt, question, temperature=0.7):
     messages = [
         {'role': 'system', 'content': f"{system_prompt} {pre_prompt}", },
         {'role': 'user', 'content': f"{question}", },
@@ -42,7 +42,7 @@ def ask(_, system_prompt, pre_prompt, question):
     inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
     # inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
 
-    outputs = model.generate(inputs, max_length=200)
+    outputs = model.generate(inputs, max_length=200, temperature=temperature)
     answer = tokenizer.batch_decode(outputs)[0]
     logger.debug(f">> transformers >> {answer}")
     return answer
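
One caveat on the transformers backend: generate() only honors temperature when sampling is enabled; with the default greedy decoding the value is ignored (newer transformers versions warn about this). A minimal sketch, reusing the model, tokenizer, messages and temperature names from ask() above, with the rendered prompt tokenized into ids as generate() expects:

    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    outputs = model.generate(
        input_ids,
        max_length=200,
        do_sample=True,            # without this, decoding stays greedy and temperature has no effect
        temperature=temperature,
    )
    answer = tokenizer.batch_decode(outputs)[0]
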