joshuadunlop committed on
Commit
6844245
·
1 Parent(s): bca9701

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -10
app.py CHANGED
@@ -21,7 +21,7 @@ add_row = st.sidebar.button("Add row")
21
  reset = st.sidebar.button("Reset")
22
  followup_message = st.sidebar.text_area("Edit Message:")
23
  generate_all_edits = st.sidebar.button("Generate All Edits")
24
- model = st.sidebar.selectbox("Model:", ["gpt-4", "gpt-4-1106-preview"])
25
  temperature = st.sidebar.slider("Temperature:", 0.0, 1.0, 0.6, step=0.01)
26
  max_tokens = st.sidebar.number_input("Max Tokens:", min_value=1, max_value=8192, value=1000, step=1)
27
  top_p = st.sidebar.slider("Top P:", 0.0, 1.0, 1.0, step=0.01)
@@ -78,18 +78,14 @@ def generate_response(i, messages):
78
  completion = openai.ChatCompletion.create(
79
  model=model,
80
  messages=messages,
81
- max_tokens=max_tokens,
82
  temperature=temperature,
83
- top_p=top_p,
84
- frequency_penalty=0, # You can adjust this value
85
- presence_penalty=0, # You can adjust this value
86
- stop=None # You can specify stop sequences if needed
87
  )
88
 
89
- # Extracting the response from the completion
90
- response = completion['choices'][0]['message']['content']
91
- prompt_tokens = completion['usage']['prompt_tokens']
92
- response_tokens = completion['usage']['total_tokens'] - prompt_tokens
93
  word_count = len(re.findall(r'\w+', response))
94
 
95
  return (i, response, prompt_tokens, response_tokens, word_count, None)
 
21
  reset = st.sidebar.button("Reset")
22
  followup_message = st.sidebar.text_area("Edit Message:")
23
  generate_all_edits = st.sidebar.button("Generate All Edits")
24
+ model = st.sidebar.selectbox("Model:", ["gpt-4-1106-preview", "gpt-4"])
25
  temperature = st.sidebar.slider("Temperature:", 0.0, 1.0, 0.6, step=0.01)
26
  max_tokens = st.sidebar.number_input("Max Tokens:", min_value=1, max_value=8192, value=1000, step=1)
27
  top_p = st.sidebar.slider("Top P:", 0.0, 1.0, 1.0, step=0.01)
 
78
  completion = openai.ChatCompletion.create(
79
  model=model,
80
  messages=messages,
 
81
  temperature=temperature,
82
+ max_tokens=max_tokens,
83
+ top_p=top_p
 
 
84
  )
85
 
86
+ response = completion.choices[0].message.content
87
+ prompt_tokens = completion.usage['prompt_tokens']
88
+ response_tokens = completion.usage['total_tokens'] - prompt_tokens
 
89
  word_count = len(re.findall(r'\w+', response))
90
 
91
  return (i, response, prompt_tokens, response_tokens, word_count, None)