ColeGuion committed
Commit 9b8838a · verified · 1 Parent(s): 9bd7774

Update app.py

Files changed (1): app.py +4 -3
app.py CHANGED
@@ -3,12 +3,13 @@ import gradio as gr
 
 client = InferenceClient("grammarly/coedit-large")
 
-def generate(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+def generate(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, top_k=50, repetition_penalty=1.0):
     temperature = float(temperature)
     if temperature < 1e-2: temperature = 1e-2
     top_p = float(top_p)
+    top_k = int(top_k)  # Ensure top_k is an integer, as it was being treated like a float
 
-    generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k) # seed=42,)
+    generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty) # seed=42,)
 
     formatted_prompt = "Fix grammatical errors in this sentence: " + prompt
     print("\nPROMPT: \n\t" + formatted_prompt)
@@ -28,7 +29,7 @@ additional_inputs=[
     gr.Slider( label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs", ),
     gr.Slider( label="Max new tokens", value=256, minimum=0, maximum=1048, step=64, interactive=True, info="The maximum numbers of new tokens", ),
     gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),
-    gr.Slider( label="Top-k", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more TOP-K", )
+    gr.Slider(label="Top-k", value=50, minimum=0, maximum=100, step=1, interactive=True, info="Limits the number of top-k tokens considered at each step"),
 ]
 
 gr.ChatInterface(
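
What the patch fixes: the old generate_kwargs referenced top_k even though generate never accepted it (a NameError at call time, unless a global happened to exist), and the old Top-k slider was configured like Top-p (value=0.90, maximum=1, step=0.05), producing fractional values that make no sense for top-k, which counts tokens. The commit threads top_k through the signature with a default of 50, coerces it to an integer, forwards repetition_penalty (previously dropped from the kwargs), and gives the slider a proper integer range of 0 to 100.

For context, a minimal sketch of how the patched generate might drive the model end to end. The hunks never show the actual inference call, so the InferenceClient.text_generation usage below (a real huggingface_hub method) is an assumption about this Space, not code quoted from app.py:

from huggingface_hub import InferenceClient

client = InferenceClient("grammarly/coedit-large")

def generate(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, top_k=50, repetition_penalty=1.0):
    # Clamp/coerce sampling parameters, mirroring the patched app.py
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)
    top_k = int(top_k)  # top_k is a token count, so it must be an integer

    # Same kwargs the patch builds; seed stays commented out as in the source
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
    )

    formatted_prompt = "Fix grammatical errors in this sentence: " + prompt
    # Assumed call: text_generation accepts these sampling kwargs
    return client.text_generation(formatted_prompt, **generate_kwargs)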