ColeGuion committed on
Commit
b6c6daf
·
verified ·
1 Parent(s): 24a8b37

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
 
4
  client = InferenceClient("grammarly/coedit-large")
5
 
6
- def generate(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, top_k=50, repetition_penalty=1.0):
7
  print(f" TEMP: {temperature} \n\t TYPE: {type(temperature)}")
8
  print(f" TOP-P: {top_p} \n\t TYPE: {type(top_p)}")
9
  print(f" TOP-K: {top_k} \n\t TYPE: {type(top_k)}")
@@ -36,7 +36,7 @@ additional_inputs=[
36
  gr.Slider( label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs", ),
37
  gr.Slider( label="Max new tokens", value=256, minimum=0, maximum=1048, step=64, interactive=True, info="The maximum numbers of new tokens", ),
38
  gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),
39
- gr.Slider(label="Top-k", value=50, minimum=0, maximum=100, step=1, interactive=True, info="Limits the number of top-k tokens considered at each step"),
40
  ]
41
 
42
  gr.ChatInterface(
 
3
 
4
  client = InferenceClient("grammarly/coedit-large")
5
 
6
+ def generate(prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, top_k=50, repetition_penalty=1.0):
7
  print(f" TEMP: {temperature} \n\t TYPE: {type(temperature)}")
8
  print(f" TOP-P: {top_p} \n\t TYPE: {type(top_p)}")
9
  print(f" TOP-K: {top_k} \n\t TYPE: {type(top_k)}")
 
36
  gr.Slider( label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs", ),
37
  gr.Slider( label="Max new tokens", value=256, minimum=0, maximum=1048, step=64, interactive=True, info="The maximum numbers of new tokens", ),
38
  gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),
39
+ gr.Slider( label="Top-k", value=50, minimum=0, maximum=100, step=1, interactive=True, info="Limits the number of top-k tokens considered at each step"),
40
  ]
41
 
42
  gr.ChatInterface(