slush0 committed
Commit 58ea66a · Parent: 51cf509

Use bloom-7b1 for examples where appropriate.

Files changed (1): app.py (+6 -5)
app.py CHANGED
@@ -67,7 +67,7 @@ def generate(prompt, model, endseq, max_length,
             yield [prompt2, output]
     except Exception:
         print(traceback.format_exc())
-        yield [prompt, "Error: " + traceback.format_exc()]
+        yield [prompt, output + "\nError: " + traceback.format_exc()]
         return
 
 with gr.Blocks() as iface:
@@ -89,7 +89,7 @@ with gr.Blocks() as iface:
                           value=["\\n", "</s>"], label='Extra end sequences')
 
     # Maximum length of inference session
-    max_length = gr.Radio([64, 128, 256, 512, 1024, 2048], value=256, interactive=True, label="Max length")
+    max_length = gr.Radio([64, 128, 256, 512, 1024, 2048], value=128, interactive=True, label="Max length")
 
     with gr.Row():
         with gr.Column():
@@ -102,11 +102,12 @@ with gr.Blocks() as iface:
             # Only one of top_k and top_p can be set. Requires "do_sample=True" to work.
             top_k = gr.Number(value=0, precision=0, interactive=True, label="top_k")
             top_p = gr.Number(value=0.9, precision=2, interactive=True, label="top_p")
+            # TODO num_beams
 
             # Generation temperature
             temperature = gr.Number(value=0.75, precision=2, interactive=True, label="Temperature")
 
-    prompt = gr.Textbox(lines=2, label='Prompt', placeholder="Prompt Here...")
+    prompt = gr.Textbox(lines=3, label='Prompt', placeholder="Prompt Here...")
 
     with gr.Row():
         button_generate = gr.Button("Generate")
@@ -122,8 +123,8 @@ with gr.Blocks() as iface:
 
     examples = gr.Examples(inputs=[prompt, model, do_sample, top_k, top_p, temperature, add_stoptoken],
                            examples=[
-        ["The SQL command to extract all the users whose name starts with A is: ", "bloom", False, 0, 0, 1, False],
-        ["The Spanish translation of thank you for your help is: ", "bloom", False, 0, 0, 1, False],
+        ["The SQL command to extract all the users whose name starts with A is: ", "bloom-7b1", False, 0, 0, 1, False],
+        ["The Spanish translation of thank you for your help is: ", "bloom-7b1", False, 0, 0, 1, False],
         ["A human talks to a powerful AI that follows the human's instructions.</s>\n"
          "Human: Hi!</s>\n"
          "AI: Hi! How can I help you?</s>\n"