zmbfeng commited on
Commit
6882935
·
verified ·
1 Parent(s): 24a2b9a

first try using param and multiple output

Browse files
Files changed (1) hide show
  1. app.py +20 -3
app.py CHANGED
@@ -34,8 +34,25 @@ def create_response_original(input_str,
34
  #output_str = output_str.replace("\n", "")
35
  #output_str = output_str.replace(input_str, "")
36
  #output_str = tokenizer.decode(model.generate(**tokenizer("What are John West's hobbies?"+tokenizer.eos_token,return_tensors="pt",max_length=200))[0])
37
- output_str = tokenizer.decode(original_model.generate(**tokenizer(input_str+tokenizer.eos_token,return_tensors="pt",max_length=200))[0])
38
- return (output_str)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
  def create_response_fine_tuned(input_str):
41
  #output_raw= generator(input_str)
@@ -85,7 +102,7 @@ interface1 = gr.Interface(fn=create_response_original,
85
  "If is set to True, the generate function will use stochastic sampling, which means that it will randomly" +
86
  " select a word from the probability distribution at each step. This results in a more diverse and creative" +
87
  " output, but it might also introduce errors and inconsistencies ", value=True)
88
- ], outputs="text")
89
  interface2 = gr.Interface(fn=create_response_fine_tuned, inputs="text", outputs="text", title="Fine Tuned")
90
  demo = gr.TabbedInterface([interface1, interface2], ["Original", "Fine Tuned"])
91
  # with gr.Blocks() as demo:
 
34
  #output_str = output_str.replace("\n", "")
35
  #output_str = output_str.replace(input_str, "")
36
  #output_str = tokenizer.decode(model.generate(**tokenizer("What are John West's hobbies?"+tokenizer.eos_token,return_tensors="pt",max_length=200))[0])
37
+ # output_str = tokenizer.decode(original_model.generate(**tokenizer(input_str+tokenizer.eos_token,return_tensors="pt",max_length=200),
38
+ # num_beams=num_beams,
39
+ # num_return_sequences=num_return_sequences)[0])
40
+
41
+ input_ids = tokenizer.encode(input_str + tokenizer.eos_token, return_tensors="pt")
42
+ output_ids = original_model.generate(input_ids,
43
+ do_sample=do_sample,
44
+ max_length=100,
45
+ temperature=temperature,
46
+ top_p=top_p,
47
+ top_k=top_k,
48
+ repetition_penalty=repetition_penalty,
49
+ num_return_sequences=num_return_sequences,
50
+ num_beams=num_beams)
51
+ outputs = []
52
+ for output_id in output_ids:
53
+ output = tokenizer.decode(output_id, skip_special_tokens=True)
54
+ outputs.append(output)
55
+ return outputs
56
 
57
  def create_response_fine_tuned(input_str):
58
  #output_raw= generator(input_str)
 
102
  "If is set to True, the generate function will use stochastic sampling, which means that it will randomly" +
103
  " select a word from the probability distribution at each step. This results in a more diverse and creative" +
104
  " output, but it might also introduce errors and inconsistencies ", value=True)
105
+ ], outputs="json")
106
  interface2 = gr.Interface(fn=create_response_fine_tuned, inputs="text", outputs="text", title="Fine Tuned")
107
  demo = gr.TabbedInterface([interface1, interface2], ["Original", "Fine Tuned"])
108
  # with gr.Blocks() as demo: