zmbfeng committed on
Commit 78683da · verified · 1 Parent(s): 07cc877

add params to original

Files changed (1):
  1. app.py +42 -3
app.py CHANGED
@@ -11,8 +11,22 @@ tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium')
 original_model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium')
 fine_tuned_model = GPT2LMHeadModel.from_pretrained('zmbfeng/FineTune-1')
 
-def create_response_original(input_str, num_beams, input_num):
-    print ("input_num="+str(num))
+def create_response_original(input_str,
+                             num_beams,
+                             num_return_sequences,
+                             temperature,
+                             repetition_penalty,
+                             top_p,
+                             top_k,
+                             do_sample):
+    print("num_beams=" + num_beams)
+    print("num_return_sequences" + num_return_sequences)
+    print("top_p" + top_p)
+    print("top_k" + top_k)
+    print("repetition_penalty" + repetition_penalty)
+    print("temperature" + temperature)
+    print("do_sample" + do_sample)
+
     #output_raw= generator(input_str)
     """print (output_raw)"""
 
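Note: the hunk above only adds the new parameters and debug prints; the rest of create_response_original sits outside the diff. A rough sketch of how these arguments would typically be forwarded to DialoGPT's generate() call is shown below. The encode/generate/decode lines and the max_length cap are assumptions, not part of this commit, and since gr.Number passes a numeric value and gr.Checkbox a bool, the values are cast with int() and wrapped in str() for printing, which the raw string concatenations in the diff's prints would otherwise trip over with a TypeError.

# Hypothetical continuation of create_response_original (not in this commit).
def create_response_original(input_str, num_beams, num_return_sequences,
                             temperature, repetition_penalty, top_p, top_k,
                             do_sample):
    print("num_beams=" + str(num_beams))
    # DialoGPT-style prompt: the input text followed by the EOS token.
    input_ids = tokenizer.encode(input_str + tokenizer.eos_token, return_tensors="pt")
    outputs = original_model.generate(
        input_ids,
        max_length=100,                               # assumed cap, not from the diff
        num_beams=int(num_beams),
        num_return_sequences=int(num_return_sequences),
        temperature=temperature,
        repetition_penalty=repetition_penalty,
        top_p=top_p,
        top_k=int(top_k),
        do_sample=do_sample,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Drop the prompt tokens and join the candidate replies into one string.
    responses = [tokenizer.decode(o[input_ids.shape[-1]:], skip_special_tokens=True)
                 for o in outputs]
    return "\n".join(responses)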
 
 
@@ -37,7 +51,32 @@ interface1 = gr.Interface(fn=create_response_original, inputs=[
     gr.Textbox(label="input text here", lines=3),
     gr.Number(label="num_beams (integer) explores the specified number of possible outputs and selects the most " +
               "likely ones (specified in num_beams)", value=7),
-    gr.inputs.Number(label="Enter a whole number")], outputs="text", title="Original")
+    gr.Number(label="num_return_sequences (integer) the number of outputs selected from num_beams possible output",
+              value=5),
+    gr.Number(
+        label="temperature (decimal) controls the creativity or randomness of the output. A higher temperature" +
+              " (e.g., 0.9) results in more diverse and creative output, while a lower temperature (e.g., 0.2)" +
+              " makes the output more deterministic and focused",
+        value=0.2),
+    gr.Number(label="repetition_penalty (decimal) penalizes words that have already appeared in the output, " +
+              "making them less likely to be generated again. A higher repetition_penalty (e.g., 1.5) results" +
+              "in more varied and non-repetitive output.",
+              value=1.5),
+    gr.Number(label="top_p (decimal) the model will only consider the words that have a high enough probability" +
+              " to reach a certain threshold",
+              value=0.9),
+    gr.Number(label="top_k (integer) The number of highest probability vocabulary word will be considered" +
+              "This means that only the tokens with the highest probabilities are considered for sampling" +
+              "This reduces the diversity of the generated sequences, " +
+              "but also makes them more likely to be coherent and fluent.",
+              value=50),
+    gr.Checkbox(label="do_sample. If is set to False, num_return_sequences must be 1 because the generate function will use greedy decoding, " +
+                "which means that it will select the word with the highest probability at each step. " +
+                "This results in a deterministic and fluent output, but it might also lack diversity and creativity" +
+                "If is set to True, the generate function will use stochastic sampling, which means that it will randomly" +
+                " select a word from the probability distribution at each step. This results in a more diverse and creative" +
+                " output, but it might also introduce errors and inconsistencies ", value=True)
+    )], outputs="text", title="Original")
 interface2 = gr.Interface(fn=create_response_fine_tuned, inputs="text", outputs="text", title="Fine Tuned")
 demo = gr.TabbedInterface([interface1, interface2], ["Original", "Fine Tuned"])
 # with gr.Blocks() as demo:
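Note: on the UI side, the old gr.inputs.Number placeholder is replaced by one gr.Number or gr.Checkbox per generation argument (the gr.inputs namespace is deprecated in recent Gradio releases, so gr.Number is the form that keeps working). As the do_sample checkbox label explains, not every combination of these values is accepted by generate(); a small guard along these lines, with an illustrative name not taken from the commit, reconciles them first:

# Illustrative helper, not part of the commit: reconcile the UI values before
# they reach generate(). gr.Number yields numbers (not strings), gr.Checkbox a bool.
def sanitize_generation_args(num_beams, num_return_sequences, do_sample):
    num_beams = int(num_beams)
    num_return_sequences = int(num_return_sequences)
    if not do_sample and num_beams == 1:
        # Greedy decoding is deterministic, so only one sequence is meaningful.
        num_return_sequences = 1
    elif num_beams > 1:
        # Beam search can return at most num_beams distinct candidates.
        num_return_sequences = min(num_return_sequences, num_beams)
    return num_beams, num_return_sequences, do_sample

With the diff's defaults (num_beams=7, num_return_sequences=5, do_sample=True) the guard changes nothing; it only matters when a user unchecks do_sample or asks for more sequences than beams.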