zmbfeng committed on
Commit
e90d0d7
·
1 Parent(s): cf3100d

examples removed

Browse files
Files changed (1) hide show
  1. app.py +17 -31
app.py CHANGED
@@ -24,21 +24,21 @@ login(os.environ["HF_TOKEN"])
24
  dt = datetime.datetime.now()
25
  print(dt)
26
  print("loading models")
27
- tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium')
28
- original_model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium')
29
- untethered_model = GPT2LMHeadModel.from_pretrained('zmbfeng/untethered_20240225_epochs_500')
30
- question_generation_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
31
- question_generation_model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
32
- paraphrase_tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
33
- paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")
34
-
35
- # tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium',cache_dir="G:\My Drive\Avatar\language_models_windows")
36
- # original_model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium',cache_dir="G:\My Drive\Avatar\language_models_windows")
37
- # untethered_model = GPT2LMHeadModel.from_pretrained('zmbfeng/untethered_20240225_epochs_500',cache_dir="G:\My Drive\Avatar\language_models_windows")
38
- # question_generation_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap",cache_dir="G:\\My Drive\\Avatar\\language_models_windows")
39
- # question_generation_model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap",cache_dir="G:\\My Drive\\Avatar\\language_models_windows")
40
- # paraphrase_tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws",cache_dir="G:\\My Drive\\Avatar\\language_models_windows")
41
- # paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws",cache_dir="G:\\My Drive\\Avatar\\language_models_windows")
42
 
43
  # tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium',cache_dir="C:\\Users\\zmbfeng\\Google Drive\\language_models_windows")
44
  # original_model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium',cache_dir="C:\\Users\\zmbfeng\\Google Drive\\Avatar\\language_models_windows")
@@ -81,17 +81,7 @@ def create_response(input_str,
81
 
82
 
83
  common_examples_string="<br/>Sample Inputs:<br/>What is death?<br/>One of the best teachers in all of life turns out to be what?<br/>what is your most meaningful relationship?<br/>What actually gives life meaning?<br/>"
84
- common_examples=[
85
- ["What is death?",default_temperature], # The first example
86
- ["One of the best teachers in all of life turns out to be what?",default_temperature], # The second example
87
- ["what is your most meaningful relationship?",default_temperature], # The third example
88
- ["What actually gives life meaning?",default_temperature]
89
- ]
90
- examples = copy.deepcopy(common_examples)
91
- print(examples)
92
- for example in examples:
93
- example.append("original_model")
94
- print(examples)
95
  interface_original = gr.Interface(fn=create_response,
96
  title="original",
97
  description="original language model, no fine tuning"+common_examples_string,
@@ -112,11 +102,7 @@ interface_original = gr.Interface(fn=create_response,
112
  ],
113
  outputs="html"
114
  )
115
- examples = copy.deepcopy(common_examples)
116
- print(examples)
117
- for example in examples:
118
- example.append("untethered_model")
119
- print(examples)
120
 
121
 
122
  interface_untethered_model = gr.Interface(fn=create_response,
 
24
  dt = datetime.datetime.now()
25
  print(dt)
26
  print("loading models")
27
+ # tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium')
28
+ # original_model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium')
29
+ # untethered_model = GPT2LMHeadModel.from_pretrained('zmbfeng/untethered_20240225_epochs_500')
30
+ # question_generation_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
31
+ # question_generation_model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
32
+ # paraphrase_tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
33
+ # paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")
34
+
35
+ tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium',cache_dir="G:\My Drive\Avatar\language_models_windows")
36
+ original_model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium',cache_dir="G:\My Drive\Avatar\language_models_windows")
37
+ untethered_model = GPT2LMHeadModel.from_pretrained('zmbfeng/untethered_20240225_epochs_500',cache_dir="G:\My Drive\Avatar\language_models_windows")
38
+ question_generation_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap",cache_dir="G:\\My Drive\\Avatar\\language_models_windows")
39
+ question_generation_model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap",cache_dir="G:\\My Drive\\Avatar\\language_models_windows")
40
+ paraphrase_tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws",cache_dir="G:\\My Drive\\Avatar\\language_models_windows")
41
+ paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws",cache_dir="G:\\My Drive\\Avatar\\language_models_windows")
42
 
43
  # tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium',cache_dir="C:\\Users\\zmbfeng\\Google Drive\\language_models_windows")
44
  # original_model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium',cache_dir="C:\\Users\\zmbfeng\\Google Drive\\Avatar\\language_models_windows")
 
81
 
82
 
83
  common_examples_string="<br/>Sample Inputs:<br/>What is death?<br/>One of the best teachers in all of life turns out to be what?<br/>what is your most meaningful relationship?<br/>What actually gives life meaning?<br/>"
84
+
 
 
 
 
 
 
 
 
 
 
85
  interface_original = gr.Interface(fn=create_response,
86
  title="original",
87
  description="original language model, no fine tuning"+common_examples_string,
 
102
  ],
103
  outputs="html"
104
  )
105
+
 
 
 
 
106
 
107
 
108
  interface_untethered_model = gr.Interface(fn=create_response,