zmbfeng commited on
Commit
71852c2
·
1 Parent(s): fa94b9c

two interfaces

Browse files
Files changed (1) hide show
  1. app.py +20 -11
app.py CHANGED
@@ -8,10 +8,10 @@ login(os.environ["HF_TOKEN"])
8
  #https://huggingface.co/facebook/opt-1.3b
9
  #generator = pipeline('text-generation', model="microsoft/DialoGPT-medium")
10
  tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium')
11
- #model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium')
12
- model = GPT2LMHeadModel.from_pretrained('zmbfeng/FineTune-1')
13
 
14
- def create_response(input_str):
15
  #output_raw= generator(input_str)
16
  """print (output_raw)"""
17
 
@@ -19,14 +19,23 @@ def create_response(input_str):
19
  #output_str = output_str.replace("\n", "")
20
  #output_str = output_str.replace(input_str, "")
21
  #output_str = tokenizer.decode(model.generate(**tokenizer("What are John West's hobbies?"+tokenizer.eos_token,return_tensors="pt",max_length=200))[0])
22
- output_str = tokenizer.decode(model.generate(**tokenizer(input_str+tokenizer.eos_token,return_tensors="pt",max_length=200))[0])
23
  return (output_str)
24
 
25
- demo = gr.Interface(
26
- fn=create_response,
27
- inputs="text",
28
- outputs="text",
29
- title="LLM",
30
- description="This interface generates a respsone (fine tuned)"
31
- )
 
 
 
 
 
 
 
 
 
32
  demo.launch()
 
# Model references:
# https://huggingface.co/facebook/opt-1.3b
# generator = pipeline('text-generation', model="microsoft/DialoGPT-medium")

# Shared tokenizer: both models below are GPT-2-family (DialoGPT and a
# fine-tune of it), so one DialoGPT tokenizer serves both.
tokenizer = GPT2Tokenizer.from_pretrained('microsoft/DialoGPT-medium')
# Base conversational model, kept for side-by-side comparison in the UI.
original_model = GPT2LMHeadModel.from_pretrained('microsoft/DialoGPT-medium')
# Fine-tuned checkpoint — presumably trained from DialoGPT-medium; verify on the Hub.
fine_tuned_model = GPT2LMHeadModel.from_pretrained('zmbfeng/FineTune-1')
14
def create_response_original(input_str):
    """Generate a reply to ``input_str`` with the base DialoGPT model.

    The prompt is terminated with the EOS token (DialoGPT's turn
    separator) and the full generation — prompt included — is decoded
    and returned as a string.

    Bug fix: the original code passed ``max_length=200`` to the
    *tokenizer* call, which only truncates the input; ``generate()``
    then ran with its short default length, cutting replies off.
    ``max_length`` now goes to ``generate()`` as intended.
    """
    inputs = tokenizer(input_str + tokenizer.eos_token, return_tensors="pt")
    output_ids = original_model.generate(
        **inputs,
        max_length=200,
        # GPT-2 has no pad token; using EOS silences the runtime warning.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output_ids[0])
24
 
25
def create_response_fine_tuned(input_str):
    """Generate a reply to ``input_str`` with the fine-tuned model.

    Mirrors ``create_response_original`` but runs the
    ``zmbfeng/FineTune-1`` checkpoint so the two can be compared in the
    UI.

    Bug fix: ``max_length=200`` was passed to the *tokenizer* (input
    truncation only), leaving ``generate()`` at its short default
    length; it is now applied to generation.
    """
    inputs = tokenizer(input_str + tokenizer.eos_token, return_tensors="pt")
    output_ids = fine_tuned_model.generate(
        **inputs,
        max_length=200,
        # GPT-2 has no pad token; using EOS silences the runtime warning.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output_ids[0])
35
# Two side-by-side interfaces: base model vs. fine-tuned model.
interface1 = gr.Interface(fn=create_response_original, inputs="text", outputs="text")
interface2 = gr.Interface(fn=create_response_fine_tuned, inputs="text", outputs="text")

with gr.Blocks() as demo:
    with gr.Row():
        # Bug fix: gr.Interface has no .show() method — embedding an
        # Interface inside a Blocks context is done with .render().
        # The original code raised AttributeError before launching.
        interface1.render()
        interface2.render()

demo.launch()