armandstrickernlp committed on
Commit
a233482
·
1 Parent(s): 86a2758

add example

Browse files
Files changed (1) hide show
  1. app.py +24 -0
app.py CHANGED
@@ -4,6 +4,7 @@ model_name = 'armandnlp/gpt2-TOD_finetuned_SGD'
4
  tokenizer_TOD = AutoTokenizer.from_pretrained(model_name)
5
  model_TOD = AutoModelForCausalLM.from_pretrained(model_name)
6
 
 
7
  def generate_response(prompt):
8
  input_ids = tokenizer_TOD(prompt, return_tensors="pt").input_ids
9
  outputs = model_TOD.generate(input_ids,
@@ -12,10 +13,32 @@ def generate_response(prompt):
12
  eos_token_id=50262)
13
  return tokenizer_TOD.batch_decode(outputs)[0]
14
 
 
 
 
 
 
 
 
 
 
 
15
 
16
 
17
  import gradio as gr
18
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  iface = gr.Interface(fn=generate_response,
20
  inputs="text",
21
  outputs="text",
@@ -23,4 +46,5 @@ iface = gr.Interface(fn=generate_response,
23
  examples=[["<|context|> <|user|> I'm super hungry ! I want to go to the restaurant.<|endofcontext|>"]],
24
  description="Passing in a task-oriented dialogue context generates a belief state, actions to take and a response based on those actions",
25
  )
 
26
  iface.launch()
 
4
  tokenizer_TOD = AutoTokenizer.from_pretrained(model_name)
5
  model_TOD = AutoModelForCausalLM.from_pretrained(model_name)
6
 
7
+
8
  def generate_response(prompt):
9
  input_ids = tokenizer_TOD(prompt, return_tensors="pt").input_ids
10
  outputs = model_TOD.generate(input_ids,
 
13
  eos_token_id=50262)
14
  return tokenizer_TOD.batch_decode(outputs)[0]
15
 
16
+ def chat(message, history):
17
+ history = history or []
18
+
19
+
20
+ output = generate_response(message)
21
+ context, response = output.split('<|endofcontext|>')
22
+
23
+ history.append((context+'<|endofcontext|>', response))
24
+
25
+ return history
26
 
27
 
28
  import gradio as gr
29
 
30
+ chatbot = gr.Chatbot(color_map=("gray", "blue"))
31
+
32
+ iface = gr.Interface(chat,
33
+ ["text", "state"],
34
+ [chatbot, "state"],
35
+ allow_screenshot=False,
36
+ allow_flagging="never",
37
+ )
38
+
39
+
40
+
41
+ """
42
  iface = gr.Interface(fn=generate_response,
43
  inputs="text",
44
  outputs="text",
 
46
  examples=[["<|context|> <|user|> I'm super hungry ! I want to go to the restaurant.<|endofcontext|>"]],
47
  description="Passing in a task-oriented dialogue context generates a belief state, actions to take and a response based on those actions",
48
  )
49
+ """
50
  iface.launch()