A B Vijay Kumar committed on
Commit
6a22981
·
1 Parent(s): 9a07720

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -10
app.py CHANGED
@@ -1,11 +1,18 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
 
3
 
4
  model_name = "vijjuk/codegen-350M-mono-python-18k-alpaca"
5
- base_model = AutoModelForCausalLM.from_pretrained(model_name)
6
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
7
- tokenizer.pad_token = tokenizer.eos_token
8
- tokenizer.padding_side = "right"
 
 
 
 
 
 
9
 
10
  def query(instruction, input):
11
  prompt = f"""### Instruction:
@@ -16,12 +23,13 @@ def query(instruction, input):
16
  {input}
17
  ### Response:
18
  """
19
- input_ids = tokenizer(prompt, return_tensors="pt", truncation=True)
20
- output_base = base_model.generate(input_ids=input_ids, max_new_tokens=500, do_sample=True, top_p=0.9,temperature=0.5)
21
- response = "{tokenizer.batch_decode(output_base.detach().cpu().numpy(), skip_special_tokens=True)[0][len(prompt):]}"
22
- return response
 
23
 
24
  inputs = ["text", "text"]
25
  outputs = "text"
26
  iface = gr.Interface(fn=query, inputs=inputs, outputs=outputs)
27
- iface.launch(shape=True)
 
1
  import gradio as gr
2
+ #from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ from transformers import pipeline
4
 
5
  model_name = "vijjuk/codegen-350M-mono-python-18k-alpaca"
6
+ pipe = pipeline("python-fine-tuning", model=model_name)
7
+
8
+
9
+
10
+
11
+
12
+ #base_model = AutoModelForCausalLM.from_pretrained(model_name)
13
+ #tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
14
+ #tokenizer.pad_token = tokenizer.eos_token
15
+ #tokenizer.padding_side = "right"
16
 
17
  def query(instruction, input):
18
  prompt = f"""### Instruction:
 
23
  {input}
24
  ### Response:
25
  """
26
+ #input_ids = tokenizer(prompt, return_tensors="pt", truncation=True)
27
+ #output_base = base_model.generate(input_ids=input_ids, max_new_tokens=500, do_sample=True, top_p=0.9,temperature=0.5)
28
+ #response = "{tokenizer.batch_decode(output_base.detach().cpu().numpy(), skip_special_tokens=True)[0][len(prompt):]}"
29
+ #return response
30
+ return pipe(prompt)[0]["prompt"]
31
 
32
  inputs = ["text", "text"]
33
  outputs = "text"
34
  iface = gr.Interface(fn=query, inputs=inputs, outputs=outputs)
35
+ iface.launch()