Dawoodthouseef committed on
Commit 199e441 · 1 Parent(s): d0ceab0

Update app.py

Files changed (1)
  1. app.py +12 -6
app.py CHANGED
@@ -1,20 +1,26 @@
 import gradio as gr
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
-#gr.Interface.load("models/replit/replit-code-v1-3b").launch()
 def code_v1(input_text):
     # load tokenizer
     tokenizer = AutoTokenizer.from_pretrained('replit/replit-code-v1-3b', trust_remote_code=True)
 
+    # load model
+    model = AutoModelForCausalLM.from_pretrained('replit/replit-code-v1-3b')
+
     # single input encoding + generation
     x = tokenizer.encode(input_text, return_tensors='pt')
     y = model.generate(x)
+
     # decoding, clean_up_tokenization_spaces=False to ensure syntactical correctness
     generated_code = tokenizer.decode(y[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
     return generated_code
 
-prompt = gr.Textbox(label="Prompt")
-run_button = gr.Button(label="Run")
-output_prompt = gr.Textbox(label="OutPut")
-run_button.click(fn=code_v1, inputs=ips, outputs=[output_prompt])
+prompt = gr.inputs.Textbox(label="Prompt")
+run_button = gr.outputs.Button(label="Run")
+output_prompt = gr.outputs.Textbox(label="Output")
+
+iface = gr.Interface(fn=code_v1, inputs=prompt, outputs=output_prompt)
+iface.launch()
+
 
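
Note: the committed version still reloads the tokenizer and model on every call, keeps an unused Run button, and uses the gr.inputs / gr.outputs component modules that have since been removed from Gradio. A minimal sketch of the same app against the current Gradio component API is shown below; the module-level loading, trust_remote_code=True on the model load, and max_new_tokens=128 are illustrative assumptions, not part of this commit.

# Sketch only: same app with current Gradio components, loading the model
# once at startup instead of per request. max_new_tokens and the extra
# trust_remote_code flag are assumptions, not taken from the commit.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "replit/replit-code-v1-3b"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, trust_remote_code=True)

def code_v1(input_text):
    # encode the prompt and generate a continuation
    x = tokenizer.encode(input_text, return_tensors="pt")
    y = model.generate(x, max_new_tokens=128)
    # clean_up_tokenization_spaces=False keeps the generated code syntactically intact
    return tokenizer.decode(y[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)

iface = gr.Interface(
    fn=code_v1,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Output"),
)
iface.launch()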