# Gradio Space that turns natural-language prompts into Java or Python code
# using six fine-tuned checkpoints (StarCoderBase, CodeT5 and LLaMA-2).
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Hugging Face Hub repos for the fine-tuned checkpoints.
models = [
    "nadiamaqbool81/starcoderbase-1b-hf",
    "nadiamaqbool81/starcoderbase-1b-hf_python",
    "nadiamaqbool81/codet5-large-hf",
    "nadiamaqbool81/codet5-large-hf-python",
    "nadiamaqbool81/llama-2-7b-int4-java-code-1.178k",
    "nadiamaqbool81/llama-2-7b-int4-python-code-510"
]
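# Display names shown in the dropdown, in the same order as `models`.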
names = [
    "nadiamaqbool81/starcoderbase-java",
    "nadiamaqbool81/starcoderbase-python",
    "nadiamaqbool81/codet5-java",
    "nadiamaqbool81/codet5-python",
    "nadiamaqbool81/llama-2-java",
    "nadiamaqbool81/llama-2-python"
]
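# gr.load() wraps each Hub checkpoint behind Gradio's hosted-inference loader;
# the two LLaMA-2 models are additionally loaded locally in 4-bit on demand.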
model_box = [gr.load(f"models/{m}") for m in models]
# Lazily-loaded LLaMA-2 models, cached at module level so the expensive
# 4-bit load happens only once per process and is reused on later calls.
python_model, python_tokenizer = None, None
java_model, java_tokenizer = None, None


def the_process(input_text, model_choice):
    global python_model, python_tokenizer, java_model, java_tokenizer
    if model_choice == 5:  # LLaMA-2 Python: load locally in 4-bit on first use
        if python_model is None:
            python_tokenizer = AutoTokenizer.from_pretrained(models[5])
            python_model = AutoModelForCausalLM.from_pretrained(
                models[5], load_in_4bit=True, torch_dtype=torch.float16, device_map={"": 0}
            )
        output = run_predict(input_text, python_model, python_tokenizer)
    elif model_choice == 4:  # LLaMA-2 Java: same lazy-loading pattern
        if java_model is None:
            java_tokenizer = AutoTokenizer.from_pretrained(models[4])
            java_model = AutoModelForCausalLM.from_pretrained(
                models[4], load_in_4bit=True, torch_dtype=torch.float16, device_map={"": 0}
            )
        output = run_predict(input_text, java_model, java_tokenizer)
    else:  # StarCoderBase / CodeT5 run through the gr.load() wrappers
        output = model_box[model_choice](input_text)
    return output


def run_predict(text, model, tokenizer):
    # Wrap the prompt in the LLaMA-2 instruction format and keep only the
    # text the model generates after the closing [/INST] tag.
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=300)
    result = pipe(f"<s>[INST] {text} [/INST]")
    return result[0]["generated_text"].split("[/INST]")[1]
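
# A minimal sketch of exercising run_predict directly (hypothetical prompt;
# assumes a CUDA GPU and the bitsandbytes package for the 4-bit load):
#   tok = AutoTokenizer.from_pretrained(models[5])
#   mdl = AutoModelForCausalLM.from_pretrained(models[5], load_in_4bit=True,
#                                              torch_dtype=torch.float16, device_map={"": 0})
#   print(run_predict("Write a function to reverse a string.", mdl, tok))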


# Standalone components handed to gr.Interface, which lays them out; the
# dropdown's type="index" passes the selected position to the_process().
model_choice = gr.Dropdown(label="Select Model", choices=names, type="index", interactive=True)
input_text = gr.Textbox(label="Input Prompt")
output_window = gr.Code(label="Generated Code")

interface = gr.Interface(
    fn=the_process,
    inputs=[input_text, model_choice],
    outputs=output_window,
    title="Text to Code Generation",
)
interface.launch()
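
# Example prompt to try once the app is running (hypothetical input): pick
# "nadiamaqbool81/llama-2-python" in the dropdown and enter
# "Write a Python function that returns the n-th Fibonacci number."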