Spaces: Runtime error
Commit cd3e20f · Parent: 2486c65 · Update app.py

app.py CHANGED
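This commit narrows the_process to the Python model path: the unfinished Java branch (model_choice == 4, the llama-2-7b-int4-java-code-1.178k checkpoint) and its javaFlag are removed, the nadiamaqbool81/starcoderbase-1b-hf_python tokenizer and model are now loaded lazily inside the model_choice == 5 branch, debug prints are added, and the previously unterminated gr.Interface(...) call is closed and launched with debug=True. A sketch of the run_predict helper the new code still relies on follows the diff.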
@@ -1,6 +1,6 @@
 import gradio as gr
 import torch
-from transformers import AutoTokenizer,AutoModelForCausalLM,pipeline
+from transformers import T5ForConditionalGeneration, AutoTokenizer, RobertaTokenizer,AutoModelForCausalLM,pipeline,TrainingArguments
 
 
 
@@ -30,32 +30,22 @@ model_box=[
 ]
 current_model=model_box[0]
 pythonFlag = "false"
-javaFlag = "false"
 
 def the_process(input_text, model_choice):
     global pythonFlag
-
+    print("Inside the_process for python 0", pythonFlag)
     global output
-
-
-    global tokenizerJava
-    global modelJava
-    if(model_choice == 5):
+    print("Inside the_process for python 1", model_choice)
+    if(model_choice==5):
         if(pythonFlag == "false"):
-
-
+            print("Inside llama for python")
+            tokenizer = AutoTokenizer.from_pretrained("nadiamaqbool81/starcoderbase-1b-hf_python")
+            model = AutoModelForCausalLM.from_pretrained("nadiamaqbool81/starcoderbase-1b-hf_python", load_in_4bit=True, torch_dtype=torch.float16, device_map= {"": 0} )
             output = run_predict(input_text, model, tokenizer)
+            print("output" , output)
             pythonFlag = "true"
         elif(pythonFlag == "true"):
-
-    elif(model_choice == 4):
-        if(javaFlag == "false"):
-            tokenizerJava = AutoTokenizer.from_pretrained("nadiamaqbool81/llama-2-7b-int4-java-code-1.178k")
-            modelJava = AutoModelForCausalLM.from_pretrained("nadiamaqbool81/llama-2-7b-int4-java-code-1.178k", load_in_4bit=True, torch_dtype=torch.float16, device_map= {"": 0})
-            output = run_predict(input_text, modelJava, tokenizerJava)
-            javaFlag = "true"
-        elif(javaFlag == "true"):
-            output = run_predict(input_text, modelJava, tokenizerJava)
+            print("pythonFlag", pythonFlag)
     else:
         a_variable = model_box[model_choice]
         output = a_variable(input_text)
@@ -74,7 +64,6 @@ gr.HTML("""<h1 style="font-weight:600;font-size:50;margin-top:4px;margin-bottom:
 model_choice = gr.Dropdown(label="Select Model", choices=[m for m in names], type="index", interactive=True)
 input_text = gr.Textbox(label="Input Prompt")
 output_window = gr.Code(label="Generated Code")
-title = "Text to Code Generation Models Comparison "
 
-interface = gr.Interface(fn=the_process, inputs=[input_text, model_choice], outputs="text"
-interface.launch()
+interface = gr.Interface(fn=the_process, inputs=[input_text, model_choice], outputs="text")
+interface.launch(debug=True)
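Both versions of the_process call run_predict(input_text, model, tokenizer), a helper defined elsewhere in app.py and untouched by this commit. For orientation, here is a minimal sketch of what such a helper could look like for these causal LMs; the generation settings (greedy decoding, max_new_tokens=256) are assumptions, not the app's actual values:

import torch

# Hypothetical stand-in for the run_predict helper the diff calls; the real
# definition lives elsewhere in app.py.
def run_predict(input_text, model, tokenizer):
    # Tokenize the prompt and move it to the model's device (GPU 0 here,
    # given device_map={"": 0} at load time).
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    with torch.no_grad():
        # Assumed generation settings; the real helper may differ.
        output_ids = model.generate(**inputs, max_new_tokens=256)
    # Decode only the newly generated tokens, dropping the echoed prompt.
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

One caveat the debug prints hint at: on the elif(pythonFlag == "true") path the new code only prints the flag, so a second Python-model request reuses whatever the global output still holds rather than generating again.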
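As a design note, the string-flag pattern (pythonFlag, formerly javaFlag) needs one global per model and stringly-typed state. Below is a sketch of an alternative the app does not use, caching each loaded checkpoint in a dict so lazy loading stays uniform across models:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical cache, not part of app.py: one (tokenizer, model) per repo id.
_loaded = {}

def get_model(repo_id):
    # Load on first use, then reuse -- replaces the per-model boolean strings.
    if repo_id not in _loaded:
        tokenizer = AutoTokenizer.from_pretrained(repo_id)
        model = AutoModelForCausalLM.from_pretrained(
            repo_id,
            load_in_4bit=True,
            torch_dtype=torch.float16,
            device_map={"": 0},
        )
        _loaded[repo_id] = (tokenizer, model)
    return _loaded[repo_id]

The model_choice == 5 branch would then reduce to tokenizer, model = get_model("nadiamaqbool81/starcoderbase-1b-hf_python") followed by the existing run_predict call.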