Spaces:
acecalisto3
committed on
Update app.py
app.py
CHANGED
@@ -1,58 +1,49 @@
-
-
 
-# Function to merge models at equal weights
-def merge_models(models):
-    model_weights = [1.0 / len(models) for _ in range(len(models))]
-    merged_model = pipeline("text-generation", model=models, model_weights=model_weights)
-    return merged_model
-
-# Retrieve code-generative models with config.json
 def get_code_generative_models():
-
-
 
-
 
-
-    model_id = model.modelId
-    model_info = api.model_info(model_id)
 
-
-
 
-
 
-
-
-
-
-
-
-
-
-    models = [model for model in code_generative_models[:2]]  # Select the first two models for merging
-    merged_model = merge_models(models)
-
-    # Embed the merged model into a chat app for testing
-    chat_app = pipeline("text-generation", model=merged_model)
-
-    # Provide options for the user to download the code/config or deploy the merged model
-    print("Chat App Ready for Testing!")
-    print("Options:")
-    print("1. Download Code/Config")
-    print("2. Deploy as a Unique Space (Requires Write-Permission API Key)")
-
-    user_choice = input("Enter your choice (1 or 2): ")
-
-    if user_choice == "1":
-        # Download code/config
-        merged_model.save_pretrained("merged_model")
-
-    elif user_choice == "2":
-        # Deploy as a Unique Space with write-permission API Key
-        api_key = input("Enter your write-permission API Key: ")
-        # Code to deploy the merged model using the provided API key
 
 if __name__ == "__main__":
     main()
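Note on the removed code: transformers.pipeline() takes a single model and has no model_weights parameter, so merge_models() as written could not run. Merging at equal weights has to happen at the parameter level instead. A minimal sketch of that idea, assuming every model shares the same architecture (average_state_dicts is a hypothetical helper, not part of this Space):

import torch

def average_state_dicts(models):
    # Equal-weight merge: average every parameter tensor across the models.
    # Assumes identical architectures, so all state dicts share keys/shapes.
    state_dicts = [m.state_dict() for m in models]
    merged = {}
    for key in state_dicts[0]:
        merged[key] = torch.stack([sd[key].float() for sd in state_dicts]).mean(dim=0)
    return merged

# Usage: load the averaged weights back into one of the source models, e.g.
# base_model.load_state_dict(average_state_dicts([model_a, model_b]))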
+import os
+import gradio as gr
+from transformers import AutoModel, AutoTokenizer
 
 def get_code_generative_models():
+    models_dir = os.path.join(os.getcwd(), "models")
+    models = []
+    for model_name in os.listdir(models_dir):
+        model_path = os.path.join(models_dir, model_name)
+        if os.path.isdir(model_path):
+            model_info = AutoModel.from_pretrained(model_path)
+            if "config.json" in [f.name for f in model_info.files]:
+                models.append((model_name, model_path))
+    return models
+
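A caveat on the discovery loop above: an AutoModel instance exposes no .files attribute, so the config.json check raises AttributeError, and it loads full model weights just to inspect a directory. A lighter sketch that checks the filesystem instead, assuming the same local models/ layout:

import os

def get_code_generative_models(models_dir="models"):
    # A transformers checkpoint directory always ships a config.json, so a
    # filesystem check is enough; no model weights are loaded here.
    models = []
    for model_name in os.listdir(models_dir):
        model_path = os.path.join(models_dir, model_name)
        if os.path.isdir(model_path) and os.path.isfile(os.path.join(model_path, "config.json")):
            models.append((model_name, model_path))
    return models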
+def model_inference(model_name, model_path, input_data):
+    tokenizer = AutoTokenizer.from_pretrained(model_path)
+    model = AutoModel.from_pretrained(model_path)
+    inputs = tokenizer(input_data, return_tensors="pt")
+    outputs = model(**inputs)
+    result = outputs.last_hidden_state[:, 0, :]
+    return result.tolist()
 
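Note that model_inference() returns the first-token hidden state (an embedding vector), not generated text: AutoModel loads the bare transformer without a language-modeling head. If the Space is meant to generate code, a causal-LM head and generate() are needed; a sketch under that assumption (generate_code is a hypothetical replacement using the standard AutoModelForCausalLM API):

from transformers import AutoModelForCausalLM, AutoTokenizer

def generate_code(model_path, prompt, max_new_tokens=64):
    # Load the checkpoint with its language-modeling head so it can
    # actually produce tokens rather than hidden states.
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(model_path)
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)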
+def main():
+    models = get_code_generative_models()
+    with gr.Blocks() as demo:
+        gr.Markdown("### Select Model and Input")
+        with gr.Row():
+            model_name = gr.Dropdown(label="Model", choices=[m[0] for m in models])
+            input_data = gr.Textbox(label="Input")
 
+        model_path = gr.State(None)
 
+        def update_model_path(model_name):
+            model_path.set(next(filter(lambda m: m[0] == model_name, models))[1])
 
+        input_data.change(update_model_path, inputs=model_name, outputs=model_path)
 
+        output = gr.Textbox(label="Output")
+
+        def infer(model_name, input_data):
+            return model_inference(model_name, model_path, input_data)
+
+        output.change(fn=infer, inputs=[model_name, input_data], outputs=output)
+
+        interface = demo.launch()
 
 if __name__ == "__main__":
     main()
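The event wiring in main() has several problems: gr.State has no .set() method (a handler updates state by returning the new value through outputs=), the state update is attached to input_data.change rather than the dropdown, infer() passes the gr.State component itself instead of its value, and inference hangs off output.change, which only fires after the output has already changed, so nothing ever triggers it. One possible rewiring that keeps the commit's components and its get_code_generative_models()/model_inference() helpers, sketched with a gr.Button in place of the state round-trip (not the committed code):

import gradio as gr

def main():
    models = get_code_generative_models()
    name_to_path = dict(models)  # dropdown value -> checkpoint path

    with gr.Blocks() as demo:
        gr.Markdown("### Select Model and Input")
        with gr.Row():
            model_name = gr.Dropdown(label="Model", choices=list(name_to_path))
            input_data = gr.Textbox(label="Input")
        output = gr.Textbox(label="Output")
        run = gr.Button("Run")

        def infer(name, text):
            # Resolve the path per call; no gr.State is needed because the
            # dropdown value already identifies the model.
            return model_inference(name, name_to_path[name], text)

        run.click(fn=infer, inputs=[model_name, input_data], outputs=output)

    demo.launch()

if __name__ == "__main__":
    main()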