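"""Gradio Space that downloads a text-to-waveform model from the Hugging Face Hub
and exports it to ONNX, returning the name of the exported file."""
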
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForTextToWaveform

def install_model(namemodel, tokenn, namemodelonxx):
    # Download the model from the Hub (using the provided access token) and export it to ONNX.
    model = AutoModelForTextToWaveform.from_pretrained(namemodel, token=tokenn)
    namemodelonxxx = convert_to_onnx(model, namemodelonxx)
    return namemodelonxxx

def convert_to_onnx(model, namemodelonxx):
    # Build a dummy batch of token ids sized to the text encoder's vocabulary.
    vocab_size = model.text_encoder.embed_tokens.weight.size(0)
    example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
    x = f"wasmdashai/{namemodelonxx}.onnx"
    torch.onnx.export(
        model,                    # The model to be exported
        example_input,            # Example input for the model
        x,                        # The filename for the exported ONNX model
        opset_version=11,         # Use an appropriate ONNX opset version
        input_names=['input'],    # Name of the input layer
        output_names=['output'],  # Name of the output layer
        dynamic_axes={
            'input': {0: 'batch_size', 1: 'sequence_length'},  # Dynamic axes for variable-length inputs
            'output': {0: 'batch_size'}
        }
    )
    return x

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            text_n_model = gr.Textbox(label="model name")
            text_n_token = gr.Textbox(label="token")
            text_n_onxx = gr.Textbox(label="ONNX model name")
        with gr.Column():
            btn = gr.Button("convert")
            label = gr.Label("returned ONNX model name")
    # Download, convert, and display the name of the exported ONNX file.
    btn.click(install_model, [text_n_model, text_n_token, text_n_onxx], [label])

demo.launch()