import os

import torch
import gradio as gr
from transformers import AutoModelForTextToWaveform


def install_model(namemodel, tokenn, namemodelonxx):
    """Download a text-to-waveform model from the Hub and export it to ONNX."""
    model = AutoModelForTextToWaveform.from_pretrained(namemodel, token=tokenn)
    onnx_path = convert_to_onnx(model, namemodelonxx)
    return onnx_path


def convert_to_onnx(model, namemodelonxx):
    """Export the model to ONNX using a dummy batch of token ids."""
    # Build a dummy input of valid token ids, sized from the text encoder's vocabulary.
    vocab_size = model.text_encoder.embed_tokens.weight.size(0)
    example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)

    # Make sure the output directory exists before exporting.
    os.makedirs("wasmdashai", exist_ok=True)
    onnx_path = f"wasmdashai/{namemodelonxx}.onnx"

    model.eval()
    torch.onnx.export(
        model,                    # the model to be exported
        example_input,            # example input used to trace the graph
        onnx_path,                # destination file for the ONNX model
        opset_version=11,         # ONNX opset version
        input_names=['input'],    # name of the input tensor
        output_names=['output'],  # name of the output tensor
        dynamic_axes={
            'input': {0: 'batch_size', 1: 'sequence_length'},  # variable-length inputs
            'output': {0: 'batch_size'},
        },
    )
    return onnx_path


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            text_n_model = gr.Textbox(label="model name")
            text_n_token = gr.Textbox(label="token")
            text_n_onxx = gr.Textbox(label="ONNX model name")
        with gr.Column():
            btn = gr.Button("Convert")
            label = gr.Label("returned ONNX model path")
    btn.click(install_model, [text_n_model, text_n_token, text_n_onxx], [label])

demo.launch()
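
# Optional verification sketch (not wired into the UI above): after conversion,
# the exported file can be loaded with onnxruntime to confirm the graph runs.
# This assumes `onnxruntime` and `numpy` are installed; the function name
# `check_onnx_export`, the 64-token dummy sequence, and the id range of 100 are
# illustrative choices, and the feed name "input" matches the `input_names`
# passed to torch.onnx.export above.
#
# import numpy as np
# import onnxruntime as ort
#
# def check_onnx_export(onnx_path):
#     session = ort.InferenceSession(onnx_path)
#     dummy_ids = np.random.randint(0, 100, size=(1, 64), dtype=np.int64)
#     outputs = session.run(None, {"input": dummy_ids})
#     print("ONNX output shapes:", [o.shape for o in outputs])
#
# check_onnx_export("wasmdashai/example.onnx")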