wasmdashai committed
Commit bc506be · verified · 1 parent: 932e4e8

Update app.py

Files changed (1):
  1. app.py +82 -0
app.py CHANGED
@@ -40,6 +40,86 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  # discriminator=False,
  # duration=False
  # )
+ class model_onxx:
+     def __init__(self):
+         self.model=None
+         self.n_onxx=""
+         pass
+     def function_change(self,n_model,token,n_onxx,choice):
+         if choice=="decoder":
+
+             V=self.convert_model_decoder_onxx(n_model,token,n_onxx)
+         elif choice=="all only decoder":
+             V=self.convert_model_decoder_onxx(n_model,token,n_onxx)
+         else:
+             V=self.convert_to_onnx_all(n_model,token,n_onxx)
+         return V
+
+     def install_model(self,n_model,token,n_onxx):
+         self.n_onxx=n_onxx
+         self.model= VitsModel.from_pretrained(n_model,token=token)
+         return self.model
+     def convert_model_decoder_onxx(self,n_model,token,namemodelonxx):
+         self.model= VitsModel.from_pretrained(n_model,token=token)
+         x=f"{namemodelonxx}.onnx"
+         return x
+     def convert_to_onnx_only_decoder(self,n_model,token,namemodelonxx):
+         model=VitsModel.from_pretrained(n_model,token=token)
+         x=f"{namemodelonxx}.onnx"
+         vocab_size = model.text_encoder.embed_tokens.weight.size(0)
+         example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
+         torch.onnx.export(
+             model,                    # The model to be exported
+             example_input,            # Example input for the model
+             x,                        # The filename for the exported ONNX model
+             opset_version=11,         # Use an appropriate ONNX opset version
+             input_names=['input'],    # Name of the input layer
+             output_names=['output'],  # Name of the output layer
+             dynamic_axes={
+                 'input': {0: 'batch_size', 1: 'sequence_length'},  # Dynamic axes for variable-length inputs
+                 'output': {0: 'batch_size'}
+             }
+         )
+         return x
+     def convert_to_onnx_all(self,n_model,token ,namemodelonxx):
+
+         model=VitsModel.from_pretrained(n_model,token=token)
+         x=f"{namemodelonxx}.onnx"
+
+         vocab_size = model.text_encoder.embed_tokens.weight.size(0)
+         example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
+         torch.onnx.export(
+             model,                    # The model to be exported
+             example_input,            # Example input for the model
+             x,                        # The filename for the exported ONNX model
+             opset_version=11,         # Use an appropriate ONNX opset version
+             input_names=['input'],    # Name of the input layer
+             output_names=['output'],  # Name of the output layer
+             dynamic_axes={
+                 'input': {0: 'batch_size', 1: 'sequence_length'},  # Dynamic axes for variable-length inputs
+                 'output': {0: 'batch_size'}
+             }
+         )
+         return x
+     def starrt(self):
+         #with gr.Blocks() as demo:
+         with gr.Row():
+             with gr.Column():
+                 text_n_model=gr.Textbox(label="name model")
+                 text_n_token=gr.Textbox(label="token")
+                 text_n_onxx=gr.Textbox(label="name model onxx")
+                 choice = gr.Dropdown(choices=["decoder", "all anoly decoder", "All"], label="My Dropdown")
+
+             with gr.Column():
+
+                 btn=gr.Button("convert")
+                 label=gr.Label("return name model onxx")
+                 btn.click(self.function_change,[text_n_model,text_n_token,text_n_onxx,choice],[label])
+         #choice.change(fn=function_change, inputs=choice, outputs=label)
+         #return demo
+ c=model_onxx()
+ #cc=c.starrt()
+ ###############################################################
  Lst=['input_ids',
  'attention_mask',
  'waveform',
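
Both export paths above hand `torch.onnx.export` a random `(1, 100)` `input_ids` tensor and declare dynamic batch and sequence axes, but the commit never loads the file it writes. As a hedged sketch only (assuming the export completes and `onnxruntime` is installed; the filename below is a placeholder for the value returned by `convert_to_onnx_all`), a quick smoke test could look like:

```python
# Sketch: smoke-test an exported VITS graph with onnxruntime.
# "mymodel.onnx" is a placeholder for the path returned by convert_to_onnx_all().
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("mymodel.onnx", providers=["CPUExecutionProvider"])

# The export names its input 'input' and marks batch_size/sequence_length dynamic,
# so any (1, N) int64 matrix of token ids is a valid feed.
token_ids = np.random.randint(0, 100, size=(1, 50), dtype=np.int64)

outputs = session.run(None, {"input": token_ids})   # None -> return every output
print([o.shape for o in outputs])                    # the first output should be the waveform
```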
 
@@ -690,6 +770,8 @@ with gr.Blocks() as interface:
  output_ini = gr.Textbox(label="token")
  label=gr.Label("hhh")
  btn_init.click(loadd_d,[output_i],[label])
+ with gr.Accordion("read model ", open=False):
+     c.starrt()
  with gr.Accordion("init_Starting ", open=False):
  btn_init = gr.Button("init start")
  output_init = gr.Textbox(label="init")
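
The second hunk wires the converter into the existing page: `c.starrt()` runs inside an accordion that already sits in the app's `with gr.Blocks() as interface:` context, so the rows and columns it creates attach to the same interface, and `btn.click(self.function_change, [...], [label])` passes the three textbox values plus the dropdown choice to `function_change` as positional arguments. (Note that the dropdown offers "all anoly decoder" while `function_change` compares against "all only decoder", so that choice currently falls through to `convert_to_onnx_all`.) Below is a minimal, self-contained sketch of the same wiring pattern, with illustrative names and a stub callback rather than anything taken from app.py:

```python
# Sketch of the Blocks/Accordion/click pattern the commit relies on.
# Component names and fake_convert() are illustrative stand-ins, not from app.py.
import gradio as gr

def fake_convert(n_model, token, n_onnx, choice):
    # Stand-in for model_onxx.function_change(): just echo what would be exported.
    return f"{n_onnx}.onnx ({choice})"

with gr.Blocks() as demo:
    with gr.Accordion("read model", open=False):
        with gr.Row():
            with gr.Column():
                name = gr.Textbox(label="name model")
                token = gr.Textbox(label="token")
                onnx_name = gr.Textbox(label="name model onxx")
                mode = gr.Dropdown(choices=["decoder", "All"], label="mode")
            with gr.Column():
                btn = gr.Button("convert")
                result = gr.Label("return name model onxx")
        # Values of the input components arrive as positional args, in order.
        btn.click(fake_convert, [name, token, onnx_name, mode], [result])

# demo.launch()
```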