wasmdashai committed on
Commit
0901f36
·
verified ·
1 Parent(s): 78d365d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +206 -69
app.py CHANGED
@@ -34,97 +34,234 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
34
  # discriminator=False,
35
  # duration=False
36
  # )
37
- class model_onxx:
38
- def __init__(self):
39
- self.model=None
40
- self.n_onxx=""
41
- self.storage_dir = "uploads"
42
- pass
43
 
44
 
45
 
46
 
47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  def download_file(self,file_path):
49
  ff= gr.File(value=file_path, visible=True)
50
  file_url = ff.value['url']
51
  return file_url
52
- def function_change(self,n_model,token,n_onxx,choice):
53
- if choice=="decoder":
54
 
55
- V=self.convert_to_onnx_only_decoder(n_model,token,n_onxx)
56
- elif choice=="all only decoder":
57
- V=self.convert_to_onnx_only_decoder(n_model,token,n_onxx)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  else:
59
- V=self.convert_to_onnx_only_decoder(n_model,token,n_onxx)
60
- return V
61
-
62
- def install_model(self,n_model,token,n_onxx):
63
- self.n_onxx=n_onxx
64
- self.model= VitsModel.from_pretrained(n_model,token=token)
65
- return self.model
66
- def convert_model_decoder_onxx(self,n_model,token,namemodelonxx):
67
- self.model= VitsModel.from_pretrained(n_model,token=token)
68
- x=f"/tmp/{namemodelonxx}.onnx"
69
- return x
70
- def convert_to_onnx_only_decoder(self,n_model,token,namemodelonxx):
71
- model=VitsModel.from_pretrained(n_model,token=token)
72
- x=f"/tmp/{namemodelonxx}.onnx"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
- vocab_size = model.text_encoder.embed_tokens.weight.size(0)
75
- example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
76
- torch.onnx.export(
77
- model, # The model to be exported
78
- example_input, # Example input for the model
79
- x,# The filename for the exported ONNX model
80
- opset_version=11, # Use an appropriate ONNX opset version
81
- input_names=['input'], # Name of the input layer
82
- output_names=['output'], # Name of the output layer
83
- dynamic_axes={
84
- 'input': {0: 'batch_size', 1: 'sequence_length'}, # Dynamic axes for variable-length inputs
85
- 'output': {0: 'batch_size'}
86
- }
87
- )
88
- return x
89
-
90
- def convert_to_onnx_all(self,n_model,token ,namemodelonxx):
91
-
92
- model=VitsModel.from_pretrained(n_model,token=token)
93
- x=f"dowload_file/{namemodelonxx}.onnx"
94
-
95
- vocab_size = model.text_encoder.embed_tokens.weight.size(0)
96
- example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
97
- torch.onnx.export(
98
- model, # The model to be exported
99
- example_input, # Example input for the model
100
- x, # The filename for the exported ONNX model
101
- opset_version=11, # Use an appropriate ONNX opset version
102
- input_names=['input'], # Name of the input layer
103
- output_names=['output'], # Name of the output layer
104
- dynamic_axes={
105
- 'input': {0: 'batch_size', 1: 'sequence_length'}, # Dynamic axes for variable-length inputs
106
- 'output': {0: 'batch_size'}
107
- }
108
- )
109
- return x
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
  def starrt(self):
111
- #with gr.Blocks() as demo:
112
  with gr.Row():
113
  with gr.Column():
114
  text_n_model=gr.Textbox(label="name model")
115
  text_n_token=gr.Textbox(label="token")
116
  text_n_onxx=gr.Textbox(label="name model onxx")
117
- choice = gr.Dropdown(choices=["decoder", "all anoly decoder", "All"], label="My Dropdown")
118
-
119
  with gr.Column():
120
-
121
  btn=gr.Button("convert")
122
  label=gr.Label("return name model onxx")
123
- btn.click(self.function_change,[text_n_model,text_n_token,text_n_onxx,choice],[gr.File(label="Download File")])
124
  #choice.change(fn=function_change, inputs=choice, outputs=label)
125
- #return demo
126
- c=model_onxx()
127
- #cc=c.starrt()
128
  ###############################################################
129
  Lst=['input_ids',
130
  'attention_mask',
 
34
  # discriminator=False,
35
  # duration=False
36
  # )
37
+ # class model_onxx:
38
+ # def __init__(self):
39
+ # self.model=None
40
+ # self.n_onxx=""
41
+ # self.storage_dir = "uploads"
42
+ # pass
43
 
44
 
45
 
46
 
47
 
48
+ # def download_file(self,file_path):
49
+ # ff= gr.File(value=file_path, visible=True)
50
+ # file_url = ff.value['url']
51
+ # return file_url
52
+ # def function_change(self,n_model,token,n_onxx,choice):
53
+ # if choice=="decoder":
54
+
55
+ # V=self.convert_to_onnx_only_decoder(n_model,token,n_onxx)
56
+ # elif choice=="all only decoder":
57
+ # V=self.convert_to_onnx_only_decoder(n_model,token,n_onxx)
58
+ # else:
59
+ # V=self.convert_to_onnx_only_decoder(n_model,token,n_onxx)
60
+ # return V
61
+
62
+ # def install_model(self,n_model,token,n_onxx):
63
+ # self.n_onxx=n_onxx
64
+ # self.model= VitsModel.from_pretrained(n_model,token=token)
65
+ # return self.model
66
+ # def convert_model_decoder_onxx(self,n_model,token,namemodelonxx):
67
+ # self.model= VitsModel.from_pretrained(n_model,token=token)
68
+ # x=f"/tmp/{namemodelonxx}.onnx"
69
+ # return x
70
+ # def convert_to_onnx_only_decoder(self,n_model,token,namemodelonxx):
71
+ # model=VitsModel.from_pretrained(n_model,token=token)
72
+ # x=f"/tmp/{namemodelonxx}.onnx"
73
+
74
+ # vocab_size = model.text_encoder.embed_tokens.weight.size(0)
75
+ # example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
76
+ # torch.onnx.export(
77
+ # model, # The model to be exported
78
+ # example_input, # Example input for the model
79
+ # x,# The filename for the exported ONNX model
80
+ # opset_version=11, # Use an appropriate ONNX opset version
81
+ # input_names=['input'], # Name of the input layer
82
+ # output_names=['output'], # Name of the output layer
83
+ # dynamic_axes={
84
+ # 'input': {0: 'batch_size', 1: 'sequence_length'}, # Dynamic axes for variable-length inputs
85
+ # 'output': {0: 'batch_size'}
86
+ # }
87
+ # )
88
+ # return x
89
+
90
+ # def convert_to_onnx_all(self,n_model,token ,namemodelonxx):
91
+
92
+ # model=VitsModel.from_pretrained(n_model,token=token)
93
+ # x=f"dowload_file/{namemodelonxx}.onnx"
94
+
95
+ # vocab_size = model.text_encoder.embed_tokens.weight.size(0)
96
+ # example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
97
+ # torch.onnx.export(
98
+ # model, # The model to be exported
99
+ # example_input, # Example input for the model
100
+ # x, # The filename for the exported ONNX model
101
+ # opset_version=11, # Use an appropriate ONNX opset version
102
+ # input_names=['input'], # Name of the input layer
103
+ # output_names=['output'], # Name of the output layer
104
+ # dynamic_axes={
105
+ # 'input': {0: 'batch_size', 1: 'sequence_length'}, # Dynamic axes for variable-length inputs
106
+ # 'output': {0: 'batch_size'}
107
+ # }
108
+ # )
109
+ # return x
110
+ # def starrt(self):
111
+ # #with gr.Blocks() as demo:
112
+ # with gr.Row():
113
+ # with gr.Column():
114
+ # text_n_model=gr.Textbox(label="name model")
115
+ # text_n_token=gr.Textbox(label="token")
116
+ # text_n_onxx=gr.Textbox(label="name model onxx")
117
+ # choice = gr.Dropdown(choices=["decoder", "all only decoder", "All"], label="My Dropdown")
118
+
119
+ # with gr.Column():
120
+
121
+ # btn=gr.Button("convert")
122
+ # label=gr.Label("return name model onxx")
123
+ # btn.click(self.function_change,[text_n_model,text_n_token,text_n_onxx,choice],[gr.File(label="Download File")])
124
+ # #choice.change(fn=function_change, inputs=choice, outputs=label)
125
+ # #return demo
126
+ # c=model_onxx()
127
+
128
+ #3333333333333333333333333333
129
+ class OnnxModelConverter:
130
+ def __init__(self):
131
+ self.model = None
132
  def download_file(self,file_path):
133
  ff= gr.File(value=file_path, visible=True)
134
  file_url = ff.value['url']
135
  return file_url
 
 
136
 
137
+ def convert(self, model_name, token, onnx_filename, conversion_type):
138
+ """
139
+ Main function to handle different types of model conversions.
140
+
141
+ Args:
142
+ model_name (str): Name of the model to convert.
143
+ token (str): Access token for loading the model.
144
+ onnx_filename (str): Desired filename for the ONNX output.
145
+ conversion_type (str): Type of conversion ('decoder', 'only_decoder', or 'full_model').
146
+
147
+ Returns:
148
+ str: The path to the generated ONNX file.
149
+ """
150
+ if conversion_type == "decoder":
151
+ return self.convert_decoder(model_name, token, onnx_filename)
152
+ elif conversion_type == "only_decoder":
153
+ return self.convert_only_decoder(model_name, token, onnx_filename)
154
+ elif conversion_type == "full_model":
155
+ return self.convert_full_model(model_name, token, onnx_filename)
156
  else:
157
+ raise ValueError("Invalid conversion type. Choose from 'decoder', 'only_decoder', or 'full_model'.")
158
+
159
+ def convert_decoder(self, model_name, token, onnx_filename):
160
+ """
161
+ Converts only the decoder part of the Vits model to ONNX format.
162
+
163
+ Args:
164
+ model_name (str): Name of the model to convert.
165
+ token (str): Access token for loading the model.
166
+ onnx_filename (str): Desired filename for the ONNX output.
167
+
168
+ Returns:
169
+ str: The path to the generated ONNX file.
170
+ """
171
+ model = VitsModel.from_pretrained(model_name, token=token)
172
+ onnx_file = f"/tmp/{onnx_filename}.onnx"
173
+ vocab_size = model.text_encoder.embed_tokens.weight.size(0)
174
+ example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
175
+
176
+ torch.onnx.export(
177
+ model,
178
+ example_input,
179
+ onnx_file,
180
+ opset_version=11,
181
+ input_names=['input'],
182
+ output_names=['output'],
183
+ dynamic_axes={'input': {0: 'batch_size', 1: 'sequence_length'}, 'output': {0: 'batch_size'}}
184
+ )
185
 
186
+ return onnx_file
187
+
188
+
189
+ def convert_only_decoder(self, model_name, token, onnx_filename):
190
+ """
191
+ Converts only the decoder part of the Vits model to ONNX format.
192
+
193
+ Args:
194
+ model_name (str): Name of the model to convert.
195
+ token (str): Access token for loading the model.
196
+ onnx_filename (str): Desired filename for the ONNX output.
197
+
198
+ Returns:
199
+ str: The path to the generated ONNX file.
200
+ """
201
+ model = Vits_models_only_decoder.from_pretrained(model_name, token=token)
202
+ onnx_file = f"/tmp/{onnx_filename}.onnx"
203
+
204
+ vocab_size = model.text_encoder.embed_tokens.weight.size(0)
205
+ example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
206
+
207
+ torch.onnx.export(
208
+ model,
209
+ example_input,
210
+ onnx_file,
211
+ opset_version=11,
212
+ input_names=['input'],
213
+ output_names=['output'],
214
+ dynamic_axes={'input': {0: 'batch_size', 1: 'sequence_length'}, 'output': {0: 'batch_size'}}
215
+ )
216
+
217
+ return onnx_file
218
+
219
+ def convert_full_model(self, model_name, token, onnx_filename):
220
+ """
221
+ Converts the full Vits model (including encoder and decoder) to ONNX format.
222
+
223
+ Args:
224
+ model_name (str): Name of the model to convert.
225
+ token (str): Access token for loading the model.
226
+ onnx_filename (str): Desired filename for the ONNX output.
227
+
228
+ Returns:
229
+ str: The path to the generated ONNX file.
230
+ """
231
+ model = VitsModel.from_pretrained(model_name, token=token)
232
+ onnx_file = f"/tmp/{onnx_filename}.onnx"
233
+
234
+ vocab_size = model.text_encoder.embed_tokens.weight.size(0)
235
+ example_input = torch.randint(0, vocab_size, (1, 100), dtype=torch.long)
236
+
237
+ torch.onnx.export(
238
+ model,
239
+ example_input,
240
+ onnx_file,
241
+ opset_version=11,
242
+ input_names=['input'],
243
+ output_names=['output'],
244
+ dynamic_axes={'input': {0: 'batch_size', 1: 'sequence_length'}, 'output': {0: 'batch_size'}}
245
+ )
246
+
247
+ return onnx_file
248
  def starrt(self):
249
+ with gr.Blocks() as demo:
250
  with gr.Row():
251
  with gr.Column():
252
  text_n_model=gr.Textbox(label="name model")
253
  text_n_token=gr.Textbox(label="token")
254
  text_n_onxx=gr.Textbox(label="name model onxx")
255
+ choice = gr.Dropdown(choices=["decoder", "only_decoder", "full_model"], label="My Dropdown")
256
+
257
  with gr.Column():
258
+
259
  btn=gr.Button("convert")
260
  label=gr.Label("return name model onxx")
261
+ btn.click(self.convert,[text_n_model,text_n_token,text_n_onxx,choice],[gr.File(label="Download File")])
262
  #choice.change(fn=function_change, inputs=choice, outputs=label)
263
+ return demo
264
+ c=OnnxModelConverter()
 
265
  ###############################################################
266
  Lst=['input_ids',
267
  'attention_mask',