<?xml version="1.0"?>
<net name="Model6" version="11">
	<layers>
<layer id="0" name="raw_spectrogram" type="Parameter" version="opset1">
|
|
<data shape="?,?,2,80" element_type="f32" />
|
|
<output>
|
|
<port id="0" precision="FP32" names="raw_spectrogram">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>2</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="1" name="aten::transpose/Constant" type="Const" version="opset1">
|
|
<data element_type="i32" shape="4" offset="0" size="16" />
|
|
<output>
|
|
<port id="0" precision="I32">
|
|
<dim>4</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="2" name="aten::transpose/Transpose" type="Transpose" version="opset1">
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>2</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
<port id="1" precision="I32">
|
|
<dim>4</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="19">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>2</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="3" name="Constant_57260" type="Const" version="opset1">
|
|
<data element_type="i32" shape="1" offset="16" size="4" />
|
|
<output>
|
|
<port id="0" precision="I32">
|
|
<dim>1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="4" name="aten::flatten/Constant_2" type="Const" version="opset1">
|
|
<data element_type="i32" shape="1" offset="20" size="4" />
|
|
<output>
|
|
<port id="0" precision="I32">
|
|
<dim>1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="5" name="ShapeOf_57144" type="ShapeOf" version="opset3">
|
|
<data output_type="i32" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>2</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="1" precision="I32">
|
|
<dim>4</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="6" name="Constant_57151" type="Const" version="opset1">
|
|
<data element_type="i64" shape="1" offset="24" size="8" />
|
|
<output>
|
|
<port id="0" precision="I64">
|
|
<dim>1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="7" name="Constant_57152" type="Const" version="opset1">
|
|
<data element_type="i64" shape="" offset="32" size="8" />
|
|
<output>
|
|
<port id="0" precision="I64" />
|
|
</output>
|
|
</layer>
|
|
<layer id="8" name="Gather_57153" type="Gather" version="opset8">
|
|
<data batch_dims="0" />
|
|
<input>
|
|
<port id="0" precision="I32">
|
|
<dim>4</dim>
|
|
</port>
|
|
<port id="1" precision="I64">
|
|
<dim>1</dim>
|
|
</port>
|
|
<port id="2" precision="I64" />
|
|
</input>
|
|
<output>
|
|
<port id="3" precision="I32">
|
|
<dim>1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="9" name="aten::flatten/Concat" type="Concat" version="opset1">
|
|
<data axis="0" />
|
|
<input>
|
|
<port id="0" precision="I32">
|
|
<dim>1</dim>
|
|
</port>
|
|
<port id="1" precision="I32">
|
|
<dim>1</dim>
|
|
</port>
|
|
<port id="2" precision="I32">
|
|
<dim>1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="3" precision="I32">
|
|
<dim>3</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="10" name="aten::flatten/Reshape" type="Reshape" version="opset1">
|
|
<data special_zero="true" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>2</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
<port id="1" precision="I32">
|
|
<dim>3</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="22,hidden_states">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="11" name="aten::transpose/Constant_1" type="Const" version="opset1">
|
|
<data element_type="i32" shape="3" offset="40" size="12" />
|
|
<output>
|
|
<port id="0" precision="I32">
|
|
<dim>3</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="12" name="aten::transpose/Transpose_1" type="Transpose" version="opset1">
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
<port id="1" precision="I32">
|
|
<dim>3</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="25,input.1">
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="13" name="Multiply_57081" type="Const" version="opset1">
|
|
<data element_type="f32" shape="256, 80, 5" offset="52" size="409600" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>256</dim>
|
|
<dim>80</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="14" name="Multiply_57044" type="Convolution" version="opset1">
|
|
<data strides="1" dilations="1" pads_begin="2" pads_end="2" auto_pad="explicit" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>256</dim>
|
|
<dim>80</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="15" name="Constant_57049" type="Const" version="opset1">
|
|
<data element_type="f32" shape="1, 256, 1" offset="409652" size="1024" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>256</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="16" name="__module.speech_decoder_postnet.layers.0.batch_norm/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
|
|
<data auto_broadcast="numpy" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>256</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="60,input.5">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="17" name="__module.speech_decoder_postnet.layers.0.activation/aten::tanh/Tanh" type="Tanh" version="opset1">
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="1" precision="FP32" names="61,input.7">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="18" name="Multiply_57085" type="Const" version="opset1">
|
|
<data element_type="f32" shape="256, 256, 5" offset="410676" size="1310720" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>256</dim>
|
|
<dim>256</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="19" name="Multiply_57051" type="Convolution" version="opset1">
|
|
<data strides="1" dilations="1" pads_begin="2" pads_end="2" auto_pad="explicit" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>256</dim>
|
|
<dim>256</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="20" name="Constant_57056" type="Const" version="opset1">
|
|
<data element_type="f32" shape="1, 256, 1" offset="1721396" size="1024" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>256</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="21" name="__module.speech_decoder_postnet.layers.1.batch_norm/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
|
|
<data auto_broadcast="numpy" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>256</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="86,input.13">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="22" name="__module.speech_decoder_postnet.layers.1.activation/aten::tanh/Tanh" type="Tanh" version="opset1">
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="1" precision="FP32" names="87,input.15">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="23" name="Multiply_57089" type="Const" version="opset1">
|
|
<data element_type="f32" shape="256, 256, 5" offset="1722420" size="1310720" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>256</dim>
|
|
<dim>256</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="24" name="Multiply_57058" type="Convolution" version="opset1">
|
|
<data strides="1" dilations="1" pads_begin="2" pads_end="2" auto_pad="explicit" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>256</dim>
|
|
<dim>256</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="25" name="Constant_57063" type="Const" version="opset1">
|
|
<data element_type="f32" shape="1, 256, 1" offset="3033140" size="1024" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>256</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="26" name="__module.speech_decoder_postnet.layers.2.batch_norm/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
|
|
<data auto_broadcast="numpy" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>256</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="112,input.21">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="27" name="__module.speech_decoder_postnet.layers.2.activation/aten::tanh/Tanh" type="Tanh" version="opset1">
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="1" precision="FP32" names="113,input.23">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="28" name="Multiply_57093" type="Const" version="opset1">
|
|
<data element_type="f32" shape="256, 256, 5" offset="3034164" size="1310720" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>256</dim>
|
|
<dim>256</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="29" name="Multiply_57065" type="Convolution" version="opset1">
|
|
<data strides="1" dilations="1" pads_begin="2" pads_end="2" auto_pad="explicit" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>256</dim>
|
|
<dim>256</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="30" name="Constant_57070" type="Const" version="opset1">
|
|
<data element_type="f32" shape="1, 256, 1" offset="4344884" size="1024" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>256</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="31" name="__module.speech_decoder_postnet.layers.3.batch_norm/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
|
|
<data auto_broadcast="numpy" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>256</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="138,input.29">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="32" name="__module.speech_decoder_postnet.layers.3.activation/aten::tanh/Tanh" type="Tanh" version="opset1">
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="1" precision="FP32" names="139,input.31">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="33" name="Multiply_57097" type="Const" version="opset1">
|
|
<data element_type="f32" shape="80, 256, 5" offset="4345908" size="409600" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>80</dim>
|
|
<dim>256</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="34" name="Multiply_57072" type="Convolution" version="opset1">
|
|
<data strides="1" dilations="1" pads_begin="2" pads_end="2" auto_pad="explicit" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>256</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>80</dim>
|
|
<dim>256</dim>
|
|
<dim>5</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="35" name="Constant_57077" type="Const" version="opset1">
|
|
<data element_type="f32" shape="1, 80, 1" offset="4755508" size="320" />
|
|
<output>
|
|
<port id="0" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>80</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="36" name="__module.speech_decoder_postnet.layers.4.batch_norm/aten::batch_norm/BatchNormInference" type="Add" version="opset1">
|
|
<data auto_broadcast="numpy" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>1</dim>
|
|
<dim>80</dim>
|
|
<dim>1</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="163,input">
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="37" name="aten::transpose/Constant_2" type="Const" version="opset1">
|
|
<data element_type="i32" shape="3" offset="40" size="12" />
|
|
<output>
|
|
<port id="0" precision="I32">
|
|
<dim>3</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="38" name="aten::transpose/Transpose_2" type="Transpose" version="opset1">
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
<dim>-1</dim>
|
|
</port>
|
|
<port id="1" precision="I32">
|
|
<dim>3</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="33">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="39" name="aten::add/Add" type="Add" version="opset1">
|
|
<data auto_broadcast="numpy" />
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
<port id="1" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
</input>
|
|
<output>
|
|
<port id="2" precision="FP32" names="postnet_spectrogram">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
</output>
|
|
</layer>
|
|
<layer id="40" name="Result_54709" type="Result" version="opset1" output_names="postnet_spectrogram">
|
|
<input>
|
|
<port id="0" precision="FP32">
|
|
<dim>-1</dim>
|
|
<dim>-1</dim>
|
|
<dim>80</dim>
|
|
</port>
|
|
</input>
|
|
</layer>
|
|
</layers>
|
|
	<edges>
		<edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
		<edge from-layer="1" from-port="0" to-layer="2" to-port="1" />
		<edge from-layer="2" from-port="2" to-layer="5" to-port="0" />
		<edge from-layer="2" from-port="2" to-layer="10" to-port="0" />
		<edge from-layer="3" from-port="0" to-layer="9" to-port="0" />
		<edge from-layer="4" from-port="0" to-layer="9" to-port="1" />
		<edge from-layer="5" from-port="1" to-layer="8" to-port="0" />
		<edge from-layer="6" from-port="0" to-layer="8" to-port="1" />
		<edge from-layer="7" from-port="0" to-layer="8" to-port="2" />
		<edge from-layer="8" from-port="3" to-layer="9" to-port="2" />
		<edge from-layer="9" from-port="3" to-layer="10" to-port="1" />
		<edge from-layer="10" from-port="2" to-layer="12" to-port="0" />
		<edge from-layer="10" from-port="2" to-layer="39" to-port="0" />
		<edge from-layer="11" from-port="0" to-layer="12" to-port="1" />
		<edge from-layer="12" from-port="2" to-layer="14" to-port="0" />
		<edge from-layer="13" from-port="0" to-layer="14" to-port="1" />
		<edge from-layer="14" from-port="2" to-layer="16" to-port="0" />
		<edge from-layer="15" from-port="0" to-layer="16" to-port="1" />
		<edge from-layer="16" from-port="2" to-layer="17" to-port="0" />
		<edge from-layer="17" from-port="1" to-layer="19" to-port="0" />
		<edge from-layer="18" from-port="0" to-layer="19" to-port="1" />
		<edge from-layer="19" from-port="2" to-layer="21" to-port="0" />
		<edge from-layer="20" from-port="0" to-layer="21" to-port="1" />
		<edge from-layer="21" from-port="2" to-layer="22" to-port="0" />
		<edge from-layer="22" from-port="1" to-layer="24" to-port="0" />
		<edge from-layer="23" from-port="0" to-layer="24" to-port="1" />
		<edge from-layer="24" from-port="2" to-layer="26" to-port="0" />
		<edge from-layer="25" from-port="0" to-layer="26" to-port="1" />
		<edge from-layer="26" from-port="2" to-layer="27" to-port="0" />
		<edge from-layer="27" from-port="1" to-layer="29" to-port="0" />
		<edge from-layer="28" from-port="0" to-layer="29" to-port="1" />
		<edge from-layer="29" from-port="2" to-layer="31" to-port="0" />
		<edge from-layer="30" from-port="0" to-layer="31" to-port="1" />
		<edge from-layer="31" from-port="2" to-layer="32" to-port="0" />
		<edge from-layer="32" from-port="1" to-layer="34" to-port="0" />
		<edge from-layer="33" from-port="0" to-layer="34" to-port="1" />
		<edge from-layer="34" from-port="2" to-layer="36" to-port="0" />
		<edge from-layer="35" from-port="0" to-layer="36" to-port="1" />
		<edge from-layer="36" from-port="2" to-layer="38" to-port="0" />
		<edge from-layer="37" from-port="0" to-layer="38" to-port="1" />
		<edge from-layer="38" from-port="2" to-layer="39" to-port="1" />
		<edge from-layer="39" from-port="2" to-layer="40" to-port="0" />
	</edges>
	<rt_info>
		<Runtime_version value="2025.2.0-19140-c01cd93e24d-releases/2025/2" />
		<conversion_parameters>
			<framework value="pytorch" />
			<is_python_object value="True" />
		</conversion_parameters>
		<optimum>
			<optimum_intel_version value="1.24.0" />
			<optimum_version value="1.26.1" />
			<pytorch_version value="2.7.1" />
			<transformers_version value="4.52.4" />
		</optimum>
	</rt_info>
</net>