%16 = %15.0;
%17 = nn.relu(%16);
%18 = nn.conv2d(%17, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%19 = nn.conv2d(%9, meta[relay.Constant][24], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%20 = add(%18, %19);
%21 = nn.batch_norm(%20, meta[relay.Constant][25], meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28]);
%22 = %21.0;
%23 = nn.relu(%22);
%24 = nn.conv2d(%23, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%25 = nn.batch_norm(%24, meta[relay.Constant][30], meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33]);
%26 = %25.0;
%27 = nn.relu(%26);
%28 = nn.conv2d(%27, meta[relay.Constant][34], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
%29 = nn.batch_norm(%28, meta[relay.Constant][35], meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38]);
%30 = %29.0;
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%33 = add(%32, %20);
%34 = nn.batch_norm(%33, meta[relay.Constant][40], meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43]);
%35 = %34.0;
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%38 = nn.batch_norm(%37, meta[relay.Constant][45], meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48]);
%39 = %38.0;
%40 = nn.relu(%39);
%41 = nn.conv2d(%40, meta[relay.Constant][49], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
%42 = nn.batch_norm(%41, meta[relay.Constant][50], meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53]);
%43 = %42.0;
%44 = nn.relu(%43);
%45 = nn.conv2d(%44, meta[relay.Constant][54], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%46 = add(%45, %33);
%47 = nn.batch_norm(%46, meta[relay.Constant][55], meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58]);
%48 = %47.0;
%49 = nn.relu(%48);
%50 = nn.conv2d(%49, meta[relay.Constant][59], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%51 = nn.batch_norm(%50, meta[relay.Constant][60], meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63]);
%52 = %51.0;
%53 = nn.relu(%52);
%54 = nn.conv2d(%53, meta[relay.Constant][64], strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%55 = nn.batch_norm(%54, meta[relay.Constant][65], meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68]);
%56 = %55.0;
%57 = nn.relu(%56);
%58 = nn.conv2d(%57, meta[relay.Constant][69], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%59 = nn.conv2d(%49, meta[relay.Constant][70], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%60 = add(%58, %59);
%61 = nn.batch_norm(%60, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
%62 = %61.0;
%63 = nn.relu(%62);
%64 = nn.conv2d(%63, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%65 = nn.batch_norm(%64, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
%66 = %65.0;
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, meta[relay.Constant][80], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%69 = nn.batch_norm(%68, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
%70 = %69.0;
%71 = nn.relu(%70);
%72 = nn.conv2d(%71, meta[relay.Constant][85], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%73 = add(%72, %60);
%74 = nn.batch_norm(%73, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
%75 = %74.0;
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%78 = nn.batch_norm(%77, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
%79 = %78.0;
%80 = nn.relu(%79);
%81 = nn.conv2d(%80, meta[relay.Constant][95], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%82 = nn.batch_norm(%81, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
%83 = %82.0;
%84 = nn.relu(%83);
%85 = nn.conv2d(%84, meta[relay.Constant][100], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%86 = add(%85, %73);
%87 = nn.batch_norm(%86, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
%88 = %87.0;
%89 = nn.relu(%88);
%90 = nn.conv2d(%89, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%91 = nn.batch_norm(%90, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
%92 = %91.0;
%93 = nn.relu(%92);
%94 = nn.conv2d(%93, meta[relay.Constant][110], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
%95 = nn.batch_norm(%94, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
%96 = %95.0;
%97 = nn.relu(%96);
%98 = nn.conv2d(%97, meta[relay.Constant][115], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%99 = add(%98, %86);
%100 = nn.batch_norm(%99, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
%101 = %100.0;
%102 = nn.relu(%101);
%103 = nn.conv2d(%102, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%104 = nn.batch_norm(%103, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
%105 = %104.0;
%106 = nn.relu(%105);
%107 = nn.conv2d(%106, meta[relay.Constant][125], strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%108 = nn.batch_norm(%107, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
%109 = %108.0;
%110 = nn.relu(%109);
%111 = nn.conv2d(%110, meta[relay.Constant][130], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%112 = nn.conv2d(%102, meta[relay.Constant][131], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%113 = add(%111, %112);
%114 = nn.batch_norm(%113, meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134], meta[relay.Constant][135]);
%115 = %114.0;
%116 = nn.relu(%115);
%117 = nn.conv2d(%116, meta[relay.Constant][136], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%118 = nn.batch_norm(%117, meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139], meta[relay.Constant][140]);
%119 = %118.0;
%120 = nn.relu(%119);
%121 = nn.conv2d(%120, meta[relay.Constant][141], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%122 = nn.batch_norm(%121, meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144], meta[relay.Constant][145]);
%123 = %122.0;
%124 = nn.relu(%123);
%125 = nn.conv2d(%124, meta[relay.Constant][146], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%126 = add(%125, %113);
%127 = nn.batch_norm(%126, meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149], meta[relay.Constant][150]);
%128 = %127.0;
%129 = nn.relu(%128);
%130 = nn.conv2d(%129, meta[relay.Constant][151], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%131 = nn.batch_norm(%130, meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154], meta[relay.Constant][155]);
%132 = %131.0;
%133 = nn.relu(%132);
%134 = nn.conv2d(%133, meta[relay.Constant][156], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%135 = nn.batch_norm(%134, meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159], meta[relay.Constant][160]);
%136 = %135.0;
%137 = nn.relu(%136);
%138 = nn.conv2d(%137, meta[relay.Constant][161], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%139 = add(%138, %126);
%140 = nn.batch_norm(%139, meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164], meta[relay.Constant][165]);
%141 = %140.0;
%142 = nn.relu(%141);
%143 = nn.conv2d(%142, meta[relay.Constant][166], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%144 = nn.batch_norm(%143, meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169], meta[relay.Constant][170]);
%145 = %144.0;
%146 = nn.relu(%145);
%147 = nn.conv2d(%146, meta[relay.Constant][171], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%148 = nn.batch_norm(%147, meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174], meta[relay.Constant][175]);
%149 = %148.0;
%150 = nn.relu(%149);
%151 = nn.conv2d(%150, meta[relay.Constant][176], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%152 = add(%151, %139);
%153 = nn.batch_norm(%152, meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179], meta[relay.Constant][180]);
%154 = %153.0;
%155 = nn.relu(%154);
%156 = nn.conv2d(%155, meta[relay.Constant][181], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%157 = nn.batch_norm(%156, meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184], meta[relay.Constant][185]);
%158 = %157.0;
%159 = nn.relu(%158);
%160 = nn.conv2d(%159, meta[relay.Constant][186], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%161 = nn.batch_norm(%160, meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189], meta[relay.Constant][190]);
%162 = %161.0;
%163 = nn.relu(%162);
%164 = nn.conv2d(%163, meta[relay.Constant][191], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%165 = add(%164, %152);
%166 = nn.batch_norm(%165, meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194], meta[relay.Constant][195]);
%167 = %166.0;
%168 = nn.relu(%167);
%169 = nn.conv2d(%168, meta[relay.Constant][196], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%170 = nn.batch_norm(%169, meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199], meta[relay.Constant][200]);
%171 = %170.0;
%172 = nn.relu(%171);
%173 = nn.conv2d(%172, meta[relay.Constant][201], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
%174 = nn.batch_norm(%173, meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204], meta[relay.Constant][205]);
%175 = %174.0;
%176 = nn.relu(%175);
%177 = nn.conv2d(%176, meta[relay.Constant][206], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%178 = add(%177, %165);
%179 = nn.batch_norm(%178, meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209], meta[relay.Constant][210]);
%180 = %179.0;
%181 = nn.relu(%180);
%182 = nn.conv2d(%181, meta[relay.Constant][211], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%183 = nn.batch_norm(%182, meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214], meta[relay.Constant][215]);
%184 = %183.0;
%185 = nn.relu(%184);
%186 = nn.conv2d(%185, meta[relay.Constant][216], strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
%187 = nn.batch_norm(%186, meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219], meta[relay.Constant][220]);
%188 = %187.0;
%189 = nn.relu(%188);
%190 = nn.conv2d(%189, meta[relay.Constant][221], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%191 = nn.conv2d(%181, meta[relay.Constant][222], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%192 = add(%190, %191);
%193 = nn.batch_norm(%192, meta[relay.Constant][223], meta[relay.Constant][224], meta[relay.Constant][225], meta[relay.Constant][226]);
%194 = %193.0;
%195 = nn.relu(%194);
%196 = nn.conv2d(%195, meta[relay.Constant][227], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%197 = nn.batch_norm(%196, meta[relay.Constant][228], meta[relay.Constant][229], meta[relay.Constant][230], meta[relay.Constant][231]);
%198 = %197.0;
%199 = nn.relu(%198);
%200 = nn.conv2d(%199, meta[relay.Constant][232], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
%201 = nn.batch_norm(%200, meta[relay.Constant][233], meta[relay.Constant][234], meta[relay.Constant][235], meta[relay.Constant][236]);
%202 = %201.0;
%203 = nn.relu(%202);
%204 = nn.conv2d(%203, meta[relay.Constant][237], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%205 = add(%204, %192);
%206 = nn.batch_norm(%205, meta[relay.Constant][238], meta[relay.Constant][239], meta[relay.Constant][240], meta[relay.Constant][241]);
%207 = %206.0;
%208 = nn.relu(%207);
%209 = nn.conv2d(%208, meta[relay.Constant][242], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%210 = nn.batch_norm(%209, meta[relay.Constant][243], meta[relay.Constant][244], meta[relay.Constant][245], meta[relay.Constant][246]);
%211 = %210.0;
%212 = nn.relu(%211);
%213 = nn.conv2d(%212, meta[relay.Constant][247], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
%214 = nn.batch_norm(%213, meta[relay.Constant][248], meta[relay.Constant][249], meta[relay.Constant][250], meta[relay.Constant][251]);
%215 = %214.0;
%216 = nn.relu(%215);
%217 = nn.conv2d(%216, meta[relay.Constant][252], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%218 = add(%217, %205);
%219 = nn.batch_norm(%218, meta[relay.Constant][253], meta[relay.Constant][254], meta[relay.Constant][255], meta[relay.Constant][256]);
%220 = %219.0;
%221 = nn.relu(%220);
%222 = nn.global_avg_pool2d(%221);
%223 = reshape(%222, newshape=[0, -1]);
%224 = nn.dense(%223, meta[relay.Constant][257], units=1000);
add(%224, meta[relay.Constant][258])
}
""",
"from_string",
None,
metatable,
)
return {
"name": "resnet50_16",
"input_shapes": {"data": [1, 3, 224, 224]},
"input_dtypes": {"data": "float16"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}
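# Hedged usage sketch (not part of the original suite): one way a model dictionary
# like the one returned above could be compiled and executed end to end. Assumes a
# plain "llvm" target and random inputs; the helper name run_model_dict is
# hypothetical and only illustrates the intended shape of the dict.
def run_model_dict(model):
    import numpy as np
    import tvm
    from tvm.contrib import graph_executor

    # Build the Relay module carried in the dict; params is None for these models.
    with tvm.transform.PassContext(opt_level=3):
        lib = tvm.relay.build(model["mod"], target="llvm", params=model["params"])
    dev = tvm.device("llvm", 0)
    runner = graph_executor.GraphModule(lib["default"](dev))
    # Feed random tensors matching the declared input shapes and dtypes.
    for name, shape in model["input_shapes"].items():
        dtype = model["input_dtypes"][name]
        runner.set_input(name, np.random.uniform(size=shape).astype(dtype))
    runner.run()
    return runner.get_output(0).numpy()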
def mobilenet_consts(dtype):
return make_consts(
dtype,
[
(32, 3, 3, 3),
(32,),
(32,),
(32,),
(32,),
(32, 32, 1, 1),
(32,),
(32,),
(32,),
(32,),
(32, 1, 3, 3),
(32,),
(32,),
(32,),
(32,),
(16, 32, 1, 1),
(16,),
(16,),
(16,),
(16,),
(96, 16, 1, 1),
(96,),
(96,),
(96,),
(96,),
(96, 1, 3, 3),
(96,),
(96,),
(96,),
(96,),
(24, 96, 1, 1),
(24,),
(24,),
(24,),
(24,),
(144, 24, 1, 1),
(144,),
(144,),
(144,),
(144,),
(144, 1, 3, 3),
(144,),
(144,),
(144,),
(144,),
(24, 144, 1, 1),
(24,),
(24,),
(24,),
(24,),
(144, 24, 1, 1),
(144,),
(144,),
(144,),
(144,),
(144, 1, 3, 3),
(144,),
(144,),
(144,),
(144,),
(32, 144, 1, 1),
(32,),
(32,),
(32,),
(32,),
(192, 32, 1, 1),
(192,),
(192,),
(192,),
(192,),
(192, 1, 3, 3),
(192,),
(192,),
(192,),
(192,),
(32, 192, 1, 1),
(32,),
(32,),
(32,),
(32,),
(192, 32, 1, 1),
(192,),
(192,),
(192,),
(192,),
(192, 1, 3, 3),
(192,),
(192,),
(192,),
(192,),
(32, 192, 1, 1),
(32,),
(32,),
(32,),
(32,),
(192, 32, 1, 1),
(192,),
(192,),
(192,),
(192,),
(192, 1, 3, 3),
(192,),
(192,),
(192,),
(192,),
(64, 192, 1, 1),
(64,),
(64,),
(64,),
(64,),
(384, 64, 1, 1),
(384,),
(384,),
(384,),
(384,),
(384, 1, 3, 3),
(384,),
(384,),
(384,),
(384,),
(64, 384, 1, 1),
(64,),
(64,),
(64,),
(64,),
(384, 64, 1, 1),
(384,),
(384,),
(384,),
(384,),
(384, 1, 3, 3),
(384,),
(384,),
(384,),
(384,),
(64, 384, 1, 1),
(64,),
(64,),
(64,),
(64,),
(384, 64, 1, 1),
(384,),
(384,),
(384,),
(384,),
(384, 1, 3, 3),
(384,),
(384,),
(384,),
(384,),
(64, 384, 1, 1),
(64,),
(64,),
(64,),
(64,),
(384, 64, 1, 1),
(384,),
(384,),
(384,),
(384,),
(384, 1, 3, 3),
(384,),
(384,),
(384,),
(384,),
(96, 384, 1, 1),
(96,),
(96,),
(96,),
(96,),
(576, 96, 1, 1),
(576,),
(576,),
(576,),
(576,),
(576, 1, 3, 3),
(576,),
(576,),
(576,),
(576,),
(96, 576, 1, 1),
(96,),
(96,),
(96,),
(96,),
(576, 96, 1, 1),
(576,),
(576,),
(576,),
(576,),
(576, 1, 3, 3),
(576,),
(576,),
(576,),
(576,),
(96, 576, 1, 1),
(96,),
(96,),
(96,),
(96,),
(576, 96, 1, 1),
(576,),
(576,),
(576,),
(576,),
(576, 1, 3, 3),
(576,),
(576,),
(576,),
(576,),
(160, 576, 1, 1),
(160,),
(160,),
(160,),
(160,),
(960, 160, 1, 1),
(960,),
(960,),
(960,),
(960,),
(960, 1, 3, 3),
(960,),
(960,),
(960,),
(960,),
(160, 960, 1, 1),
(160,),
(160,),
(160,),
(160,),
(960, 160, 1, 1),
(960,),
(960,),
(960,),
(960,),
(960, 1, 3, 3),
(960,),
(960,),
(960,),
(960,),
(160, 960, 1, 1),
(160,),
(160,),
(160,),
(160,),
(960, 160, 1, 1),
(960,),
(960,),
(960,),
(960,),
(960, 1, 3, 3),
(960,),
(960,),
(960,),
(960,),
(320, 960, 1, 1),
(320,),
(320,),
(320,),
(320,),
(1280, 320, 1, 1),
(1280,),
(1280,),
(1280,),
(1280,),
(1000, 1280, 1, 1),
],
)
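# Hedged sketch (assumption): make_consts is defined elsewhere in this file. The
# metatables built from it only require a list of relay.Constant nodes, one per
# shape, in the requested dtype. A minimal stand-in could look like the following;
# the name make_consts_sketch is hypothetical.
def make_consts_sketch(dtype, shapes):
    import numpy as np
    from tvm import relay

    # One random constant per shape, cast to the requested dtype.
    return [relay.const(np.random.uniform(size=shape).astype(dtype)) for shape in shapes]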
def mobilenet():
metatable = {"relay.Constant": mobilenet_consts("float32")}
mod = tvm.parser.parse(
"""
def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1000), float32] {
%0 = nn.conv2d(%data, meta[relay.Constant][0], strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]);
%1 = nn.batch_norm(%0, meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3], meta[relay.Constant][4]);
%2 = %1.0;
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][5], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%5 = nn.batch_norm(%4, meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8], meta[relay.Constant][9]);
%6 = %5.0;
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][10], padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3]);
%9 = nn.batch_norm(%8, meta[relay.Constant][11], meta[relay.Constant][12], meta[relay.Constant][13], meta[relay.Constant][14]);
%10 = %9.0;
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][15], padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]);
%13 = nn.batch_norm(%12, meta[relay.Constant][16], meta[relay.Constant][17], meta[relay.Constant][18], meta[relay.Constant][19]);
%14 = %13.0;
%15 = nn.conv2d(%14, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%16 = nn.batch_norm(%15, meta[relay.Constant][21], meta[relay.Constant][22], meta[relay.Constant][23], meta[relay.Constant][24]);
%17 = %16.0;
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][25], strides=[2, 2], padding=[1, 1, 1, 1], groups=96, channels=96, kernel_size=[3, 3]);
%20 = nn.batch_norm(%19, meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28], meta[relay.Constant][29]);
%21 = %20.0;
%22 = nn.relu(%21);
%23 = nn.conv2d(%22, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%24 = nn.batch_norm(%23, meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33], meta[relay.Constant][34]);
%25 = %24.0;
%26 = nn.conv2d(%25, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%27 = nn.batch_norm(%26, meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38], meta[relay.Constant][39]);
%28 = %27.0;
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%31 = nn.batch_norm(%30, meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43], meta[relay.Constant][44]);
%32 = %31.0;
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][45], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%35 = nn.batch_norm(%34, meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48], meta[relay.Constant][49]);
%36 = %35.0;
%37 = add(%36, %25);
%38 = nn.conv2d(%37, meta[relay.Constant][50], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%39 = nn.batch_norm(%38, meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53], meta[relay.Constant][54]);
%40 = %39.0;
%41 = nn.relu(%40);
%42 = nn.conv2d(%41, meta[relay.Constant][55], strides=[2, 2], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%43 = nn.batch_norm(%42, meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58], meta[relay.Constant][59]);
%44 = %43.0;
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][60], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%47 = nn.batch_norm(%46, meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63], meta[relay.Constant][64]);
%48 = %47.0;
%49 = nn.conv2d(%48, meta[relay.Constant][65], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%50 = nn.batch_norm(%49, meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68], meta[relay.Constant][69]);
%51 = %50.0;
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][70], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%54 = nn.batch_norm(%53, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
%55 = %54.0;
%56 = nn.relu(%55);
%57 = nn.conv2d(%56, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%58 = nn.batch_norm(%57, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
%59 = %58.0;
%60 = add(%59, %48);
%61 = nn.conv2d(%60, meta[relay.Constant][80], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%62 = nn.batch_norm(%61, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
%63 = %62.0;
%64 = nn.relu(%63);
%65 = nn.conv2d(%64, meta[relay.Constant][85], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%66 = nn.batch_norm(%65, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
%67 = %66.0;
%68 = nn.relu(%67);
%69 = nn.conv2d(%68, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%70 = nn.batch_norm(%69, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
%71 = %70.0;
%72 = add(%71, %60);
%73 = nn.conv2d(%72, meta[relay.Constant][95], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%74 = nn.batch_norm(%73, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
%75 = %74.0;
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][100], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%78 = nn.batch_norm(%77, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
%79 = %78.0;
%80 = nn.relu(%79);
%81 = nn.conv2d(%80, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%82 = nn.batch_norm(%81, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
%83 = %82.0;
%84 = nn.conv2d(%83, meta[relay.Constant][110], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%85 = nn.batch_norm(%84, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
%86 = %85.0;
%87 = nn.relu(%86);
%88 = nn.conv2d(%87, meta[relay.Constant][115], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%89 = nn.batch_norm(%88, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
%90 = %89.0;
%91 = nn.relu(%90);
%92 = nn.conv2d(%91, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%93 = nn.batch_norm(%92, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
%94 = %93.0;
%95 = add(%94, %83);
%96 = nn.conv2d(%95, meta[relay.Constant][125], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%97 = nn.batch_norm(%96, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
%98 = %97.0;
%99 = nn.relu(%98);
%100 = nn.conv2d(%99, meta[relay.Constant][130], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%101 = nn.batch_norm(%100, meta[relay.Constant][131], meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134]);
%102 = %101.0;
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][135], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%105 = nn.batch_norm(%104, meta[relay.Constant][136], meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139]);
%106 = %105.0;
%107 = add(%106, %95);
%108 = nn.conv2d(%107, meta[relay.Constant][140], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%109 = nn.batch_norm(%108, meta[relay.Constant][141], meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144]);
%110 = %109.0;
%111 = nn.relu(%110);
%112 = nn.conv2d(%111, meta[relay.Constant][145], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%113 = nn.batch_norm(%112, meta[relay.Constant][146], meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149]);
%114 = %113.0;
%115 = nn.relu(%114);
%116 = nn.conv2d(%115, meta[relay.Constant][150], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%117 = nn.batch_norm(%116, meta[relay.Constant][151], meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154]);
%118 = %117.0;
%119 = add(%118, %107);
%120 = nn.conv2d(%119, meta[relay.Constant][155], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%121 = nn.batch_norm(%120, meta[relay.Constant][156], meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159]);
%122 = %121.0;
%123 = nn.relu(%122);
%124 = nn.conv2d(%123, meta[relay.Constant][160], strides=[2, 2], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%125 = nn.batch_norm(%124, meta[relay.Constant][161], meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164]);
%126 = %125.0;
%127 = nn.relu(%126);
%128 = nn.conv2d(%127, meta[relay.Constant][165], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%129 = nn.batch_norm(%128, meta[relay.Constant][166], meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169]);
%130 = %129.0;
%131 = nn.conv2d(%130, meta[relay.Constant][170], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%132 = nn.batch_norm(%131, meta[relay.Constant][171], meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174]);
%133 = %132.0;
%134 = nn.relu(%133);
%135 = nn.conv2d(%134, meta[relay.Constant][175], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%136 = nn.batch_norm(%135, meta[relay.Constant][176], meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179]);
%137 = %136.0;
%138 = nn.relu(%137);
%139 = nn.conv2d(%138, meta[relay.Constant][180], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%140 = nn.batch_norm(%139, meta[relay.Constant][181], meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184]);
%141 = %140.0;
%142 = add(%141, %130);
%143 = nn.conv2d(%142, meta[relay.Constant][185], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%144 = nn.batch_norm(%143, meta[relay.Constant][186], meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189]);
%145 = %144.0;
%146 = nn.relu(%145);
%147 = nn.conv2d(%146, meta[relay.Constant][190], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%148 = nn.batch_norm(%147, meta[relay.Constant][191], meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194]);
%149 = %148.0;
%150 = nn.relu(%149);
%151 = nn.conv2d(%150, meta[relay.Constant][195], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%152 = nn.batch_norm(%151, meta[relay.Constant][196], meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199]);
%153 = %152.0;
%154 = add(%153, %142);
%155 = nn.conv2d(%154, meta[relay.Constant][200], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%156 = nn.batch_norm(%155, meta[relay.Constant][201], meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204]);
%157 = %156.0;
%158 = nn.relu(%157);
%159 = nn.conv2d(%158, meta[relay.Constant][205], strides=[2, 2], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%160 = nn.batch_norm(%159, meta[relay.Constant][206], meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209]);
%161 = %160.0;
%162 = nn.relu(%161);
%163 = nn.conv2d(%162, meta[relay.Constant][210], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%164 = nn.batch_norm(%163, meta[relay.Constant][211], meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214]);
%165 = %164.0;
%166 = nn.conv2d(%165, meta[relay.Constant][215], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%167 = nn.batch_norm(%166, meta[relay.Constant][216], meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219]);
%168 = %167.0;
%169 = nn.relu(%168);
%170 = nn.conv2d(%169, meta[relay.Constant][220], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%171 = nn.batch_norm(%170, meta[relay.Constant][221], meta[relay.Constant][222], meta[relay.Constant][223], meta[relay.Constant][224]);
%172 = %171.0;
%173 = nn.relu(%172);
%174 = nn.conv2d(%173, meta[relay.Constant][225], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%175 = nn.batch_norm(%174, meta[relay.Constant][226], meta[relay.Constant][227], meta[relay.Constant][228], meta[relay.Constant][229]);
%176 = %175.0;
%177 = add(%176, %165);
%178 = nn.conv2d(%177, meta[relay.Constant][230], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%179 = nn.batch_norm(%178, meta[relay.Constant][231], meta[relay.Constant][232], meta[relay.Constant][233], meta[relay.Constant][234]);
%180 = %179.0;
%181 = nn.relu(%180);
%182 = nn.conv2d(%181, meta[relay.Constant][235], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%183 = nn.batch_norm(%182, meta[relay.Constant][236], meta[relay.Constant][237], meta[relay.Constant][238], meta[relay.Constant][239]);
%184 = %183.0;
%185 = nn.relu(%184);
%186 = nn.conv2d(%185, meta[relay.Constant][240], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%187 = nn.batch_norm(%186, meta[relay.Constant][241], meta[relay.Constant][242], meta[relay.Constant][243], meta[relay.Constant][244]);
%188 = %187.0;
%189 = add(%188, %177);
%190 = nn.conv2d(%189, meta[relay.Constant][245], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%191 = nn.batch_norm(%190, meta[relay.Constant][246], meta[relay.Constant][247], meta[relay.Constant][248], meta[relay.Constant][249]);
%192 = %191.0;
%193 = nn.relu(%192);
%194 = nn.conv2d(%193, meta[relay.Constant][250], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%195 = nn.batch_norm(%194, meta[relay.Constant][251], meta[relay.Constant][252], meta[relay.Constant][253], meta[relay.Constant][254]);
%196 = %195.0;
%197 = nn.relu(%196);
%198 = nn.conv2d(%197, meta[relay.Constant][255], padding=[0, 0, 0, 0], channels=320, kernel_size=[1, 1]);
%199 = nn.batch_norm(%198, meta[relay.Constant][256], meta[relay.Constant][257], meta[relay.Constant][258], meta[relay.Constant][259]);
%200 = %199.0;
%201 = nn.conv2d(%200, meta[relay.Constant][260], padding=[0, 0, 0, 0], channels=1280, kernel_size=[1, 1]);
%202 = nn.batch_norm(%201, meta[relay.Constant][261], meta[relay.Constant][262], meta[relay.Constant][263], meta[relay.Constant][264]);
%203 = %202.0;
%204 = nn.relu(%203);
%205 = nn.global_avg_pool2d(%204);
%206 = nn.conv2d(%205, meta[relay.Constant][265], padding=[0, 0, 0, 0], channels=1000, kernel_size=[1, 1]);
reshape(%206, newshape=[0, -1])
}
""",
"from_string",
None,
metatable,
)
return {
"name": "mobilenet",
"input_shapes": {"data": [1, 3, 224, 224]},
"input_dtypes": {"data": "float32"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
def mobilenet_16():
metatable = {"relay.Constant": mobilenet_consts("float16")}
mod = tvm.parser.parse(
"""
def @main(%data: Tensor[(1, 3, 224, 224), float16]) -> Tensor[(1, 1000), float16] {
%0 = nn.conv2d(%data, meta[relay.Constant][0], strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]);
%1 = nn.batch_norm(%0, meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3], meta[relay.Constant][4]);
%2 = %1.0;
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][5], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%5 = nn.batch_norm(%4, meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8], meta[relay.Constant][9]);
%6 = %5.0;
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][10], padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3]);
%9 = nn.batch_norm(%8, meta[relay.Constant][11], meta[relay.Constant][12], meta[relay.Constant][13], meta[relay.Constant][14]);
%10 = %9.0;
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][15], padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]);
%13 = nn.batch_norm(%12, meta[relay.Constant][16], meta[relay.Constant][17], meta[relay.Constant][18], meta[relay.Constant][19]);
%14 = %13.0;
%15 = nn.conv2d(%14, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%16 = nn.batch_norm(%15, meta[relay.Constant][21], meta[relay.Constant][22], meta[relay.Constant][23], meta[relay.Constant][24]);
%17 = %16.0;
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][25], strides=[2, 2], padding=[1, 1, 1, 1], groups=96, channels=96, kernel_size=[3, 3]);
%20 = nn.batch_norm(%19, meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28], meta[relay.Constant][29]);
%21 = %20.0;
%22 = nn.relu(%21);
%23 = nn.conv2d(%22, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%24 = nn.batch_norm(%23, meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33], meta[relay.Constant][34]);
%25 = %24.0;
%26 = nn.conv2d(%25, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%27 = nn.batch_norm(%26, meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38], meta[relay.Constant][39]);
%28 = %27.0;
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%31 = nn.batch_norm(%30, meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43], meta[relay.Constant][44]);
%32 = %31.0;
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][45], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%35 = nn.batch_norm(%34, meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48], meta[relay.Constant][49]);
%36 = %35.0;
%37 = add(%36, %25);
%38 = nn.conv2d(%37, meta[relay.Constant][50], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%39 = nn.batch_norm(%38, meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53], meta[relay.Constant][54]);
%40 = %39.0;
%41 = nn.relu(%40);
%42 = nn.conv2d(%41, meta[relay.Constant][55], strides=[2, 2], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%43 = nn.batch_norm(%42, meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58], meta[relay.Constant][59]);
%44 = %43.0;
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][60], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%47 = nn.batch_norm(%46, meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63], meta[relay.Constant][64]);
%48 = %47.0;
%49 = nn.conv2d(%48, meta[relay.Constant][65], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%50 = nn.batch_norm(%49, meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68], meta[relay.Constant][69]);
%51 = %50.0;
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][70], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%54 = nn.batch_norm(%53, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
%55 = %54.0;
%56 = nn.relu(%55);
%57 = nn.conv2d(%56, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%58 = nn.batch_norm(%57, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
%59 = %58.0;
%60 = add(%59, %48);
%61 = nn.conv2d(%60, meta[relay.Constant][80], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%62 = nn.batch_norm(%61, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
%63 = %62.0;
%64 = nn.relu(%63);
%65 = nn.conv2d(%64, meta[relay.Constant][85], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%66 = nn.batch_norm(%65, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
%67 = %66.0;
%68 = nn.relu(%67);
%69 = nn.conv2d(%68, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%70 = nn.batch_norm(%69, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
%71 = %70.0;
%72 = add(%71, %60);
%73 = nn.conv2d(%72, meta[relay.Constant][95], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%74 = nn.batch_norm(%73, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
%75 = %74.0;
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][100], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%78 = nn.batch_norm(%77, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
%79 = %78.0;
%80 = nn.relu(%79);
%81 = nn.conv2d(%80, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%82 = nn.batch_norm(%81, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
%83 = %82.0;
%84 = nn.conv2d(%83, meta[relay.Constant][110], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%85 = nn.batch_norm(%84, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
%86 = %85.0;
%87 = nn.relu(%86);
%88 = nn.conv2d(%87, meta[relay.Constant][115], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%89 = nn.batch_norm(%88, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
%90 = %89.0;
%91 = nn.relu(%90);
%92 = nn.conv2d(%91, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%93 = nn.batch_norm(%92, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
%94 = %93.0;
%95 = add(%94, %83);
%96 = nn.conv2d(%95, meta[relay.Constant][125], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%97 = nn.batch_norm(%96, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
%98 = %97.0;
%99 = nn.relu(%98);
%100 = nn.conv2d(%99, meta[relay.Constant][130], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%101 = nn.batch_norm(%100, meta[relay.Constant][131], meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134]);
%102 = %101.0;
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][135], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%105 = nn.batch_norm(%104, meta[relay.Constant][136], meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139]);
%106 = %105.0;
%107 = add(%106, %95);
%108 = nn.conv2d(%107, meta[relay.Constant][140], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%109 = nn.batch_norm(%108, meta[relay.Constant][141], meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144]);
%110 = %109.0;
%111 = nn.relu(%110);
%112 = nn.conv2d(%111, meta[relay.Constant][145], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%113 = nn.batch_norm(%112, meta[relay.Constant][146], meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149]);
%114 = %113.0;
%115 = nn.relu(%114);
%116 = nn.conv2d(%115, meta[relay.Constant][150], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%117 = nn.batch_norm(%116, meta[relay.Constant][151], meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154]);
%118 = %117.0;
%119 = add(%118, %107);
%120 = nn.conv2d(%119, meta[relay.Constant][155], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%121 = nn.batch_norm(%120, meta[relay.Constant][156], meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159]);
%122 = %121.0;
%123 = nn.relu(%122);
%124 = nn.conv2d(%123, meta[relay.Constant][160], strides=[2, 2], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%125 = nn.batch_norm(%124, meta[relay.Constant][161], meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164]);
%126 = %125.0;
%127 = nn.relu(%126);
%128 = nn.conv2d(%127, meta[relay.Constant][165], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%129 = nn.batch_norm(%128, meta[relay.Constant][166], meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169]);
%130 = %129.0;
%131 = nn.conv2d(%130, meta[relay.Constant][170], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%132 = nn.batch_norm(%131, meta[relay.Constant][171], meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174]);
%133 = %132.0;
%134 = nn.relu(%133);
%135 = nn.conv2d(%134, meta[relay.Constant][175], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%136 = nn.batch_norm(%135, meta[relay.Constant][176], meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179]);
%137 = %136.0;
%138 = nn.relu(%137);
%139 = nn.conv2d(%138, meta[relay.Constant][180], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%140 = nn.batch_norm(%139, meta[relay.Constant][181], meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184]);
%141 = %140.0;
%142 = add(%141, %130);
%143 = nn.conv2d(%142, meta[relay.Constant][185], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%144 = nn.batch_norm(%143, meta[relay.Constant][186], meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189]);
%145 = %144.0;
%146 = nn.relu(%145);
%147 = nn.conv2d(%146, meta[relay.Constant][190], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%148 = nn.batch_norm(%147, meta[relay.Constant][191], meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194]);
%149 = %148.0;
%150 = nn.relu(%149);
%151 = nn.conv2d(%150, meta[relay.Constant][195], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%152 = nn.batch_norm(%151, meta[relay.Constant][196], meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199]);
%153 = %152.0;
%154 = add(%153, %142);
%155 = nn.conv2d(%154, meta[relay.Constant][200], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%156 = nn.batch_norm(%155, meta[relay.Constant][201], meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204]);
%157 = %156.0;
%158 = nn.relu(%157);
%159 = nn.conv2d(%158, meta[relay.Constant][205], strides=[2, 2], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%160 = nn.batch_norm(%159, meta[relay.Constant][206], meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209]);
%161 = %160.0;
%162 = nn.relu(%161);
%163 = nn.conv2d(%162, meta[relay.Constant][210], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%164 = nn.batch_norm(%163, meta[relay.Constant][211], meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214]);
%165 = %164.0;
%166 = nn.conv2d(%165, meta[relay.Constant][215], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%167 = nn.batch_norm(%166, meta[relay.Constant][216], meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219]);
%168 = %167.0;
%169 = nn.relu(%168);
%170 = nn.conv2d(%169, meta[relay.Constant][220], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%171 = nn.batch_norm(%170, meta[relay.Constant][221], meta[relay.Constant][222], meta[relay.Constant][223], meta[relay.Constant][224]);
%172 = %171.0;
%173 = nn.relu(%172);
%174 = nn.conv2d(%173, meta[relay.Constant][225], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%175 = nn.batch_norm(%174, meta[relay.Constant][226], meta[relay.Constant][227], meta[relay.Constant][228], meta[relay.Constant][229]);
%176 = %175.0;
%177 = add(%176, %165);
%178 = nn.conv2d(%177, meta[relay.Constant][230], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%179 = nn.batch_norm(%178, meta[relay.Constant][231], meta[relay.Constant][232], meta[relay.Constant][233], meta[relay.Constant][234]);
%180 = %179.0;
%181 = nn.relu(%180);
%182 = nn.conv2d(%181, meta[relay.Constant][235], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%183 = nn.batch_norm(%182, meta[relay.Constant][236], meta[relay.Constant][237], meta[relay.Constant][238], meta[relay.Constant][239]);
%184 = %183.0;
%185 = nn.relu(%184);
%186 = nn.conv2d(%185, meta[relay.Constant][240], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%187 = nn.batch_norm(%186, meta[relay.Constant][241], meta[relay.Constant][242], meta[relay.Constant][243], meta[relay.Constant][244]);
%188 = %187.0;
%189 = add(%188, %177);
%190 = nn.conv2d(%189, meta[relay.Constant][245], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%191 = nn.batch_norm(%190, meta[relay.Constant][246], meta[relay.Constant][247], meta[relay.Constant][248], meta[relay.Constant][249]);
%192 = %191.0;
%193 = nn.relu(%192);
%194 = nn.conv2d(%193, meta[relay.Constant][250], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%195 = nn.batch_norm(%194, meta[relay.Constant][251], meta[relay.Constant][252], meta[relay.Constant][253], meta[relay.Constant][254]);
%196 = %195.0;
%197 = nn.relu(%196);
%198 = nn.conv2d(%197, meta[relay.Constant][255], padding=[0, 0, 0, 0], channels=320, kernel_size=[1, 1]);
%199 = nn.batch_norm(%198, meta[relay.Constant][256], meta[relay.Constant][257], meta[relay.Constant][258], meta[relay.Constant][259]);
%200 = %199.0;
%201 = nn.conv2d(%200, meta[relay.Constant][260], padding=[0, 0, 0, 0], channels=1280, kernel_size=[1, 1]);
%202 = nn.batch_norm(%201, meta[relay.Constant][261], meta[relay.Constant][262], meta[relay.Constant][263], meta[relay.Constant][264]);
%203 = %202.0;
%204 = nn.relu(%203);
%205 = nn.global_avg_pool2d(%204);
%206 = nn.conv2d(%205, meta[relay.Constant][265], padding=[0, 0, 0, 0], channels=1000, kernel_size=[1, 1]);
reshape(%206, newshape=[0, -1])
}
""",
"from_string",
None,
metatable,
)
return {
"name": "mobilenet_16",
"input_shapes": {"data": [1, 3, 224, 224]},
"input_dtypes": {"data": "float16"},
"mod": mod,
"params": None,
"main_dtype": "float16" |
,
}
def batch_norm_extract():
consts = make_consts(
"float32",
[
(32,),
(32,),
(32,),
(32,),
],
)
metatable = {"relay.Constant": consts}
mod = tvm.parser.parse(
"""
def @main(%FunctionVar_0: Tensor[(1, 32, 112, 112), float32]) -> Tensor[(1, 32, 112, 112), float32] {
%3 = nn.batch_norm(%FunctionVar_0, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
%3.0
}
""",
"from_string",
None,
metatable,
)
return {
"name": "batch_norm_extract",
"input_shapes": {"FunctionVar_0": [1, 32, 112, 112]},
"input_dtypes": {"FunctionVar_0": "float32"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
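# Hedged note (assumption): Relay's nn.batch_norm returns a 3-tuple of (normalized
# output, moving mean, moving variance), which is why the extract above projects
# `%3.0`. For inference the whole pattern can be folded into elementwise multiply/add
# with the standard SimplifyInference pass; a minimal sketch using the dict above:
def simplify_batch_norm_sketch(model):
    import tvm

    # Returns a new IRModule with batch_norm rewritten into scale-and-shift arithmetic.
    with tvm.transform.PassContext(opt_level=3):
        return tvm.relay.transform.SimplifyInference()(model["mod"])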
def resnext50_32x4d_consts(dtype):
return make_consts(
dtype,
[
(128, 64, 1, 1),
(128, 4, 3, 3),
(256, 128, 1, 1),
(256, 64, 1, 1),
(128, 256, 1, 1),
(128, 4, 3, 3),
(256, 128, 1, 1),
(128, 256, 1, 1),
(128, 4, 3, 3),
(256, 128, 1, 1),
(256, 256, 1, 1),
(256, 8, 3, 3),
(512, 256, 1, 1),
(512, 256, 1, 1),
(256, 512, 1, 1),
(256, 8, 3, 3),
(512, 256, 1, 1),
(256, 512, 1, 1),
(256, 8, 3, 3),
(512, 256, 1, 1),
(256, 512, 1, 1),
(256, 8, 3, 3),
(512, 256, 1, 1),
(512, 512, 1, 1),
(512, 16, 3, 3),
(1024, 512, 1, 1),
(1024, 512, 1, 1),
(512, 1024, 1, 1),
(512, 16, 3, 3),
(1024, 512, 1, 1),
(512, 1024, 1, 1),
(512, 16, 3, 3),
(1024, 512, 1, 1),
(512, 1024, 1, 1),
(512, 16, 3, 3),
(1024, 512, 1, 1),
(512, 1024, 1, 1),
(512, 16, 3, 3),
(1024, 512, 1, 1),
(512, 1024, 1, 1),
(512, 16, 3, 3),
(1024, 512, 1, 1),
(1024, 1024, 1, 1),
(1024, 32, 3, 3),
(2048, 1024, 1, 1),
(2048, 1024, 1, 1),
(1024, 2048, 1, 1),
(1024, 32, 3, 3),
(2048, 1024, 1, 1),
(1024, 2048, 1, 1),
(1024, 32, 3, 3),
(2048, 1024, 1, 1),
],
)
def resnext50_32x4d():
metatable = {"relay.Constant": resnext50_32x4d_consts("float32")}
mod = tvm.parser.parse(
"""
def @main(%x: Tensor[(1, 64, 56, 56), float32]) {
%0 = nn.conv2d(%x, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, meta[relay.Constant][1], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%5 = nn.conv2d(%x, meta[relay.Constant][3], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%6 = add(%4, %5);
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][4], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%9 = nn.relu(%8);
%10 = nn.conv2d(%9, meta[relay.Constant][5], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][6], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%13 = add(%12, %7);
%14 = nn.relu(%13);
%15 = nn.conv2d(%14, meta[relay.Constant][7], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%16 = nn.relu(%15);
%17 = nn.conv2d(%16, meta[relay.Constant][8], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][9], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%20 = add(%19, %14);
%21 = nn.relu(%20);
%22 = nn.conv2d(%21, meta[relay.Constant][10], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%23 = nn.relu(%22);
%24 = nn.conv2d(%23, meta[relay.Constant][11], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%25 = nn.relu(%24);
%26 = nn.conv2d(%25, meta[relay.Constant][12], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%27 = nn.conv2d(%21, meta[relay.Constant][13], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%28 = add(%26, %27);
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][14], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, meta[relay.Constant][15], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][16], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%35 = add(%34, %29);
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, meta[relay.Constant][17], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%38 = nn.relu(%37);
%39 = nn.conv2d(%38, meta[relay.Constant][18], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%40 = nn.relu(%39);
%41 = nn.conv2d(%40, meta[relay.Constant][19], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%42 = add(%41, %36);
%43 = nn.relu(%42);
%44 = nn.conv2d(%43, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][21], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%47 = nn.relu(%46);
%48 = nn.conv2d(%47, meta[relay.Constant][22], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%49 = add(%48, %43);
%50 = nn.relu(%49);
%51 = nn.conv2d(%50, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][24], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%54 = nn.relu(%53);
%55 = nn.conv2d(%54, meta[relay.Constant][25], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%56 = nn.conv2d(%50, meta[relay.Constant][26], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%57 = add(%55, %56);
%58 = nn.relu(%57);
%59 = nn.conv2d(%58, meta[relay.Constant][27], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%60 = nn.relu(%59);
%61 = nn.conv2d(%60, meta[relay.Constant][28], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%62 = nn.relu(%61);
%63 = nn.conv2d(%62, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%64 = add(%63, %58);
%65 = nn.relu(%64);
%66 = nn.conv2d(%65, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, meta[relay.Constant][31], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%69 = nn.relu(%68);
%70 = nn.conv2d(%69, meta[relay.Constant][32], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%71 = add(%70, %65);
%72 = nn.relu(%71);
%73 = nn.conv2d(%72, meta[relay.Constant][33], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%74 = nn.relu(%73);
%75 = nn.conv2d(%74, meta[relay.Constant][34], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%78 = add(%77, %72);
%79 = nn.relu(%78);
%80 = nn.conv2d(%79, meta[relay.Constant][36], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%81 = nn.relu(%80);
%82 = nn.conv2d(%81, meta[relay.Constant][37], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%83 = nn.relu(%82);
%84 = nn.conv2d(%83, meta[relay.Constant][38], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%85 = add(%84, %79);
%86 = nn.relu(%85);
%87 = nn.conv2d(%86, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%88 = nn.relu(%87);
%89 = nn.conv2d(%88, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%90 = nn.relu(%89);
%91 = nn.conv2d(%90, meta[relay.Constant][41], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%92 = add(%91, %86);
%93 = nn.relu(%92);
%94 = nn.conv2d(%93, meta[relay.Constant][42], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%95 = nn.relu(%94);
%96 = nn.conv2d(%95, meta[relay.Constant][43], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%97 = nn.relu(%96);
%98 = nn.conv2d(%97, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%99 = nn.conv2d(%93, meta[relay.Constant][45], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%100 = add(%98, %99);
%101 = nn.relu(%100);
%102 = nn.conv2d(%101, meta[relay.Constant][46], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][47], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%105 = nn.relu(%104);
%106 = nn.conv2d(%105, meta[relay.Constant][48], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%107 = add(%106, %101);
%108 = nn.relu(%107);
%109 = nn.conv2d(%108, meta[relay.Constant][49], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%110 = nn.relu(%109);
%111 = nn.conv2d(%110, meta[relay.Constant][50], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%112 = nn.relu(%111);
%113 = nn.conv2d(%112, meta[relay.Constant][51], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%114 = add(%113, %108);
nn.relu(%114)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "resnext50_32x4d",
"input_shapes": {"x": [1, 64, 56, 56]},
"input_dtypes": {"x": "float32"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
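# Hedged aside (assumption): the *_16 variants in this file are written out by hand
# with a float16 signature and float16 constants. Shown only as a sketch, an
# alternative would be to derive a half-precision module from the float32 one with
# TVM's mixed-precision pass; the helper name to_float16_sketch is hypothetical.
def to_float16_sketch(mod):
    import tvm

    seq = tvm.transform.Sequential(
        [
            tvm.relay.transform.InferType(),
            tvm.relay.transform.ToMixedPrecision("float16"),
        ]
    )
    # Note the pass may insert casts rather than reproduce the hand-written modules exactly.
    with tvm.transform.PassContext(opt_level=3):
        return seq(mod)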
def resnext50_32x4d_16():
metatable = {"relay.Constant": resnext50_32x4d_consts("float16")}
mod = tvm.parser.parse(
"""
def @main(%x: Tensor[(1, 64, 56, 56), float16]) {
%0 = nn.conv2d(%x, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, meta[relay.Constant][1], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%5 = nn.conv2d(%x, meta[relay.Constant][3], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%6 = add(%4, %5);
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][4], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%9 = nn.relu(%8);
%10 = nn.conv2d(%9, meta[relay.Constant][5], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][6], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%13 = add(%12, %7);
%14 = nn.relu(%13);
%15 = nn.conv2d(%14, meta[relay.Constant][7], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%16 = nn.relu(%15);
%17 = nn.conv2d(%16, meta[relay.Constant][8], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][9], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%20 = add(%19, %14);
%21 = nn.relu(%20);
%22 = nn.conv2d(%21, meta[relay.Constant][10], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%23 = nn.relu(%22);
%24 = nn.conv2d(%23, meta[relay.Constant][11], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%25 = nn.relu(%24);
%26 = nn.conv2d(%25, meta[relay.Constant][12], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%27 = nn.conv2d(%21, meta[relay.Constant][13], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%28 = add(%26, %27);
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][14], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, meta[relay.Constant][15], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][16], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%35 = add(%34, %29);
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, meta[relay.Constant][17], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%38 = nn.relu(%37);
%39 = nn.conv2d(%38, meta[relay.Constant][18], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%40 = nn.relu(%39);
%41 = nn.conv2d(%40, meta[relay.Constant][19], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%42 = add(%41, %36);
%43 = nn.relu(%42);
%44 = nn.conv2d(%43, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][21], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%47 = nn.relu(%46);
%48 = nn.conv2d(%47, meta[relay.Constant][22], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%49 = add(%48, %43);
%50 = nn.relu(%49);
%51 = nn.conv2d(%50, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][24], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%54 = nn.relu(%53);
%55 = nn.conv2d(%54, meta[relay.Constant][25], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%56 = nn.conv2d(%50, meta[relay.Constant][26], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%57 = add(%55, %56);
%58 = nn.relu(%57);
%59 = nn.conv2d(%58, meta[relay.Constant][27], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%60 = nn.relu(%59);
%61 = nn.conv2d(%60, meta[relay.Constant][28], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%62 = nn.relu(%61);
%63 = nn.conv2d(%62, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%64 = add(%63, %58);
%65 = nn.relu(%64);
%66 = nn.conv2d(%65, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, meta[relay.Constant][31], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%69 = nn.relu(%68);
%70 = nn.conv2d(%69, meta[relay.Constant][32], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%71 = add(%70, %65);
%72 = nn.relu(%71);
%73 = nn.conv2d(%72, meta[relay.Constant][33], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%74 = nn.relu(%73);
%75 = nn.conv2d(%74, meta[relay.Constant][34], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%78 = add(%77, %72);
%79 = nn.relu(%78);
%80 = nn.conv2d(%79, meta[relay.Constant][36], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%81 = nn.relu(%80);
%82 = nn.conv2d(%81, meta[relay.Constant][37], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%83 = nn.relu(%82);
%84 = nn.conv2d(%83, meta[relay.Constant][38], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%85 = add(%84, %79);
%86 = nn.relu(%85);
%87 = nn.conv2d(%86, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%88 = nn.relu(%87);
%89 = nn.conv2d(%88, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%90 = nn.relu(%89);
%91 = nn.conv2d(%90, meta[relay.Constant][41], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%92 = add(%91, %86);
%93 = nn.relu(%92);
%94 = nn.conv2d(%93, meta[relay.Constant][42], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%95 = nn.relu(%94);
%96 = nn.conv2d(%95, meta[relay.Constant][43], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%97 = nn.relu(%96);
%98 = nn.conv2d(%97, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%99 = nn.conv2d(%93, meta[relay.Constant][45], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%100 = add(%98, %99);
%101 = nn.relu(%100);
%102 = nn.conv2d(%101, meta[relay.Constant][46], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][47], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%105 = nn.relu(%104);
%106 = nn.conv2d(%105, meta[relay.Constant][48], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%107 = add(%106, %101);
%108 = nn.relu(%107);
%109 = nn.conv2d(%108, meta[relay.Constant][49], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%110 = nn.relu(%109);
%111 = nn.conv2d(%110, meta[relay.Constant][50], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%112 = nn.relu(%111);
%113 = nn.conv2d(%112, meta[relay.Constant][51], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%114 = add(%113, %108);
nn.relu(%114)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "resnext50_32x4d_16",
"input_shapes": {"x": [1, 64, 56, 56]},
"input_dtypes": {"x": "float16"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}
def describe_onnx(name, filename):
"""Returns the description of the ONNX model at filename, which can be passed to from_onnx to actually load
the model. Note that ? (ie unknown) shape dimensions must be manually changed to concrete dimensions
which are consistent with the overall model."""
onnx_model = onnx.load(MODEL_PREFIX + filename)
input_shapes = {}
input_dtypes = {}
initializer_names = [n.name for n in onnx_model.graph.initializer]
for input_info in onnx_model.graph.input:
if input_info.name not in initializer_names:
_, shape, dtype, _ = tvm.relay.frontend.onnx.get_info(input_info)
if dtype is None:
raise ValueError(f"Unknown dtype on input '{input_info.name}' is not supported.")
input_shapes.update({input_info.name: shape})
input_dtypes.update({input_info.name: dtype})
print(
f"{{'name': '{name}', 'filename': '{filename}', 'input_shapes': {input_shapes}, 'input_dtypes': {input_dtypes}, 'main_dtype': 'float32'}}"
)
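# Hedged usage sketch (the model name and filename are hypothetical): calling, e.g.,
#   describe_onnx("my_model", "my_model.onnx")
# prints a dict literal with the model's name, filename, input shapes/dtypes and main dtype;
# once any '?' dimensions are replaced with concrete values it can be passed to from_onnx below.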
def from_onnx(model):
logging.info("-------------------- BEGIN ONNX IMPORT --------------------")
filename = MODEL_PREFIX + model["filename"]
logging.info(f"Loading ONNX model from {filename}")
onnx_model = onnx.load(filename)
logging.info(f"Loaded model from {filename}")
mod, params = tvm.relay.frontend.from_onnx(
onnx_model, model["input_shapes"], freeze_params=True
)
mod = tvm.relay.transform.InferType()(mod)
logging.info("-------------------- END ONNX IMPORT --------------------")
logging.info(f"Imported model:\n{mod}")
logging.info(f"Params:\n{params}")
return {
"name": model["name"],
"input_shapes": model["input_shapes"],
"input_dtypes": model["input_dtypes"],
"mod": mod,
"params": params,
"main_dtype": model["main_dtype"],
}
def to_onnx(model):
logging.info("-------------------- BEGIN ONNX EXPORT --------------------")
short_filename = model["name"] + ".onnx"
filename = MODEL_PREFIX + short_filename
logging.info(f"Saving ONNX model to {filename}")
params = model["params"]
if params is None:
params = {}
tvm.contrib.target.onnx.to_onnx(model["mod"], params, model["name"], path=filename)
logging.info("-------------------- END ONNX EXPORT --------------------")
return {
"name": model["name"],
"filename": short_filename,
"input_shapes": model["input_shapes"],
"input_dtypes": mode |
l["input_dtypes"],
"main_dtype": model["main_dtype"],
}
import tvm
import logging
import tvm.testing
logging.basicConfig(level=logging.INFO)
partition_for_testing = tvm._ffi.get_global_func("relay.collage.PartitionForTesting")
def print_with_indexes(mod):
mod = tvm.relay.transform.CapturePostDfsIndexInSpans()(mod)
print(mod)
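# print_with_indexes is a debugging aid: CapturePostDfsIndexInSpans annotates each
# sub-expression with its post-dfs index, which is how the index lists in the tests
# below refer to individual nodes.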
def run(in_mod, expected_mod, max_outputs, allow_taps, compiler, map):
expected_mod = tvm.relay.transform.InferType()(expected_mod)
in_mod = tvm.relay.transform.InferType()(in_mod)
in_mod = tvm.relay.transform.CapturePostDfsIndexInSpans()(in_mod)
indexes = [i for l, iss in map.items() for i in iss]
labels = [l for l, iss in map.items() for i in iss]
actual_mod = partition_for_testing(max_outputs, allow_taps, compiler, indexes, labels)(in_mod)
if not tvm.ir.structural_equal(actual_mod, expected_mod, True):
print("Input module:")
print(in_mod)
print("Expected module:")
print(expected_mod)
print("Actual module:")
print(actual_mod)
tvm.ir.assert_structural_equal(actual_mod, expected_mod, map_free_vars=True)
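# The `map` argument pairs a label with the post-dfs indexes of the nodes to partition,
# e.g. {"": [6, 7]} selects the nodes with indexes 6 and 7 under the default (empty)
# label; the indexes refer to the spans captured on the input module above.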
def test_single_op():
def input():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
%c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
%0 = add(%a, %b);
%1 = add(%c, %d);
subtract(%0, %1)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
%c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
%0 = add(%a, %b);
%1 = (fn(%x, %y, Compiler="foo") { add(%x, %y) })(%c, %d);
subtract(%0, %1)
}
"""
)
run(input(), expected(), 1, False, "foo", {"": [7]})
def test_multi_output():
def input():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
%c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
%0 = add(%a, %b);
%1 = add(%c, %d);
subtract(%0, %1)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
%c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
%0 = (fn(%w, %x, %y, %z, Compiler="foo") { (add(%y, %z), add(%w, %x)) })(%c, %d, %a, %b);
%1 = %0.0;
%2 = %0.1;
subtract(%1, %2)
}
"""
)
run(input(), input(), 1, False, "foo", {"": [6, 7]})
run(input(), expected(), 2, False, "foo", {"": [6, 7]})
def test_classic_conv2d_add_relu():
def input():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32],
%c: Tensor[(5, 2, 28, 28), float32], %d: Tensor[(5, 2, 28, 28), float32]) {
%0 = nn.conv2d(%a, %b);
%1 = add(%0, %c);
%2 = nn.relu(%1);
subtract(%2, %d)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32],
%c: Tensor[(5, 2, 28, 28), float32], %d: Tensor[(5, 2, 28, 28), float32]) {
%2 = (fn(%x, %y, %z, Compiler="foo") {
%0 = nn.conv2d(%x, %y);
%1 = add(%0, %z);
nn.relu(%1)
})(%a, %b, %c);
subtract(%2, %d)
}
"""
)
run(input(), expected(), 1, False, "foo", {"": [8, 9, 10]})
def test_diamond_single_output():
def input():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%0 = nn.conv2d(%a, %b, padding=[0, 0, 0, 0]);
%1 = nn.relu(%0);
%2 = nn.relu(%1);
%3 = nn.leaky_relu(%0, alpha=0f);
add(%2, %3)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
(fn (%x: Tensor[(5, 3, 32, 32), float32], %y: Tensor[(2, 3, 5, 5), float32], Compiler="foo") {
%0 = nn.conv2d(%x, %y, padding=[0, 0, 0, 0]);
%1 = nn.relu(%0);
%2 = nn.relu(%1);
%3 = nn.leaky_relu(%0, alpha=0f);
add(%2, %3)
})(%a, %b)
}
"""
)
run(input(), expected(), 1, False, "foo", {"": [5, 6, 7, 9, 10]})
def test_diamond_multi_output():
def input():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%0 = nn.conv2d(%a, %b, padding=[0, 0, 0, 0]);
%1 = nn.relu(%0);
%2 = nn.relu(%1);
%3 = nn.leaky_relu(%0, alpha=0f);
add(%2, %3)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%4 = (fn (%x: Tensor[(5, 3, 32, 32), float32], %y: Tensor[(2, 3, 5, 5), float32], Compiler="foo") {
%0 = nn.conv2d(%x, %y, padding=[0, 0, 0, 0]);
%1 = nn.relu(%0);
%2 = nn.relu(%1);
%3 = nn.leaky_relu(%0, alpha=0f);
(%2, %3)
})(%a, %b);
%5 = %4.0;
%6 = %4.1;
add(%5, %6)
}
"""
)
run(input(), expected(), 2, False, "foo", {"": [5, 6, 7, 9]})
def test_with_tap():
def input():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%0 = nn.conv2d(%a, %b, padding=[0, 0, 0, 0]);
%1 = nn.relu(%0);
add(%1, %0)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
%2 = (fn (%x, %y, Compiler="foo") {
%0 = nn.conv2d(%x, %y, padding=[0, 0, 0, 0]);
%1 = nn.relu(%0);
(%0, %1)
})(%a, %b);
%3 = %2.1;
%4 = %2.0;
add(%3, %4)
}
"""
)
run(input(), input(), 2, False, "foo", {"": [5, 6]})
run(input(), expected(), 2, True, "foo", {"": [5, 6]})
def test_no_cycles():
def input():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) {
%0 = add(%a, %b);
%1 = add(%0, %b);
add(%1, %b)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) {
(fn(%x, %y, Compiler="foo") {
%0 = add(%x, %y);
%1 = add(%0, %y);
add(%1, %y)
})(%a, %b)
}
"""
)
run(input(), input(), 2, False, "foo", {"": [3, 5]})
run(input(), expected(), 2, False, "foo", {"": [3, 4, 5]})
def test_labels_direct_connection():
def input():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32]) {
%0 = nn.relu(%a);
%1 = nn.relu(%0);
%2 = nn.relu(%1);
%3 = nn.relu(%1);
%4 = add(%2, %3);
%5 = nn.relu(%4);
%6 = nn.relu(%4);
%7 = add(%5, %6);
nn.relu(%7)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32]) {
(fn(%aa: Tensor[(5, 7), float32], Compiler="foo") {
%0 = nn.relu(%aa);
%4 = (fn(%y, Composite="a") {
%1 = nn.relu(%y);
%2 = nn.relu(%1);
%3 = nn.relu(%1);
add(%2, %3)
})(%0);
%7 = (fn(%z, Composite="b") {
%5 = nn.relu(%z);
%6 = nn.relu(%z);
add(%5, %6)
})(%4);
nn.relu(%7)
})(%a)
}
"""
)
run(input(), expected(), 1, False, "foo", {"": [3, 11], "a": [4, 5, 6, 7], "b": [8, 9, 10]})
def test_labels_nested_tap():
def input():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32]) {
%0 = nn.relu(%a);
%1 = nn.relu(%0);
%2 = nn.relu(%1);
%3 = nn.relu(%1);
%4 = add(%2, %3);
%5 = nn.relu(%4);
%6 = nn.relu(%4);
%7 = add(%5, %6);
add(%2, %7)
}
"""
)
def expected():
return tvm.parser.fromtext(
"""
def @main(%a: Tensor[(5, 7), float32]) {
%0 = nn.relu(%a);
%9 = (fn(%x: Tensor[(5, 7), float32], Compiler="foo") {
%5 = (fn(%y, Composite="a") {
%1 = nn.relu(%y);
%2 = nn.relu(%1);
%3 = nn.relu(%1);
%4 = add(%2, %3);
(%2, %4)
})(%x);
%8 = (fn(%z, Composite="b") {
%6 = nn.relu(%z);
%7 = nn.relu(%z);
add(%6, %7)
})(%5.1);
(%5.0, %8)
})(%0);
add(%9.0, %9.1)
}
"""
)
run(input(), expected(), 2, True, "foo", {"a": [4, 5, 6, 7], "b": [8, 9, 10]})
if __name__ == "__main__":
tvm.testing.main()
"""
Support level10 operator test cases.
"""
import numpy as np
import tvm
from tvm import relay
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
import random
import tvm.testing
executor_kind = tvm.testing.parameter("debug", "vm")
@tvm.testing.uses_gpu
def test_broadcast_to(executor_kind):
def verify_more_dynamic_broadcast_to(x_shape, out_shape):
rank = len(out_shape)
dtype = "float32"
shape_type = "int64"
reshape_shape = relay.Var("shape", relay.ty.TensorType((len(x_shape),), shape_type))
broadcast_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type))
x = relay.Var("x", relay.ty.TensorType((np.prod(x_shape),), dtype))
r = relay.reshape(x, reshape_shape)
z = relay.broadcast_to(r, broadcast_shape)
func = relay.Function([x, reshape_shape, broadcast_shape], z)
x = np.random.uniform(size=np.prod(x_shape)).astype(dtype)
ref_res = np.broadcast_to(np.reshape(x, x_shape), out_shape)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate(func)(
x, np.array(x_shape).astype(shape_type), np.array(out_shape).astype(shape_type)
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_more_dynamic_broadcast_to((4, 3), (3, 4, 3))
def verify_broadcast_to(x_shape, out_shape):
rank = len(out_shape)
dtype = "float32"
shape_type = "int64"
dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type))
x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
z = relay.broadcast_to(x, dyn_shape)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype)
func = relay.Function([x, dyn_shape], z)
x = np.random.uniform(size=x_shape).astype(dtype)
ref_res = np.broadcast_to(x, out_shape)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate(func)(x, np.array(out_shape).astype(shape_type))
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_broadcast_to((1,), (1, 1, 1))
verify_broadcast_to((1, 1), (4, 1, 1))
verify_broadcast_to((4, 1), (1, 4, 3))
@tvm.testing.uses_gpu
def test_dyn_broadcast_to(executor_kind):
dtype = "uint8"
rank = 3
shape_type = "int64"
dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type))
x_shape = (1,)
x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
z = relay.broadcast_to(x, dyn_shape)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype)
func = relay.Function([x, dyn_shape], z)
x = np.random.uniform(size=x_shape).astype(dtype)
dyn_shape = (1,) * rank
ref_res = np.broadcast_to(x, dyn_shape)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(executor_kind, mod=mod, device=dev, target=target).evaluate(
func
)(x, np.array(dyn_shape).astype(shape_type))
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_dyn_one_hot(executor_kind):
def _get_oshape(indices_shape, depth, axis):
oshape = []
true_axis = len(indices_shape) if axis == -1 else axis
ndim = len(indices_shape) + 1
indices_index = 0
for i in range(0, ndim):
if i == true_axis:
oshape.append(depth)
else:
oshape.append(indices_shape[indices_index])
indices_index += 1
return oshape
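# Illustrative example: _get_oshape((3, 2), depth=5, axis=-1) yields [3, 2, 5]; the depth
# dimension is inserted at the resolved axis position of the output rank.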
def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
depth_var = relay.var("depth", relay.TensorType((), "int32"))
on_value_const = relay.const(on_value)
off_value_const = relay.const(off_value)
out = relay.one_hot(indices, on_value_const, off_value_const, depth_var, axis, dtype)
func = relay.Function([indices, depth_var], out)
indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
out_relay = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(indices_np, np.array(depth).astype("int32"))
tvm.testing.assert_allclose(out_relay.numpy(), out_np)
_verify((3,), 3, 1, 0, -1, "int32")
_verify((3,), 3, 1.0, 0.0, -1, "float32")
_verify((2, 2), 5, 2, -2, 0, "int32")
_verify((2, 2), 5, 0.5, -0.5, 1, "float32")
_verify((3, 2, 4, 5), 6, 1, 0, 1, "int32")
_verify((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
if __name__ == "__main__":
tvm.testing.main()
""" Support level2 dynamic operator test cases.
"""
import numpy as np
import tvm
from tvm import relay
from tvm import te
from tvm.relay.testing import enabled_targets
import random
from test_dynamic_op_level3 import verify_func
import tvm.topi.testing
from tvm.relay.testing import run_infer_type
executor_kind = tvm.testing.parameter("debug", "vm")
@tvm.testing.uses_gpu
def test_dyn_upsampling_run(executor_kind):
def verify_upsampling(dshape, scale_h, scale_w, layout, method, align_corners=False):
if layout == "NCHW":
(n, c, h, w) = dshape
x_data = np.random.uniform(size=(n, c, h, w)).astype("float32")
elif layout == "NHWC":
(n, h, w, c) = dshape
x_data = np.random.uniform(size=(n, h, w, c)).astype("float32")
ref_res = tvm.topi.testing.resize2d_python(
x_data,
(scale_h, scale_w),
layout,
method[2:] if method[0:2] == "bi" else method,
"align_corners" if align_corners else "asymmetric",
)
x = relay.Var("x", relay.TensorType(dshape, "float32"))
scale_h_var = relay.var("scale_h", relay.TensorType((), "float32"))
scale_w_var = relay.var("scale_h", relay.TensorType((), "float32"))
z = relay.nn.upsampling(
x, scale_h_var, scale_w_var, method=method, layout=layout, align_corners=align_corners
)
zz = run_infer_type(z)
func = relay.Function([x, scale_h_var, scale_w_var], z)
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(
x_data, np.array(scale_h).astype("float32"), np.array(scale_w).astype("float32")
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
verify_upsampling((1, 16, 32, 32), 3, 2.0, "NCHW", "nearest_neighbor")
verify_upsampling((1, 16, 32, 32), 5, 2.0, "NCHW", "bilinear", True)
verify_upsampling((1, 16, 32, 32), 2.0, 6, "NHWC", "nearest_neighbor")
verify_upsampling((1, 16, 32, 32), 2.0, 2.0, "NHWC", "bilinear", True)
@tvm.testing.uses_gpu
def test_dyn_upsampling_infer_type_const():
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
data = relay.var("data", relay.TensorType((n, c, h, w), "int8"))
scale_w = relay.Var("scale_w", relay.TensorType((), "float32"))
z = relay.nn.upsampling(data, 2.0, scale_w)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, c, relay.Any(), relay.Any()), "int8")
@tvm.testing.uses_gpu
def test_dyn_upsampling3d_run(executor_kind):
def verify_upsampling3d(
dshape, scale_d, scale_h, scale_w, layout, method, coord_trans="asymmetric"
):
if layout == "NCDHW":
(n, c, d, h, w) = dshape
x_data = np.random.uniform(size=(n, c, d, h, w)).astype("float32")
elif layout == "NDHWC":
(n, d, h, w, c) = dshape
x_data = np.random.uniform(size=(n, d, h, w, c)).astype("float32")
ref_res = tvm.topi.testing.resize3d_python(
x_data,
(scale_d, scale_h, scale_w),
layout,
method[3:] if method[0:3] == "tri" else method,
coord_trans,
)
x = relay.Var("x", relay.TensorType(dshape, "float32"))
scale_d_var = relay.var("scale_d", relay.TensorType((), "float32"))
scale_h_var = relay.var("scale_h", relay.TensorType((), "float32"))
scale_w_var = relay.var("scale_h", relay.TensorType((), "float32"))
z = relay.nn.upsampling3d(
x,
scale_d_var,
scale_h_var,
scale_w_var,
method=method,
layout=layout,
coordinate_transformation_mode=coord_trans,
)
zz = run_infer_type(z)
func = relay.Function([x, scale_d_var, scale_h_var, scale_w_var], z)
for target, dev in enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(
x_data,
np.array(scale_d).astype("float |
32"),
np.array(scale_h).astype("float32"),
np.array(scale_w).astype("float32"),
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
verify_upsampling3d((1, 1, 1, 1, 1), 2, 3, 4, "NCDHW", "nearest_neighbor")
verify_upsampling3d((1, 8, 16, 16, 16), 2.0, 3.0, 4.0, "NCDHW", "nearest_neighbor")
verify_upsampling3d((1, 8, 16, 16, 16), 2.0, 5.0, 1.0, "NCDHW", "trilinear", "align_corners")
verify_upsampling3d((1, 20, 3, 4, 16), 2.0, 2.0, 2.0, "NDHWC", "nearest_neighbor")
verify_upsampling3d((1, 8, 4, 16, 15), 2.0, 2.0, 2.0, "NDHWC", "trilinear", "align_corners")
def test_dyn_upsampling3d_infer_type_const():
n, c, d, h, w = (
te.size_var("n"),
te.size_var("c"),
te.size_var("d"),
te.size_var("h"),
te.size_var("w"),
)
data = relay.var("data", relay.TensorType((n, c, d, h, w), "int8"))
scale_d = relay.Var("scale_h", relay.TensorType((), "float32"))
scale_w = relay.Var("scale_w", relay.TensorType((), "float32"))
z = relay.nn.upsampling3d(data, scale_d, 2.0, scale_w, layout="NCDHW", method="trilinear")
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(
(n, c, relay.Any(), relay.Any(), relay.Any()), "int8"
)
@tvm.testing.uses_gpu
def test_dyn_pad(executor_kind):
def verify_pad(dshape, pad_width, pad_val, dtype):
x = relay.var("x", relay.TensorType(dshape, dtype))
ndim = len(dshape)
pad_width_var = relay.var("pad_width_var", relay.TensorType((ndim, 2), "int64"))
pad_val_var = relay.var("pad_val_var", relay.TensorType((), dtype))
y = relay.nn.pad(x, pad_width_var, pad_val_var)
yy = run_infer_type(y)
assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * ndim, dtype)
func = relay.Function([x, pad_width_var, pad_val_var], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = np.pad(data, pad_width, "constant", constant_value |
s=(((pad_val,) * 2),) * ndim)
pad_width = np.array(pad_width).astype("int64")
verify_func(
executor_kind, func, [data, pad_width, np.array(pad_val).astype(dtype)], ref_res
)
def verify_pad_default_fill(dshape, pad_width, dtype):
x = relay.var("x", relay.TensorType(dshape, dtype))
ndim = len(dshape)
pad_width_var = relay.var("pad_width_var", relay.TensorType((ndim, 2), "int64"))
y = relay.nn.pad(x, pad_width_var)
yy = run_infer_type(y)
assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * ndim, dtype)
func = relay.Function([x, pad_width_var], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = np.pad(data, pad_width)
pad_width = np.array(pad_width).astype("int64")
verify_func(executor_kind, func, [data, pad_width], ref_res)
verify_pad((4, 10, 7, 7), ((1, 1), (2, 2), (3, 3), (4, 4)), 2.0, "int32")
verify_pad((2, 7), ((1, 4), (2, 2)), 4.0, "float64")
verify_pad_default_fill((4, 10, 7, 7), ((1, 1), (2, 2), (3, 3), (4, 4)), "float64")
verify_pad_default_fill((2, 7), ((1, 4), (2, 2)), "int32")
if __name__ == "__main__":
tvm.testing.main()
""" Support level3 operator test cases.
"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.relay.testing import check_grad, run_infer_type
executor_kind = tvm.testing.parameter("debug", "vm")
def verify_func(executor_kind, func, data, ref_res, target_device=tvm.testing.enabled_targets()):
assert isinstance(data, list)
for target, dev in target_device:
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(*data)
if isinstance(op_res, tvm.runtime.container.ADT):
assert len(op_res) == len(
ref_res
), "Outputs from TVM and Python implementation must be equal "
for op_result, ref_result in zip(op_res, ref_res):
tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=1e-5)
else:
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
relay.backend.te_compiler.get().clear()
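# verify_func wraps `func` in an IRModule, evaluates it with the requested executor on every
# enabled target, and compares the (possibly tuple-valued) result with the NumPy reference;
# the te_compiler cache is cleared afterwards, presumably to avoid reusing lowered functions
# across runs.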
def check_on_vm(target, dev, args, expected_result, mod):
"""
Check that evaluating `expr` applied to the arguments produces
`result` on Relay VM.
"""
rts_result = relay.create_executor("vm", device=dev, target=target, mod=mod).evaluate()(*args)
tvm.testing.assert_allclose(expected_result, rts_result.numpy())
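# check_on_vm is used by the dynamic copy/cast tests below, which parse a small Relay
# module from text and compare the VM result against a NumPy reference.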
@tvm.testing.uses_gpu
def test_dyn_reshape(executor_kind):
def verify_reshape(shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType((len(newshape),), "int64"))
z = relay.reshape(x, y)
func = relay.Function([x, y], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
x_data = np.ones(shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
check_grad(
run_infer_type(func),
inputs=[x_data, np.array(newshape).astype("int64")],
test_inputs=[x_data],
eps=1e-3,
)
verify_func(executor_kind, func, [x_data, np.array(newshape).astype("int64")], ref_res)
verify_reshape((2, 3, 4), (8, 3), (8, 3))
verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))
verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))
verify_reshape((2, 3, 4), (0, -1), (2, 12))
verify_reshape((2, 3, 4), (-1, 0), (8, 3))
verify_reshape((2, 3, 4), (-3, 4), (6, 4))
verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))
verify_reshape((2, 3, 4), (0, -3), (2, 12))
@tvm.testing.uses_gpu
def test_dyn_shape_reshape(executor_kind):
def verify_reshape(shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("y", relay.TensorType(newshape, "float32"))
z = relay.reshape(x, relay.shape_of(y))
func = relay.Function([x, y], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
ref_res = np.reshape(x_data, oshape)
check_grad(run_infer_type(func), inputs=[x_data, y_data], eps=1e-3)
verify_func(executor_kind, func, [x_data, y_data], ref_res)
verify_reshape((2, 3, 4), (8, 3), (8, 3))
verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
def test_squeeze(executor_kind):
def verify_squeeze(shape, dtype, axis):
x = relay.var("x", relay.TensorType(shape, dtype))
assert axis is not None
np_axis = tuple(axis)
axis = relay.var("axis", relay.TensorType([len(axis)], "int64"))
squeeze = relay.squeeze(x, axis=axis)
func = relay.Function([x, axis], squeeze)
x_data = np.random.random_sample(shape).astype(dtype)
ref_res = np.squeeze(x_data, axis=np_axis)
verify_func(executor_kind, func, [x_data, np.array(np_axis).astype("int64")], ref_res)
verify_squeeze((1, 3, 1), "float32", [0])
verify_squeeze((1, 2, 1, 2, 1), "float32", [0, 2])
@tvm.testing.uses_gpu
def test_dyn_expand_dims(executor_kind):
def verify_expand_dims(
dshape, dtype, oshape, axis, num_newaxis, target_device=tvm.testing.enabled_targets()
):
x = relay.Var("x", relay.TensorType(dshape, dtype))
y = relay.var("axis", shape=[], dtype="int64")
z = relay.expand_dims(x, axis=y, num_newaxis=num_newaxis)
func = relay.Function([x, y], z)
data_np = np.random.uniform(size=dshape).astype(dtype)
axis_np = np.array(axis).astype("int64")
ref_res = data_np.reshape(oshape)
verify_func(executor_kind, func, [data_np, axis_np], ref_res, target_device=target_device)
for dtype in ["float16", "float32"]:
verify_expand_dims((2, 2), dtype, (2, 2, 1), 2, 1)
verify_expand_dims((2, 2), dtype, (2, 1, 2), 1, 1)
verify_expand_dims((2, 2), dtype, (1, 2, 2), 0, 1)
llvm_target_only = [x for x in tvm.testing.enabled_targets() if "llvm" in x]
verify_expand_dims((2, 2), dtype, (2, 2, 1, 1), 2, 2, target_device=llvm_target_only)
verify_expand_dims((2, 2), dtype, (2, 1, 1, 1, 2), 1, 3, target_device=llvm_target_only)
verify_expand_dims((2, 2), dtype, (1, 1, 1, 1, 2, 2), 0, 4, target_device=llvm_target_only)
@tvm.testing.uses_gpu
def test_dyn_tile(executor_kind):
def verify_tile(dshape, reps):
x = relay.var("x", relay.TensorType(dshape, "float32"))
r = relay.var("reps", relay.TensorType((len(reps),), "float32"))
z = relay.tile(x, r)
func = relay.Function([x, r], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.tile(x_data, reps=reps)
reps_data = np.array(reps).astype("float32")
verify_func(executor_kind, func, [x_data, np.array(reps).astype("float32")], ref_res)
verify_tile((2, 3, 4), (3, 2, 1))
verify_tile((2, 3, 4), (1, 2))
verify_tile((2, 3), (3, 2, 1))
@tvm.testing.uses_gpu
def test_dyn_zeros_ones(executor_kind):
def verify_zeros_ones(shape, dtype):
for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
rank = len(shape)
dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), "int64"))
y = op(dyn_shape, dtype)
yy = run_infer_type(y)
assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype)
func = relay.Function([dyn_shape], y)
ref_res = ref(shape, dtype)
verify_func(
executor_kind, func, [np.array(shape).astype("int64")], ref_res.astype("int64")
)
verify_zeros_ones((1, 3), "int64")
verify_zeros_ones((8, 9, 1, 2), "float32")
@tvm.testing.uses_gpu
def test_dyn_full(executor_kind):
def verify_full(fill_value, src_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
rank = len(src_shape)
dyn_src_shape = relay.var("dyn_scr_shape", relay.ty.TensorType((rank,), "int64"))
z = relay.full(x, dyn_src_shape, dtype)
func = relay.Function([x, dyn_src_shape], z)
ref_res = np.full(src_shape, fill_value).astype(dtype)
verify_func(
executor_kind,
func,
[np.array(fill_value).astype(dtype), np.array(src_shape).astype("int64")],
ref_res,
)
verify_full(4, (1, 3, 4, 4), "int32")
verify_full(4, (1, 3, 4, 4), "int64")
verify_full(4.0, (2, 50), "float32")
@tvm.testing.uses_gpu
def test_dyn_sparse_to_dense(executor_kind):
def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
default_value_data = np.array(default_value)
output_shape_data = np.array(output_shape)
a = relay.var(
"a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype))
)
b = relay.var(
"b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype))
)
output_shape_var = relay.var(
"output_shape", relay.TensorType(output_shape_data.shape, str(output_shape |
_data.dtype))
)
if default_value is None:
args = [a, b, output_shape_var]
d = relay.sparse_to_dense(a, output_shape_var, b)
else:
c = relay.var(
"c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype))
)
args = [a, b, c, output_shape_var]
d = relay.sparse_to_dense(a, output_shape_var, b, c)
zz = run_infer_type(d)
assert len(zz.checked_type.shape) == len(output_shape)
func = relay.Function(args, d)
if default_value is None:
arguments = [sparse_indices_data, sparse_values_data, output_shape_data]
else:
arguments = [
sparse_indices_data,
sparse_values_data,
default_value_data,
output_shape_data,
]
verify_func(executor_kind, func, arguments, xpected)
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0])
verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3])
verify_sparse_to_dense(
[[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
)
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
)
verify_sparse_to_dense(
[0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
)
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])
@pytest.mark.parametrize(
"sparse_indices, sparse_values, dense_shape, default_value",
[
(
np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([5, 6], dtype=np.int64),
np.array([10], dtype=np.int64),
),
(
np.array([[1, 1, 1], [1, 3, 1], [2, 0, 5], [3, 1, 6]], dtype=np.int64),
np.array([1, 2, 3, 4], dtype=np.int64),
np.array([7, 7, 7], dtype=np.int64),
np.array([5], dtype=np.int64),
),
(
np.array([[1], [2]], dtype=np.int64),
np.array([7, 8], dtype=np.int64),
np.array([5], dtype=np.int64),
np.array([4], dtype=np.int64),
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([5], dtype=np.int64),
np.array([4], dtype=np.int64),
),
(
np.ones((0, 3), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([9, 3, 7], dtype=np.int64),
np.array([100], dtype=np.int64),
),
],
)
@pytest.mark.parametrize("dtype", [np.int64, np.int32])
@pytest.mark.parametrize("use_dyn", [True, False])
def test_sparse_fill_empty_rows(
sparse_indices, sparse_values, dense_shape, default_value, dtype, use_dyn, executor_kind
):
def ref_sparse_fill_empty_rows(
sparse_indices: np.ndarray,
sparse_values: np.ndarray,
dense_shape: np.ndarray,
default_value: np.ndarray,
) -> None:
"""
This function calculates the expected output of sparse_fill_empty_rows operator given the
inputs.
"""
def check_add_rows(current_idx, limit_idx):
while current_idx < limit_idx:
new_sparse_indices.append([current_idx] + [0] * (num_cols - 1))
new_sparse_values.append(default_value[0])
empty_row_indicator[current_idx] = True
current_idx += 1
return current_idx
current_idx = 0
new_sparse_indices = []
new_sparse_values = []
empty_row_indicator = [False for _ in range(dense_shape[0])]
num_cols = sparse_indices.shape[1]
for sparse_row, sparse_value in zip(sparse_indices, sparse_values):
limit_idx = sparse_row[0]
current_idx = check_add_rows(current_idx, limit_idx)
new_sparse_indices.append(list(sparse_row))
new_sparse_values.append(sparse_value)
current_idx = limit_idx + 1
check_add_rows(current_idx, dense_shape[0])
return new_sparse_indices, new_sparse_values, empty_row_indicator
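# Illustrative example (first parametrization above): for sparse_indices=[[0, 1], [0, 3],
# [2, 0], [3, 1]], dense_shape=[5, 6] and default_value=[10], rows 1 and 4 are empty, so
# [1, 0] and [4, 0] are appended with value 10 and empty_row_indicator becomes
# [False, True, False, False, True].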
def verify_sparse_fill_empty_rows(
sparse_indices_np: np.ndarray,
sparse_values_np: np.ndarray,
dense_shape_np: np.ndarray,
default_value_np: np.ndarray,
) -> None:
"""
This function verifies the relay output of sparse_fill_empty_rows with its expected output.
"""
if use_dyn:
sparse_indices = relay.var(
"sparse_indices",
shape=[relay.Any(), relay.Any()],
dtype=str(sparse_indices_np.dtype),
)
sparse_values = relay.var(
"sparse_values",
shape=[relay.Any()],
dtype=str(sparse_values_np.dtype),
)
dense_shape = relay.var(
"dense_shape",
shape=[relay.Any()],
dtype=str(dense_shape_np.dtype),
)
default_value = relay.var(
"default_value",
shape=[relay.Any()],
dtype=str(default_value_np.dtype),
)
else:
sparse_indices = relay.var(
"sparse_indices",
relay.TensorType(sparse_indices_np.shape, str(sparse_indices_np.dtype)),
)
sparse_values = relay.var(
"sparse_values",
relay.TensorType(sparse_values_np.shape, str(sparse_values_np.dtype)),
)
dense_shape = relay.var(
"dense_shape",
relay.TensorType(dense_shape_np.shape, str(dense_shape_np.dtype)),
)
default_value = relay.var(
"default_value",
relay.TensorType(default_value_np.shape, str(default_value_np.dtype)),
)
z = relay.sparse_fill_empty_rows(sparse_indices, sparse_values, dense_shape, default_value)
func = relay.Function([sparse_indices, sparse_values, dense_shape, default_value], z)
ref_res = ref_sparse_fill_empty_rows(
sparse_indices_np,
sparse_values_np,
dense_shape_np,
default_value_np,
)
(
new_sparse_indices_infer_type,
new_sparse_values_infer_type,
empty_row_indicator_infer_type,
) = run_infer_type(z)
assert new_sparse_indices_infer_type.checked_type.dtype == sparse_indices_np.dtype
assert new_sparse_values_infer_type.checked_type.dtype == sparse_indices_np.dtype
assert empty_row_indicator_infer_type.checked_type.dtype == "bool"
verify_func(
executor_kind,
func,
[sparse_indices_np, sparse_values_np, dense_shape_np, default_value_np],
ref_res,
[("llvm", tvm.cpu())],
)
verify_sparse_fill_empty_rows(
sparse_indices.astype(dtype),
sparse_values.astype(dtype),
dense_shape.astype(dtype),
default_value.astype(dtype),
)
def test_dyn_copy():
target = tvm.target.Target("llvm")
dev = tvm.cpu()
mod = tvm.parser.fromtext(
"""
def @main(%x: Tensor[(?, 3), int64]) -> Tensor[(?, 3), int64] {
copy(%x)
}
"""
)
x_data = np.random.rand(15, 3).astype("int64")
expected = x_data
check_on_vm(target, dev, [x_data], expected, mod)
def test_dyn_copy_scalar():
target = tvm.target.Target("llvm")
dev = tvm.cpu()
mod = tvm.parser.fromtext(
"""
def @main(%x: int32, %y: Tensor[(?), int32]) -> Tensor[(?), int32] {
%0 = copy(%x);
%1 = expand_dims(%0, axis=0);
%2 = (%y, %1);
concatenate(%2)
}
"""
)
x_data = 3
y_data = np.random.rand(7).astype("int32")
expected = np.concatenate((y_data, np.expand_dims(x_data, axis=0)))
check_on_vm(target, dev, [x_data, y_data], expected, mod)
def test_dyn_cast():
target = tvm.target.Target("llvm")
dev = tvm.cpu()
mod = tvm.parser.fromtext(
"""
def @main(%x: Tensor[(?, 3), int64]) -> Tensor[(?, 3), int32] {
cast(%x, dtype="int32")
}
"""
)
x_data = np.random.rand(15, 3).astype("int64")
expected = x_data.astype("int32")
check_on_vm(target, dev, [x_data], expected, mod)
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
@tvm.testing.uses_gpu
def test_dynamic_strided_slice():
def verify(dshape, begin, end, strides, slice_mode="end", test_ref=True, dtype="int32"):
x = relay.var("x", relay.TensorType(dshape, "float32"))
ndim = len(dshape)
slice_dim = len(begin)
begin = begin if begin else [0] * ndim
end = end if end else list(dshape)[:slice_dim]
if strides:
if len(strides) == 1:
strides = strides * slice_dim
else:
strides = [1] * slice_dim
num_static_axes = len(dshape) - len(begin)
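# Axes not covered by begin/end remain static; the type-inference check below asserts that
# their extents survive in the inferred output shape.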
x_data = np.random.uniform(size=dshape).astype("float32")
ref_res = tvm.topi.testing.strided_slice_python(x_data, begin, end, strides, slice_mode)
data = [x_data, np.array(begin, dtype=dtype), np.array(end, dtype=dtype)]
begin = relay.var("begin", shape=[len(begin)], dtype=dtype)
end = relay.var("end", shape=[len(end)], dtype=dtype)
inputs = [x, begin, end]
if strides:
data.append(np.array(strides, dtype=dtype))
strides = relay.var("strides", shape=[len(strides)], dtype=dtype)
inputs.append(strides)
z = relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=slice_mode)
else:
z = relay.strided_slice(x, begin=begin, end=end, slice_mode=slice_mode)
func = relay.Function(inputs, z)
func = run_infer_type(func)
if num_static_axes > 0:
oshape = run_infer_type(z).checked_type.shape
assert tuple(oshape[-num_static_axes:]) == dshape[-num_static_axes:]
if not test_ref:
return
for target, dev in tvm.testing.enabled_targets():
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(
*data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res)
verify(
(1, 224, 224, 3),
[0, 20, 20, 0],
[1, 140, 140, 3],
[1, 1, 1, 1],
dtype="int64",
)
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], dtype="int16")
verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None)
verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None)
verify((3, 4, 3), [1, 1, 0], [4, 4, 4], None)
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None)
verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
verify((20, 10, 5), [20, 10, 4], [0, 0, 1], [-1, -3, -2])
verify((3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], slice_mode="size", test_ref=False)
verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], slice_mode="size", test_ref=True)
verify((3, 4, 3), [0], [2], None)
verify((3, 4, 3), [1], [4], [2])
verify((3, 4, 3), [1, 0], [4, 2], [2, 1])
if __name__ == "__main__":
test_dynamic_strided_slice()
""" Support level5 operator test cases.
"""
import math
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay