---
library_name: keras
tags:
- keras
pipeline_tag: image-to-text
---

```python
API_URL = "https://api-inference.huggingface.co/models/CIS-5190-CIA/Ensamble"

from huggingface_hub import InferenceClient

client = InferenceClient(
    "CIS-5190-CIA/Ensamble",
    token="TOKEN HERE",
)
```

## How to Run

In the notebook Run_ensamble.ipynb, replace the line:

```python
dataset_test = load_dataset("gydou/released_img")
```

with the proper location of the testing dataset.

## Training Dataset Statistics

```python
lat_mean = 39.95173281562989
lat_std = 0.0006925131397316982
lon_mean = -75.19143805846498
lon_std = 0.0006552266653111098
```
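These statistics are used to standardize the latitude/longitude targets and to map model outputs back to real coordinates, as done in `ensemble_predict` below. The following is a minimal sketch of that transform; the helper names are illustrative and not part of the released code:

```python
import numpy as np

lat_mean, lat_std = 39.95173281562989, 0.0006925131397316982
lon_mean, lon_std = -75.19143805846498, 0.0006552266653111098

def normalize_coords(lat, lon):
    # Standardize a raw (lat, lon) pair the same way the training targets were standardized.
    return (lat - lat_mean) / lat_std, (lon - lon_mean) / lon_std

def denormalize_coords(lat_norm, lon_norm):
    # Map normalized model outputs back to real-world coordinates.
    return lat_norm * lat_std + lat_mean, lon_norm * lon_std + lon_mean
```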
## Helper Functions to Predict from & Evaluate the Ensemble

These functions let you use the ensemble to generate predictions and evaluate the model. They take the following parameters:

- models: a dictionary of the models, in the format of:

```python
models = {
    "CNNModel1": CNNModel1(num_outputs=2).to(device),
    "CNNModel2": CNNModel2(num_outputs=2).to(device),
    "CNNModel3": CNNModel3(num_outputs=2).to(device),
}
```

- dataloader: the data loader provided for the project (it should yield batches of images and normalized targets)
- lat_mean, lon_mean, lat_std, lon_std: the normalization statistics listed above

```python
import numpy as np
import torch
from geopy.distance import geodesic

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def ensemble_predict(models, dataloader, lat_mean, lon_mean, lat_std, lon_std):
    model_outputs = []
    for model_name, model in models.items():
        model.eval()
        outputs = []
        with torch.no_grad():
            for images, _ in dataloader:
                images = images.to(device)
                outputs.append(model(images))
        model_outputs.append(torch.cat(outputs, dim=0))

    # average the predictions across all models
    ensemble_output = torch.stack(model_outputs, dim=0).mean(dim=0)

    # denormalize the ensemble predictions
    ensemble_output_denorm = ensemble_output.cpu().numpy() * np.array([lat_std, lon_std]) + np.array([lat_mean, lon_mean])
    return ensemble_output_denorm


# evaluate the ensemble with the geodesic distance
def evaluate_ensemble(models, dataloader, lat_mean, lon_mean, lat_std, lon_std):
    ensemble_outputs = ensemble_predict(models, dataloader, lat_mean, lon_mean, lat_std, lon_std)

    all_targets = []
    for _, targets in dataloader:
        all_targets.append(targets)
    all_targets = torch.cat(all_targets, dim=0).cpu().numpy()
    all_targets_denorm = all_targets * np.array([lat_std, lon_std]) + np.array([lat_mean, lon_mean])

    total_samples = all_targets_denorm.shape[0]
    ensemble_loss = 0.0

    # compute geodesic distance metrics
    for pred, actual in zip(ensemble_outputs, all_targets_denorm):
        distance = geodesic((actual[0], actual[1]), (pred[0], pred[1])).meters
        ensemble_loss += distance ** 2

    ensemble_loss /= total_samples
    ensemble_rmse = np.sqrt(ensemble_loss)
    return ensemble_loss, ensemble_rmse
```
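A minimal usage sketch, assuming the three architectures defined below have been instantiated and loaded with trained weights, and that `dataloader` has already been built from the test set. The `.pth` checkpoint paths are placeholders, not files guaranteed to ship with this repository:

```python
models = {
    "CNNModel1": CNNModel1(num_outputs=2).to(device),
    "CNNModel2": CNNModel2(num_outputs=2).to(device),
    "CNNModel3": CNNModel3(num_outputs=2).to(device),
}

# Placeholder checkpoint paths; substitute the actual weight files.
for name, model in models.items():
    model.load_state_dict(torch.load(f"{name}.pth", map_location=device))

ensemble_loss, ensemble_rmse = evaluate_ensemble(
    models, dataloader, lat_mean, lon_mean, lat_std, lon_std
)
print(f"Ensemble mean squared geodesic distance (m^2): {ensemble_loss:.2f}")
print(f"Ensemble RMSE (m): {ensemble_rmse:.2f}")
```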
# Our Custom Models for the Ensemble

We used the following three model architectures and then averaged their predictions to produce the ensemble output.

## Model 1:

```python
import torch.nn as nn


class CNNModel1(nn.Module):
    def __init__(self, num_outputs=2):
        super(CNNModel1, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.BatchNorm2d(192),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_outputs)
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
```

## Model 2:

```python
import torch.nn as nn


class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample

    def forward(self, x):
        identity = x
        if self.downsample:
            identity = self.downsample(x)

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += identity
        out = self.relu(out)
        return out


class CNNModel2(nn.Module):
    def __init__(self, num_outputs=2):
        super(CNNModel2, self).__init__()
        self.in_channels = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(64, 2, stride=1)
        self.layer2 = self._make_layer(128, 2, stride=2)
        self.layer3 = self._make_layer(256, 2, stride=2)
        self.layer4 = self._make_layer(512, 2, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, num_outputs)

    def _make_layer(self, out_channels, blocks, stride):
        downsample = None
        if stride != 1 or self.in_channels != out_channels:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channels, out_channels, kernel_size=1, stride=stride),
                nn.BatchNorm2d(out_channels)
            )
        layers = []
        layers.append(ResidualBlock(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for _ in range(1, blocks):
            layers.append(ResidualBlock(out_channels, out_channels))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
```

## Model 3:

```python
import torch
import torch.nn as nn


class InceptionModule(nn.Module):
    def __init__(self, in_channels, ch1x1, ch3x3_reduce, ch3x3, ch5x5_reduce, ch5x5, pool_proj):
        super(InceptionModule, self).__init__()
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_channels, ch1x1, kernel_size=1),
            nn.ReLU(inplace=True)
        )
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_channels, ch3x3_reduce, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch3x3_reduce, ch3x3, kernel_size=3, padding=1),
            nn.ReLU(inplace=True)
        )
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_channels, ch5x5_reduce, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch5x5_reduce, ch5x5, kernel_size=5, padding=2),
            nn.ReLU(inplace=True)
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_channels, pool_proj, kernel_size=1),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        branch3 = self.branch3(x)
        branch4 = self.branch4(x)
        outputs = torch.cat([branch1, branch2, branch3, branch4], 1)
        return outputs


class CNNModel3(nn.Module):
    def __init__(self, num_outputs=2):
        super(CNNModel3, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2)
        self.conv2 = nn.Conv2d(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(3, stride=2)
        self.inception3a = InceptionModule(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = InceptionModule(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(3, stride=2)
        self.inception4a = InceptionModule(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = InceptionModule(512, 160, 112, 224, 24, 64, 64)
        self.maxpool4 = nn.MaxPool2d(3, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.4)
        self.fc = nn.Linear(512, num_outputs)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.inception3a(x)
        x = self.inception3b(x)
        x = self.maxpool3(x)
        x = self.inception4a(x)
        x = self.inception4b(x)
        x = self.maxpool4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.dropout(x)
        x = self.fc(x)
        return x
```
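As a quick sanity check (illustrative, not part of the released notebook), each model should map a batch of RGB images to a `(batch_size, 2)` tensor of normalized (lat, lon) predictions, which is the shape `ensemble_predict` expects. Note that CNNModel1's first fully connected layer assumes a 256 × 6 × 6 feature map, which corresponds to 224 × 224 inputs; the other two models adapt to the spatial size via `AdaptiveAvgPool2d`. The sketch below reuses the `models` dictionary from the evaluation example above:

```python
import torch

# Dummy batch of four 224 x 224 RGB images.
dummy = torch.randn(4, 3, 224, 224).to(device)

for name, model in models.items():
    model.eval()
    with torch.no_grad():
        out = model(dummy)
    print(name, tuple(out.shape))  # expected: (4, 2)
```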