repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
gbm-bench | gbm-bench-master/runme.py | #!/usr/bin/env python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import argparse
import json
import ast
import psutil
import algorithms
from metrics import get_metrics
from datasets import prepare_dataset
def get_number_processors(args):
if args.cpus == 0:
return psutil.cpu_count(logical=False)
return args.cpus
def print_sys_info(args):
try:
import xgboost # pylint: disable=import-outside-toplevel
print("Xgboost : %s" % xgboost.__version__)
except ImportError:
pass
try:
import lightgbm # pylint: disable=import-outside-toplevel
print("LightGBM: %s" % lightgbm.__version__)
except (ImportError, OSError):
pass
try:
import catboost # pylint: disable=import-outside-toplevel
print("Catboost: %s" % catboost.__version__)
except ImportError:
pass
print("System : %s" % sys.version)
print("#jobs : %d" % args.cpus)
def parse_args():
parser = argparse.ArgumentParser(
description="Benchmark xgboost/lightgbm/catboost on real datasets")
parser.add_argument("-dataset", default="all", type=str,
help="The dataset to be used for benchmarking. 'all' for all datasets.")
parser.add_argument("-root", default="/opt/gbm-datasets",
type=str, help="The root datasets folder")
parser.add_argument("-algorithm", default="all", type=str,
help=("Comma-separated list of algorithms to run; "
"'all' run all"))
parser.add_argument("-gpus", default=-1, type=int,
help=("#GPUs to use for the benchmarks; "
"ignored when not supported. Default is to use all."))
parser.add_argument("-cpus", default=0, type=int,
help=("#CPUs to use for the benchmarks; "
"0 means psutil.cpu_count(logical=False)"))
parser.add_argument("-output", default=sys.path[0] + "/results.json", type=str,
help="Output json file with runtime/accuracy stats")
parser.add_argument("-ntrees", default=500, type=int,
help=("Number of trees. Default is as specified in "
"the respective dataset configuration"))
parser.add_argument("-nrows", default=None, type=int,
help=(
"Subset of rows in the datasets to use. Useful for test running "
"benchmarks on small amounts of data. WARNING: Some datasets will "
"give incorrect accuracy results if nrows is specified as they have "
"predefined train/test splits."))
parser.add_argument("-warmup", action="store_true",
help=("Whether to run a small benchmark (fraud) as a warmup"))
parser.add_argument("-verbose", action="store_true", help="Produce verbose output")
parser.add_argument("-extra", default='{}', help="Extra arguments as a python dictionary")
args = parser.parse_args()
# default value for output json file
if not args.output:
args.output = "%s.json" % args.dataset
return args
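# Example invocations (illustrative only; dataset names and algorithm keys must match
# what is available under -root and what algorithms.Algorithm.create accepts):
#   python runme.py -root /opt/gbm-datasets -dataset fraud -algorithm xgb-cpu,lgbm-cpu
#   python runme.py -dataset higgs -algorithm xgb-gpu -ntrees 100 -extra "{'max_depth': 8}"
# The -extra string is parsed with ast.literal_eval, so it must be a valid Python dict literal.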
# benchmarks a single dataset
def benchmark(args, dataset_folder, dataset):
data = prepare_dataset(dataset_folder, dataset, args.nrows)
results = {}
# "all" runs all algorithms
if args.algorithm == "all":
args.algorithm = "xgb-gpu,xgb-cpu,xgb-gpu-dask,lgbm-cpu,lgbm-gpu,cat-cpu,cat-gpu"
for alg in args.algorithm.split(","):
print("Running '%s' ..." % alg)
runner = algorithms.Algorithm.create(alg)
with runner:
train_time = runner.fit(data, args)
pred = runner.test(data)
results[alg] = {
"train_time": train_time,
"accuracy": get_metrics(data, pred),
}
return results
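# Shape of the returned results (illustrative only; the exact metric names come from
# metrics.get_metrics):
#   {"xgb-cpu": {"train_time": <fit time>, "accuracy": <dict from get_metrics>}, ...}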
def main():
args = parse_args()
args.cpus = get_number_processors(args)
args.extra = ast.literal_eval(args.extra)
print_sys_info(args)
if args.warmup:
benchmark(args, os.path.join(args.root, "fraud"), "fraud")
if args.dataset == 'all':
args.dataset = 'airline,bosch,fraud,higgs,year,epsilon,covtype,newsgroups'
results = {}
for dataset in args.dataset.split(","):
folder = os.path.join(args.root, dataset)
results.update({dataset: benchmark(args, folder, dataset)})
print(json.dumps({dataset: results[dataset]}, indent=2, sort_keys=True))
output = json.dumps(results, indent=2, sort_keys=True)
output_file = open(args.output, "w")
output_file.write(output + "\n")
output_file.close()
print("Results written to file '%s'" % args.output)
if __name__ == "__main__":
main()
| 6,334 | 42.095238 | 97 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/loaders.py | import os
import pandas as pd
import arff
import numpy as np
from functools import reduce
import sqlite3
import logging
from libs.planet_kaggle import (to_multi_label_dict, get_file_count, enrich_with_feature_encoding,
featurise_images, generate_validation_files)
import tensorflow as tf
from keras.applications.resnet50 import ResNet50
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
_FRAUD_PATH = 'fraud_detection', 'credit_card_fraud_kaggle', 'creditcard.csv'
_IOT_PATH = 'iot', 'sensor_stream_berkeley', 'sensor.arff'
_AIRLINE_PATH = 'airline', 'airline_14col.data'
_FOOTBALL_PATH = 'football', 'database.sqlite'
_BCI_PATH = 'bci', 'data.npz'
_HIGGS_PATH = 'higgs', 'HIGGS.csv'
_KAGGLE_ROOT = 'planet'
_PLANET_KAGGLE_LABEL_CSV = 'train_v2.csv'
_PLANET_KAGGLE_TRAIN_DIR = 'train-jpg'
_PLANET_KAGGLE_VAL_DIR = 'validate-jpg'
def _get_datapath():
try:
datapath = os.environ['MOUNT_POINT']
except KeyError:
logger.info("MOUNT_POINT not found in environment. Defaulting to /fileshare")
datapath = '/fileshare'
return datapath
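# The loaders below build absolute paths by folding os.path.join over the path tuples
# above, seeded with the data path. A sketch assuming the default mount point:
#   reduce(os.path.join, _FRAUD_PATH, _get_datapath())
#   == os.path.join(os.path.join(os.path.join('/fileshare', 'fraud_detection'),
#                                'credit_card_fraud_kaggle'), 'creditcard.csv')
#   == '/fileshare/fraud_detection/credit_card_fraud_kaggle/creditcard.csv'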
def load_fraud():
""" Loads the credit card fraud data
    The dataset contains transactions made by credit cards in September 2013 by European cardholders.
    It covers transactions that occurred over two days, with 492 frauds out of 284,807 transactions.
    The dataset is highly unbalanced: the positive class (frauds) accounts for 0.172% of all transactions.
It contains only numerical input variables which are the result of a PCA transformation.
Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about
the data.
Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed
with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first
transaction in the dataset.
    The feature 'Amount' is the transaction amount; it can be used for example-dependent cost-sensitive learning.
Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve
(AUPRC).
Confusion matrix accuracy is not meaningful for unbalanced classification.
The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group
(http://mlg.ulb.ac.be) of ULB (Universite Libre de Bruxelles) on big data mining and fraud detection. More details
on current and past projects on related topics are available on http://mlg.ulb.ac.be/BruFence
and http://mlg.ulb.ac.be/ARTML
Please cite: Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with
Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015
Returns
-------
pandas DataFrame
"""
return pd.read_csv(reduce(os.path.join, _FRAUD_PATH, _get_datapath()))
def load_iot():
""" Loads iot data
Sensor stream contains information (temperature, humidity, light, and sensor voltage) collected from 54 sensors deployed
    in the Intel Berkeley Research Lab. The whole stream contains consecutive readings recorded over a 2-month
    period (1 reading per 1-3 minutes). The sensor ID is used as the class label, so the learning task of the stream is
    to correctly identify the sensor ID (1 out of 54 sensors) purely based on the sensor data and the corresponding recording
time.
    As the data stream flows over time, so do the concepts underlying the stream. For example, the lighting during
    working hours is generally stronger than at night, and the temperature of specific sensors (e.g. a conference room)
    may regularly rise during meetings.
Returns
-------
pandas DataFrame
"""
dataset = arff.load(open(reduce(os.path.join, _IOT_PATH, _get_datapath())))
columns = [i[0] for i in dataset['attributes']]
return pd.DataFrame(dataset['data'], columns=columns)
def load_airline():
""" Loads airline data
    The dataset consists of a large number of records, containing flight arrival and departure details for all the
    commercial flights within the USA from October 1987 to April 2008. It comprises around 116 million records and
    takes up about 5.76 GB of memory.
There are 13 attributes, each represented in a separate column: Year (1987-2008), Month (1-12), Day of Month (1-31),
Day of Week (1:Monday - 7:Sunday), CRS Departure Time (local time as hhmm), CRS Arrival Time (local time as hhmm),
Unique Carrier, Flight Number, Actual Elapsed Time (in min), Origin, Destination, Distance (in miles), and Diverted
(1=yes, 0=no).
    The target attribute is Arrival Delay; it is a positive or negative value measured in minutes.
Link to the source: http://kt.ijs.si/elena_ikonomovska/data.html
Returns
-------
pandas DataFrame
"""
cols = ['Year', 'Month', 'DayofMonth', 'DayofWeek', 'CRSDepTime', 'CRSArrTime', 'UniqueCarrier', 'FlightNum', 'ActualElapsedTime', 'Origin', 'Dest', 'Distance', 'Diverted', 'ArrDelay']
return pd.read_csv(reduce(os.path.join, _AIRLINE_PATH, _get_datapath()), names=cols)
def load_football():
""" Loads football data
    Dataset of football stats: 25,000+ matches and 10,000+ players from 11 European countries and their lead championships,
    covering seasons 2008 to 2016. It also contains player attributes sourced from EA Sports' FIFA video game series
    (including the weekly updates), team line-ups with squad formation (X, Y coordinates), betting odds from up to 10
    providers and detailed match events (goal types, possession, corners, crosses, fouls, cards, etc.) for 10,000+ matches.
The meaning of the columns can be found here: http://www.football-data.co.uk/notes.txt
Number of attributes in each table (size of the dataframe):
countries (11, 2)
matches (25979, 115)
leagues (11, 3)
teams (299, 5)
players (183978, 42)
Link to the source: https://www.kaggle.com/hugomathien/soccer
Returns
-------
list of pandas DataFrame
"""
database_path = reduce(os.path.join, _FOOTBALL_PATH, _get_datapath())
with sqlite3.connect(database_path) as con:
countries = pd.read_sql_query("SELECT * from Country", con)
matches = pd.read_sql_query("SELECT * from Match", con)
leagues = pd.read_sql_query("SELECT * from League", con)
teams = pd.read_sql_query("SELECT * from Team", con)
players = pd.read_sql("SELECT * FROM Player_Attributes;", con)
return countries, matches, leagues, teams, players
def load_bci():
""" Loads BCI data
Contains measurements from 64 EEG sensors on the scalp of a single participant.
The purpose of the recording is to determine from the electrical brain activity when the participant is paying attention.
Returns
-------
A tuple containing four numpy arrays
train features
train labels
test features
test labels
"""
npzfile = np.load(reduce(os.path.join, _BCI_PATH, _get_datapath()))
return npzfile['train_X'], npzfile['train_y'], npzfile['test_X'], npzfile['test_y']
def load_higgs():
""" Loads HIGGS data
    Dataset of atomic particle measurements, totalling 11 million observations.
It can be used in a classification problem to distinguish between a signal process which produces Higgs
bosons and a background process which does not.
The data has been produced using Monte Carlo simulations. The first 21 features (columns 2-22) are kinematic
properties measured by the particle detectors in the accelerator. The last seven features are functions of
the first 21 features; these are high-level features derived by physicists to help discriminate between the
two classes. The first column is the class label (1 for signal, 0 for background), followed by the 28
features (21 low-level features then 7 high-level features): lepton pT, lepton eta, lepton phi,
missing energy magnitude, missing energy phi, jet 1 pt, jet 1 eta, jet 1 phi, jet 1 b-tag, jet 2 pt, jet 2 eta,
jet 2 phi, jet 2 b-tag, jet 3 pt, jet 3 eta, jet 3 phi, jet 3 b-tag, jet 4 pt, jet 4 eta, jet 4 phi,
jet 4 b-tag, m_jj, m_jjj, m_lv, m_jlv, m_bb, m_wbb, m_wwbb.
Link to the source: https://archive.ics.uci.edu/ml/datasets/HIGGS
Returns
-------
pandas DataFrame
"""
cols = ['boson','lepton_pT','lepton_eta','lepton_phi','missing_energy_magnitude','missing_energy_phi','jet_1_pt','jet_1_eta','jet_1_phi','jet_1_b-tag','jet_2_pt','jet_2_eta','jet_2_phi','jet_2_b-tag','jet_3_pt','jet_3_eta','jet_3_phi','jet_3_b-tag','jet_4_pt','jet_4_eta','jet_4_phi','jet_4_b-tag','m_jj','m_jjj','m_lv','m_jlv','m_bb','m_wbb','m_wwbb']
return pd.read_csv(reduce(os.path.join, _HIGGS_PATH, _get_datapath()), names=cols)
def load_planet_kaggle():
""" Loads Planet Kaggle data
Dataset of satellite images of the Amazon. The objective of this dataset is to label satellite image chips
with atmospheric conditions and various classes of land cover/land use. Resulting algorithms will help the
global community better understand where, how, and why deforestation happens all over the world. The images
use the GeoTiff format and each contain four bands of data: red, green, blue, and near infrared.
To treat the images we used transfer learning with the CNN ResNet50. The images are featurized with this
deep neural network. Once the features are generated we can use a boosted tree to classify them.
Link to the source: https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/data
Returns
-------
A tuple containing four numpy arrays
train_features
y_train
validation_features
y_val
"""
csv_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_LABEL_CSV), _get_datapath())
train_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_TRAIN_DIR), _get_datapath())
val_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_VAL_DIR), _get_datapath())
assert os.path.isfile(csv_path)
assert os.path.exists(train_path)
if not os.path.exists(val_path): os.mkdir(val_path)
if not os.listdir(val_path):
logger.info('Validation folder is empty, moving files...')
generate_validation_files(train_path, val_path)
logger.info('Reading in labels')
labels_df = pd.read_csv(csv_path).pipe(enrich_with_feature_encoding)
multi_label_dict = to_multi_label_dict(labels_df)
nb_train_samples = get_file_count(os.path.join(train_path, '*.jpg'))
nb_validation_samples = get_file_count(os.path.join(val_path, '*.jpg'))
logger.debug('Number of training files {}'.format(nb_train_samples))
logger.debug('Number of validation files {}'.format(nb_validation_samples))
logger.debug('Loading model')
model = ResNet50(include_top=False)
train_features, train_names = featurise_images(model,
train_path,
'train_{}',
range(nb_train_samples),
desc='Featurising training images')
validation_features, validation_names = featurise_images(model,
val_path,
'train_{}',
range(nb_train_samples, nb_train_samples+nb_validation_samples),
desc='Featurising validation images')
# Prepare data
y_train = np.array([multi_label_dict[name] for name in train_names])
y_val = np.array([multi_label_dict[name] for name in validation_names])
return train_features, y_train, validation_features, y_val
| 12,263 | 48.853659 | 356 | py |
gbm-bench | gbm-bench-master/3rdparty/fast_retraining/experiments/libs/planet_kaggle.py | import os
import numpy as np
import glob
from tqdm import tqdm
import shutil
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
def labels_from(labels_df):
""" Extracts the unique labels from the labels dataframe
"""
# Build list with unique labels
label_list = []
for tag_str in labels_df.tags.values:
labels = tag_str.split(' ')
for label in labels:
if label not in label_list:
label_list.append(label)
return label_list
def enrich_with_feature_encoding(labels_df):
# Add onehot features for every label
for label in labels_from(labels_df):
labels_df[label] = labels_df['tags'].apply(lambda x: 1 if label in x.split(' ') else 0)
return labels_df
def to_multi_label_dict(enriched_labels_df):
df = enriched_labels_df.set_index('image_name').drop('tags', axis=1)
return dict((filename, encoded_array) for filename, encoded_array in zip(df.index, df.values))
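# A minimal sketch of the two helpers above, using toy data rather than the real Planet
# Kaggle labels CSV (labels_df is a pandas DataFrame built by the caller):
#   toy = pd.DataFrame({'image_name': ['train_0', 'train_1'],
#                       'tags': ['clear primary', 'haze']})
#   enrich_with_feature_encoding(toy) adds one-hot columns 'clear', 'primary', 'haze'
#   to_multi_label_dict(enrich_with_feature_encoding(toy))['train_0'] -> array([1, 1, 0])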
def get_file_count(folderpath):
""" Returns the number of files in a folder
"""
return len(glob.glob(folderpath))
def threshold_prediction(pred_y, threshold=0.5):# TODO: Needs to be tuned?
return pred_y > threshold
def read_images(filepath, filenames):
""" Read images in batches
"""
img_data = list()
for name in filenames:
img_path = os.path.join(filepath, name+'.jpg')
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
img_data.append(preprocess_input(x))
return np.concatenate(img_data)
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def featurise_images(model, filepath, nameformat, num_iter, batch_size=32, desc=None):
""" Use DL model to featurise images
"""
features = list()
img_names = list()
num_list = list(num_iter)
    num_batches = int(np.ceil(len(num_list) / float(batch_size)))
for num_chunk in tqdm(chunks(num_list, batch_size), total=num_batches, desc=desc):
filenames = [nameformat.format(index) for index in num_chunk]
batch_images = read_images(filepath, filenames)
img_names.extend(filenames)
features.extend(model.predict_on_batch(batch_images).squeeze())
return np.array(features), img_names
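# Hedged usage sketch (the path and image count are placeholders; the ResNet50 model is
# instantiated by the caller, as done in libs/loaders.py):
#   model = ResNet50(include_top=False)
#   feats, names = featurise_images(model, '/fileshare/planet/train-jpg', 'train_{}',
#                                   range(64), batch_size=32,
#                                   desc='Featurising training images')
#   # feats holds one feature array per image; names is ['train_0', ..., 'train_63']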
def generate_validation_files(train_path, val_path, num_train=35000):
    """ Creates the validation files by moving the last train files into the validation folder.
    """
    num_train_ini = get_file_count(os.path.join(train_path, '*.jpg'))
    assert num_train_ini > num_train
    # move train_{num_train}.jpg .. train_{num_train_ini-1}.jpg with shutil instead of a
    # shell 'mv' with brace expansion, which only works when /bin/sh is bash
    for idx in range(num_train, num_train_ini):
        shutil.move(os.path.join(train_path, 'train_{}.jpg'.format(idx)), val_path)
| 2,761 | 30.033708 | 110 | py |
dataqa | dataqa-master/continuum/validation_tool/report.py | from __future__ import division
from functions import get_pixel_area, get_stats, flux_at_freq, axis_lim
import os
import collections
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt, mpld3
from matplotlib import cm, ticker, colors
from mpld3 import plugins
from matplotlib.patches import Ellipse
import matplotlib.image as image
#import seaborn
from astropy.io import fits as f
from astropy.io import votable
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.utils.exceptions import AstropyWarning
from astropy.wcs import WCS
import warnings
from inspect import currentframe, getframeinfo
#ignore annoying astropy warnings and set my own obvious warning output
warnings.simplefilter('ignore', category=AstropyWarning)
cf = currentframe()
WARN = '\n\033[91mWARNING: \033[0m' + getframeinfo(cf).filename
class report(object):
def __init__(self, cat, main_dir, img=None, plot_to='html', css_style=None,
src_cnt_bins=50, redo=False, write=True, verbose=True,
fig_font={'fontname':'Serif', 'fontsize' : 18},
fig_size={'figsize' : (8,8)}, cmap='plasma', cbins=20,
label_size={'labelsize' : 12},
markers={'s' : 20, 'linewidth' : 1, 'marker' : 'o', 'color' : 'b'},
colour_markers={'marker' : 'o', 's' : 30, 'linewidth' : 0},
arrows={'color' : 'r', 'width' : 0.04, 'scale' : 20}):
"""Initialise a report object for writing a html report of the image and cross-matches, including plots.
Arguments:
----------
cat : catalogue
Catalogue object with the data for plotting.
main_dir : string
Main directory that contains all the necessary files.
Keyword arguments:
------------------
img : radio_image
Radio image object used to write report table. If None, report will not be written, but plots will be made.
plot_to : string
Where to show or write the plot. Options are:
'html' - save as a html file using mpld3.
'screen' - plot to screen [i.e. call plt.show()].
'extn' - write file with this extension (e.g. 'pdf', 'eps', 'png', etc).
css_style : string
A css format to be inserted in <head>.
fig_font : dict
Dictionary of kwargs for font name and size for title and axis labels of matplotlib figure.
fig_size : dict
Dictionary of kwargs to pass into pyplot.figure.
label_size : dict
Dictionary of kwargs for tick params.
markers : dict
Dictionary of kwargs to pass into pyplot.figure.scatter, etc (when single colour used).
colour_markers : dict
Dictionary of kwargs to pass into pyplot.figure.scatter, etc (when colourmap used).
arrows : dict
Dictionary of kwargs to pass into pyplot.figure.quiver.
redo: bool
Produce all plots and save them, even if the files already exist.
write : bool
Write the source counts and figures to file. Input False to only write report.
verbose : bool
Verbose output.
See Also:
---------
matplotlib.pyplot
mpld3"""
self.cat = cat
self.img = img
self.plot_to = plot_to
self.fig_font = fig_font
self.fig_size = fig_size
self.label_size = label_size
self.markers = markers
self.colour_markers = colour_markers
self.arrows = arrows
self.cmap = plt.get_cmap(cmap,cbins)
self.src_cnt_bins = src_cnt_bins
self.main_dir = main_dir
self.redo = redo
self.write = write
self.verbose = verbose
self.apercal_version, self.apercal_path = self.apercal_specs()
#set name of directory for figures and create if doesn't exist
self.figDir = 'figures'
if self.write and not os.path.exists(self.figDir):
os.mkdir(self.figDir)
#use css style passed in or default style for CASS web server below
if css_style is not None:
self.css_style = css_style
else:
self.css_style = """<?php include("base.inc"); ?>
<meta name="DCTERMS.Creator" lang="en" content="" />
<meta name="DC.Title" lang="en" content="Continuum Validation Report" />
<meta name="DC.Description" lang="en" content="Continuum validation report summarising science readiness of data via several metrics" />
<?php standard_head(); ?>
<style>
.reportTable {
border-collapse: collapse;
width: 100%;
}
.reportTable th, .reportTable td {
padding: 15px;
text-align: middle;
border-bottom: 1px solid #ddd;
vertical-align: top;
}
.reportTable tr {
text-align:center;
vertical-align:middle;
}
.reportTable tr:hover{background-color:#f5f5f5}
#good {
background-color:#00FA9A;
}
#uncertain {
background-color:#FFA500;
}
#bad {
background-color:#FF6347;
}
</style>\n"""
self.css_style += "<title>{0} Continuum Validation Report</title>\n""".format(self.cat.name)
#filename of html report
self.name = 'index.html'
#Open file html file and write css style, title and heading
self.write_html_head()
#write table summary of observations and image if radio_image object passed in
if img is not None:
self.write_html_img_table(img)
rms_map = f.open(img.rms_map)[0]
solid_ang = 0
#otherwise assume area based on catalogue RA/DEC limits
else:
rms_map = None
solid_ang = self.cat.area*(np.pi/180)**2
self.write_html_cat_table()
#plot the int/peak flux as a function of peak flux
self.int_peak_flux(usePeak=True)
#write source counts to report using rms map to measure solid angle or approximate solid angle
if self.cat.name in self.cat.flux.keys():
self.source_counts(self.cat.flux[self.cat.name],self.cat.freq[self.cat.name],
rms_map=rms_map,solid_ang=solid_ang,write=self.write)
else:
self.sc_red_chi_sq = -1
#write cross-match table header
self.write_html_cross_match_table()
#store dictionary of metrics, where they come from, how many matches they're derived from, and their level (0,1 or 2)
#spectral index defaults to -99, as there is a likelihood it will not be needed (if Taylor-term imaging is not done)
#RA and DEC offsets used temporarily and then dropped before final metrics computed
key_value_pairs = [ ('Flux Ratio' , 0),
('Flux Ratio Uncertainty' , 0),
('Positional Offset' , 0),
('Positional Offset Uncertainty' , 0),
('Resolved Fraction' , self.cat.resolved_frac),
('Spectral Index' , 0),
('RMS', self.cat.img_rms),
('Source Counts Reduced Chi-squared' , self.sc_red_chi_sq),
('RA Offset' , 0),
('DEC Offset' , 0)]
self.metric_val = collections.OrderedDict(key_value_pairs)
self.metric_source = self.metric_val.copy()
self.metric_count = self.metric_val.copy()
self.metric_level = self.metric_val.copy()
def apercal_specs(self):
try:
import apercal
aver = apercal.__version__
apath = apercal.__file__
except:
aver = ''
apath = ''
return aver, apath
# def get_dynamic_range(self, radius=30, box=50):
# """
# get source and local dynamic range for the sources
# within the radius [arcmin] of the beam center
# The box [pixels] is the box to estimate local DR.
# """
def write_html_head(self):
"""Open the report html file and write the head."""
self.html = open(self.name,'w')
self.html.write("""<!DOCTYPE HTML>
<html lang="en">
<head>
{0}
</head>
<?php title_bar("atnf"); ?>
<body>
<h1 align="middle">{1} Continuum Data Validation Report</h1>""".format(self.css_style,self.cat.name))
def write_html_img_table(self, img):
"""Write an observations and image and catalogue report tables derived from fits image and header.
Arguments:
----------
img : radio_image
A radio image object used to write values to the html table."""
#generate link to confluence page for each project code
project = img.project
# if project.startswith('AS'):
# project = self.add_html_link("https://confluence.csiro.au/display/askapsst/{0}+Data".format(img.project),img.project,file=False)
#Write observations report table
self.html.write("""
<h2 align="middle">Observations</h2>
<table class="reportTable">
<tr>
<th>SBID</th>
<th>Project</th>
<th>Date</th>
<th>Duration<br>(hours)</th>
<th>Field Centre</th>
<th>Central Frequency<br>(MHz)</th>
</tr>
<tr>
<td>{0}</td>
<td>{1}</td>
<td>{2}</td>
<td>{3}</td>
<td>{4}</td>
<td>{5:.2f}</td>
</tr>
</table>""".format( img.sbid,
project,
img.date,
img.duration,
img.centre,
img.freq))
#Write image report table
self.html.write("""
<h2 align="middle">Image</h2>
<h4 align="middle"><i>File: '{0}'</i></h3>
<table class="reportTable">
<tr>
<th>IMAGE SIZE<br>(pixels)</th>
<th>PIXEL SIZE<br>(arcsec)</th>
<th>Synthesised Beam<br>(arcsec)</th>
<th>Median r.m.s.<br>(uJy)</th>
<th>Image peak<br>(Jy)</th>
<th>Image DR</th>
<th>Source DR</th>
<th>Local DR</th>
<th>Sky Area<br>(deg<sup>2</sup>)</th>
<th>Normaltest</th>
</tr>
<tr>
<td>{1}</td>
<td>{2}</td>
<td>{3:.1f} x {4:.1f}</td>
<td>{5}</td>
<td>{6:.2f}</td>
<td>{7:.0f} </td>
<td>{8:.0f} - {9:.0f} </td>
<td>{10:.0f} - {11:.0f} </td>
<td>{12:.2f}</td>
<td>{13:s}</td>
</tr>
</table>""".format( img.name,
img.imsizestr,
img.pixsizestr,
img.bmaj,
img.bmin,
self.cat.img_rms,
self.cat.img_peak,
self.cat.dynamic_range,
self.cat.source_dynrange[0], self.cat.source_dynrange[1],
self.cat.local_dynrange[0], self.cat.local_dynrange[1],
self.cat.area,
img.gaussianity))
def write_html_cat_table(self):
"""Write an observations and image and catalogue report tables derived from fits image, header and catalogue."""
flux_type = 'integrated'
if self.cat.use_peak:
flux_type = 'peak'
if self.cat.med_si == -99:
med_si = ''
else:
med_si = '{0:.2f}'.format(self.cat.med_si)
#Write catalogue report table
self.html.write("""
<h2 align="middle">Catalogue</h2>
<h4 align="middle"><i>File: '{0}'</i></h3>
<table class="reportTable">
<tr>
<th>Source Finder</th>
<th>Flux Type</th>
<th>Number of<br>sources (≥{1}σ)</th>
<th>Multi-component<br>islands</th>
<th>Sum of image flux vs.<br>sum of catalogue flux</th>
<th>Median in-band spectral index</th>
<th>Median int/peak flux</th>
<th>Source Counts<br>χ<sub>red</sub><sup>2</sup></th>
</tr>
<tr>
<td>{2}</td>
<td>{3}</td>
<td>{4}</td>
<td>{5}</td>
<td>{6:.1f} Jy vs. {7:.1f} Jy</td>
<td>{8}</td>""".format( self.cat.filename,
self.cat.SNR,
self.cat.finder,
flux_type,
self.cat.initial_count,
self.cat.blends,
self.cat.img_flux,
self.cat.cat_flux,
med_si))
def write_html_cross_match_table(self):
"""Write the header of the cross-matches table."""
self.html.write("""
<h2 align="middle">Cross-matches</h2>
<table class="reportTable">
<tr>
<th>Survey</th>
<th>Frequency<br>(MHz)</th>
<th>Cross-matches</th>
<th>Median offset<br>(arcsec)</th>
<th>Median flux ratio</th>
<th>Median spectral index</th>
</tr>""")
def get_metric_level(self,good_condition,uncertain_condition):
"""Return metric level 1 (good), 2 (uncertain) or 3 (bad), according to the two input conditions.
Arguments:
----------
good_condition : bool
Condition for metric being good.
uncertain_condition : bool
Condition for metric being uncertain."""
if good_condition:
return 1
if uncertain_condition:
return 2
return 3
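    # e.g. for the flux-ratio metric below, a 3% offset gives
    # get_metric_level(0.03 < 0.05, 0.03 < 0.1) -> 1 (good), an 8% offset gives
    # level 2 (uncertain), and anything above 10% gives level 3 (bad)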
def assign_metric_levels(self):
"""Assign level 1 (good), 2 (uncertain) or 3 (bad) to each metric, depending on specific tolerenace values.
See https://confluence.csiro.au/display/askapsst/Continuum+validation+metrics"""
for metric in self.metric_val.keys():
# Remove keys that don't have a valid value (value=-99 or -1111)
if self.metric_val[metric] == -99 or self.metric_val[metric] == -111:
self.metric_val.pop(metric)
self.metric_source.pop(metric)
self.metric_level.pop(metric)
else:
#flux ratio within 5/10%?
if metric == 'Flux Ratio':
val = np.abs(self.metric_val[metric]-1)
good_condition = val < 0.05
uncertain_condition = val < 0.1
self.metric_source[metric] = 'Median flux density ratio [APERTIF / {0}]'.format(self.metric_source[metric])
#uncertainty on flux ratio less than 10/20%?
elif metric == 'Flux Ratio Uncertainty':
good_condition = self.metric_val[metric] < 0.1
uncertain_condition = self.metric_val[metric] < 0.2
self.metric_source[metric] = 'R.M.S. of median flux density ratio [APERTIF / {0}]'.format(self.metric_source[metric])
self.metric_source[metric] += ' (estimated from median absolute deviation from median)'
#positional offset < 1/5 arcsec
elif metric == 'Positional Offset':
good_condition = self.metric_val[metric] < 1
uncertain_condition = self.metric_val[metric] < 5
self.metric_source[metric] = 'Median positional offset (arcsec) [APERTIF-{0}]'.format(self.metric_source[metric])
#uncertainty on positional offset < 1/5 arcsec
elif metric == 'Positional Offset Uncertainty':
good_condition = self.metric_val[metric] < 5
uncertain_condition = self.metric_val[metric] < 10
self.metric_source[metric] = 'R.M.S. of median positional offset (arcsec) [APERTIF-{0}]'.format(self.metric_source[metric])
self.metric_source[metric] += ' (estimated from median absolute deviation from median)'
#reduced chi-squared of source counts < 3/50?
elif metric == 'Source Counts Reduced Chi-squared':
good_condition = self.metric_val[metric] < 3
uncertain_condition = self.metric_val[metric] < 50
self.metric_source[metric] = 'Reduced chi-squared of source counts'
#resolved fraction of sources between 5-20%?
elif metric == 'Resolved Fraction':
good_condition = self.metric_val[metric] > 0.05 and self.metric_val[metric] < 0.2
uncertain_condition = self.metric_val[metric] < 0.3
self.metric_source[metric] = 'Fraction of sources resolved according to int/peak flux densities'
#spectral index less than 0.2 away from -0.8?
elif metric == 'Spectral Index':
val = np.abs(self.metric_val[metric]+0.8)
good_condition = val < 0.2
uncertain_condition = False
self.metric_source[metric] = 'Median in-band spectral index'
elif metric == 'RMS':
good_condition = self.metric_val[metric] < 100
uncertain_condition = self.metric_val[metric] < 500
self.metric_source[metric] = 'Median image R.M.S. (uJy) from noise map'
#if unknown metric, set it to 3 (bad)
else:
good_condition = False
uncertain_condition = False
#assign level to metric
self.metric_level[metric] = self.get_metric_level(good_condition,uncertain_condition)
if self.img is not None:
self.write_CASDA_xml()
def write_pipeline_offset_params(self):
"""Write a txt file with offset params for soft pipeline for user to easily import into config file, and then drop them from metrics.
See http://www.atnf.csiro.au/computing/software/askapsoft/sdp/docs/current/pipelines/ScienceFieldContinuumImaging.html?highlight=offset"""
txt = open('offset_pipeline_params.txt','w')
txt.write("DO_POSITION_OFFSET=true\n")
txt.write("RA_POSITION_OFFSET={0:.2f}\n".format(-self.metric_val['RA Offset']))
txt.write("DEC_POSITION_OFFSET={0:.2f}\n".format(-self.metric_val['DEC Offset']))
txt.close()
for metric in ['RA Offset','DEC Offset']:
self.metric_val.pop(metric)
self.metric_source.pop(metric)
self.metric_level.pop(metric)
self.metric_count.pop(metric)
def write_CASDA_xml(self):
"""Write xml table with all metrics for CASDA."""
tmp_table = Table( [self.metric_val.keys(),self.metric_val.values(),self.metric_level.values(),self.metric_source.values()],
names=['metric_name','metric_value','metric_status','metric_description'],
dtype=[str,float,np.int32,str])
vot = votable.from_table(tmp_table)
vot.version = 1.3
table = vot.get_first_table()
table.params.extend([votable.tree.Param(vot, name="project", datatype="char", arraysize="*", value=self.img.project)])
valuefield=table.fields[1]
valuefield.precision='2'
prefix = ''
if self.img.project != '':
prefix = '{0}_'.format(self.img.project)
xml_filename = '{0}CASDA_continuum_validation.xml'.format(prefix)
votable.writeto(vot, xml_filename)
def write_html_end(self):
"""Write the end of the html report file (including table of metrics) and close it."""
#Close cross-matches table and write header of validation summary table
self.html.write("""
</td>
</tr>
</table>
<h2 align="middle">{0} continuum validation metrics</h2>
<table class="reportTable">
<tr>
<th>Flux Ratio<br>({0} / {1})</th>
<th>Flux Ratio Uncertainty<br>({0} / {1})</th>
<th>Positional Offset (arcsec)<br>({0} — {2})</th>
<th>Positional Offset Uncertainty (arcsec)<br>({0} — {2})</th>
<th>Resolved Fraction from int/peak Flux<br>({0})</th>
<th>Source Counts χ<sub>red</sub><sup>2</sup><br>({0})</th>
<th>r.m.s. (uJy)<br>({0})</th>
""".format(self.cat.name,self.metric_source['Flux Ratio'],self.metric_source['Positional Offset']))
#assign levels to each metric
self.assign_metric_levels()
#flag if in-band spectral indices not derived
spec_index = False
if 'Spectral Index' in self.metric_val:
spec_index = True
if spec_index:
self.html.write('<th>Median in-band<br>spectral index</th>')
#Write table with values of metrics and colour them according to level
self.html.write("""</tr>
<tr>
<td {0}>{1:.2f}</td>
<td {2}>{3:.2f}</td>
<td {4}>{5:.2f}</td>
<td {6}>{7:.2f}</td>
<td {8}>{9:.2f}</td>
<td {10}>{11:.2f}</td>
<td {12}>{13}</td>
""".format(self.html_colour(self.metric_level['Flux Ratio']),self.metric_val['Flux Ratio'],
self.html_colour(self.metric_level['Flux Ratio Uncertainty']),self.metric_val['Flux Ratio Uncertainty'],
self.html_colour(self.metric_level['Positional Offset']),self.metric_val['Positional Offset'],
self.html_colour(self.metric_level['Positional Offset Uncertainty']),self.metric_val['Positional Offset Uncertainty'],
self.html_colour(self.metric_level['Resolved Fraction']),self.metric_val['Resolved Fraction'],
self.html_colour(self.metric_level['Source Counts Reduced Chi-squared']),self.metric_val['Source Counts Reduced Chi-squared'],
self.html_colour(self.metric_level['RMS']),self.metric_val['RMS']))
if spec_index:
self.html.write('<td {0}>{1:.2f}</td>'.format(self.html_colour(self.metric_level['Spectral Index']),
self.metric_val['Spectral Index']))
# by = ''
# if self.cat.name != 'ASKAP':
# by = """ by <a href="mailto:[email protected]">Jordan Collier</a>"""
#Close table, write time generated, and close html file
self.html.write("""</tr>
</table>
<p><i>Generated at {0}</i></p>
<?php footer(); ?>
</body>
</html>""".format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) #,by))
self.html.close()
print "Continuum validation report written to '{0}'.".format(self.name)
def add_html_link(self,target,link,file=True,newline=False):
"""Return the html for a link to a URL or file.
Arguments:
----------
target : string
The name of the target (a file or URL).
link : string
The link to this file (thumbnail file name or string to list as link name).
Keyword Arguments:
------------------
file : bool
The input link is a file (e.g. a thumbnail).
newline : bool
Write a newline / html break after the link.
Returns:
--------
html : string
The html link."""
html = """<a href="{0}">""".format(target)
if file:
html += """<IMG SRC="{0}"></a>""".format(link)
else:
html += "{0}</a>".format(link)
if newline:
html += "<br>"
return html
def text_to_html(self,text):
"""Take a string of text that may include LaTeX, and return the html code that will generate it as LaTeX.
Arguments:
----------
text : string
A string of text that may include LaTeX.
Returns:
--------
html : string
The same text readable as html."""
#this will allow everything between $$ to be generated as LaTeX
html = """
<script type="text/x-mathjax-config">
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\\(','\\)']]}});
</script>
<script type="text/javascript"
src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
</script>
<br>
"""
#write a newline / break for each '\n' in string
for line in text.split('\n'):
html += line + '<br>'
return html
def html_colour(self,level):
"""Return a string representing green, yellow or red in html if level is 1, 2 or 3.
Arguments:
----------
level : int
A validation level.
Returns:
--------
colour : string
The html for green, yellow or red."""
if level == 1:
colour = "id='good'"
elif level == 2:
colour = "id='uncertain'"
else:
colour = "id='bad'"
return colour
def int_peak_flux(self,usePeak=False):
"""Plot the int/peak fluxes as a function of peak flux.
Keyword Arguments:
------------------
usePeak : bool
Use peak flux as x axis, instead of SNR."""
ratioCol = '{0}_int_peak_ratio'.format(self.cat.name)
self.cat.df[ratioCol] = self.cat.df[self.cat.flux_col] / self.cat.df[self.cat.peak_col]
SNR = self.cat.df[self.cat.peak_col]/self.cat.df[self.cat.rms_val]
ratio = self.cat.df[ratioCol]
peak = self.cat.df[self.cat.peak_col]
xaxis = SNR
if usePeak:
xaxis = peak
#plot the int/peak flux ratio
fig = plt.figure(**self.fig_size)
title = "{0} int/peak flux ratio".format(self.cat.name)
if self.plot_to == 'html':
if usePeak:
xlabel = 'Peak flux ({0})'.format(self.cat.flux_unit.replace('j','J'))
else:
xlabel = 'S/N'
ylabel = 'Int / Peak Flux Ratio'
else:
xlabel = r'${\rm S_{peak}$'
if usePeak:
xlabel += ' ({0})'.format(self.cat.flux_unit.replace('j','J'))
else:
xlabel += r'$ / \sigma_{rms}}$'
ylabel = r'${\rm S_{int} / S_{peak}}$'
if self.plot_to != 'screen':
filename = '{0}/{1}_int_peak_ratio.{2}'.format(self.figDir,self.cat.name,self.plot_to)
else:
filename = ''
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(xaxis,yaxis=ratio)#,caxis=self.cat.dec[self.cat.name])
plt.loglog()
plt.gca().grid(b=True, which='minor', color='w', linewidth=0.5)
#hack to overlay resolved sources in red
xres,yres= xaxis[self.cat.resolved],ratio[self.cat.resolved]
markers = self.markers.copy()
markers['color'] = 'r'
markers.pop('s')
data, = plt.plot(xres,yres,'o',zorder=50,**markers)
leg_labels = ['Resolved','Unresolved']
#derive the statistics of y and store in string
ymed,ymean,ystd,yerr,ymad = get_stats(ratio)
txt = '$\widetilde{Ratio}$: %.2f\n' % ymed
txt += '$\overline{Ratio}$: %.2f\n' % ymean
txt += '$\sigma_{Ratio}$: %.2f\n' % ystd
txt += '$\sigma_{\overline{Ratio}}$: %.2f' % yerr
#store median int/peak flux ratio and write to report table
self.int_peak_ratio = ymed
self.html.write('<td>{0:.2f}<br>'.format(ymed))
#plot the int/peak flux ratio
self.plot(x,
y=y,
c=c,
figure=fig,
line_funcs=[self.y1],
title=title,
xlabel=xlabel,
ylabel=ylabel,
text=txt,
loc='tl',
axis_perc=0,
filename=filename,
leg_labels=leg_labels,
handles=[data],
redo=self.redo)
def source_counts(self,fluxes,freq,rms_map=None,solid_ang=0,write=True):
"""Compute and plot the (differential euclidean) source counts based on the input flux densities.
Arguments:
----------
fluxes : list-like
A list of fluxes in Jy.
freq : float
The frequency of these fluxes in MHz.
Keyword arguments:
------------------
rms_map : astropy.io.fits
A fits image of the local rms in Jy.
solid_ang : float
A fixed solid angle over which the source counts are computed. Only used when rms_map is None.
write : bool
Write the source counts to file."""
#derive file names based on user input
filename = 'screen'
counts_file = '{0}_source_counts.csv'.format(self.cat.basename)
if self.plot_to != 'screen':
filename = '{0}/{1}_source_counts.{2}'.format(self.figDir,self.cat.name,self.plot_to)
#read the log of the source counts from Norris+11 from same directory of this script
df_Norris = pd.read_table('{0}/all_counts.txt'.format(self.main_dir),sep=' ')
x = df_Norris['S']-3 #convert from log of flux in mJy to log of flux in Jy
y = df_Norris['Counts']
yerr = (df_Norris['ErrDown'],df_Norris['ErrUp'])
#fit 6th degree polynomial to Norris+11 data
deg = 6
poly_paras = np.polyfit(x,y,deg)
f = np.poly1d(poly_paras)
xlin = np.linspace(min(x)*1.2,max(x)*1.2)
ylin = f(xlin)
#perform source counts if not already written to file or user specifies to re-do
if not os.path.exists(counts_file) or self.redo:
#warn user if they haven't input an rms map or fixed solid angle
if rms_map is None and solid_ang == 0:
warnings.warn_explicit("You must input a fixed solid angle or an rms map to compute the source counts!\n",UserWarning,WARN,cf.f_lineno)
return
#get the number of bins from the user
nbins = self.src_cnt_bins
print "Deriving source counts for {0} using {1} bins.".format(self.cat.name,nbins)
#Normalise the fluxes to 1.4 GHz
fluxes = flux_at_freq(1400,freq,fluxes,-0.8)
#Correct for Eddington bias for every flux, assuming Hogg+98 model
r = self.cat.df[self.cat.flux_col] / self.cat.df[self.cat.rms_val]
slope = np.polyder(f)
q = 1.5 - slope(fluxes)
bias = 0.5 + 0.5*np.sqrt(1 - (4*q+4)/(r**2))
#q is derived in log space, so correct for the bias in log space
fluxes = 10**(np.log10(fluxes)/bias)
if rms_map is not None:
w = WCS(rms_map.header)
if self.verbose:
print "Using rms map '{0}' to derive solid angle for each flux bin.".format(self.img.rms_map)
total_area = get_pixel_area(rms_map, flux=100, w=w)[0]
else:
total_area = 0
#add one more bin and then discard it, since this is dominated by the few brightest sources
#we also add one more to the bins since there's one more bin edge than number of bins
edges = np.percentile(fluxes,np.linspace(0,100,nbins+2))
dN,edges,patches=plt.hist(fluxes,bins=edges)
dN = dN[:-1]
edges = edges[:-1]
#derive the lower and upper edges and dS
lower = edges[:-1]
upper = edges[1:]
dS = upper-lower
S = np.zeros(len(dN))
solid_angs = np.zeros(len(dN))
for i in range(len(dN)):
#derive the mean flux from all fluxes in current bin
indices = (fluxes > lower[i]) & (fluxes < upper[i])
S[i] = np.mean(fluxes[indices])
#Get the pixels from the r.m.s. map where SNR*r.m.s. < flux
if rms_map is not None:
solid_angs[i] = get_pixel_area(rms_map, flux=S[i]/self.cat.SNR, w=w)[1]
# print solid_angs
#otherwise use the fixed value passed in
else:
solid_angs[i] = solid_ang
            #compute the differential Euclidean source counts and uncertainties in linear space
# FIXME: this workaround looks ugly
solid_angs[solid_angs == 0] = 1e-19
counts = (S**2.5)*dN/dS/solid_angs
err = (S**2.5)*np.sqrt(dN)/dS/solid_angs
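            # informal units check: S and dS are in Jy and solid_angs in sr, so these
            # counts come out in Jy^1.5 sr^-1, matching the y-axis label written below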
#Store these and the log of these values in pandas data frame
df = pd.DataFrame()
df['dN'] = dN
df['area'] = solid_angs/((np.pi/180)**2)
df['S'] = S
df['logS'] = np.log10(S)
df['logCounts'] = np.log10(counts)
df['logErrUp'] = np.log10(counts+err) - np.log10(counts)
df['logErrDown'] = np.abs(np.log10(counts-err) - np.log10(counts))
#remove all bins with less than 10% of total solid angle
bad_bins = df['area'] / total_area < 0.1
output = ['Solid angle for bin S={0:.2f} mJy less than 10% of total image. Removing bin.'.\
format(S) for S in S[np.where(bad_bins)]*1e3]
if self.verbose:
for line in output:
print line
df = df[~bad_bins]
if write:
if self.verbose:
print "Writing source counts to '{0}'.".format(counts_file)
df.to_csv(counts_file,index=False)
#otherwise simply read in source counts from file
else:
print "File '{0}' already exists. Reading source counts from this file.".format(counts_file)
df = pd.read_csv(counts_file)
#create a figure for the source counts
plt.close()
fig=plt.figure(**self.fig_size)
        title = '{0} 1.4 GHz source counts'.format(self.cat.name)
#write axes using unicode (for html) or LaTeX
if self.plot_to == 'html':
ylabel = u"log\u2081\u2080 S\u00B2\u22C5\u2075 dN/dS [Jy\u00B9\u22C5\u2075 sr\u207B\u00B9]"
xlabel = u"log\u2081\u2080 S [Jy]"
else:
ylabel = r"$\log_{10}$ S$^{2.5}$ dN/dS [Jy$^{1.5}$ sr$^{-1}$]"
xlabel = r"$\log_{10}$ S [Jy]"
#for html plots, add labels for the bin centre, count and area for every data point
labels = [u'S: {0:.2f} mJy, dN: {1:.0f}, Area: {2:.2f} deg\u00B2'.format(bin,count,area) for bin,count,area in zip(df['S']*1e3,df['dN'],df['area'])]
#derive the square of the residuals (chi squared), and their sum
#divided by the number of data points (reduced chi squared)
chi = ((df['logCounts']-f(df['logS']))/df['logErrDown'])**2
red_chi_sq = np.sum(chi)/len(df)
#store reduced chi squared value
self.sc_red_chi_sq = red_chi_sq
#Plot Norris+11 data
data = plt.errorbar(x,y,yerr=yerr,linestyle='none',marker='.',c='r')
line, = plt.plot(xlin,ylin,c='black',linestyle='--',zorder=5)
txt = ''
if self.plot_to == 'html':
txt += 'Data from <a href="http://adsabs.harvard.edu/abs/2011PASA...28..215N">Norris+11</a>'
txt += ' (updated from <a href="http://adsabs.harvard.edu/abs/2003AJ....125..465H">Hopkins+03</a>)\n'
txt += '$\chi^2_{red}$: %.2f' % red_chi_sq
#Legend labels for the Norris data and line, and the data
xlab = 'Norris+11'
leg_labels = [xlab,'{0}th degree polynomial fit to {1}'.format(deg,xlab),self.cat.name]
#write reduced chi squared to report table
self.html.write('</td><td>{0:.2f}<br>'.format(red_chi_sq))
#Plot data on top of Norris+11 data
self.plot(df['logS'],
y=df['logCounts'],
yerr=(df['logErrDown'],df['logErrUp']),
figure=fig,
title=title,
labels=labels,
xlabel=xlabel,
ylabel=ylabel,
axis_perc=0,
text=txt,
loc='br',
leg_labels=leg_labels,
handles=[data,line],
filename=filename,
redo=self.redo)
self.html.write("""</td>
</tr>
</table>""")
def x(self,x,y):
"""For given x and y data, return a line at y=x.
Arguments:
----------
x : list-like
A list of x values.
y : list-like
A list of y values.
Returns:
--------
x : list-like
The same list of x values.
y : list-like
The list of x values."""
return x,x
def y0(self,x,y):
"""For given x and y data, return a line at y=0.
Arguments:
----------
x : list-like
A list of x values.
y : list-like
A list of y values.
Returns:
--------
x : list-like
The same list of x values.
y : list-like
A list of zeros."""
return x,x*0
def y1(self,x,y):
"""For given x and y data, return a line at y=1.
Arguments:
----------
x : list-like
A list of x values.
y : list-like
A list of y values.
Returns:
--------
x : list-like
The same list of x values.
y : list-like
A list of ones."""
return x,[1]*len(x)
def x0(self,x,y):
"""For given x and y data, return a line at x=0.
Arguments:
----------
x : list-like
A list of x values.
y : list-like
A list of y values.
Returns:
--------
x : list-like
A list of zeros.
y : list-like
The same list of y values."""
return y*0,y
def ratio_err_max(self,SNR,ratio):
"""For given x and y data (flux ratio as a function of S/N), return the maximum uncertainty in flux ratio.
Arguments:
----------
SNR : list-like
A list of S/N ratios.
ratio : list-like
A list of flux ratios.
Returns:
--------
SNR : list-like
All S/N values > 0.
ratio : list-like
The maximum uncertainty in the flux ratio for S/N values > 0."""
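        # a reading of the formula below (not stated in the original comments): if both
        # fluxes carry a ~S/SNR error, the ratio error scales as sqrt(2)/SNR, and the
        # envelope returned here is 3 sigma of that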
return SNR[SNR > 0],1+3*np.sqrt(2)/SNR[SNR > 0]
def ratio_err_min(self,SNR,ratio):
"""For given x and y data (flux ratio as a function of S/N), return the minimum uncertainty in flux ratio.
Arguments:
----------
SNR : list-like
A list of S/N ratios.
ratio : list-like
A list of flux ratios.
Returns:
--------
SNR : list-like
All S/N values > 0.
ratio : list-like
The minimum uncertainty in the flux ratio for S/N values > 0."""
return SNR[SNR > 0],1-3*np.sqrt(2)/SNR[SNR > 0]
def axis_to_np(self,axis):
"""Return a numpy array of the non-nan data from the input axis.
Arguments:
----------
axis : string or numpy.array or pandas.Series or list
The data for a certain axis. String are interpreted as column names from catalogue object passed into constructor.
Returns:
--------
axis : numpy.array
All non-nan values of the data.
See Also
--------
numpy.array
pandas.Series"""
#convert input to numpy array
if type(axis) is str:
axis = self.cat.df[axis].values
        elif isinstance(axis, pd.Series):
axis = axis.values
return axis
def shared_indices(self,xaxis,yaxis=None,caxis=None):
"""Return a list of non-nan indices shared between all used axes.
Arguments:
----------
xaxis : string or numpy.array or pandas.Series or list
A list of the x axis data. String are interpreted as column names from catalogue object passed into constructor.
yaxis : string or numpy.array or pandas.Series or list
A list of the y axis data. String are interpreted as column names from catalogue object passed into constructor.
If this is None, yaxis and caxis will be ignored.
caxis : string or numpy.array or pandas.Series or list
A list of the colour axis data. String are interpreted as column names from catalogue object passed into constructor.
If this is None, caxis will be ignored.
Returns:
--------
x : list
The non-nan x data shared between all used axes.
y : list
The non-nan y data shared between all used axes. None returned if yaxis is None.
c : list
The non-nan colour data shared between all used axes. None returned if yaxis or caxis are None.
indices : list
The non-nan indices.
See Also
--------
numpy.array
pandas.Series"""
#convert each axis to numpy array (or leave as None)
x = self.axis_to_np(xaxis)
y = self.axis_to_np(yaxis)
c = self.axis_to_np(caxis)
#get all shared indices from used axes that aren't nan
if yaxis is None:
indices = np.where(~np.isnan(x))[0]
return x[indices],None,None,indices
elif caxis is None:
indices = np.where((~np.isnan(x)) & (~np.isnan(y)))[0]
return x[indices],y[indices],None,indices
else:
indices = np.where((~np.isnan(x)) & (~np.isnan(y)) & (~np.isnan(c)))[0]
return x[indices],y[indices],c[indices],indices
def plot(self, x, y=None, c=None, yerr=None, figure=None, arrows=None, line_funcs=None,
title='', labels=None, text=None, reverse_x=False,
xlabel='', ylabel='', clabel='', leg_labels='', handles=[], loc='bl',
ellipses=None, axis_perc=10, filename='screen', redo=False):
"""Create and write a scatter plot of the data from an input x axis, and optionally, a y and colour axis.
This function assumes shared_indices() has already been called and all input axes
are equal in length and the same data type.
Arguments:
----------
x : numpy.array
The data to plot on the x axis.
Keyword arguments:
------------------
y : numpy.array or pandas.Series
The data to plot on the y axis. Use None to plot a histogram.
c : numpy.array or pandas.Series
The data to plot as the colour axis. Use None for no colour axis.
yerr : numpy.array or pandas.Series
The data to plot as the uncertainty on the y axis. Use None for no uncertainties.
figure : pyplot.figure
Use this matplotlib figure object.
arrows : tuple
A 2-element tuple with the lengths of the arrows to plot at x and y (usually a list) - i.e. (dx[],dy[])
line_funcs : list-like
A list of functions for drawing lines (e.g. [self.x0(), self.y1()]).
title : string
The title of the plot.
        labels : list
A list of string labels to give each data point. Length must be the same as all used axes.
text : string
Annotate this text on the figure (written to bottom of page for html figures).
reverse_x : bool
Reverse the x-axis?
xlabel : string
The label of the x axis.
ylabel : string
The label of the y axis.
clabel : string
The label of the colour axis.
leg_labels : list
A list of labels to include as a legend.
handles : list
A list of pre-defined handles associated the legend labels.
loc : string
Location of the annotated text (not used for html plots). Options are 'bl', 'br', 'tl' and 'tr'.
ellipses : list of matplotlib.patches.Ellipse objects
Draw these ellipses on the figure.
axis_perc : float
The percentage beyond which to calculate the axis limits. Use 0 for no limits.
filename : string
Write the plot to this file name. If string contains 'html', file will be written to html using mpld3.
If it is 'screen', it will be shown on screen. Otherwise, it will attempt to write an image file.
redo: bool
Produce this plot and write it, even if the file already exists.
See Also
--------
numpy.array
pandas.Series
matplotlib.patches.Ellipse"""
#only write figure if user wants it
if self.write:
#derive name of thumbnail file
thumb = '{0}_thumb.png'.format(filename[:-1-len(self.plot_to)])
#don't produce plot if file exists and user didn't specify to re-do
if os.path.exists(filename) and not redo:
if self.verbose:
print 'File already exists. Skipping plot.'
else:
#open html file for plot
if 'html' in filename:
html_fig = open(filename,'w')
#use figure passed in or create new one
if figure is not None:
fig = figure
else:
fig = plt.figure(**self.fig_size)
ax = plt.subplot(111)
norm = None
#plot histogram
if y is None:
edges = np.linspace(-3,2,11) #specific to spectral index
err_data = ax.hist(x,bins=edges)
#plot scatter of data points with fixed colour
elif c is None:
markers = self.markers.copy()
markers.pop('s')
#hack to display markers in legend (https://github.com/mpld3/mpld3/issues/290)
#have to use ax.plot() since ax.scatter() has problems (https://github.com/mpld3/mpld3/issues/227)
#hack to display html labels when line or ellipse overlaid
ax.plot(x,y,'o',zorder=20,alpha=0.0,**markers)
data, = ax.plot(x,y,'o',**markers)
handles.append(data)
#plot scatter of data points with colour axis
else:
#normalise the colour bar so each bin contains equal number of data points
norm = colors.BoundaryNorm(np.percentile(c,np.linspace(0,100,self.cmap.N+1)),self.cmap.N)
data = ax.scatter(x,y,c=c,cmap=self.cmap,norm=norm,**self.colour_markers)
cbar = plt.colorbar(data)
cbar.ax.tick_params(**self.label_size)
cbar.set_label(clabel,**self.fig_font)
data = ax.scatter(x,y,c=c,cmap=self.cmap,zorder=20,alpha=0.0,norm=norm,**self.colour_markers) #same hack as above
#plot error bars and add to list of handles
if yerr is not None:
err_data = ax.errorbar(x, y, yerr=yerr, zorder=4,
linestyle='none', marker=self.markers['marker'],
color=self.markers['color'])
handles.append(err_data)
#set default min and max axis limits, which may change
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
#derive limits of x and y axes axis_perc % beyond their current limit
if axis_perc > 0:
xmin = axis_lim(x, min, perc=axis_perc)
xmax = axis_lim(x, max, perc=axis_perc)
ymin = axis_lim(y, min, perc=axis_perc)
ymax = axis_lim(y, max, perc=axis_perc)
#plot each line according to the input functions
if line_funcs is not None:
xlin = np.linspace(xmin, xmax, num=1000)
ylin = np.linspace(ymin, ymax, num=1000)
for func in line_funcs:
xline,yline = func(xlin, ylin)
_ = plt.plot(xline,yline, lw=2, color='black', linestyle='-', zorder=12)
#doing this here forces the lines in html plots to not increase the axis limits
if reverse_x:
plt.xlim(xmax,xmin)
else:
plt.xlim(xmin,xmax)
plt.ylim(ymin,ymax)
#overlay the title and labels according to given fonts and sizes
plt.tick_params(**self.label_size)
plt.title(title,**self.fig_font)
plt.xlabel(xlabel,**self.fig_font)
plt.ylabel(ylabel,**self.fig_font)
#overlay arrows on each data point
if arrows is not None:
if not (type(arrows) is tuple and len(arrows) == 2):
warnings.warn_explicit('Arrows not formatted correctly for plt.quiver(). Input a 2-element tuple.\n',UserWarning,WARN,cf.f_lineno)
elif c is None:
plt.quiver(x,y,arrows[0],arrows[1],units='x',**self.arrows)
else:
plt.quiver(x,y,arrows[0],arrows[1],c,units='x',cmap=self.cmap,norm=norm,**self.arrows)
#annotate input text
if text is not None and 'html' not in filename:
#write to given location on plot
kwargs = self.fig_font.copy()
if loc == 'tl':
args = (xmin,ymax,text)
kwargs.update({'horizontalalignment' : 'left', 'verticalalignment' : 'top'})
elif loc == 'tr':
args = (xmax,ymax,text)
kwargs.update({'horizontalalignment' : 'right', 'verticalalignment' : 'top'})
elif loc == 'br':
args = (xmax,ymin,text)
kwargs.update({'horizontalalignment' : 'right', 'verticalalignment' : 'bottom'})
else:
args = (xmin,ymin,text)
kwargs.update({'horizontalalignment' : 'left', 'verticalalignment' : 'bottom'})
plt.text(*args,**kwargs)
#write a legend
if len(leg_labels) > 0:
plt.legend(handles,leg_labels,fontsize=self.fig_font['fontsize']//1.5)
#BELOW NOT CURRENTLY WORKING WELL
#if 'html' in filename:
#interactive_legend = plugins.InteractiveLegendPlugin(handles,leg_labels)
#plugins.connect(fig, interactive_legend)
#overlay ellipses on figure
if ellipses is not None:
for e in ellipses:
ax.add_patch(e)
if self.verbose:
print "Writing figure to '{0}'.".format(filename)
#write thumbnail of this figure
if filename != 'screen':
plt.savefig(thumb)
image.thumbnail(thumb,thumb,scale=0.05)
#write html figure
if 'html' in filename:
#include label for every datapoint
if labels is not None:
tooltip = plugins.PointHTMLTooltip(data, labels=labels)
plugins.connect(fig, tooltip)
#print coordinates of mouse as it moves across figure
plugins.connect(fig, plugins.MousePosition(fontsize=self.fig_font['fontsize']))
html_fig.write(mpld3.fig_to_html(fig))
#write annotations to end of html file if user wants html plots
if text is not None:
html_fig.write(self.text_to_html(text))
#otherwise show figure on screen
elif filename == 'screen':
plt.show()
#otherwise write with given extension
else:
plt.savefig(filename)
#Add link and thumbnail to html report table
self.html.write(self.add_html_link(filename,thumb))
plt.close()
def validate(self, name1, name2, redo=False):
"""Produce a validation report between two catalogues, and optionally produce plots.
Arguments:
----------
name1 : string
The dictionary key / name of a catalogue from the main catalogue object used to compare other data.
name2 : string
The dictionary key / name of a catalogue from the main catalogue object used as a comparison.
Keyword Arguments:
------------------
        redo : bool
Produce this plot and write it, even if the file already exists.
Returns:
--------
ratio_med : float
            The median flux density ratio. -111 if this is not derived.
sep_med : float
The median sky separation between the two catalogues.
alpha_med : float
            The median spectral index. -111 if this is not derived."""
print 'Validating {0} with {1}...'.format(name1,name2)
filename = 'screen'
#write survey and number of matched to cross-matches report table
self.html.write("""<tr>
<td>{0}</td>
<td>{1}</td>
<td>{2}""".format(name2,
self.cat.freq[name2],
self.cat.count[name2]))
#plot the positional offsets
fig = plt.figure(**self.fig_size)
title = u"{0} \u2014 {1} positional offsets".format(name1,name2)
if self.plot_to != 'screen':
filename = '{0}/{1}_{2}_astrometry.{3}'.format(self.figDir,name1,name2,self.plot_to)
#compute the S/N and its log based on main catalogue
if name1 in self.cat.flux.keys():
self.cat.df['SNR'] = self.cat.flux[name1] / self.cat.flux_err[name1]
self.cat.df['logSNR'] = np.log10(self.cat.df['SNR'])
caxis = 'logSNR'
else:
caxis = None
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(self.cat.dRA[name2],yaxis=self.cat.dDEC[name2],caxis=caxis)
#derive the statistics of x and y and store in string to annotate on figure
dRAmed,dRAmean,dRAstd,dRAerr,dRAmad = get_stats(x)
dDECmed,dDECmean,dDECstd,dDECerr,dDECmad = get_stats(y)
txt = '$\widetilde{\Delta RA}$: %.2f\n' % dRAmed
txt += '$\overline{\Delta RA}$: %.2f\n' % dRAmean
txt += '$\sigma_{\Delta RA}$: %.2f\n' % dRAstd
txt += '$\sigma_{\overline{\Delta RA}}$: %.2f\n' % dRAerr
txt += '$\widetilde{\Delta DEC}$: %.2f\n' % dDECmed
txt += '$\overline{\Delta DEC}$: %.2f\n' % dDECmean
txt += '$\sigma_{\Delta DEC}$: %.2f\n' % dDECstd
txt += '$\sigma_{\overline{\Delta DEC}}$: %.2f' % dDECerr
#create an ellipse at the position of the median with axes of standard deviation
e1 = Ellipse((dRAmed,dDECmed),width=dRAstd,height=dDECstd,color='black',
fill=False,linewidth=3,zorder=10,alpha=0.9)
#force axis limits of the search radius
radius = max(self.cat.radius[name1],self.cat.radius[name2])
plt.axis('equal')
plt.xlim(-radius,radius)
plt.ylim(-radius,radius)
#create an ellipse at 0,0 with width 2 x search radius
e2 = Ellipse((0,0),width=radius*2,height=radius*2,color='grey',fill=False,
linewidth=3,linestyle='--',zorder=1,alpha=0.9)
#format labels according to destination of figure
if self.plot_to == 'html':
xlabel = u'\u0394RA (arcsec)'
ylabel = u'\u0394DEC (arcsec)'
clabel = u'log\u2081\u2080 S/N'
else:
xlabel = '$\Delta$RA (arcsec)'
ylabel = '$\Delta$DEC (arcsec)'
clabel = r'$\log_{10}$ S/N'
#for html plots, add S/N and separation labels for every data point
if caxis is not None:
labels = ['S/N = {0:.2f}, separation = {1:.2f}\"'.format(cval,totSep)\
for cval,totSep in zip(self.cat.df.loc[indices,'SNR'],self.cat.sep[name2][indices])]
else:
labels = ['Separation = {0:.2f}\"'.format(cval) for cval in self.cat.sep[name2][indices]]
#get median separation in arcsec
c1 = SkyCoord(ra=0,dec=0,unit='arcsec,arcsec')
c2 = SkyCoord(ra=dRAmed,dec=dDECmed,unit='arcsec,arcsec')
sep_med = c1.separation(c2).arcsec
#get mad of separation in arcsec
c1 = SkyCoord(ra=0,dec=0,unit='arcsec,arcsec')
c2 = SkyCoord(ra=dRAmad,dec=dDECmad,unit='arcsec,arcsec')
sep_mad = c1.separation(c2).arcsec
#write the dRA and dDEC to html table
self.html.write("""</td>
<td>{0:.2f} ± {1:.2f} (RA)<br>{2:.2f} ± {3:.2f} (Dec)<br>""".\
format(dRAmed,dRAmad,dDECmed,dDECmad))
#plot the positional offsets
self.plot(x,
y=y,
c=c,
figure=fig,
line_funcs=(self.x0,self.y0),
title=title,
xlabel=xlabel,
ylabel=ylabel,
clabel=clabel,
text=txt,
ellipses=(e1,e2),
axis_perc=0,
loc='tr',
filename=filename,
labels=labels,
redo=redo)
#plot the positional offsets across the sky
title += " by sky position"
xlabel = 'RA (deg)'
ylabel = 'DEC (deg)'
if self.plot_to != 'screen':
filename = '{0}/{1}_{2}_astrometry_sky.{3}'.format(self.figDir,name1,name2,self.plot_to)
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(self.cat.ra[name2],yaxis=self.cat.dec[name2],caxis=caxis)
#for html plots, add S/N and separation labels for every data point
if caxis is not None:
labels = [u'S/N = {0:.2f}, \u0394RA = {1:.2f}\", \u0394DEC = {2:.2f}\"'.format(cval,dra,ddec) for cval,dra,ddec\
in zip(self.cat.df.loc[indices,'SNR'],self.cat.dRA[name2][indices],self.cat.dDEC[name2][indices])]
else:
labels = [u'\u0394RA = {0:.2f}\", \u0394DEC = {1:.2f}\"'.format(dra,ddec) for dra,ddec\
in zip(self.cat.dRA[name2][indices],self.cat.dDEC[name2][indices])]
#plot the positional offsets across the sky
self.plot(x,
y=y,
c=c,
title=title,
xlabel=xlabel,
ylabel=ylabel,
reverse_x=True,
arrows=(self.cat.dRA[name2][indices],self.cat.dDEC[name2][indices]),
clabel=clabel,
axis_perc=0,
filename=filename,
labels=labels,
redo=redo)
#derive column names and check if they exist
freq = int(round(self.cat.freq[name1]))
fitted_flux_col = '{0}_extrapolated_{1}MHz_flux'.format(name2,freq)
fitted_ratio_col = '{0}_extrapolated_{1}MHz_{2}_flux_ratio'.format(name2,freq,name1)
ratio_col = '{0}_{1}_flux_ratio'.format(name2,name1)
#only plot flux ratio if it was derived
if ratio_col not in self.cat.df.columns and (fitted_ratio_col not in self.cat.df.columns or np.all(np.isnan(self.cat.df[fitted_ratio_col]))):
print "Can't plot flux ratio since you haven't derived the fitted flux density at this frequency."
ratio_med = -111
ratio_mad = -111
flux_ratio_type = ''
self.html.write('<td>')
else:
#compute flux ratio based on which one exists and rename variable for figure title
if ratio_col in self.cat.df.columns:
ratio = self.cat.df[ratio_col]
flux_ratio_type = name2
elif fitted_ratio_col in self.cat.df.columns:
ratio = self.cat.df[fitted_ratio_col]
flux_ratio_type = '{0}-extrapolated'.format(name2)
logRatio = np.log10(ratio)
#plot the flux ratio as a function of S/N
fig = plt.figure(**self.fig_size)
title = "{0} / {1} flux ratio".format(name1,flux_ratio_type)
xlabel = 'S/N'
ylabel = 'Flux Density Ratio'
if self.plot_to != 'screen':
filename = '{0}/{1}_{2}_ratio.{3}'.format(self.figDir,name1,name2,self.plot_to)
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices('SNR',yaxis=ratio)#,caxis=self.cat.dec[name1])
plt.loglog()
plt.gca().grid(b=True, which='minor', color='w', linewidth=0.5)
#derive the ratio statistics and store in string to append to plot
ratio_med,ratio_mean,ratio_std,ratio_err,ratio_mad = get_stats(y)
txt = '$\widetilde{Ratio}$: %.2f\n' % ratio_med
txt += '$\overline{Ratio}$: %.2f\n' % ratio_mean
txt += '$\sigma_{Ratio}$: %.2f\n' % ratio_std
txt += '$\sigma_{\overline{Ratio}}$: %.2f' % ratio_err
#for html plots, add flux labels for every data point
if flux_ratio_type == name2:
labels = ['{0} flux = {1:.2f} mJy, {2} flux = {3:.2f} mJy'.format(name1,flux1,name2,flux2)\
for flux1,flux2 in zip(self.cat.flux[name1][indices]*1e3,self.cat.flux[name2][indices]*1e3)]
else:
labels = ['{0} flux = {1:.2f} mJy, {2} flux = {3:.2f} mJy'.format(name1,flux1,flux_ratio_type,flux2)\
for flux1,flux2 in zip(self.cat.flux[name1][indices]*1e3,self.cat.df[fitted_flux_col][indices]*1e3)]
#write the ratio to html report table
if flux_ratio_type == name2:
type = 'measured'
else:
type = 'extrapolated'
self.html.write("""</td>
<td>{0:.2f} ± {1:.2f} ({2})<br>""".format(ratio_med,ratio_mad,type))
#plot the flux ratio as a function of S/N
self.plot(x,
y=y,
c=c,
figure=fig,
line_funcs=(self.y1,self.ratio_err_min,self.ratio_err_max),
title=title,
xlabel=xlabel,
ylabel=ylabel,
text=txt,
loc='tr',
axis_perc=0,
filename=filename,
labels=labels,
redo=redo)
#plot the flux ratio across the sky
fig = plt.figure(**self.fig_size)
title += " by sky position"
xlabel = 'RA (deg)'
ylabel = 'DEC (deg)'
if self.plot_to != 'screen':
filename = '{0}/{1}_{2}_ratio_sky.{3}'.format(self.figDir,name1,name2,self.plot_to)
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(self.cat.ra[name2],yaxis=self.cat.dec[name2],caxis=logRatio)
#format labels according to destination of figure
if self.plot_to == 'html':
clabel = u'log\u2081\u2080 Flux Ratio'
else:
clabel = r'$\log_{10}$ Flux Ratio'
#for html plots, add flux ratio labels for every data point
labels = [u'{0} = {1:.2f}'.format('Flux Ratio',cval) for cval in ratio[indices]]
#plot the flux ratio across the sky
self.plot(x,
y=y,
c=c,
figure=fig,
title=title,
xlabel=xlabel,
ylabel=ylabel,
clabel=clabel,
reverse_x=True,
axis_perc=0,
filename=filename,
labels=labels,
redo=redo)
#derive spectral index column name and check if exists
si_column = '{0}_{1}_alpha'.format(name1,name2)
        if si_column not in self.cat.df.columns:
print "Can't plot spectral index between {0} and {1}, since it was not derived.".format(name1,name2)
alpha_med = -111 #null flag
self.html.write('<td>')
else:
#plot the spectral index
fig = plt.figure(**self.fig_size)
plt.xlim(-3,2)
title = "{0}-{1} Spectral Index".format(name1,name2)
if self.plot_to != 'screen':
                filename = '{0}/{1}_{2}_spectral_index.{3}'.format(self.figDir,name1,name2,self.plot_to)
#get non-nan data shared between each used axis as a numpy array
x,y,c,indices = self.shared_indices(si_column)
#format labels according to destination of figure
freq1 = int(round(min(self.cat.freq[name1],self.cat.freq[name2])))
freq2 = int(round(max(self.cat.freq[name1],self.cat.freq[name2])))
if self.plot_to == 'html':
xlabel = u'\u03B1 [{0}-{1} MHz]'.format(freq1,freq2)
else:
xlabel = r'$\alpha_{%s}^{%s}$' % (freq1,freq2)
#derive the statistics of x and store in string
alpha_med,alpha_mean,alpha_std,alpha_err,alpha_mad = get_stats(x)
txt = '$\widetilde{\\alpha}$: %.2f\n' % alpha_med
txt += '$\overline{\\alpha}$: %.2f\n' % alpha_mean
txt += '$\sigma_{\\alpha}$: %.2f\n' % alpha_std
txt += '$\sigma_{\overline{\\alpha}}$: %.2f' % alpha_err
#write the ratio to html report table
self.html.write("""</td>
<td>{0:.2f} ± {1:.2f}<br>""".format(alpha_med,alpha_mad))
#plot the spectral index
self.plot(x,
figure=fig,
title=title,
xlabel=xlabel,
ylabel='N',
axis_perc=0,
filename=filename,
text=txt,
loc='tl',
redo=redo)
#write the end of the html report table row
self.html.write("""</td>
</tr>""")
alpha_med = self.cat.med_si
alpha_type = '{0}'.format(name1)
#create dictionary of validation metrics and where they come from
metric_val = { 'Flux Ratio' : ratio_med,
'Flux Ratio Uncertainty' : ratio_mad,
'RA Offset' : dRAmed,
'DEC Offset' : dDECmed,
'Positional Offset' : sep_med,
'Positional Offset Uncertainty' : sep_mad,
'Spectral Index' : alpha_med}
metric_source = {'Flux Ratio' : flux_ratio_type,
'Flux Ratio Uncertainty' : flux_ratio_type,
'RA Offset' : name2,
'DEC Offset' : name2,
'Positional Offset' : name2,
'Positional Offset Uncertainty' : name2,
'Spectral Index' : alpha_type}
count = self.cat.count[name2]
#overwrite values if they are valid and come from a larger catalogue
for key in metric_val.keys():
if count > self.metric_count[key] and metric_val[key] != -111:
self.metric_count[key] = count
self.metric_val[key] = metric_val[key]
self.metric_source[key] = metric_source[key]
| 69,888 | 39.63314 | 156 | py |
robust-nli | robust-nli-master/src/losses.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def convert_2d_prob_to_3d(prob_dist):
prob_dist = torch.cat([(prob_dist[:, 0] / 2.0).view(-1, 1),
prob_dist[:, 1].view(-1, 1),
(prob_dist[:, 0] / 2.0).view(-1, 1)], dim=1)
return prob_dist
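# Illustrative sketch (not called by the training code): convert_2d_prob_to_3d maps a
# binary distribution [p_non_entailment, p_entailment] to a 3-way one by splitting the
# non-entailment mass equally over the two remaining classes. The values are made up.
def _convert_2d_prob_to_3d_example():
    two_class = torch.tensor([[0.6, 0.4]])
    return convert_2d_prob_to_3d(two_class)  # tensor([[0.3000, 0.4000, 0.3000]])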
# Focal loss's implementation is adapted from
# https://github.com/zhoudaxia233/focal_loss_pytorch/blob/master/multi_class_focal_loss.py
class FocalLoss(nn.Module):
def __init__(self, alpha=1.0, gamma=2.0, size_average=True, ensemble_training=False, aggregate_ensemble="mean"):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
self.ensemble_training = ensemble_training
self.aggregate_ensemble=aggregate_ensemble
def compute_probs(self, inputs, targets):
prob_dist = F.softmax(inputs, dim=1)
pt = prob_dist.gather(1, targets)
return pt
def aggregate(self, p1, p2, operation):
if self.aggregate_ensemble == "mean":
result = (p1+p2)/2
return result
elif self.aggregate_ensemble == "multiply":
result = p1*p2
return result
else:
            raise NotImplementedError("Operation {} is not implemented.".format(operation))
def forward(self, inputs, targets, inputs_adv=None, second_inputs_adv=None):
targets = targets.view(-1, 1)
norm = 0.0
pt = self.compute_probs(inputs, targets)
pt_scale = self.compute_probs(inputs if inputs_adv is None else inputs_adv, targets)
if self.ensemble_training:
pt_scale_second = self.compute_probs(second_inputs_adv, targets)
if self.aggregate_ensemble in ["mean", "multiply"]:
pt_scale_total = self.aggregate(pt_scale, pt_scale_second, "mean")
batch_loss = -self.alpha * (torch.pow((1 - pt_scale_total), self.gamma)) * torch.log(pt)
else:
batch_loss = -self.alpha * (torch.pow((1 - pt_scale), self.gamma)) * torch.log(pt)
norm += self.alpha * (torch.pow((1 - pt_scale), self.gamma))
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
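# Minimal usage sketch of FocalLoss (shapes and the bias-only logits below are made-up
# stand-ins, e.g. for a hypothesis-only model): examples that the bias-only model
# already gets right with high confidence contribute less to the loss.
def _focal_loss_usage_example():
    loss_fn = FocalLoss(alpha=1.0, gamma=2.0)
    main_logits = torch.randn(8, 3)   # logits of the main NLI model
    bias_logits = torch.randn(8, 3)   # logits of the bias-only model
    labels = torch.randint(0, 3, (8,))
    return loss_fn(main_logits, labels, inputs_adv=bias_logits)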
class POELoss(nn.Module):
"""Implements the product of expert loss."""
def __init__(self, size_average=True, ensemble_training=False, poe_alpha=1):
super().__init__()
self.size_average = size_average
self.ensemble_training=ensemble_training
self.poe_alpha = poe_alpha
def compute_probs(self, inputs):
prob_dist = F.softmax(inputs, dim=1)
return prob_dist
def forward(self, inputs, targets, inputs_adv, second_inputs_adv=None):
targets = targets.view(-1, 1)
pt = self.compute_probs(inputs)
pt_adv = self.compute_probs(inputs_adv)
if self.ensemble_training:
pt_adv_second = self.compute_probs(second_inputs_adv)
joint_pt = F.softmax((torch.log(pt) + torch.log(pt_adv) + torch.log(pt_adv_second)), dim=1)
else:
joint_pt = F.softmax((torch.log(pt) + self.poe_alpha*torch.log(pt_adv)), dim=1)
joint_p = joint_pt.gather(1, targets)
batch_loss = -torch.log(joint_p)
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
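# Minimal usage sketch of POELoss (made-up shapes): the main and bias-only softmax
# distributions are multiplied (summed in log space) before the cross-entropy is taken.
def _poe_loss_usage_example():
    main_logits, bias_logits = torch.randn(8, 3), torch.randn(8, 3)
    labels = torch.randint(0, 3, (8,))
    return POELoss(poe_alpha=1.0)(main_logits, labels, bias_logits)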
class RUBILoss(nn.Module):
    """Implements the RUBI loss."""
def __init__(self, num_labels, size_average=True):
super().__init__()
self.size_average = size_average
self.num_labels = num_labels
self.loss_fct = torch.nn.CrossEntropyLoss()
def compute_probs(self, inputs):
prob_dist = F.softmax(inputs, dim=1)
return prob_dist
def forward(self, inputs, targets, inputs_adv):
inputs = inputs.view(-1, self.num_labels)
inputs_adv = inputs_adv.view(-1, self.num_labels)
targets = targets.view(-1)
logits = inputs*torch.sigmoid(inputs_adv)
logits = logits.view(-1, self.num_labels)
loss = self.loss_fct(logits, targets)
return loss
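# Minimal usage sketch of RUBILoss (made-up shapes): the bias-only logits gate the
# main logits through a sigmoid before a standard cross-entropy is applied.
def _rubi_loss_usage_example():
    main_logits, bias_logits = torch.randn(8, 3), torch.randn(8, 3)
    labels = torch.randint(0, 3, (8,))
    return RUBILoss(num_labels=3)(main_logits, labels, bias_logits)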
epsilon = 1e-8
def log(x):
"""
    Computes the log of x, where x is assumed to be a probability; the input is
    clamped to [epsilon, 1-epsilon] to avoid taking the log of 0 or of values above 1.
"""
return torch.log(torch.clamp(x, min=epsilon, max=1-epsilon))
| 4,401 | 35.081967 | 116 | py |
robust-nli | robust-nli-master/src/BERT/utils_glue.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
import jsonlines
from io import open
import numpy as np
import torch
import torch.nn.functional as f
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score
from os.path import join
from heuristics_utils import have_lexical_overlap, is_subsequence, parse_phrase_list, is_constituent
logger = logging.getLogger(__name__)
def dot_product_matrix_attention(matrix_1, matrix_2):
return matrix_1.bmm(matrix_2.transpose(2, 1))
def get_emb(tokens, word_vec):
matrix = np.zeros((len(tokens), 300))
for i, p in enumerate(tokens):
matrix[i, :] = word_vec[p]
return matrix
def get_word_similarity_new(prem_matrix, hyp_matrix, scores, h_mask, p_mask):
# normalize the token embeddings.
# [8, 64, 768]
prem_matrix = f.normalize(prem_matrix, p=2, dim=2)
hyp_matrix = f.normalize(hyp_matrix, p=2, dim=2)
prem_matrix = prem_matrix*p_mask.view(prem_matrix.shape[0], prem_matrix.shape[1], 1).float()
hyp_matrix = hyp_matrix*h_mask.view(hyp_matrix.shape[0], hyp_matrix.shape[1], 1).float()
similarity_matrix = hyp_matrix.bmm(prem_matrix.transpose(2, 1)) #batch_size*seqlen(h)*seqlen(p)
similarity = torch.max(similarity_matrix, 2)[0] #batch_size*seqlen => hsize
sim_score = []
if "min" in scores or "second_min" in scores:
# compute the min and second min in the similarities.
similarity_replace = similarity.clone()
# all the similarity values are smaller than 1 so 10 is a good number
# so that the masked elements are not selected during the top minimum computations.
similarity_replace[h_mask == 0] = 10
y, i = torch.topk(similarity_replace, k=2, dim=1, largest=False, sorted=True)
if "min" in scores:
sim_score.append(y[:, 0].view(-1, 1))
if "second_min" in scores:
sim_score.append(y[:, 1].view(-1, 1))
if "mean" in scores:
h_lens = torch.sum(h_mask, 1)
        # note that masked positions contribute zeros to the sum, so we divide by the
        # true hypothesis length rather than taking a plain mean over all positions.
sum_similarity = torch.sum(similarity, 1)
mean_similarity = sum_similarity/h_lens.float()
sim_score.append(mean_similarity.view(-1, 1))
if "max" in scores:
max_similarity = torch.max(similarity, 1)[0]
sim_score.append(max_similarity.view(-1, 1))
similarity_score = torch.cat(sim_score, 1)
return similarity_score
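# Illustrative call of get_word_similarity_new (shapes and values are made up; in the
# pipeline the inputs are BERT token embeddings): it returns one column per requested
# statistic of the per-hypothesis-token maximum cosine similarity against the premise.
def _word_similarity_example():
    batch, p_len, h_len, dim = 2, 7, 5, 16
    prem = torch.randn(batch, p_len, dim)
    hyp = torch.randn(batch, h_len, dim)
    p_mask = torch.ones(batch, p_len)
    h_mask = torch.ones(batch, h_len)
    return get_word_similarity_new(prem, hyp, ["min", "mean", "max"], h_mask, p_mask)  # shape (batch, 3)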
def get_length_features(p_mask, h_mask, length_features):
features = []
p_lengths = torch.sum(p_mask, dim=1)
h_lengths = torch.sum(h_mask, dim=1)
if "log-len-diff" in length_features:
features.append((torch.log(torch.max((p_lengths-h_lengths), torch.ones_like(p_lengths)).float())).view(-1, 1))
if "len-diff" in length_features:
features.append((p_lengths-h_lengths).float().view(-1, 1))
return torch.cat(features, 1)
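# Illustrative call of get_length_features (masks are made up): for each pair it
# returns the requested premise/hypothesis length-comparison features.
def _length_features_example():
    p_mask = torch.tensor([[1, 1, 1, 1, 0]])   # premise length 4
    h_mask = torch.tensor([[1, 1, 0, 0, 0]])   # hypothesis length 2
    return get_length_features(p_mask, h_mask, ["log-len-diff", "len-diff"])  # tensor([[log(2.), 2.]])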
def get_hans_features(premise, hypothesis, parse):
constituent = is_constituent(premise, hypothesis, parse)
subsequence = is_subsequence(premise, hypothesis)
lexical_overlap, overlap_rate = have_lexical_overlap(premise, hypothesis)
return constituent, subsequence, lexical_overlap, overlap_rate
def get_hans_features_new(premise, hypothesis, parse, tokenizer):
premise_tokens = tokenizer.tokenize(premise)
hyp_tokens = tokenizer.tokenize(hypothesis)
premise_tokens = [p.lower() for p in premise_tokens]
hyp_tokens = [h.lower() for h in hyp_tokens]
premise_tokens = " ".join(premise_tokens)
hyp_tokens = " ".join(hyp_tokens)
constituent = is_constituent(premise_tokens, hyp_tokens, parse)
subsequence = is_subsequence(premise_tokens, hyp_tokens)
lexical_overlap, overlap_rate = have_lexical_overlap(premise_tokens, hyp_tokens, get_hans_new_features=True)
return constituent, subsequence, lexical_overlap, overlap_rate
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, parse=None, binary_label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
            parse: (Optional) string. The constituency parse of the premise, used to
            derive the HANS heuristic features.
            binary_label: (Optional) A binarized version of the label.
        """
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.parse = parse
self.binary_label = binary_label
class RUBIInputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, h_ids, input_mask_h):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.h_ids = h_ids
self.input_mask_h = input_mask_h
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class HansInputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, h_ids, input_mask_h,
p_ids, input_mask_p, have_overlap, overlap_rate, subsequence, constituent, binary_label=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.h_ids = h_ids
self.input_mask_h = input_mask_h
self.p_ids = p_ids
self.input_mask_p = input_mask_p
self.have_overlap = have_overlap
self.overlap_rate = overlap_rate
self.subsequence = subsequence
self.constituent = constituent
self.binary_label = binary_label
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
@classmethod
def _read_jsonl(cls, filepath):
""" Reads the jsonl file path. """
lines = []
with jsonlines.open(filepath) as f:
for line in f:
lines.append(line)
return lines
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def __init__(self, hans=False):
        # MNLI uses the full 3-way label set.
self.num_classes = 3
self.hans = hans
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_dev_labels(self, data_dir):
lines = self._read_tsv(os.path.join(data_dir, "dev_matched.tsv"))
labels = []
for (i, line) in enumerate(lines):
if i == 0:
continue
label = line[-1]
labels.append(label)
return np.array(labels)
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
if self.hans:
parse = line[6]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, parse=parse))
else:
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_matched")
def get_dev_labels(self, data_dir):
lines = self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv"))
labels = []
for (i, line) in enumerate(lines):
if i == 0:
continue
label = line[-1]
labels.append(label)
return np.array(labels)
class SnliProcessor(DataProcessor):
"""Processor for the SNLI data set (GLUE version)."""
def __init__(self):
self.num_classes = 3
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_validation_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def get_dev_labels(self, data_dir):
lines = self._read_tsv(os.path.join(data_dir, "test.tsv"))
labels = []
for (i, line) in enumerate(lines):
if i == 0:
continue
label = line[-1]
labels.append(label)
return np.array(labels)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class NliProcessor(DataProcessor):
"""Processor for the dataset of the format of SNLI
(InferSent version), could be 2 or 3 classes."""
    # We use the get_labels() method to convert the labels to indices. During
    # transfer it would be problematic if the label order differed from
    # SNLI/MNLI, so get_labels() returns all 3 labels in that order, while the
    # actual number of classes is given by self.num_classes.
def __init__(self, data_dir):
# We assume there is a training file there and we read labels from there.
labels = [line.rstrip() for line in open(join(data_dir, 'labels.train'))]
self.labels = list(set(labels))
labels = ["contradiction", "entailment", "neutral"]
ordered_labels = []
for l in labels:
if l in self.labels:
ordered_labels.append(l)
self.labels = ordered_labels
self.num_classes = len(self.labels)
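        # e.g. if labels.train contains only "entailment" and "contradiction", self.labels
        # becomes ["contradiction", "entailment"] and self.num_classes == 2, while
        # get_labels() still returns all three labels so that label indices stay compatible.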
def get_dev_labels(self, data_dir):
labels = [line.rstrip() for line in open(join(data_dir, 'labels.test'))]
return np.array(labels)
def get_validation_dev_examples(self, data_dir):
return self._create_examples(data_dir, "dev")
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(data_dir, "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(data_dir, "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, data_dir, set_type):
"""Creates examples for the training and dev sets."""
s1s = [line.rstrip() for line in open(join(data_dir, 's1.'+set_type))]
s2s = [line.rstrip() for line in open(join(data_dir, 's2.'+set_type))]
labels = [line.rstrip() for line in open(join(data_dir, 'labels.'+set_type))]
examples = []
for (i, line) in enumerate(s1s):
guid = "%s-%s" % (set_type, i)
text_a = s1s[i]
text_b = s2s[i]
label = labels[i]
            # In case of hidden labels, replace them with entailment.
if label == "hidden":
label = "entailment"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class FEVERProcessor(DataProcessor):
"""Processor for the processed FEVER dataset."""
def __init__(self):
self.num_classes = 3
def read_jsonl(self, filepath):
""" Reads the jsonl file path. """
lines = []
with jsonlines.open(filepath) as f:
for line in f:
lines.append(line)
return lines
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(\
self.read_jsonl(join(data_dir, "nli.train.jsonl")),\
"train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(\
self.read_jsonl(join(data_dir, "nli.dev.jsonl")),\
"dev")
def get_labels(self):
"""See base class."""
return ["SUPPORTS", "REFUTES", "NOT ENOUGH INFO"]
def _create_examples(self, items, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, item) in enumerate(items):
guid = "%s-%s" % (set_type, i)
# Claim has artifacts so this needs to be text_b.
text_a = items[i]["claim"]
text_b = items[i]["evidence"] if "evidence" in items[i] else items[i]["evidence_sentence"]
label = items[i]["gold_label"] if "gold_label" in items[i] else items[i]["label"]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class HansProcessor(DataProcessor):
"""Processor for the processed Hans dataset."""
def __init__(self, hans=False):
self.num_classes = 2
self.hans = hans # this is added only to test hans-only classifier on HANS dataset.
def read_jsonl(self, filepath):
""" Reads the jsonl file path. """
lines = []
with jsonlines.open(filepath) as f:
for line in f:
lines.append(line)
return lines
def get_train_examples(self, data_dir):
"""See base class."""
pass
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(\
self._read_tsv(join(data_dir, "heuristics_evaluation_set.txt")), \
"dev")
def get_dev_labels(self, data_dir):
items = self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt"))
labels = []
for (i, item) in enumerate(items):
if i == 0:
continue
label = items[i][0]
labels.append(label)
return np.array(labels)
def get_labels(self):
"""See base class."""
return ["non-entailment", "entailment"]
def _create_examples(self, items, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, item) in enumerate(items):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
            # Premise and hypothesis columns of the HANS evaluation file.
text_a = items[i][5]
text_b = items[i][6]
label = items[i][0]
if self.hans:
parse = items[i][3]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, parse=parse))
else:
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True, rubi=False, rubi_text="b",
hans=False, hans_features=False):
""" Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` defines the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet)
        rubi: if set, hypothesis-only features are also added to the
        created dataset.
"""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
if rubi:
tokens_h = tokenizer.tokenize(example.text_b if rubi_text == "b" else example.text_a)
half_max_seq_length = int(max_seq_length/2)
if len(tokens_h) > (half_max_seq_length-2):
tokens_h = tokens_h[:(half_max_seq_length-2)]
tokens_h = ["[CLS]"]+tokens_h+["[SEP]"]
h_ids = tokenizer.convert_tokens_to_ids(tokens_h)
input_mask_h = [1]*len(h_ids)
padding_h = [0]*(half_max_seq_length-len(h_ids))
h_ids += padding_h
input_mask_h += padding_h
assert len(h_ids) == half_max_seq_length
assert len(input_mask_h) == half_max_seq_length
if hans: # this is only for rubi, so only compute this for p
def get_ids_mask(text, max_seq_length):
tokens_h = tokenizer.tokenize(text)
half_max_seq_length = int(max_seq_length / 2)
if len(tokens_h) > (half_max_seq_length - 2):
tokens_h = tokens_h[:(half_max_seq_length - 2)]
tokens_h = ["[CLS]"] + tokens_h + ["[SEP]"]
h_ids = tokenizer.convert_tokens_to_ids(tokens_h)
input_mask_h = [1] * len(h_ids)
padding_h = [0] * (half_max_seq_length - len(h_ids))
h_ids += padding_h
input_mask_h += padding_h
assert len(h_ids) == half_max_seq_length
assert len(input_mask_h) == half_max_seq_length
return h_ids, input_mask_h
p_ids, input_mask_p = get_ids_mask(example.text_a if rubi_text == "b" else example.text_b, max_seq_length)
if hans_features:
                    constituent, subsequence, have_overlap, overlap_rate = get_hans_features_new(example.text_a, example.text_b, example.parse, tokenizer)
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
if rubi:
if hans:
features.append(
HansInputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
h_ids=h_ids,
input_mask_h=input_mask_h,
p_ids=p_ids,
input_mask_p=input_mask_p,
have_overlap=have_overlap,
overlap_rate=overlap_rate,
subsequence=subsequence,
constituent=constituent,
))
else:
features.append(
RUBIInputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
h_ids = h_ids,
input_mask_h=input_mask_h))
else:
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
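# Tiny worked example of _truncate_seq_pair (token lists are made up): the longer
# sequence is popped from the end until the combined length fits the budget.
def _truncate_seq_pair_example():
    tokens_a = ["the", "quick", "brown", "fox", "jumps"]
    tokens_b = ["lazy", "dog"]
    _truncate_seq_pair(tokens_a, tokens_b, 5)
    return tokens_a, tokens_b  # (["the", "quick", "brown"], ["lazy", "dog"])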
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def per_class_accuracy(preds, labels):
unique_labels = np.unique(labels)
results = {}
for l in unique_labels:
indices = (l == labels)
acc = (preds[indices] == labels[indices]).mean()
results["acc_"+str(int(l))] = acc
acc = (preds == labels).mean()
results["acc"] = acc
return results
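# Illustrative call of per_class_accuracy with made-up predictions: it reports the
# accuracy within each gold class as well as the overall accuracy.
def _per_class_accuracy_example():
    preds = np.array([0, 1, 1, 0])
    labels = np.array([0, 1, 0, 0])
    return per_class_accuracy(preds, labels)  # {'acc_0': ~0.67, 'acc_1': 1.0, 'acc': 0.75}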
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "snli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "nli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "fever":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "hans":
return per_class_accuracy(preds, labels)
else:
raise KeyError(task_name)
processors = {
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"snli": SnliProcessor,
"nli": NliProcessor,
"fever": FEVERProcessor,
"hans": HansProcessor,
}
output_modes = {
"mnli": "classification",
"mnli-mm": "classification",
"snli": "classification",
"nli": "classification",
"fever": "classification",
"hans": "classification",
}
GLUE_TASKS_NUM_LABELS = {
"mnli": 3,
"mnli-mm": 3,
"snli": 3,
"fever": 3,
"hans": 2,
}
| 29,876 | 37.550968 | 154 | py |
robust-nli | robust-nli-master/src/BERT/run_glue.py | """ Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet)."""
from __future__ import absolute_import, division, print_function
import logging
import os
import random
from utils_glue import GLUE_TASKS_NUM_LABELS
from eval_utils import load_and_cache_examples, evaluate, get_parser
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler)
from tqdm import tqdm, trange
from mutils import write_to_csv
from pytorch_transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer)
from utils_bert import BertDebiasForSequenceClassification
from pytorch_transformers import AdamW, WarmupLinearSchedule
from utils_glue import (compute_metrics, convert_examples_to_features,
processors)
from eval_utils import task_to_data_dir, nli_task_names, actual_task_names, ALL_MODELS
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.CRITICAL)
from eval_utils import MODEL_CLASSES
from eval_utils import do_evaluate
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
def save_model(args, global_step, model, logger):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
args.train_batch_size = args.per_gpu_train_batch_size
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=False)
    set_seed(args) # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=False)
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
if args.hans:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM don't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5],
'p_ids': batch[6],
'p_attention_mask': batch[7],
'have_overlap': batch[8],
'overlap_rate': batch[9],
'subsequence': batch[10],
'constituent': batch[11]
}
elif args.rubi or args.hypothesis_only or args.focal_loss or args.poe_loss:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5]}
else:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids
'labels': batch[3]}
outputs = model(**inputs)
loss = outputs["bert"][0] # model outputs are always tuple in pytorch-transformers (see doc)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
logging_loss = tr_loss
if args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
save_model(args, global_step, model, logger)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
#tb_writer.close()
return global_step, tr_loss / global_step
def main():
parser = get_parser()
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# add all variations of hans automatically
if "HANS" in args.eval_task_names:
hans_variations = ["HANS-const", "HANS-lex", "HANS-sub"]
for variation in hans_variations:
if variation not in args.eval_task_names:
args.eval_task_names.append(variation)
# Setup CUDA, GPU & distributed training
device = torch.device("cuda")
args.device = device
# All of these tasks use the NliProcessor
args.actual_task_names = actual_task_names
# By default we evaluate on the task itself.
if len(args.eval_task_names) == 0:
args.eval_task_names = [args.task_name]
if "all" in args.eval_task_names:
args.eval_task_names = args.eval_task_names + nli_task_names + ["snli", "mnli"]
args.eval_task_names.remove("all")
print(args.eval_task_names)
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
-1, device, 1, bool(False), args.fp16)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name.startswith("fever"):
processor = processors["fever"]()
elif args.task_name in nli_task_names:
processor = processors["nli"](task_to_data_dir[args.task_name])
elif args.task_name in ["mnli"]:
processor = processors["mnli"](hans=args.hans)
elif args.task_name.startswith("HANS"):
processor = processors["hans"]()
elif args.task_name in args.actual_task_names:
processor = processors[args.task_name]()
else:
raise ValueError("Task not found: %s" % (args.task_name))
label_list = processor.get_labels()
num_labels = len(label_list)
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
# Adds rubi parameters here.
config.rubi = args.rubi
config.hans = args.hans
config.nonlinear_h_classifier = args.nonlinear_h_classifier
config.hypothesis_only = args.hypothesis_only
config.lambda_h = args.lambda_h
config.focal_loss = args.focal_loss
config.poe_loss = args.poe_loss
config.similarity = args.similarity
config.gamma_focal = args.gamma_focal
config.weighted_bias_only = args.weighted_bias_only
config.length_features = args.length_features
config.hans_features=args.hans_features
config.hans_only = args.hans_only
config.ensemble_training = args.ensemble_training
config.aggregate_ensemble = args.aggregate_ensemble
config.poe_alpha = args.poe_alpha
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset, _, _ = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
# Create output directory if needed
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
print("model is saved in ", os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval:
result, _ = do_evaluate(args, args.output_dir, tokenizer, model, config)
for r in result:
results.update(r)
# saves the results.
print(results)
if args.outputfile is not None:
write_to_csv(results, args, args.outputfile)
return results
if __name__ == "__main__":
main()
| 13,321 | 42.966997 | 163 | py |
robust-nli | robust-nli-master/src/BERT/utils_bert.py | import torch
from torch import nn
import sys
sys.path.append("../")
from torch.nn import CrossEntropyLoss, MSELoss
from pytorch_transformers.modeling_bert import BertPreTrainedModel, BertModel
from losses import FocalLoss, POELoss, RUBILoss
from utils_glue import get_word_similarity_new, get_length_features
from mutils import grad_mul_const
class BertDebiasForSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
def __init__(self, config):
super(BertDebiasForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.apply(self.init_weights)
self.config = config
self.hypothesis_only = self.get_bool_value(config, "hypothesis_only")
self.gamma_focal = config.gamma_focal if hasattr(config, "gamma_focal") else 2
self.ensemble_training = self.get_bool_value(config, "ensemble_training")
self.poe_alpha = config.poe_alpha if hasattr(config, 'poe_alpha') else 1
# Sets the rubi parameters.
self.similarity = self.get_list_value(config, "similarity")
self.rubi = self.get_bool_value(config, 'rubi')
self.hans = self.get_bool_value(config, 'hans')
self.hans_features = self.get_bool_value(config, 'hans_features')
self.focal_loss = self.get_bool_value(config, 'focal_loss')
self.length_features = self.get_list_value(config, "length_features")
self.hans_only = self.get_bool_value(config, 'hans_only')
self.aggregate_ensemble=self.get_str_value(config, 'aggregate_ensemble')
self.poe_loss = self.get_bool_value(config, 'poe_loss')
self.weighted_bias_only = self.get_bool_value(config, "weighted_bias_only")
num_labels_bias_only = self.config.num_labels
if self.rubi or self.hypothesis_only or self.focal_loss or self.poe_loss or self.hans_only:
if self.hans:
num_features = 4 + len(self.similarity)
if self.hans_features:
num_features += len(self.length_features)
if not config.nonlinear_h_classifier:
self.h_classifier1 = nn.Linear(num_features, num_labels_bias_only)
else:
self.h_classifier1 = nn.Sequential(
nn.Linear(num_features, num_features),
nn.Tanh(),
nn.Linear(num_features, num_features),
nn.Tanh(),
nn.Linear(num_features, num_labels_bias_only))
if self.ensemble_training:
self.h_classifier1_second = self.get_classifier(config, config.nonlinear_h_classifier,
num_labels_bias_only)
else:
# Loads the classifiers from the pretrained model.
self.h_classifier1 = self.get_classifier(config, config.nonlinear_h_classifier, num_labels_bias_only)
self.lambda_h = config.lambda_h
    def get_bool_value(self, config, attribute):
        return bool(getattr(config, attribute, False))
    def get_str_value(self, config, attribute):
        return getattr(config, attribute, "")
    def get_list_value(self, config, attribute):
        return getattr(config, attribute, [])
def get_classifier(self, config, nonlinear, num_labels):
if nonlinear == "deep":
classifier = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size),
nn.Tanh(),
nn.Linear(config.hidden_size, int(config.hidden_size/2)),
nn.Tanh(),
nn.Linear(int(config.hidden_size/2), int(config.hidden_size/4)),
nn.Tanh(),
nn.Linear(int(config.hidden_size/4), num_labels),
)
else:
classifier = nn.Linear(config.hidden_size, num_labels)
return classifier
def set_ensemble_training(self, ensemble_training):
self.ensemble_training = ensemble_training
def set_hans(self, hans):
self.hans = hans
def set_rubi(self, rubi):
self.rubi = rubi
def set_poe_loss(self, poe_loss):
self.poe_loss = poe_loss
def set_focal_loss(self, focal_loss):
self.focal_loss = focal_loss
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None, h_ids=None,
h_attention_mask=None, p_ids=None, p_attention_mask=None, have_overlap=None,
overlap_rate=None, subsequence=None, constituent=None, binary_labels=None):
if self.hypothesis_only:
outputs = self.bert(h_ids, token_type_ids=None, attention_mask=h_attention_mask)
pooled_h = outputs[1]
pooled_h_g = self.dropout(pooled_h)
logits = self.h_classifier1(pooled_h_g)
outputs = (logits,) + outputs[2:]
elif not self.hans_only:
outputs = self.bert(input_ids, position_ids=position_ids,\
token_type_ids=token_type_ids,\
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
# add hidden states and attention if they are here
outputs = (logits,) + outputs[2:]
if self.hans: # if both are correct.
h_outputs = self.bert(h_ids, token_type_ids=None, attention_mask=h_attention_mask)
if self.ensemble_training: # also computes the h-only results.
pooled_h_second = h_outputs[1]
h_embd_second = grad_mul_const(pooled_h_second, 0.0)
pooled_h_g_second = self.dropout(h_embd_second)
h_logits_second = self.h_classifier1_second(pooled_h_g_second)
h_outputs_second = (h_logits_second,) + h_outputs[2:]
h_matrix = h_outputs[0]
h_matrix = grad_mul_const(h_matrix, 0.0)
h_matrix = self.dropout(h_matrix)
p_outputs = self.bert(p_ids, token_type_ids=None, attention_mask=p_attention_mask)
p_matrix = p_outputs[0]
p_matrix = grad_mul_const(p_matrix, 0.0)
p_matrix = self.dropout(p_matrix)
# compute similarity features.
if self.hans_features:
                similarity_score = get_word_similarity_new(h_matrix, p_matrix, self.similarity,\
h_attention_mask, p_attention_mask)
# this is the default case.
            hans_h_inputs = torch.cat((similarity_score,\
have_overlap.view(-1, 1), overlap_rate.view(-1, 1), subsequence.view(-1, 1), constituent.view(-1, 1)), 1)
if self.hans_features and len(self.length_features) != 0:
length_features = get_length_features(p_attention_mask, h_attention_mask, self.length_features)
hans_h_inputs = torch.cat((hans_h_inputs, length_features), 1)
h_logits = self.h_classifier1(hans_h_inputs)
h_outputs = (h_logits,) + h_outputs[2:]
if self.hans_only:
logits = h_logits
# overwrite outputs.
outputs = h_outputs
elif self.focal_loss or self.poe_loss or self.rubi:
h_outputs = self.bert(h_ids, token_type_ids=None, attention_mask=h_attention_mask)
pooled_h = h_outputs[1]
h_embd = grad_mul_const(pooled_h, 0.0)
pooled_h_g = self.dropout(h_embd)
h_logits = self.h_classifier1(pooled_h_g)
h_outputs = (h_logits,) + h_outputs[2:]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
if self.focal_loss:
loss_fct = FocalLoss(gamma=self.gamma_focal,\
ensemble_training=self.ensemble_training,
aggregate_ensemble=self.aggregate_ensemble)
elif self.poe_loss:
loss_fct = POELoss(ensemble_training=self.ensemble_training, poe_alpha=self.poe_alpha)
elif self.rubi:
loss_fct = RUBILoss(num_labels=self.num_labels)
elif self.hans_only:
if self.weighted_bias_only and self.hans:
weights = torch.tensor([0.5, 1.0, 0.5]).cuda()
loss_fct = CrossEntropyLoss(weight=weights)
else:
loss_fct = CrossEntropyLoss()
if self.rubi or self.focal_loss or self.poe_loss:
if self.ensemble_training:
model_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1), \
h_logits.view(-1, self.num_labels), h_logits_second.view(-1, self.num_labels))
else:
model_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1),\
h_logits.view(-1, self.num_labels))
if self.weighted_bias_only and self.hans:
weights = torch.tensor([0.5, 1.0, 0.5]).cuda()
h_loss_fct = CrossEntropyLoss(weight=weights)
if self.ensemble_training:
h_loss_fct_second = CrossEntropyLoss()
else:
h_loss_fct = CrossEntropyLoss()
h_loss = h_loss_fct(h_logits.view(-1, self.num_labels), labels.view(-1))
if self.ensemble_training:
h_loss += h_loss_fct_second(h_logits_second.view(-1, self.num_labels), labels.view(-1))
loss = model_loss + self.lambda_h * h_loss
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
all_outputs = {}
all_outputs["bert"] = outputs
if self.rubi or self.focal_loss or self.poe_loss:
all_outputs["h"] = h_outputs
if self.ensemble_training:
all_outputs["h_second"] = h_outputs_second
return all_outputs # (loss), logits, (hidden_states), (attentions)
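# Conceptual sketch only, not the repo's POELoss implementation (which may differ in
# weighting, e.g. via poe_alpha): a standard product-of-experts combination sums the
# log-probabilities of the main model and the bias-only model before the cross-entropy,
# so examples the bias-only model already solves contribute less to the gradient.
def _poe_logits_sketch(logits, h_logits):
    import torch.nn.functional as F
    return F.log_softmax(logits, dim=-1) + F.log_softmax(h_logits, dim=-1)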
| 12,786 | 48.949219 | 134 | py |
robust-nli | robust-nli-master/src/BERT/eval_utils.py | from torch.utils.data import (DataLoader, SequentialSampler, TensorDataset)
from os.path import join
import numpy as np
from utils_glue import (compute_metrics, convert_examples_to_features,
processors)
import argparse
import torch
import os
import glob
import logging
from tqdm import tqdm, trange
from pytorch_transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer)
from utils_bert import BertDebiasForSequenceClassification
MODEL_CLASSES = {
'bert': (BertConfig, BertDebiasForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
}
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig)), ())
task_to_data_dir = {
"snli": "../../data/datasets/SNLI/original",
"mnli": "../../data/datasets/MNLI",
"mnli-mm": "../../data/datasets/MNLI",
"addonerte": "../../data/datasets/AddOneRTE",
"dpr": "../../data/datasets/DPR/",
"sprl": "../../data/datasets/SPRL/",
"fnplus": "../../data/datasets/FNPLUS/",
"joci": "../../data/datasets/JOCI/",
"mpe": "../../data/datasets/MPE/",
"scitail": "../../data/datasets/SciTail/",
"sick": "../../data/datasets/SICK/",
"glue": "../../data/datasets/GLUEDiagnostic/",
"QQP": "../../data/datasets/QQP/",
"snlihard": "../../data/datasets/SNLIHard/",
"MNLIMatchedHard": "../../data/datasets/MNLIMatchedHard/",
"MNLIMismatchedHard": "../../data/datasets/MNLIMismatchedHard/",
"mnlimatched": "../../data/datasets/MNLIMatched/",
"mnlimismatched": "../../data/datasets/MNLIMismatched/",
"fever": "../../data/datasets/FEVER/",
"fever-symmetric-generated": "../../data/datasets/FEVER-symmetric-generated/",
"MNLIMatchedHardWithHardTest": "../../data/datasets/MNLIMatchedHardWithHardTest/",
"MNLIMismatchedHardWithHardTest": "../../data/datasets/MNLIMismatchedHardWithHardTest/",
"MNLITrueMatched": "../../data/datasets/MNLITrueMatched",
"MNLITrueMismatched": "../../data/datasets/MNLITrueMismatched",
"HANS": "../../data/datasets/HANS",
"HANS-const": "../../data/datasets/HANS/constituent",
"HANS-lex": "../../data/datasets/HANS/lexical_overlap",
"HANS-sub": "../../data/datasets/HANS/subsequence",
}
# All of these tasks use the NliProcessor # I added snli to this one as well.
nli_task_names = ["addonerte", "dpr", "sprl", "fnplus", "joci", "mpe", "scitail", "sick", "glue", "QQP",\
"snlihard", "mnlimatched", "mnlimismatched", "MNLIMatchedHardWithHardTest", \
"MNLIMismatchedHardWithHardTest", "MNLITrueMismatched", "MNLITrueMatched", "MNLIMatchedHard", "MNLIMismatchedHard"]
actual_task_names = ["snli", "mnli", "mnli-mm"]
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.CRITICAL)
def get_parser():
parser = argparse.ArgumentParser()
# RUBI parameters, this is deactivated by default.
parser.add_argument("--ensemble_training", action="store_true", help="Train the h-only and hans bias-only together\
on MNLI.")
parser.add_argument("--poe_alpha", default=1.0, type=float, help="alpha for poe method.")
parser.add_argument("--aggregate_ensemble", choices=["mean"], default="mean",
help="When using ensemble training with focal loss, one can combine the\
two bias only predictions with mean.")
parser.add_argument("--hans_only", action="store_true")
    parser.add_argument("--weighted_bias_only", action="store_true", help="If specified, the bias-only\
        model's loss is weighted. Only impacts hans.")
parser.add_argument("--gamma_focal", type=float, default=2.0)
parser.add_argument("--similarity", type=str, nargs="+", default=[], choices=["max", "mean", "min", "second_min"])
parser.add_argument("--hans", action="store_true", help="If selected trains the bias-only with hans features.")
parser.add_argument("--length_features", type=str, nargs="+", default=[], help="options are len-diff, log-len-diff")
parser.add_argument("--hans_features", action="store_true", help="If selected, computes the features for the hans experiment")
parser.add_argument("--rubi_text", choices=["a", "b"], default="b")
parser.add_argument("--poe_loss", action="store_true", help="Uses the product of the expert loss.")
parser.add_argument("--focal_loss", action="store_true", help="Uses the focal loss for classification,\
where instead of the probabilities of the objects, we use the h only probabilities")
parser.add_argument("--lambda_h", default=1.0, type=float)
parser.add_argument("--rubi", action="store_true", help="If specified use rubi network.")
parser.add_argument("--hypothesis_only", action="store_true")
parser.add_argument("--nonlinear_h_classifier", choices=["deep", None], default=None)
parser.add_argument("--save_labels_file", type=str, default=None, \
help="If specified, saves the labels.")
parser.add_argument("--output_label_format", type=str, default="kaggle", choices=["kaggle", "numpy"],
help="the format of saving the labels.")
# Bert parameters.
parser.add_argument("--outputfile", type=str, default=None, help="If specified, saves the results.")
parser.add_argument("--binerize_eval", action="store_true",
                        help="If specified, binarizes the dataset labels during evaluation.")
parser.add_argument("--use_cached_dataset", action="store_true", help="If specified will use the cached dataset")
parser.add_argument("--model_type", default=None, type=str,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, #required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(
ALL_MODELS))
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--eval_task_names", nargs='+', type=str, default=[], \
help="list of the tasks to evaluate on them.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=2e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=100000, # this was 10000 # 50
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=100000, # this was 10000 # 50
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with step number")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
return parser
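# Minimal sketch (not part of the original file): building the parser above and parsing
# a hypothetical debiased-evaluation command line. Every path and value below is a
# placeholder chosen for illustration, not a setting used by the authors.
def _example_parse_args():
    parser = get_parser()
    return parser.parse_args([
        "--model_type", "bert",
        "--model_name_or_path", "bert-base-uncased",
        "--task_name", "mnli",
        "--output_dir", "/tmp/mnli_poe",      # hypothetical output directory
        "--do_eval", "--do_lower_case",
        "--poe_loss",                         # product-of-experts debiasing
        "--eval_task_names", "HANS", "snli",
    ])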
# writes the labels in the kaggle format.
def write_in_kaggle_format(args, label_ids, gold_labels, save_labels_file, eval_task):
# make a dictionary from the labels.
labels_map = {}
i = 0
for label in gold_labels:
labels_map[i] = label
i = i + 1
ids_file = join(task_to_data_dir[eval_task], "ids.test")
ids = [line.strip('\n') for line in open(ids_file)]
with open(save_labels_file, 'w') as f:
f.write("pairID,gold_label\n")
for i, l in enumerate(label_ids):
label = labels_map[l]
f.write("{0},{1}\n".format(ids[i], label))
def write_in_numpy_format(args, preds, save_labels_file):
np.save(save_labels_file, preds)
def binarize_preds(preds):
# maps the third label (neutral one) to first, which is contradiction.
preds[preds==2] = 0
return preds
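# Tiny illustration of the mapping above; the input values are just an example.
def _example_binarize():
    # three-way predictions [0, 1, 2, 2] collapse to [0, 1, 0, 0]
    return binarize_preds(np.array([0, 1, 2, 2]))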
def load_and_cache_examples(args, task, tokenizer, evaluate=False, dev_evaluate=False):
data_dir = task_to_data_dir[task]
if task.startswith("fever"):
processor = processors["fever"]()
elif task in nli_task_names:
processor = processors["nli"](data_dir)
elif task in ["mnli"]:
processor = processors["mnli"](hans=args.hans)
elif task == "mnli-mm":
processor = processors["mnli-mm"](hans=args.hans)
elif task.startswith("HANS"):
processor = processors["hans"](hans=args.hans)
else:
processor = processors[task]()
# Load data features from cache or dataset file
cached_features_file = os.path.join(data_dir, 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train',
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
print("File is: ", cached_features_file)
if False: #os.path.exists(cached_features_file) and args.use_cached_dataset:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", data_dir)
label_list = processor.get_labels()
if dev_evaluate: # and task in nli_task_names:
examples = processor.get_validation_dev_examples(data_dir)
else:
examples = processor.get_dev_examples(data_dir) if evaluate else\
processor.get_train_examples(data_dir)
features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, "classification",
cls_token_at_end=bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token,
cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0,
pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet
pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
rubi=args.rubi or args.hypothesis_only or args.focal_loss or args.poe_loss or args.hans_only,
rubi_text=args.rubi_text, hans=(args.hans and not evaluate) or args.hans_only,\
hans_features=args.hans_features)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
if (args.hans and not evaluate) or args.hans_only:
all_h_ids = torch.tensor([f.h_ids for f in features], dtype=torch.long)
all_h_masks = torch.tensor([f.input_mask_h for f in features], dtype=torch.long)
all_p_ids = torch.tensor([f.p_ids for f in features], dtype=torch.long)
all_p_masks = torch.tensor([f.input_mask_p for f in features], dtype=torch.long)
all_have_overlap = torch.tensor([f.have_overlap for f in features], dtype=torch.float)
all_overlap_rate = torch.tensor([f.overlap_rate for f in features], dtype=torch.float)
all_subsequence = torch.tensor([f.subsequence for f in features], dtype=torch.float)
all_constituent = torch.tensor([f.constituent for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,\
all_h_ids, all_h_masks, all_p_ids, all_p_masks, all_have_overlap, all_overlap_rate,\
all_subsequence, all_constituent)
elif args.rubi or args.hypothesis_only or args.focal_loss or args.poe_loss:
# Hypothesis representations.
all_h_ids = torch.tensor([f.h_ids for f in features], dtype=torch.long)
all_h_masks = torch.tensor([f.input_mask_h for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,\
all_h_ids, all_h_masks)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset, processor.get_labels(), processor.num_classes
def get_batch_embeddings(model, args, input_ids, token_type_ids=None, attention_mask=None,
position_ids=None, head_mask=None, h_ids=None, h_attention_mask=None, labels=None):
if args.hypothesis_only:
outputs = model.bert(h_ids, token_type_ids=None, attention_mask=h_attention_mask)
pooled_output = outputs[1]
else:
outputs = model.bert(input_ids, position_ids=position_ids, \
token_type_ids=token_type_ids, \
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
return pooled_output
def get_embeddings(args, model, tokenizer):
# Loop to handle MNLI double evaluation (matched, mis-matched)
if "mnli" in args.task_name and "mnli-mm" not in args.task_name:
args.eval_task_names.append("mnli-mm")
results = {}
for eval_task in args.eval_task_names:
# eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset, eval_labels, num_classes = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
print(eval_dataset)
args.eval_batch_size = args.per_gpu_eval_batch_size
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
embeddings = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
if args.hypothesis_only or args.focal_loss or args.poe_loss:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
                              # XLM doesn't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5]}
else:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
                              # XLM doesn't use segment_ids
'labels': batch[3]}
                embedding = get_batch_embeddings(model, args, **inputs)
embeddings.append(embedding)
results[eval_task] = torch.cat(embeddings, dim=0)
return results
def evaluate(args, model, tokenizer, prefix="", dev_evaluate=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
if "mnli" in args.task_name and "mnli-mm" not in args.task_name:
args.eval_task_names.append("mnli-mm")
results = {}
for eval_task in args.eval_task_names:
# eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset, eval_labels, num_classes = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True,\
dev_evaluate=dev_evaluate)
print("num_classes ", num_classes, "eval_labels ", eval_labels)
print(eval_dataset)
args.eval_batch_size = args.per_gpu_eval_batch_size
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
if args.hypothesis_only or args.focal_loss or args.poe_loss:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
                              # XLM doesn't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5]}
elif args.hans_only:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
                              # XLM doesn't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5],
'p_ids': batch[6],
'p_attention_mask': batch[7],
'have_overlap': batch[8],
'overlap_rate': batch[9],
'subsequence': batch[10],
'constituent': batch[11]
}
else:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
                              # XLM doesn't use segment_ids
'labels': batch[3]}
outputs = model(**inputs)["bert"]
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
max_preds = np.argmax(preds, axis=1)
# convert 1,2 labels to 1 in case of binary dataset.
if num_classes == 2 and args.binerize_eval:
max_preds = binarize_preds(max_preds)
out_label_ids = binarize_preds(out_label_ids)
if eval_task in nli_task_names:
eval_task_metric = "nli"
elif eval_task.startswith("fever"):
eval_task_metric = "fever"
elif eval_task.startswith("HANS"):
eval_task_metric = "hans"
else:
eval_task_metric = eval_task
result = compute_metrics(eval_task_metric, max_preds, out_label_ids)
if args.save_labels_file is not None:
save_labels_file = args.save_labels_file + "_" + eval_task
if args.output_label_format == "kaggle":
write_in_kaggle_format(args, max_preds, eval_labels, save_labels_file, eval_task)
elif args.output_label_format == "numpy":
write_in_numpy_format(args, preds, save_labels_file)
results[eval_task] = result["acc"]
if eval_task.startswith("HANS"):
results[eval_task + "_not-entailment"] = result["acc_0"]
results[eval_task + "_entailment"] = result["acc_1"]
print("results is ", result, " eval_task ", eval_task)
return results, preds
def do_evaluate(args, output_dir, tokenizer, model, config, return_embeddings=False, dev_evaluate=False):
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(output_dir, do_lower_case=args.do_lower_case)
checkpoints = [output_dir]
results = []
preds_list = []
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.set_rubi(False)
model.set_ensemble_training(False)
if args.hans:
model.set_hans(False)
model.set_focal_loss(False)
model.set_poe_loss(False)
if args.hans_only:
model.set_hans(True)
model.to(args.device)
if return_embeddings:
result = get_embeddings(args, model, tokenizer)
else:
result, preds = evaluate(args, model, tokenizer, prefix=global_step, dev_evaluate=dev_evaluate)
preds_list.append(preds)
results.append(result)
if return_embeddings:
return results
else:
return results, preds_list
| 25,678 | 51.620902 | 135 | py |
robust-nli | robust-nli-master/src/BERT/mutils.py | import csv
import os
import torch
def write_to_csv(scores, params, outputfile):
"""
This function writes the parameters and the scores with their names in a
csv file.
"""
# creates the file if not existing.
file = open(outputfile, 'a')
# If file is empty writes the keys to the file.
params_dict = vars(params)
if os.stat(outputfile).st_size == 0:
# Writes the configuration parameters
for key in params_dict.keys():
file.write(key+";")
for i, key in enumerate(scores.keys()):
ending = ";" if i < len(scores.keys())-1 else ""
file.write(key+ending)
file.write("\n")
file.close()
# Writes the values to each corresponding column.
with open(outputfile, 'r') as f:
reader = csv.reader(f, delimiter=';')
headers = next(reader)
# Iterates over the header names and write the corresponding values.
with open(outputfile, 'a') as f:
for i, key in enumerate(headers):
ending = ";" if i < len(headers)-1 else ""
if key in params_dict:
f.write(str(params_dict[key])+ending)
elif key in scores:
f.write(str(scores[key])+ending)
else:
raise AssertionError("Key not found in the given dictionary")
f.write("\n")
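# Minimal usage sketch (not from the original repo): `params` can be any argparse
# Namespace and `scores` any dict of metric values; the names below are illustrative.
def _example_write_to_csv():
    import argparse
    params = argparse.Namespace(task_name="mnli", seed=42)   # hypothetical parameters
    scores = {"NLI_val": 84.2, "NLI_test": 83.7}             # hypothetical scores
    write_to_csv(scores, params, "results_example.csv")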
# Multiplies the gradient of the given parameter by a constant.
class GradMulConst(torch.autograd.Function):
@staticmethod
def forward(ctx, x, const):
ctx.const = const
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output*ctx.const, None
def grad_mul_const(x, const):
return GradMulConst.apply(x, const)
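# Minimal sketch of how this is used for debiasing (see the bias-only branches in
# utils_bert.py): multiplying the gradient by 0.0 lets a bias-only classifier read an
# encoder's output without backpropagating into the encoder. Shapes are illustrative.
def _example_grad_mul_const():
    x = torch.randn(4, 8, requires_grad=True)
    blocked = grad_mul_const(x, 0.0)   # forward pass is the identity
    blocked.sum().backward()
    return x.grad                      # a tensor of zeros: no gradient reaches x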
| 1,741 | 30.107143 | 77 | py |
robust-nli | robust-nli-master/src/InferSent/data.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
def get_batch(batch, word_vec, emb_dim=300):
    # sentences in the batch are in decreasing order of length; returns (max_len, bsize, emb_dim)
lengths = np.array([len(x) for x in batch])
max_len = np.max(lengths)
embed = np.zeros((max_len, len(batch), emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = word_vec[batch[i][j]]
return torch.from_numpy(embed).float(), lengths
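# Small sketch (toy vectors, illustrative only): get_batch pads a list of tokenised
# sentences into a (max_len, bsize, emb_dim) float tensor plus an array of lengths.
def _example_get_batch():
    word_vec = {w: np.ones(300) for w in ['<s>', 'hello', 'world', '</s>']}
    batch = [['<s>', 'hello', 'world', '</s>'], ['<s>', 'hello', '</s>']]
    embed, lengths = get_batch(batch, word_vec, emb_dim=300)
    return embed.shape, lengths   # torch.Size([4, 2, 300]), array([4, 3])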
def get_word_dict(sentences):
# create vocab of words
word_dict = {}
for sent in sentences:
for word in sent.split():
if word not in word_dict:
word_dict[word] = ''
word_dict['<s>'] = ''
word_dict['</s>'] = ''
word_dict['<p>'] = ''
return word_dict
def get_glove(word_dict, glove_path):
# create word_vec with glove vectors
word_vec = {}
with open(glove_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.array(list(map(float, vec.split())))
print('Found {0}(/{1}) words with glove vectors'.format(
len(word_vec), len(word_dict)))
return word_vec
def build_vocab(sentences, glove_path):
word_dict = get_word_dict(sentences)
word_vec = get_glove(word_dict, glove_path)
print('Vocab size : {0}'.format(len(word_vec)))
return word_vec
def get_nli(data_path, n_classes):
s1 = {}
s2 = {}
target = {}
if n_classes == 3:
dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 2, 'hidden':0}
else:
dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 1, 'hidden':0}
for data_type in ['train', 'dev', 'test']:
s1[data_type], s2[data_type], target[data_type] = {}, {}, {}
s1[data_type]['path'] = os.path.join(data_path, 's1.' + data_type)
s2[data_type]['path'] = os.path.join(data_path, 's2.' + data_type)
target[data_type]['path'] = os.path.join(data_path,
'labels.' + data_type)
s1[data_type]['sent'] = [line.rstrip() for line in
open(s1[data_type]['path'], 'r')]
s2[data_type]['sent'] = [line.rstrip() for line in
open(s2[data_type]['path'], 'r')]
target[data_type]['data'] = np.array([dico_label[line.rstrip('\n')]
for line in open(target[data_type]['path'], 'r')])
assert len(s1[data_type]['sent']) == len(s2[data_type]['sent']) == \
len(target[data_type]['data'])
print('** {0} DATA : Found {1} pairs of {2} sentences.'.format(
data_type.upper(), len(s1[data_type]['sent']), data_type))
train = {'s1': s1['train']['sent'], 's2': s2['train']['sent'],
'label': target['train']['data']}
dev = {'s1': s1['dev']['sent'], 's2': s2['dev']['sent'],
'label': target['dev']['data']}
test = {'s1': s1['test']['sent'], 's2': s2['test']['sent'],
'label': target['test']['data']}
return train, dev, test
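# Sketch of the on-disk layout that get_nli expects (inferred from the loops above;
# the directory below is a placeholder): each split consists of three aligned
# plain-text files with one example per line.
#
#   <data_path>/s1.train       premises
#   <data_path>/s2.train       hypotheses
#   <data_path>/labels.train   entailment / neutral / contradiction
#   (and likewise for the dev and test splits)
#
# train, dev, test = get_nli('../../data/datasets/SNLI', n_classes=3)  # example call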
| 3,317 | 33.926316 | 84 | py |
robust-nli | robust-nli-master/src/InferSent/models.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This file contains the definition of encoders used in https://arxiv.org/pdf/1705.02364.pdf
"""
import time
import sys
sys.path.append("../")
import numpy as np
import torch
import torch.nn as nn
from mutils import grad_mul_const
from losses import FocalLoss, POELoss, RUBILoss
from torch.nn import CrossEntropyLoss
"""
BLSTM (max/mean) encoder
"""
class InferSent(nn.Module):
def __init__(self, config):
super(InferSent, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.version = 1 if 'version' not in config else config['version']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 1,
bidirectional=True, dropout=self.dpout_model)
assert self.version in [1, 2]
if self.version == 1:
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
elif self.version == 2:
self.bos = '<p>'
self.eos = '</p>'
self.max_pad = False
self.moses_tok = True
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return self.enc_lstm.bias_hh_l0.data.is_cuda
def forward(self, sent_tuple, return_all_emb=False):
# sent_len: [max_len, ..., min_len] (bsize)
# sent: (seqlen x bsize x worddim)
sent, sent_len = sent_tuple
# Sort by length (keep idx)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
else torch.from_numpy(idx_sort)
sent = sent.index_select(1, idx_sort)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
sent_output = self.enc_lstm(sent_packed)[0] # seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
else torch.from_numpy(idx_unsort)
sent_output = sent_output.index_select(1, idx_unsort)
# Pooling
if self.pool_type == "mean":
sent_len = torch.FloatTensor(sent_len.copy()).unsqueeze(1).cuda()
emb = torch.sum(sent_output, 0).squeeze(0)
emb = emb / sent_len.expand_as(emb)
elif self.pool_type == "max":
if not self.max_pad:
sent_output[sent_output == 0] = -1e9
emb = torch.max(sent_output, 0)[0]
if emb.ndimension() == 3:
emb = emb.squeeze(0)
assert emb.ndimension() == 2
if return_all_emb:
all_emb = sent_output.permute(1, 0, 2)
return emb, all_emb
else:
return emb
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
        # update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
def get_batch(self, batch):
        # sentences in the batch are in decreasing order of length
        # returns embeddings of shape (max_len, bsize, word_emb_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch = self.get_batch(sentences[stidx:stidx + bsize])
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward((batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
def visualize(self, sent, tokenize=True):
sent = sent.split() if not tokenize else self.tokenize(sent)
sent = [[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]
if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
import warnings
warnings.warn('No words in "%s" have w2v vectors. Replacing \
by "%s %s"..' % (sent, self.bos, self.eos))
batch = self.get_batch(sent)
if self.is_cuda():
batch = batch.cuda()
output = self.enc_lstm(batch)[0]
output, idxs = torch.max(output, 0)
# output, idxs = output.squeeze(), idxs.squeeze()
idxs = idxs.data.cpu().numpy()
argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]
# visualize model
import matplotlib.pyplot as plt
x = range(len(sent[0]))
y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
plt.xticks(x, sent[0], rotation=45)
plt.bar(x, y)
plt.ylabel('%')
plt.title('Visualisation of words importance')
plt.show()
return output, idxs
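# Minimal usage sketch for the encoder above, independent of NLINet. The GloVe path is
# the default from train_nli.py and the config mirrors the defaults used there; treat
# both as placeholders to adapt to your setup.
def _example_encode_sentences():
    config = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
              'pool_type': 'max', 'dpout_model': 0.0, 'version': 2}
    encoder = InferSent(config)
    encoder.set_w2v_path('../../data/GloVe/glove.840B.300d.txt')  # placeholder path
    sentences = ['A man is playing a guitar .', 'A woman is reading a book .']
    encoder.build_vocab(sentences, tokenize=True)
    return encoder.encode(sentences, bsize=2, tokenize=True)  # (2, 2 * enc_lstm_dim) array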
"""
Main module for Natural Language Inference
"""
class NLINet(nn.Module):
def __init__(self, config):
super(NLINet, self).__init__()
# classifier
self.nonlinear_fc = config['nonlinear_fc']
self.fc_dim = config['fc_dim']
self.n_classes = config['n_classes']
self.enc_lstm_dim = config['enc_lstm_dim']
self.encoder_type = config['encoder_type']
self.dpout_fc = config['dpout_fc']
self.encoder = eval(self.encoder_type)(config)
self.inputdim = 4*2*self.enc_lstm_dim
if self.nonlinear_fc:
self.classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.inputdim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.n_classes),
)
else:
self.classifier = nn.Sequential(
nn.Linear(self.inputdim, self.fc_dim),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Linear(self.fc_dim, self.n_classes)
)
def forward(self, s1, s2):
# s1 : (s1, s1_len)
u = self.encoder(s1)
v = self.encoder(s2)
features = torch.cat((u, v, torch.abs(u-v), u*v), 1)
output = self.classifier(features)
return output
def encode(self, s1):
emb = self.encoder(s1)
return emb
"""
Main module for Debiasing.
"""
class DebiasNet(nn.Module):
    """ This module wraps the NLI model and applies the debiasing technique to it."""
def __init__(self, config):
super(DebiasNet, self).__init__()
# Loss options.
self.focal_loss = config['focal_loss']
self.poe_loss = config['poe_loss']
self.rubi = config['rubi']
self.n_classes = config['n_classes']
self.gamma_focal = config['gamma_focal']
self.poe_alpha = config['poe_alpha'] if 'poe_alpha' in config else 1.0
if self.focal_loss:
self.loss_fct = FocalLoss(gamma=self.gamma_focal)
elif self.poe_loss:
self.loss_fct = POELoss(poe_alpha=self.poe_alpha)
elif self.rubi:
self.loss_fct = RUBILoss(num_labels=self.n_classes)
else:
self.loss_fct = CrossEntropyLoss()
self.ensemble = self.rubi or self.focal_loss or self.poe_loss
self.loss_fct_h = CrossEntropyLoss()
self.h_loss_weight = config['h_loss_weight']
self.nli_model = config['nli_net']
        # Figure out the dimension of the classifier here.
self.fc_dim = config['fc_dim']
self.encoder_type = config['encoder_type']
self.enc_lstm_dim = config['enc_lstm_dim']
self.inputdim = 4 * 2 * self.enc_lstm_dim
self.dpout_fc = config['dpout_fc']
self.nonlinear_fc = config['nonlinear_fc']
if self.ensemble:
self.nonlinear_h_classifier = config['nonlinear_h_classifier']
self.c1 = self.get_classifier(self.nonlinear_h_classifier)
def get_classifier(self, nonlinear_fc):
if nonlinear_fc:
classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
nn.Linear(int(self.inputdim / 4), self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.n_classes),
)
else:
classifier = nn.Sequential(
nn.Linear(int(self.inputdim / 4), self.fc_dim),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Linear(self.fc_dim, self.n_classes),
)
return classifier
def get_loss(self, output, adv_output, labels):
loss = self.loss_fct(output, labels, adv_output)
h_output = adv_output
loss += self.h_loss_weight * self.loss_fct_h(h_output, labels)
return loss
def forward(self, s1, s2, labels):
nli_output = self.nli_model(s1, s2)
h_pred = None
if self.ensemble:
# gets the embedding for the hypotheses.
h_embeddings = self.nli_model.encoder(s2)
h_embeddings = grad_mul_const(h_embeddings, 0.0) # do not backpropagate through the hypothesis encoder.
h_pred = self.c1(h_embeddings)
total_loss = self.get_loss(nli_output, h_pred, labels)
else:
total_loss = self.loss_fct(nli_output, labels)
outputs = {}
outputs['total_loss'] = total_loss
outputs['nli'] = nli_output
outputs['h'] = h_pred
return outputs
| 15,032 | 34.878282 | 115 | py |
robust-nli | robust-nli-master/src/InferSent/train_nli.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import time
import argparse
import os
import numpy as np
import torch
from torch.autograd import Variable
from data import get_nli, get_batch, build_vocab
from mutils import write_to_csv, get_optimizer, construct_model_name
from models import NLINet, DebiasNet
parser = argparse.ArgumentParser(description='NLI training')
# losses.
parser.add_argument("--poe_alpha", type=float, default=1.0)
parser.add_argument("--gamma_focal", type=float, default=2.0)
parser.add_argument("--nonlinear_h_classifier", action="store_true", help="If specified uses a nonlinear classifier for h model.")
parser.add_argument("--use_early_stopping", action="store_true")
parser.add_argument("--rubi", action="store_true")
parser.add_argument("--poe_loss", action="store_true", help="Uses the product of the expert loss.")
parser.add_argument("--focal_loss", action="store_true", help="Uses the focal loss for classification,\
where instead of the probabilities of the objects, we use the h only probabilities")
# paths
parser.add_argument("--outputfile", type=str, default="results.csv", help="writes the final results\
in this file in a csv format.")
parser.add_argument("--dataset", type=str, default="SNLI", help="this will be set automatically.")
parser.add_argument("--outputdir", type=str, default='savedir/', help="Output directory")
parser.add_argument("--outputmodelname", type=str, nargs='+', default=['model.pickle'])
parser.add_argument("--word_emb_path", type=str, default="../../data/GloVe/glove.840B.300d.txt", help="word embedding file path")
# training
parser.add_argument('--h_loss_weight', type=float, default=1.0, help="defines the weight of the adversary loss.")
parser.add_argument("--n_epochs", type=int, default=20)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--dpout_model", type=float, default=0., help="encoder dropout")
parser.add_argument("--dpout_fc", type=float, default=0., help="classifier dropout")
parser.add_argument("--nonlinear_fc", action="store_true", help="use nonlinearity in fc")
parser.add_argument("--optimizer", type=str, default="sgd,lr=0.1", help="adam or sgd,lr=0.1")
parser.add_argument("--lrshrink", type=float, default=5, help="shrink factor for sgd")
parser.add_argument("--decay", type=float, default=0.99, help="lr decay")
parser.add_argument("--minlr", type=float, default=1e-5, help="minimum lr")
parser.add_argument("--max_norm", type=float, default=5., help="max norm (grad clipping)")
# model
parser.add_argument("--version", type=int, default=2, help="Defines the version of the model.")
parser.add_argument("--encoder_type", type=str, default='InferSent', choices=['InferSent'], help="see list of encoders")
parser.add_argument("--enc_lstm_dim", type=int, default=2048, help="encoder nhid dimension")
parser.add_argument("--n_enc_layers", type=int, default=1, help="encoder num layers")
parser.add_argument("--fc_dim", type=int, default=512, help="nhid of fc layers")
parser.add_argument("--pool_type", type=str, default='max', help="max or mean")
# gpu
parser.add_argument("--gpu_id", type=int, default=0, help="GPU ID")
parser.add_argument("--seed", type=int, default=1234, help="seed")
# data
parser.add_argument("--word_emb_dim", type=int, default=300, help="word embedding dimension")
params, unknowns = parser.parse_known_args()
if len(unknowns) != 0:
    raise AssertionError("There exist unknown parameters: {}".format(unknowns))
all_datasets = {
'SNLI': {'path': "../../data/datasets/SNLI", 'n_classes': 3},
'SNLIHard': {'path': "../../data/datasets/SNLIHard", 'n_classes': 3},
'MNLIMatched': {'path': "../../data/datasets/MNLIMatched/", 'n_classes': 3},
'MNLIMismatched': {'path': "../../data/datasets/MNLIMismatched/", 'n_classes': 3},
'MNLIMismatchedHardWithHardTest': {'path': "../../data/datasets/MNLIMismatchedHardWithHardTest/", 'n_classes':3},
'MNLIMatchedHardWithHardTest': {'path': "../../data/datasets/MNLIMatchedHardWithHardTest/", 'n_classes':3},
'JOCI': {'path': "../../data/datasets/JOCI", 'n_classes': 3},
'SICK-E': {'path': "../../data/datasets/SICK-E", 'n_classes': 3},
'AddOneRTE': {'path': "../../data/datasets/AddOneRTE", 'n_classes': 2},
'DPR': {'path': "../../data/datasets/DPR", 'n_classes': 2},
'FNPLUS': {'path': "../../data/datasets/FNPLUS", 'n_classes': 2},
'SciTail': {'path': "../../data/datasets/SciTail", 'n_classes': 2},
'SPRL': {'path': "../../data/datasets/SPRL", 'n_classes': 2},
'MPE': {'path': "../../data/datasets/MPE", 'n_classes': 3},
'QQP': {'path': "../../data/datasets/QQP", 'n_classes': 2},
'GLUEDiagnostic': {'path': "../../data/datasets/GLUEDiagnostic", 'n_classes': 3},
}
params.nlipath = all_datasets[params.dataset]['path']
params.n_classes = all_datasets[params.dataset]['n_classes']
params.outputmodelname = construct_model_name(params, params.outputmodelname)
# set gpu device
torch.cuda.set_device(params.gpu_id)
# print parameters passed, and all parameters
print('\ntogrep : {0}\n'.format(sys.argv[1:]))
print(params)
# this function clears the gradient of the given model.
def clear_gradients(model, name):
for param in eval('model.'+name).parameters():
if param.grad is not None:
param.grad *= 0.0
"""
SEED
"""
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.cuda.manual_seed(params.seed)
"""
DATA
"""
train, valid, test = get_nli(params.nlipath, params.n_classes)
word_vec = build_vocab(train['s1'] + train['s2'] +
valid['s1'] + valid['s2'] +
test['s1'] + test['s2'], params.word_emb_path)
for split in ['s1', 's2']:
for data_type in ['train', 'valid', 'test']:
eval(data_type)[split] = np.array([['<s>'] +
[word for word in sent.split() if word in word_vec] +
['</s>'] for sent in eval(data_type)[split]])
"""
MODEL
"""
# model config
config_nli_model = {
'n_words' : len(word_vec) ,
'word_emb_dim' : params.word_emb_dim ,
'enc_lstm_dim' : params.enc_lstm_dim ,
'n_enc_layers' : params.n_enc_layers ,
'dpout_model' : params.dpout_model ,
'dpout_fc' : params.dpout_fc ,
'fc_dim' : params.fc_dim ,
'bsize' : params.batch_size ,
'n_classes' : params.n_classes ,
'pool_type' : params.pool_type ,
'nonlinear_fc' : params.nonlinear_fc ,
'encoder_type' : params.encoder_type ,
'use_cuda' : True ,
'version' : params.version ,
}
nli_net = NLINet(config_nli_model)
print(nli_net)
config_debias_model = {
'n_words' : len(word_vec) ,
'word_emb_dim' : params.word_emb_dim ,
'enc_lstm_dim' : params.enc_lstm_dim ,
'n_enc_layers' : params.n_enc_layers ,
'dpout_model' : params.dpout_model ,
'dpout_fc' : params.dpout_fc ,
'fc_dim' : params.fc_dim ,
'bsize' : params.batch_size ,
'n_classes' : params.n_classes ,
'pool_type' : params.pool_type ,
'nonlinear_fc' : params.nonlinear_fc ,
'encoder_type' : params.encoder_type ,
'use_cuda' : True ,
'nli_net' : nli_net ,
'version' : params.version ,
"poe_loss" : params.poe_loss ,
"focal_loss" : params.focal_loss ,
"h_loss_weight" : params.h_loss_weight ,
"rubi" : params.rubi ,
"nonlinear_h_classifier" : params.nonlinear_h_classifier,
"gamma_focal" : params.gamma_focal,
"poe_alpha" : params.poe_alpha,
}
# model
encoder_types = ['InferSent']
assert params.encoder_type in encoder_types, "encoder_type must be in " + \
str(encoder_types)
debias_net = DebiasNet(config_debias_model)
print(debias_net)
# optimizer
optim_fn, optim_params = get_optimizer(params.optimizer)
optimizer = optim_fn(debias_net.parameters(), **optim_params)
# cuda by default
debias_net.cuda()
"""
TRAIN
"""
val_acc_best = -1e10
adam_stop = False
stop_training = False
lr = optim_params['lr'] if 'sgd' in params.optimizer else None
def trainepoch(epoch):
print('\nTRAINING : Epoch ' + str(epoch))
nli_net.train()
debias_net.train()
all_costs = []
logs = []
words_count = 0
last_time = time.time()
correct = 0.
# shuffle the data
permutation = np.random.permutation(len(train['s1']))
s1 = train['s1'][permutation]
s2 = train['s2'][permutation]
target = train['label'][permutation]
optimizer.param_groups[0]['lr'] = optimizer.param_groups[0]['lr'] * params.decay if epoch>1\
and 'sgd' in params.optimizer else optimizer.param_groups[0]['lr']
print('Learning rate : {0}'.format(optimizer.param_groups[0]['lr']))
for stidx in range(0, len(s1), params.batch_size):
# prepare batch
s1_batch, s1_len = get_batch(s1[stidx:stidx + params.batch_size],
word_vec, params.word_emb_dim)
s2_batch, s2_len = get_batch(s2[stidx:stidx + params.batch_size],
word_vec, params.word_emb_dim)
s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())
tgt_batch = Variable(torch.LongTensor(target[stidx:stidx + params.batch_size])).cuda()
k = s1_batch.size(1) # actual batch size
# model forward
outputs = debias_net((s1_batch, s1_len), (s2_batch, s2_len), tgt_batch)
pred = outputs['nli'].data.max(1)[1]
correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()
assert len(pred) == len(s1[stidx:stidx + params.batch_size])
# define the losses here.
all_costs.append(outputs['total_loss'].item())
words_count += (s1_batch.nelement() + s2_batch.nelement()) / params.word_emb_dim
# backward
optimizer.zero_grad()
# lets do the backward in the several steps.
outputs['total_loss'].backward()
# gradient clipping (off by default)
shrink_factor = 1
total_norm = 0
for p in debias_net.parameters():
if p.requires_grad:
total_norm += p.grad.data.norm() ** 2
total_norm = np.sqrt(total_norm.cpu())
if total_norm > params.max_norm:
shrink_factor = params.max_norm / total_norm
current_lr = optimizer.param_groups[0]['lr'] # current lr (no external "lr", for adam)
optimizer.param_groups[0]['lr'] = current_lr * shrink_factor # just for update
# optimizer step
optimizer.step()
optimizer.param_groups[0]['lr'] = current_lr
if len(all_costs) == 100:
logs_outputs = '{0} ; total loss {1} ; sentence/s {2} ;\
words/s {3} ; accuracy train : {4}'.format(
stidx, round(np.mean(all_costs), 2),
int(len(all_costs) * params.batch_size / (time.time() - last_time)),
int(words_count * 1.0 / (time.time() - last_time)),
round(100.*correct.item()/(stidx+k), 2))
logs.append(logs_outputs)
print(logs[-1])
last_time = time.time()
words_count = 0
all_costs = []
train_acc = round(100 * correct.item()/len(s1), 2)
print('results : epoch {0} ; mean accuracy train : {1}'
.format(epoch, train_acc))
return train_acc
def evaluate(epoch, eval_type='valid', final_eval=False):
nli_net.eval()
debias_net.eval()
correct = 0.
global val_acc_best, lr, stop_training, adam_stop
if eval_type == 'valid':
print('\nVALIDATION : Epoch {0}'.format(epoch))
s1 = valid['s1'] if eval_type == 'valid' else test['s1']
s2 = valid['s2'] if eval_type == 'valid' else test['s2']
target = valid['label'] if eval_type == 'valid' else test['label']
for i in range(0, len(s1), params.batch_size):
# prepare batch
s1_batch, s1_len = get_batch(s1[i:i + params.batch_size], word_vec, params.word_emb_dim)
s2_batch, s2_len = get_batch(s2[i:i + params.batch_size], word_vec, params.word_emb_dim)
s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())
tgt_batch = Variable(torch.LongTensor(target[i:i + params.batch_size])).cuda()
# model forward
outputs = debias_net((s1_batch, s1_len), (s2_batch, s2_len), tgt_batch)
pred = outputs['nli'].data.max(1)[1]
correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()
# save model
eval_acc = round(100 * correct.item() / len(s1), 2)
if final_eval:
print('finalgrep : accuracy {0} : {1}'.format(eval_type, eval_acc))
else:
print('togrep : results : epoch {0} ; mean accuracy {1} :\
{2}'.format(epoch, eval_type, eval_acc))
if eval_type == 'valid' and epoch <= params.n_epochs:
if eval_acc > val_acc_best:
print('saving model at epoch {0}'.format(epoch))
if not os.path.exists(params.outputdir):
os.makedirs(params.outputdir)
torch.save(debias_net, os.path.join(params.outputdir,
params.outputmodelname))
val_acc_best = eval_acc
else:
if 'sgd' in params.optimizer:
optimizer.param_groups[0]['lr'] = optimizer.param_groups[0]['lr'] / params.lrshrink
print('Shrinking lr by : {0}. New lr = {1}'
.format(params.lrshrink,
optimizer.param_groups[0]['lr']))
if optimizer.param_groups[0]['lr'] < params.minlr and params.use_early_stopping:
stop_training = True
if 'adam' in params.optimizer and params.use_early_stopping:
# early stopping (at 2nd decrease in accuracy)
stop_training = adam_stop
adam_stop = True
return eval_acc
"""
Train model on Natural Language Inference task
"""
epoch = 1
while not stop_training and epoch <= params.n_epochs:
train_acc = trainepoch(epoch)
eval_acc = evaluate(epoch, 'valid')
epoch += 1
# Run best model on test set.
debias_net = torch.load(os.path.join(params.outputdir, params.outputmodelname))
scores = {}
print('\nTEST : Epoch {0}'.format(epoch))
scores['NLI_val'] = evaluate(1e6, 'valid', True)
scores['NLI_test'] = evaluate(0, 'test', True)
write_to_csv(scores, params, params.outputfile)
| 14,974 | 39.582656 | 130 | py |
robust-nli | robust-nli-master/src/InferSent/mutils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import inspect
from torch import optim
import torch
import os
import csv
def construct_model_name(params, names_params):
if len(names_params) == 1:
return names_params[0]
else:
params_dict = vars(params)
outputmodelname=""
for n in names_params:
outputmodelname += str(n) + ":" + str(params_dict[str(n)]) + "-"
return outputmodelname
def write_to_csv(scores, params, outputfile):
"""
This function writes the parameters and the scores with their names in a
csv file.
"""
# creates the file if not existing.
file = open(outputfile, 'a')
# If file is empty writes the keys to the file.
params_dict = vars(params)
if os.stat(outputfile).st_size == 0:
# Writes the configuration parameters
for key in params_dict.keys():
file.write(key+";")
for i, key in enumerate(scores.keys()):
ending = ";" if i < len(scores.keys())-1 else ""
file.write(key+ending)
file.write("\n")
file.close()
# Writes the values to each corresponding column.
with open(outputfile, 'r') as f:
reader = csv.reader(f, delimiter=';')
headers = next(reader)
# Iterates over the header names and write the corresponding values.
with open(outputfile, 'a') as f:
for i, key in enumerate(headers):
ending = ";" if i < len(headers)-1 else ""
if key in params_dict:
f.write(str(params_dict[key])+ending)
elif key in scores:
f.write(str(scores[key])+ending)
else:
raise AssertionError("Key not found in the given dictionary")
f.write("\n")
def get_optimizer(s):
"""
Parse optimizer parameters.
Input should be of the form:
- "sgd,lr=0.01"
- "adagrad,lr=0.1,lr_decay=0.05"
"""
if "," in s:
method = s[:s.find(',')]
optim_params = {}
for x in s[s.find(',') + 1:].split(','):
split = x.split('=')
assert len(split) == 2
assert re.match("^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
optim_params[split[0]] = float(split[1])
else:
method = s
optim_params = {}
if method == 'adadelta':
optim_fn = optim.Adadelta
elif method == 'adagrad':
optim_fn = optim.Adagrad
elif method == 'adam':
optim_fn = optim.Adam
elif method == 'adamax':
optim_fn = optim.Adamax
elif method == 'asgd':
optim_fn = optim.ASGD
elif method == 'rmsprop':
optim_fn = optim.RMSprop
elif method == 'rprop':
optim_fn = optim.Rprop
elif method == 'sgd':
optim_fn = optim.SGD
assert 'lr' in optim_params
else:
raise Exception('Unknown optimization method: "%s"' % method)
# check that we give good parameters to the optimizer
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
assert expected_args[:2] == ['self', 'params']
if not all(k in expected_args[2:] for k in optim_params.keys()):
raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
str(expected_args[2:]), str(optim_params.keys())))
return optim_fn, optim_params
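# A minimal usage sketch for get_optimizer (the `model` below is hypothetical and
# only illustrates the expected call pattern):
#   optim_fn, optim_params = get_optimizer("sgd,lr=0.1")
#   optimizer = optim_fn(model.parameters(), **optim_params)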
"""
Importing batcher and prepare for SentEval
"""
def batcher(batch, params):
# batch contains list of words
batch = [['<s>'] + s + ['</s>'] for s in batch]
sentences = [' '.join(s) for s in batch]
embeddings = params.infersent.encode(sentences, bsize=params.batch_size,
tokenize=False)
return embeddings
def prepare(params, samples):
params.infersent.build_vocab([' '.join(s) for s in samples],
params.glove_path, tokenize=False)
class dotdict(dict):
""" dot.notation access to dictionary attributes """
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
# Multiplies the gradient of the given parameter by a constant.
class GradMulConst(torch.autograd.Function):
@staticmethod
def forward(ctx, x, const):
ctx.const = const
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output*ctx.const, None
def grad_mul_const(x, const):
return GradMulConst.apply(x, const)
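# Usage sketch: scaling the backward pass by a negative constant turns this into a
# gradient-reversal layer (the forward pass is an identity). For example, with a
# hypothetical `features` tensor that requires grad:
#   reversed_features = grad_mul_const(features, -1.0)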
| 4,580 | 28.941176 | 79 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/main.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Main function for this repo. """
import argparse
import torch
from utils.misc import pprint
from utils.gpu_tools import set_gpu
from trainer.meta import MetaTrainer
from trainer.pre import PreTrainer
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Basic parameters
parser.add_argument('--model_type', type=str, default='ResNet', choices=['ResNet']) # The network architecture
    parser.add_argument('--dataset', type=str, default='miniImageNet', choices=['miniImageNet', 'tieredImageNet', 'FC100']) # Dataset
parser.add_argument('--phase', type=str, default='meta_train', choices=['pre_train', 'meta_train', 'meta_eval']) # Phase
parser.add_argument('--seed', type=int, default=0) # Manual seed for PyTorch, "0" means using random seed
parser.add_argument('--gpu', default='1') # GPU id
parser.add_argument('--dataset_dir', type=str, default='./data/mini/') # Dataset folder
# Parameters for meta-train phase
parser.add_argument('--max_epoch', type=int, default=100) # Epoch number for meta-train phase
parser.add_argument('--num_batch', type=int, default=100) # The number for different tasks used for meta-train
parser.add_argument('--shot', type=int, default=1) # Shot number, how many samples for one class in a task
parser.add_argument('--way', type=int, default=5) # Way number, how many classes in a task
parser.add_argument('--train_query', type=int, default=15) # The number of training samples for each class in a task
parser.add_argument('--val_query', type=int, default=15) # The number of test samples for each class in a task
parser.add_argument('--meta_lr1', type=float, default=0.0001) # Learning rate for SS weights
parser.add_argument('--meta_lr2', type=float, default=0.001) # Learning rate for FC weights
parser.add_argument('--base_lr', type=float, default=0.01) # Learning rate for the inner loop
parser.add_argument('--update_step', type=int, default=50) # The number of updates for the inner loop
parser.add_argument('--step_size', type=int, default=10) # The number of epochs to reduce the meta learning rates
parser.add_argument('--gamma', type=float, default=0.5) # Gamma for the meta-train learning rate decay
parser.add_argument('--init_weights', type=str, default=None) # The pre-trained weights for meta-train phase
parser.add_argument('--eval_weights', type=str, default=None) # The meta-trained weights for meta-eval phase
parser.add_argument('--meta_label', type=str, default='exp1') # Additional label for meta-train
    # Parameters for the pre-train phase
parser.add_argument('--pre_max_epoch', type=int, default=100) # Epoch number for pre-train phase
parser.add_argument('--pre_batch_size', type=int, default=128) # Batch size for pre-train phase
parser.add_argument('--pre_lr', type=float, default=0.1) # Learning rate for pre-train phase
parser.add_argument('--pre_gamma', type=float, default=0.2) # Gamma for the pre-train learning rate decay
parser.add_argument('--pre_step_size', type=int, default=30) # The number of epochs to reduce the pre-train learning rate
parser.add_argument('--pre_custom_momentum', type=float, default=0.9) # Momentum for the optimizer during pre-train
parser.add_argument('--pre_custom_weight_decay', type=float, default=0.0005) # Weight decay for the optimizer during pre-train
# Set and print the parameters
args = parser.parse_args()
pprint(vars(args))
# Set the GPU id
set_gpu(args.gpu)
# Set manual seed for PyTorch
if args.seed==0:
print ('Using random seed.')
torch.backends.cudnn.benchmark = True
else:
print ('Using manual seed:', args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Start trainer for pre-train, meta-train or meta-eval
if args.phase=='meta_train':
trainer = MetaTrainer(args)
trainer.train()
elif args.phase=='meta_eval':
trainer = MetaTrainer(args)
trainer.eval()
elif args.phase=='pre_train':
trainer = PreTrainer(args)
trainer.train()
else:
raise ValueError('Please set correct phase.')
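# Example invocations (illustrative only; dataset paths and checkpoint names depend
# on the local setup and are not verified here):
#   python main.py --phase pre_train --dataset miniImageNet --dataset_dir ./data/mini/
#   python main.py --phase meta_train --shot 1 --way 5 --gpu 0
#   python main.py --phase meta_eval --eval_weights <path_to_max_acc.pth>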
| 4,678 | 54.702381 | 133 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/trainer/meta.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Trainer for meta-train phase. """
import os.path as osp
import os
import tqdm
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from dataloader.samplers import CategoriesSampler
from models.mtl import MtlLearner
from utils.misc import Averager, Timer, count_acc, compute_confidence_interval, ensure_path
from tensorboardX import SummaryWriter
from dataloader.dataset_loader import DatasetLoader as Dataset
class MetaTrainer(object):
"""The class that contains the code for the meta-train phase and meta-eval phase."""
def __init__(self, args):
# Set the folder to save the records and checkpoints
log_base_dir = './logs/'
if not osp.exists(log_base_dir):
os.mkdir(log_base_dir)
meta_base_dir = osp.join(log_base_dir, 'meta')
if not osp.exists(meta_base_dir):
os.mkdir(meta_base_dir)
save_path1 = '_'.join([args.dataset, args.model_type, 'MTL'])
save_path2 = 'shot' + str(args.shot) + '_way' + str(args.way) + '_query' + str(args.train_query) + \
'_step' + str(args.step_size) + '_gamma' + str(args.gamma) + '_lr1' + str(args.meta_lr1) + '_lr2' + str(args.meta_lr2) + \
'_batch' + str(args.num_batch) + '_maxepoch' + str(args.max_epoch) + \
'_baselr' + str(args.base_lr) + '_updatestep' + str(args.update_step) + \
'_stepsize' + str(args.step_size) + '_' + args.meta_label
args.save_path = meta_base_dir + '/' + save_path1 + '_' + save_path2
ensure_path(args.save_path)
# Set args to be shareable in the class
self.args = args
# Load meta-train set
self.trainset = Dataset('train', self.args)
self.train_sampler = CategoriesSampler(self.trainset.label, self.args.num_batch, self.args.way, self.args.shot + self.args.train_query)
self.train_loader = DataLoader(dataset=self.trainset, batch_sampler=self.train_sampler, num_workers=8, pin_memory=True)
# Load meta-val set
self.valset = Dataset('val', self.args)
self.val_sampler = CategoriesSampler(self.valset.label, 600, self.args.way, self.args.shot + self.args.val_query)
self.val_loader = DataLoader(dataset=self.valset, batch_sampler=self.val_sampler, num_workers=8, pin_memory=True)
# Build meta-transfer learning model
self.model = MtlLearner(self.args)
# Set optimizer
self.optimizer = torch.optim.Adam([{'params': filter(lambda p: p.requires_grad, self.model.encoder.parameters())}, \
{'params': self.model.base_learner.parameters(), 'lr': self.args.meta_lr2}], lr=self.args.meta_lr1)
# Set learning rate scheduler
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=self.args.step_size, gamma=self.args.gamma)
# load pretrained model without FC classifier
self.model_dict = self.model.state_dict()
if self.args.init_weights is not None:
pretrained_dict = torch.load(self.args.init_weights)['params']
else:
pre_base_dir = osp.join(log_base_dir, 'pre')
pre_save_path1 = '_'.join([args.dataset, args.model_type])
pre_save_path2 = 'batchsize' + str(args.pre_batch_size) + '_lr' + str(args.pre_lr) + '_gamma' + str(args.pre_gamma) + '_step' + \
str(args.pre_step_size) + '_maxepoch' + str(args.pre_max_epoch)
pre_save_path = pre_base_dir + '/' + pre_save_path1 + '_' + pre_save_path2
pretrained_dict = torch.load(osp.join(pre_save_path, 'max_acc.pth'))['params']
pretrained_dict = {'encoder.'+k: v for k, v in pretrained_dict.items()}
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in self.model_dict}
print(pretrained_dict.keys())
self.model_dict.update(pretrained_dict)
self.model.load_state_dict(self.model_dict)
# Set model to GPU
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
self.model = self.model.cuda()
def save_model(self, name):
"""The function to save checkpoints.
Args:
name: the name for saved checkpoint
"""
torch.save(dict(params=self.model.state_dict()), osp.join(self.args.save_path, name + '.pth'))
def train(self):
"""The function for the meta-train phase."""
# Set the meta-train log
trlog = {}
trlog['args'] = vars(self.args)
trlog['train_loss'] = []
trlog['val_loss'] = []
trlog['train_acc'] = []
trlog['val_acc'] = []
trlog['max_acc'] = 0.0
trlog['max_acc_epoch'] = 0
# Set the timer
timer = Timer()
# Set global count to zero
global_count = 0
# Set tensorboardX
writer = SummaryWriter(comment=self.args.save_path)
# Generate the labels for train set of the episodes
label_shot = torch.arange(self.args.way).repeat(self.args.shot)
if torch.cuda.is_available():
label_shot = label_shot.type(torch.cuda.LongTensor)
else:
label_shot = label_shot.type(torch.LongTensor)
# Start meta-train
for epoch in range(1, self.args.max_epoch + 1):
# Update learning rate
self.lr_scheduler.step()
# Set the model to train mode
self.model.train()
# Set averager classes to record training losses and accuracies
train_loss_averager = Averager()
train_acc_averager = Averager()
# Generate the labels for test set of the episodes during meta-train updates
label = torch.arange(self.args.way).repeat(self.args.train_query)
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
# Using tqdm to read samples from train loader
tqdm_gen = tqdm.tqdm(self.train_loader)
for i, batch in enumerate(tqdm_gen, 1):
# Update global count number
global_count = global_count + 1
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
p = self.args.shot * self.args.way
data_shot, data_query = data[:p], data[p:]
# Output logits for model
logits = self.model((data_shot, label_shot, data_query))
# Calculate meta-train loss
loss = F.cross_entropy(logits, label)
# Calculate meta-train accuracy
acc = count_acc(logits, label)
# Write the tensorboardX records
writer.add_scalar('data/loss', float(loss), global_count)
writer.add_scalar('data/acc', float(acc), global_count)
# Print loss and accuracy for this step
tqdm_gen.set_description('Epoch {}, Loss={:.4f} Acc={:.4f}'.format(epoch, loss.item(), acc))
# Add loss and accuracy for the averagers
train_loss_averager.add(loss.item())
train_acc_averager.add(acc)
# Loss backwards and optimizer updates
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Update the averagers
train_loss_averager = train_loss_averager.item()
train_acc_averager = train_acc_averager.item()
# Start validation for this epoch, set model to eval mode
self.model.eval()
# Set averager classes to record validation losses and accuracies
val_loss_averager = Averager()
val_acc_averager = Averager()
# Generate the labels for test set of the episodes during meta-val for this epoch
label = torch.arange(self.args.way).repeat(self.args.val_query)
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
# Print previous information
if epoch % 10 == 0:
print('Best Epoch {}, Best Val Acc={:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc']))
# Run meta-validation
for i, batch in enumerate(self.val_loader, 1):
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
p = self.args.shot * self.args.way
data_shot, data_query = data[:p], data[p:]
logits = self.model((data_shot, label_shot, data_query))
loss = F.cross_entropy(logits, label)
acc = count_acc(logits, label)
val_loss_averager.add(loss.item())
val_acc_averager.add(acc)
# Update validation averagers
val_loss_averager = val_loss_averager.item()
val_acc_averager = val_acc_averager.item()
# Write the tensorboardX records
writer.add_scalar('data/val_loss', float(val_loss_averager), epoch)
writer.add_scalar('data/val_acc', float(val_acc_averager), epoch)
# Print loss and accuracy for this epoch
print('Epoch {}, Val, Loss={:.4f} Acc={:.4f}'.format(epoch, val_loss_averager, val_acc_averager))
# Update best saved model
if val_acc_averager > trlog['max_acc']:
trlog['max_acc'] = val_acc_averager
trlog['max_acc_epoch'] = epoch
self.save_model('max_acc')
# Save model every 10 epochs
if epoch % 10 == 0:
self.save_model('epoch'+str(epoch))
# Update the logs
trlog['train_loss'].append(train_loss_averager)
trlog['train_acc'].append(train_acc_averager)
trlog['val_loss'].append(val_loss_averager)
trlog['val_acc'].append(val_acc_averager)
# Save log
torch.save(trlog, osp.join(self.args.save_path, 'trlog'))
if epoch % 10 == 0:
print('Running Time: {}, Estimated Time: {}'.format(timer.measure(), timer.measure(epoch / self.args.max_epoch)))
writer.close()
def eval(self):
"""The function for the meta-eval phase."""
# Load the logs
trlog = torch.load(osp.join(self.args.save_path, 'trlog'))
# Load meta-test set
test_set = Dataset('test', self.args)
sampler = CategoriesSampler(test_set.label, 600, self.args.way, self.args.shot + self.args.val_query)
loader = DataLoader(test_set, batch_sampler=sampler, num_workers=8, pin_memory=True)
# Set test accuracy recorder
test_acc_record = np.zeros((600,))
# Load model for meta-test phase
if self.args.eval_weights is not None:
self.model.load_state_dict(torch.load(self.args.eval_weights)['params'])
else:
self.model.load_state_dict(torch.load(osp.join(self.args.save_path, 'max_acc' + '.pth'))['params'])
# Set model to eval mode
self.model.eval()
# Set accuracy averager
ave_acc = Averager()
# Generate labels
label = torch.arange(self.args.way).repeat(self.args.val_query)
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
label_shot = torch.arange(self.args.way).repeat(self.args.shot)
if torch.cuda.is_available():
label_shot = label_shot.type(torch.cuda.LongTensor)
else:
label_shot = label_shot.type(torch.LongTensor)
# Start meta-test
for i, batch in enumerate(loader, 1):
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
k = self.args.way * self.args.shot
data_shot, data_query = data[:k], data[k:]
logits = self.model((data_shot, label_shot, data_query))
acc = count_acc(logits, label)
ave_acc.add(acc)
test_acc_record[i-1] = acc
if i % 100 == 0:
print('batch {}: {:.2f}({:.2f})'.format(i, ave_acc.item() * 100, acc * 100))
# Calculate the confidence interval, update the logs
m, pm = compute_confidence_interval(test_acc_record)
print('Val Best Epoch {}, Acc {:.4f}, Test Acc {:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc'], ave_acc.item()))
print('Test Acc {:.4f} + {:.4f}'.format(m, pm))
| 13,374 | 44.493197 | 143 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/trainer/pre.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Trainer for pretrain phase. """
import os.path as osp
import os
import tqdm
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from dataloader.samplers import CategoriesSampler
from models.mtl import MtlLearner
from utils.misc import Averager, Timer, count_acc, ensure_path
from tensorboardX import SummaryWriter
from dataloader.dataset_loader import DatasetLoader as Dataset
class PreTrainer(object):
"""The class that contains the code for the pretrain phase."""
def __init__(self, args):
# Set the folder to save the records and checkpoints
log_base_dir = './logs/'
if not osp.exists(log_base_dir):
os.mkdir(log_base_dir)
pre_base_dir = osp.join(log_base_dir, 'pre')
if not osp.exists(pre_base_dir):
os.mkdir(pre_base_dir)
save_path1 = '_'.join([args.dataset, args.model_type])
save_path2 = 'batchsize' + str(args.pre_batch_size) + '_lr' + str(args.pre_lr) + '_gamma' + str(args.pre_gamma) + '_step' + \
str(args.pre_step_size) + '_maxepoch' + str(args.pre_max_epoch)
args.save_path = pre_base_dir + '/' + save_path1 + '_' + save_path2
ensure_path(args.save_path)
# Set args to be shareable in the class
self.args = args
# Load pretrain set
self.trainset = Dataset('train', self.args, train_aug=True)
self.train_loader = DataLoader(dataset=self.trainset, batch_size=args.pre_batch_size, shuffle=True, num_workers=8, pin_memory=True)
# Load meta-val set
self.valset = Dataset('val', self.args)
self.val_sampler = CategoriesSampler(self.valset.label, 600, self.args.way, self.args.shot + self.args.val_query)
self.val_loader = DataLoader(dataset=self.valset, batch_sampler=self.val_sampler, num_workers=8, pin_memory=True)
# Set pretrain class number
num_class_pretrain = self.trainset.num_class
# Build pretrain model
self.model = MtlLearner(self.args, mode='pre', num_cls=num_class_pretrain)
# Set optimizer
self.optimizer = torch.optim.SGD([{'params': self.model.encoder.parameters(), 'lr': self.args.pre_lr}, \
{'params': self.model.pre_fc.parameters(), 'lr': self.args.pre_lr}], \
momentum=self.args.pre_custom_momentum, nesterov=True, weight_decay=self.args.pre_custom_weight_decay)
# Set learning rate scheduler
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=self.args.pre_step_size, \
gamma=self.args.pre_gamma)
# Set model to GPU
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
self.model = self.model.cuda()
def save_model(self, name):
"""The function to save checkpoints.
Args:
name: the name for saved checkpoint
"""
torch.save(dict(params=self.model.encoder.state_dict()), osp.join(self.args.save_path, name + '.pth'))
def train(self):
"""The function for the pre-train phase."""
# Set the pretrain log
trlog = {}
trlog['args'] = vars(self.args)
trlog['train_loss'] = []
trlog['val_loss'] = []
trlog['train_acc'] = []
trlog['val_acc'] = []
trlog['max_acc'] = 0.0
trlog['max_acc_epoch'] = 0
# Set the timer
timer = Timer()
# Set global count to zero
global_count = 0
# Set tensorboardX
writer = SummaryWriter(comment=self.args.save_path)
# Start pretrain
for epoch in range(1, self.args.pre_max_epoch + 1):
# Update learning rate
self.lr_scheduler.step()
# Set the model to train mode
self.model.train()
self.model.mode = 'pre'
# Set averager classes to record training losses and accuracies
train_loss_averager = Averager()
train_acc_averager = Averager()
# Using tqdm to read samples from train loader
tqdm_gen = tqdm.tqdm(self.train_loader)
for i, batch in enumerate(tqdm_gen, 1):
# Update global count number
global_count = global_count + 1
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
label = batch[1]
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
# Output logits for model
logits = self.model(data)
# Calculate train loss
loss = F.cross_entropy(logits, label)
# Calculate train accuracy
acc = count_acc(logits, label)
# Write the tensorboardX records
writer.add_scalar('data/loss', float(loss), global_count)
writer.add_scalar('data/acc', float(acc), global_count)
# Print loss and accuracy for this step
tqdm_gen.set_description('Epoch {}, Loss={:.4f} Acc={:.4f}'.format(epoch, loss.item(), acc))
# Add loss and accuracy for the averagers
train_loss_averager.add(loss.item())
train_acc_averager.add(acc)
# Loss backwards and optimizer updates
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Update the averagers
train_loss_averager = train_loss_averager.item()
train_acc_averager = train_acc_averager.item()
# Start validation for this epoch, set model to eval mode
self.model.eval()
self.model.mode = 'preval'
# Set averager classes to record validation losses and accuracies
val_loss_averager = Averager()
val_acc_averager = Averager()
# Generate the labels for test
label = torch.arange(self.args.way).repeat(self.args.val_query)
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
label_shot = torch.arange(self.args.way).repeat(self.args.shot)
if torch.cuda.is_available():
label_shot = label_shot.type(torch.cuda.LongTensor)
else:
label_shot = label_shot.type(torch.LongTensor)
# Print previous information
if epoch % 10 == 0:
print('Best Epoch {}, Best Val acc={:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc']))
# Run meta-validation
for i, batch in enumerate(self.val_loader, 1):
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
p = self.args.shot * self.args.way
data_shot, data_query = data[:p], data[p:]
logits = self.model((data_shot, label_shot, data_query))
loss = F.cross_entropy(logits, label)
acc = count_acc(logits, label)
val_loss_averager.add(loss.item())
val_acc_averager.add(acc)
# Update validation averagers
val_loss_averager = val_loss_averager.item()
val_acc_averager = val_acc_averager.item()
# Write the tensorboardX records
writer.add_scalar('data/val_loss', float(val_loss_averager), epoch)
writer.add_scalar('data/val_acc', float(val_acc_averager), epoch)
# Print loss and accuracy for this epoch
print('Epoch {}, Val, Loss={:.4f} Acc={:.4f}'.format(epoch, val_loss_averager, val_acc_averager))
# Update best saved model
if val_acc_averager > trlog['max_acc']:
trlog['max_acc'] = val_acc_averager
trlog['max_acc_epoch'] = epoch
self.save_model('max_acc')
# Save model every 10 epochs
if epoch % 10 == 0:
self.save_model('epoch'+str(epoch))
# Update the logs
trlog['train_loss'].append(train_loss_averager)
trlog['train_acc'].append(train_acc_averager)
trlog['val_loss'].append(val_loss_averager)
trlog['val_acc'].append(val_acc_averager)
# Save log
torch.save(trlog, osp.join(self.args.save_path, 'trlog'))
if epoch % 10 == 0:
                print('Running Time: {}, Estimated Time: {}'.format(timer.measure(), timer.measure(epoch / self.args.pre_max_epoch)))
writer.close()
| 9,314 | 42.528037 | 139 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/models/mtl.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Model for meta-transfer learning. """
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.resnet_mtl import ResNetMtl
class BaseLearner(nn.Module):
"""The class for inner loop."""
def __init__(self, args, z_dim):
super().__init__()
self.args = args
self.z_dim = z_dim
self.vars = nn.ParameterList()
self.fc1_w = nn.Parameter(torch.ones([self.args.way, self.z_dim]))
torch.nn.init.kaiming_normal_(self.fc1_w)
self.vars.append(self.fc1_w)
self.fc1_b = nn.Parameter(torch.zeros(self.args.way))
self.vars.append(self.fc1_b)
def forward(self, input_x, the_vars=None):
if the_vars is None:
the_vars = self.vars
fc1_w = the_vars[0]
fc1_b = the_vars[1]
net = F.linear(input_x, fc1_w, fc1_b)
return net
def parameters(self):
return self.vars
class MtlLearner(nn.Module):
"""The class for outer loop."""
def __init__(self, args, mode='meta', num_cls=64):
super().__init__()
self.args = args
self.mode = mode
self.update_lr = args.base_lr
self.update_step = args.update_step
z_dim = 640
self.base_learner = BaseLearner(args, z_dim)
if self.mode == 'meta':
self.encoder = ResNetMtl()
else:
self.encoder = ResNetMtl(mtl=False)
self.pre_fc = nn.Sequential(nn.Linear(640, 1000), nn.ReLU(), nn.Linear(1000, num_cls))
def forward(self, inp):
"""The function to forward the model.
Args:
inp: input images.
Returns:
the outputs of MTL model.
"""
if self.mode=='pre':
return self.pretrain_forward(inp)
elif self.mode=='meta':
data_shot, label_shot, data_query = inp
return self.meta_forward(data_shot, label_shot, data_query)
elif self.mode=='preval':
data_shot, label_shot, data_query = inp
return self.preval_forward(data_shot, label_shot, data_query)
else:
raise ValueError('Please set the correct mode.')
def pretrain_forward(self, inp):
"""The function to forward pretrain phase.
Args:
inp: input images.
Returns:
the outputs of pretrain model.
"""
return self.pre_fc(self.encoder(inp))
def meta_forward(self, data_shot, label_shot, data_query):
"""The function to forward meta-train phase.
Args:
data_shot: train images for the task
label_shot: train labels for the task
data_query: test images for the task.
Returns:
logits_q: the predictions for the test samples.
"""
embedding_query = self.encoder(data_query)
embedding_shot = self.encoder(data_shot)
logits = self.base_learner(embedding_shot)
loss = F.cross_entropy(logits, label_shot)
grad = torch.autograd.grad(loss, self.base_learner.parameters())
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.base_learner.parameters())))
logits_q = self.base_learner(embedding_query, fast_weights)
for _ in range(1, self.update_step):
logits = self.base_learner(embedding_shot, fast_weights)
loss = F.cross_entropy(logits, label_shot)
grad = torch.autograd.grad(loss, fast_weights)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q = self.base_learner(embedding_query, fast_weights)
return logits_q
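    # Note on the inner loop above: each step applies a MAML-style update
    #   theta' = theta - base_lr * grad_theta CE(f_theta(x_shot), y_shot)
    # to the BaseLearner (FC) weights only; the encoder embeddings are computed once
    # and reused for all update_step iterations.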
def preval_forward(self, data_shot, label_shot, data_query):
"""The function to forward meta-validation during pretrain phase.
Args:
data_shot: train images for the task
label_shot: train labels for the task
data_query: test images for the task.
Returns:
logits_q: the predictions for the test samples.
"""
embedding_query = self.encoder(data_query)
embedding_shot = self.encoder(data_shot)
logits = self.base_learner(embedding_shot)
loss = F.cross_entropy(logits, label_shot)
grad = torch.autograd.grad(loss, self.base_learner.parameters())
fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, self.base_learner.parameters())))
logits_q = self.base_learner(embedding_query, fast_weights)
for _ in range(1, 100):
logits = self.base_learner(embedding_shot, fast_weights)
loss = F.cross_entropy(logits, label_shot)
grad = torch.autograd.grad(loss, fast_weights)
fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, fast_weights)))
logits_q = self.base_learner(embedding_query, fast_weights)
return logits_q
| 5,292 | 38.796992 | 115 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/models/conv2d_mtl.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/pytorch/pytorch
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" MTL CONV layers. """
import math
import torch
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair
class _ConvNdMtl(Module):
"""The class for meta-transfer convolution"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias):
super(_ConvNdMtl, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(
in_channels, out_channels // groups, *kernel_size))
self.mtl_weight = Parameter(torch.ones(in_channels, out_channels // groups, 1, 1))
else:
self.weight = Parameter(torch.Tensor(
out_channels, in_channels // groups, *kernel_size))
self.mtl_weight = Parameter(torch.ones(out_channels, in_channels // groups, 1, 1))
self.weight.requires_grad=False
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
self.bias.requires_grad=False
self.mtl_bias = Parameter(torch.zeros(out_channels))
else:
self.register_parameter('bias', None)
self.register_parameter('mtl_bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.mtl_weight.data.uniform_(1, 1)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
self.mtl_bias.data.uniform_(0, 0)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
class Conv2dMtl(_ConvNdMtl):
"""The class for meta-transfer convolution"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2dMtl, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias)
def forward(self, inp):
new_mtl_weight = self.mtl_weight.expand(self.weight.shape)
new_weight = self.weight.mul(new_mtl_weight)
if self.bias is not None:
new_bias = self.bias + self.mtl_bias
else:
new_bias = None
return F.conv2d(inp, new_weight, new_bias, self.stride,
self.padding, self.dilation, self.groups)
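# Usage sketch (Conv2dMtl is meant as a drop-in replacement for nn.Conv2d; the input
# tensor below is hypothetical):
#   conv = Conv2dMtl(3, 64, kernel_size=3, padding=1)
#   out = conv(torch.randn(2, 3, 80, 80))
# The frozen base weights are element-wise scaled by mtl_weight and the frozen bias is
# shifted by mtl_bias, so only these lightweight scale/shift parameters receive gradients.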
| 4,195 | 40.137255 | 94 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/models/resnet_mtl.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" ResNet with MTL. """
import torch.nn as nn
from models.conv2d_mtl import Conv2dMtl
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def conv3x3mtl(in_planes, out_planes, stride=1):
return Conv2dMtl(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlockMtl(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlockMtl, self).__init__()
self.conv1 = conv3x3mtl(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3mtl(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BottleneckMtl(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BottleneckMtl, self).__init__()
self.conv1 = Conv2dMtl(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = Conv2dMtl(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = Conv2dMtl(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNetMtl(nn.Module):
def __init__(self, layers=[4, 4, 4], mtl=True):
super(ResNetMtl, self).__init__()
if mtl:
self.Conv2d = Conv2dMtl
block = BasicBlockMtl
else:
self.Conv2d = nn.Conv2d
block = BasicBlock
cfg = [160, 320, 640]
self.inplanes = iChannels = int(cfg[0]/2)
self.conv1 = self.Conv2d(3, iChannels, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(iChannels)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, cfg[0], layers[0], stride=2)
self.layer2 = self._make_layer(block, cfg[1], layers[1], stride=2)
self.layer3 = self._make_layer(block, cfg[2], layers[2], stride=2)
self.avgpool = nn.AvgPool2d(10, stride=1)
for m in self.modules():
if isinstance(m, self.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
self.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
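# With the default configuration (cfg = [160, 320, 640], three stride-2 stages and a
# 10x10 average pool), an 80x80 RGB input is reduced to a flat 640-dimensional feature
# vector, matching the z_dim expected by the meta-learner.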
| 6,842 | 30.246575 | 90 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/utils/misc.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Additional utility functions. """
import os
import time
import pprint
import torch
import numpy as np
import torch.nn.functional as F
def ensure_path(path):
"""The function to make log path.
Args:
path: the generated saving path.
"""
if os.path.exists(path):
pass
else:
os.mkdir(path)
class Averager():
"""The class to calculate the average."""
def __init__(self):
self.n = 0
self.v = 0
def add(self, x):
self.v = (self.v * self.n + x) / (self.n + 1)
self.n += 1
def item(self):
return self.v
def count_acc(logits, label):
"""The function to calculate the .
Args:
logits: input logits.
label: ground truth labels.
Return:
The output accuracy.
"""
pred = F.softmax(logits, dim=1).argmax(dim=1)
if torch.cuda.is_available():
return (pred == label).type(torch.cuda.FloatTensor).mean().item()
return (pred == label).type(torch.FloatTensor).mean().item()
class Timer():
"""The class for timer."""
def __init__(self):
self.o = time.time()
def measure(self, p=1):
x = (time.time() - self.o) / p
x = int(x)
if x >= 3600:
return '{:.1f}h'.format(x / 3600)
if x >= 60:
return '{}m'.format(round(x / 60))
return '{}s'.format(x)
_utils_pp = pprint.PrettyPrinter()
def pprint(x):
_utils_pp.pprint(x)
def compute_confidence_interval(data):
"""The function to calculate the .
Args:
data: input records
label: ground truth labels.
Return:
m: mean value
pm: confidence interval.
"""
a = 1.0 * np.array(data)
m = np.mean(a)
std = np.std(a)
pm = 1.96 * (std / np.sqrt(len(a)))
return m, pm
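# Usage sketch (the accuracy values below are made up):
#   m, pm = compute_confidence_interval([0.52, 0.48, 0.55, 0.50])
# m is the mean and pm the half-width of the 95% interval (1.96 * standard error),
# so results can be reported as "m +/- pm".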
| 2,219 | 24.227273 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/utils/gpu_tools.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Tools for GPU. """
import os
import torch
def set_gpu(cuda_device):
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_device
print('Using gpu:', cuda_device)
| 547 | 31.235294 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/dataloader/dataset_loader.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Dataloader for all datasets. """
import os.path as osp
import os
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
class DatasetLoader(Dataset):
"""The class to load the dataset"""
def __init__(self, setname, args, train_aug=False):
# Set the path according to train, val and test
if setname=='train':
THE_PATH = osp.join(args.dataset_dir, 'train')
label_list = os.listdir(THE_PATH)
elif setname=='test':
THE_PATH = osp.join(args.dataset_dir, 'test')
label_list = os.listdir(THE_PATH)
elif setname=='val':
THE_PATH = osp.join(args.dataset_dir, 'val')
label_list = os.listdir(THE_PATH)
else:
raise ValueError('Wrong setname.')
# Generate empty list for data and label
data = []
label = []
# Get folders' name
folders = [osp.join(THE_PATH, the_label) for the_label in label_list if os.path.isdir(osp.join(THE_PATH, the_label))]
# Get the images' paths and labels
for idx, this_folder in enumerate(folders):
this_folder_images = os.listdir(this_folder)
for image_path in this_folder_images:
data.append(osp.join(this_folder, image_path))
label.append(idx)
# Set data, label and class number to be accessable from outside
self.data = data
self.label = label
self.num_class = len(set(label))
# Transformation
if train_aug:
image_size = 80
self.transform = transforms.Compose([
transforms.Resize(92),
transforms.RandomResizedCrop(88),
transforms.CenterCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))])
else:
image_size = 80
self.transform = transforms.Compose([
transforms.Resize(92),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))])
def __len__(self):
return len(self.data)
def __getitem__(self, i):
path, label = self.data[i], self.label[i]
image = self.transform(Image.open(path).convert('RGB'))
return image, label
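# Usage sketch (assumes dataset_dir contains train/val/test folders with one
# sub-folder per class, as the loader above expects):
#   trainset = DatasetLoader('train', args, train_aug=True)
#   image, label = trainset[0]  # normalised 3 x 80 x 80 tensor and an integer class id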
| 3,153 | 37.463415 | 125 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/dataloader/samplers.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Sampler for dataloader. """
import torch
import numpy as np
class CategoriesSampler():
"""The class to generate episodic data"""
def __init__(self, label, n_batch, n_cls, n_per):
self.n_batch = n_batch
self.n_cls = n_cls
self.n_per = n_per
label = np.array(label)
self.m_ind = []
for i in range(max(label) + 1):
ind = np.argwhere(label == i).reshape(-1)
ind = torch.from_numpy(ind)
self.m_ind.append(ind)
def __len__(self):
return self.n_batch
def __iter__(self):
for i_batch in range(self.n_batch):
batch = []
classes = torch.randperm(len(self.m_ind))[:self.n_cls]
for c in classes:
l = self.m_ind[c]
pos = torch.randperm(len(l))[:self.n_per]
batch.append(l[pos])
batch = torch.stack(batch).t().reshape(-1)
yield batch
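# Usage sketch (mirrors how the trainers build episodic loaders; the numbers are
# illustrative). Each yielded batch holds n_cls * n_per indices, transposed so that,
# for n_per = shot + query, the first n_cls * shot entries form the support set:
#   sampler = CategoriesSampler(dataset.label, n_batch=100, n_cls=5, n_per=1 + 15)
#   loader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler)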
| 1,381 | 32.707317 | 75 | py |
adventures-in-ml-code | adventures-in-ml-code-master/keras_word2vec.py | from keras.models import Model
from keras.layers import Input, Dense, Reshape, merge
from keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import skipgrams
from keras.preprocessing import sequence
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2
import collections
import os
import zipfile
import numpy as np
import tensorflow as tf
def maybe_download(filename, url, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
        filename, _ = urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
def collect_data(vocabulary_size=10000):
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', url, 31344016)
vocabulary = read_data(filename)
print(vocabulary[:7])
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
return data, count, dictionary, reverse_dictionary
vocab_size = 10000
data, count, dictionary, reverse_dictionary = collect_data(vocabulary_size=vocab_size)
print(data[:7])
window_size = 3
vector_dim = 300
epochs = 200000
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
sampling_table = sequence.make_sampling_table(vocab_size)
couples, labels = skipgrams(data, vocab_size, window_size=window_size, sampling_table=sampling_table)
word_target, word_context = zip(*couples)
word_target = np.array(word_target, dtype="int32")
word_context = np.array(word_context, dtype="int32")
print(couples[:10], labels[:10])
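# skipgrams() yields (target, context) index pairs with binary labels: 1 for pairs
# observed inside the window and 0 for randomly drawn negative samples, so the model
# below reduces word2vec training to binary classification over word pairs.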
# create some input variables
input_target = Input((1,))
input_context = Input((1,))
embedding = Embedding(vocab_size, vector_dim, input_length=1, name='embedding')
target = embedding(input_target)
target = Reshape((vector_dim, 1))(target)
context = embedding(input_context)
context = Reshape((vector_dim, 1))(context)
# setup a cosine similarity operation which will be output in a secondary model
similarity = merge([target, context], mode='cos', dot_axes=0)
# now perform the dot product operation to get a similarity measure
dot_product = merge([target, context], mode='dot', dot_axes=1)
dot_product = Reshape((1,))(dot_product)
# add the sigmoid output layer
output = Dense(1, activation='sigmoid')(dot_product)
# create the primary training model
model = Model(input=[input_target, input_context], output=output)
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
# create a secondary validation model to run our similarity checks during training
validation_model = Model(input=[input_target, input_context], output=similarity)
class SimilarityCallback:
def run_sim(self):
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
sim = self._get_sim(valid_examples[i])
nearest = (-sim).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
@staticmethod
def _get_sim(valid_word_idx):
sim = np.zeros((vocab_size,))
in_arr1 = np.zeros((1,))
in_arr2 = np.zeros((1,))
in_arr1[0,] = valid_word_idx
for i in range(vocab_size):
in_arr2[0,] = i
out = validation_model.predict_on_batch([in_arr1, in_arr2])
sim[i] = out
return sim
sim_cb = SimilarityCallback()
arr_1 = np.zeros((1,))
arr_2 = np.zeros((1,))
arr_3 = np.zeros((1,))
for cnt in range(epochs):
idx = np.random.randint(0, len(labels)-1)
arr_1[0,] = word_target[idx]
arr_2[0,] = word_context[idx]
arr_3[0,] = labels[idx]
loss = model.train_on_batch([arr_1, arr_2], arr_3)
if cnt % 100 == 0:
print("Iteration {}, loss={}".format(cnt, loss))
if cnt % 10000 == 0:
sim_cb.run_sim()
| 5,397 | 34.513158 | 101 | py |
adventures-in-ml-code | adventures-in-ml-code-master/dueling_q_tf2_atari.py | import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import imageio
STORE_PATH = 'C:\\Users\\Andy\\TensorFlowBook\\TensorBoard'
MAX_EPSILON = 1
MIN_EPSILON = 0.1
EPSILON_MIN_ITER = 500000
GAMMA = 0.99
BATCH_SIZE = 32
TAU = 0.08
POST_PROCESS_IMAGE_SIZE = (105, 80, 1)
DELAY_TRAINING = 50000
NUM_FRAMES = 4
GIF_RECORDING_FREQ = 100
env = gym.make("SpaceInvaders-v0")
num_actions = env.action_space.n
class DQModel(keras.Model):
def __init__(self, hidden_size: int, num_actions: int, dueling: bool):
super(DQModel, self).__init__()
self.dueling = dueling
self.conv1 = keras.layers.Conv2D(16, (8, 8), (4, 4), activation='relu')
self.conv2 = keras.layers.Conv2D(32, (4, 4), (2, 2), activation='relu')
self.flatten = keras.layers.Flatten()
self.adv_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.adv_out = keras.layers.Dense(num_actions,
kernel_initializer=keras.initializers.he_normal())
if dueling:
self.v_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.v_out = keras.layers.Dense(1, kernel_initializer=keras.initializers.he_normal())
self.lambda_layer = keras.layers.Lambda(lambda x: x - tf.reduce_mean(x))
self.combine = keras.layers.Add()
def call(self, input):
x = self.conv1(input)
x = self.conv2(x)
x = self.flatten(x)
adv = self.adv_dense(x)
adv = self.adv_out(adv)
if self.dueling:
v = self.v_dense(x)
v = self.v_out(v)
norm_adv = self.lambda_layer(adv)
combined = self.combine([v, norm_adv])
return combined
return adv
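# When dueling=True the call above recombines the two streams roughly as
#   Q(s, a) = V(s) + (A(s, a) - mean(A))
# note that tf.reduce_mean is used without an axis argument, so the advantage mean
# is taken over the whole batch rather than per state.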
primary_network = DQModel(256, num_actions, True)
target_network = DQModel(256, num_actions, True)
# make target_network = primary_network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
    t.assign(e)
primary_network.compile(optimizer=keras.optimizers.Adam(), loss=tf.keras.losses.Huber())
class Memory:
def __init__(self, max_memory):
self._max_memory = max_memory
self._actions = np.zeros(max_memory, dtype=np.int32)
self._rewards = np.zeros(max_memory, dtype=np.float32)
self._frames = np.zeros((POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], max_memory), dtype=np.float32)
        self._terminal = np.zeros(max_memory, dtype=bool)
self._i = 0
def add_sample(self, frame, action, reward, terminal):
self._actions[self._i] = action
self._rewards[self._i] = reward
self._frames[:, :, self._i] = frame[:, :, 0]
self._terminal[self._i] = terminal
if self._i % (self._max_memory - 1) == 0 and self._i != 0:
self._i = BATCH_SIZE + NUM_FRAMES + 1
else:
self._i += 1
def sample(self):
if self._i < BATCH_SIZE + NUM_FRAMES + 1:
raise ValueError("Not enough memory to extract a batch")
else:
rand_idxs = np.random.randint(NUM_FRAMES + 1, self._i, size=BATCH_SIZE)
states = np.zeros((BATCH_SIZE, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
next_states = np.zeros((BATCH_SIZE, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
for i, idx in enumerate(rand_idxs):
states[i] = self._frames[:, :, idx - 1 - NUM_FRAMES:idx - 1]
next_states[i] = self._frames[:, :, idx - NUM_FRAMES:idx]
return states, self._actions[rand_idxs], self._rewards[rand_idxs], next_states, self._terminal[rand_idxs]
memory = Memory(500000)
# memory = Memory(100)
def image_preprocess(image, new_size=(105, 80)):
# convert to greyscale, resize and normalize the image
image = tf.image.rgb_to_grayscale(image)
image = tf.image.resize(image, new_size)
image = image / 255
return image
def choose_action(state, primary_network, eps, step):
if step < DELAY_TRAINING:
return random.randint(0, num_actions - 1)
else:
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
return np.argmax(primary_network(tf.reshape(state, (1, POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)).numpy()))
def update_network(primary_network, target_network):
# update target network parameters slowly from primary network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t * (1 - TAU) + e * TAU)
def process_state_stack(state_stack, state):
for i in range(1, state_stack.shape[-1]):
state_stack[:, :, i - 1].assign(state_stack[:, :, i])
state_stack[:, :, -1].assign(state[:, :, 0])
return state_stack
def record_gif(frame_list, episode, fps=50):
imageio.mimsave(STORE_PATH + f"/SPACE_INVADERS_EPISODE-{episode}.gif", frame_list, fps=fps) #duration=duration_per_frame)
def train(primary_network, memory, target_network=None):
states, actions, rewards, next_states, terminal = memory.sample()
# predict Q(s,a) given the batch of states
prim_qt = primary_network(states)
# predict Q(s',a') from the evaluation network
prim_qtp1 = primary_network(next_states)
# copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
target_q = prim_qt.numpy()
updates = rewards
    valid_idxs = np.logical_not(terminal)
batch_idxs = np.arange(BATCH_SIZE)
if target_network is None:
updates[valid_idxs] += GAMMA * np.amax(prim_qtp1.numpy()[valid_idxs, :], axis=1)
else:
prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
q_from_target = target_network(next_states)
updates[valid_idxs] += GAMMA * q_from_target.numpy()[batch_idxs[valid_idxs], prim_action_tp1[valid_idxs]]
target_q[batch_idxs, actions] = updates
loss = primary_network.train_on_batch(states, target_q)
return loss
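# With a target network supplied, the update above implements the double-Q target
#   y = r + GAMMA * Q_target(s', argmax_a Q_primary(s', a))
# and falls back to the plain max-Q target when target_network is None; terminal
# transitions keep y = r.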
num_episodes = 1000000
eps = MAX_EPSILON
render = False
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/DuelingQSI_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
double_q = True
steps = 0
for i in range(num_episodes):
state = env.reset()
state = image_preprocess(state)
state_stack = tf.Variable(np.repeat(state.numpy(), NUM_FRAMES).reshape((POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1],
NUM_FRAMES)))
cnt = 1
avg_loss = 0
tot_reward = 0
if i % GIF_RECORDING_FREQ == 0:
frame_list = []
while True:
if render:
env.render()
action = choose_action(state_stack, primary_network, eps, steps)
next_state, reward, done, info = env.step(action)
tot_reward += reward
if i % GIF_RECORDING_FREQ == 0:
frame_list.append(tf.cast(tf.image.resize(next_state, (480, 320)), tf.uint8).numpy())
next_state = image_preprocess(next_state)
state_stack = process_state_stack(state_stack, next_state)
# store in memory
memory.add_sample(next_state, action, reward, done)
if steps > DELAY_TRAINING:
loss = train(primary_network, memory, target_network if double_q else None)
update_network(primary_network, target_network)
else:
loss = -1
avg_loss += loss
# linearly decay the eps value
if steps > DELAY_TRAINING:
eps = MAX_EPSILON - ((steps - DELAY_TRAINING) / EPSILON_MIN_ITER) * \
(MAX_EPSILON - MIN_EPSILON) if steps < EPSILON_MIN_ITER else \
MIN_EPSILON
steps += 1
if done:
if steps > DELAY_TRAINING:
avg_loss /= cnt
print(f"Episode: {i}, Reward: {tot_reward}, avg loss: {avg_loss:.5f}, eps: {eps:.3f}")
with train_writer.as_default():
tf.summary.scalar('reward', tot_reward, step=i)
tf.summary.scalar('avg loss', avg_loss, step=i)
else:
print(f"Pre-training...Episode: {i}")
if i % GIF_RECORDING_FREQ == 0:
record_gif(frame_list, i)
break
cnt += 1 | 8,874 | 39.711009 | 125 | py |
adventures-in-ml-code | adventures-in-ml-code-master/policy_gradient_reinforce_tf2.py | import gym
import tensorflow as tf
from tensorflow import keras
import numpy as np
import datetime as dt
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard/PolicyGradientCartPole'
GAMMA = 0.95
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
network = keras.Sequential([
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(num_actions, activation='softmax')
])
network.compile(loss='categorical_crossentropy',optimizer=keras.optimizers.Adam())
def get_action(network, state, num_actions):
softmax_out = network(state.reshape((1, -1)))
selected_action = np.random.choice(num_actions, p=softmax_out.numpy()[0])
return selected_action
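# update_network computes the discounted, standardised episode returns and uses them
# as sample weights on the cross-entropy of the actions actually taken - the standard
# Keras formulation of the REINFORCE policy-gradient update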
def update_network(network, rewards, states, actions, num_actions):
reward_sum = 0
discounted_rewards = []
for reward in rewards[::-1]: # reverse buffer r
reward_sum = reward + GAMMA * reward_sum
discounted_rewards.append(reward_sum)
discounted_rewards.reverse()
discounted_rewards = np.array(discounted_rewards)
# standardise the rewards
discounted_rewards -= np.mean(discounted_rewards)
discounted_rewards /= np.std(discounted_rewards)
states = np.vstack(states)
    # weight the cross-entropy of the taken actions by the discounted returns -
    # this is the sample-weight form of the REINFORCE policy gradient update
    one_hot_actions = keras.utils.to_categorical(actions, num_actions)
    loss = network.train_on_batch(states, one_hot_actions, sample_weight=discounted_rewards)
return loss
num_episodes = 10000000
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/PGCartPole_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
for episode in range(num_episodes):
state = env.reset()
rewards = []
states = []
actions = []
while True:
action = get_action(network, state, num_actions)
new_state, reward, done, _ = env.step(action)
states.append(state)
rewards.append(reward)
actions.append(action)
if done:
            loss = update_network(network, rewards, states, actions, num_actions)
tot_reward = sum(rewards)
print(f"Episode: {episode}, Reward: {tot_reward}, avg loss: {loss:.5f}")
with train_writer.as_default():
tf.summary.scalar('reward', tot_reward, step=episode)
tf.summary.scalar('avg loss', loss, step=episode)
break
state = new_state | 2,344 | 34 | 116 | py |
adventures-in-ml-code | adventures-in-ml-code-master/per_duelingq_spaceinv_tf2.py | import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import imageio
import os
# STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard'
# STORE_PATH = "tensorboard"
STORE_PATH = "C:\\Users\\Andy\\TensorFlowBook\\TensorBoard"
MAX_EPSILON = 1
MIN_EPSILON = 0.1
EPSILON_MIN_ITER = 500000
GAMMA = 0.99
BATCH_SIZE = 32
TAU = 0.08
POST_PROCESS_IMAGE_SIZE = (105, 80, 1)
DELAY_TRAINING = 50000
BETA_DECAY_ITERS = 500000
MIN_BETA = 0.4
MAX_BETA = 1.0
NUM_FRAMES = 4
GIF_RECORDING_FREQ = 100
MODEL_SAVE_FREQ = 100
env = gym.make("SpaceInvaders-v0")
num_actions = env.action_space.n
# huber_loss = keras.losses.Huber()
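# element-wise Huber loss on a scalar TD error - used below to convert TD errors
# into priorities for the prioritised replay buffer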
def huber_loss(loss):
return 0.5 * loss ** 2 if abs(loss) < 1.0 else abs(loss) - 0.5
class DQModel(keras.Model):
def __init__(self, hidden_size: int, num_actions: int, dueling: bool):
super(DQModel, self).__init__()
self.dueling = dueling
self.conv1 = keras.layers.Conv2D(16, (8, 8), (4, 4), activation='relu')
self.conv2 = keras.layers.Conv2D(32, (4, 4), (2, 2), activation='relu')
self.flatten = keras.layers.Flatten()
self.adv_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.adv_out = keras.layers.Dense(num_actions,
kernel_initializer=keras.initializers.he_normal())
if dueling:
self.v_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.v_out = keras.layers.Dense(1, kernel_initializer=keras.initializers.he_normal())
self.lambda_layer = keras.layers.Lambda(lambda x: x - tf.reduce_mean(x))
self.combine = keras.layers.Add()
def call(self, input):
x = self.conv1(input)
x = self.conv2(x)
x = self.flatten(x)
adv = self.adv_dense(x)
adv = self.adv_out(adv)
if self.dueling:
v = self.v_dense(x)
v = self.v_out(v)
norm_adv = self.lambda_layer(adv)
combined = self.combine([v, norm_adv])
return combined
return adv
primary_network = DQModel(256, num_actions, True)
target_network = DQModel(256, num_actions, True)
primary_network.compile(optimizer=keras.optimizers.Adam(), loss=tf.keras.losses.Huber())
# make target_network = primary_network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(e)
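# The Node class and the helper functions below implement a sum tree: each leaf holds
# the priority of one replay-buffer slot and every internal node holds the sum of its
# children, so priority-proportional sampling and priority updates are both O(log n)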
class Node:
def __init__(self, left, right, is_leaf: bool = False, idx = None):
self.left = left
self.right = right
self.is_leaf = is_leaf
self.value = sum(n.value for n in (left, right) if n is not None)
self.parent = None
self.idx = idx # this value is only set for leaf nodes
if left is not None:
left.parent = self
if right is not None:
right.parent = self
@classmethod
def create_leaf(cls, value, idx):
leaf = cls(None, None, is_leaf=True, idx=idx)
leaf.value = value
return leaf
def create_tree(input: list):
nodes = [Node.create_leaf(v, i) for i, v in enumerate(input)]
leaf_nodes = nodes
while len(nodes) > 1:
inodes = iter(nodes)
nodes = [Node(*pair) for pair in zip(inodes, inodes)]
return nodes[0], leaf_nodes
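# retrieve() walks down the tree to the leaf whose cumulative priority range contains
# `value`; update() and propagate_changes() write a new leaf priority and push the
# change up to the root so the running sums stay consistent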
def retrieve(value: float, node: Node):
if node.is_leaf:
return node
if node.left.value >= value:
return retrieve(value, node.left)
else:
return retrieve(value - node.left.value, node.right)
def update(node: Node, new_value: float):
change = new_value - node.value
node.value = new_value
propagate_changes(change, node.parent)
def propagate_changes(change: float, node: Node):
node.value += change
if node.parent is not None:
propagate_changes(change, node.parent)
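# Prioritised experience replay buffer: priorities are offset by min_priority and
# raised to the power alpha before being stored in the sum tree, and sample() returns
# importance-sampling weights (scaled by beta) to correct for the biased sampling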
class Memory(object):
def __init__(self, size: int):
self.size = size
self.curr_write_idx = 0
self.available_samples = 0
self.buffer = [(np.zeros((POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1]),
dtype=np.float32), 0.0, 0.0, 0.0) for i in range(self.size)]
self.base_node, self.leaf_nodes = create_tree([0 for i in range(self.size)])
self.frame_idx = 0
self.action_idx = 1
self.reward_idx = 2
self.terminal_idx = 3
self.beta = 0.4
self.alpha = 0.6
self.min_priority = 0.01
def append(self, experience: tuple, priority: float):
self.buffer[self.curr_write_idx] = experience
self.update(self.curr_write_idx, priority)
self.curr_write_idx += 1
        # reset the current write position index if greater than the allowed size
if self.curr_write_idx >= self.size:
self.curr_write_idx = 0
# max out available samples at the memory buffer size
if self.available_samples + 1 < self.size:
self.available_samples += 1
else:
self.available_samples = self.size - 1
def update(self, idx: int, priority: float):
update(self.leaf_nodes[idx], self.adjust_priority(priority))
def adjust_priority(self, priority: float):
return np.power(priority + self.min_priority, self.alpha)
def sample(self, num_samples: int):
sampled_idxs = []
is_weights = []
sample_no = 0
while sample_no < num_samples:
sample_val = np.random.uniform(0, self.base_node.value)
samp_node = retrieve(sample_val, self.base_node)
if NUM_FRAMES - 1 < samp_node.idx < self.available_samples - 1:
sampled_idxs.append(samp_node.idx)
p = samp_node.value / self.base_node.value
is_weights.append((self.available_samples + 1) * p)
sample_no += 1
# apply the beta factor and normalise so that the maximum is_weight < 1
is_weights = np.array(is_weights)
is_weights = np.power(is_weights, -self.beta)
is_weights = is_weights / np.max(is_weights)
# now load up the state and next state variables according to sampled idxs
states = np.zeros((num_samples, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
next_states = np.zeros((num_samples, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
actions, rewards, terminal = [], [], []
for i, idx in enumerate(sampled_idxs):
for j in range(NUM_FRAMES):
states[i, :, :, j] = self.buffer[idx + j - NUM_FRAMES + 1][self.frame_idx][:, :, 0]
next_states[i, :, :, j] = self.buffer[idx + j - NUM_FRAMES + 2][self.frame_idx][:, :, 0]
actions.append(self.buffer[idx][self.action_idx])
rewards.append(self.buffer[idx][self.reward_idx])
terminal.append(self.buffer[idx][self.terminal_idx])
return states, np.array(actions), np.array(rewards), next_states, np.array(terminal), sampled_idxs, is_weights
memory = Memory(200000)
def image_preprocess(image, new_size=(105, 80)):
# convert to greyscale, resize and normalize the image
image = tf.image.rgb_to_grayscale(image)
image = tf.image.resize(image, new_size)
image = image / 255
return image
def choose_action(state, primary_network, eps, step):
if step < DELAY_TRAINING:
return random.randint(0, num_actions - 1)
else:
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
return np.argmax(primary_network(tf.reshape(state, (1, POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)).numpy()))
def update_network(primary_network, target_network):
# update target network parameters slowly from primary network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t * (1 - TAU) + e * TAU)
def process_state_stack(state_stack, state):
for i in range(1, state_stack.shape[-1]):
state_stack[:, :, i - 1].assign(state_stack[:, :, i])
state_stack[:, :, -1].assign(state[:, :, 0])
return state_stack
def record_gif(frame_list, episode, reward, fps=50):
    imageio.mimsave(STORE_PATH + "\\SPACE_INVADERS_EPISODE-eps{}-r{}.gif".format(episode, reward), frame_list, fps=fps)
def get_per_error(states, actions, rewards, next_states, terminal, primary_network, target_network):
# predict Q(s,a) given the batch of states
prim_qt = primary_network(states)
# predict Q(s',a') from the evaluation network
prim_qtp1 = primary_network(next_states)
# copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
target_q = prim_qt.numpy()
# the action selection from the primary / online network
prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
# the q value for the prim_action_tp1 from the target network
q_from_target = target_network(next_states)
    batch_idxs = np.arange(states.shape[0])
    updates = rewards + (1 - terminal) * GAMMA * q_from_target.numpy()[batch_idxs, prim_action_tp1]
    target_q[batch_idxs, actions] = updates
    # calculate the loss / error to update priorities
error = [huber_loss(target_q[i, actions[i]] - prim_qt.numpy()[i, actions[i]]) for i in range(states.shape[0])]
return target_q, error
def train(primary_network, memory, target_network):
states, actions, rewards, next_states, terminal, idxs, is_weights = memory.sample(BATCH_SIZE)
target_q, error = get_per_error(states, actions, rewards, next_states, terminal,
primary_network, target_network)
for i in range(len(idxs)):
memory.update(idxs[i], error[i])
loss = primary_network.train_on_batch(states, target_q, is_weights)
return loss
num_episodes = 1000000
eps = MAX_EPSILON
render = False
train_writer = tf.summary.create_file_writer(STORE_PATH + "/DuelingQPERSI_{}".format(dt.datetime.now().strftime('%d%m%Y%H%M')))
steps = 0
for i in range(num_episodes):
state = env.reset()
state = image_preprocess(state)
state_stack = tf.Variable(np.repeat(state.numpy(), NUM_FRAMES).reshape((POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1],
NUM_FRAMES)))
cnt = 1
avg_loss = 0
tot_reward = 0
if i % GIF_RECORDING_FREQ == 0:
frame_list = []
while True:
if render:
env.render()
action = choose_action(state_stack, primary_network, eps, steps)
next_state, reward, done, info = env.step(action)
tot_reward += reward
if i % GIF_RECORDING_FREQ == 0:
frame_list.append(tf.cast(tf.image.resize(next_state, (480, 320)), tf.uint8).numpy())
next_state = image_preprocess(next_state)
old_state_stack = state_stack
state_stack = process_state_stack(state_stack, next_state)
if steps > DELAY_TRAINING:
loss = train(primary_network, memory, target_network)
update_network(primary_network, target_network)
_, error = get_per_error(tf.reshape(old_state_stack, (1, POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)),
np.array([action]), np.array([reward]),
tf.reshape(state_stack, (1, POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)), np.array([done]))
# store in memory
memory.append((next_state, action, reward, done), error[0])
else:
loss = -1
# store in memory - default the priority to the reward
memory.append((next_state, action, reward, done), reward)
avg_loss += loss
# linearly decay the eps and PER beta values
if steps > DELAY_TRAINING:
eps = MAX_EPSILON - ((steps - DELAY_TRAINING) / EPSILON_MIN_ITER) * \
(MAX_EPSILON - MIN_EPSILON) if steps < EPSILON_MIN_ITER else \
MIN_EPSILON
beta = MIN_BETA + ((steps - DELAY_TRAINING) / BETA_DECAY_ITERS) * \
(MAX_BETA - MIN_BETA) if steps < BETA_DECAY_ITERS else \
MAX_BETA
memory.beta = beta
steps += 1
if done:
if steps > DELAY_TRAINING:
avg_loss /= cnt
print("Episode: {}, Reward: {}, avg loss: {:.5f}, eps: {:.3f}".format(i, tot_reward, avg_loss, eps))
with train_writer.as_default():
tf.summary.scalar('reward', tot_reward, step=i)
tf.summary.scalar('avg loss', avg_loss, step=i)
else:
print("Pre-training...Episode: {}".format(i))
if i % GIF_RECORDING_FREQ == 0:
record_gif(frame_list, i, tot_reward)
break
cnt += 1
if i % MODEL_SAVE_FREQ == 0: # and i != 0:
primary_network.save_weights(STORE_PATH + "/checkpoints/cp_primary_network_episode_{}.ckpt".format(i))
target_network.save_weights(STORE_PATH + "/checkpoints/cp_target_network_episode_{}.ckpt".format(i)) | 13,766 | 40.844985 | 165 | py |
adventures-in-ml-code | adventures-in-ml-code-master/gensim_word2vec.py | import gensim
from gensim.models import word2vec
import logging
from keras.layers import Input, Embedding, merge
from keras.models import Model
import tensorflow as tf
import numpy as np
import urllib.request
import os
import zipfile
vector_dim = 300
root_path = "C:\\Users\Andy\PycharmProjects\\adventures-in-ml-code\\"
def maybe_download(filename, url, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
# convert the input data into a list of integer indexes aligning with the wv indexes
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = f.read(f.namelist()[0]).split()
return data
def convert_data_to_index(string_data, wv):
index_data = []
for word in string_data:
if word in wv:
index_data.append(wv.vocab[word].index)
return index_data
def gensim_demo():
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', url, 31344016)
if not os.path.exists((root_path + filename).strip('.zip')):
zipfile.ZipFile(root_path+filename).extractall()
sentences = word2vec.Text8Corpus((root_path + filename).strip('.zip'))
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
model = word2vec.Word2Vec(sentences, iter=10, min_count=10, size=300, workers=4)
# get the word vector of "the"
print(model.wv['the'])
# get the most common words
print(model.wv.index2word[0], model.wv.index2word[1], model.wv.index2word[2])
# get the least common words
vocab_size = len(model.wv.vocab)
print(model.wv.index2word[vocab_size - 1], model.wv.index2word[vocab_size - 2], model.wv.index2word[vocab_size - 3])
# find the index of the 2nd most common word ("of")
print('Index of "of" is: {}'.format(model.wv.vocab['of'].index))
# some similarity fun
print(model.wv.similarity('woman', 'man'), model.wv.similarity('man', 'elephant'))
# what doesn't fit?
print(model.wv.doesnt_match("green blue red zebra".split()))
str_data = read_data(root_path + filename)
index_data = convert_data_to_index(str_data, model.wv)
print(str_data[:4], index_data[:4])
# save and reload the model
model.save(root_path + "mymodel")
def create_embedding_matrix(model):
# convert the wv word vectors into a numpy matrix that is suitable for insertion
# into our TensorFlow and Keras models
embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))
for i in range(len(model.wv.vocab)):
embedding_vector = model.wv[model.wv.index2word[i]]
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
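# tf_model builds a small TensorFlow 1.x graph over the frozen gensim embeddings and
# prints the nearest neighbours (by cosine similarity) of a few randomly chosen
# common validation words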
def tf_model(embedding_matrix, wv):
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# embedding layer weights are frozen to avoid updating embeddings while training
saved_embeddings = tf.constant(embedding_matrix)
embedding = tf.Variable(initial_value=saved_embeddings, trainable=False)
# create the cosine similarity operations
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embeddings = embedding / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# call our similarity operation
sim = similarity.eval()
# run through each valid example, finding closest words
for i in range(valid_size):
valid_word = wv.index2word[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = wv.index2word[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
def keras_model(embedding_matrix, wv):
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
# input words - in this case we do sample by sample evaluations of the similarity
valid_word = Input((1,), dtype='int32')
other_word = Input((1,), dtype='int32')
# setup the embedding layer
embeddings = Embedding(input_dim=embedding_matrix.shape[0], output_dim=embedding_matrix.shape[1],
weights=[embedding_matrix])
embedded_a = embeddings(valid_word)
embedded_b = embeddings(other_word)
similarity = merge([embedded_a, embedded_b], mode='cos', dot_axes=2)
# create the Keras model
k_model = Model(input=[valid_word, other_word], output=similarity)
def get_sim(valid_word_idx, vocab_size):
sim = np.zeros((vocab_size,))
in_arr1 = np.zeros((1,))
in_arr2 = np.zeros((1,))
in_arr1[0,] = valid_word_idx
for i in range(vocab_size):
in_arr2[0,] = i
out = k_model.predict_on_batch([in_arr1, in_arr2])
sim[i] = out
return sim
# now run the model and get the closest words to the valid examples
for i in range(valid_size):
valid_word = wv.index2word[valid_examples[i]]
top_k = 8 # number of nearest neighbors
sim = get_sim(valid_examples[i], len(wv.vocab))
nearest = (-sim).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = wv.index2word[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
if __name__ == "__main__":
run_opt = 2
if run_opt == 1:
gensim_demo()
elif run_opt == 2:
model = gensim.models.Word2Vec.load(root_path + "mymodel")
embedding_matrix = create_embedding_matrix(model)
tf_model(embedding_matrix, model.wv)
elif run_opt == 3:
model = gensim.models.Word2Vec.load(root_path + "mymodel")
embedding_matrix = create_embedding_matrix(model)
keras_model(embedding_matrix, model.wv)
| 7,078 | 38.327778 | 120 | py |
adventures-in-ml-code | adventures-in-ml-code-master/a2c_tf2_cartpole.py | import tensorflow as tf
from tensorflow import keras
import numpy as np
import gym
import datetime as dt
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard/A2CCartPole'
CRITIC_LOSS_WEIGHT = 0.5
ACTOR_LOSS_WEIGHT = 1.0
ENTROPY_LOSS_WEIGHT = 0.05
BATCH_SIZE = 64
GAMMA = 0.95
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
class Model(keras.Model):
def __init__(self, num_actions):
super().__init__()
self.num_actions = num_actions
self.dense1 = keras.layers.Dense(64, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.dense2 = keras.layers.Dense(64, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.value = keras.layers.Dense(1)
self.policy_logits = keras.layers.Dense(num_actions)
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.value(x), self.policy_logits(x)
def action_value(self, state):
value, logits = self.predict_on_batch(state)
action = tf.random.categorical(logits, 1)[0]
return action, value
def critic_loss(discounted_rewards, predicted_values):
return keras.losses.mean_squared_error(discounted_rewards, predicted_values) * CRITIC_LOSS_WEIGHT
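# Keras losses only receive (y_true, y_pred), so the training loop packs the taken
# actions and their advantages into a single `combined` array. The advantage-weighted
# sparse cross-entropy of those actions is the policy-gradient term, and the policy
# entropy is subtracted to encourage exploration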
def actor_loss(combined, policy_logits):
actions = combined[:, 0]
advantages = combined[:, 1]
sparse_ce = keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.SUM
)
actions = tf.cast(actions, tf.int32)
policy_loss = sparse_ce(actions, policy_logits, sample_weight=advantages)
probs = tf.nn.softmax(policy_logits)
entropy_loss = keras.losses.categorical_crossentropy(probs, probs)
return policy_loss * ACTOR_LOSS_WEIGHT - entropy_loss * ENTROPY_LOSS_WEIGHT
def discounted_rewards_advantages(rewards, dones, values, next_value):
discounted_rewards = np.array(rewards + [next_value[0]])
for t in reversed(range(len(rewards))):
discounted_rewards[t] = rewards[t] + GAMMA * discounted_rewards[t+1] * (1-dones[t])
discounted_rewards = discounted_rewards[:-1]
# advantages are bootstrapped discounted rewards - values, using Bellman's equation
advantages = discounted_rewards - np.stack(values)[:, 0]
return discounted_rewards, advantages
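# the two losses below are matched to the model's two outputs in order:
# critic_loss trains the value head and actor_loss trains the policy logits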
model = Model(num_actions)
model.compile(optimizer=keras.optimizers.Adam(), loss=[critic_loss, actor_loss])
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/A2C-CartPole_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
num_steps = 10000000
episode_reward_sum = 0
state = env.reset()
episode = 1
loss = None   # nothing has been trained yet when the first episodes finish
for step in range(num_steps):
rewards = []
actions = []
values = []
states = []
dones = []
for _ in range(BATCH_SIZE):
_, policy_logits = model(state.reshape(1, -1))
action, value = model.action_value(state.reshape(1, -1))
new_state, reward, done, _ = env.step(action.numpy()[0])
actions.append(action)
values.append(value.numpy()[0])
states.append(state)
dones.append(done)
episode_reward_sum += reward
state = new_state
if done:
rewards.append(0.0)
state = env.reset()
print(f"Episode: {episode}, latest episode reward: {episode_reward_sum}, loss: {loss}")
with train_writer.as_default():
tf.summary.scalar('rewards', episode_reward_sum, episode)
episode_reward_sum = 0
episode += 1
else:
rewards.append(reward)
_, next_value = model.action_value(state.reshape(1, -1))
discounted_rewards, advantages = discounted_rewards_advantages(rewards, dones, values, next_value.numpy()[0])
# combine the actions and advantages into a combined array for passing to
# actor_loss function
combined = np.zeros((len(actions), 2))
combined[:, 0] = actions
combined[:, 1] = advantages
loss = model.train_on_batch(tf.stack(states), [discounted_rewards, combined])
with train_writer.as_default():
tf.summary.scalar('tot_loss', np.sum(loss), step) | 4,312 | 32.96063 | 118 | py |
adventures-in-ml-code | adventures-in-ml-code-master/pytorch_nn.py | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
def simple_gradient():
# print the gradient of 2x^2 + 5x
x = Variable(torch.ones(2, 2) * 2, requires_grad=True)
z = 2 * (x * x) + 5 * x
# run the backpropagation
z.backward(torch.ones(2, 2))
print(x.grad)
def create_nn(batch_size=200, learning_rate=0.01, epochs=10,
log_interval=10):
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 200)
self.fc2 = nn.Linear(200, 200)
self.fc3 = nn.Linear(200, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x)
net = Net()
print(net)
# create a stochastic gradient descent optimizer
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
# create a loss function
criterion = nn.NLLLoss()
# run the main training loop
for epoch in range(epochs):
for batch_idx, (data, target) in enumerate(train_loader):
data, target = Variable(data), Variable(target)
# resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
data = data.view(-1, 28*28)
optimizer.zero_grad()
net_out = net(data)
loss = criterion(net_out, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
# run a test loop
test_loss = 0
correct = 0
for data, target in test_loader:
data, target = Variable(data, volatile=True), Variable(target)
data = data.view(-1, 28 * 28)
net_out = net(data)
# sum up batch loss
test_loss += criterion(net_out, target).data[0]
pred = net_out.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
if __name__ == "__main__":
run_opt = 2
if run_opt == 1:
simple_gradient()
elif run_opt == 2:
create_nn() | 3,316 | 33.915789 | 81 | py |
adventures-in-ml-code | adventures-in-ml-code-master/tensor_flow_tutorial.py | import tensorflow as tf
import numpy as np
import datetime as dt
from tensorflow.keras.datasets import mnist
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorBoard'
def run_simple_graph():
# create TensorFlow variables
const = tf.Variable(2.0, name="const")
b = tf.Variable(2.0, name='b')
c = tf.Variable(1.0, name='c')
# now create some operations
d = tf.add(b, c, name='d')
e = tf.add(c, const, name='e')
a = tf.multiply(d, e, name='a')
# alternatively (and more naturally)
d = b + c
e = c + 2
a = d * e
print(f"Variable a is {a.numpy()}")
def run_simple_graph_multiple():
const = tf.Variable(2.0, name="const")
b = tf.Variable(np.arange(0, 10), name='b')
c = tf.Variable(1.0, name='c')
d = tf.cast(b, tf.float32) + c
e = c + const
a = d * e
print(f"Variable a is {a.numpy()}")
# the line below would cause an error - tensors are immutable
# b[1] = 10
# need to use assignment instead
b[1].assign(10)
d = tf.cast(b, tf.float32) + c
e = c + const
a = d * e
print(f"Variable a is {a.numpy()}")
b[6:9].assign([10, 10, 10])
f = b[2:5]
print(f.numpy())
def get_batch(x_data, y_data, batch_size):
idxs = np.random.randint(0, len(y_data), batch_size)
return x_data[idxs,:,:], y_data[idxs]
def nn_model(x_input, W1, b1, W2, b2):
# flatten the input image from 28 x 28 to 784
x_input = tf.reshape(x_input, (x_input.shape[0], -1))
x = tf.add(tf.matmul(tf.cast(x_input, tf.float32), W1), b1)
x = tf.nn.relu(x)
logits = tf.add(tf.matmul(x, W2), b2)
return logits
def loss_fn(logits, labels):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels,
logits=logits))
return cross_entropy
def nn_example():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Python optimisation variables
epochs = 10
batch_size = 100
# normalize the input images by dividing by 255.0
x_train = x_train / 255.0
x_test = x_test / 255.0
# convert x_test to tensor to pass through model (train data will be converted to
# tensors on the fly)
x_test = tf.Variable(x_test)
# now declare the weights connecting the input to the hidden layer
W1 = tf.Variable(tf.random.normal([784, 300], stddev=0.03), name='W1')
b1 = tf.Variable(tf.random.normal([300]), name='b1')
# and the weights connecting the hidden layer to the output layer
W2 = tf.Variable(tf.random.normal([300, 10], stddev=0.03), name='W2')
b2 = tf.Variable(tf.random.normal([10]), name='b2')
# setup the optimizer
optimizer = tf.keras.optimizers.Adam()
# create a summary writer to view loss in TensorBoard
train_summary_writer = tf.summary.create_file_writer(STORE_PATH +
"/TensorFlow_Intro_Chapter_" +
f"{dt.datetime.now().strftime('%d%m%Y%H%M')}")
total_batch = int(len(y_train) / batch_size)
for epoch in range(epochs):
avg_loss = 0
for i in range(total_batch):
batch_x, batch_y = get_batch(x_train, y_train, batch_size=batch_size)
# create tensors
batch_x = tf.Variable(batch_x)
batch_y = tf.Variable(batch_y)
# create a one hot vector
batch_y = tf.one_hot(batch_y, 10)
with tf.GradientTape() as tape:
logits = nn_model(batch_x, W1, b1, W2, b2)
loss = loss_fn(logits, batch_y)
gradients = tape.gradient(loss, [W1, b1, W2, b2])
optimizer.apply_gradients(zip(gradients, [W1, b1, W2, b2]))
avg_loss += loss / total_batch
test_logits = nn_model(x_test, W1, b1, W2, b2)
max_idxs = tf.argmax(test_logits, axis=1)
test_acc = np.sum(max_idxs.numpy() == y_test) / len(y_test)
print(f"Epoch: {epoch + 1}, loss={avg_loss:.3f}, test set accuracy={test_acc*100:.3f}%")
with train_summary_writer.as_default():
tf.summary.scalar('loss', avg_loss, step=epoch)
tf.summary.scalar('accuracy', test_acc, step=epoch)
print("\nTraining complete!")
if __name__ == "__main__":
# run_simple_graph()
# run_simple_graph_multiple()
nn_example() | 4,411 | 32.424242 | 103 | py |
adventures-in-ml-code | adventures-in-ml-code-master/double_q_tensorflow2.py | import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import math
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard'
MAX_EPSILON = 1
MIN_EPSILON = 0.01
LAMBDA = 0.0005
GAMMA = 0.95
BATCH_SIZE = 32
TAU = 0.08
RANDOM_REWARD_STD = 1.0
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
primary_network = keras.Sequential([
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(num_actions)
])
target_network = keras.Sequential([
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(num_actions)
])
primary_network.compile(optimizer=keras.optimizers.Adam(), loss='mse')
class Memory:
def __init__(self, max_memory):
self._max_memory = max_memory
self._samples = []
def add_sample(self, sample):
self._samples.append(sample)
if len(self._samples) > self._max_memory:
self._samples.pop(0)
def sample(self, no_samples):
if no_samples > len(self._samples):
return random.sample(self._samples, len(self._samples))
else:
return random.sample(self._samples, no_samples)
@property
def num_samples(self):
return len(self._samples)
memory = Memory(500000)
def choose_action(state, primary_network, eps):
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
return np.argmax(primary_network(state.reshape(1, -1)))
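# train() assembles the Q-learning targets; when a target network is passed in, the
# update is double Q-learning - the primary network selects the best next action and
# the target network evaluates its value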
def train(primary_network, memory, target_network=None):
if memory.num_samples < BATCH_SIZE * 3:
return 0
batch = memory.sample(BATCH_SIZE)
states = np.array([val[0] for val in batch])
actions = np.array([val[1] for val in batch])
rewards = np.array([val[2] for val in batch])
next_states = np.array([(np.zeros(state_size)
if val[3] is None else val[3]) for val in batch])
# predict Q(s,a) given the batch of states
prim_qt = primary_network(states)
# predict Q(s',a') from the evaluation network
prim_qtp1 = primary_network(next_states)
# copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
target_q = prim_qt.numpy()
updates = rewards
valid_idxs = np.array(next_states).sum(axis=1) != 0
batch_idxs = np.arange(BATCH_SIZE)
if target_network is None:
updates[valid_idxs] += GAMMA * np.amax(prim_qtp1.numpy()[valid_idxs, :], axis=1)
else:
prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
q_from_target = target_network(next_states)
updates[valid_idxs] += GAMMA * q_from_target.numpy()[batch_idxs[valid_idxs], prim_action_tp1[valid_idxs]]
target_q[batch_idxs, actions] = updates
loss = primary_network.train_on_batch(states, target_q)
if target_network is not None:
# update target network parameters slowly from primary network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t * (1 - TAU) + e * TAU)
return loss
num_episodes = 1000
eps = MAX_EPSILON
render = False
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/DoubleQ_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
double_q = False
steps = 0
for i in range(num_episodes):
state = env.reset()
cnt = 0
avg_loss = 0
while True:
if render:
env.render()
action = choose_action(state, primary_network, eps)
next_state, reward, done, info = env.step(action)
reward = np.random.normal(1.0, RANDOM_REWARD_STD)
if done:
next_state = None
# store in memory
memory.add_sample((state, action, reward, next_state))
loss = train(primary_network, memory, target_network if double_q else None)
avg_loss += loss
state = next_state
# exponentially decay the eps value
steps += 1
eps = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * steps)
if done:
avg_loss /= cnt
print(f"Episode: {i}, Reward: {cnt}, avg loss: {avg_loss:.3f}, eps: {eps:.3f}")
with train_writer.as_default():
tf.summary.scalar('reward', cnt, step=i)
tf.summary.scalar('avg loss', avg_loss, step=i)
break
cnt += 1
| 4,711 | 32.41844 | 118 | py |
adventures-in-ml-code | adventures-in-ml-code-master/keras_lstm.py | from __future__ import print_function
import collections
import os
import tensorflow as tf
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation, Embedding, Dropout, TimeDistributed
from keras.layers import LSTM
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint
import numpy as np
import argparse
"""To run this code, you'll need to first download and extract the text dataset
from here: http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz. Change the
data_path variable below to your local extraction path"""
data_path = "C:\\Users\Andy\Documents\simple-examples\data"
parser = argparse.ArgumentParser()
parser.add_argument('run_opt', type=int, default=1, help='An integer: 1 to train, 2 to test')
parser.add_argument('--data_path', type=str, default=data_path, help='The full path of the training data')
args = parser.parse_args()
if args.data_path:
data_path = args.data_path
def read_words(filename):
with tf.gfile.GFile(filename, "r") as f:
return f.read().decode("utf-8").replace("\n", "<eos>").split()
def build_vocab(filename):
data = read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
def file_to_word_ids(filename, word_to_id):
data = read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
def load_data():
# get the data paths
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
# build the complete vocabulary, then convert text data to list of integers
word_to_id = build_vocab(train_path)
train_data = file_to_word_ids(train_path, word_to_id)
valid_data = file_to_word_ids(valid_path, word_to_id)
test_data = file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
reversed_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))
print(train_data[:5])
print(word_to_id)
print(vocabulary)
print(" ".join([reversed_dictionary[x] for x in train_data[:10]]))
return train_data, valid_data, test_data, vocabulary, reversed_dictionary
train_data, valid_data, test_data, vocabulary, reversed_dictionary = load_data()
class KerasBatchGenerator(object):
def __init__(self, data, num_steps, batch_size, vocabulary, skip_step=5):
self.data = data
self.num_steps = num_steps
self.batch_size = batch_size
self.vocabulary = vocabulary
# this will track the progress of the batches sequentially through the
# data set - once the data reaches the end of the data set it will reset
# back to zero
self.current_idx = 0
# skip_step is the number of words which will be skipped before the next
# batch is skimmed from the data set
self.skip_step = skip_step
def generate(self):
x = np.zeros((self.batch_size, self.num_steps))
y = np.zeros((self.batch_size, self.num_steps, self.vocabulary))
while True:
for i in range(self.batch_size):
if self.current_idx + self.num_steps >= len(self.data):
# reset the index back to the start of the data set
self.current_idx = 0
x[i, :] = self.data[self.current_idx:self.current_idx + self.num_steps]
temp_y = self.data[self.current_idx + 1:self.current_idx + self.num_steps + 1]
# convert all of temp_y into a one hot representation
y[i, :, :] = to_categorical(temp_y, num_classes=self.vocabulary)
self.current_idx += self.skip_step
yield x, y
num_steps = 30
batch_size = 20
train_data_generator = KerasBatchGenerator(train_data, num_steps, batch_size, vocabulary,
skip_step=num_steps)
valid_data_generator = KerasBatchGenerator(valid_data, num_steps, batch_size, vocabulary,
skip_step=num_steps)
hidden_size = 500
use_dropout=True
model = Sequential()
model.add(Embedding(vocabulary, hidden_size, input_length=num_steps))
model.add(LSTM(hidden_size, return_sequences=True))
model.add(LSTM(hidden_size, return_sequences=True))
if use_dropout:
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(vocabulary)))
model.add(Activation('softmax'))
optimizer = Adam()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
print(model.summary())
checkpointer = ModelCheckpoint(filepath=data_path + '/model-{epoch:02d}.hdf5', verbose=1)
num_epochs = 50
if args.run_opt == 1:
model.fit_generator(train_data_generator.generate(), len(train_data)//(batch_size*num_steps), num_epochs,
validation_data=valid_data_generator.generate(),
validation_steps=len(valid_data)//(batch_size*num_steps), callbacks=[checkpointer])
# model.fit_generator(train_data_generator.generate(), 2000, num_epochs,
# validation_data=valid_data_generator.generate(),
# validation_steps=10)
    model.save(data_path + "/final_model.hdf5")
elif args.run_opt == 2:
    model = load_model(data_path + "/model-40.hdf5")
dummy_iters = 40
example_training_generator = KerasBatchGenerator(train_data, num_steps, 1, vocabulary,
skip_step=1)
print("Training data:")
for i in range(dummy_iters):
dummy = next(example_training_generator.generate())
num_predict = 10
true_print_out = "Actual words: "
pred_print_out = "Predicted words: "
for i in range(num_predict):
data = next(example_training_generator.generate())
prediction = model.predict(data[0])
predict_word = np.argmax(prediction[:, num_steps-1, :])
true_print_out += reversed_dictionary[train_data[num_steps + dummy_iters + i]] + " "
pred_print_out += reversed_dictionary[predict_word] + " "
print(true_print_out)
print(pred_print_out)
# test data set
dummy_iters = 40
example_test_generator = KerasBatchGenerator(test_data, num_steps, 1, vocabulary,
skip_step=1)
print("Test data:")
for i in range(dummy_iters):
dummy = next(example_test_generator.generate())
num_predict = 10
true_print_out = "Actual words: "
pred_print_out = "Predicted words: "
for i in range(num_predict):
data = next(example_test_generator.generate())
prediction = model.predict(data[0])
predict_word = np.argmax(prediction[:, num_steps - 1, :])
true_print_out += reversed_dictionary[test_data[num_steps + dummy_iters + i]] + " "
pred_print_out += reversed_dictionary[predict_word] + " "
print(true_print_out)
print(pred_print_out)
| 7,148 | 39.619318 | 109 | py |
adventures-in-ml-code | adventures-in-ml-code-master/dueling_q_tensorflow2.py | import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import math
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard'
MAX_EPSILON = 1
MIN_EPSILON = 0.01
EPSILON_MIN_ITER = 5000
DELAY_TRAINING = 300
GAMMA = 0.95
BATCH_SIZE = 32
TAU = 0.08
RANDOM_REWARD_STD = 1.0
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
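# DQModel optionally splits the head of the network into separate value and advantage
# streams (the dueling architecture); the advantage stream is mean-centred before being
# added back to the value, i.e. Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)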
class DQModel(keras.Model):
def __init__(self, hidden_size: int, num_actions: int, dueling: bool):
super(DQModel, self).__init__()
self.dueling = dueling
self.dense1 = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.dense2 = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.adv_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.adv_out = keras.layers.Dense(num_actions,
kernel_initializer=keras.initializers.he_normal())
if dueling:
self.v_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.v_out = keras.layers.Dense(1, kernel_initializer=keras.initializers.he_normal())
self.lambda_layer = keras.layers.Lambda(lambda x: x - tf.reduce_mean(x))
self.combine = keras.layers.Add()
def call(self, input):
x = self.dense1(input)
x = self.dense2(x)
adv = self.adv_dense(x)
adv = self.adv_out(adv)
if self.dueling:
v = self.v_dense(x)
v = self.v_out(v)
norm_adv = self.lambda_layer(adv)
combined = self.combine([v, norm_adv])
return combined
return adv
primary_network = DQModel(30, num_actions, True)
target_network = DQModel(30, num_actions, True)
primary_network.compile(optimizer=keras.optimizers.Adam(), loss='mse')
# make target_network = primary_network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(e)
def update_network(primary_network, target_network):
# update target network parameters slowly from primary network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t * (1 - TAU) + e * TAU)
class Memory:
def __init__(self, max_memory):
self._max_memory = max_memory
self._samples = []
def add_sample(self, sample):
self._samples.append(sample)
if len(self._samples) > self._max_memory:
self._samples.pop(0)
def sample(self, no_samples):
if no_samples > len(self._samples):
return random.sample(self._samples, len(self._samples))
else:
return random.sample(self._samples, no_samples)
@property
def num_samples(self):
return len(self._samples)
memory = Memory(500000)
def choose_action(state, primary_network, eps):
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
return np.argmax(primary_network(state.reshape(1, -1)))
def train(primary_network, memory, target_network):
batch = memory.sample(BATCH_SIZE)
states = np.array([val[0] for val in batch])
actions = np.array([val[1] for val in batch])
rewards = np.array([val[2] for val in batch])
next_states = np.array([(np.zeros(state_size)
if val[3] is None else val[3]) for val in batch])
# predict Q(s,a) given the batch of states
prim_qt = primary_network(states)
# predict Q(s',a') from the evaluation network
prim_qtp1 = primary_network(next_states)
# copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
target_q = prim_qt.numpy()
updates = rewards
valid_idxs = np.array(next_states).sum(axis=1) != 0
batch_idxs = np.arange(BATCH_SIZE)
# extract the best action from the next state
prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
# get all the q values for the next state
q_from_target = target_network(next_states)
# add the discounted estimated reward from the selected action (prim_action_tp1)
updates[valid_idxs] += GAMMA * q_from_target.numpy()[batch_idxs[valid_idxs], prim_action_tp1[valid_idxs]]
# update the q target to train towards
target_q[batch_idxs, actions] = updates
# run a training batch
loss = primary_network.train_on_batch(states, target_q)
return loss
num_episodes = 1000000
eps = MAX_EPSILON
render = False
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/DuelingQ_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
steps = 0
for i in range(num_episodes):
cnt = 1
avg_loss = 0
tot_reward = 0
state = env.reset()
while True:
if render:
env.render()
action = choose_action(state, primary_network, eps)
next_state, _, done, info = env.step(action)
reward = np.random.normal(1.0, RANDOM_REWARD_STD)
tot_reward += reward
if done:
next_state = None
# store in memory
memory.add_sample((state, action, reward, next_state))
if steps > DELAY_TRAINING:
loss = train(primary_network, memory, target_network)
update_network(primary_network, target_network)
else:
loss = -1
avg_loss += loss
# linearly decay the eps value
if steps > DELAY_TRAINING:
eps = MAX_EPSILON - ((steps - DELAY_TRAINING) / EPSILON_MIN_ITER) * \
(MAX_EPSILON - MIN_EPSILON) if steps < EPSILON_MIN_ITER else \
MIN_EPSILON
steps += 1
if done:
if steps > DELAY_TRAINING:
avg_loss /= cnt
print(f"Episode: {i}, Reward: {cnt}, avg loss: {avg_loss:.5f}, eps: {eps:.3f}")
with train_writer.as_default():
tf.summary.scalar('reward', cnt, step=i)
tf.summary.scalar('avg loss', avg_loss, step=i)
else:
print(f"Pre-training...Episode: {i}")
break
state = next_state
cnt += 1
| 6,519 | 35.629213 | 118 | py |
adventures-in-ml-code | adventures-in-ml-code-master/tf_visualization.py | import tensorflow as tf
import numpy as np
from tensorflow.keras.datasets import mnist
STORE_PATH = 'C:\\Users\\Andy\\TensorFlowBook\\TensorBoard'
def get_batch(x_data, y_data, batch_size):
idxs = np.random.randint(0, len(y_data), batch_size)
return x_data[idxs,:,:], y_data[idxs]
def nn_example():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Python optimisation variables
learning_rate = 0.5
epochs = 20
batch_size = 100
with tf.name_scope("inputs"):
# declare the training data placeholders
x = tf.placeholder(tf.float32, [None, 28, 28])
# reshape input x - for 28 x 28 pixels = 784
x_rs = tf.reshape(x, [-1, 784])
# scale the input data (maximum is 255.0, minimum is 0.0)
x_sc = tf.div(x_rs, 255.0)
# now declare the output data placeholder - 10 digits
y = tf.placeholder(tf.int64, [None, 1])
# convert the y data to one hot values
y_one_hot = tf.reshape(tf.one_hot(y, 10), [-1, 10])
with tf.name_scope("layer_1"):
# now declare the weights connecting the input to the hidden layer
W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.01), name='W')
b1 = tf.Variable(tf.random_normal([300]), name='b')
hidden_logits = tf.add(tf.matmul(x_sc, W1), b1)
hidden_out = tf.nn.sigmoid(hidden_logits)
tf.summary.histogram("Hidden_logits", hidden_logits)
tf.summary.histogram("Hidden_output", hidden_out)
with tf.name_scope("layer_2"):
# and the weights connecting the hidden layer to the output layer
W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.05), name='W')
b2 = tf.Variable(tf.random_normal([10]), name='b')
logits = tf.add(tf.matmul(hidden_out, W2), b2)
# now let's define the cost function which we are going to train the model on
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_one_hot,
logits=logits))
# add an optimiser
optimiser = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
# finally setup the initialisation operator
init_op = tf.global_variables_initializer()
# define an accuracy assessment operation
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(y_one_hot, 1), tf.argmax(logits, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.variable_scope("getimages"):
correct_inputs = tf.boolean_mask(x_sc, correct_prediction)
image_summary_true = tf.summary.image('correct_images', tf.reshape(correct_inputs, (-1, 28, 28, 1)),
max_outputs=5)
incorrect_inputs = tf.boolean_mask(x_sc, tf.logical_not(correct_prediction))
image_summary_false = tf.summary.image('incorrect_images', tf.reshape(incorrect_inputs, (-1, 28, 28, 1)),
max_outputs=5)
# add a summary to store the accuracy
tf.summary.scalar('acc_summary', accuracy)
merged = tf.summary.merge_all()
# start the session
with tf.Session() as sess:
sess.run(init_op)
writer = tf.summary.FileWriter(STORE_PATH, sess.graph)
# initialise the variables
total_batch = int(len(y_train) / batch_size)
for epoch in range(epochs):
avg_cost = 0
for i in range(total_batch):
batch_x, batch_y = get_batch(x_train, y_train, batch_size=batch_size)
_, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y.reshape(-1, 1)})
avg_cost += c / total_batch
acc, summary = sess.run([accuracy, merged], feed_dict={x: x_test, y: y_test.reshape(-1, 1)})
print("Epoch: {}, cost={:.3f}, test set accuracy={:.3f}%".format(epoch + 1, avg_cost, acc*100))
writer.add_summary(summary, epoch)
print("\nTraining complete!")
if __name__ == "__main__":
nn_example() | 4,100 | 44.065934 | 113 | py |
adventures-in-ml-code | adventures-in-ml-code-master/ppo_tf2_cartpole.py | import tensorflow as tf
from tensorflow import keras
import tensorflow_probability as tfp
import numpy as np
import gym
import datetime as dt
STORE_PATH = 'C:\\Users\\andre\\TensorBoard\\PPOCartpole'
CRITIC_LOSS_WEIGHT = 0.5
ENTROPY_LOSS_WEIGHT = 0.01
ENT_DISCOUNT_RATE = 0.995
BATCH_SIZE = 64
GAMMA = 0.99
CLIP_VALUE = 0.2
LR = 0.001
NUM_TRAIN_EPOCHS = 10
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
ent_discount_val = ENTROPY_LOSS_WEIGHT
class Model(keras.Model):
def __init__(self, num_actions):
super().__init__()
self.num_actions = num_actions
self.dense1 = keras.layers.Dense(64, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.dense2 = keras.layers.Dense(64, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.value = keras.layers.Dense(1)
self.policy_logits = keras.layers.Dense(num_actions)
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.value(x), self.policy_logits(x)
def action_value(self, state):
value, logits = self.predict_on_batch(state)
dist = tfp.distributions.Categorical(logits=logits)
action = dist.sample()
return action, value
def critic_loss(discounted_rewards, value_est):
return tf.cast(tf.reduce_mean(keras.losses.mean_squared_error(discounted_rewards, value_est)) * CRITIC_LOSS_WEIGHT,
tf.float32)
def entropy_loss(policy_logits, ent_discount_val):
probs = tf.nn.softmax(policy_logits)
entropy_loss = -tf.reduce_mean(keras.losses.categorical_crossentropy(probs, probs))
return entropy_loss * ent_discount_val
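# PPO clipped surrogate objective: the ratio of new to old probabilities of the taken
# actions is clipped to [1 - CLIP_VALUE, 1 + CLIP_VALUE] and the minimum of the clipped
# and unclipped advantage-weighted terms is maximised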
def actor_loss(advantages, old_probs, action_inds, policy_logits):
probs = tf.nn.softmax(policy_logits)
new_probs = tf.gather_nd(probs, action_inds)
ratio = new_probs / old_probs
policy_loss = -tf.reduce_mean(tf.math.minimum(
ratio * advantages,
tf.clip_by_value(ratio, 1.0 - CLIP_VALUE, 1.0 + CLIP_VALUE) * advantages
))
return policy_loss
def train_model(action_inds, old_probs, states, advantages, discounted_rewards, optimizer, ent_discount_val):
with tf.GradientTape() as tape:
values, policy_logits = model.call(tf.stack(states))
act_loss = actor_loss(advantages, old_probs, action_inds, policy_logits)
ent_loss = entropy_loss(policy_logits, ent_discount_val)
c_loss = critic_loss(discounted_rewards, values)
tot_loss = act_loss + ent_loss + c_loss
grads = tape.gradient(tot_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return tot_loss, c_loss, act_loss, ent_loss
def get_advantages(rewards, dones, values, next_value):
discounted_rewards = np.array(rewards + [next_value[0]])
for t in reversed(range(len(rewards))):
discounted_rewards[t] = rewards[t] + GAMMA * discounted_rewards[t+1] * (1-dones[t])
discounted_rewards = discounted_rewards[:-1]
# advantages are bootstrapped discounted rewards - values, using Bellman's equation
advantages = discounted_rewards - np.stack(values)[:, 0]
# standardise advantages
advantages -= np.mean(advantages)
advantages /= (np.std(advantages) + 1e-10)
# standardise rewards too
discounted_rewards -= np.mean(discounted_rewards)
discounted_rewards /= (np.std(discounted_rewards) + 1e-8)
return discounted_rewards, advantages
model = Model(num_actions)
optimizer = keras.optimizers.Adam(learning_rate=LR)
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/PPO-CartPole_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
num_steps = 10000000
episode_reward_sum = 0
state = env.reset()
episode = 1
total_loss = None
for step in range(num_steps):
rewards = []
actions = []
values = []
states = []
dones = []
probs = []
for _ in range(BATCH_SIZE):
_, policy_logits = model(state.reshape(1, -1))
action, value = model.action_value(state.reshape(1, -1))
new_state, reward, done, _ = env.step(action.numpy()[0])
actions.append(action)
values.append(value[0])
states.append(state)
dones.append(done)
probs.append(policy_logits)
episode_reward_sum += reward
state = new_state
if done:
rewards.append(0.0)
state = env.reset()
if total_loss is not None:
print(f"Episode: {episode}, latest episode reward: {episode_reward_sum}, "
f"total loss: {np.mean(total_loss)}, critic loss: {np.mean(c_loss)}, "
f"actor loss: {np.mean(act_loss)}, entropy loss {np.mean(ent_loss)}")
with train_writer.as_default():
tf.summary.scalar('rewards', episode_reward_sum, episode)
episode_reward_sum = 0
episode += 1
else:
rewards.append(reward)
_, next_value = model.action_value(state.reshape(1, -1))
discounted_rewards, advantages = get_advantages(rewards, dones, values, next_value[0])
actions = tf.squeeze(tf.stack(actions))
probs = tf.nn.softmax(tf.squeeze(tf.stack(probs)))
action_inds = tf.stack([tf.range(0, actions.shape[0]), tf.cast(actions, tf.int32)], axis=1)
total_loss = np.zeros((NUM_TRAIN_EPOCHS))
act_loss = np.zeros((NUM_TRAIN_EPOCHS))
c_loss = np.zeros(((NUM_TRAIN_EPOCHS)))
ent_loss = np.zeros((NUM_TRAIN_EPOCHS))
for epoch in range(NUM_TRAIN_EPOCHS):
loss_tuple = train_model(action_inds, tf.gather_nd(probs, action_inds),
states, advantages, discounted_rewards, optimizer,
ent_discount_val)
total_loss[epoch] = loss_tuple[0]
c_loss[epoch] = loss_tuple[1]
act_loss[epoch] = loss_tuple[2]
ent_loss[epoch] = loss_tuple[3]
ent_discount_val *= ENT_DISCOUNT_RATE
with train_writer.as_default():
tf.summary.scalar('tot_loss', np.mean(total_loss), step)
tf.summary.scalar('critic_loss', np.mean(c_loss), step)
tf.summary.scalar('actor_loss', np.mean(act_loss), step)
tf.summary.scalar('entropy_loss', np.mean(ent_loss), step) | 6,351 | 35.297143 | 119 | py |
adventures-in-ml-code | adventures-in-ml-code-master/keras_eager_tf_2.py | import tensorflow as tf
from tensorflow import keras
import datetime as dt
tf.enable_eager_execution()
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# prepare training data
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32).shuffle(10000)
train_dataset = train_dataset.map(lambda x, y: (tf.div(tf.cast(x, tf.float32), 255.0), tf.reshape(tf.one_hot(y, 10), (-1, 10))))
train_dataset = train_dataset.map(lambda x, y: (tf.image.random_flip_left_right(x), y))
train_dataset = train_dataset.repeat()
# prepare validation data
valid_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(5000).shuffle(10000)
valid_dataset = valid_dataset.map(lambda x, y: (tf.div(tf.cast(x, tf.float32),255.0), tf.reshape(tf.one_hot(y, 10), (-1, 10))))
valid_dataset = valid_dataset.repeat()
class CIFAR10Model(keras.Model):
def __init__(self):
super(CIFAR10Model, self).__init__(name='cifar_cnn')
self.conv1 = keras.layers.Conv2D(64, 5,
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.variance_scaling,
kernel_regularizer=keras.regularizers.l2(l=0.001))
self.max_pool2d = keras.layers.MaxPooling2D((3, 3), (2, 2), padding='same')
self.max_norm = keras.layers.BatchNormalization()
self.conv2 = keras.layers.Conv2D(64, 5,
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.variance_scaling,
kernel_regularizer=keras.regularizers.l2(l=0.001))
self.flatten = keras.layers.Flatten()
self.fc1 = keras.layers.Dense(750, activation=tf.nn.relu,
kernel_initializer=tf.initializers.variance_scaling,
kernel_regularizer=keras.regularizers.l2(l=0.001))
self.dropout = keras.layers.Dropout(0.5)
self.fc2 = keras.layers.Dense(10)
self.softmax = keras.layers.Softmax()
def call(self, x):
x = self.max_pool2d(self.conv1(x))
x = self.max_norm(x)
x = self.max_pool2d(self.conv2(x))
x = self.max_norm(x)
x = self.flatten(x)
x = self.dropout(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
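# A quick shape sanity check for the model above, kept as a standalone helper
# (a minimal sketch; it is not called as part of the training script).
def _check_cifar_output_shape():
    dummy_batch = tf.zeros((2, 32, 32, 3), dtype=tf.float32)
    probs = CIFAR10Model()(dummy_batch)
    # two 3x3/stride-2 poolings take 32x32 -> 16x16 -> 8x8, so the flatten
    # feeds 8 * 8 * 64 = 4096 features into fc1; the softmax output is (N, 10)
    return probs.shape  # (2, 10)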
model = CIFAR10Model()
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='categorical_crossentropy',
metrics=['accuracy'])
callbacks = [
# Write TensorBoard logs to `./logs` directory
keras.callbacks.TensorBoard(log_dir='./log/{}'.format(dt.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")), write_images=True)
]
model.fit(train_dataset, epochs=200, steps_per_epoch=1500,
validation_data=valid_dataset,
validation_steps=3, callbacks=callbacks)
| 3,037 | 44.343284 | 128 | py |
adventures-in-ml-code | adventures-in-ml-code-master/r_learning_python.py | import gym
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, InputLayer
import matplotlib.pylab as plt
env = gym.make('NChain-v0')
def naive_sum_reward_agent(env, num_episodes=500):
# this is the table that will hold our summated rewards for
# each action in each state
r_table = np.zeros((5, 2))
for g in range(num_episodes):
s = env.reset()
done = False
while not done:
if np.sum(r_table[s, :]) == 0:
# make a random selection of actions
a = np.random.randint(0, 2)
else:
                # select the action with the highest cumulative reward
a = np.argmax(r_table[s, :])
new_s, r, done, _ = env.step(a)
r_table[s, a] += r
s = new_s
return r_table
def q_learning_with_table(env, num_episodes=500):
q_table = np.zeros((5, 2))
y = 0.95
lr = 0.8
for i in range(num_episodes):
s = env.reset()
done = False
while not done:
if np.sum(q_table[s,:]) == 0:
# make a random selection of actions
a = np.random.randint(0, 2)
else:
# select the action with largest q value in state s
a = np.argmax(q_table[s, :])
new_s, r, done, _ = env.step(a)
q_table[s, a] += r + lr*(y*np.max(q_table[new_s, :]) - q_table[s, a])
s = new_s
return q_table
def eps_greedy_q_learning_with_table(env, num_episodes=500):
q_table = np.zeros((5, 2))
y = 0.95
eps = 0.5
lr = 0.8
decay_factor = 0.999
for i in range(num_episodes):
s = env.reset()
eps *= decay_factor
done = False
while not done:
if np.random.random() < eps or np.sum(q_table[s, :]) == 0:
a = np.random.randint(0, 2)
else:
a = np.argmax(q_table[s, :])
# pdb.set_trace()
new_s, r, done, _ = env.step(a)
q_table[s, a] += r + lr * (y * np.max(q_table[new_s, :]) - q_table[s, a])
s = new_s
return q_table
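# Rough scale of the exploration schedule above (illustrative numbers only):
# eps starts at 0.5 and is multiplied by 0.999 each episode, so after 500
# episodes eps ~= 0.5 * 0.999 ** 500 ~= 0.30 -- roughly a third of the actions
# are still chosen at random at the end of training.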
def test_methods(env, num_iterations=100):
winner = np.zeros((3,))
for g in range(num_iterations):
m0_table = naive_sum_reward_agent(env, 500)
m1_table = q_learning_with_table(env, 500)
m2_table = eps_greedy_q_learning_with_table(env, 500)
m0 = run_game(m0_table, env)
m1 = run_game(m1_table, env)
m2 = run_game(m2_table, env)
w = np.argmax(np.array([m0, m1, m2]))
winner[w] += 1
print("Game {} of {}".format(g + 1, num_iterations))
return winner
def run_game(table, env):
s = env.reset()
tot_reward = 0
done = False
while not done:
a = np.argmax(table[s, :])
s, r, done, _ = env.step(a)
tot_reward += r
return tot_reward
def q_learning_keras(env, num_episodes=1000):
# create the keras model
model = Sequential()
model.add(InputLayer(batch_input_shape=(1, 5)))
model.add(Dense(10, activation='sigmoid'))
model.add(Dense(2, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# now execute the q learning
y = 0.95
eps = 0.5
decay_factor = 0.999
r_avg_list = []
for i in range(num_episodes):
s = env.reset()
eps *= decay_factor
if i % 100 == 0:
print("Episode {} of {}".format(i + 1, num_episodes))
done = False
r_sum = 0
while not done:
if np.random.random() < eps:
a = np.random.randint(0, 2)
else:
a = np.argmax(model.predict(np.identity(5)[s:s + 1]))
new_s, r, done, _ = env.step(a)
target = r + y * np.max(model.predict(np.identity(5)[new_s:new_s + 1]))
target_vec = model.predict(np.identity(5)[s:s + 1])[0]
target_vec[a] = target
model.fit(np.identity(5)[s:s + 1], target_vec.reshape(-1, 2), epochs=1, verbose=0)
s = new_s
r_sum += r
r_avg_list.append(r_sum / 1000)
plt.plot(r_avg_list)
plt.ylabel('Average reward per game')
plt.xlabel('Number of games')
plt.show()
for i in range(5):
print("State {} - action {}".format(i, model.predict(np.identity(5)[i:i + 1])))
if __name__ == "__main__":
q_learning_keras(env) | 4,424 | 32.522727 | 94 | py |
adventures-in-ml-code | adventures-in-ml-code-master/conv_net_py_torch.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchvision.datasets
from bokeh.plotting import figure
from bokeh.io import show
from bokeh.models import LinearAxis, Range1d
import numpy as np
# Hyperparameters
num_epochs = 6
num_classes = 10
batch_size = 100
learning_rate = 0.001
DATA_PATH = 'C:\\Users\\Andy\\PycharmProjects\\MNISTData'
MODEL_STORE_PATH = 'C:\\Users\\Andy\\PycharmProjects\\pytorch_models\\'
# transforms to apply to the data
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root=DATA_PATH, train=True, transform=trans, download=True)
test_dataset = torchvision.datasets.MNIST(root=DATA_PATH, train=False, transform=trans)
# Data loader
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.drop_out = nn.Dropout()
self.fc1 = nn.Linear(7 * 7 * 64, 1000)
self.fc2 = nn.Linear(1000, 10)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
out = self.drop_out(out)
out = self.fc1(out)
out = self.fc2(out)
return out
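# A minimal sketch verifying the 7 * 7 * 64 size assumed by fc1: the two
# 2x2/stride-2 max-pools take 28x28 -> 14x14 -> 7x7, with 64 channels after
# layer2 (shapes only; not called during training).
def _check_flatten_size():
    dummy = torch.zeros(1, 1, 28, 28)
    net = ConvNet()
    with torch.no_grad():
        features = net.layer2(net.layer1(dummy))
    return features.shape  # torch.Size([1, 64, 7, 7]) -> 7 * 7 * 64 = 3136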
model = ConvNet()
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
loss_list = []
acc_list = []
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
# Run the forward pass
outputs = model(images)
loss = criterion(outputs, labels)
loss_list.append(loss.item())
# Backprop and perform Adam optimisation
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Track the accuracy
total = labels.size(0)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == labels).sum().item()
acc_list.append(correct / total)
if (i + 1) % 100 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'
.format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),
(correct / total) * 100))
# Test the model
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format((correct / total) * 100))
# Save the model and plot
torch.save(model.state_dict(), MODEL_STORE_PATH + 'conv_net_model.ckpt')
p = figure(y_axis_label='Loss', width=850, y_range=(0, 1), title='PyTorch ConvNet results')
p.extra_y_ranges = {'Accuracy': Range1d(start=0, end=100)}
p.add_layout(LinearAxis(y_range_name='Accuracy', axis_label='Accuracy (%)'), 'right')
p.line(np.arange(len(loss_list)), loss_list)
p.line(np.arange(len(loss_list)), np.array(acc_list) * 100, y_range_name='Accuracy', color='red')
show(p)
| 3,793 | 32.575221 | 102 | py |
adventures-in-ml-code | adventures-in-ml-code-master/keras_cnn.py | from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential
import matplotlib.pylab as plt
batch_size = 128
num_classes = 10
epochs = 10
# input image dimensions
img_x, img_y = 28, 28
# load the MNIST data set, which already splits into train and test sets for us
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# reshape the data into a 4D tensor - (sample_number, x_img_size, y_img_size, num_channels)
# because the MNIST is greyscale, we only have a single channel - RGB colour images would have 3
x_train = x_train.reshape(x_train.shape[0], img_x, img_y, 1)
x_test = x_test.reshape(x_test.shape[0], img_x, img_y, 1)
input_shape = (img_x, img_y, 1)
# convert the data to the right type
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices - this is for use in the
# categorical_crossentropy loss below
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
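# e.g. the label 3 becomes the one-hot row [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]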
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1),
activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
class AccuracyHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
def on_epoch_end(self, batch, logs={}):
self.acc.append(logs.get('acc'))
history = AccuracyHistory()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
callbacks=[history])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
plt.plot(range(1, 11), history.acc)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
| 2,477 | 31.181818 | 96 | py |
query-selected-attention | query-selected-attention-main/test.py | import os
import torch
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
import util.util as util
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
train_dataset = create_dataset(util.copyconf(opt, phase="train"))
model = create_model(opt) # create a model given opt.model and other options
# create a webpage for viewing the results
web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory
print('creating web directory', web_dir)
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
for i, data in enumerate(dataset):
model.set_input(data) # unpack data from data loader
if i == 0:
model.data_dependent_initialize()
model.setup(opt) # regular setup: load and print networks; create schedulers
model.parallelize()
if opt.eval:
model.eval()
if i >= opt.num_test: # only apply our model to opt.num_test images.
break
model.test() # run inference
visuals = model.get_current_visuals() # get image results
img_path = model.get_image_paths() # get image paths
if i % 5 == 0: # save images to an HTML file
print('processing (%04d)-th image... %s' % (i, img_path))
save_images(webpage, visuals, img_path, width=opt.display_winsize)
webpage.save() # save the HTML
| 2,235 | 49.818182 | 123 | py |
query-selected-attention | query-selected-attention-main/train.py | import time
import torch
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
opt = TrainOptions().parse() # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
model = create_model(opt) # create a model given opt.model and other options
print('The number of training images = %d' % dataset_size)
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
opt.visualizer = visualizer
total_iters = 0 # the total number of training iterations
optimize_time = 0.1
times = []
for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
dataset.set_epoch(epoch)
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
batch_size = data["A"].size(0)
total_iters += batch_size
epoch_iter += batch_size
torch.cuda.synchronize()
optimize_start_time = time.time()
model.set_input(data) # unpack data from dataset and apply preprocessing
if epoch == opt.epoch_count and i == 0:
model.data_dependent_initialize()
model.setup(opt) # regular setup: load and print networks; create schedulers
model.parallelize()
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
torch.cuda.synchronize()
optimize_time = (time.time() - optimize_start_time) / batch_size * 0.005 + 0.995 * optimize_time
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
visualizer.print_current_losses(epoch, epoch_iter, losses, optimize_time, t_data)
if opt.display_id is None or opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
print(opt.name) # it's useful to occasionally show the experiment name on console
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
model.update_learning_rate() # update learning rates at the end of every epoch.
| 4,279 | 55.315789 | 186 | py |
query-selected-attention | query-selected-attention-main/options/base_options.py | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self, cmd_line=None):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
self.cmd_line = None
if cmd_line is not None:
self.cmd_line = cmd_line.split()
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', default='./datasets/horse2zebra', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='horse2zebra_qsattn_global', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--easy_label', type=str, default='experiment_name', help='Interpretable name')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='qs', help='chooses which model to use.')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', choices=['basic', 'n_layers', 'pixel', 'patch', 'tilestylegan2', 'stylegan2'], help='specify discriminator architecture. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', choices=['resnet_9blocks', 'resnet_6blocks', 'unet_256', 'unet_128', 'stylegan2', 'smallstylegan2', 'resnet_cat'], help='specify generator architecture')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--normG', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for G')
parser.add_argument('--normD', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for D')
parser.add_argument('--init_type', type=str, default='xavier', choices=['normal', 'xavier', 'kaiming', 'orthogonal'], help='network initialization')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', type=util.str2bool, nargs='?', const=True, default=True,
help='no dropout for the generator')
parser.add_argument('--no_antialias', action='store_true', help='if specified, use stride=2 convs instead of antialiased-downsampling (sad)')
parser.add_argument('--no_antialias_up', action='store_true', help='if specified, use [upconv(learned filter)] instead of [upconv(hard-coded [1,3,3,1] filter), conv]')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
if self.cmd_line is None:
opt, _ = parser.parse_known_args()
else:
opt, _ = parser.parse_known_args(self.cmd_line)
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
if self.cmd_line is None:
opt, _ = parser.parse_known_args() # parse again with new defaults
else:
opt, _ = parser.parse_known_args(self.cmd_line) # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
if self.cmd_line is None:
return parser.parse_args()
else:
return parser.parse_args(self.cmd_line)
def print_options(self, opt):
"""Print and save options
It will print both current options and default values(if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
try:
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
except PermissionError as error:
print("permission error {}".format(error))
pass
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
| 9,260 | 57.613924 | 287 | py |
query-selected-attention | query-selected-attention-main/models/base_model.py | import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks_global
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
            -- self.model_names (str list): define networks used in our training.
            -- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def dict_grad_hook_factory(add_func=lambda x: x):
saved_dict = dict()
def hook_gen(name):
def grad_hook(grad):
saved_vals = add_func(grad)
saved_dict[name] = saved_vals
return grad_hook
return hook_gen, saved_dict
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks_global.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = opt.epoch
self.load_networks(load_suffix)
self.print_networks(opt.verbose)
def parallelize(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
setattr(self, 'net' + name, torch.nn.DataParallel(net, self.opt.gpu_ids))
def data_dependent_initialize(self):
pass
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
if self.opt.isTrain and self.opt.pretrained_name is not None:
load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
else:
load_dir = self.save_dir
load_path = os.path.join(load_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
# for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
# self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def generate_visuals_for_evaluation(self, data, mode):
return {}
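# A minimal sketch of the subclass contract described in the class docstring
# above. The network, loss and option fields used here (a 1x1 conv, an L1
# reconstruction loss, opt.lr) are illustrative placeholders only; see
# qs_model.py for the real model used in this project.
class _TemplateModel(BaseModel):
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['G']
        self.visual_names = ['real', 'fake']
        self.model_names = ['G']
        self.netG = torch.nn.Conv2d(3, 3, kernel_size=1).to(self.device)
        if self.isTrain:
            self.criterion = torch.nn.L1Loss()
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr)
            self.optimizers.append(self.optimizer_G)

    def set_input(self, input):
        self.real = input['A'].to(self.device)
        self.image_paths = input['A_paths']

    def forward(self):
        self.fake = self.netG(self.real)

    def optimize_parameters(self):
        self.forward()
        self.loss_G = self.criterion(self.fake, self.real)
        self.optimizer_G.zero_grad()
        self.loss_G.backward()
        self.optimizer_G.step()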
| 11,231 | 42.366795 | 260 | py |
query-selected-attention | query-selected-attention-main/models/patchnce.py | from packaging import version
import torch
from torch import nn
class PatchNCELoss(nn.Module):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
self.mask_dtype = torch.uint8 if version.parse(torch.__version__) < version.parse('1.2.0') else torch.bool
def forward(self, feat_q, feat_k):
batchSize = feat_q.shape[0]
dim = feat_q.shape[1]
feat_k = feat_k.detach()
# pos logit
l_pos = torch.bmm(feat_q.view(batchSize, 1, -1), feat_k.view(batchSize, -1, 1))
l_pos = l_pos.view(batchSize, 1)
# neg logit -- current batch
# reshape features to batch size
feat_q = feat_q.view(self.opt.batch_size, -1, dim)
feat_k = feat_k.view(self.opt.batch_size, -1, dim)
npatches = feat_q.size(1)
l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1)) # b*np*np
# diagonal entries are similarity between same features, and hence meaningless.
# just fill the diagonal with very small number, which is exp(-10) and almost zero
diagonal = torch.eye(npatches, device=feat_q.device, dtype=self.mask_dtype)[None, :, :]
l_neg_curbatch.masked_fill_(diagonal, -10.0)
l_neg = l_neg_curbatch.view(-1, npatches)
out = torch.cat((l_pos, l_neg), dim=1) / self.opt.nce_T
loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long,
device=feat_q.device))
return loss
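# Minimal usage sketch (shapes only). The Namespace below carries just the two
# option fields this loss actually reads; the sizes are illustrative.
def _demo_patch_nce_loss():
    from argparse import Namespace
    crit = PatchNCELoss(Namespace(batch_size=1, nce_T=0.07))
    feat_q = torch.randn(4, 8)  # batch_size * num_patches = 4 patches, dim = 8
    feat_k = torch.randn(4, 8)
    per_patch_loss = crit(feat_q, feat_k)
    return per_patch_loss.shape  # torch.Size([4]) -- one loss value per patch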
| 1,598 | 38 | 114 | py |
query-selected-attention | query-selected-attention-main/models/qs_model.py | import numpy as np
import torch
from .base_model import BaseModel
from . import networks_global, networks_local, networks_local_global
from .patchnce import PatchNCELoss
import util.util as util
class QSModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser.add_argument('--QS_mode', type=str, default="global", choices='(global, local, local_global)')
parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss:GAN(G(X))')
parser.add_argument('--lambda_NCE', type=float, default=1.0, help='weight for NCE loss: NCE(G(X), X)')
parser.add_argument('--nce_idt', type=util.str2bool, default=True, help='use NCE loss for identity mapping: NCE(G(Y), Y))')
parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers')
parser.add_argument('--netF', type=str, default='mlp_sample', choices=['sample', 'reshape', 'mlp_sample'], help='how to downsample the feature map')
parser.add_argument('--netF_nc', type=int, default=256)
parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss')
parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer')
parser.add_argument('--flip_equivariance',
type=util.str2bool, nargs='?', const=True, default=False,
help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT")
parser.set_defaults(pool_size=0) # no image pooling
opt, _ = parser.parse_known_args()
return parser
def __init__(self, opt):
BaseModel.__init__(self, opt)
# specify the training losses you want to print out.
# The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G', 'NCE']
self.visual_names = ['real_A', 'fake_B', 'real_B']
self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]
if opt.nce_idt and self.isTrain:
self.loss_names += ['NCE_Y']
self.visual_names += ['idt_B']
if self.isTrain:
self.model_names = ['G', 'F', 'D']
else: # during test time, only load G
self.model_names = ['G']
if self.opt.QS_mode == 'global':
networks = networks_global
elif self.opt.QS_mode == 'local':
networks = networks_local
else:
networks = networks_local_global
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
if self.isTrain:
self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionNCE = []
for nce_layer in self.nce_layers:
self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def data_dependent_initialize(self):
"""
The feature network netF is defined in terms of the shape of the intermediate, extracted
features of the encoder portion of netG. Because of this, the weights of netF are
initialized at the first feedforward pass with some input images.
Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
"""
bs_per_gpu = self.real_A.size(0) // len(self.opt.gpu_ids)
self.real_A = self.real_A[:bs_per_gpu]
self.real_B = self.real_B[:bs_per_gpu]
self.forward() # compute fake images: G(A)
if self.opt.isTrain:
self.backward_D() # calculate gradients for D
            self.backward_G()                   # calculate gradients for G
if self.opt.lambda_NCE > 0.0:
self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
self.optimizers.append(self.optimizer_F)
def optimize_parameters(self):
# forward
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
if self.opt.netF == 'mlp_sample':
self.optimizer_F.zero_grad()
        self.backward_G()                   # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
if self.opt.netF == 'mlp_sample':
self.optimizer_F.step()
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.real = torch.cat((self.real_A, self.real_B), dim=0) if self.opt.nce_idt else self.real_A
if self.opt.flip_equivariance:
self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5)
if self.flipped_for_equivariance:
self.real = torch.flip(self.real, [3])
self.fake = self.netG(self.real)
self.fake_B = self.fake[:self.real_A.size(0)]
if self.opt.nce_idt:
self.idt_B = self.fake[self.real_A.size(0):]
self.feat_k = self.netG(self.real_A, self.nce_layers, encode_only=True)
    def backward_D(self):
        """Calculate GAN loss for the discriminator"""
        if self.opt.lambda_GAN > 0.0:
            # Fake; stop backprop to the generator by detaching fake_B
            fake = self.fake_B.detach()
            pred_fake = self.netD(fake)
self.loss_D_fake = self.criterionGAN(pred_fake, False).mean()
# Real
pred_real = self.netD(self.real_B)
loss_D_real_unweighted = self.criterionGAN(pred_real, True)
self.loss_D_real = loss_D_real_unweighted.mean()
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
else:
self.loss_D_real, self.loss_D_fake, self.loss_D = 0.0, 0.0, 0.0
def backward_G(self):
"""Calculate GAN and NCE loss for the generator"""
fake = self.fake_B
# First, G(A) should fake the discriminator
if self.opt.lambda_GAN > 0.0:
pred_fake = self.netD(fake)
self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN
else:
self.loss_G_GAN = 0.0
if self.opt.lambda_NCE > 0.0:
self.loss_NCE = self.calculate_NCE_loss(self.real_A, self.fake_B)
else:
self.loss_NCE = 0.0
if self.opt.nce_idt and self.opt.lambda_NCE > 0.0:
self.loss_NCE_Y = self.calculate_NCE_loss(self.real_B, self.idt_B)
loss_NCE_both = (self.loss_NCE + self.loss_NCE_Y) * 0.5
else:
loss_NCE_both = self.loss_NCE
self.loss_G = self.loss_G_GAN + loss_NCE_both
self.loss_G.backward()
def calculate_NCE_loss(self, src, tgt):
n_layers = len(self.nce_layers)
feat_q = self.netG(tgt, self.nce_layers, encode_only=True)
if self.opt.flip_equivariance and self.flipped_for_equivariance:
feat_q = [torch.flip(fq, [3]) for fq in feat_q]
feat_k = self.netG(src, self.nce_layers, encode_only=True)
feat_k_pool, sample_ids, attn_mats = self.netF(feat_k, self.opt.num_patches, None, None)
feat_q_pool, _, _ = self.netF(feat_q, self.opt.num_patches, sample_ids, attn_mats)
total_nce_loss = 0.0
for f_q, f_k, crit, nce_layer in zip(feat_q_pool, feat_k_pool, self.criterionNCE, self.nce_layers):
loss = crit(f_q, f_k) * self.opt.lambda_NCE
total_nce_loss += loss.mean()
return total_nce_loss / n_layers
| 9,580 | 47.145729 | 204 | py |
query-selected-attention | query-selected-attention-main/models/networks_local.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
elif(filt_size == 7):
a = np.array([1., 6., 15., 20., 15., 6., 1.])
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
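# Minimal sketch of the antialiased downsampling above with the default
# settings: reflection-pad, blur with the binomial filter, then subsample by
# the stride (sizes illustrative).
def _demo_downsample():
    x = torch.randn(1, 3, 8, 8)
    down = Downsample(channels=3, filt_size=3, stride=2)
    return down(x).shape  # torch.Size([1, 3, 4, 4])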
class Upsample2(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super().__init__()
self.factor = scale_factor
self.mode = mode
def forward(self, x):
return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
else:
print('Pad type [%s] not recognized' % pad_type)
return PadLayer
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
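# Minimal sketch: the returned callable is applied to a channel count, e.g.
def _demo_norm_layer():
    norm_layer = get_norm_layer('instance')
    return norm_layer(64)  # an InstanceNorm2d over 64 channels, affine=False, no running stats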
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
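# For the 'linear' policy above: with epoch_count = 1, n_epochs = 200 and
# n_epochs_decay = 200 (illustrative values), lambda_rule returns 1.0 for the
# first 200 epochs and then shrinks linearly, 1 - (epoch + 1 - 200) / 201,
# approaching zero by epoch 400.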
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
elif netG == 'resnet_4blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'resnet_cat':
n_blocks = 8
net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))
def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
if netF == 'global_pool':
net = PoolingF()
elif netF == 'reshape':
net = ReshapeF()
elif netF == 'sample':
net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'mlp_sample':
net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'strided_conv':
net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
else:
raise NotImplementedError('projection model name [%s] is not recognized' % netF)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
    [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == "patch":
net = PatchDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids,
initialize_weights=('stylegan2' not in netD))
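# Minimal, hedged example of building and calling a discriminator with this factory
# (shapes are illustrative; torch is already imported in this module):
#
#   netD = define_D(input_nc=3, ndf=64, netD='basic', norm='instance', gpu_ids=[])
#   pred = netD(torch.randn(1, 3, 256, 256))   # one-channel patch prediction map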
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, wgangp, and nonsaturating.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid. Vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp', 'nonsaturating']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
bs = prediction.size(0)
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
elif self.gan_mode == 'nonsaturating':
if target_is_real:
loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
else:
loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
return loss
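# Hedged sketch of how GANLoss is usually consumed in a training step
# (`netD`, `real`, `fake` and `device` are assumed to exist elsewhere):
#
#   criterionGAN = GANLoss('lsgan').to(device)
#   loss_D = 0.5 * (criterionGAN(netD(real), True) +
#                   criterionGAN(netD(fake.detach()), False))
#   loss_G = criterionGAN(netD(fake), True)   # generator tries to fool the discriminator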
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)            -- the constant used in the formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
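# Hedged example of plugging the penalty into a WGAN-GP critic update
# (`netD`, `real`, `fake`, `device` assumed; lambda_gp already scales the term):
#
#   gp, _ = cal_gradient_penalty(netD, real, fake, device, type='mixed',
#                                constant=1.0, lambda_gp=10.0)
#   loss_D = netD(fake.detach()).mean() - netD(real).mean() + gp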
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm + 1e-7)
return out
class PoolingF(nn.Module):
def __init__(self):
super(PoolingF, self).__init__()
model = [nn.AdaptiveMaxPool2d(1)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
return self.l2norm(self.model(x))
class ReshapeF(nn.Module):
def __init__(self):
super(ReshapeF, self).__init__()
model = [nn.AdaptiveAvgPool2d(4)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
x = self.model(x)
x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
return self.l2norm(x_reshape)
class StridedConvF(nn.Module):
def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]):
super().__init__()
# self.conv1 = nn.Conv2d(256, 128, 3, stride=2)
# self.conv2 = nn.Conv2d(128, 64, 3, stride=1)
self.l2_norm = Normalize(2)
self.mlps = {}
self.moving_averages = {}
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, x):
C, H = x.shape[1], x.shape[2]
n_down = int(np.rint(np.log2(H / 32)))
mlp = []
for i in range(n_down):
mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2))
mlp.append(nn.ReLU())
C = max(C // 2, 64)
mlp.append(nn.Conv2d(C, 64, 3))
mlp = nn.Sequential(*mlp)
init_net(mlp, self.init_type, self.init_gain, self.gpu_ids)
return mlp
def update_moving_average(self, key, x):
if key not in self.moving_averages:
self.moving_averages[key] = x.detach()
self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001
def forward(self, x, use_instance_norm=False):
C, H = x.shape[1], x.shape[2]
key = '%d_%d' % (C, H)
if key not in self.mlps:
self.mlps[key] = self.create_mlp(x)
self.add_module("child_%s" % key, self.mlps[key])
mlp = self.mlps[key]
x = mlp(x)
self.update_moving_average(key, x)
x = x - self.moving_averages[key]
if use_instance_norm:
x = F.instance_norm(x)
return self.l2_norm(x)
class PatchSampleF(nn.Module):
def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
# potential issues: currently, we use the same patch_ids for multiple images in the batch
super(PatchSampleF, self).__init__()
self.l2norm = Normalize(2)
self.use_mlp = use_mlp
self.nc = nc # hard-coded
self.mlp_init = False
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, feats):
for mlp_id, feat in enumerate(feats):
input_nc = feat.shape[1]
mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
mlp.cuda()
setattr(self, 'mlp_%d' % mlp_id, mlp)
init_net(self, self.init_type, self.init_gain, self.gpu_ids)
self.mlp_init = True
def forward(self, feats, num_patches=64, patch_ids=None, attn_mats=None):
return_ids = []
return_feats = []
return_mats = []
k_s = 7 # kernel size in unfold
if self.use_mlp and not self.mlp_init:
self.create_mlp(feats)
for feat_id, feat in enumerate(feats):
B, C, H, W = feat.shape[0], feat.shape[1], feat.shape[2], feat.shape[3]
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # B*HW*C
if num_patches > 0:
if feat_id < 3:
if patch_ids is not None:
patch_id = patch_ids[feat_id]
else:
patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device) # random id in [0, HW]
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
attn_qs = torch.zeros(1).to(feat.device)
else:
feat_local = F.unfold(feat, kernel_size=k_s, stride=1, padding=3) # (B, ks*ks*C, L)
L = feat_local.shape[2]
if attn_mats is not None:
patch_id = patch_ids[feat_id]
attn_qs = attn_mats[feat_id]
else:
feat_k = feat_local.permute(0, 2, 1).reshape(B, L, k_s * k_s, C).flatten(0, 1) # (B*L, ks*ks, C)
feat_q = feat_reshape.reshape(B*L, C, 1)
dots_local = torch.bmm(feat_k, feat_q) # (B*L, ks*ks, 1)
attn_local = dots_local.softmax(dim=1)
attn_local = attn_local.reshape(B, L, -1) # (B, L, ks*ks)
prob = -torch.log(attn_local)
prob = torch.where(torch.isinf(prob), torch.full_like(prob, 0), prob)
entropy = torch.sum(torch.mul(attn_local, prob), dim=2)
_, index = torch.sort(entropy)
patch_id = index[:, :num_patches]
attn_qs = attn_local[torch.arange(B)[:, None], patch_id, :] # (B, n_p, ks*ks)
attn_qs = attn_qs.flatten(0, 1).unsqueeze(1) # (B*n_p, 1, ks*ks)
feat_v = feat_local[torch.arange(B)[:, None], :, patch_id].permute(0, 2, 1) # (B, n_p, ks*ks*C)
feat_v = feat_v.flatten(0, 1).view(B*num_patches, k_s*k_s, C)
feat_reshape = torch.bmm(attn_qs, feat_v) # (B*n_p, 1, C)
x_sample = feat_reshape.flatten(0, 1)
else:
x_sample = feat_reshape
patch_id = []
if self.use_mlp:
mlp = getattr(self, 'mlp_%d' % feat_id)
x_sample = mlp(x_sample)
return_ids.append(patch_id)
return_mats.append(attn_qs)
x_sample = self.l2norm(x_sample)
if num_patches == 0:
x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
return_feats.append(x_sample)
return return_feats, return_ids, return_mats
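# Shape walk-through of the local-attention branch above, as a hedged reading of the code
# (concrete numbers assume B=1, C=256, H=W=64, num_patches=256, k_s=7):
#   feat_reshape : (1, 4096, 256)        # B x HW x C
#   feat_local   : (1, 49*256, 4096)     # F.unfold with kernel 7, stride 1, padding 3
#   attn_local   : (1, 4096, 49)         # softmax over each query's 7x7 neighbourhood
#   entropy      : (1, 4096)             # per-query entropy of that local distribution
# The num_patches positions with the lowest entropy (the most selective queries) are kept,
# their attention rows re-weight the corresponding unfolded values, and the result is
# flattened to x_sample of shape (B*num_patches, C) before the per-layer MLP and l2 norm.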
class G_Resnet(nn.Module):
def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64,
norm=None, nl_layer=None):
super(G_Resnet, self).__init__()
n_downsample = num_downs
pad_type = 'reflect'
self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type)
if nz == 0:
self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
else:
self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
def decode(self, content, style=None):
return self.dec(content, style)
def forward(self, image, style=None, nce_layers=[], encode_only=False):
content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
if encode_only:
return feats
else:
images_recon = self.decode(content, style)
if len(nce_layers) > 0:
return images_recon, feats
else:
return images_recon
##################################################################################
# Encoder and Decoders
##################################################################################
class E_adaIN(nn.Module):
def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
norm=None, nl_layer=None, vae=False):
# style encoder
super(E_adaIN, self).__init__()
self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)
def forward(self, image):
style = self.enc_style(image)
return style
class StyleEncoder(nn.Module):
def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
super(StyleEncoder, self).__init__()
self.vae = vae
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
for i in range(2):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
for i in range(n_downsample - 2):
self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling
if self.vae:
self.fc_mean = nn.Linear(dim, style_dim) # , 1, 1, 0)
self.fc_var = nn.Linear(dim, style_dim) # , 1, 1, 0)
else:
self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x):
if self.vae:
output = self.model(x)
output = output.view(x.size(0), -1)
output_mean = self.fc_mean(output)
output_var = self.fc_var(output)
return output_mean, output_var
else:
return self.model(x).view(x.size(0), -1)
class ContentEncoder(nn.Module):
def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
super(ContentEncoder, self).__init__()
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
# downsampling blocks
for i in range(n_downsample):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
# residual blocks
self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x, nce_layers=[], encode_only=False):
if len(nce_layers) > 0:
feat = x
feats = []
for layer_id, layer in enumerate(self.model):
feat = layer(feat)
if layer_id in nce_layers:
feats.append(feat)
if layer_id == nce_layers[-1] and encode_only:
return None, feats
return feat, feats
else:
return self.model(x), None
        # Unreachable debug helper (both branches above already return); kept as a comment:
        # for layer_id, layer in enumerate(self.model):
        #     print(layer_id, layer)
class Decoder_all(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder_all, self).__init__()
# AdaIN residual blocks
self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)
self.n_blocks = 0
# upsampling blocks
for i in range(n_upsample):
block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block))
self.n_blocks += 1
dim //= 2
# use reflection padding in the last conv layer
setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect'))
self.n_blocks += 1
def forward(self, x, y=None):
if y is not None:
output = self.resnet_block(cat_feature(x, y))
for n in range(self.n_blocks):
block = getattr(self, 'block_{:d}'.format(n))
if n > 0:
output = block(cat_feature(output, y))
else:
output = block(output)
return output
class Decoder(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder, self).__init__()
self.model = []
# AdaIN residual blocks
self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)]
# upsampling blocks
for i in range(n_upsample):
if i == 0:
input_dim = dim + nz
else:
input_dim = dim
self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
dim //= 2
# use reflection padding in the last conv layer
self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')]
self.model = nn.Sequential(*self.model)
def forward(self, x, y=None):
if y is not None:
return self.model(cat_feature(x, y))
else:
return self.model(x)
##################################################################################
# Sequential Models
##################################################################################
class ResBlocks(nn.Module):
def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlocks, self).__init__()
self.model = []
for i in range(num_blocks):
self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
##################################################################################
# Basic Blocks
##################################################################################
def cat_feature(x, y):
y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(
y.size(0), y.size(1), x.size(2), x.size(3))
x_cat = torch.cat([x, y_expand], 1)
return x_cat
class ResBlock(nn.Module):
def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlock, self).__init__()
model = []
model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)]
model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)]
self.model = nn.Sequential(*model)
def forward(self, x):
residual = x
out = self.model(x)
out += residual
return out
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride,
padding=0, norm='none', activation='relu', pad_type='zero'):
super(Conv2dBlock, self).__init__()
self.use_bias = True
# initialize padding
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, "Unsupported padding type: {}".format(pad_type)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
# initialize convolution
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
def forward(self, x):
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
##################################################################################
# Normalization layers
##################################################################################
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
    We adapt Torch code and ideas from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.opt = opt
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if no_antialias_up:
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1, # output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, layers=[], encode_only=False):
if -1 in layers:
layers.append(len(self.model))
if len(layers) > 0:
feat = input
feats = []
for layer_id, layer in enumerate(self.model):
# print(layer_id, layer)
feat = layer(feat)
if layer_id in layers:
# print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
feats.append(feat)
else:
# print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
pass
if layer_id == layers[-1] and encode_only:
# print('encoder only return features')
return feats # return intermediate features alone; stop in the last layers
return feat, feats # return both output and intermediate features
else:
"""Standard forward"""
fake = self.model(input)
return fake
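# Hedged usage note for the multi-output forward above (the layer indices are illustrative,
# not prescribed by this module):
#
#   feats = netG(img, layers=[0, 4, 8, 12, 16], encode_only=True)   # intermediate features only
#   fake, feats = netG(img, layers=[0, 4, 8, 12, 16])               # output image + features
#   fake = netG(img)                                                # plain image-to-image pass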
class ResnetDecoder(nn.Module):
"""Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based decoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetDecoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = []
n_downsampling = 2
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if(no_antialias):
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetEncoder(nn.Module):
"""Resnet-based encoder that consists of a few downsampling + several Resnet blocks
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based encoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetEncoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                                an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
if(no_antialias):
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
else:
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
if(no_antialias):
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
else:
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
Downsample(ndf * nf_mult)]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
class PatchDiscriminator(NLayerDiscriminator):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)
def forward(self, input):
B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
size = 16
Y = H // size
X = W // size
input = input.view(B, C, Y, size, X, size)
input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
return super().forward(input)
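# Hedged shape note for the tiling above: the input is cut into non-overlapping 16x16 crops
# and each crop is scored by the inherited 2-layer PatchGAN, e.g. a (4, 3, 256, 256) batch
# becomes (4*16*16, 3, 16, 16) = (1024, 3, 16, 16) before super().forward() is applied.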
class GroupedChannelNorm(nn.Module):
def __init__(self, num_groups):
super().__init__()
self.num_groups = num_groups
def forward(self, x):
shape = list(x.shape)
new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]
x = x.view(*new_shape)
mean = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
x_norm = (x - mean) / (std + 1e-7)
return x_norm.view(*shape)
| 61,828 | 42.480309 | 187 | py |
query-selected-attention | query-selected-attention-main/models/networks_global.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
elif(filt_size == 7):
a = np.array([1., 6., 15., 20., 15., 6., 1.])
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
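# Hedged illustration of the anti-aliasing filter built above: for the default filt_size=3,
# get_filter returns the normalised outer product of [1, 2, 1] with itself,
#   [[1, 2, 1],
#    [2, 4, 2],
#    [1, 2, 1]] / 16,
# applied depthwise (groups = number of channels) after reflect padding, so the blur and the
# stride-2 subsampling happen in a single grouped convolution.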
class Upsample2(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super().__init__()
self.factor = scale_factor
self.mode = mode
def forward(self, x):
return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
else:
print('Pad type [%s] not recognized' % pad_type)
return PadLayer
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
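# Hedged example: the returned callable is used like a layer class, e.g.
#
#   norm_layer = get_norm_layer('instance')
#   block = [nn.Conv2d(3, 64, kernel_size=3, padding=1), norm_layer(64), nn.ReLU(True)]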
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
elif netG == 'resnet_4blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'resnet_cat':
n_blocks = 8
net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))
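# Minimal, hedged example of building a generator with this factory (arguments are
# illustrative; `opt` may stay None for the resnet variants used here):
#
#   netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks',
#                   norm='instance', use_dropout=False, init_type='xavier', gpu_ids=[])
#   fake = netG(torch.randn(1, 3, 256, 256))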
def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
if netF == 'global_pool':
net = PoolingF()
elif netF == 'reshape':
net = ReshapeF()
elif netF == 'sample':
net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'mlp_sample':
net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'strided_conv':
net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
else:
raise NotImplementedError('projection model name [%s] is not recognized' % netF)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
        with the parameter <n_layers_D> (default=3, as used in [basic] (PatchGAN)).
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == "patch":
net = PatchDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids,
initialize_weights=('stylegan2' not in netD))
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, wgangp, and nonsaturating.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid. Vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp', 'nonsaturating']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
bs = prediction.size(0)
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
elif self.gan_mode == 'nonsaturating':
if target_is_real:
loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
else:
loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)            -- the constant used in the formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm + 1e-7)
return out
class PoolingF(nn.Module):
def __init__(self):
super(PoolingF, self).__init__()
model = [nn.AdaptiveMaxPool2d(1)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
return self.l2norm(self.model(x))
class ReshapeF(nn.Module):
def __init__(self):
super(ReshapeF, self).__init__()
model = [nn.AdaptiveAvgPool2d(4)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
x = self.model(x)
x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
return self.l2norm(x_reshape)
class StridedConvF(nn.Module):
def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]):
super().__init__()
# self.conv1 = nn.Conv2d(256, 128, 3, stride=2)
# self.conv2 = nn.Conv2d(128, 64, 3, stride=1)
self.l2_norm = Normalize(2)
self.mlps = {}
self.moving_averages = {}
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, x):
C, H = x.shape[1], x.shape[2]
n_down = int(np.rint(np.log2(H / 32)))
mlp = []
for i in range(n_down):
mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2))
mlp.append(nn.ReLU())
C = max(C // 2, 64)
mlp.append(nn.Conv2d(C, 64, 3))
mlp = nn.Sequential(*mlp)
init_net(mlp, self.init_type, self.init_gain, self.gpu_ids)
return mlp
def update_moving_average(self, key, x):
if key not in self.moving_averages:
self.moving_averages[key] = x.detach()
self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001
def forward(self, x, use_instance_norm=False):
C, H = x.shape[1], x.shape[2]
key = '%d_%d' % (C, H)
if key not in self.mlps:
self.mlps[key] = self.create_mlp(x)
self.add_module("child_%s" % key, self.mlps[key])
mlp = self.mlps[key]
x = mlp(x)
self.update_moving_average(key, x)
x = x - self.moving_averages[key]
if use_instance_norm:
x = F.instance_norm(x)
return self.l2_norm(x)
class PatchSampleF(nn.Module):
def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
# potential issues: currently, we use the same patch_ids for multiple images in the batch
super(PatchSampleF, self).__init__()
self.l2norm = Normalize(2)
self.use_mlp = use_mlp
self.nc = nc # hard-coded
self.mlp_init = False
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, feats):
for mlp_id, feat in enumerate(feats):
input_nc = feat.shape[1]
mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
mlp.cuda()
setattr(self, 'mlp_%d' % mlp_id, mlp)
init_net(self, self.init_type, self.init_gain, self.gpu_ids)
self.mlp_init = True
def forward(self, feats, num_patches=64, patch_ids=None, attn_mats=None):
return_ids = []
return_feats = []
return_mats = []
if self.use_mlp and not self.mlp_init:
self.create_mlp(feats)
for feat_id, feat in enumerate(feats):
B, C, H, W = feat.shape[0], feat.shape[1], feat.shape[2], feat.shape[3]
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # B*HW*C
if num_patches > 0:
if feat_id < 3:
if patch_ids is not None:
patch_id = patch_ids[feat_id]
else:
patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device) # random id in [0, HW]
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
attn_qs = torch.zeros(1).to(feat.device)
else:
if attn_mats is not None:
attn_qs = attn_mats[feat_id]
else:
feat_q = feat_reshape
feat_k = feat_reshape.permute(0, 2, 1)
dots = torch.bmm(feat_q, feat_k) # (B, HW, HW)
attn = dots.softmax(dim=2)
prob = -torch.log(attn)
prob = torch.where(torch.isinf(prob), torch.full_like(prob, 0), prob)
entropy = torch.sum(torch.mul(attn, prob), dim=2)
_, index = torch.sort(entropy)
patch_id = index[:, :num_patches]
attn_qs = attn[torch.arange(B)[:, None], patch_id, :]
feat_reshape = torch.bmm(attn_qs, feat_reshape) # (B, n_p, C)
x_sample = feat_reshape.flatten(0, 1)
patch_id = []
else:
x_sample = feat_reshape
patch_id = []
if self.use_mlp:
mlp = getattr(self, 'mlp_%d' % feat_id)
x_sample = mlp(x_sample)
return_ids.append(patch_id)
return_mats.append(attn_qs)
x_sample = self.l2norm(x_sample)
if num_patches == 0:
x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
return_feats.append(x_sample)
return return_feats, return_ids, return_mats
class G_Resnet(nn.Module):
def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64,
norm=None, nl_layer=None):
super(G_Resnet, self).__init__()
n_downsample = num_downs
pad_type = 'reflect'
self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type)
if nz == 0:
self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
else:
self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
def decode(self, content, style=None):
return self.dec(content, style)
def forward(self, image, style=None, nce_layers=[], encode_only=False):
content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
if encode_only:
return feats
else:
images_recon = self.decode(content, style)
if len(nce_layers) > 0:
return images_recon, feats
else:
return images_recon
##################################################################################
# Encoder and Decoders
##################################################################################
class E_adaIN(nn.Module):
def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
norm=None, nl_layer=None, vae=False):
# style encoder
super(E_adaIN, self).__init__()
self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)
def forward(self, image):
style = self.enc_style(image)
return style
class StyleEncoder(nn.Module):
def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
super(StyleEncoder, self).__init__()
self.vae = vae
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
for i in range(2):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
for i in range(n_downsample - 2):
self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling
if self.vae:
self.fc_mean = nn.Linear(dim, style_dim) # , 1, 1, 0)
self.fc_var = nn.Linear(dim, style_dim) # , 1, 1, 0)
else:
self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x):
if self.vae:
output = self.model(x)
output = output.view(x.size(0), -1)
output_mean = self.fc_mean(output)
output_var = self.fc_var(output)
return output_mean, output_var
else:
return self.model(x).view(x.size(0), -1)
class ContentEncoder(nn.Module):
def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
super(ContentEncoder, self).__init__()
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
# downsampling blocks
for i in range(n_downsample):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
# residual blocks
self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x, nce_layers=[], encode_only=False):
if len(nce_layers) > 0:
feat = x
feats = []
for layer_id, layer in enumerate(self.model):
feat = layer(feat)
if layer_id in nce_layers:
feats.append(feat)
if layer_id == nce_layers[-1] and encode_only:
return None, feats
return feat, feats
else:
return self.model(x), None
class Decoder_all(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder_all, self).__init__()
# AdaIN residual blocks
self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)
self.n_blocks = 0
# upsampling blocks
for i in range(n_upsample):
block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block))
self.n_blocks += 1
dim //= 2
# use reflection padding in the last conv layer
setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect'))
self.n_blocks += 1
def forward(self, x, y=None):
if y is not None:
output = self.resnet_block(cat_feature(x, y))
for n in range(self.n_blocks):
block = getattr(self, 'block_{:d}'.format(n))
if n > 0:
output = block(cat_feature(output, y))
else:
output = block(output)
return output
class Decoder(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder, self).__init__()
self.model = []
# AdaIN residual blocks
self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)]
# upsampling blocks
for i in range(n_upsample):
if i == 0:
input_dim = dim + nz
else:
input_dim = dim
self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
dim //= 2
# use reflection padding in the last conv layer
self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')]
self.model = nn.Sequential(*self.model)
def forward(self, x, y=None):
if y is not None:
return self.model(cat_feature(x, y))
else:
return self.model(x)
##################################################################################
# Sequential Models
##################################################################################
class ResBlocks(nn.Module):
def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlocks, self).__init__()
self.model = []
for i in range(num_blocks):
self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
##################################################################################
# Basic Blocks
##################################################################################
def cat_feature(x, y):
y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(
y.size(0), y.size(1), x.size(2), x.size(3))
x_cat = torch.cat([x, y_expand], 1)
return x_cat
class ResBlock(nn.Module):
def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlock, self).__init__()
model = []
model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)]
model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)]
self.model = nn.Sequential(*model)
def forward(self, x):
residual = x
out = self.model(x)
out += residual
return out
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride,
padding=0, norm='none', activation='relu', pad_type='zero'):
super(Conv2dBlock, self).__init__()
self.use_bias = True
# initialize padding
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, "Unsupported padding type: {}".format(pad_type)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
# initialize convolution
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
def forward(self, x):
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
##################################################################################
# Normalization layers
##################################################################################
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
    We adapt Torch code and ideas from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.opt = opt
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if no_antialias_up:
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1, # output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, layers=[], encode_only=False):
if -1 in layers:
layers.append(len(self.model))
if len(layers) > 0:
feat = input
feats = []
for layer_id, layer in enumerate(self.model):
# print(layer_id, layer)
feat = layer(feat)
if layer_id in layers:
# print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
feats.append(feat)
else:
# print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
pass
if layer_id == layers[-1] and encode_only:
# print('encoder only return features')
                    return feats  # return intermediate features alone; stop at the last requested layer
return feat, feats # return both output and intermediate features
else:
"""Standard forward"""
fake = self.model(input)
return fake
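# Illustrative sketch (not from the original repository): besides the standard image-to-image
# forward pass, intermediate encoder features can be extracted by passing layer indices together
# with encode_only=True; the indices below are just examples of valid positions in self.model.
def _example_resnet_generator():
    netG = ResnetGenerator(3, 3, ngf=64, norm_layer=nn.InstanceNorm2d, n_blocks=9)
    x = torch.randn(1, 3, 256, 256)
    fake = netG(x)                                               # standard forward, (1, 3, 256, 256)
    feats = netG(x, layers=[0, 4, 8, 12, 16], encode_only=True)  # list of intermediate feature maps
    return fake, [f.shape for f in feats]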
class ResnetDecoder(nn.Module):
"""Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based decoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetDecoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = []
n_downsampling = 2
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if(no_antialias):
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetEncoder(nn.Module):
"""Resnet-based encoder that consists of a few downsampling + several Resnet blocks
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based encoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetEncoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
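# Illustrative sketch (not from the original repository): num_downs has to match the input size,
# e.g. 8 downsamplings for 256x256 inputs so the innermost feature map is 1x1. A batch of 2 is
# used because BatchNorm needs more than one value per channel at the 1x1 bottleneck in train mode.
def _example_unet_generator():
    netG = UnetGenerator(input_nc=3, output_nc=3, num_downs=8, ngf=64, norm_layer=nn.BatchNorm2d)
    out = netG(torch.randn(2, 3, 256, 256))
    assert out.shape == (2, 3, 256, 256)
    return out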
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
if(no_antialias):
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
else:
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
if(no_antialias):
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
else:
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
Downsample(ndf * nf_mult)]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
class PatchDiscriminator(NLayerDiscriminator):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)
def forward(self, input):
B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
size = 16
Y = H // size
X = W // size
input = input.view(B, C, Y, size, X, size)
input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
return super().forward(input)
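# Illustrative sketch (not from the original repository): PatchDiscriminator crops the input into
# non-overlapping 16x16 tiles and scores every tile with a shallower (2-layer) PatchGAN, so the
# first dimension of the output is the number of tiles rather than the batch size.
def _example_patch_discriminator():
    netD = PatchDiscriminator(input_nc=3, ndf=64, norm_layer=nn.InstanceNorm2d)
    pred = netD(torch.randn(2, 3, 64, 64))  # 2 images -> 2 * (64 // 16) ** 2 = 32 tiles
    return pred.shape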
class GroupedChannelNorm(nn.Module):
def __init__(self, num_groups):
super().__init__()
self.num_groups = num_groups
def forward(self, x):
shape = list(x.shape)
new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]
x = x.view(*new_shape)
mean = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
x_norm = (x - mean) / (std + 1e-7)
return x_norm.view(*shape)
| 61,118 | 42.19364 | 187 | py |
query-selected-attention | query-selected-attention-main/models/networks_local_global.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
elif(filt_size == 7):
a = np.array([1., 6., 15., 20., 15., 6., 1.])
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
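# Illustrative sketch (not from the original repository): get_filter builds a normalized 2D
# binomial kernel (the outer product of a row of binomial coefficients with itself), which the
# Downsample/Upsample modules below use as a fixed anti-aliasing filter.
def _example_get_filter():
    filt = get_filter(filt_size=3)
    # filt is proportional to [[1, 2, 1], [2, 4, 2], [1, 2, 1]] and sums to 1 after normalization
    assert abs(float(filt.sum()) - 1.0) < 1e-6
    return filt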
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
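# Illustrative sketch (not from the original repository): Downsample blurs with the binomial
# filter from get_filter and then subsamples with the given stride, halving the spatial size
# while reducing aliasing compared to a plain strided convolution.
def _example_downsample():
    x = torch.randn(1, 8, 32, 32)
    down = Downsample(channels=8, filt_size=3, stride=2)
    y = down(x)
    assert y.shape == (1, 8, 16, 16)
    return y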
class Upsample2(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super().__init__()
self.factor = scale_factor
self.mode = mode
def forward(self, x):
return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
else:
        raise NotImplementedError('Pad type [%s] not recognized' % pad_type)
return PadLayer
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
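# Illustrative sketch (not from the original repository): get_norm_layer returns a factory that
# still expects the channel count, so it can be handed to the network constructors unchanged.
def _example_get_norm_layer():
    instance_norm = get_norm_layer('instance')(64)  # InstanceNorm2d(64), no affine, no running stats
    no_norm = get_norm_layer('none')(64)            # Identity module; the argument is ignored
    return instance_norm, no_norm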
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
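# Illustrative sketch (not from the original repository): with the 'linear' policy the learning
# rate stays constant for opt.n_epochs epochs and then decays linearly to zero over
# opt.n_epochs_decay epochs. The Namespace below is a hypothetical stand-in for the options
# object produced by the project's option parser.
def _example_get_scheduler():
    import argparse
    opt = argparse.Namespace(lr_policy='linear', epoch_count=1, n_epochs=100, n_epochs_decay=100)
    net = nn.Conv2d(3, 3, 3)
    optimizer = torch.optim.Adam(net.parameters(), lr=2e-4)
    scheduler = get_scheduler(optimizer, opt)
    for _ in range(5):          # one scheduler step per epoch in a real training loop
        optimizer.step()
        scheduler.step()
    return optimizer.param_groups[0]['lr']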
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
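# Illustrative sketch (not from the original repository): init_weights walks the module tree and
# re-initializes Conv/Linear weights according to init_type (here a zero-mean Gaussian with
# standard deviation init_gain) and resets BatchNorm affine parameters.
def _example_init_weights():
    net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))
    init_weights(net, init_type='normal', init_gain=0.02)
    return float(net[0].weight.std())  # roughly 0.02 after re-initialization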
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
elif netG == 'resnet_4blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'resnet_cat':
n_blocks = 8
net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))
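# Illustrative sketch (not from the original repository): building the ResNet generator on CPU.
# gpu_ids=[] skips the CUDA transfer inside init_net, and opt is only required by the
# 'resnet_cat' variant, so it can be left as None here.
def _example_define_G():
    netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks',
                    norm='instance', use_dropout=False, gpu_ids=[])
    fake = netG(torch.randn(1, 3, 256, 256))
    assert fake.shape == (1, 3, 256, 256)  # same spatial size as the input
    return fake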
def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
if netF == 'global_pool':
net = PoolingF()
elif netF == 'reshape':
net = ReshapeF()
elif netF == 'sample':
net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'mlp_sample':
net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'strided_conv':
net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
else:
raise NotImplementedError('projection model name [%s] is not recognized' % netF)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
        with the parameter <n_layers_D> (default=3, as used in [basic] (PatchGAN)).
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == "patch":
net = PatchDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids,
initialize_weights=('stylegan2' not in netD))
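# Illustrative sketch (not from the original repository): the default 'basic' discriminator is a
# 70x70 PatchGAN, so its output is a one-channel map of per-patch real/fake logits rather than a
# single scalar per image.
def _example_define_D():
    netD = define_D(input_nc=3, ndf=64, netD='basic', norm='instance', gpu_ids=[])
    pred = netD(torch.randn(1, 3, 256, 256))
    return pred.shape  # (1, 1, 30, 30) for a 256x256 input with the default settings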
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp', 'nonsaturating']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
bs = prediction.size(0)
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
elif self.gan_mode == 'nonsaturating':
if target_is_real:
loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
else:
loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
return loss
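# Illustrative sketch (not from the original repository): GANLoss expands the scalar real/fake
# label to the shape of the prediction, so the same criterion works for PatchGAN maps.
def _example_gan_loss():
    criterion = GANLoss('lsgan')
    pred_fake = torch.randn(2, 1, 30, 30)       # e.g. a PatchGAN prediction map for fake images
    loss_D_fake = criterion(pred_fake, False)   # discriminator: fake patches should score 0
    loss_G = criterion(pred_fake, True)         # generator: tries to make D predict 1
    return loss_D_fake, loss_G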
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float) -- the constant used in the formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)   # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
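# Illustrative sketch (not from the original repository): the WGAN-GP term pushes the gradient
# norm of D at points interpolated between real and fake samples towards `constant`. The tiny
# sequential discriminator below is a hypothetical stand-in for a real netD.
def _example_gradient_penalty():
    netD = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.LeakyReLU(0.2), nn.Conv2d(8, 1, 3, padding=1))
    real = torch.randn(4, 3, 16, 16)
    fake = torch.randn(4, 3, 16, 16)
    penalty, gradients = cal_gradient_penalty(netD, real, fake, device='cpu', type='mixed')
    return penalty  # scalar tensor; gradients has shape (4, 3*16*16)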
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm + 1e-7)
return out
class PoolingF(nn.Module):
def __init__(self):
super(PoolingF, self).__init__()
model = [nn.AdaptiveMaxPool2d(1)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
return self.l2norm(self.model(x))
class ReshapeF(nn.Module):
def __init__(self):
super(ReshapeF, self).__init__()
model = [nn.AdaptiveAvgPool2d(4)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
x = self.model(x)
x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
return self.l2norm(x_reshape)
class StridedConvF(nn.Module):
def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]):
super().__init__()
# self.conv1 = nn.Conv2d(256, 128, 3, stride=2)
# self.conv2 = nn.Conv2d(128, 64, 3, stride=1)
self.l2_norm = Normalize(2)
self.mlps = {}
self.moving_averages = {}
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, x):
C, H = x.shape[1], x.shape[2]
n_down = int(np.rint(np.log2(H / 32)))
mlp = []
for i in range(n_down):
mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2))
mlp.append(nn.ReLU())
C = max(C // 2, 64)
mlp.append(nn.Conv2d(C, 64, 3))
mlp = nn.Sequential(*mlp)
init_net(mlp, self.init_type, self.init_gain, self.gpu_ids)
return mlp
def update_moving_average(self, key, x):
if key not in self.moving_averages:
self.moving_averages[key] = x.detach()
self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001
def forward(self, x, use_instance_norm=False):
C, H = x.shape[1], x.shape[2]
key = '%d_%d' % (C, H)
if key not in self.mlps:
self.mlps[key] = self.create_mlp(x)
self.add_module("child_%s" % key, self.mlps[key])
mlp = self.mlps[key]
x = mlp(x)
self.update_moving_average(key, x)
x = x - self.moving_averages[key]
if use_instance_norm:
x = F.instance_norm(x)
return self.l2_norm(x)
class PatchSampleF(nn.Module):
def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
# potential issues: currently, we use the same patch_ids for multiple images in the batch
super(PatchSampleF, self).__init__()
self.l2norm = Normalize(2)
self.use_mlp = use_mlp
self.nc = nc # hard-coded
self.mlp_init = False
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, feats):
for mlp_id, feat in enumerate(feats):
input_nc = feat.shape[1]
mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
mlp.cuda()
setattr(self, 'mlp_%d' % mlp_id, mlp)
init_net(self, self.init_type, self.init_gain, self.gpu_ids)
self.mlp_init = True
def forward(self, feats, num_patches=64, patch_ids=None, attn_mats=None):
return_ids = []
return_feats = []
return_mats = []
k_s = 7 # kernel size in unfold
if self.use_mlp and not self.mlp_init:
self.create_mlp(feats)
for feat_id, feat in enumerate(feats):
B, C, H, W = feat.shape[0], feat.shape[1], feat.shape[2], feat.shape[3]
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # B*HW*C
if num_patches > 0:
if feat_id < 3:
if patch_ids is not None:
patch_id = patch_ids[feat_id]
else:
patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device) # random id in [0, HW]
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
attn_qs = torch.zeros(1).to(feat.device)
else:
if attn_mats is not None:
attn_qs = attn_mats[feat_id]
else:
feat_local = F.unfold(feat, kernel_size=k_s, stride=1, padding=3) # (B, ks*ks*C, L)
L = feat_local.shape[2]
feat_k_local = feat_local.permute(0, 2, 1).reshape(B, L, k_s*k_s, C).flatten(0, 1) # (B*L, ks*ks, C)
feat_q_local = feat_reshape.reshape(B*L, C, 1)
dots_local = torch.bmm(feat_k_local, feat_q_local) # (B*L, ks*ks, 1)
attn_local = dots_local.softmax(dim=1)
attn_local = attn_local.reshape(B, L, -1) # (B, L, ks*ks)
prob = -torch.log(attn_local)
prob = torch.where(torch.isinf(prob), torch.full_like(prob, 0), prob)
entropy = torch.sum(torch.mul(attn_local, prob), dim=2)
_, index = torch.sort(entropy)
patch_id = index[:, :num_patches]
feat_q_global = feat_reshape
feat_k_global = feat_reshape.permute(0, 2, 1)
dots_global = torch.bmm(feat_q_global, feat_k_global) # (B, HW, HW)
attn_global = dots_global.softmax(dim=2)
attn_qs = attn_global[torch.arange(B)[:, None], patch_id, :]
feat_reshape = torch.bmm(attn_qs, feat_reshape) # (B, n_p, C)
x_sample = feat_reshape.flatten(0, 1)
patch_id = []
else:
x_sample = feat_reshape
patch_id = []
if self.use_mlp:
mlp = getattr(self, 'mlp_%d' % feat_id)
x_sample = mlp(x_sample)
return_ids.append(patch_id)
return_mats.append(attn_qs)
x_sample = self.l2norm(x_sample)
if num_patches == 0:
x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
return_feats.append(x_sample)
return return_feats, return_ids, return_mats
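# Illustrative sketch (not from the original repository): a standalone version of the query
# selection performed in PatchSampleF.forward above. Attention over a k_s x k_s window is
# computed for every position, its entropy measures how discriminative that query is, and the
# num_patches lowest-entropy positions are kept as rows of the global attention matrix. The
# unfold output is split channel-major here, which differs slightly from the indexing above.
def _example_query_selection(feat, num_patches=64, k_s=7):
    B, C, H, W = feat.shape
    feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2)                     # (B, HW, C)
    feat_local = F.unfold(feat, kernel_size=k_s, stride=1, padding=k_s // 2)  # (B, C*ks*ks, HW)
    L = feat_local.shape[2]
    feat_k_local = feat_local.view(B, C, k_s * k_s, L).permute(0, 3, 2, 1).reshape(B * L, k_s * k_s, C)
    feat_q_local = feat_reshape.reshape(B * L, C, 1)
    attn_local = torch.bmm(feat_k_local, feat_q_local).softmax(dim=1).reshape(B, L, k_s * k_s)
    entropy = -(attn_local * attn_local.clamp_min(1e-12).log()).sum(dim=2)    # (B, HW)
    patch_id = entropy.argsort(dim=1)[:, :num_patches]                        # most reliable queries
    attn_global = torch.bmm(feat_reshape, feat_reshape.permute(0, 2, 1)).softmax(dim=2)
    attn_qs = attn_global[torch.arange(B)[:, None], patch_id, :]              # (B, num_patches, HW)
    return torch.bmm(attn_qs, feat_reshape)                                   # (B, num_patches, C)
# e.g. _example_query_selection(torch.randn(1, 8, 16, 16), num_patches=32) has shape (1, 32, 8)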
class G_Resnet(nn.Module):
def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64,
norm=None, nl_layer=None):
super(G_Resnet, self).__init__()
n_downsample = num_downs
pad_type = 'reflect'
self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type)
if nz == 0:
self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
else:
self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
def decode(self, content, style=None):
return self.dec(content, style)
def forward(self, image, style=None, nce_layers=[], encode_only=False):
content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
if encode_only:
return feats
else:
images_recon = self.decode(content, style)
if len(nce_layers) > 0:
return images_recon, feats
else:
return images_recon
##################################################################################
# Encoder and Decoders
##################################################################################
class E_adaIN(nn.Module):
def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
norm=None, nl_layer=None, vae=False):
# style encoder
super(E_adaIN, self).__init__()
self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)
def forward(self, image):
style = self.enc_style(image)
return style
class StyleEncoder(nn.Module):
def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
super(StyleEncoder, self).__init__()
self.vae = vae
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
for i in range(2):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
for i in range(n_downsample - 2):
self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling
if self.vae:
self.fc_mean = nn.Linear(dim, style_dim) # , 1, 1, 0)
self.fc_var = nn.Linear(dim, style_dim) # , 1, 1, 0)
else:
self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x):
if self.vae:
output = self.model(x)
output = output.view(x.size(0), -1)
output_mean = self.fc_mean(output)
output_var = self.fc_var(output)
return output_mean, output_var
else:
return self.model(x).view(x.size(0), -1)
class ContentEncoder(nn.Module):
def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
super(ContentEncoder, self).__init__()
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
# downsampling blocks
for i in range(n_downsample):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
# residual blocks
self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x, nce_layers=[], encode_only=False):
if len(nce_layers) > 0:
feat = x
feats = []
for layer_id, layer in enumerate(self.model):
feat = layer(feat)
if layer_id in nce_layers:
feats.append(feat)
if layer_id == nce_layers[-1] and encode_only:
return None, feats
return feat, feats
else:
return self.model(x), None
class Decoder_all(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder_all, self).__init__()
# AdaIN residual blocks
self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)
self.n_blocks = 0
# upsampling blocks
for i in range(n_upsample):
block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block))
self.n_blocks += 1
dim //= 2
# use reflection padding in the last conv layer
setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect'))
self.n_blocks += 1
def forward(self, x, y=None):
if y is not None:
output = self.resnet_block(cat_feature(x, y))
for n in range(self.n_blocks):
block = getattr(self, 'block_{:d}'.format(n))
if n > 0:
output = block(cat_feature(output, y))
else:
output = block(output)
return output
class Decoder(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder, self).__init__()
self.model = []
# AdaIN residual blocks
self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)]
# upsampling blocks
for i in range(n_upsample):
if i == 0:
input_dim = dim + nz
else:
input_dim = dim
self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
dim //= 2
# use reflection padding in the last conv layer
self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')]
self.model = nn.Sequential(*self.model)
def forward(self, x, y=None):
if y is not None:
return self.model(cat_feature(x, y))
else:
return self.model(x)
##################################################################################
# Sequential Models
##################################################################################
class ResBlocks(nn.Module):
def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlocks, self).__init__()
self.model = []
for i in range(num_blocks):
self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
##################################################################################
# Basic Blocks
##################################################################################
def cat_feature(x, y):
y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(
y.size(0), y.size(1), x.size(2), x.size(3))
x_cat = torch.cat([x, y_expand], 1)
return x_cat
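# A minimal shape sketch of cat_feature (illustration only), assuming a 4-D
# feature map `x` and a per-sample vector `y` such as a style/noise code:
# `y` is broadcast over the spatial dimensions and concatenated along channels.
def _cat_feature_shape_example():
    import torch
    x = torch.randn(2, 64, 32, 32)   # (B, C_x, H, W) feature map
    y = torch.randn(2, 8)            # (B, C_y) per-sample vector
    out = cat_feature(x, y)
    assert out.shape == (2, 64 + 8, 32, 32)
    return out.shape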
class ResBlock(nn.Module):
def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlock, self).__init__()
model = []
model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)]
model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)]
self.model = nn.Sequential(*model)
def forward(self, x):
residual = x
out = self.model(x)
out += residual
return out
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride,
padding=0, norm='none', activation='relu', pad_type='zero'):
super(Conv2dBlock, self).__init__()
self.use_bias = True
# initialize padding
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, "Unsupported padding type: {}".format(pad_type)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
# initialize convolution
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
def forward(self, x):
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
##################################################################################
# Normalization layers
##################################################################################
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
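# A minimal numerical sketch of this LayerNorm variant (illustration only),
# assuming a 4-D input: each sample is normalized by the mean/std of all of
# its C*H*W elements (not per-channel), then optionally scaled/shifted per
# channel when affine=True.
def _layer_norm_example():
    import torch
    ln = LayerNorm(num_features=16, affine=False)
    x = torch.randn(4, 16, 8, 8)
    y = ln(x)
    assert y.shape == x.shape
    # With affine=False each sample comes out with (approximately) zero mean.
    assert torch.allclose(y.view(4, -1).mean(1), torch.zeros(4), atol=1e-4)
    return y.shape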
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.opt = opt
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if no_antialias_up:
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1, # output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, layers=[], encode_only=False):
if -1 in layers:
layers.append(len(self.model))
if len(layers) > 0:
feat = input
feats = []
for layer_id, layer in enumerate(self.model):
# print(layer_id, layer)
feat = layer(feat)
if layer_id in layers:
# print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
feats.append(feat)
else:
# print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
pass
if layer_id == layers[-1] and encode_only:
# print('encoder only return features')
return feats # return intermediate features alone; stop in the last layers
return feat, feats # return both output and intermediate features
else:
"""Standard forward"""
fake = self.model(input)
return fake
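# A minimal sketch of the two forward modes above (illustration only), assuming
# a 3-channel input and the default anti-aliased Downsample/Upsample blocks
# defined earlier in this file. With `layers` given and encode_only=True the
# call returns only the requested intermediate features (used by the patch-wise
# contrastive loss in this codebase); otherwise it returns the translated image.
def _resnet_generator_forward_example():
    import torch
    net = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=6)
    x = torch.randn(1, 3, 128, 128)
    fake = net(x)                                        # standard image-to-image forward
    feats = net(x, layers=[0, 4, 8], encode_only=True)   # list of intermediate feature maps
    return fake.shape, [f.shape for f in feats]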
class ResnetDecoder(nn.Module):
"""Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based decoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetDecoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = []
n_downsampling = 2
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if(no_antialias):
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetEncoder(nn.Module):
"""Resnet-based encoder that consists of a few downsampling + several Resnet blocks
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based encoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetEncoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                                image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
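# A minimal usage sketch (illustration only) of how UnetGenerator stacks the
# skip blocks above, assuming num_downs=7 so that, as the docstring notes, a
# 128x128 input reaches 1x1 at the bottleneck. The spatial size is assumed to
# be divisible by 2**num_downs.
def _unet_generator_example():
    import torch
    net = UnetGenerator(input_nc=3, output_nc=3, num_downs=7, ngf=64)
    x = torch.randn(2, 3, 128, 128)
    y = net(x)                      # same spatial size, tanh output in [-1, 1]
    assert y.shape == (2, 3, 128, 128)
    return y.shape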
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
if(no_antialias):
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
else:
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
if(no_antialias):
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
else:
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
Downsample(ndf * nf_mult)]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
class PatchDiscriminator(NLayerDiscriminator):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)
def forward(self, input):
B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
size = 16
Y = H // size
X = W // size
input = input.view(B, C, Y, size, X, size)
input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
return super().forward(input)
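# A minimal shape sketch (illustration only) of the patch splitting above,
# assuming a 256x256 input: the image is cut into non-overlapping 16x16 patches
# that are folded into the batch dimension before running the 2-layer PatchGAN
# parent, so predictions are made per local patch.
def _patch_discriminator_example():
    import torch
    netD = PatchDiscriminator(input_nc=3, ndf=64)
    x = torch.randn(1, 3, 256, 256)      # reshaped internally to (1 * 16 * 16, 3, 16, 16)
    out = netD(x)                        # one small prediction map per patch
    return out.shape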
class GroupedChannelNorm(nn.Module):
def __init__(self, num_groups):
super().__init__()
self.num_groups = num_groups
def forward(self, x):
shape = list(x.shape)
new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]
x = x.view(*new_shape)
mean = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
x_norm = (x - mean) / (std + 1e-7)
return x_norm.view(*shape)
| 61,819 | 42.443429 | 187 | py |
query-selected-attention | query-selected-attention-main/util/image_pool.py | import random
import torch
class ImagePool():
"""This class implements an image buffer that stores previously generated images.
This buffer enables us to update discriminators using a history of generated images
rather than the ones produced by the latest generators.
"""
def __init__(self, pool_size):
"""Initialize the ImagePool class
Parameters:
pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
"""
self.pool_size = pool_size
if self.pool_size > 0: # create an empty pool
self.num_imgs = 0
self.images = []
def query(self, images):
"""Return an image from the pool.
Parameters:
images: the latest generated images from the generator
Returns images from the buffer.
        With probability 0.5, the buffer will return the input images.
        With probability 0.5, the buffer will return images previously stored in the buffer,
        and insert the current images into the buffer.
"""
if self.pool_size == 0: # if the buffer size is 0, do nothing
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else: # by another 50% chance, the buffer will return the current image
return_images.append(image)
return_images = torch.cat(return_images, 0) # collect all the images and return
return return_images
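# A minimal usage sketch (illustration only), assuming 4-D image tensors as
# produced by a generator. Once the pool is full, roughly half of the returned
# images are older samples swapped out of the history buffer.
def _image_pool_example():
    import torch
    pool = ImagePool(pool_size=50)
    fake_images = torch.randn(4, 3, 64, 64)   # a fresh batch from the generator
    mixed = pool.query(fake_images)           # same shape, possibly older samples
    assert mixed.shape == fake_images.shape
    return mixed.shape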
| 2,226 | 39.490909 | 140 | py |
query-selected-attention | query-selected-attention-main/util/util.py | """This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def copyconf(default_opt, **kwargs):
conf = Namespace(**vars(default_opt))
for key in kwargs:
setattr(conf, key, kwargs[key])
return conf
def find_class_in_module(target_cls_name, module):
target_cls_name = target_cls_name.replace('_', '').lower()
clslib = importlib.import_module(module)
cls = None
for name, clsobj in clslib.__dict__.items():
if name.lower() == target_cls_name:
cls = clsobj
assert cls is not None, "In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name)
return cls
def tensor2im(input_image, imtype=np.uint8):
    """Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].clamp(-1.0, 1.0).cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
    """Calculate and print the mean of the average absolute gradients
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio is None:
pass
elif aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
elif aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
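# A minimal round trip through tensor2im/save_image (illustration only),
# assuming a network output scaled to [-1, 1]; the output path below is
# hypothetical.
def _tensor2im_example(out_path='example_fake.png'):
    import torch
    fake = torch.rand(1, 3, 64, 64) * 2 - 1   # pretend generator output in [-1, 1]
    image_numpy = tensor2im(fake)             # (64, 64, 3) uint8 RGB array
    assert image_numpy.shape == (64, 64, 3)
    save_image(image_numpy, out_path)
    return image_numpy.dtype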
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
    """create a single empty directory if it doesn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
def correct_resize_label(t, size):
device = t.device
t = t.detach().cpu()
resized = []
for i in range(t.size(0)):
one_t = t[i, :1]
one_np = np.transpose(one_t.numpy().astype(np.uint8), (1, 2, 0))
one_np = one_np[:, :, 0]
one_image = Image.fromarray(one_np).resize(size, Image.NEAREST)
resized_t = torch.from_numpy(np.array(one_image)).long()
resized.append(resized_t)
return torch.stack(resized, dim=0).to(device)
def correct_resize(t, size, mode=Image.BICUBIC):
device = t.device
t = t.detach().cpu()
resized = []
for i in range(t.size(0)):
one_t = t[i:i + 1]
one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.BICUBIC)
resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0
resized.append(resized_t)
return torch.stack(resized, dim=0).to(device)
| 5,135 | 29.754491 | 145 | py |
query-selected-attention | query-selected-attention-main/data/base_dataset.py | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
class BaseDataset(data.Dataset, ABC):
"""This class is an abstract base class (ABC) for datasets.
To create a subclass, you need to implement the following four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
self.opt = opt
self.root = opt.dataroot
self.current_epoch = 0
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def __len__(self):
"""Return the total number of images in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
            index -- a random integer for data indexing
        Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
"""
pass
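# A minimal, hypothetical subclass sketch (illustration only) showing the four
# methods the class docstring above asks for; a real dataset would read images
# from opt.dataroot instead of returning random tensors.
class _ToyDataset(BaseDataset):
    @staticmethod
    def modify_commandline_options(parser, is_train):
        return parser
    def __init__(self, opt):
        BaseDataset.__init__(self, opt)
        self.length = 8
    def __len__(self):
        return self.length
    def __getitem__(self, index):
        import torch
        return {'A': torch.randn(3, 64, 64), 'A_paths': 'toy_%d' % index}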
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.preprocess == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if 'fixsize' in opt.preprocess:
transform_list.append(transforms.Resize(params["size"], method))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
if "gta2cityscapes" in opt.dataroot:
osize[0] = opt.load_size // 2
transform_list.append(transforms.Resize(osize, method))
elif 'scale_width' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
elif 'scale_shortside' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, opt.crop_size, method)))
if 'zoom' in opt.preprocess:
if params is None:
transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method)))
else:
transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method, factor=params["scale_factor"])))
if 'crop' in opt.preprocess:
if params is None or 'crop_pos' not in params:
transform_list.append(transforms.RandomCrop(opt.crop_size))
else:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
if 'patch' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __patch(img, params['patch_index'], opt.crop_size)))
if 'trim' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __trim(img, opt.crop_size)))
# if opt.preprocess == 'none':
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
if not opt.no_flip:
if params is None or 'flip' not in params:
transform_list.append(transforms.RandomHorizontalFlip())
elif 'flip' in params:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
if convert:
transform_list += [transforms.ToTensor()]
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
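# A minimal sketch of building a training transform (illustration only): the
# Namespace below only fills in the fields get_params/get_transform actually
# read for the 'resize_and_crop' pipeline; a real run would use the full option
# parser, and the dataroot value here is hypothetical.
def _get_transform_example():
    from argparse import Namespace
    from PIL import Image
    opt = Namespace(preprocess='resize_and_crop', load_size=286, crop_size=256,
                    no_flip=False, dataroot='./datasets/horse2zebra')
    img = Image.new('RGB', (300, 200))
    params = get_params(opt, img.size)      # random crop position + flip flag
    transform = get_transform(opt, params)
    tensor = transform(img)                 # 3 x crop_size x crop_size, in [-1, 1]
    assert tuple(tensor.shape) == (3, 256, 256)
    return tensor.shape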
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if h == oh and w == ow:
return img
return img.resize((w, h), method)
def __random_zoom(img, target_width, crop_width, method=Image.BICUBIC, factor=None):
if factor is None:
zoom_level = np.random.uniform(0.8, 1.0, size=[2])
else:
zoom_level = (factor[0], factor[1])
iw, ih = img.size
zoomw = max(crop_width, iw * zoom_level[0])
zoomh = max(crop_width, ih * zoom_level[1])
img = img.resize((int(round(zoomw)), int(round(zoomh))), method)
return img
def __scale_shortside(img, target_width, crop_width, method=Image.BICUBIC):
ow, oh = img.size
shortside = min(ow, oh)
if shortside >= target_width:
return img
else:
scale = target_width / shortside
return img.resize((round(ow * scale), round(oh * scale)), method)
def __trim(img, trim_width):
ow, oh = img.size
if ow > trim_width:
xstart = np.random.randint(ow - trim_width)
xend = xstart + trim_width
else:
xstart = 0
xend = ow
if oh > trim_width:
ystart = np.random.randint(oh - trim_width)
yend = ystart + trim_width
else:
ystart = 0
yend = oh
return img.crop((xstart, ystart, xend, yend))
def __scale_width(img, target_width, crop_width, method=Image.BICUBIC):
ow, oh = img.size
if ow == target_width and oh >= crop_width:
return img
w = target_width
h = int(max(target_width * oh / ow, crop_width))
return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __patch(img, index, size):
ow, oh = img.size
nw, nh = ow // size, oh // size
roomx = ow - nw * size
roomy = oh - nh * size
startx = np.random.randint(int(roomx) + 1)
starty = np.random.randint(int(roomy) + 1)
index = index % (nw * nh)
ix = index // nh
iy = index % nh
gridx = startx + ix * size
gridy = starty + iy * size
return img.crop((gridx, gridy, gridx + size, gridy + size))
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __print_size_warning(ow, oh, w, h):
"""Print warning information about image size(only print once)"""
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
| 8,026 | 33.748918 | 153 | py |
query-selected-attention | query-selected-attention-main/data/image_folder.py | """A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
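# A minimal usage sketch (illustration only), assuming a hypothetical folder of
# images at ./datasets/horse2zebra/trainA: make_dataset walks the directory
# recursively, and ImageFolder loads RGB PIL images (optionally with paths).
def _image_folder_example(root='./datasets/horse2zebra/trainA'):
    import torchvision.transforms as transforms
    dataset = ImageFolder(root, transform=transforms.ToTensor(), return_paths=True)
    img, path = dataset[0]
    return img.shape, path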
| 1,941 | 27.985075 | 122 | py |
query-selected-attention | query-selected-attention-main/data/__init__.py | """This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
"""Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
"""
data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()
return dataset
class CustomDatasetDataLoader():
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
def __init__(self, opt):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
dataset_class = find_dataset_using_name(opt.dataset_mode)
self.dataset = dataset_class(opt)
print("dataset [%s] was created" % type(self.dataset).__name__)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads),
drop_last=True
)
def set_epoch(self, epoch):
self.dataset.current_epoch = epoch
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
"""Return a batch of data"""
for i, data in enumerate(self.dataloader):
if i * self.opt.batch_size >= self.opt.max_dataset_size:
break
yield data
| 3,667 | 36.050505 | 176 | py |
bottom-up-attention | bottom-up-attention-master/tools/compress_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compress a Fast R-CNN network using truncated SVD."""
import _init_paths
import caffe
import argparse
import numpy as np
import os, sys
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Compress a Fast R-CNN network')
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the uncompressed network',
default=None, type=str)
parser.add_argument('--def-svd', dest='prototxt_svd',
help='prototxt file defining the SVD compressed network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to compress',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def compress_weights(W, l):
"""Compress the weight matrix W of an inner product (fully connected) layer
using truncated SVD.
Parameters:
W: N x M weights matrix
l: number of singular values to retain
Returns:
Ul, L: matrices such that W \approx Ul*L
"""
# numpy doesn't seem to have a fast truncated SVD algorithm...
# this could be faster
U, s, V = np.linalg.svd(W, full_matrices=False)
Ul = U[:, :l]
sl = s[:l]
Vl = V[:l, :]
L = np.dot(np.diag(sl), Vl)
return Ul, L
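# A small numerical check of the truncated-SVD factorization above
# (illustration only): W is approximated by Ul.dot(L), where Ul is N x l and
# L is l x M, so one large fc layer can be replaced by two smaller ones (the
# fc6_L/fc6_U and fc7_L/fc7_U pairs installed in main below).
def _compress_weights_example():
    W = np.random.randn(256, 512)
    Ul, L = compress_weights(W, l=64)
    approx = np.dot(Ul, L)
    rel_err = np.linalg.norm(W - approx) / np.linalg.norm(W)
    print('Ul: {}, L: {}, relative error: {:.3f}'.format(Ul.shape, L.shape, rel_err))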
def main():
args = parse_args()
# prototxt = 'models/VGG16/test.prototxt'
# caffemodel = 'snapshots/vgg16_fast_rcnn_iter_40000.caffemodel'
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
# prototxt_svd = 'models/VGG16/svd/test_fc6_fc7.prototxt'
# caffemodel = 'snapshots/vgg16_fast_rcnn_iter_40000.caffemodel'
net_svd = caffe.Net(args.prototxt_svd, args.caffemodel, caffe.TEST)
print('Uncompressed network {} : {}'.format(args.prototxt, args.caffemodel))
print('Compressed network prototxt {}'.format(args.prototxt_svd))
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svd'
out_dir = os.path.dirname(args.caffemodel)
# Compress fc6
if net_svd.params.has_key('fc6_L'):
l_fc6 = net_svd.params['fc6_L'][0].data.shape[0]
print(' fc6_L bottleneck size: {}'.format(l_fc6))
# uncompressed weights and biases
W_fc6 = net.params['fc6'][0].data
B_fc6 = net.params['fc6'][1].data
print(' compressing fc6...')
Ul_fc6, L_fc6 = compress_weights(W_fc6, l_fc6)
assert(len(net_svd.params['fc6_L']) == 1)
# install compressed matrix factors (and original biases)
net_svd.params['fc6_L'][0].data[...] = L_fc6
net_svd.params['fc6_U'][0].data[...] = Ul_fc6
net_svd.params['fc6_U'][1].data[...] = B_fc6
out += '_fc6_{}'.format(l_fc6)
# Compress fc7
if net_svd.params.has_key('fc7_L'):
l_fc7 = net_svd.params['fc7_L'][0].data.shape[0]
        print(' fc7_L bottleneck size: {}'.format(l_fc7))
W_fc7 = net.params['fc7'][0].data
B_fc7 = net.params['fc7'][1].data
print(' compressing fc7...')
Ul_fc7, L_fc7 = compress_weights(W_fc7, l_fc7)
assert(len(net_svd.params['fc7_L']) == 1)
net_svd.params['fc7_L'][0].data[...] = L_fc7
net_svd.params['fc7_U'][0].data[...] = Ul_fc7
net_svd.params['fc7_U'][1].data[...] = B_fc7
out += '_fc7_{}'.format(l_fc7)
filename = '{}/{}.caffemodel'.format(out_dir, out)
net_svd.save(filename)
    print('Wrote svd model to: {:s}'.format(filename))
if __name__ == '__main__':
main()
| 3,918 | 30.103175 | 81 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_faster_rcnn_alt_opt.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ZF")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(net_name):
# Faster R-CNN Alternating Optimization
n = 'faster_rcnn_alt_opt'
# Solver for each training stage
solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
[net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
[net_name, n, 'stage2_rpn_solver60k80k.pt'],
[net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
max_iters = [80000, 40000, 80000, 40000]
# max_iters = [100, 100, 100, 100]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
return solvers, max_iters, rpn_test_prototxt
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print 'roidb len: {}'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering
cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Generate proposals on the imdb
rpn_proposals = imdb_proposals(rpn_net, imdb)
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None):
"""Train a Fast R-CNN using proposals generated by an RPN.
"""
cfg.TRAIN.HAS_RPN = False # not generating prosals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 2
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train Fast R-CNN
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
fast_rcnn_model_path = model_paths[-1]
# Send Fast R-CNN model path over the multiprocessing queue
queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
# queue for communicated results between processes
mp_queue = mp.Queue()
# solves, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[1],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, init from stage 1 Fast R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(fast_rcnn_stage1_out['model_path']),
solver=solvers[2],
max_iters=max_iters[2],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rpn_stage2_out['model_path']),
solver=solvers[3],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage2_out = mp_queue.get()
p.join()
# Create final model (just a copy of the last stage)
final_path = os.path.join(
os.path.dirname(fast_rcnn_stage2_out['model_path']),
args.net_name + '_faster_rcnn_final.caffemodel')
print 'cp {} -> {}'.format(
fast_rcnn_stage2_out['model_path'], final_path)
shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
print 'Final model: {}'.format(final_path)
| 12,871 | 37.423881 | 80 | py |
bottom-up-attention | bottom-up-attention-master/tools/test_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import _init_paths
from fast_rcnn.test import test_net,test_net_with_gt_boxes
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--vis', dest='vis', help='visualize detections',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=400, type=int)
parser.add_argument('--rpn_file', dest='rpn_file',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, caffe.TEST, weights=args.caffemodel)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
if cfg.TEST.PROPOSAL_METHOD == 'rpn':
imdb.config['rpn_file'] = args.rpn_file
test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
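    # When attribute or relation heads are enabled, evaluation is repeated below
    # with the "_gt" prototxt variant, which feeds ground-truth boxes to the
    # network so attribute/relation accuracy can be scored independently of
    # detection quality. (This reading is based on the prototxt naming and on
    # test_net_with_gt_boxes(); the actual logic lives in fast_rcnn.test.)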
if cfg.TEST.HAS_ATTRIBUTES or cfg.TEST.HAS_RELATIONS:
net = caffe.Net(args.prototxt.replace(".prototxt","_gt.prototxt"), caffe.TEST, weights=args.caffemodel)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
test_net_with_gt_boxes(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
| 3,742 | 35.696078 | 111 | py |
bottom-up-attention | bottom-up-attention-master/tools/_init_paths.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Set up paths for Fast R-CNN."""
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
# Add caffe to PYTHONPATH
caffe_path = osp.join(this_dir, '..', 'caffe', 'python')
add_path(caffe_path)
# Add lib to PYTHONPATH
lib_path = osp.join(this_dir, '..', 'lib')
add_path(lib_path)
| 627 | 23.153846 | 58 | py |
bottom-up-attention | bottom-up-attention-master/tools/demo_rfcn.py | #!/usr/bin/env python
# --------------------------------------------------------
# R-FCN
# Copyright (c) 2016 Yuwen Xiong
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuwen Xiong
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'ResNet-101': ('ResNet-101',
'resnet101_rfcn_final.caffemodel'),
'ResNet-50': ('ResNet-50',
'resnet50_rfcn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4:8]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [ResNet-101]',
choices=NETS.keys(), default='ResNet-101')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'rfcn_end2end', 'test_agnostic.prototxt')
caffemodel = os.path.join(cfg.DATA_DIR, 'rfcn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\n').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
'001763.jpg', '004545.jpg']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
| 4,938 | 31.708609 | 85 | py |
bottom-up-attention | bottom-up-attention-master/tools/demo.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import matplotlib
matplotlib.use('Agg')
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
plt.savefig(im_file.replace(".jpg", "_demo.jpg"))
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
'001763.jpg', '004545.jpg']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
| 5,123 | 31.846154 | 80 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_svms.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
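        # The multiplier returned below is TARGET_NORM / (mean L2 norm of the
        # sampled fc7 features), so rescaled features end up with an average
        # norm of roughly 20, mirroring the feature scaling used when training
        # the SVMs in traditional R-CNN.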
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
        # Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
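        # Reconstruct the (approximate) primal objective of the class-weighted,
        # L2-regularized hinge-loss SVM that LinearSVC just fit:
        #   0.5*||w||^2 + 0.5*b^2
        #       + C * pos_weight * sum_i max(0, 1 - s_i)   (positives)
        #       + C *              sum_j max(0, 1 + s_j)   (negatives)
        # The bias is penalized here because intercept_scaling folds it into the
        # weight vector, so liblinear regularizes it like any other weight.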
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
# Sanity check
scores_ret = (
X * 1.0 / self.feature_scale).dot(w.T * self.feature_scale) + b
assert np.allclose(scores, scores_ret[:, 0], atol=1e-5), \
"Scores from returned model don't match decision function"
return ((w * self.feature_scale, b), pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
| 13,480 | 37.081921 | 80 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_net_multi_gpu.py | #!/usr/bin/env python
# --------------------------------------------------------
# Written by Bharat Singh
# Modified version of py-R-FCN
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
import _init_paths
from fast_rcnn.train_multi_gpu import get_training_roidb, train_net_multi_gpu
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import datasets.imdb
import caffe
import argparse
import pprint
import numpy as np
import sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument("--gpu_id", type=str, default='0',
help="List of device ids.")
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
default=None, type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=40000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def combined_roidb(imdb_names):
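    # 'imdb_names' may chain several datasets with '+' (for example something
    # like 'voc_2007_trainval+voc_2012_trainval', assuming both names are
    # registered in datasets.factory); their roidbs are simply concatenated.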
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
imdb = datasets.imdb.imdb(imdb_names)
else:
imdb = get_imdb(imdb_names)
return imdb, roidb
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
gpu_id = args.gpu_id
gpu_list = gpu_id.split(',')
gpus = [int(i) for i in gpu_list]
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
#caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
imdb, roidb = combined_roidb(args.imdb_name)
print '{:d} roidb entries'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
train_net_multi_gpu(args.solver, roidb, output_dir,
pretrained_model=args.pretrained_model,
max_iter=args.max_iters, gpus=gpus)
| 3,684 | 32.5 | 78 | py |
bottom-up-attention | bottom-up-attention-master/tools/generate_tsv.py | #!/usr/bin/env python
"""Generate bottom-up attention features as a tsv file. Can use multiple gpus, each produces a
separate tsv file that can be merged later (e.g. by using merge_tsv function).
Modify the load_image_ids script as necessary for your data location. """
# Example:
# ./tools/generate_tsv.py --gpu 0,1,2,3,4,5,6,7 --cfg experiments/cfgs/faster_rcnn_end2end_resnet.yml --def models/vg/ResNet-101/faster_rcnn_end2end/test.prototxt --out test2014_resnet101_faster_rcnn_genome.tsv --net data/faster_rcnn_models/resnet101_faster_rcnn_final.caffemodel --split coco_test2014
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect,_get_blobs
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import caffe
import argparse
import pprint
import time, os, sys
import base64
import numpy as np
import cv2
import csv
from multiprocessing import Process
import random
import json
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']
# Settings for the number of features per image. To re-create pretrained features with 36 features
# per image, set both values to 36.
MIN_BOXES = 10
MAX_BOXES = 100
def load_image_ids(split_name):
''' Load a list of (path,image_id tuples). Modify this to suit your data locations. '''
split = []
if split_name == 'coco_test2014':
with open('/data/coco/annotations/image_info_test2014.json') as f:
data = json.load(f)
for item in data['images']:
image_id = int(item['id'])
filepath = os.path.join('/data/test2014/', item['file_name'])
split.append((filepath,image_id))
elif split_name == 'coco_test2015':
with open('/data/coco/annotations/image_info_test2015.json') as f:
data = json.load(f)
for item in data['images']:
image_id = int(item['id'])
filepath = os.path.join('/data/test2015/', item['file_name'])
split.append((filepath,image_id))
elif split_name == 'genome':
with open('/data/visualgenome/image_data.json') as f:
for item in json.load(f):
image_id = int(item['image_id'])
filepath = os.path.join('/data/visualgenome/', item['url'].split('rak248/')[-1])
split.append((filepath,image_id))
else:
print 'Unknown split'
return split
def get_detections_from_im(net, im_file, image_id, conf_thresh=0.2):
im = cv2.imread(im_file)
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
    # Keep the original boxes, don't worry about the regression bbox outputs
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
blobs, im_scales = _get_blobs(im, None)
cls_boxes = rois[:, 1:5] / im_scales[0]
cls_prob = net.blobs['cls_prob'].data
pool5 = net.blobs['pool5_flat'].data
# Keep only the best detections
max_conf = np.zeros((rois.shape[0]))
for cls_ind in range(1,cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
return {
'image_id': image_id,
'image_h': np.size(im, 0),
'image_w': np.size(im, 1),
'num_boxes' : len(keep_boxes),
'boxes': base64.b64encode(cls_boxes[keep_boxes]),
'features': base64.b64encode(pool5[keep_boxes])
}
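# A minimal reader sketch (not called anywhere in this script): it shows how the
# base64-encoded 'boxes' and 'features' fields written by get_detections_from_im
# can be decoded back into numpy arrays. The float32 dtype and per-row layout are
# assumptions that match what this script serializes; adjust them if your data
# differs.
def decode_tsv_item(item):
    num_boxes = int(item['num_boxes'])
    boxes = np.frombuffer(base64.b64decode(item['boxes']),
                          dtype=np.float32).reshape((num_boxes, -1))
    features = np.frombuffer(base64.b64decode(item['features']),
                             dtype=np.float32).reshape((num_boxes, -1))
    return boxes, features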
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id(s) to use',
default='0', type=str)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to use',
default=None, type=str)
parser.add_argument('--out', dest='outfile',
help='output filepath',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--split', dest='data_split',
help='dataset to use',
default='karpathy_train', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def generate_tsv(gpu_id, prototxt, weights, image_ids, outfile):
# First check if file exists, and if it is complete
wanted_ids = set([int(image_id[1]) for image_id in image_ids])
found_ids = set()
if os.path.exists(outfile):
with open(outfile) as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames = FIELDNAMES)
for item in reader:
found_ids.add(int(item['image_id']))
missing = wanted_ids - found_ids
if len(missing) == 0:
print 'GPU {:d}: already completed {:d}'.format(gpu_id, len(image_ids))
else:
print 'GPU {:d}: missing {:d}/{:d}'.format(gpu_id, len(missing), len(image_ids))
if len(missing) > 0:
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net = caffe.Net(prototxt, caffe.TEST, weights=weights)
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter = '\t', fieldnames = FIELDNAMES)
_t = {'misc' : Timer()}
count = 0
for im_file,image_id in image_ids:
if int(image_id) in missing:
_t['misc'].tic()
writer.writerow(get_detections_from_im(net, im_file, image_id))
_t['misc'].toc()
if (count % 100) == 0:
print 'GPU {:d}: {:d}/{:d} {:.3f}s (projected finish: {:.2f} hours)' \
.format(gpu_id, count+1, len(missing), _t['misc'].average_time,
_t['misc'].average_time*(len(missing)-count)/3600)
count += 1
def merge_tsvs():
test = ['/work/data/tsv/test2015/resnet101_faster_rcnn_final_test.tsv.%d' % i for i in range(8)]
outfile = '/work/data/tsv/merged.tsv'
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter = '\t', fieldnames = FIELDNAMES)
for infile in test:
with open(infile) as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
for item in reader:
try:
writer.writerow(item)
except Exception as e:
print e
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
gpu_id = args.gpu_id
gpu_list = gpu_id.split(',')
gpus = [int(i) for i in gpu_list]
print('Using config:')
pprint.pprint(cfg)
assert cfg.TEST.HAS_RPN
image_ids = load_image_ids(args.data_split)
random.seed(10)
random.shuffle(image_ids)
# Split image ids between gpus
image_ids = [image_ids[i::len(gpus)] for i in range(len(gpus))]
caffe.init_log()
caffe.log('Using devices %s' % str(gpus))
procs = []
for i,gpu_id in enumerate(gpus):
outfile = '%s.%d' % (args.outfile, gpu_id)
p = Process(target=generate_tsv,
args=(gpu_id, args.prototxt, args.caffemodel, image_ids[i], outfile))
p.daemon = True
p.start()
procs.append(p)
for p in procs:
p.join()
| 8,584 | 35.688034 | 301 | py |
bottom-up-attention | bottom-up-attention-master/tools/rpn_generate.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast/er/ R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Generate RPN proposals."""
import _init_paths
import numpy as np
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import cPickle
import caffe
import argparse
import pprint
import time, os, sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# RPN test settings
cfg.TEST.RPN_PRE_NMS_TOP_N = -1
cfg.TEST.RPN_POST_NMS_TOP_N = 2000
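    # (-1 disables the pre-NMS cap, i.e. every scored anchor is kept; NMS then
    #  reduces the set to the top 2000 proposals per image before they are
    #  pickled below.)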
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb_boxes = imdb_proposals(net, imdb)
output_dir = get_output_dir(imdb, net)
rpn_file = os.path.join(output_dir, net.name + '_rpn_proposals.pkl')
with open(rpn_file, 'wb') as f:
cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_file)
| 2,994 | 31.554348 | 78 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_rfcn_alt_opt_5stage.py | #!/usr/bin/env python
# --------------------------------------------------------
# R-FCN
# Copyright (c) 2016 Yuwen Xiong, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""Train a R-FCN network using alternating optimization.
This tool implements the alternating optimization algorithm described in the
NIPS 2015 Faster R-CNN paper ("Faster R-CNN: Towards Real-Time Object Detection
with Region Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian
Sun), applied here to R-FCN training.
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals, imdb_rpn_compute_stats
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a R-FCN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ResNet-101")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--imdb_test', dest='imdb_test_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--model', dest='model_name',
help='folder name of model',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(imdb_name, net_name, model_name):
# R-FCN Alternating Optimization
# Solver for each training stage
if imdb_name.startswith('coco'):
solvers = [[net_name, model_name, 'stage1_rpn_solver360k480k.pt'],
[net_name, model_name, 'stage1_rfcn_ohem_solver360k480k.pt'],
[net_name, model_name, 'stage2_rpn_solver360k480k.pt'],
[net_name, model_name, 'stage2_rfcn_ohem_solver360k480k.pt'],
[net_name, model_name, 'stage3_rpn_solver360k480k.pt']]
solvers = [os.path.join('.', 'models', 'coco', *s) for s in solvers]
# Iterations for each training stage
max_iters = [480000, 480000, 480000, 480000, 480000]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
'.', 'models', 'coco', net_name, model_name, 'rpn_test.pt')
else:
solvers = [[net_name, model_name, 'stage1_rpn_solver60k80k.pt'],
[net_name, model_name, 'stage1_rfcn_ohem_solver80k120k.pt'],
[net_name, model_name, 'stage2_rpn_solver60k80k.pt'],
[net_name, model_name, 'stage2_rfcn_ohem_solver80k120k.pt'],
[net_name, model_name, 'stage3_rpn_solver60k80k.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
max_iters = [80000, 120000, 80000, 120000, 80000]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, model_name, 'rpn_test.pt')
return solvers, max_iters, rpn_test_prototxt
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, output_cache=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to R-FCN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print 'roidb len: {}'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
final_caffemodel = os.path.join(output_dir, output_cache)
if os.path.exists(final_caffemodel):
queue.put({'model_path': final_caffemodel})
else:
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
shutil.copyfile(rpn_model_path, final_caffemodel)
queue.put({'model_path': final_caffemodel})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
    cfg.TEST.RPN_PRE_NMS_TOP_N = 6000     # keep the top 6000 scored boxes before NMS
cfg.TEST.RPN_POST_NMS_TOP_N = 300 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
# Generate proposals on the imdb
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
if not os.path.exists(rpn_proposals_path):
rpn_proposals = imdb_proposals(rpn_net, imdb)
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
queue.put({'proposal_path': rpn_proposals_path})
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
def train_rfcn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None, output_cache=None):
"""Train a R-FCN using proposals generated by an RPN.
"""
    cfg.TRAIN.HAS_RPN = False           # not generating proposals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train R-FCN
# Send R-FCN model path over the multiprocessing queue
final_caffemodel = os.path.join(output_dir, output_cache)
if os.path.exists(final_caffemodel):
queue.put({'model_path': final_caffemodel})
else:
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rfcn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
shutil.copyfile(rfcn_model_path, final_caffemodel)
queue.put({'model_path': final_caffemodel})
def rpn_compute_stats(queue=None, imdb_name=None, cfg=None, rpn_test_prototxt=None):
"""Compute mean stds for anchors
"""
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to R-FCN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
roidb, imdb = get_roidb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
mean_file = os.path.join(imdb.cache_path, imdb.name + '_means.npy')
std_file = os.path.join(imdb.cache_path, imdb.name + '_stds.npy')
if os.path.exists(mean_file) and os.path.exists(std_file):
means = np.load(mean_file)
stds = np.load(std_file)
else:
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, caffe.TEST)
# Generate proposals on the imdb
print 'start computing means/stds, it may take several minutes...'
if imdb_name.startswith('coco'):
means, stds = imdb_rpn_compute_stats(rpn_net, imdb, anchor_scales=(4, 8, 16, 32))
else:
means, stds = imdb_rpn_compute_stats(rpn_net, imdb, anchor_scales=(8, 16, 32))
np.save(mean_file, means)
np.save(std_file, stds)
queue.put({'means': means, 'stds': stds})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
# queue for communicated results between processes
mp_queue = mp.Queue()
# solves, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.imdb_name, args.net_name, args.model_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 0 RPN, compute normalization means and stds'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_compute_stats, kwargs=mp_kwargs)
p.start()
stage0_anchor_stats = mp_queue.get()
p.join()
cfg.TRAIN.RPN_NORMALIZE_MEANS = stage0_anchor_stats['means']
cfg.TRAIN.RPN_NORMALIZE_STDS = stage0_anchor_stats['stds']
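    # These per-anchor bbox-regression target means/stds are computed once here
    # and reused by every RPN training stage below, so all stages share the same
    # normalization of their regression targets.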
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg,
output_cache='stage1_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, generate test proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 R-FCN using RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[1],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'],
output_cache='stage1_rfcn_final.caffemodel')
p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
p.start()
rfcn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, init from stage1 R-FCN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rfcn_stage1_out['model_path']),
solver=solvers[2],
max_iters=max_iters[2],
cfg=cfg,
output_cache='stage2_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 R-FCN using Stage-2 RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[3],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'],
output_cache='stage2_rfcn_final.caffemodel')
p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
p.start()
rfcn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 3 RPN, init from stage 2 R-FCN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage3'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rfcn_stage2_out['model_path']),
solver=solvers[4],
max_iters=max_iters[4],
cfg=cfg,
output_cache='stage3_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage3_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 3 RPN, generate test proposals only'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage3_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage3_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print 'Final model: {}'.format(str(rfcn_stage2_out['model_path']))
print 'Final RPN: {}'.format(str(rpn_stage3_out['test_proposal_path']))
| 18,472 | 37.646444 | 103 | py |
bottom-up-attention | bottom-up-attention-master/tools/demo_vg.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import matplotlib
matplotlib.use('Agg')
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ['__background__']
with open(os.path.join(cfg.DATA_DIR, 'vg/objects_vocab.txt')) as f:
for object in f.readlines():
CLASSES.append(object.lower().strip())
ATTRS = []
with open(os.path.join(cfg.DATA_DIR, 'vg/attributes_vocab.txt')) as f:
for attr in f.readlines():
ATTRS.append(attr.lower().strip())
RELATIONS = []
with open(os.path.join(cfg.DATA_DIR, 'vg/relations_vocab.txt')) as f:
for rel in f.readlines():
RELATIONS.append(rel.lower().strip())
NETS = ['VGG']
MODELS = [
'faster_rcnn_end2end',
'faster_rcnn_end2end_attr',
'faster_rcnn_end2end_attr_rel',
'faster_rcnn_end2end_attr_rel_softmax_primed',
'faster_rcnn_end2end_attr_softmax_primed'
]
def vis_detections(ax, class_name, dets, attributes, rel_argmax, rel_score, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
for i in inds:
bbox = dets[i, :4]
score = dets[i, 4]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
if attributes is not None:
att = np.argmax(attributes[i])
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f} ({:s})'.format(class_name, score, ATTRS[att]),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
else:
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
#print class_name
#print 'Outgoing relation: %s' % RELATIONS[np.argmax(rel_score[i])]
ax.set_title(('detections with '
'p(object | box) >= {:.1f}').format(thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo_tuples(net, image_name):
"""Detect objects, attributes and relations in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
if attr_scores is not None:
print 'Found attribute scores'
if rel_scores is not None:
print 'Found relation scores'
rel_scores = rel_scores[:,1:] # drop no relation
rel_argmax = np.argmax(rel_scores, axis=1).reshape((boxes.shape[0],boxes.shape[0]))
rel_score = np.max(rel_scores, axis=1).reshape((boxes.shape[0],boxes.shape[0]))
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.1
NMS_THRESH = 0.05
ATTR_THRESH = 0.1
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im)
# Detections
det_indices = []
det_scores = []
det_objects = []
det_bboxes = []
det_attrs = []
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, NMS_THRESH))
dets = dets[keep, :]
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
if len(inds) > 0:
keep = keep[inds]
for k in keep:
det_indices.append(k)
det_bboxes.append(cls_boxes[k])
det_scores.append(cls_scores[k])
det_objects.append(cls)
if attr_scores is not None:
attr_inds = np.where(attr_scores[k][1:] >= ATTR_THRESH)[0]
det_attrs.append([ATTRS[ix] for ix in attr_inds])
else:
det_attrs.append([])
rel_score = rel_score[det_indices].T[det_indices].T
rel_argmax = rel_argmax[det_indices].T[det_indices].T
for i,(idx,score,obj,bbox,attr) in enumerate(zip(det_indices,det_scores,det_objects,det_bboxes,det_attrs)):
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
box_text = '{:s} {:.3f}'.format(obj, score)
if len(attr) > 0:
box_text += "(" + ",".join(attr) + ")"
ax.text(bbox[0], bbox[1] - 2,
box_text,
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
# Outgoing
score = np.max(rel_score[i])
ix = np.argmax(rel_score[i])
subject = det_objects[ix]
relation = RELATIONS[rel_argmax[i][ix]]
print 'Relation: %.2f %s -> %s -> %s' % (score, obj, relation, subject)
# Incoming
score = np.max(rel_score.T[i])
ix = np.argmax(rel_score.T[i])
subject = det_objects[ix]
relation = RELATIONS[rel_argmax[ix][i]]
print 'Relation: %.2f %s -> %s -> %s' % (score, subject, relation, obj)
ax.set_title(('detections with '
'p(object|box) >= {:.1f}').format(CONF_THRESH),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
plt.savefig('data/demo/'+im_file.split('/')[-1].replace(".jpg", "_demo.jpg"))
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
    if rel_scores is not None:
        # Reshape the pairwise relation predictions into (num_boxes, num_boxes)
        # maps so they can be indexed per kept detection below.
        rel_argmax = np.argmax(rel_scores, axis=1).reshape((boxes.shape[0], boxes.shape[0]))
        rel_score = np.max(rel_scores, axis=1).reshape((boxes.shape[0], boxes.shape[0]))
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.4
NMS_THRESH = 0.3
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
if attr_scores is not None:
attributes = attr_scores[keep]
else:
attributes = None
if rel_scores is not None:
rel_argmax_c = rel_argmax[keep]
rel_score_c = rel_score[keep]
else:
rel_argmax_c = None
rel_score_c = None
vis_detections(ax, cls, dets, attributes, rel_argmax_c, rel_score_c, thresh=CONF_THRESH)
plt.savefig('data/demo/'+im_file.split('/')[-1].replace(".jpg", "_demo.jpg"))
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='net', help='Network to use, e.g. VGG16',
choices=NETS, default='VGG16')
parser.add_argument('--model', dest='model', help='Model to use, e.g. faster_rcnn_end2end',
choices=MODELS, default='faster_rcnn_end2end_attr_rel_softmax_primed')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.ROOT_DIR, 'models/vg', args.net, args.model, 'test.prototxt')
caffemodel = os.path.join(cfg.ROOT_DIR, 'output/faster_rcnn_end2end/vg_train/vgg16_faster_rcnn_attr_rel_softmax_primed_heatmap_iter_250000.caffemodel')
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _, _, _= im_detect(net, im)
im_names = ['demo/000456.jpg',
'demo/000542.jpg',
'demo/001150.jpg',
'demo/001763.jpg',
'demo/004545.jpg',
'demo/2587.jpg',
'demo/2985.jpg',
'demo/3067.jpg',
'demo/3761.jpg',
'vg/VG_100K_2/2404579.jpg',
'vg/VG_100K/2323401.jpg',
'vg/VG_100K_2/2415196.jpg',
'vg/VG_100K_2/2403358.jpg',
'vg/VG_100K_2/2380967.jpg',
'vg/VG_100K_2/2393625.jpg',
'vg/VG_100K/2321134.jpg',
'vg/VG_100K/2319899.jpg',
'vg/VG_100K/1592589.jpg',
'vg/VG_100K_2/2400441.jpg',
'vg/VG_100K/2374686.jpg',
'vg/VG_100K/2372269.jpg',
'vg/VG_100K_2/2378526.jpg',
'vg/VG_100K_2/2403861.jpg',
]
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for {}'.format(im_name)
demo_tuples(net, im_name)
plt.show()
| 11,553 | 34.550769 | 155 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import datasets.imdb
import caffe
import argparse
import pprint
import numpy as np
import sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
default=None, type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=40000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def combined_roidb(imdb_names):
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
roidb = get_training_roidb(imdb)
return roidb
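    # imdb_names may combine several datasets with '+', e.g.
    # 'voc_2007_trainval+voc_2012_trainval'; their roidbs are concatenated.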
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
imdb = datasets.imdb.imdb(imdb_names)
else:
imdb = get_imdb(imdb_names)
return imdb, roidb
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
imdb, roidb = combined_roidb(args.imdb_name)
print '{:d} roidb entries'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
train_net(args.solver, roidb, output_dir,
pretrained_model=args.pretrained_model,
max_iters=args.max_iters)
| 3,747 | 32.168142 | 78 | py |
bottom-up-attention | bottom-up-attention-master/caffe/tools/extra/summarize.py | #!/usr/bin/env python
"""Net summarization tool.
This tool summarizes the structure of a net in a concise but comprehensive
tabular listing, taking a prototxt file as input.
Use this tool to check at a glance that the computation you've specified is the
computation you expect.
"""
from caffe.proto import caffe_pb2
from google import protobuf
import re
import argparse
# ANSI codes for coloring blobs (used cyclically)
COLORS = ['92', '93', '94', '95', '97', '96', '42', '43;30', '100',
'444', '103;30', '107;30']
DISCONNECTED_COLOR = '41'
def read_net(filename):
net = caffe_pb2.NetParameter()
with open(filename) as f:
protobuf.text_format.Parse(f.read(), net)
return net
def format_param(param):
out = []
if len(param.name) > 0:
out.append(param.name)
if param.lr_mult != 1:
out.append('x{}'.format(param.lr_mult))
if param.decay_mult != 1:
out.append('Dx{}'.format(param.decay_mult))
return ' '.join(out)
def printed_len(s):
return len(re.sub(r'\033\[[\d;]+m', '', s))
def print_table(table, max_width):
"""Print a simple nicely-aligned table.
table must be a list of (equal-length) lists. Columns are space-separated,
and as narrow as possible, but no wider than max_width. Text may overflow
columns; note that unlike string.format, this will not affect subsequent
columns, if possible."""
max_widths = [max_width] * len(table[0])
column_widths = [max(printed_len(row[j]) + 1 for row in table)
for j in range(len(table[0]))]
column_widths = [min(w, max_w) for w, max_w in zip(column_widths, max_widths)]
for row in table:
row_str = ''
right_col = 0
for cell, width in zip(row, column_widths):
right_col += width
row_str += cell + ' '
row_str += ' ' * max(right_col - printed_len(row_str), 0)
print row_str
def summarize_net(net):
disconnected_tops = set()
for lr in net.layer:
disconnected_tops |= set(lr.top)
disconnected_tops -= set(lr.bottom)
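    # After the loop, disconnected_tops holds blobs that are produced but never
    # consumed by any later layer; they are highlighted in the printed table.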
table = []
colors = {}
for lr in net.layer:
tops = []
for ind, top in enumerate(lr.top):
color = colors.setdefault(top, COLORS[len(colors) % len(COLORS)])
if top in disconnected_tops:
top = '\033[1;4m' + top
if len(lr.loss_weight) > 0:
top = '{} * {}'.format(lr.loss_weight[ind], top)
tops.append('\033[{}m{}\033[0m'.format(color, top))
top_str = ', '.join(tops)
bottoms = []
for bottom in lr.bottom:
color = colors.get(bottom, DISCONNECTED_COLOR)
bottoms.append('\033[{}m{}\033[0m'.format(color, bottom))
bottom_str = ', '.join(bottoms)
if lr.type == 'Python':
type_str = lr.python_param.module + '.' + lr.python_param.layer
else:
type_str = lr.type
# Summarize conv/pool parameters.
# TODO support rectangular/ND parameters
conv_param = lr.convolution_param
if (lr.type in ['Convolution', 'Deconvolution']
and len(conv_param.kernel_size) == 1):
arg_str = str(conv_param.kernel_size[0])
if len(conv_param.stride) > 0 and conv_param.stride[0] != 1:
arg_str += '/' + str(conv_param.stride[0])
if len(conv_param.pad) > 0 and conv_param.pad[0] != 0:
arg_str += '+' + str(conv_param.pad[0])
arg_str += ' ' + str(conv_param.num_output)
if conv_param.group != 1:
arg_str += '/' + str(conv_param.group)
elif lr.type == 'Pooling':
arg_str = str(lr.pooling_param.kernel_size)
if lr.pooling_param.stride != 1:
arg_str += '/' + str(lr.pooling_param.stride)
if lr.pooling_param.pad != 0:
arg_str += '+' + str(lr.pooling_param.pad)
else:
arg_str = ''
if len(lr.param) > 0:
param_strs = map(format_param, lr.param)
if max(map(len, param_strs)) > 0:
param_str = '({})'.format(', '.join(param_strs))
else:
param_str = ''
else:
param_str = ''
table.append([lr.name, type_str, param_str, bottom_str, '->', top_str,
arg_str])
return table
def main():
parser = argparse.ArgumentParser(description="Print a concise summary of net computation.")
parser.add_argument('filename', help='net prototxt file to summarize')
parser.add_argument('-w', '--max-width', help='maximum field width',
type=int, default=30)
args = parser.parse_args()
net = read_net(args.filename)
table = summarize_net(net)
print_table(table, max_width=args.max_width)
if __name__ == '__main__':
main()
| 4,880 | 33.617021 | 95 | py |
bottom-up-attention | bottom-up-attention-master/caffe/tools/extra/parse_log.py | #!/usr/bin/env python
"""
Parse training log
Evolved from parse_log.sh
"""
import os
import re
import extract_seconds
import argparse
import csv
from collections import OrderedDict
def parse_log(path_to_log):
"""Parse log file
Returns (train_dict_list, test_dict_list)
train_dict_list and test_dict_list are lists of dicts that define the table
rows
"""
regex_iteration = re.compile('Iteration (\d+)')
regex_train_output = re.compile('Train net output #(\d+): (\S+) = ([\.\deE+-]+)')
regex_test_output = re.compile('Test net output #(\d+): (\S+) = ([\.\deE+-]+)')
regex_learning_rate = re.compile('lr = ([-+]?[0-9]*\.?[0-9]+([eE]?[-+]?[0-9]+)?)')
# Pick out lines of interest
iteration = -1
learning_rate = float('NaN')
train_dict_list = []
test_dict_list = []
train_row = None
test_row = None
logfile_year = extract_seconds.get_log_created_year(path_to_log)
with open(path_to_log) as f:
start_time = extract_seconds.get_start_time(f, logfile_year)
last_time = start_time
for line in f:
iteration_match = regex_iteration.search(line)
if iteration_match:
iteration = float(iteration_match.group(1))
if iteration == -1:
# Only start parsing for other stuff if we've found the first
# iteration
continue
try:
time = extract_seconds.extract_datetime_from_line(line,
logfile_year)
except ValueError:
# Skip lines with bad formatting, for example when resuming solver
continue
# if it's another year
if time.month < last_time.month:
logfile_year += 1
time = extract_seconds.extract_datetime_from_line(line, logfile_year)
last_time = time
seconds = (time - start_time).total_seconds()
learning_rate_match = regex_learning_rate.search(line)
if learning_rate_match:
learning_rate = float(learning_rate_match.group(1))
train_dict_list, train_row = parse_line_for_net_output(
regex_train_output, train_row, train_dict_list,
line, iteration, seconds, learning_rate
)
test_dict_list, test_row = parse_line_for_net_output(
regex_test_output, test_row, test_dict_list,
line, iteration, seconds, learning_rate
)
fix_initial_nan_learning_rate(train_dict_list)
fix_initial_nan_learning_rate(test_dict_list)
return train_dict_list, test_dict_list
def parse_line_for_net_output(regex_obj, row, row_dict_list,
line, iteration, seconds, learning_rate):
"""Parse a single line for training or test output
    Returns a tuple (row_dict_list, row)
row: may be either a new row or an augmented version of the current row
row_dict_list: may be either the current row_dict_list or an augmented
version of the current row_dict_list
"""
output_match = regex_obj.search(line)
if output_match:
if not row or row['NumIters'] != iteration:
# Push the last row and start a new one
if row:
# If we're on a new iteration, push the last row
# This will probably only happen for the first row; otherwise
# the full row checking logic below will push and clear full
# rows
row_dict_list.append(row)
row = OrderedDict([
('NumIters', iteration),
('Seconds', seconds),
('LearningRate', learning_rate)
])
# output_num is not used; may be used in the future
# output_num = output_match.group(1)
output_name = output_match.group(2)
output_val = output_match.group(3)
row[output_name] = float(output_val)
if row and len(row_dict_list) >= 1 and len(row) == len(row_dict_list[0]):
# The row is full, based on the fact that it has the same number of
# columns as the first row; append it to the list
row_dict_list.append(row)
row = None
return row_dict_list, row
def fix_initial_nan_learning_rate(dict_list):
"""Correct initial value of learning rate
Learning rate is normally not printed until after the initial test and
training step, which means the initial testing and training rows have
LearningRate = NaN. Fix this by copying over the LearningRate from the
second row, if it exists.
"""
if len(dict_list) > 1:
dict_list[0]['LearningRate'] = dict_list[1]['LearningRate']
def save_csv_files(logfile_path, output_dir, train_dict_list, test_dict_list,
delimiter=',', verbose=False):
"""Save CSV files to output_dir
If the input log file is, e.g., caffe.INFO, the names will be
caffe.INFO.train and caffe.INFO.test
"""
log_basename = os.path.basename(logfile_path)
train_filename = os.path.join(output_dir, log_basename + '.train')
write_csv(train_filename, train_dict_list, delimiter, verbose)
test_filename = os.path.join(output_dir, log_basename + '.test')
write_csv(test_filename, test_dict_list, delimiter, verbose)
def write_csv(output_filename, dict_list, delimiter, verbose=False):
"""Write a CSV file
"""
if not dict_list:
if verbose:
print('Not writing %s; no lines to write' % output_filename)
return
dialect = csv.excel
dialect.delimiter = delimiter
with open(output_filename, 'w') as f:
dict_writer = csv.DictWriter(f, fieldnames=dict_list[0].keys(),
dialect=dialect)
dict_writer.writeheader()
dict_writer.writerows(dict_list)
if verbose:
print 'Wrote %s' % output_filename
def parse_args():
description = ('Parse a Caffe training log into two CSV files '
'containing training and testing information')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('logfile_path',
help='Path to log file')
parser.add_argument('output_dir',
help='Directory in which to place output CSV files')
parser.add_argument('--verbose',
action='store_true',
help='Print some extra info (e.g., output filenames)')
parser.add_argument('--delimiter',
default=',',
help=('Column delimiter in output files '
'(default: \'%(default)s\')'))
args = parser.parse_args()
return args
def main():
args = parse_args()
train_dict_list, test_dict_list = parse_log(args.logfile_path)
save_csv_files(args.logfile_path, args.output_dir, train_dict_list,
test_dict_list, delimiter=args.delimiter)
if __name__ == '__main__':
main()
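# Example invocation (editor's note; paths are illustrative):
# python parse_log.py /path/to/caffe.log /tmp
# writes /tmp/caffe.log.train and /tmp/caffe.log.test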
| 7,114 | 32.720379 | 86 | py |
bottom-up-attention | bottom-up-attention-master/caffe/examples/web_demo/app.py | import os
import time
import cPickle
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import pandas as pd
from PIL import Image
import cStringIO as StringIO
import urllib
import exifutil
import caffe
REPO_DIRNAME = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../..')
UPLOAD_FOLDER = '/tmp/caffe_demos_uploads'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])
# Obtain the flask app object
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html', has_result=False)
@app.route('/classify_url', methods=['GET'])
def classify_url():
imageurl = flask.request.args.get('imageurl', '')
try:
string_buffer = StringIO.StringIO(
urllib.urlopen(imageurl).read())
image = caffe.io.load_image(string_buffer)
except Exception as err:
# For any exception we encounter in reading the image, we will just
# not continue.
logging.info('URL Image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open image from URL.')
)
logging.info('Image: %s', imageurl)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result, imagesrc=imageurl)
@app.route('/classify_upload', methods=['POST'])
def classify_upload():
try:
# We will save the file to disk for possible data collection.
imagefile = flask.request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join(UPLOAD_FOLDER, filename_)
imagefile.save(filename)
logging.info('Saving to %s.', filename)
image = exifutil.open_oriented_im(filename)
except Exception as err:
logging.info('Uploaded image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open uploaded image.')
)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result,
imagesrc=embed_image_html(image)
)
def embed_image_html(image):
"""Creates an image embedded in HTML base64 format."""
image_pil = Image.fromarray((255 * image).astype('uint8'))
image_pil = image_pil.resize((256, 256))
string_buf = StringIO.StringIO()
image_pil.save(string_buf, format='png')
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/png;base64,' + data
def allowed_file(filename):
return (
'.' in filename and
filename.rsplit('.', 1)[1] in ALLOWED_IMAGE_EXTENSIONS
)
class ImagenetClassifier(object):
default_args = {
'model_def_file': (
'{}/models/bvlc_reference_caffenet/deploy.prototxt'.format(REPO_DIRNAME)),
'pretrained_model_file': (
'{}/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'.format(REPO_DIRNAME)),
'mean_file': (
'{}/python/caffe/imagenet/ilsvrc_2012_mean.npy'.format(REPO_DIRNAME)),
'class_labels_file': (
'{}/data/ilsvrc12/synset_words.txt'.format(REPO_DIRNAME)),
'bet_file': (
'{}/data/ilsvrc12/imagenet.bet.pickle'.format(REPO_DIRNAME)),
}
for key, val in default_args.iteritems():
if not os.path.exists(val):
raise Exception(
"File for {} is missing. Should be at: {}".format(key, val))
default_args['image_dim'] = 256
default_args['raw_scale'] = 255.
def __init__(self, model_def_file, pretrained_model_file, mean_file,
raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):
logging.info('Loading net and associated files...')
if gpu_mode:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.net = caffe.Classifier(
model_def_file, pretrained_model_file,
image_dims=(image_dim, image_dim), raw_scale=raw_scale,
mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0)
)
with open(class_labels_file) as f:
labels_df = pd.DataFrame([
{
'synset_id': l.strip().split(' ')[0],
'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
}
for l in f.readlines()
])
self.labels = labels_df.sort('synset_id')['name'].values
self.bet = cPickle.load(open(bet_file))
# A bias to prefer children nodes in single-chain paths
# I am setting the value to 0.1 as a quick, simple model.
# We could use better psychological models here...
self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1
def classify_image(self, image):
try:
starttime = time.time()
scores = self.net.predict([image], oversample=True).flatten()
endtime = time.time()
indices = (-scores).argsort()[:5]
predictions = self.labels[indices]
# In addition to the prediction text, we will also produce
# the length for the progress bar visualization.
meta = [
(p, '%.5f' % scores[i])
for i, p in zip(indices, predictions)
]
logging.info('result: %s', str(meta))
# Compute expected information gain
expected_infogain = np.dot(
self.bet['probmat'], scores[self.bet['idmapping']])
expected_infogain *= self.bet['infogain']
# sort the scores
infogain_sort = expected_infogain.argsort()[::-1]
bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])
for v in infogain_sort[:5]]
logging.info('bet result: %s', str(bet_result))
return (True, meta, bet_result, '%.3f' % (endtime - starttime))
except Exception as err:
logging.info('Classification error: %s', err)
return (False, 'Something went wrong when classifying the '
'image. Maybe try another one?')
def start_tornado(app, port=5000):
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
print("Tornado server starting on port {}".format(port))
tornado.ioloop.IOLoop.instance().start()
def start_from_terminal(app):
"""
Parse command line options and start the server.
"""
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug',
help="enable debug mode",
action="store_true", default=False)
parser.add_option(
'-p', '--port',
help="which port to serve content on",
type='int', default=5000)
parser.add_option(
'-g', '--gpu',
help="use gpu mode",
action='store_true', default=False)
opts, args = parser.parse_args()
ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu})
# Initialize classifier + warm start by forward for allocation
app.clf = ImagenetClassifier(**ImagenetClassifier.default_args)
app.clf.net.forward()
if opts.debug:
app.run(debug=True, host='0.0.0.0', port=opts.port)
else:
start_tornado(app, opts.port)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
start_from_terminal(app)
| 7,793 | 33.184211 | 105 | py |
bottom-up-attention | bottom-up-attention-master/caffe/examples/pycaffe/caffenet.py | from __future__ import print_function
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
# helper function for common structures
def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
num_output=nout, pad=pad, group=group)
return conv, L.ReLU(conv, in_place=True)
def fc_relu(bottom, nout):
fc = L.InnerProduct(bottom, num_output=nout)
return fc, L.ReLU(fc, in_place=True)
def max_pool(bottom, ks, stride=1):
return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
def caffenet(lmdb, batch_size=256, include_acc=False):
data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))
# the net itself
conv1, relu1 = conv_relu(data, 11, 96, stride=4)
pool1 = max_pool(relu1, 3, stride=2)
norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
pool2 = max_pool(relu2, 3, stride=2)
norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
pool5 = max_pool(relu5, 3, stride=2)
fc6, relu6 = fc_relu(pool5, 4096)
drop6 = L.Dropout(relu6, in_place=True)
fc7, relu7 = fc_relu(drop6, 4096)
drop7 = L.Dropout(relu7, in_place=True)
fc8 = L.InnerProduct(drop7, num_output=1000)
loss = L.SoftmaxWithLoss(fc8, label)
if include_acc:
acc = L.Accuracy(fc8, label)
return to_proto(loss, acc)
else:
return to_proto(loss)
def make_net():
with open('train.prototxt', 'w') as f:
print(caffenet('/path/to/caffe-train-lmdb'), file=f)
with open('test.prototxt', 'w') as f:
print(caffenet('/path/to/caffe-val-lmdb', batch_size=50, include_acc=True), file=f)
if __name__ == '__main__':
make_net()
| 2,112 | 36.732143 | 91 | py |
bottom-up-attention | bottom-up-attention-master/caffe/examples/pycaffe/tools.py | import numpy as np
class SimpleTransformer:
"""
SimpleTransformer is a simple class for preprocessing and deprocessing
images for caffe.
"""
def __init__(self, mean=[128, 128, 128]):
self.mean = np.array(mean, dtype=np.float32)
self.scale = 1.0
def set_mean(self, mean):
"""
Set the mean to subtract for centering the data.
"""
self.mean = mean
def set_scale(self, scale):
"""
Set the data scaling.
"""
self.scale = scale
def preprocess(self, im):
"""
preprocess() emulate the pre-processing occurring in the vgg16 caffe
prototxt.
"""
im = np.float32(im)
im = im[:, :, ::-1] # change to BGR
im -= self.mean
im *= self.scale
im = im.transpose((2, 0, 1))
return im
def deprocess(self, im):
"""
inverse of preprocess()
"""
im = im.transpose(1, 2, 0)
im /= self.scale
im += self.mean
im = im[:, :, ::-1] # change to RGB
return np.uint8(im)
class CaffeSolver:
"""
Caffesolver is a class for creating a solver.prototxt file. It sets default
values and can export a solver parameter file.
    Note that all parameters are stored as strings. Parameters whose values
    are themselves strings must keep their quotes, i.e. they are stored as
    strings within strings (see snapshot_prefix below).
"""
def __init__(self, testnet_prototxt_path="testnet.prototxt",
trainnet_prototxt_path="trainnet.prototxt", debug=False):
self.sp = {}
# critical:
self.sp['base_lr'] = '0.001'
self.sp['momentum'] = '0.9'
# speed:
self.sp['test_iter'] = '100'
self.sp['test_interval'] = '250'
# looks:
self.sp['display'] = '25'
self.sp['snapshot'] = '2500'
self.sp['snapshot_prefix'] = '"snapshot"' # string within a string!
# learning rate policy
self.sp['lr_policy'] = '"fixed"'
# important, but rare:
self.sp['gamma'] = '0.1'
self.sp['weight_decay'] = '0.0005'
self.sp['train_net'] = '"' + trainnet_prototxt_path + '"'
self.sp['test_net'] = '"' + testnet_prototxt_path + '"'
# pretty much never change these.
self.sp['max_iter'] = '100000'
self.sp['test_initialization'] = 'false'
self.sp['average_loss'] = '25' # this has to do with the display.
self.sp['iter_size'] = '1' # this is for accumulating gradients
if (debug):
self.sp['max_iter'] = '12'
self.sp['test_iter'] = '1'
self.sp['test_interval'] = '4'
self.sp['display'] = '1'
def add_from_file(self, filepath):
"""
Reads a caffe solver prototxt file and updates the Caffesolver
instance parameters.
"""
with open(filepath, 'r') as f:
for line in f:
if line[0] == '#':
continue
splitLine = line.split(':')
self.sp[splitLine[0].strip()] = splitLine[1].strip()
def write(self, filepath):
"""
Export solver parameters to INPUT "filepath". Sorted alphabetically.
"""
f = open(filepath, 'w')
for key, value in sorted(self.sp.items()):
if not(type(value) is str):
raise TypeError('All solver parameters must be strings')
f.write('%s: %s\n' % (key, value))
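# Minimal usage sketch (editor's addition, not part of the original module):
# write a default solver that points at hypothetical net definition files.
if __name__ == '__main__':
    solver = CaffeSolver(trainnet_prototxt_path='trainnet.prototxt',
                         testnet_prototxt_path='testnet.prototxt')
    solver.sp['base_lr'] = '0.01' # every value is kept as a string
    solver.write('solver.prototxt')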
| 3,457 | 27.344262 | 79 | py |
bottom-up-attention | bottom-up-attention-master/caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py | # imports
import json
import time
import pickle
import scipy.misc
import scipy.sparse # used by load_pascal_annotation for the overlaps matrix
import skimage.io
import caffe
import numpy as np
import os.path as osp
from xml.dom import minidom
from random import shuffle
from threading import Thread
from PIL import Image
from tools import SimpleTransformer
class PascalMultilabelDataLayerSync(caffe.Layer):
"""
This is a simple synchronous datalayer for training a multilabel model on
PASCAL.
"""
def setup(self, bottom, top):
self.top_names = ['data', 'label']
# === Read input parameters ===
# params is a python dictionary with layer parameters.
params = eval(self.param_str)
# Check the parameters for validity.
check_params(params)
# store input as class variables
self.batch_size = params['batch_size']
# Create a batch loader to load the images.
self.batch_loader = BatchLoader(params, None)
# === reshape tops ===
# since we use a fixed input image size, we can shape the data layer
# once. Else, we'd have to do it in the reshape call.
top[0].reshape(
self.batch_size, 3, params['im_shape'][0], params['im_shape'][1])
# Note the 20 channels (because PASCAL has 20 classes.)
top[1].reshape(self.batch_size, 20)
print_info("PascalMultilabelDataLayerSync", params)
def forward(self, bottom, top):
"""
Load data.
"""
for itt in range(self.batch_size):
# Use the batch loader to load the next image.
im, multilabel = self.batch_loader.load_next_image()
# Add directly to the caffe data layer
top[0].data[itt, ...] = im
top[1].data[itt, ...] = multilabel
def reshape(self, bottom, top):
"""
There is no need to reshape the data, since the input is of fixed size
(rows and columns)
"""
pass
def backward(self, top, propagate_down, bottom):
"""
        This layer does not back-propagate gradients.
"""
pass
class BatchLoader(object):
"""
This class abstracts away the loading of images.
Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
performed.
"""
def __init__(self, params, result):
self.result = result
self.batch_size = params['batch_size']
self.pascal_root = params['pascal_root']
self.im_shape = params['im_shape']
# get list of image indexes.
list_file = params['split'] + '.txt'
self.indexlist = [line.rstrip('\n') for line in open(
osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
self._cur = 0 # current image
# this class does some simple data-manipulations
self.transformer = SimpleTransformer()
print "BatchLoader initialized with {} images".format(
len(self.indexlist))
def load_next_image(self):
"""
Load the next image in a batch.
"""
# Did we finish an epoch?
if self._cur == len(self.indexlist):
self._cur = 0
shuffle(self.indexlist)
# Load an image
index = self.indexlist[self._cur] # Get the image index
image_file_name = index + '.jpg'
im = np.asarray(Image.open(
osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
im = scipy.misc.imresize(im, self.im_shape) # resize
# do a simple horizontal flip as data augmentation
flip = np.random.choice(2)*2-1
im = im[:, ::flip, :]
# Load and prepare ground truth
multilabel = np.zeros(20).astype(np.float32)
anns = load_pascal_annotation(index, self.pascal_root)
for label in anns['gt_classes']:
# in the multilabel problem we don't care how MANY instances
# there are of each class. Only if they are present.
# The "-1" is b/c we are not interested in the background
# class.
multilabel[label - 1] = 1
self._cur += 1
return self.transformer.preprocess(im), multilabel
def load_pascal_annotation(index, pascal_root):
"""
This code is borrowed from Ross Girshick's FAST-RCNN code
(https://github.com/rbgirshick/fast-rcnn).
It parses the PASCAL .xml metadata files.
See publication for further details: (http://arxiv.org/abs/1504.08083).
Thanks Ross!
"""
classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
class_to_ind = dict(zip(classes, xrange(21)))
filename = osp.join(pascal_root, 'Annotations', index + '.xml')
# print 'Loading: {}'.format(filename)
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, 21), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
# Make pixel indexes 0-based
x1 = float(get_data_from_tag(obj, 'xmin')) - 1
y1 = float(get_data_from_tag(obj, 'ymin')) - 1
x2 = float(get_data_from_tag(obj, 'xmax')) - 1
y2 = float(get_data_from_tag(obj, 'ymax')) - 1
cls = class_to_ind[
str(get_data_from_tag(obj, "name")).lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'index': index}
def check_params(params):
"""
A utility function to check the parameters for the data layers.
"""
assert 'split' in params.keys(
), 'Params must include split (train, val, or test).'
required = ['batch_size', 'pascal_root', 'im_shape']
for r in required:
assert r in params.keys(), 'Params must include {}'.format(r)
def print_info(name, params):
"""
Output some info regarding the class
"""
print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
name,
params['split'],
params['batch_size'],
params['im_shape'])
| 6,846 | 30.552995 | 78 | py |
bottom-up-attention | bottom-up-attention-master/caffe/examples/pycaffe/layers/pyloss.py | import caffe
import numpy as np
class EuclideanLossLayer(caffe.Layer):
"""
Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer
to demonstrate the class interface for developing layers in Python.
"""
def setup(self, bottom, top):
# check input pair
if len(bottom) != 2:
raise Exception("Need two inputs to compute distance.")
def reshape(self, bottom, top):
# check input dimensions match
if bottom[0].count != bottom[1].count:
raise Exception("Inputs must have the same dimension.")
# difference is shape of inputs
self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
# loss output is scalar
top[0].reshape(1)
def forward(self, bottom, top):
self.diff[...] = bottom[0].data - bottom[1].data
top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.
def backward(self, top, propagate_down, bottom):
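        # Gradient of E = ||b0 - b1||^2 / (2N) with respect to each input:
        # dE/db0 = (b0 - b1) / N and dE/db1 = -(b0 - b1) / N, which is what
        # the sign / bottom[i].num arithmetic below implements.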
for i in range(2):
if not propagate_down[i]:
continue
if i == 0:
sign = 1
else:
sign = -1
bottom[i].diff[...] = sign * self.diff / bottom[i].num
| 1,223 | 31.210526 | 79 | py |
bottom-up-attention | bottom-up-attention-master/caffe/examples/finetune_flickr_style/assemble_data.py | #!/usr/bin/env python
"""
Form a subset of the Flickr Style data, download images to dirname, and write
Caffe ImagesDataLayer training file.
"""
import os
import urllib
import hashlib
import argparse
import numpy as np
import pandas as pd
from skimage import io
import multiprocessing
# Flickr returns a special image if the request is unavailable.
MISSING_IMAGE_SHA1 = '6a92790b1c2a301c6e7ddef645dca1f53ea97ac2'
example_dirname = os.path.abspath(os.path.dirname(__file__))
caffe_dirname = os.path.abspath(os.path.join(example_dirname, '../..'))
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
def download_image(args_tuple):
"For use with multiprocessing map. Returns filename on fail."
try:
url, filename = args_tuple
if not os.path.exists(filename):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception() # multiprocessing doesn't catch keyboard exceptions
except:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download a subset of Flickr Style to a directory')
parser.add_argument(
'-s', '--seed', type=int, default=0,
help="random seed")
parser.add_argument(
'-i', '--images', type=int, default=-1,
help="number of images to use (-1 for all [default])",
)
parser.add_argument(
'-w', '--workers', type=int, default=-1,
help="num workers used to download images. -x uses (all - x) cores [-1 default]."
)
parser.add_argument(
'-l', '--labels', type=int, default=0,
help="if set to a positive value, only sample images from the first number of labels."
)
args = parser.parse_args()
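    # Example invocation (editor's note): "assemble_data.py -i 2000" downloads
    # a 2000-image subset using all but one CPU core (the default -w -1).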
np.random.seed(args.seed)
# Read data, shuffle order, and subsample.
csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
df = df.iloc[np.random.permutation(df.shape[0])]
if args.labels > 0:
df = df.loc[df['label'] < args.labels]
if args.images > 0 and args.images < df.shape[0]:
df = df.iloc[:args.images]
# Make directory for images and get local filenames.
if training_dirname is None:
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
images_dirname = os.path.join(training_dirname, 'images')
if not os.path.exists(images_dirname):
os.makedirs(images_dirname)
df['image_filename'] = [
os.path.join(images_dirname, _.split('/')[-1]) for _ in df['image_url']
]
# Download images.
num_workers = args.workers
if num_workers <= 0:
num_workers = multiprocessing.cpu_count() + num_workers
print('Downloading {} images with {} workers...'.format(
df.shape[0], num_workers))
pool = multiprocessing.Pool(processes=num_workers)
map_args = zip(df['image_url'], df['image_filename'])
results = pool.map(download_image, map_args)
# Only keep rows with valid images, and write out training file lists.
df = df[results]
for split in ['train', 'test']:
split_df = df[df['_split'] == split]
filename = os.path.join(training_dirname, '{}.txt'.format(split))
split_df[['image_filename', 'label']].to_csv(
filename, sep=' ', header=None, index=None)
print('Writing train/val for {} successfully downloaded images.'.format(
df.shape[0]))
| 3,636 | 35.737374 | 94 | py |
bottom-up-attention | bottom-up-attention-master/caffe/src/caffe/test/test_data/generate_sample_data.py | """
Generate data used in the HDF5DataLayer and GradientBasedSolver tests.
"""
import os
import numpy as np
import h5py
script_dir = os.path.dirname(os.path.abspath(__file__))
# Generate HDF5DataLayer sample_data.h5
num_cols = 8
num_rows = 10
height = 6
width = 5
total_size = num_cols * num_rows * height * width
data = np.arange(total_size)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
# We had a bug where data was copied into label, but the tests weren't
# catching it, so let's make label 1-indexed.
label = 1 + np.arange(num_rows)[:, np.newaxis]
label = label.astype('float32')
# We add an extra label2 dataset to test HDF5 layer's ability
# to handle arbitrary number of output ("top") Blobs.
label2 = label + 1
print data
print label
with h5py.File(script_dir + '/sample_data.h5', 'w') as f:
f['data'] = data
f['label'] = label
f['label2'] = label2
with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f:
f.create_dataset(
'data', data=data + total_size,
compression='gzip', compression_opts=1
)
f.create_dataset(
'label', data=label,
compression='gzip', compression_opts=1,
dtype='uint8',
)
f.create_dataset(
'label2', data=label2,
compression='gzip', compression_opts=1,
dtype='uint8',
)
with open(script_dir + '/sample_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/sample_data.h5\n')
f.write('src/caffe/test/test_data/sample_data_2_gzip.h5\n')
# Generate GradientBasedSolver solver_data.h5
num_cols = 3
num_rows = 8
height = 10
width = 10
data = np.random.randn(num_rows, num_cols, height, width)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
targets = np.random.randn(num_rows, 1)
targets = targets.astype('float32')
print data
print targets
with h5py.File(script_dir + '/solver_data.h5', 'w') as f:
f['data'] = data
f['targets'] = targets
with open(script_dir + '/solver_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/solver_data.h5\n')
| 2,104 | 24.670732 | 70 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/draw_net.py | #!/usr/bin/env python
"""
Draw a graph of the net architecture.
"""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from google.protobuf import text_format
import caffe
import caffe.draw
from caffe.proto import caffe_pb2
def parse_args():
"""Parse input arguments
"""
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('input_net_proto_file',
help='Input network prototxt file')
parser.add_argument('output_image_file',
help='Output image file')
parser.add_argument('--rankdir',
help=('One of TB (top-bottom, i.e., vertical), '
'RL (right-left, i.e., horizontal), or another '
'valid dot option; see '
'http://www.graphviz.org/doc/info/'
'attrs.html#k:rankdir'),
default='LR')
parser.add_argument('--phase',
help=('Which network phase to draw: can be TRAIN, '
'TEST, or ALL. If ALL, then all layers are drawn '
'regardless of phase.'),
default="ALL")
args = parser.parse_args()
return args
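# Example (editor's note; output filename is illustrative):
# python draw_net.py models/bvlc_reference_caffenet/train_val.prototxt caffenet.png --rankdir TB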
def main():
args = parse_args()
net = caffe_pb2.NetParameter()
text_format.Merge(open(args.input_net_proto_file).read(), net)
print('Drawing net to %s' % args.output_image_file)
    phase = None
if args.phase == "TRAIN":
phase = caffe.TRAIN
elif args.phase == "TEST":
phase = caffe.TEST
elif args.phase != "ALL":
raise ValueError("Unknown phase: " + args.phase)
caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir,
phase)
if __name__ == '__main__':
main()
| 1,934 | 31.79661 | 81 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/detect.py | #!/usr/bin/env python
"""
detector.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserved order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="selective_search",
choices=CROP_MODES,
help="How to generate windows for detection."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--context_pad",
type=int,
default='16',
help="Amount of surrounding context to collect in input window."
)
args = parser.parse_args()
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if mean.shape[1:] != (1, 1):
mean = mean.mean(1).mean(1)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap,
context_pad=args.context_pad)
# Load input.
t = time.time()
print("Loading input...")
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = [
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
]
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
        # Enumerate the class probabilities. NUM_OUTPUT is not defined
        # anywhere in this script, so infer the number of output classes
        # from the feature vectors themselves.
        num_output = np.vstack(df['feat']).shape[1]
        class_cols = ['class{}'.format(x) for x in range(num_output)]
df[class_cols] = pd.DataFrame(
data=np.vstack(df['feat']), index=df.index, columns=class_cols)
df.to_csv(args.output_file, cols=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
| 5,734 | 31.95977 | 88 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/classify.py | #!/usr/bin/env python
"""
classify.py is an out-of-the-box image classifer callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
"""
import numpy as np
import os
import sys
import argparse
import glob
import time
import caffe
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output files.
parser.add_argument(
"input_file",
help="Input image, directory, or npy."
)
parser.add_argument(
"output_file",
help="Output npy filename."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--center_only",
action='store_true',
help="Switch for prediction from center crop alone instead of " +
"averaging predictions across crops (default)."
)
parser.add_argument(
"--images_dim",
default='256,256',
help="Canonical 'height,width' dimensions of input images."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of [Channels x Height x Width] dimensions " +
"(numpy array). Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--ext",
default='jpg',
help="Image file extension to take as input when a directory " +
"is given as the input file."
)
args = parser.parse_args()
image_dims = [int(s) for s in args.images_dim.split(',')]
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make classifier.
classifier = caffe.Classifier(args.model_def, args.pretrained_model,
image_dims=image_dims, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap)
# Load numpy array (.npy), directory glob (*.jpg), or image file.
args.input_file = os.path.expanduser(args.input_file)
if args.input_file.endswith('npy'):
print("Loading file: %s" % args.input_file)
inputs = np.load(args.input_file)
elif os.path.isdir(args.input_file):
print("Loading folder: %s" % args.input_file)
inputs =[caffe.io.load_image(im_f)
for im_f in glob.glob(args.input_file + '/*.' + args.ext)]
else:
print("Loading file: %s" % args.input_file)
inputs = [caffe.io.load_image(args.input_file)]
print("Classifying %d inputs." % len(inputs))
# Classify.
start = time.time()
predictions = classifier.predict(inputs, not args.center_only)
print("Done in %.2f s." % (time.time() - start))
# Save
print("Saving results into %s" % args.output_file)
np.save(args.output_file, predictions)
if __name__ == '__main__':
main(sys.argv)
| 4,262 | 29.669065 | 88 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/train.py | #!/usr/bin/env python
"""
Trains a model using one or more GPUs.
"""
from multiprocessing import Process
import caffe
def train(
solver, # solver proto definition
snapshot, # solver snapshot to restore
gpus, # list of device ids
timing=False, # show timing info for compute and communications
):
# NCCL uses a uid to identify a session
uid = caffe.NCCL.new_uid()
caffe.init_log()
caffe.log('Using devices %s' % str(gpus))
procs = []
for rank in range(len(gpus)):
p = Process(target=solve,
args=(solver, snapshot, gpus, timing, uid, rank))
p.daemon = True
p.start()
procs.append(p)
for p in procs:
p.join()
def time(solver, nccl):
fprop = []
bprop = []
total = caffe.Timer()
allrd = caffe.Timer()
for _ in range(len(solver.net.layers)):
fprop.append(caffe.Timer())
bprop.append(caffe.Timer())
display = solver.param.display
def show_time():
if solver.iter % display == 0:
s = '\n'
for i in range(len(solver.net.layers)):
s += 'forw %3d %8s ' % (i, solver.net._layer_names[i])
s += ': %.2f\n' % fprop[i].ms
for i in range(len(solver.net.layers) - 1, -1, -1):
s += 'back %3d %8s ' % (i, solver.net._layer_names[i])
s += ': %.2f\n' % bprop[i].ms
s += 'solver total: %.2f\n' % total.ms
s += 'allreduce: %.2f\n' % allrd.ms
caffe.log(s)
solver.net.before_forward(lambda layer: fprop[layer].start())
solver.net.after_forward(lambda layer: fprop[layer].stop())
solver.net.before_backward(lambda layer: bprop[layer].start())
solver.net.after_backward(lambda layer: bprop[layer].stop())
solver.add_callback(lambda: total.start(), lambda: (total.stop(), allrd.start()))
solver.add_callback(nccl)
solver.add_callback(lambda: '', lambda: (allrd.stop(), show_time()))
def solve(proto, snapshot, gpus, timing, uid, rank):
caffe.set_mode_gpu()
caffe.set_device(gpus[rank])
caffe.set_solver_count(len(gpus))
caffe.set_solver_rank(rank)
caffe.set_multiprocess(True)
solver = caffe.SGDSolver(proto)
if snapshot and len(snapshot) != 0:
solver.restore(snapshot)
nccl = caffe.NCCL(solver, uid)
nccl.bcast()
if timing and rank == 0:
time(solver, nccl)
else:
solver.add_callback(nccl)
if solver.param.layer_wise_reduce:
solver.net.after_backward(nccl)
solver.step(solver.param.max_iter)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--solver", required=True, help="Solver proto definition.")
parser.add_argument("--snapshot", help="Solver snapshot to restore.")
parser.add_argument("--gpus", type=int, nargs='+', default=[0],
help="List of device ids.")
parser.add_argument("--timing", action='store_true', help="Show timing info.")
args = parser.parse_args()
train(args.solver, args.snapshot, args.gpus, args.timing)
| 3,145 | 30.148515 | 85 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/net_spec.py | """Python net specification.
This module provides a way to write nets directly in Python, using a natural,
functional style. See examples/pycaffe/caffenet.py for an example.
Currently this works as a thin wrapper around the Python protobuf interface,
with layers and parameters automatically generated for the "layers" and
"params" pseudo-modules, which are actually objects using __getattr__ magic
to generate protobuf messages.
Note that when using to_proto or Top.to_proto, names of intermediate blobs will
be automatically generated. To explicitly specify blob names, use the NetSpec
class -- assign to its attributes directly to name layers, and call
NetSpec.to_proto to serialize all assigned layers.
This interface is expected to continue to evolve as Caffe gains new capabilities
for specifying nets. In particular, the automatically generated layer names
are not guaranteed to be forward-compatible.
"""
from collections import OrderedDict, Counter
from .proto import caffe_pb2
from google import protobuf
import six
def param_name_dict():
"""Find out the correspondence between layer names and parameter names."""
layer = caffe_pb2.LayerParameter()
# get all parameter names (typically underscore case) and corresponding
# type names (typically camel case), which contain the layer names
# (note that not all parameters correspond to layers, but we'll ignore that)
param_names = [f.name for f in layer.DESCRIPTOR.fields if f.name.endswith('_param')]
param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]
# strip the final '_param' or 'Parameter'
param_names = [s[:-len('_param')] for s in param_names]
param_type_names = [s[:-len('Parameter')] for s in param_type_names]
return dict(zip(param_type_names, param_names))
def to_proto(*tops):
"""Generate a NetParameter that contains all layers needed to compute
all arguments."""
layers = OrderedDict()
autonames = Counter()
for top in tops:
top.fn._to_proto(layers, {}, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
def assign_proto(proto, name, val):
"""Assign a Python object to a protobuf message, based on the Python
type (in recursive fashion). Lists become repeated fields/messages, dicts
become messages, and other types are assigned directly. For convenience,
repeated fields whose values are not lists are converted to single-element
lists; e.g., `my_repeated_int_field=3` is converted to
`my_repeated_int_field=[3]`."""
is_repeated_field = hasattr(getattr(proto, name), 'extend')
if is_repeated_field and not isinstance(val, list):
val = [val]
if isinstance(val, list):
if isinstance(val[0], dict):
for item in val:
proto_item = getattr(proto, name).add()
for k, v in six.iteritems(item):
assign_proto(proto_item, k, v)
else:
getattr(proto, name).extend(val)
elif isinstance(val, dict):
for k, v in six.iteritems(val):
assign_proto(getattr(proto, name), k, v)
else:
setattr(proto, name, val)
class Top(object):
"""A Top specifies a single output blob (which could be one of several
produced by a layer.)"""
def __init__(self, fn, n):
self.fn = fn
self.n = n
def to_proto(self):
"""Generate a NetParameter that contains all layers needed to compute
this top."""
return to_proto(self)
def _to_proto(self, layers, names, autonames):
return self.fn._to_proto(layers, names, autonames)
class Function(object):
"""A Function specifies a layer, its parameters, and its inputs (which
are Tops from other layers)."""
def __init__(self, type_name, inputs, params):
self.type_name = type_name
self.inputs = inputs
self.params = params
self.ntop = self.params.get('ntop', 1)
# use del to make sure kwargs are not double-processed as layer params
if 'ntop' in self.params:
del self.params['ntop']
self.in_place = self.params.get('in_place', False)
if 'in_place' in self.params:
del self.params['in_place']
self.tops = tuple(Top(self, n) for n in range(self.ntop))
def _get_name(self, names, autonames):
if self not in names and self.ntop > 0:
names[self] = self._get_top_name(self.tops[0], names, autonames)
elif self not in names:
autonames[self.type_name] += 1
names[self] = self.type_name + str(autonames[self.type_name])
return names[self]
def _get_top_name(self, top, names, autonames):
if top not in names:
autonames[top.fn.type_name] += 1
names[top] = top.fn.type_name + str(autonames[top.fn.type_name])
return names[top]
def _to_proto(self, layers, names, autonames):
if self in layers:
return
bottom_names = []
for inp in self.inputs:
inp._to_proto(layers, names, autonames)
bottom_names.append(layers[inp.fn].top[inp.n])
layer = caffe_pb2.LayerParameter()
layer.type = self.type_name
layer.bottom.extend(bottom_names)
if self.in_place:
layer.top.extend(layer.bottom)
else:
for top in self.tops:
layer.top.append(self._get_top_name(top, names, autonames))
layer.name = self._get_name(names, autonames)
for k, v in six.iteritems(self.params):
# special case to handle generic *params
if k.endswith('param'):
assign_proto(layer, k, v)
else:
try:
assign_proto(getattr(layer,
_param_names[self.type_name] + '_param'), k, v)
except (AttributeError, KeyError):
assign_proto(layer, k, v)
layers[self] = layer
class NetSpec(object):
"""A NetSpec contains a set of Tops (assigned directly as attributes).
Calling NetSpec.to_proto generates a NetParameter containing all of the
layers needed to produce all of the assigned Tops, using the assigned
names."""
def __init__(self):
super(NetSpec, self).__setattr__('tops', OrderedDict())
def __setattr__(self, name, value):
self.tops[name] = value
def __getattr__(self, name):
return self.tops[name]
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __getitem__(self, item):
return self.__getattr__(item)
def to_proto(self):
names = {v: k for k, v in six.iteritems(self.tops)}
autonames = Counter()
layers = OrderedDict()
for name, top in six.iteritems(self.tops):
top._to_proto(layers, names, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
class Layers(object):
"""A Layers object is a pseudo-module which generates functions that specify
layers; e.g., Layers().Convolution(bottom, kernel_size=3) will produce a Top
specifying a 3x3 convolution applied to bottom."""
def __getattr__(self, name):
def layer_fn(*args, **kwargs):
fn = Function(name, args, kwargs)
if fn.ntop == 0:
return fn
elif fn.ntop == 1:
return fn.tops[0]
else:
return fn.tops
return layer_fn
class Parameters(object):
"""A Parameters object is a pseudo-module which generates constants used
in layer parameters; e.g., Parameters().Pooling.MAX is the value used
to specify max pooling."""
def __getattr__(self, name):
class Param:
def __getattr__(self, param_name):
return getattr(getattr(caffe_pb2, name + 'Parameter'), param_name)
return Param()
_param_names = param_name_dict()
layers = Layers()
params = Parameters()
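# Example usage (an illustrative sketch; the layer types and parameters are the
# standard caffe.proto ones, and the blob names are made up):
# n = NetSpec()
# n.data = layers.Input(shape=dict(dim=[1, 3, 28, 28]))
# n.conv1 = layers.Convolution(n.data, kernel_size=3, num_output=8)
# n.pool1 = layers.Pooling(n.conv1, pool=params.Pooling.MAX,
# kernel_size=2, stride=2)
# print(n.to_proto())
# The assigned attribute names become the layer/top names in the emitted
# NetParameter; anonymous intermediate tops are auto-numbered by type.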
| 8,048 | 34.45815 | 88 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/classifier.py | #!/usr/bin/env python
"""
Classifier is an image classifier specialization of Net.
"""
import numpy as np
import caffe
class Classifier(caffe.Net):
"""
Classifier extends Net for image class prediction
by scaling, center cropping, or oversampling.
Parameters
----------
image_dims : dimensions to scale input for cropping/sampling.
Default is to scale to net input size for whole-image crop.
mean, input_scale, raw_scale, channel_swap: params for
preprocessing options.
"""
def __init__(self, model_file, pretrained_file, image_dims=None,
mean=None, input_scale=None, raw_scale=None,
channel_swap=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# configure pre-processing
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.crop_dims = np.array(self.blobs[in_].data.shape[2:])
if not image_dims:
image_dims = self.crop_dims
self.image_dims = image_dims
def predict(self, inputs, oversample=True):
"""
Predict classification probabilities of inputs.
Parameters
----------
inputs : iterable of (H x W x K) input ndarrays.
oversample : boolean
average predictions across center, corners, and mirrors
when True (default). Center-only prediction when False.
Returns
-------
predictions: (N x C) ndarray of class probabilities for N images and C
classes.
"""
# Scale to standardize input dimensions.
input_ = np.zeros((len(inputs),
self.image_dims[0],
self.image_dims[1],
inputs[0].shape[2]),
dtype=np.float32)
for ix, in_ in enumerate(inputs):
input_[ix] = caffe.io.resize_image(in_, self.image_dims)
if oversample:
# Generate center, corner, and mirrored crops.
input_ = caffe.io.oversample(input_, self.crop_dims)
else:
# Take center crop.
center = np.array(self.image_dims) / 2.0
crop = np.tile(center, (1, 2))[0] + np.concatenate([
-self.crop_dims / 2.0,
self.crop_dims / 2.0
])
crop = crop.astype(int)
input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
# Classify
caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],
dtype=np.float32)
for ix, in_ in enumerate(input_):
caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)
out = self.forward_all(**{self.inputs[0]: caffe_in})
predictions = out[self.outputs[0]]
# For oversampling, average predictions across crops.
if oversample:
            predictions = predictions.reshape((len(predictions) // 10, 10, -1))
predictions = predictions.mean(1)
return predictions
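# Illustrative usage sketch (the file names, mean, and scales below are
# assumptions for a CaffeNet-style model, not part of this module):
# net = Classifier('deploy.prototxt', 'weights.caffemodel',
# image_dims=(256, 256), raw_scale=255,
# mean=np.array([104.0, 117.0, 123.0]),
# channel_swap=(2, 1, 0))
# probs = net.predict([caffe.io.load_image('cat.jpg')]) # shape (1, n_classes)
# With oversample=True (the default) each prediction is averaged over 10 crops.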
| 3,537 | 34.737374 | 78 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/coord_map.py | """
Determine spatial relationships between layers to relate their coordinates.
Coordinates are mapped from input-to-output (forward), but can
be mapped output-to-input (backward) by the inverse mapping too.
This helps crop and align feature maps among other uses.
"""
from __future__ import division
import numpy as np
from caffe import layers as L
PASS_THROUGH_LAYERS = ['AbsVal', 'BatchNorm', 'Bias', 'BNLL', 'Dropout',
'Eltwise', 'ELU', 'Log', 'LRN', 'Exp', 'MVN', 'Power',
'ReLU', 'PReLU', 'Scale', 'Sigmoid', 'Split', 'TanH',
'Threshold']
def conv_params(fn):
"""
Extract the spatial parameters that determine the coordinate mapping:
kernel size, stride, padding, and dilation.
Implementation detail: Convolution, Deconvolution, and Im2col layers
define these in the convolution_param message, while Pooling has its
own fields in pooling_param. This method deals with these details to
extract canonical parameters.
"""
params = fn.params.get('convolution_param', fn.params)
axis = params.get('axis', 1)
ks = np.array(params['kernel_size'], ndmin=1)
dilation = np.array(params.get('dilation', 1), ndmin=1)
assert len({'pad_h', 'pad_w', 'kernel_h', 'kernel_w', 'stride_h',
'stride_w'} & set(fn.params)) == 0, \
'cropping does not support legacy _h/_w params'
return (axis, np.array(params.get('stride', 1), ndmin=1),
(ks - 1) * dilation + 1,
np.array(params.get('pad', 0), ndmin=1))
def crop_params(fn):
"""
Extract the crop layer parameters with defaults.
"""
params = fn.params.get('crop_param', fn.params)
axis = params.get('axis', 2) # default to spatial crop for N, C, H, W
offset = np.array(params.get('offset', 0), ndmin=1)
return (axis, offset)
class UndefinedMapException(Exception):
"""
Exception raised for layers that do not have a defined coordinate mapping.
"""
pass
def coord_map(fn):
"""
Define the coordinate mapping by its
- axis
- scale: output coord[i * scale] <- input_coord[i]
- shift: output coord[i] <- output_coord[i + shift]
    s.t. the identity mapping, as for pointwise layers like ReLU, is defined by
(None, 1, 0) since it is independent of axis and does not transform coords.
"""
if fn.type_name in ['Convolution', 'Pooling', 'Im2col']:
axis, stride, ks, pad = conv_params(fn)
return axis, 1 / stride, (pad - (ks - 1) / 2) / stride
elif fn.type_name == 'Deconvolution':
axis, stride, ks, pad = conv_params(fn)
return axis, stride, (ks - 1) / 2 - pad
elif fn.type_name in PASS_THROUGH_LAYERS:
return None, 1, 0
elif fn.type_name == 'Crop':
axis, offset = crop_params(fn)
axis -= 1 # -1 for last non-coordinate dim.
return axis, 1, - offset
else:
raise UndefinedMapException
class AxisMismatchException(Exception):
"""
Exception raised for mappings with incompatible axes.
"""
pass
def compose(base_map, next_map):
"""
Compose a base coord map with scale a1, shift b1 with a further coord map
with scale a2, shift b2. The scales multiply and the further shift, b2,
is scaled by base coord scale a1.
"""
ax1, a1, b1 = base_map
ax2, a2, b2 = next_map
if ax1 is None:
ax = ax2
elif ax2 is None or ax1 == ax2:
ax = ax1
else:
raise AxisMismatchException
return ax, a1 * a2, a1 * b2 + b1
def inverse(coord_map):
"""
Invert a coord map by de-scaling and un-shifting;
this gives the backward mapping for the gradient.
"""
ax, a, b = coord_map
return ax, 1 / a, -b / a
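# Worked example on plain (axis, scale, shift) triples: a 2x2/2 pooling maps by
# (1, 1/2, -1/4) and a 4x4/2 deconvolution by (1, 2, 3/2), so
# compose((1, 2, 1.5), (1, 0.5, -0.25)) == (1, 1.0, 1.0)
# inverse((1, 2, 1.5)) == (1, 0.5, -0.75)
# The numbers are only illustrative; they follow directly from conv_params()
# and coord_map() above.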
def coord_map_from_to(top_from, top_to):
"""
    Determine the coordinate mapping between a top (from) and a top (to).
Walk the graph to find a common ancestor while composing the coord maps for
from and to until they meet. As a last step the from map is inverted.
"""
# We need to find a common ancestor of top_from and top_to.
    # We'll assume that all ancestors are equivalent here (otherwise the graph
    # is in an inconsistent state, which we could improve this to check for).
# For now use a brute-force algorithm.
def collect_bottoms(top):
"""
Collect the bottoms to walk for the coordinate mapping.
The general rule is that all the bottoms of a layer can be mapped, as
most layers have the same coordinate mapping for each bottom.
Crop layer is a notable exception. Only the first/cropped bottom is
mappable; the second/dimensions bottom is excluded from the walk.
"""
bottoms = top.fn.inputs
if top.fn.type_name == 'Crop':
bottoms = bottoms[:1]
return bottoms
# walk back from top_from, keeping the coord map as we go
from_maps = {top_from: (None, 1, 0)}
frontier = {top_from}
while frontier:
top = frontier.pop()
try:
bottoms = collect_bottoms(top)
for bottom in bottoms:
from_maps[bottom] = compose(from_maps[top], coord_map(top.fn))
frontier.add(bottom)
except UndefinedMapException:
pass
# now walk back from top_to until we hit a common blob
to_maps = {top_to: (None, 1, 0)}
frontier = {top_to}
while frontier:
top = frontier.pop()
if top in from_maps:
return compose(to_maps[top], inverse(from_maps[top]))
try:
bottoms = collect_bottoms(top)
for bottom in bottoms:
to_maps[bottom] = compose(to_maps[top], coord_map(top.fn))
frontier.add(bottom)
except UndefinedMapException:
continue
# if we got here, we did not find a blob in common
raise RuntimeError('Could not compute map between tops; are they '
'connected by spatial layers?')
def crop(top_from, top_to):
"""
Define a Crop layer to crop a top (from) to another top (to) by
determining the coordinate mapping between the two and net spec'ing
the axis and shift parameters of the crop.
"""
ax, a, b = coord_map_from_to(top_from, top_to)
assert (a == 1).all(), 'scale mismatch on crop (a = {})'.format(a)
assert (b <= 0).all(), 'cannot crop negative offset (b = {})'.format(b)
assert (np.round(b) == b).all(), 'cannot crop noninteger offset ' \
'(b = {})'.format(b)
return L.Crop(top_from, top_to,
crop_param=dict(axis=ax + 1, # +1 for first cropping dim.
offset=list(-np.round(b).astype(int))))
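# Illustrative sketch (the net spec `n` and its tops are assumptions): after
# defining an upsampling head over `n.data`, the helper wires the Crop layer:
# n.score = crop(n.upscore, n.data)
# It asserts unit scale and an integer, non-negative offset, then returns the
# L.Crop(...) top with axis/offset filled in from the coordinate maps.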
| 6,721 | 35.139785 | 79 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/detector.py | #!/usr/bin/env python
"""
Do windowed detection by classifying a number of images/crops at once,
optionally using the selective search window proposal method.
This implementation follows ideas in
Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik.
Rich feature hierarchies for accurate object detection and semantic
segmentation.
http://arxiv.org/abs/1311.2524
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
"""
import numpy as np
import os
import caffe
class Detector(caffe.Net):
"""
Detector extends Net for windowed detection by a list of crops or
selective search proposals.
Parameters
----------
mean, input_scale, raw_scale, channel_swap : params for preprocessing
options.
context_pad : amount of surrounding context to take s.t. a `context_pad`
sized border of pixels in the network input image is context, as in
R-CNN feature extraction.
"""
def __init__(self, model_file, pretrained_file, mean=None,
input_scale=None, raw_scale=None, channel_swap=None,
context_pad=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# configure pre-processing
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.configure_crop(context_pad)
def detect_windows(self, images_windows):
"""
Do windowed detection over given images and windows. Windows are
extracted then warped to the input dimensions of the net.
Parameters
----------
images_windows: (image filename, window list) iterable.
        (Context padding is controlled by the context_pad passed to __init__.)
Returns
-------
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
"""
# Extract windows.
window_inputs = []
for image_fname, windows in images_windows:
image = caffe.io.load_image(image_fname).astype(np.float32)
for window in windows:
window_inputs.append(self.crop(image, window))
# Run through the net (warping windows to input dimensions).
in_ = self.inputs[0]
caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])
+ self.blobs[in_].data.shape[2:],
dtype=np.float32)
for ix, window_in in enumerate(window_inputs):
caffe_in[ix] = self.transformer.preprocess(in_, window_in)
out = self.forward_all(**{in_: caffe_in})
predictions = out[self.outputs[0]]
# Package predictions with images and windows.
detections = []
ix = 0
for image_fname, windows in images_windows:
for window in windows:
detections.append({
'window': window,
'prediction': predictions[ix],
'filename': image_fname
})
ix += 1
return detections
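    # Illustrative usage sketch (the paths, window coordinates, and
    # preprocessing values are assumptions):
    # det = Detector('deploy.prototxt', 'weights.caffemodel', context_pad=16,
    # mean=np.array([104.0, 117.0, 123.0]),
    # channel_swap=(2, 1, 0), raw_scale=255)
    # dets = det.detect_windows(
    # [('image.jpg', [np.array([0, 0, 100, 100])])])
    # -> one dict per window with 'filename', 'window', and 'prediction'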
def detect_selective_search(self, image_fnames):
"""
Do windowed detection over Selective Search proposals by extracting
the crop and warping to the input dimensions of the net.
Parameters
----------
image_fnames: list
Returns
-------
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
"""
import selective_search_ijcv_with_python as selective_search
# Make absolute paths so MATLAB can find the files.
image_fnames = [os.path.abspath(f) for f in image_fnames]
windows_list = selective_search.get_windows(
image_fnames,
cmd='selective_search_rcnn'
)
# Run windowed detection on the selective search list.
return self.detect_windows(zip(image_fnames, windows_list))
def crop(self, im, window):
"""
Crop a window from the image for detection. Include surrounding context
according to the `context_pad` configuration.
Parameters
----------
im: H x W x K image ndarray to crop.
window: bounding box coordinates as ymin, xmin, ymax, xmax.
Returns
-------
crop: cropped window.
"""
# Crop window from the image.
crop = im[window[0]:window[2], window[1]:window[3]]
if self.context_pad:
box = window.copy()
crop_size = self.blobs[self.inputs[0]].width # assumes square
scale = crop_size / (1. * crop_size - self.context_pad * 2)
# Crop a box + surrounding context.
half_h = (box[2] - box[0] + 1) / 2.
half_w = (box[3] - box[1] + 1) / 2.
center = (box[0] + half_h, box[1] + half_w)
scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))
box = np.round(np.tile(center, 2) + scaled_dims)
full_h = box[2] - box[0] + 1
full_w = box[3] - box[1] + 1
scale_h = crop_size / full_h
scale_w = crop_size / full_w
pad_y = round(max(0, -box[0]) * scale_h) # amount out-of-bounds
pad_x = round(max(0, -box[1]) * scale_w)
# Clip box to image dimensions.
im_h, im_w = im.shape[:2]
box = np.clip(box, 0., [im_h, im_w, im_h, im_w])
clip_h = box[2] - box[0] + 1
clip_w = box[3] - box[1] + 1
assert(clip_h > 0 and clip_w > 0)
crop_h = round(clip_h * scale_h)
crop_w = round(clip_w * scale_w)
if pad_y + crop_h > crop_size:
crop_h = crop_size - pad_y
if pad_x + crop_w > crop_size:
crop_w = crop_size - pad_x
# collect with context padding and place in input
# with mean padding
context_crop = im[box[0]:box[2], box[1]:box[3]]
context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))
crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean
crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop
return crop
def configure_crop(self, context_pad):
"""
Configure crop dimensions and amount of context for cropping.
If context is included, make the special input mean for context padding.
Parameters
----------
context_pad : amount of context for cropping.
"""
# crop dimensions
in_ = self.inputs[0]
tpose = self.transformer.transpose[in_]
inv_tpose = [tpose[t] for t in tpose]
self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose]
#.transpose(inv_tpose)
# context padding
self.context_pad = context_pad
if self.context_pad:
in_ = self.inputs[0]
transpose = self.transformer.transpose.get(in_)
channel_order = self.transformer.channel_swap.get(in_)
raw_scale = self.transformer.raw_scale.get(in_)
# Padding context crops needs the mean in unprocessed input space.
mean = self.transformer.mean.get(in_)
if mean is not None:
inv_transpose = [transpose[t] for t in transpose]
crop_mean = mean.copy().transpose(inv_transpose)
if channel_order is not None:
channel_order_inverse = [channel_order.index(i)
for i in range(crop_mean.shape[2])]
crop_mean = crop_mean[:, :, channel_order_inverse]
if raw_scale is not None:
crop_mean /= raw_scale
self.crop_mean = crop_mean
else:
self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
| 8,541 | 38.364055 | 80 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/__init__.py | from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver, NCCL, Timer
from ._caffe import init_log, log, set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list, set_random_seed, solver_count, set_solver_count, solver_rank, set_solver_rank, set_multiprocess
from ._caffe import __version__
from .proto.caffe_pb2 import TRAIN, TEST
from .classifier import Classifier
from .detector import Detector
from . import io
from .net_spec import layers, params, NetSpec, to_proto
| 561 | 61.444444 | 225 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/pycaffe.py | """
Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic
interface.
"""
from collections import OrderedDict
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
import numpy as np
from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \
RMSPropSolver, AdaDeltaSolver, AdamSolver, NCCL, Timer
import caffe.io
import six
# We directly update methods from Net here (rather than using composition or
# inheritance) so that nets created by caffe (e.g., by SGDSolver) will
# automatically have the improved interface.
@property
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
if not hasattr(self, '_blobs_dict'):
self._blobs_dict = OrderedDict(zip(self._blob_names, self._blobs))
return self._blobs_dict
@property
def _Net_blob_loss_weights(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blob loss weights indexed by name
"""
    if not hasattr(self, '_blob_loss_weights_dict'):
self._blob_loss_weights_dict = OrderedDict(zip(self._blob_names,
self._blob_loss_weights))
return self._blob_loss_weights_dict
@property
def _Net_params(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""
if not hasattr(self, '_params_dict'):
self._params_dict = OrderedDict([(name, lr.blobs)
for name, lr in zip(
self._layer_names, self.layers)
if len(lr.blobs) > 0])
return self._params_dict
@property
def _Net_inputs(self):
if not hasattr(self, '_input_list'):
keys = list(self.blobs.keys())
self._input_list = [keys[i] for i in self._inputs]
return self._input_list
@property
def _Net_outputs(self):
if not hasattr(self, '_output_list'):
keys = list(self.blobs.keys())
self._output_list = [keys[i] for i in self._outputs]
return self._output_list
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
"""
Forward pass: prepare inputs and run the net forward.
Parameters
----------
blobs : list of blobs to return in addition to output blobs.
kwargs : Keys are input blob names and values are blob ndarrays.
For formatting inputs for Caffe, see Net.preprocess().
If None, input is taken from data layers.
start : optional name of layer at which to begin the forward pass
end : optional name of layer at which to finish the forward pass
(inclusive)
Returns
-------
outs : {blob name: blob ndarray} dict.
"""
if blobs is None:
blobs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = 0
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + blobs)
else:
end_ind = len(self.layers) - 1
outputs = set(self.outputs + blobs)
if kwargs:
if set(kwargs.keys()) != set(self.inputs):
raise Exception('Input blob arguments do not match net inputs.')
# Set input according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for in_, blob in six.iteritems(kwargs):
if blob.shape[0] != self.blobs[in_].shape[0]:
raise Exception('Input is not batch sized')
self.blobs[in_].data[...] = blob
self._forward(start_ind, end_ind)
# Unpack blobs to extract
return {out: self.blobs[out].data for out in outputs}
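# Illustrative sketch of the resulting Net.forward interface (the blob and
# layer names are assumptions):
# out = net.forward() # full pass, returns the output blobs
# out = net.forward(data=batch) # feed the 'data' input blob directly
# out = net.forward(blobs=['conv1'], end='ip') # also return 'conv1', stop at 'ip'
# Each value in `out` is the ndarray held by the corresponding blob.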
def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
"""
Backward pass: prepare diffs and run the net backward.
Parameters
----------
diffs : list of diffs to return in addition to bottom diffs.
kwargs : Keys are output blob names and values are diff ndarrays.
If None, top diffs are taken from forward loss.
start : optional name of layer at which to begin the backward pass
end : optional name of layer at which to finish the backward pass
(inclusive)
Returns
-------
outs: {blob name: diff ndarray} dict.
"""
if diffs is None:
diffs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = len(self.layers) - 1
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + diffs)
else:
end_ind = 0
outputs = set(self.inputs + diffs)
if kwargs:
if set(kwargs.keys()) != set(self.outputs):
raise Exception('Top diff arguments do not match net outputs.')
# Set top diffs according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for top, diff in six.iteritems(kwargs):
if diff.shape[0] != self.blobs[top].shape[0]:
raise Exception('Diff is not batch sized')
self.blobs[top].diff[...] = diff
self._backward(start_ind, end_ind)
# Unpack diffs to extract
return {out: self.blobs[out].diff for out in outputs}
def _Net_forward_all(self, blobs=None, **kwargs):
"""
Run net forward in batches.
Parameters
----------
blobs : list of blobs to extract as in forward()
kwargs : Keys are input blob names and values are blob ndarrays.
Refer to forward().
Returns
-------
all_outs : {blob name: list of blobs} dict.
"""
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
outs = self.forward(blobs=blobs, **batch)
for out, out_blob in six.iteritems(outs):
all_outs[out].extend(out_blob.copy())
# Package in ndarray.
for out in all_outs:
all_outs[out] = np.asarray(all_outs[out])
# Discard padding.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
for out in all_outs:
all_outs[out] = all_outs[out][:-pad]
return all_outs
def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
"""
Run net forward + backward in batches.
Parameters
----------
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Returns
-------
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in six.iteritems(batch_blobs):
all_outs[out].extend(out_blobs.copy())
for diff, out_diffs in six.iteritems(batch_diffs):
all_diffs[diff].extend(out_diffs.copy())
# Package in ndarray.
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = np.asarray(all_outs[out])
all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = all_outs[out][:-pad]
all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs
def _Net_set_input_arrays(self, data, labels):
"""
Set input arrays of the in-memory MemoryDataLayer.
(Note: this is only for networks declared with the memory data layer.)
"""
if labels.ndim == 1:
labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis,
np.newaxis])
return self._set_input_arrays(data, labels)
def _Net_batch(self, blobs):
"""
Batch blob lists according to net's batch size.
Parameters
----------
blobs: Keys blob names and values are lists of blobs (of any length).
Naturally, all the lists should have the same length.
Yields
------
batch: {blob name: list of blobs} dict for a single batch.
"""
num = len(six.next(six.itervalues(blobs)))
batch_size = six.next(six.itervalues(self.blobs)).shape[0]
remainder = num % batch_size
num_batches = num // batch_size
# Yield full batches.
for b in range(num_batches):
i = b * batch_size
yield {name: blobs[name][i:i + batch_size] for name in blobs}
# Yield last padded batch, if any.
if remainder > 0:
padded_batch = {}
for name in blobs:
padding = np.zeros((batch_size - remainder,)
+ blobs[name].shape[1:])
padded_batch[name] = np.concatenate([blobs[name][-remainder:],
padding])
yield padded_batch
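# Worked example of the batching above (sizes are assumptions): with a net
# batch size of 32 and 70 inputs, _batch yields two full batches of 32 and a
# final batch whose last 26 entries are zero padding; forward_all() and
# forward_backward_all() strip that padding from their outputs again.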
def _Net_get_id_name(func, field):
"""
    Generic property that applies func to each layer and collects the results
    into an OrderedDict indexed by layer name.
Used for top_names and bottom_names.
Parameters
----------
func: function id -> [id]
field: implementation field name (cache)
Returns
    -------
A one-parameter function that can be set as a property.
"""
@property
def get_id_name(self):
if not hasattr(self, field):
id_to_name = list(self.blobs)
res = OrderedDict([(self._layer_names[i],
[id_to_name[j] for j in func(self, i)])
for i in range(len(self.layers))])
setattr(self, field, res)
return getattr(self, field)
return get_id_name
# Attach methods to Net.
Net.blobs = _Net_blobs
Net.blob_loss_weights = _Net_blob_loss_weights
Net.params = _Net_params
Net.forward = _Net_forward
Net.backward = _Net_backward
Net.forward_all = _Net_forward_all
Net.forward_backward_all = _Net_forward_backward_all
Net.set_input_arrays = _Net_set_input_arrays
Net._batch = _Net_batch
Net.inputs = _Net_inputs
Net.outputs = _Net_outputs
Net.top_names = _Net_get_id_name(Net._top_ids, "_top_names")
Net.bottom_names = _Net_get_id_name(Net._bottom_ids, "_bottom_names")
| 11,256 | 32.602985 | 89 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/draw.py | """
Caffe network visualization: draw the NetParameter protobuffer.
.. note::
This requires pydot>=1.0.2, which is not included in requirements.txt since
    it requires graphviz and other prerequisites outside the scope of
    Caffe.
"""
from caffe.proto import caffe_pb2
"""
pydot is not supported under python 3 and pydot2 doesn't work properly.
pydotplus works nicely (pip install pydotplus)
"""
try:
# Try to load pydotplus
import pydotplus as pydot
except ImportError:
import pydot
# Internal layer and blob styles.
LAYER_STYLE_DEFAULT = {'shape': 'record',
'fillcolor': '#6495ED',
'style': 'filled'}
NEURON_LAYER_STYLE = {'shape': 'record',
'fillcolor': '#90EE90',
'style': 'filled'}
BLOB_STYLE = {'shape': 'octagon',
'fillcolor': '#E0E0E0',
'style': 'filled'}
def get_pooling_types_dict():
"""Get dictionary mapping pooling type number to type name
"""
desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR
d = {}
for k, v in desc.values_by_name.items():
d[v.number] = k
return d
def get_edge_label(layer):
"""Define edge label based on layer type.
"""
if layer.type == 'Data':
edge_label = 'Batch ' + str(layer.data_param.batch_size)
elif layer.type == 'Convolution' or layer.type == 'Deconvolution':
edge_label = str(layer.convolution_param.num_output)
elif layer.type == 'InnerProduct':
edge_label = str(layer.inner_product_param.num_output)
else:
edge_label = '""'
return edge_label
def get_layer_label(layer, rankdir):
"""Define node label based on layer type.
Parameters
----------
    layer : caffe_pb2.LayerParameter
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
Returns
-------
string :
A label for the current layer
"""
if rankdir in ('TB', 'BT'):
# If graph orientation is vertical, horizontal space is free and
# vertical space is not; separate words with spaces
separator = ' '
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
separator = '\\n'
if layer.type == 'Convolution' or layer.type == 'Deconvolution':
# Outer double quotes needed or else colon characters don't parse
# properly
node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
layer.type,
separator,
layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size._values) else 1,
separator,
layer.convolution_param.stride[0] if len(layer.convolution_param.stride._values) else 1,
separator,
layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0)
elif layer.type == 'Pooling':
pooling_types_dict = get_pooling_types_dict()
node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
pooling_types_dict[layer.pooling_param.pool],
layer.type,
separator,
layer.pooling_param.kernel_size,
separator,
layer.pooling_param.stride,
separator,
layer.pooling_param.pad)
else:
node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type)
return node_label
def choose_color_by_layertype(layertype):
"""Define colors for nodes based on the layer type.
"""
color = '#6495ED' # Default
if layertype == 'Convolution' or layertype == 'Deconvolution':
color = '#FF5050'
elif layertype == 'Pooling':
color = '#FF9900'
elif layertype == 'InnerProduct':
color = '#CC33FF'
return color
def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
"""Create a data structure which represents the `caffe_net`.
Parameters
----------
caffe_net : object
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
label_edges : boolean, optional
Label the edges (default is True).
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
Returns
-------
pydot graph object
"""
pydot_graph = pydot.Dot(caffe_net.name if caffe_net.name else 'Net',
graph_type='digraph',
rankdir=rankdir)
pydot_nodes = {}
pydot_edges = []
for layer in caffe_net.layer:
if phase is not None:
included = False
if len(layer.include) == 0:
included = True
if len(layer.include) > 0 and len(layer.exclude) > 0:
raise ValueError('layer ' + layer.name + ' has both include '
'and exclude specified.')
for layer_phase in layer.include:
included = included or layer_phase.phase == phase
for layer_phase in layer.exclude:
included = included and not layer_phase.phase == phase
if not included:
continue
node_label = get_layer_label(layer, rankdir)
node_name = "%s_%s" % (layer.name, layer.type)
if (len(layer.bottom) == 1 and len(layer.top) == 1 and
layer.bottom[0] == layer.top[0]):
# We have an in-place neuron layer.
pydot_nodes[node_name] = pydot.Node(node_label,
**NEURON_LAYER_STYLE)
else:
layer_style = LAYER_STYLE_DEFAULT
layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
for bottom_blob in layer.bottom:
pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob,
**BLOB_STYLE)
edge_label = '""'
pydot_edges.append({'src': bottom_blob + '_blob',
'dst': node_name,
'label': edge_label})
for top_blob in layer.top:
pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob))
if label_edges:
edge_label = get_edge_label(layer)
else:
edge_label = '""'
pydot_edges.append({'src': node_name,
'dst': top_blob + '_blob',
'label': edge_label})
# Now, add the nodes and edges to the graph.
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edge in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edge['src']],
pydot_nodes[edge['dst']],
label=edge['label']))
return pydot_graph
def draw_net(caffe_net, rankdir, ext='png', phase=None):
"""Draws a caffe net and returns the image string encoded using the given
extension.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
ext : string, optional
The image extension (the default is 'png').
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
Returns
-------
string :
        Image data of the rendered net graph, encoded in the requested format.
"""
return get_pydot_graph(caffe_net, rankdir, phase=phase).create(format=ext)
def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None):
"""Draws a caffe net, and saves it to file using the format given as the
file extension. Use '.raw' to output raw text that you can manually feed
to graphviz to draw graphs.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
filename : string
The path to a file where the networks visualization will be stored.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
"""
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
fid.write(draw_net(caffe_net, rankdir, ext, phase))
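# Illustrative usage sketch (the prototxt path is an assumption):
# from google.protobuf import text_format
# net = caffe_pb2.NetParameter()
# with open('train_val.prototxt') as f:
# text_format.Merge(f.read(), net)
# draw_net_to_file(net, 'net.png', rankdir='BT', phase=caffe_pb2.TEST)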
| 8,813 | 34.97551 | 120 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/io.py | import numpy as np
import skimage.io
from scipy.ndimage import zoom
from skimage.transform import resize
try:
# Python3 will most likely not be able to load protobuf
from caffe.proto import caffe_pb2
except:
import sys
if sys.version_info >= (3, 0):
print("Failed to include caffe_pb2, things might go wrong!")
else:
raise
## proto / datum / ndarray conversion
def blobproto_to_array(blob, return_diff=False):
"""
    Convert a blob proto to an array. By default, we will just return the data,
unless return_diff is True, in which case we will return the diff.
"""
# Read the data into an array
if return_diff:
data = np.array(blob.diff)
else:
data = np.array(blob.data)
# Reshape the array
if blob.HasField('num') or blob.HasField('channels') or blob.HasField('height') or blob.HasField('width'):
# Use legacy 4D shape
return data.reshape(blob.num, blob.channels, blob.height, blob.width)
else:
return data.reshape(blob.shape.dim)
def array_to_blobproto(arr, diff=None):
"""Converts a N-dimensional array to blob proto. If diff is given, also
    convert the diff. You need to make sure that arr and diff have the same
    shape; this function does not do a sanity check.
"""
blob = caffe_pb2.BlobProto()
blob.shape.dim.extend(arr.shape)
blob.data.extend(arr.astype(float).flat)
if diff is not None:
blob.diff.extend(diff.astype(float).flat)
return blob
def arraylist_to_blobprotovector_str(arraylist):
"""Converts a list of arrays to a serialized blobprotovec, which could be
then passed to a network for processing.
"""
vec = caffe_pb2.BlobProtoVector()
vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist])
return vec.SerializeToString()
def blobprotovector_str_to_arraylist(str):
"""Converts a serialized blobprotovec to a list of arrays.
"""
vec = caffe_pb2.BlobProtoVector()
vec.ParseFromString(str)
return [blobproto_to_array(blob) for blob in vec.blobs]
def array_to_datum(arr, label=None):
"""Converts a 3-dimensional array to datum. If the array has dtype uint8,
the output data will be encoded as a string. Otherwise, the output data
will be stored in float format.
"""
if arr.ndim != 3:
raise ValueError('Incorrect array shape.')
datum = caffe_pb2.Datum()
datum.channels, datum.height, datum.width = arr.shape
if arr.dtype == np.uint8:
datum.data = arr.tostring()
else:
datum.float_data.extend(arr.flat)
if label is not None:
datum.label = label
return datum
def datum_to_array(datum):
"""Converts a datum to an array. Note that the label is not returned,
as one can easily get it by calling datum.label.
"""
if len(datum.data):
return np.fromstring(datum.data, dtype=np.uint8).reshape(
datum.channels, datum.height, datum.width)
else:
return np.array(datum.float_data).astype(float).reshape(
datum.channels, datum.height, datum.width)
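# Illustrative round trip (the shape and label are assumptions):
# arr = np.random.randint(0, 256, (3, 32, 32)).astype(np.uint8)
# datum = array_to_datum(arr, label=7)
# restored = datum_to_array(datum) # equal to arr; label via datum.label
# uint8 arrays travel through datum.data; other dtypes use datum.float_data.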
## Pre-processing
class Transformer:
"""
Transform input for feeding into a Net.
Note: this is mostly for illustrative purposes and it is likely better
to define your own input preprocessing routine for your needs.
Parameters
----------
net : a Net for which the input should be prepared
"""
def __init__(self, inputs):
self.inputs = inputs
self.transpose = {}
self.channel_swap = {}
self.raw_scale = {}
self.mean = {}
self.input_scale = {}
def __check_input(self, in_):
if in_ not in self.inputs:
raise Exception('{} is not one of the net inputs: {}'.format(
in_, self.inputs))
def preprocess(self, in_, data):
"""
Format input for Caffe:
- convert to single
- resize to input dimensions (preserving number of channels)
- transpose dimensions to K x H x W
- reorder channels (for instance color to BGR)
- scale raw input (e.g. from [0, 1] to [0, 255] for ImageNet models)
- subtract mean
- scale feature
Parameters
----------
in_ : name of input blob to preprocess for
data : (H' x W' x K) ndarray
Returns
-------
caffe_in : (K x H x W) ndarray for input to a Net
"""
self.__check_input(in_)
caffe_in = data.astype(np.float32, copy=False)
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
in_dims = self.inputs[in_][2:]
if caffe_in.shape[:2] != in_dims:
caffe_in = resize_image(caffe_in, in_dims)
if transpose is not None:
caffe_in = caffe_in.transpose(transpose)
if channel_swap is not None:
caffe_in = caffe_in[channel_swap, :, :]
if raw_scale is not None:
caffe_in *= raw_scale
if mean is not None:
caffe_in -= mean
if input_scale is not None:
caffe_in *= input_scale
return caffe_in
def deprocess(self, in_, data):
"""
Invert Caffe formatting; see preprocess().
"""
self.__check_input(in_)
decaf_in = data.copy().squeeze()
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
if input_scale is not None:
decaf_in /= input_scale
if mean is not None:
decaf_in += mean
if raw_scale is not None:
decaf_in /= raw_scale
if channel_swap is not None:
decaf_in = decaf_in[np.argsort(channel_swap), :, :]
if transpose is not None:
decaf_in = decaf_in.transpose(np.argsort(transpose))
return decaf_in
def set_transpose(self, in_, order):
"""
        Set the order of dimensions, e.g. (2, 0, 1) to convert an
        H x W x K image into the K x H x W layout that Caffe expects.
        Parameters
        ----------
        in_ : which input to assign this dimension order
        order : the order to transpose the dimensions
"""
self.__check_input(in_)
if len(order) != len(self.inputs[in_]) - 1:
raise Exception('Transpose order needs to have the same number of '
'dimensions as the input.')
self.transpose[in_] = order
def set_channel_swap(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
N.B. this assumes the channels are the first dimension AFTER transpose.
Parameters
----------
in_ : which input to assign this channel order
order : the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
self.__check_input(in_)
if len(order) != self.inputs[in_][1]:
raise Exception('Channel swap needs to have the same number of '
'dimensions as the input channels.')
self.channel_swap[in_] = order
def set_raw_scale(self, in_, scale):
"""
Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.raw_scale[in_] = scale
def set_mean(self, in_, mean):
"""
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
"""
self.__check_input(in_)
ms = mean.shape
if mean.ndim == 1:
# broadcast channels
if ms[0] != self.inputs[in_][1]:
raise ValueError('Mean channels incompatible with input.')
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if len(ms) == 2:
ms = (1,) + ms
if len(ms) != 3:
raise ValueError('Mean shape invalid')
if ms != self.inputs[in_][1:]:
raise ValueError('Mean shape incompatible with input shape.')
self.mean[in_] = mean
def set_input_scale(self, in_, scale):
"""
Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.input_scale[in_] = scale
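# Illustrative configuration sketch for an ImageNet-style model (the blob name,
# input shape, mean, and scales are assumptions):
# t = Transformer({'data': (1, 3, 227, 227)})
# t.set_transpose('data', (2, 0, 1)) # H x W x K -> K x H x W
# t.set_channel_swap('data', (2, 1, 0)) # RGB -> BGR
# t.set_raw_scale('data', 255) # [0, 1] -> [0, 255]
# t.set_mean('data', np.array([104.0, 117.0, 123.0]))
# blob = t.preprocess('data', load_image('cat.jpg'))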
## Image IO
def load_image(filename, color=True):
"""
Load an image converting from grayscale or alpha as needed.
Parameters
----------
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
-------
image : an image with type np.float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
"""
img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
if im.shape[-1] == 1 or im.shape[-1] == 3:
im_min, im_max = im.min(), im.max()
if im_max > im_min:
# skimage is fast but only understands {1,3} channel images
# in [0, 1].
im_std = (im - im_min) / (im_max - im_min)
resized_std = resize(im_std, new_dims, order=interp_order)
resized_im = resized_std * (im_max - im_min) + im_min
else:
# the image is a constant -- avoid divide by 0
ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
dtype=np.float32)
ret.fill(im_min)
return ret
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
return resized_im.astype(np.float32)
def oversample(images, crop_dims):
"""
Crop images into the four corners, center, and their mirrored versions.
Parameters
----------
    images : iterable of (H x W x K) ndarrays
crop_dims : (height, width) tuple for the crops.
Returns
-------
crops : (10*N x H x W x K) ndarray of crops for number of inputs N.
"""
# Dimensions and center.
im_shape = np.array(images[0].shape)
crop_dims = np.array(crop_dims)
im_center = im_shape[:2] / 2.0
# Make crop coordinates
h_indices = (0, im_shape[0] - crop_dims[0])
w_indices = (0, im_shape[1] - crop_dims[1])
crops_ix = np.empty((5, 4), dtype=int)
curr = 0
for i in h_indices:
for j in w_indices:
crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1])
curr += 1
crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([
-crop_dims / 2.0,
crop_dims / 2.0
])
crops_ix = np.tile(crops_ix, (2, 1))
# Extract crops
crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1],
im_shape[-1]), dtype=np.float32)
ix = 0
for im in images:
for crop in crops_ix:
crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :]
ix += 1
crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors
return crops
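# Illustrative sketch (the dimensions are assumptions): for one 256 x 256 x 3
# image and crop_dims of (227, 227),
# crops = oversample([im], (227, 227))
# returns a (10, 227, 227, 3) array: the four corner and center crops,
# followed by their horizontally mirrored versions.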
| 12,729 | 32.151042 | 110 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/test/test_coord_map.py | import unittest
import numpy as np
import random
import caffe
from caffe import layers as L
from caffe import params as P
from caffe.coord_map import coord_map_from_to, crop
def coord_net_spec(ks=3, stride=1, pad=0, pool=2, dstride=2, dpad=0):
"""
Define net spec for simple conv-pool-deconv pattern common to all
coordinate mapping tests.
"""
n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[2, 1, 100, 100]))
n.aux = L.Input(shape=dict(dim=[2, 1, 20, 20]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=ks, stride=stride, pad=pad)
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=pool, stride=pool, pad=0)
# for upsampling kernel size is 2x stride
try:
deconv_ks = [s*2 for s in dstride]
except:
deconv_ks = dstride*2
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=deconv_ks, stride=dstride, pad=dpad)
return n
class TestCoordMap(unittest.TestCase):
def setUp(self):
pass
def test_conv_pool_deconv(self):
"""
Map through conv, pool, and deconv.
"""
n = coord_net_spec()
# identity for 2x pool, 2x deconv
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertEquals(a, 1)
self.assertEquals(b, 0)
# shift-by-one for 4x pool, 4x deconv
n = coord_net_spec(pool=4, dstride=4)
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertEquals(a, 1)
self.assertEquals(b, -1)
def test_pass(self):
"""
A pass-through layer (ReLU) and conv (1x1, stride 1, pad 0)
both do identity mapping.
"""
n = coord_net_spec()
ax, a, b = coord_map_from_to(n.deconv, n.data)
n.relu = L.ReLU(n.deconv)
n.conv1x1 = L.Convolution(
n.relu, num_output=10, kernel_size=1, stride=1, pad=0)
for top in [n.relu, n.conv1x1]:
ax_pass, a_pass, b_pass = coord_map_from_to(top, n.data)
self.assertEquals(ax, ax_pass)
self.assertEquals(a, a_pass)
self.assertEquals(b, b_pass)
def test_padding(self):
"""
Padding conv adds offset while padding deconv subtracts offset.
"""
n = coord_net_spec()
ax, a, b = coord_map_from_to(n.deconv, n.data)
pad = random.randint(0, 10)
# conv padding
n = coord_net_spec(pad=pad)
_, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
self.assertEquals(a, a_pad)
self.assertEquals(b - pad, b_pad)
# deconv padding
n = coord_net_spec(dpad=pad)
_, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
self.assertEquals(a, a_pad)
self.assertEquals(b + pad, b_pad)
# pad both to cancel out
n = coord_net_spec(pad=pad, dpad=pad)
_, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
self.assertEquals(a, a_pad)
self.assertEquals(b, b_pad)
def test_multi_conv(self):
"""
Multiple bottoms/tops of a layer are identically mapped.
"""
n = coord_net_spec()
# multi bottom/top
n.conv_data, n.conv_aux = L.Convolution(
n.data, n.aux, ntop=2, num_output=10, kernel_size=5, stride=2,
pad=0)
ax1, a1, b1 = coord_map_from_to(n.conv_data, n.data)
ax2, a2, b2 = coord_map_from_to(n.conv_aux, n.aux)
self.assertEquals(ax1, ax2)
self.assertEquals(a1, a2)
self.assertEquals(b1, b2)
def test_rect(self):
"""
Anisotropic mapping is equivalent to its isotropic parts.
"""
n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)
ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)
self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)
self.assertEquals(a_3x3, a_3x5[0])
self.assertEquals(b_3x3, b_3x5[0])
self.assertEquals(a_5x5, a_3x5[1])
self.assertEquals(b_5x5, b_3x5[1])
def test_nd_conv(self):
"""
ND conv maps the same way in more dimensions.
"""
n = caffe.NetSpec()
# define data with 3 spatial dimensions, otherwise the same net
n.data = L.Input(shape=dict(dim=[2, 3, 100, 100, 100]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=[3, 3, 3], stride=[1, 1, 1],
pad=[0, 1, 2])
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=4, stride=2, pad=0)
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertTrue(len(a) == len(b))
self.assertTrue(np.all(a == 1))
self.assertEquals(b[0] - 1, b[1])
self.assertEquals(b[1] - 1, b[2])
def test_crop_of_crop(self):
"""
Map coordinates through Crop layer:
crop an already-cropped output to the input and check change in offset.
"""
n = coord_net_spec()
offset = random.randint(0, 10)
ax, a, b = coord_map_from_to(n.deconv, n.data)
n.crop = L.Crop(n.deconv, n.data, axis=2, offset=offset)
ax_crop, a_crop, b_crop = coord_map_from_to(n.crop, n.data)
self.assertEquals(ax, ax_crop)
self.assertEquals(a, a_crop)
self.assertEquals(b + offset, b_crop)
def test_crop_helper(self):
"""
Define Crop layer by crop().
"""
n = coord_net_spec()
crop(n.deconv, n.data)
def test_catch_unconnected(self):
"""
Catch mapping spatially unconnected tops.
"""
n = coord_net_spec()
n.ip = L.InnerProduct(n.deconv, num_output=10)
with self.assertRaises(RuntimeError):
coord_map_from_to(n.ip, n.data)
def test_catch_scale_mismatch(self):
"""
Catch incompatible scales, such as when the top to be cropped
is mapped to a differently strided reference top.
"""
n = coord_net_spec(pool=3, dstride=2) # pool 3x but deconv 2x
with self.assertRaises(AssertionError):
crop(n.deconv, n.data)
def test_catch_negative_crop(self):
"""
Catch impossible offsets, such as when the top to be cropped
is mapped to a larger reference top.
"""
n = coord_net_spec(dpad=10) # make output smaller than input
with self.assertRaises(AssertionError):
crop(n.deconv, n.data)
| 6,894 | 34.725389 | 79 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/test/test_python_layer_with_param_str.py | import unittest
import tempfile
import os
import six
import caffe
class SimpleParamLayer(caffe.Layer):
"""A layer that just multiplies by the numeric value of its param string"""
def setup(self, bottom, top):
try:
self.value = float(self.param_str)
except ValueError:
raise ValueError("Parameter string must be a legible float")
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
top[0].data[...] = self.value * bottom[0].data
def backward(self, top, propagate_down, bottom):
bottom[0].diff[...] = self.value * top[0].diff
def python_param_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'mul10' bottom: 'data' top: 'mul10'
python_param { module: 'test_python_layer_with_param_str'
layer: 'SimpleParamLayer' param_str: '10' } }
layer { type: 'Python' name: 'mul2' bottom: 'mul10' top: 'mul2'
python_param { module: 'test_python_layer_with_param_str'
layer: 'SimpleParamLayer' param_str: '2' } }""")
return f.name
@unittest.skipIf('Python' not in caffe.layer_type_list(),
'Caffe built without Python layer support')
class TestLayerWithParam(unittest.TestCase):
def setUp(self):
net_file = python_param_net_file()
self.net = caffe.Net(net_file, caffe.TRAIN)
os.remove(net_file)
def test_forward(self):
x = 8
self.net.blobs['data'].data[...] = x
self.net.forward()
for y in self.net.blobs['mul2'].data.flat:
self.assertEqual(y, 2 * 10 * x)
def test_backward(self):
x = 7
self.net.blobs['mul2'].diff[...] = x
self.net.backward()
for y in self.net.blobs['data'].diff.flat:
self.assertEqual(y, 2 * 10 * x)
| 2,031 | 31.774194 | 79 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/test/test_io.py | import numpy as np
import unittest
import caffe
class TestBlobProtoToArray(unittest.TestCase):
def test_old_format(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
shape = (1,1,10,10)
blob.num, blob.channels, blob.height, blob.width = shape
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr.shape, shape)
def test_new_format(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
blob.shape.dim.extend(list(data.shape))
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr.shape, data.shape)
def test_no_shape(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
with self.assertRaises(ValueError):
caffe.io.blobproto_to_array(blob)
def test_scalar(self):
data = np.ones((1)) * 123
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr, 123)
class TestArrayToDatum(unittest.TestCase):
def test_label_none_size(self):
# Set label
d1 = caffe.io.array_to_datum(
np.ones((10,10,3)), label=1)
# Don't set label
d2 = caffe.io.array_to_datum(
np.ones((10,10,3)))
# Not setting the label should result in a smaller object
self.assertGreater(
len(d1.SerializeToString()),
len(d2.SerializeToString()))
| 1,694 | 28.736842 | 65 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/test/test_solver.py | import unittest
import tempfile
import os
import numpy as np
import six
import caffe
from test_net import simple_net_file
class TestSolver(unittest.TestCase):
def setUp(self):
self.num_output = 13
net_f = simple_net_file(self.num_output)
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write("""net: '""" + net_f + """'
test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9
weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75
display: 100 max_iter: 100 snapshot_after_train: false
snapshot_prefix: "model" """)
f.close()
self.solver = caffe.SGDSolver(f.name)
# also make sure get_solver runs
caffe.get_solver(f.name)
caffe.set_mode_cpu()
# fill in valid labels
self.solver.net.blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.solver.net.blobs['label'].data.shape)
self.solver.test_nets[0].blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.solver.test_nets[0].blobs['label'].data.shape)
os.remove(f.name)
os.remove(net_f)
def test_solve(self):
self.assertEqual(self.solver.iter, 0)
self.solver.solve()
self.assertEqual(self.solver.iter, 100)
def test_net_memory(self):
"""Check that nets survive after the solver is destroyed."""
nets = [self.solver.net] + list(self.solver.test_nets)
self.assertEqual(len(nets), 2)
del self.solver
total = 0
for net in nets:
for ps in six.itervalues(net.params):
for p in ps:
total += p.data.sum() + p.diff.sum()
for bl in six.itervalues(net.blobs):
total += bl.data.sum() + bl.diff.sum()
def test_snapshot(self):
self.solver.snapshot()
# Check that these files exist and then remove them
files = ['model_iter_0.caffemodel', 'model_iter_0.solverstate']
for fn in files:
assert os.path.isfile(fn)
os.remove(fn)
| 2,165 | 33.380952 | 76 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/test/test_layer_type_list.py | import unittest
import caffe
class TestLayerTypeList(unittest.TestCase):
def test_standard_types(self):
#removing 'Data' from list
for type_name in ['Data', 'Convolution', 'InnerProduct']:
self.assertIn(type_name, caffe.layer_type_list(),
'%s not in layer_type_list()' % type_name)
| 338 | 27.25 | 65 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/test/test_net.py | import unittest
import tempfile
import os
import numpy as np
import six
from collections import OrderedDict
import caffe
def simple_net_file(num_output):
"""Make a simple net prototxt, based on test_net.cpp, returning the name
of the (temporary) file."""
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write("""name: 'testnet' force_backward: true
layer { type: 'DummyData' name: 'data' top: 'data' top: 'label'
dummy_data_param { num: 5 channels: 2 height: 3 width: 4
num: 5 channels: 1 height: 1 width: 1
data_filler { type: 'gaussian' std: 1 }
data_filler { type: 'constant' } } }
layer { type: 'Convolution' name: 'conv' bottom: 'data' top: 'conv'
convolution_param { num_output: 11 kernel_size: 2 pad: 3
weight_filler { type: 'gaussian' std: 1 }
bias_filler { type: 'constant' value: 2 } }
param { decay_mult: 1 } param { decay_mult: 0 }
}
layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip'
inner_product_param { num_output: """ + str(num_output) + """
weight_filler { type: 'gaussian' std: 2.5 }
bias_filler { type: 'constant' value: -3 } } }
layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip' bottom: 'label'
top: 'loss' }""")
f.close()
return f.name
class TestNet(unittest.TestCase):
def setUp(self):
self.num_output = 13
net_file = simple_net_file(self.num_output)
self.net = caffe.Net(net_file, caffe.TRAIN)
# fill in valid labels
self.net.blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.net.blobs['label'].data.shape)
os.remove(net_file)
def test_memory(self):
"""Check that holding onto blob data beyond the life of a Net is OK"""
params = sum(map(list, six.itervalues(self.net.params)), [])
blobs = self.net.blobs.values()
del self.net
# now sum everything (forcing all memory to be read)
total = 0
for p in params:
total += p.data.sum() + p.diff.sum()
for bl in blobs:
total += bl.data.sum() + bl.diff.sum()
def test_forward_backward(self):
self.net.forward()
self.net.backward()
def test_clear_param_diffs(self):
# Run a forward/backward step to have non-zero diffs
self.net.forward()
self.net.backward()
diff = self.net.params["conv"][0].diff
# Check that we have non-zero diffs
self.assertTrue(diff.max() > 0)
self.net.clear_param_diffs()
# Check that the diffs are now 0
self.assertTrue((diff == 0).all())
def test_inputs_outputs(self):
self.assertEqual(self.net.inputs, [])
self.assertEqual(self.net.outputs, ['loss'])
def test_top_bottom_names(self):
self.assertEqual(self.net.top_names,
OrderedDict([('data', ['data', 'label']),
('conv', ['conv']),
('ip', ['ip']),
('loss', ['loss'])]))
self.assertEqual(self.net.bottom_names,
OrderedDict([('data', []),
('conv', ['data']),
('ip', ['conv']),
('loss', ['ip', 'label'])]))
def test_save_and_read(self):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.close()
self.net.save(f.name)
net_file = simple_net_file(self.num_output)
# Test legacy constructor
# should print deprecation warning
caffe.Net(net_file, f.name, caffe.TRAIN)
# Test named constructor
net2 = caffe.Net(net_file, caffe.TRAIN, weights=f.name)
os.remove(net_file)
os.remove(f.name)
for name in self.net.params:
for i in range(len(self.net.params[name])):
self.assertEqual(abs(self.net.params[name][i].data
- net2.params[name][i].data).sum(), 0)
def test_save_hdf5(self):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.close()
self.net.save_hdf5(f.name)
net_file = simple_net_file(self.num_output)
net2 = caffe.Net(net_file, caffe.TRAIN)
net2.load_hdf5(f.name)
os.remove(net_file)
os.remove(f.name)
for name in self.net.params:
for i in range(len(self.net.params[name])):
self.assertEqual(abs(self.net.params[name][i].data
- net2.params[name][i].data).sum(), 0)
class TestLevels(unittest.TestCase):
TEST_NET = """
layer {
name: "data"
type: "DummyData"
top: "data"
dummy_data_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }
}
layer {
name: "NoLevel"
type: "InnerProduct"
bottom: "data"
top: "NoLevel"
inner_product_param { num_output: 1 }
}
layer {
name: "Level0Only"
type: "InnerProduct"
bottom: "data"
top: "Level0Only"
include { min_level: 0 max_level: 0 }
inner_product_param { num_output: 1 }
}
layer {
name: "Level1Only"
type: "InnerProduct"
bottom: "data"
top: "Level1Only"
include { min_level: 1 max_level: 1 }
inner_product_param { num_output: 1 }
}
layer {
name: "Level>=0"
type: "InnerProduct"
bottom: "data"
top: "Level>=0"
include { min_level: 0 }
inner_product_param { num_output: 1 }
}
layer {
name: "Level>=1"
type: "InnerProduct"
bottom: "data"
top: "Level>=1"
include { min_level: 1 }
inner_product_param { num_output: 1 }
}
"""
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.f.write(self.TEST_NET)
self.f.close()
def tearDown(self):
os.remove(self.f.name)
def check_net(self, net, blobs):
net_blobs = [b for b in net.blobs.keys() if 'data' not in b]
self.assertEqual(net_blobs, blobs)
def test_0(self):
net = caffe.Net(self.f.name, caffe.TEST)
self.check_net(net, ['NoLevel', 'Level0Only', 'Level>=0'])
def test_1(self):
net = caffe.Net(self.f.name, caffe.TEST, level=1)
self.check_net(net, ['NoLevel', 'Level1Only', 'Level>=0', 'Level>=1'])
class TestStages(unittest.TestCase):
TEST_NET = """
layer {
name: "data"
type: "DummyData"
top: "data"
dummy_data_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }
}
layer {
name: "A"
type: "InnerProduct"
bottom: "data"
top: "A"
include { stage: "A" }
inner_product_param { num_output: 1 }
}
layer {
name: "B"
type: "InnerProduct"
bottom: "data"
top: "B"
include { stage: "B" }
inner_product_param { num_output: 1 }
}
layer {
name: "AorB"
type: "InnerProduct"
bottom: "data"
top: "AorB"
include { stage: "A" }
include { stage: "B" }
inner_product_param { num_output: 1 }
}
layer {
name: "AandB"
type: "InnerProduct"
bottom: "data"
top: "AandB"
include { stage: "A" stage: "B" }
inner_product_param { num_output: 1 }
}
"""
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.f.write(self.TEST_NET)
self.f.close()
def tearDown(self):
os.remove(self.f.name)
def check_net(self, net, blobs):
net_blobs = [b for b in net.blobs.keys() if 'data' not in b]
self.assertEqual(net_blobs, blobs)
def test_A(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['A'])
self.check_net(net, ['A', 'AorB'])
def test_B(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['B'])
self.check_net(net, ['B', 'AorB'])
def test_AandB(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['A', 'B'])
self.check_net(net, ['A', 'B', 'AorB', 'AandB'])
class TestAllInOne(unittest.TestCase):
TEST_NET = """
layer {
name: "train_data"
type: "DummyData"
top: "data"
top: "label"
dummy_data_param {
shape { dim: 1 dim: 1 dim: 10 dim: 10 }
shape { dim: 1 dim: 1 dim: 1 dim: 1 }
}
include { phase: TRAIN stage: "train" }
}
layer {
name: "val_data"
type: "DummyData"
top: "data"
top: "label"
dummy_data_param {
shape { dim: 1 dim: 1 dim: 10 dim: 10 }
shape { dim: 1 dim: 1 dim: 1 dim: 1 }
}
include { phase: TEST stage: "val" }
}
layer {
name: "deploy_data"
type: "Input"
top: "data"
input_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }
include { phase: TEST stage: "deploy" }
}
layer {
name: "ip"
type: "InnerProduct"
bottom: "data"
top: "ip"
inner_product_param { num_output: 2 }
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip"
bottom: "label"
top: "loss"
include: { phase: TRAIN stage: "train" }
include: { phase: TEST stage: "val" }
}
layer {
name: "pred"
type: "Softmax"
bottom: "ip"
top: "pred"
include: { phase: TEST stage: "deploy" }
}
"""
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.f.write(self.TEST_NET)
self.f.close()
def tearDown(self):
os.remove(self.f.name)
def check_net(self, net, outputs):
self.assertEqual(list(net.blobs['data'].shape), [1,1,10,10])
self.assertEqual(net.outputs, outputs)
def test_train(self):
net = caffe.Net(self.f.name, caffe.TRAIN, stages=['train'])
self.check_net(net, ['loss'])
def test_val(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['val'])
self.check_net(net, ['loss'])
def test_deploy(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['deploy'])
self.check_net(net, ['pred'])
| 9,722 | 27.101156 | 78 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/test/test_net_spec.py | import unittest
import tempfile
import caffe
from caffe import layers as L
from caffe import params as P
def lenet(batch_size):
n = caffe.NetSpec()
n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
dict(dim=[batch_size, 1, 1, 1])],
transform_param=dict(scale=1./255), ntop=2)
n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20,
weight_filler=dict(type='xavier'))
n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50,
weight_filler=dict(type='xavier'))
n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.ip1 = L.InnerProduct(n.pool2, num_output=500,
weight_filler=dict(type='xavier'))
n.relu1 = L.ReLU(n.ip1, in_place=True)
n.ip2 = L.InnerProduct(n.relu1, num_output=10,
weight_filler=dict(type='xavier'))
n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
return n.to_proto()
def anon_lenet(batch_size):
data, label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
dict(dim=[batch_size, 1, 1, 1])],
transform_param=dict(scale=1./255), ntop=2)
conv1 = L.Convolution(data, kernel_size=5, num_output=20,
weight_filler=dict(type='xavier'))
pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
conv2 = L.Convolution(pool1, kernel_size=5, num_output=50,
weight_filler=dict(type='xavier'))
pool2 = L.Pooling(conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
ip1 = L.InnerProduct(pool2, num_output=500,
weight_filler=dict(type='xavier'))
relu1 = L.ReLU(ip1, in_place=True)
ip2 = L.InnerProduct(relu1, num_output=10,
weight_filler=dict(type='xavier'))
loss = L.SoftmaxWithLoss(ip2, label)
return loss.to_proto()
def silent_net():
n = caffe.NetSpec()
n.data, n.data2 = L.DummyData(shape=dict(dim=3), ntop=2)
n.silence_data = L.Silence(n.data, ntop=0)
n.silence_data2 = L.Silence(n.data2, ntop=0)
return n.to_proto()
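# A note on the ntop=0 pattern above: with no tops, the assignment to
# n.silence_data only names the layer (no blob is created), and the
# generated prototxt fragment looks roughly like:
#
# layer { name: "silence_data" type: "Silence" bottom: "data" }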
class TestNetSpec(unittest.TestCase):
def load_net(self, net_proto):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write(str(net_proto))
f.close()
return caffe.Net(f.name, caffe.TEST)
def test_lenet(self):
"""Construct and build the Caffe version of LeNet."""
net_proto = lenet(50)
# check that relu is in-place
self.assertEqual(net_proto.layer[6].bottom,
net_proto.layer[6].top)
net = self.load_net(net_proto)
# check that all layers are present
self.assertEqual(len(net.layers), 9)
        # now check the version with automatically-generated layer names
net_proto = anon_lenet(50)
self.assertEqual(net_proto.layer[6].bottom,
net_proto.layer[6].top)
net = self.load_net(net_proto)
self.assertEqual(len(net.layers), 9)
def test_zero_tops(self):
"""Test net construction for top-less layers."""
net_proto = silent_net()
net = self.load_net(net_proto)
self.assertEqual(len(net.forward()), 0)
| 3,287 | 39.097561 | 77 | py |
bottom-up-attention | bottom-up-attention-master/caffe/python/caffe/test/test_python_layer.py | import unittest
import tempfile
import os
import six
import caffe
class SimpleLayer(caffe.Layer):
"""A layer that just multiplies by ten"""
def setup(self, bottom, top):
pass
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
top[0].data[...] = 10 * bottom[0].data
def backward(self, top, propagate_down, bottom):
bottom[0].diff[...] = 10 * top[0].diff
class ExceptionLayer(caffe.Layer):
"""A layer for checking exceptions from Python"""
def setup(self, bottom, top):
raise RuntimeError
class ParameterLayer(caffe.Layer):
"""A layer that just multiplies by ten"""
def setup(self, bottom, top):
self.blobs.add_blob(1)
self.blobs[0].data[0] = 0
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
pass
def backward(self, top, propagate_down, bottom):
self.blobs[0].diff[0] = 1
class PhaseLayer(caffe.Layer):
"""A layer for checking attribute `phase`"""
def setup(self, bottom, top):
pass
    def reshape(self, bottom, top):
top[0].reshape()
def forward(self, bottom, top):
top[0].data[()] = self.phase
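        # self.phase holds the enum value of the current phase; with the
        # usual bindings caffe.TRAIN == 0 and caffe.TEST == 1, which is
        # exactly what test_phase below reads back out of the top blob.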
def python_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'one' bottom: 'data' top: 'one'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }
layer { type: 'Python' name: 'two' bottom: 'one' top: 'two'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }
layer { type: 'Python' name: 'three' bottom: 'two' top: 'three'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""")
return f.name
def exception_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'
python_param { module: 'test_python_layer' layer: 'ExceptionLayer' } }
""")
return f.name
def parameter_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'
python_param { module: 'test_python_layer' layer: 'ParameterLayer' } }
""")
return f.name
def phase_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
layer { type: 'Python' name: 'layer' top: 'phase'
python_param { module: 'test_python_layer' layer: 'PhaseLayer' } }
""")
return f.name
@unittest.skipIf('Python' not in caffe.layer_type_list(),
'Caffe built without Python layer support')
class TestPythonLayer(unittest.TestCase):
def setUp(self):
net_file = python_net_file()
self.net = caffe.Net(net_file, caffe.TRAIN)
os.remove(net_file)
def test_forward(self):
x = 8
self.net.blobs['data'].data[...] = x
self.net.forward()
for y in self.net.blobs['three'].data.flat:
self.assertEqual(y, 10**3 * x)
def test_backward(self):
x = 7
self.net.blobs['three'].diff[...] = x
self.net.backward()
for y in self.net.blobs['data'].diff.flat:
self.assertEqual(y, 10**3 * x)
def test_reshape(self):
s = 4
self.net.blobs['data'].reshape(s, s, s, s)
self.net.forward()
for blob in six.itervalues(self.net.blobs):
for d in blob.data.shape:
self.assertEqual(s, d)
def test_exception(self):
net_file = exception_net_file()
self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST)
os.remove(net_file)
def test_parameter(self):
net_file = parameter_net_file()
net = caffe.Net(net_file, caffe.TRAIN)
# Test forward and backward
net.forward()
net.backward()
layer = net.layers[list(net._layer_names).index('layer')]
self.assertEqual(layer.blobs[0].data[0], 0)
self.assertEqual(layer.blobs[0].diff[0], 1)
layer.blobs[0].data[0] += layer.blobs[0].diff[0]
self.assertEqual(layer.blobs[0].data[0], 1)
# Test saving and loading
h, caffemodel_file = tempfile.mkstemp()
net.save(caffemodel_file)
layer.blobs[0].data[0] = -1
self.assertEqual(layer.blobs[0].data[0], -1)
net.copy_from(caffemodel_file)
self.assertEqual(layer.blobs[0].data[0], 1)
os.remove(caffemodel_file)
# Test weight sharing
net2 = caffe.Net(net_file, caffe.TRAIN)
net2.share_with(net)
        layer = net2.layers[list(net2._layer_names).index('layer')]
self.assertEqual(layer.blobs[0].data[0], 1)
os.remove(net_file)
def test_phase(self):
net_file = phase_net_file()
for phase in caffe.TRAIN, caffe.TEST:
net = caffe.Net(net_file, phase)
self.assertEqual(net.forward()['phase'], phase)
| 5,510 | 31.609467 | 81 | py |
bottom-up-attention | bottom-up-attention-master/caffe/scripts/cpp_lint.py | #!/usr/bin/python2
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. Nor does it
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpp_lint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
      Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_dir',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'caffe/alt_fn',
'caffe/data_layer_setup',
'caffe/random_fn',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = [
'-build/include_dir',
'-readability/todo',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
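# The two loops above expand to a lookup table; a few illustrative entries:
#
# _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
# _CHECK_REPLACEMENT['EXPECT_TRUE']['<'] == 'EXPECT_LT'
# _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE' # inverted sense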
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
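# For example, in the line "if (x and y)" the pattern captures "and"
# (preceded by a space, followed by a space), and the alt-token check that
# consumes this table would suggest replacing it with "&&".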
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# Finds occurrences of NOLINT[_NEXT_LINE] or NOLINT[_NEXT_LINE](...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# Finds Copyright.
_RE_COPYRIGHT = re.compile(r'Copyright', re.IGNORECASE)
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'hpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
matched = _RE_SUPPRESSION.search(raw_line)
if matched:
if matched.group(1) == '_NEXT_LINE':
linenum += 1
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
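# Illustrative suppression comments accepted by the parser above
# (hypothetical source lines):
#
# magic_number = 42; // NOLINT -- suppresses every category on this line
# long counter; // NOLINT(runtime/int) -- suppresses only runtime/int
# // NOLINT_NEXT_LINE(whitespace/line_length) -- applies to the next line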
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
self.ResetSection()
def ResetSection(self):
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
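# A sketch of the include order this state machine accepts for foo.cc
# (any section may be empty, but the sequence must not move backwards):
#
# #include "foo.h" // _LIKELY_MY_HEADER -> _MY_H_SECTION
# #include <stdio.h> // _C_SYS_HEADER -> _C_SECTION
# #include <vector> // _CPP_SYS_HEADER -> _CPP_SECTION
# #include "bar/baz.h" // _OTHER_HEADER -> _OTHER_H_SECTION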
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # e.g. with base_trigger 250: 250 => 0, 500 => 1, 1000 => 2, 2000 => 3, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removal so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
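# Doctest-style sketch of the parity rule implemented above:
#
# IsCppString('x = "abc') # True -- one unescaped quote, still open
# IsCppString('x = "abc"') # False -- quotes are balanced
# IsCppString(r'x = "a\"b') # True -- the escaped quote is not counted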
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = ''
else:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
"""Find the position just after the matching endchar.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
depth: nesting level at startpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching endchar: (index just after matching endchar, 0)
Otherwise: (-1, new depth at end of this line)
"""
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
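# Minimal example of the contract: scanning 'foo(bar, baz) + 1' from
# startpos 0 with depth 0 returns (13, 0), the index just past the closing
# ')'; an unclosed 'foo(bar,' instead returns (-1, 1) so the caller can
# carry the depth over to the next line.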
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[<':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
if startchar == '<': endchar = '>'
# Check first line
(end_pos, num_open) = FindEndOfExpressionInLine(
line, pos, 0, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, num_open) = FindEndOfExpressionInLine(
line, 0, num_open, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find endchar before end of file, give up
return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
"""Find position at the matching startchar.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
depth: nesting level at endpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching startchar: (index at matching startchar, 0)
Otherwise: (-1, new depth at beginning of this line)
"""
for i in xrange(endpos, -1, -1):
if line[i] == endchar:
depth += 1
elif line[i] == startchar:
depth -= 1
if depth == 0:
return (i, 0)
return (-1, depth)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
endchar = line[pos]
if endchar not in ')}]>':
return (line, 0, -1)
if endchar == ')': startchar = '('
if endchar == ']': startchar = '['
if endchar == '}': startchar = '{'
if endchar == '>': startchar = '<'
# Check last line
(start_pos, num_open) = FindStartOfExpressionInLine(
line, pos, 0, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, num_open) = FindStartOfExpressionInLine(
line, len(line) - 1, num_open, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find startchar before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if a Copyright message appears at the top of the file."""
# We'll check up to line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
    if _RE_COPYRIGHT.search(lines[line]):
error(filename, 0, 'legal/copyright', 5,
'Copyright message found. '
'You should not include a copyright line.')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
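# For instance, a header whose repository-relative path is
# include/caffe/blob.hpp (hypothetical) yields the guard
# INCLUDE_CAFFE_BLOB_HPP_ when --root is unset; with --root=include the
# leading directory is stripped and the guard becomes CAFFE_BLOB_HPP_.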
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
caffe_alt_function_list = (
('memset', ['caffe_set', 'caffe_memset']),
('cudaMemset', ['caffe_gpu_set', 'caffe_gpu_memset']),
('memcpy', ['caffe_copy']),
('cudaMemcpy', ['caffe_copy', 'caffe_gpu_memcpy']),
)
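# Each entry above maps a raw C/CUDA call to the Caffe substitutes that
# CheckCaffeAlternatives (below) suggests in its place, e.g.
# memset(...) -> caffe_set(...) or caffe_memset(...).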
def CheckCaffeAlternatives(filename, clean_lines, linenum, error):
"""Checks for C(++) functions for which a Caffe substitute should be used.
For certain native C functions (memset, memcpy), there is a Caffe alternative
which should be used instead.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for function, alts in caffe_alt_function_list:
ix = line.find(function + '(')
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
disp_alts = ['%s(...)' % alt for alt in alts]
error(filename, linenum, 'caffe/alt_fn', 2,
'Use Caffe function %s instead of %s(...).' %
(' or '.join(disp_alts), function))
def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
"""Except the base classes, Caffe DataLayer should define DataLayerSetUp
instead of LayerSetUp.
The base DataLayers define common SetUp steps, the subclasses should
not override them.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
ix = line.find('DataLayer<Dtype>::LayerSetUp')
if ix >= 0 and (
line.find('void DataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void ImageDataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void MemoryDataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void WindowDataLayer<Dtype>::LayerSetUp') != -1):
error(filename, linenum, 'caffe/data_layer_setup', 2,
'Except the base classes, Caffe DataLayer should define'
+ ' DataLayerSetUp instead of LayerSetUp. The base DataLayers'
+ ' define common SetUp steps, the subclasses should'
+ ' not override them.')
ix = line.find('DataLayer<Dtype>::DataLayerSetUp')
if ix >= 0 and (
line.find('void Base') == -1 and
line.find('void DataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void ImageDataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void MemoryDataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void WindowDataLayer<Dtype>::DataLayerSetUp') == -1):
error(filename, linenum, 'caffe/data_layer_setup', 2,
'Except the base classes, Caffe DataLayer should define'
+ ' DataLayerSetUp instead of LayerSetUp. The base DataLayers'
+ ' define common SetUp steps, the subclasses should'
+ ' not override them.')
c_random_function_list = (
'rand(',
'rand_r(',
'random(',
)
def CheckCaffeRandom(filename, clean_lines, linenum, error):
"""Checks for calls to C random functions (rand, rand_r, random, ...).
Caffe code should (almost) always use the caffe_rng_* functions rather
than these, as the internal state of these C functions is independent of the
native Caffe RNG system which should produce deterministic results for a
fixed Caffe seed set using Caffe::set_random_seed(...).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for function in c_random_function_list:
ix = line.find(function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'caffe/random_fn', 2,
'Use caffe_rng_rand() (or other caffe_rng_* function) instead of '
+ function +
') to ensure results are deterministic for a fixed Caffe seed.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
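# Each pair above maps a thread-unsafe libc call to its reentrant "_r"
# counterpart; e.g. localtime() returns a pointer to static storage, while
# localtime_r() writes into a caller-supplied struct tm.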
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
  Much code was originally written without consideration for
  multi-threading, and many engineers learned POSIX before its threading
  extensions were added. These checks steer them toward the thread-safe
  variants (when using POSIX directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
  For example, the following function:
void increment_counter(int* count) {
*count++;
}
  is invalid because it effectively does count++ (moving the pointer) and
  should be replaced with ++*count, (*count)++, or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum])
if initial_indent:
self.class_indent = len(initial_indent.group(1))
else:
self.class_indent = 0
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
    # Check how many lines are enclosed in this namespace.  Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminates namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
    # get false negatives when the existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class _NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Update pp_stack first
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if they aren't followed by whitespace; this
      # is so that we don't confuse our namespace checker.  The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
#
# Templates with class arguments may confuse the parser, for example:
# template <class T
# class Comparator = less<T>,
# class Vector = vector<T> >
# class HeapQueue {
#
# Because this parser has no nesting state about templates, by the
# time it saw "class Comparator", it may think that it's a new class.
# Nested templates have a similar problem:
# template <
# typename ExportedType,
# typename TupleType,
# template <typename, typename> class ImplTemplate>
#
# To avoid these cases, we ignore classes that are followed by '=' or '>'
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
self.stack.append(_ClassInfo(
class_decl_match.group(4), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(5)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
        # If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions coz they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
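# Illustrative examples for CheckComment below: '// TODO(alice): refactor' is
# well-formed, while '//  TODO: refactor' draws both a whitespace/todo warning
# (too many spaces before TODO) and a readability/todo warning (missing
# username).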
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_EVIL_CONSTRUCTORS|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
"""Find the corresponding > to close a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_suffix: Remainder of the current line after the initial <.
Returns:
True if a matching bracket exists.
"""
line = init_suffix
nesting_stack = ['<']
while True:
# Find the next operator that can tell us whether < is used as an
# opening bracket or as a less-than operator. We only want to
# warn on the latter case.
#
# We could also check all other operators and terminate the search
# early, e.g. if we got something like this "a<b+c", the "<" is
# most likely a less-than operator, but then we will get false
# positives for default arguments and other template expressions.
match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(1)
line = match.group(2)
if nesting_stack[-1] == '<':
# Expecting closing angle bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator == '>':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma after a bracket, this is most likely a template
# argument. We have not seen a closing angle bracket yet, but
# it's probably a few lines later if we look for it, so just
# return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting closing parenthesis or closing bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator in (')', ']'):
# We don't bother checking for matching () or []. If we got
# something like (] or [), it would have been a syntax error.
nesting_stack.pop()
else:
# Scan the next line
linenum += 1
if linenum >= len(clean_lines.elided):
break
line = clean_lines.elided[linenum]
# Exhausted all remaining lines and still no matching angle bracket.
# Most likely the input was incomplete, otherwise we should have
# seen a semicolon and returned early.
return True
def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
"""Find the corresponding < that started a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_prefix: Part of the current line before the initial >.
Returns:
True if a matching bracket exists.
"""
line = init_prefix
nesting_stack = ['>']
while True:
# Find the previous operator
match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(2)
line = match.group(1)
if nesting_stack[-1] == '>':
# Expecting opening angle bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator == '<':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma before a bracket, this is most likely a
# template argument. The opening angle bracket is probably
# there if we look for it, so just return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting opening parenthesis or opening bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator in ('(', '['):
nesting_stack.pop()
else:
# Scan the previous line
linenum -= 1
if linenum < 0:
break
line = clean_lines.elided[linenum]
# Exhausted all earlier lines and still no matching angle bracket.
return False
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
if IsBlankLine(line) and not nesting_state.InNamespaceBody():
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80 column line when placed on
      # the same line as the function name).  We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
        # simple heuristic here: if the line is indented 4 spaces and we have a
        # closing paren (without the opening paren) followed by an opening brace
        # or colon (for initializer lists), we assume that it is the last line of
        # a function header.  If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or C++ style Doxygen comments placed after the variable:
# ///< Header comment
# //!< Header comment
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^!< ', line[commentend:]) or
Search(r'^/< ', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
  line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', r'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
# Also ignore using ns::operator<<;
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
if (match and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
elif not Match(r'#.*include', line):
# Avoid false positives on ->
reduced_line = line.replace('->', '')
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
if (match and
not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
if (match and
not FindPreviousMatchingAngleBracket(clean_lines, linenum,
match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
  # except for a few corner cases
  # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
  # space after the ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<]".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
  # These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
Search(r'\s+=\s*$', line_prefix)):
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
check_macro = None
start_pos = -1
for macro in _CHECK_MACROS:
i = lines[linenum].find(macro)
if i >= 0:
check_macro = macro
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
if not matched:
continue
start_pos = len(matched.group(1))
break
if not check_macro or start_pos < 0:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
  # Check that rhs does not contain logical operators.  We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
  # it provides a way to work around this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
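# Illustrative self-check (added here for clarity; not part of the upstream
# cpplint.py file).  It demonstrates how GetLineWidth counts East Asian wide
# characters as two columns and plain ASCII as one.  Call it manually when
# experimenting with the line-length checks; nothing below runs at import time.
def _ExampleGetLineWidth():
  # Plain byte strings fall through to len().
  assert GetLineWidth('abc') == 3
  # u'\u4e2d\u6587' is two fullwidth CJK characters, so the width is 4.
  assert GetLineWidth(u'\u4e2d\u6587') == 4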
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations where we allow one space, notably for
  # section labels.
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
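# Minimal usage sketch (added for illustration; not in the upstream file):
# exercises the suffix-dropping behavior documented in the docstring above.
# It is not wired into any test runner; invoke it by hand if needed.
def _ExampleDropCommonSuffixes():
  assert _DropCommonSuffixes('foo/foo-inl.h') == 'foo/foo'
  assert _DropCommonSuffixes('foo/bar/foo.cc') == 'foo/bar/foo'
  assert _DropCommonSuffixes('foo/foo_internal.h') == 'foo/foo'
  # An unrecognized suffix is left alone; only the extension is stripped.
  assert (_DropCommonSuffixes('foo/foo_unusualinternal.h') ==
          'foo/foo_unusualinternal')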
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
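# Tiny illustrative check (an addition, not part of the original file):
# _IsTestFilename keys purely off the filename suffix.
def _ExampleIsTestFilename():
  assert _IsTestFilename('foo_test.cc')
  assert _IsTestFilename('bar_unittest.cc')
  assert not _IsTestFilename('foo.cc')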
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
  # lives in the same directory, then it's likely to be owned by the target
  # file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
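# Hedged usage sketch (added; not part of upstream cpplint.py).  For system
# includes the classification depends only on membership in _CPP_HEADERS, so
# these checks are filesystem independent.  FileInfo and the _*_HEADER
# constants are assumed to be the ones defined earlier in this file.
def _ExampleClassifyInclude():
  assert (_ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) ==
          _C_SYS_HEADER)
  assert (_ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) ==
          _CPP_SYS_HEADER)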
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include_dir', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol.  This properly
  handles nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string ending with an opening punctuation symbol.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
  # Map each opening punctuation character to its matching closing character.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
  assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
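# Illustrative sketch (added; not in the upstream file): reproduces the
# example from the docstring above and shows that unbalanced input yields
# None.
def _ExampleGetTextInside():
  assert _GetTextInside('printf(a(), b(c()));', r'printf\(') == 'a(), b(c())'
  assert _GetTextInside('printf(oops;', r'printf\(') is None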
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
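# Hedged example (an illustrative addition, not upstream code): shows how the
# reference-parameter patterns above separate const references from mutable
# ones, mirroring the logic used later in CheckForNonConstReference.
def _ExampleRefParamPatterns():
  decls = 'void Foo(const string& a, string& b)'
  params = re.findall(_RE_PATTERN_REF_PARAM, decls)
  assert params == ['const string& a', 'string& b']
  flagged = [p for p in params if not Match(_RE_PATTERN_CONST_REF_PARAM, p)]
  assert flagged == ['string& b']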
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
include_state.ResetSection()
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
if match:
matched_new = match.group(1)
matched_type = match.group(2)
matched_funcptr = match.group(3)
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts. Likewise, gMock's
# MockCallback takes a template parameter of the form return_type(arg_type),
# which looks much like the cast we're trying to detect.
#
# std::function<> wrapper has a similar problem.
#
# Return types for function pointers also look like casts if they
# don't have an extra space.
if (matched_new is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Search(r'\bMockCallback<.*>', line) or
Search(r'\bstd::function<.*>', line)) and
not (matched_funcptr and
Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr))):
# Try a bit harder to catch gmock lines: the only place where
# something looks like an old-style cast is where we declare the
# return type of the mocked method, and the only time when we
# are missing context is if MOCK_METHOD was split across
# multiple lines. The missing MOCK_METHOD is usually one or two
# lines back, so scan back one or two lines.
#
# It's not possible for gmock macros to appear in the first 2
# lines, since the class head + section name takes up 2 lines.
if (linenum < 2 or
not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]))):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
match = Search(
r'(?:&\(([^)]+)\)[\w(])|'
r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
#
# Also ignore things that look like operators. These are matched separately
# because operator names cross non-word boundaries. If we change the pattern
# above, we would decrease the accuracy of matching identifiers.
if (match and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(sugawarayu): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
  # be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for preprocessor directives.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
check_params = False
if not nesting_state.stack:
check_params = True # top level
elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
check_params = True # within class or namespace
elif Match(r'.*{\s*$', line):
if (len(nesting_state.stack) == 1 or
isinstance(nesting_state.stack[-2], _ClassInfo) or
isinstance(nesting_state.stack[-2], _NamespaceInfo)):
check_params = True # just opened global/class/namespace block
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
check_params = False
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
check_params = False
break
if check_params:
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# Exclude lines with sizeof, since sizeof looks like a cast.
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
return False
# operator++(int) and operator--(int)
if (line[0:match.start(1) - 1].endswith(' operator++') or
line[0:match.start(1) - 1].endswith(' operator--')):
return False
  # A single unnamed argument for a function tends to look like an
  # old-style cast.  If we see those, don't issue warnings for deprecated
  # casts; instead, issue warnings for unnamed arguments where
  # appropriate.
  #
  # These are things that we want warnings for, since the style guide
  # explicitly requires all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
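# Hedged usage sketch (added for illustration; not part of upstream
# cpplint.py): drives CheckCStyleCast directly, with a small collector
# standing in for the normal error handler.  The filename and line number
# passed below are arbitrary placeholder values.
def _ExampleCheckCStyleCast():
  caught = []
  def _Collect(filename, linenum, category, confidence, message):
    caught.append((category, message))
  line = 'int x = (int)y;'
  CheckCStyleCast('example.cc', 1, line, line, 'static_cast',
                  r'\((int|float|double|bool|char|u?int(16|32|64))\)',
                  _Collect)
  assert caught and caught[0][0] == 'readability/casting'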
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
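# Illustrative sketch (an addition; not upstream code): shows how the
# precompiled template patterns map a use of std::vector<> back to the header
# that must be included.
def _ExampleTemplatePatterns():
  line = 'std::vector<int> v;'
  hits = [(template, header)
          for pattern, template, header in _re_pattern_templates
          if pattern.search(line)]
  assert ('vector<>', '<vector>') in hits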
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
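# Minimal sketch (added for illustration; not in the upstream file):
# reproduces the example given in the docstring above, including the prefix
# needed to open the header.
def _ExampleFilesBelongToSameModule():
  same, prefix = FilesBelongToSameModule(
      '/absolute/path/to/base/sysinfo.cc', 'base/sysinfo.h')
  assert same and prefix == '/absolute/path/to/'
  assert FilesBelongToSameModule('foo/foo.cc', 'bar/bar.h') == (False, '')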
def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
    # The following check is just a speed-up; no semantics are changed.
    if '<' not in line:  # Reduces the CPU time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_state.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_state, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
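

# Editor's sketch, not part of upstream cpplint and never called by it: a
# small helper showing which C++ spellings the explicit-make_pair pattern
# above does and does not match. The C++ lines are invented examples.
def _example_explicit_make_pair():
  flagged = 'return std::make_pair<int, int>(a, b);'  # explicit template args
  deduced = 'return std::make_pair(a, b);'  # deduced arguments, no warning
  assert _RE_PATTERN_EXPLICIT_MAKEPAIR.search(flagged) is not None
  assert _RE_PATTERN_EXPLICIT_MAKEPAIR.search(deduced) is None

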
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
    clean_lines: A CleansedLines instance containing the file, with comments
                 stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 5 arguments:
           filename, line number, error category, confidence level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckCaffeAlternatives(filename, clean_lines, line, error)
CheckCaffeDataLayerSetUp(filename, clean_lines, line, error)
CheckCaffeRandom(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
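

# Editor's sketch, not part of upstream cpplint and never called by it: the
# shapes ProcessLine expects for the `error` callable and for an entry in
# `extra_check_functions`. The 'readability/todo' category and the FIXME rule
# are invented for illustration.
def _example_process_line_hooks():
  def my_error(filename, linenum, category, confidence, message):
    sys.stderr.write('%s:%d:  %s  [%s] [%d]\n' %
                     (filename, linenum, message, category, confidence))

  def my_extra_check(filename, clean_lines, linenum, error):
    if 'FIXME' in clean_lines.elided[linenum]:
      error(filename, linenum, 'readability/todo', 2,
            'Prefer TODO(username) over FIXME')

  return my_error, my_extra_check

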
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 5 arguments:
           filename, line number, error category, confidence level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = _NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
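

# Editor's sketch, not part of upstream cpplint and never called by it: how
# ProcessFileData can lint an in-memory string directly. The file name
# 'buffer.cc' and the verbosity level are arbitrary; Error and _cpplint_state
# are the reporting function and global state defined elsewhere in this file.
def _example_lint_buffer(source_text):
  _SetVerboseLevel(1)
  _cpplint_state.ResetErrorCounts()
  ProcessFileData('buffer.cc', 'cc', source_text.split('\n'), Error)
  return _cpplint_state.error_count

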
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
if carriage_return_found and os.linesep != '\r\n':
# Use 0 for linenum since outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 1,
            'One or more unexpected \\r (^M) found; '
            'better to use only a \\n')
sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
    args: The command line arguments.
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
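
# Editor's note, not part of upstream cpplint: example invocations implied by
# the options parsed above (file names are made up):
#
#   python cpplint.py --verbose=3 --linelength=100 foo.cc bar.h
#   python cpplint.py --filter=-whitespace,+whitespace/newline foo.cc
#   cat foo.cc | python cpplint.py -     # '-' reads the source from stdin
#   python cpplint.py --filter=          # an empty filter lists all categories
#
# Filter entries are '+'/'-' prefixed category names; an empty --filter=
# triggers PrintCategories() above.
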
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| 187,450 | 37.49887 | 93 | py |
bottom-up-attention | bottom-up-attention-master/caffe/scripts/split_caffe_proto.py | #!/usr/bin/env python
import mmap
import re
import os
import errno
script_path = os.path.dirname(os.path.realpath(__file__))
# a regex to match the parameter definitions in caffe.proto
r = re.compile(r'(?://.*\n)*message ([^ ]*) \{\n(?: .*\n|\n)*\}')
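
# Editor's illustration, not part of the original script: a made-up fragment of
# the kind of block the regex above captures. Leading '//' comment lines are
# part of the match, and group(1) is the message name ('ExampleParameter'):
#
#   // Hypothetical comment describing the message.
#   message ExampleParameter {
#     optional int32 example_field = 1;
#   }
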
# create directory to put caffe.proto fragments
try:
os.mkdir(
os.path.join(script_path,
'../docs/_includes/'))
os.mkdir(
os.path.join(script_path,
'../docs/_includes/proto/'))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
caffe_proto_fn = os.path.join(
script_path,
'../src/caffe/proto/caffe.proto')
with open(caffe_proto_fn, 'r') as fin:
    # Note: Pattern.finditer() takes (string, pos, endpos), so a flag such as
    # re.MULTILINE passed here would be treated as a start offset; flags
    # belong in re.compile() above.
    for m in r.finditer(fin.read()):
fn = os.path.join(
script_path,
'../docs/_includes/proto/%s.txt' % m.group(1))
with open(fn, 'w') as fout:
fout.write(m.group(0))
| 941 | 25.166667 | 65 | py |
bottom-up-attention | bottom-up-attention-master/caffe/scripts/download_model_binary.py | #!/usr/bin/env python
import os
import sys
import time
import yaml
import hashlib
import argparse
from six.moves import urllib
required_keys = ['caffemodel', 'caffemodel_url', 'sha1']
def reporthook(count, block_size, total_size):
"""
From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
"""
global start_time
if count == 0:
start_time = time.time()
return
duration = (time.time() - start_time) or 0.01
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def parse_readme_frontmatter(dirname):
readme_filename = os.path.join(dirname, 'readme.md')
with open(readme_filename) as f:
lines = [line.strip() for line in f.readlines()]
top = lines.index('---')
bottom = lines.index('---', top + 1)
    # safe_load is sufficient for plain frontmatter and also works with newer
    # PyYAML releases, where yaml.load() requires an explicit Loader argument.
    frontmatter = yaml.safe_load('\n'.join(lines[top + 1:bottom]))
assert all(key in frontmatter for key in required_keys)
return dirname, frontmatter
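
# Editor's illustration, not part of the original script (all values are made
# up): the frontmatter sits between the first two '---' lines of readme.md and
# must contain at least the keys listed in required_keys above, e.g.
#
#   ---
#   name: Example Model
#   caffemodel: example.caffemodel
#   caffemodel_url: http://example.com/example.caffemodel
#   sha1: 0123456789abcdef0123456789abcdef01234567
#   ---
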
def valid_dirname(dirname):
try:
return parse_readme_frontmatter(dirname)
except Exception as e:
print('ERROR: {}'.format(e))
raise argparse.ArgumentTypeError(
'Must be valid Caffe model directory with a correct readme.md')
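
# Editor's note, not part of the original script: an illustrative invocation
# (the model directory is an example path, not one this script requires):
#
#   python scripts/download_model_binary.py models/bvlc_reference_caffenet
#
# The positional argument is validated by valid_dirname() above, so it must be
# a directory whose readme.md contains the frontmatter illustrated earlier.
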
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download trained model binary.')
parser.add_argument('dirname', type=valid_dirname)
args = parser.parse_args()
# A tiny hack: the dirname validator also returns readme YAML frontmatter.
dirname = args.dirname[0]
frontmatter = args.dirname[1]
model_filename = os.path.join(dirname, frontmatter['caffemodel'])
# Closure-d function for checking SHA1.
def model_checks_out(filename=model_filename, sha1=frontmatter['sha1']):
with open(filename, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest() == sha1
# Check if model exists.
if os.path.exists(model_filename) and model_checks_out():
print("Model already exists.")
sys.exit(0)
# Download and verify model.
urllib.request.urlretrieve(
frontmatter['caffemodel_url'], model_filename, reporthook)
if not model_checks_out():
print('ERROR: model did not download correctly! Run this again.')
sys.exit(1)
| 2,531 | 31.461538 | 78 | py |