Dataset columns: python_code (string, 0 to 1.02M characters), repo_name (string, 9 to 48 characters), file_path (string, 5 to 114 characters).
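Each row below pairs a flattened source file (python_code) with its repository name and path. A hypothetical loading sketch, assuming the dump is available locally as JSON Lines; the file name is a placeholder:

import pandas as pd

rows = pd.read_json("deepmind_research_python.jsonl", lines=True)
for _, row in rows.head(3).iterrows():
    print(row["repo_name"], row["file_path"], len(row["python_code"]))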
# Lint as: python3 # Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Factor Evaluation Module.""" # pylint: disable=unused-variable import collections import functools from iodine.modules import utils import shapeguard import sonnet as snt import tensorflow.compat.v1 as tf Factor = collections.namedtuple("Factor", ["name", "size", "type"]) class FactorRegressor(snt.AbstractModule): """Assess representations by learning a linear mapping to latents.""" def __init__(self, mapping=None, name="repres_content"): super().__init__(name=name) if mapping is None: self._mapping = [ Factor("color", 3, "scalar"), Factor("shape", 4, "categorical"), Factor("scale", 1, "scalar"), Factor("x", 1, "scalar"), Factor("y", 1, "scalar"), Factor("orientation", 2, "angle"), ] else: self._mapping = [Factor(*m) for m in mapping] def _build(self, z, latent, visibility, pred_mask, true_mask): sg = shapeguard.ShapeGuard() z = sg.guard(z, "B, K, Z") pred_mask = sg.guard(pred_mask, "B, K, H, W, 1") true_mask = sg.guard(true_mask, "B, L, H, W, 1") visibility = sg.guard(visibility, "B, L") num_visible_obj = tf.reduce_sum(visibility) # Map z to predictions for all latents sg.M = sum([m.size for m in self._mapping]) self.predictor = snt.Linear(sg.M, name="predict_latents") z_flat = sg.reshape(z, "B*K, Z") all_preds = sg.guard(self.predictor(z_flat), "B*K, M") all_preds = sg.reshape(all_preds, "B, 1, K, M") all_preds = tf.tile(all_preds, sg["1, L, 1, 1"]) # prepare latents latents = {} mean_var_tot = {} for m in self._mapping: with tf.name_scope(m.name): # preprocess, reshape, and tile lat_preprocess = self.get_preprocessing(m) lat = sg.guard( lat_preprocess(latent[m.name]), "B, L, {}".format(m.size)) # compute mean over latent by training a variable using mse if m.type in {"scalar", "angle"}: mvt = utils.OnlineMeanVarEstimator( axis=[0, 1], ddof=1, name="{}_mean_var".format(m.name)) mean_var_tot[m.name] = mvt(lat, visibility[:, :, tf.newaxis]) lat = tf.reshape(lat, sg["B, L, 1"] + [-1]) lat = tf.tile(lat, sg["1, 1, K, 1"]) latents[m.name] = lat # prepare predictions idx = 0 predictions = {} for m in self._mapping: with tf.name_scope(m.name): assert m.name in latent, "{} not in {}".format(m.name, latent.keys()) pred = all_preds[..., idx:idx + m.size] predictions[m.name] = sg.guard(pred, "B, L, K, {}".format(m.size)) idx += m.size # compute error total_pairwise_errors = None for m in self._mapping: with tf.name_scope(m.name): error_fn = self.get_error_func(m) sg.guard(latents[m.name], "B, L, K, {}".format(m.size)) sg.guard(predictions[m.name], "B, L, K, {}".format(m.size)) err = error_fn(latents[m.name], predictions[m.name]) sg.guard(err, "B, L, K") if total_pairwise_errors is None: total_pairwise_errors = err else: total_pairwise_errors += err # determine best assignment by comparing masks obj_mask = true_mask[:, :, tf.newaxis] pred_mask = pred_mask[:, tf.newaxis] pairwise_overlap = tf.reduce_sum(obj_mask * pred_mask, axis=[3, 4, 5]) best_match = sg.guard(tf.argmax(pairwise_overlap, axis=2), 
"B, L") assignment = tf.one_hot(best_match, sg.K) assignment *= visibility[:, :, tf.newaxis] # Mask non-visible objects # total error total_error = ( tf.reduce_sum(assignment * total_pairwise_errors) / num_visible_obj) # compute scalars monitored_scalars = {} for m in self._mapping: with tf.name_scope(m.name): metric = self.get_metric(m) scalar = metric( latents[m.name], predictions[m.name], assignment[:, :, :, tf.newaxis], mean_var_tot.get(m.name), num_visible_obj, ) monitored_scalars[m.name] = scalar return total_error, monitored_scalars, mean_var_tot, predictions, assignment @snt.reuse_variables def predict(self, z): sg = shapeguard.ShapeGuard() z = sg.guard(z, "B, Z") all_preds = sg.guard(self.predictor(z), "B, M") idx = 0 predictions = {} for m in self._mapping: with tf.name_scope(m.name): pred = all_preds[:, idx:idx + m.size] predictions[m.name] = sg.guard(pred, "B, {}".format(m.size)) idx += m.size return predictions @staticmethod def get_error_func(factor): if factor.type in {"scalar", "angle"}: return sse elif factor.type == "categorical": return functools.partial( tf.losses.softmax_cross_entropy, reduction="none") else: raise KeyError(factor.type) @staticmethod def get_metric(factor): if factor.type in {"scalar", "angle"}: return r2 elif factor.type == "categorical": return accuracy else: raise KeyError(factor.type) @staticmethod def one_hot(f, nr_categories): return tf.one_hot(tf.cast(f[..., 0], tf.int32), depth=nr_categories) @staticmethod def angle_to_vector(theta): return tf.concat([tf.math.cos(theta), tf.math.sin(theta)], axis=-1) @staticmethod def get_preprocessing(factor): if factor.type == "scalar": return tf.identity elif factor.type == "categorical": return functools.partial( FactorRegressor.one_hot, nr_categories=factor.size) elif factor.type == "angle": return FactorRegressor.angle_to_vector else: raise KeyError(factor.type) def sse(true, pred): # run our own sum squared error because we want to reduce sum over last dim return tf.reduce_sum(tf.square(true - pred), axis=-1) def accuracy(labels, logits, assignment, mean_var_tot, num_vis): del mean_var_tot # unused pred = tf.argmax(logits, axis=-1, output_type=tf.int32) labels = tf.argmax(labels, axis=-1, output_type=tf.int32) correct = tf.cast(tf.equal(labels, pred), tf.float32) return tf.reduce_sum(correct * assignment[..., 0]) / num_vis def r2(labels, pred, assignment, mean_var_tot, num_vis): del num_vis # unused mean, var, _ = mean_var_tot # labels, pred: (B, L, K, n) ss_res = tf.reduce_sum(tf.square(labels - pred) * assignment, axis=2) ss_tot = var[tf.newaxis, tf.newaxis, :] # (1, 1, n) return tf.reduce_mean(1.0 - ss_res / ss_tot)
deepmind-research-master
iodine/modules/factor_eval.py
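A minimal usage sketch for the FactorRegressor above (added here, not part of the original file). Shapes and factor values are placeholders chosen to satisfy the module's shapeguard annotations (B=batch, K=slots, L=true objects, Z=latent size); this builds the graph only, so evaluating the outputs needs a tf.Session and variable initialization.

import tensorflow.compat.v1 as tf
from iodine.modules.factor_eval import FactorRegressor

B, K, L, H, W, Z = 2, 4, 3, 8, 8, 16
regressor = FactorRegressor()  # default color/shape/scale/x/y/orientation mapping
latent = {
    "color": tf.zeros([B, L, 3]),        # scalar factor of size 3
    "shape": tf.zeros([B, L, 1]),        # categorical id, one-hot encoded internally
    "scale": tf.zeros([B, L, 1]),
    "x": tf.zeros([B, L, 1]),
    "y": tf.zeros([B, L, 1]),
    "orientation": tf.zeros([B, L, 1]),  # angle, mapped to (cos, sin) internally
}
total_error, scalars, mean_var, preds, assignment = regressor(
    z=tf.zeros([B, K, Z]),
    latent=latent,
    visibility=tf.ones([B, L]),
    pred_mask=tf.nn.softmax(tf.zeros([B, K, H, W, 1]), axis=1),  # uniform slot masks
    true_mask=tf.ones([B, L, H, W, 1]) / L,
)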
# Lint as: python3 # Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Plotting tools for IODINE.""" # pylint: disable=unused-import, missing-docstring, unused-variable # pylint: disable=invalid-name, unexpected-keyword-arg import functools from iodine.modules.utils import get_mask_plot_colors from matplotlib.colors import hsv_to_rgb import matplotlib.pyplot as plt import numpy as np __all__ = ("get_mask_plot_colors", "example_plot", "iterations_plot", "inputs_plot") def clean_ax(ax, color=None, lw=4.0): ax.set_xticks([]) ax.set_yticks([]) if color is not None: for spine in ax.spines.values(): spine.set_linewidth(lw) spine.set_color(color) def optional_ax(fn): def _wrapped(*args, **kwargs): if kwargs.get("ax", None) is None: figsize = kwargs.pop("figsize", (4, 4)) fig, ax = plt.subplots(figsize=figsize) kwargs["ax"] = ax return fn(*args, **kwargs) return _wrapped def optional_clean_ax(fn): def _wrapped(*args, **kwargs): if kwargs.get("ax", None) is None: figsize = kwargs.pop("figsize", (4, 4)) fig, ax = plt.subplots(figsize=figsize) kwargs["ax"] = ax color = kwargs.pop("color", None) lw = kwargs.pop("lw", 4.0) res = fn(*args, **kwargs) clean_ax(kwargs["ax"], color, lw) return res return _wrapped @optional_clean_ax def show_img(img, mask=None, ax=None, norm=False): if norm: vmin, vmax = np.min(img), np.max(img) img = (img - vmin) / (vmax - vmin) if mask is not None: img = img * mask + np.ones_like(img) * (1.0 - mask) return ax.imshow(img.clip(0.0, 1.0), interpolation="nearest") @optional_clean_ax def show_mask(m, ax): color_conv = get_mask_plot_colors(m.shape[0]) color_mask = np.dot(np.transpose(m, [1, 2, 0]), color_conv) return ax.imshow(color_mask.clip(0.0, 1.0), interpolation="nearest") @optional_clean_ax def show_mat(m, ax, vmin=None, vmax=None, cmap="viridis"): return ax.matshow( m[..., 0], cmap=cmap, vmin=vmin, vmax=vmax, interpolation="nearest") @optional_clean_ax def show_coords(m, ax): vmin, vmax = np.min(m), np.max(m) m = (m - vmin) / (vmax - vmin) color_conv = get_mask_plot_colors(m.shape[-1]) color_mask = np.dot(m, color_conv) return ax.imshow(color_mask, interpolation="nearest") def example_plot(rinfo, b=0, t=-1, mask_components=False, size=2, column_titles=True): image = rinfo["data"]["image"][b, 0] recons = rinfo["outputs"]["recons"][b, t, 0] pred_mask = rinfo["outputs"]["pred_mask"][b, t] components = rinfo["outputs"]["components"][b, t] K, H, W, C = components.shape colors = get_mask_plot_colors(K) nrows = 1 ncols = 3 + K fig, axes = plt.subplots(ncols=ncols, figsize=(ncols * size, nrows * size)) show_img(image, ax=axes[0], color="#000000") show_img(recons, ax=axes[1], color="#000000") show_mask(pred_mask[..., 0], ax=axes[2], color="#000000") for k in range(K): mask = pred_mask[k] if mask_components else None show_img(components[k], ax=axes[k + 3], color=colors[k], mask=mask) if column_titles: labels = ["Image", "Recons.", "Mask" ] + ["Component {}".format(k + 1) for k in range(K)] for ax, title in zip(axes, labels): ax.set_title(title) 
plt.subplots_adjust(hspace=0.03, wspace=0.035) return fig def iterations_plot(rinfo, b=0, mask_components=False, size=2): image = rinfo["data"]["image"][b] true_mask = rinfo["data"]["true_mask"][b] recons = rinfo["outputs"]["recons"][b] pred_mask = rinfo["outputs"]["pred_mask"][b] pred_mask_logits = rinfo["outputs"]["pred_mask_logits"][b] components = rinfo["outputs"]["components"][b] T, K, H, W, C = components.shape colors = get_mask_plot_colors(K) nrows = T + 1 ncols = 2 + K fig, axes = plt.subplots( nrows=nrows, ncols=ncols, figsize=(ncols * size, nrows * size)) for t in range(T): show_img(recons[t, 0], ax=axes[t, 0]) show_mask(pred_mask[t, ..., 0], ax=axes[t, 1]) axes[t, 0].set_ylabel("iter {}".format(t)) for k in range(K): mask = pred_mask[t, k] if mask_components else None show_img(components[t, k], ax=axes[t, k + 2], color=colors[k], mask=mask) axes[0, 0].set_title("Reconstruction") axes[0, 1].set_title("Mask") show_img(image[0], ax=axes[T, 0]) show_mask(true_mask[0, ..., 0], ax=axes[T, 1]) vmin = np.min(pred_mask_logits[T - 1]) vmax = np.max(pred_mask_logits[T - 1]) for k in range(K): axes[0, k + 2].set_title("Component {}".format(k + 1)) # , color=colors[k]) show_mat( pred_mask_logits[T - 1, k], ax=axes[T, k + 2], vmin=vmin, vmax=vmax) axes[T, k + 2].set_xlabel( "Mask Logits for\nComponent {}".format(k + 1)) # , color=colors[k]) axes[T, 0].set_xlabel("Input Image") axes[T, 1].set_xlabel("Ground Truth Mask") plt.subplots_adjust(wspace=0.05, hspace=0.05) return fig def inputs_plot(rinfo, b=0, t=0, size=2): B, T, K, H, W, C = rinfo["outputs"]["components"].shape colors = get_mask_plot_colors(K) inputs = rinfo["inputs"]["spatial"] rows = [ ("image", show_img, False), ("components", show_img, False), ("dcomponents", functools.partial(show_img, norm=True), False), ("mask", show_mat, True), ("pred_mask", show_mat, True), ("dmask", functools.partial(show_mat, cmap="coolwarm"), True), ("posterior", show_mat, True), ("log_prob", show_mat, True), ("counterfactual", show_mat, True), ("coordinates", show_coords, False), ] rows = [(n, f, mcb) for n, f, mcb in rows if n in inputs] nrows = len(rows) ncols = K + 1 fig, axes = plt.subplots( nrows=nrows, ncols=ncols, figsize=(ncols * size - size * 0.9, nrows * size), gridspec_kw={"width_ratios": [1] * K + [0.1]}, ) for r, (name, plot_fn, make_cbar) in enumerate(rows): axes[r, 0].set_ylabel(name) if make_cbar: vmin = np.min(inputs[name][b, t]) vmax = np.max(inputs[name][b, t]) if np.abs(vmin - vmax) < 1e-6: vmin -= 0.1 vmax += 0.1 plot_fn = functools.partial(plot_fn, vmin=vmin, vmax=vmax) # print("range of {:<16}: [{:0.2f}, {:0.2f}]".format(name, vmin, vmax)) for k in range(K): if inputs[name].shape[2] == 1: m = inputs[name][b, t, 0] color = (0.0, 0.0, 0.0) else: m = inputs[name][b, t, k] color = colors[k] mappable = plot_fn(m, ax=axes[r, k], color=color) if make_cbar: fig.colorbar(mappable, cax=axes[r, K]) else: axes[r, K].set_visible(False) for k in range(K): axes[0, k].set_title("Component {}".format(k + 1)) # , color=colors[k]) plt.subplots_adjust(hspace=0.05, wspace=0.05) return fig
deepmind-research-master
iodine/modules/plotting.py
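The rinfo-based figures (example_plot, iterations_plot, inputs_plot) expect the nested dictionary produced by the IODINE eval step; the sketch below (added, not from the original file) only exercises the standalone helpers on random arrays.

import numpy as np
import matplotlib.pyplot as plt
from iodine.modules import plotting

rgb = np.random.rand(32, 32, 3)                        # H, W, C image in [0, 1]
masks = np.random.dirichlet([1.0] * 4, size=(32, 32))  # H, W, K mask weights
masks = np.transpose(masks, (2, 0, 1))                 # K, H, W as show_mask expects

plotting.show_img(rgb, color="#000000")  # creates and cleans its own axes
plotting.show_mask(masks)                # one colour per mask channel
plt.show()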
# Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
deepmind-research-master
iodine/modules/__init__.py
# Lint as: python3 # Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Iterative refinement modules.""" # pylint: disable=g-doc-bad-indent, unused-variable from iodine.modules import utils import shapeguard import sonnet as snt import tensorflow.compat.v1 as tf class RefinementCore(snt.RNNCore): """Recurrent Refinement Module. Refinement modules take as inputs: * previous state (which could be an arbitrary nested structure) * current inputs which include * image-space inputs like pixel-based errors, or mask-posteriors * latent-space inputs like the previous z_dist, or dz They use these inputs to produce: * output (usually a new z_dist) * new_state """ def __init__(self, encoder_net, recurrent_net, refinement_head, name="refinement"): super().__init__(name=name) self._encoder_net = encoder_net self._recurrent_net = recurrent_net self._refinement_head = refinement_head self._sg = shapeguard.ShapeGuard() def initial_state(self, batch_size, **unused_kwargs): return self._recurrent_net.initial_state(batch_size) def _build(self, inputs, prev_state): sg = self._sg assert "spatial" in inputs, inputs.keys() assert "flat" in inputs, inputs.keys() assert "zp" in inputs["flat"], inputs["flat"].keys() zp = sg.guard(inputs["flat"]["zp"], "B, K, Zp") x = sg.guard(self.prepare_spatial_inputs(inputs["spatial"]), "B*K, H, W, C") h1 = sg.guard(self._encoder_net(x).params, "B*K, H1") h2 = sg.guard(self.prepare_flat_inputs(h1, inputs["flat"]), "B*K, H2") h2_unflattened = sg.reshape(h2, "B, K, H2") h3, next_state = self._recurrent_net(h2_unflattened, prev_state) sg.guard(h3, "B, K, H3") outputs = sg.guard(self._refinement_head(zp, h3), "B, K, Y") del self._sg.B return outputs, next_state def prepare_spatial_inputs(self, inputs): values = [] for name, val in sorted(inputs.items(), key=lambda it: it[0]): if val.shape.as_list()[1] == 1: self._sg.guard(val, "B, 1, H, W, _C") val = tf.tile(val, self._sg["1, K, 1, 1, 1"]) else: self._sg.guard(val, "B, K, H, W, _C") values.append(val) concat_inputs = self._sg.guard(tf.concat(values, axis=-1), "B, K, H, W, C") return self._sg.reshape(concat_inputs, "B*K, H, W, C") def prepare_flat_inputs(self, hidden, inputs): values = [self._sg.guard(hidden, "B*K, H1")] for name, val in sorted(inputs.items(), key=lambda it: it[0]): self._sg.guard(val, "B, K, _") val_flat = tf.reshape(val, self._sg["B*K"] + [-1]) values.append(val_flat) return tf.concat(values, axis=-1) class ResHead(snt.AbstractModule): """Updates Zp using a residual mechanism.""" def __init__(self, name="residual_head"): super().__init__(name=name) def _build(self, zp_old, inputs): sg = shapeguard.ShapeGuard() sg.guard(zp_old, "B, K, Zp") sg.guard(inputs, "B, K, H") update = snt.Linear(sg.Zp) flat_zp = sg.reshape(zp_old, "B*K, Zp") flat_inputs = sg.reshape(inputs, "B*K, H") zp = flat_zp + update(flat_inputs) return sg.reshape(zp, "B, K, Zp") class PredictorCorrectorHead(snt.AbstractModule): """This refinement head is used for sequential data. 
At every step it computes a prediction from the λ of the previous timestep and an update from the refinement network of the current timestep. The next step λ' is computed as a gated combination of both: λ' = g * λ_corr + (1-g) * λ_pred """ def __init__( self, hidden_sizes=(64,), pred_gate_bias=0.0, corrector_gate_bias=0.0, activation=tf.nn.elu, name="predcorr_head", ): super().__init__(name=name) self._hidden_sizes = hidden_sizes self._activation = utils.get_act_func(activation) self._pred_gate_bias = pred_gate_bias self._corrector_gate_bias = corrector_gate_bias def _build(self, zp_old, inputs): sg = shapeguard.ShapeGuard() sg.guard(zp_old, "B, K, Zp") sg.guard(inputs, "B, K, H") update = snt.Linear(sg.Zp) update_gate = snt.Linear(sg.Zp) predict = snt.nets.MLP( output_sizes=list(self._hidden_sizes) + [sg.Zp * 2], activation=self._activation, ) flat_zp = sg.reshape(zp_old, "B*K, Zp") flat_inputs = sg.reshape(inputs, "B*K, H") g = tf.nn.sigmoid(update_gate(flat_inputs) + self._corrector_gate_bias) u = update(flat_inputs) # a slightly more efficient way of computing the gated update # (1-g) * flat_zp + g * u zp_corrected = flat_zp + g * (u - flat_zp) predicted = predict(flat_zp) pred_up = predicted[:, :sg.Zp] pred_gate = tf.nn.sigmoid(predicted[:, sg.Zp:] + self._pred_gate_bias) zp = zp_corrected + pred_gate * (pred_up - zp_corrected) return sg.reshape(zp, "B, K, Zp")
deepmind-research-master
iodine/modules/refinement.py
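RefinementCore composes an encoder network, a recurrent core, and one of the heads above; the smallest self-contained piece to try is ResHead, shown here on dummy tensors (a sketch added for illustration, with placeholder sizes).

import tensorflow.compat.v1 as tf
from iodine.modules.refinement import ResHead

B, K, Zp, H = 2, 4, 32, 64
head = ResHead()
zp_new = head(tf.zeros([B, K, Zp]),  # previous distribution parameters per slot
              tf.zeros([B, K, H]))   # features from the recurrent refinement net
# zp_new has shape [B, K, Zp]: the old parameters plus a learned linear update.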
# Lint as: python3 # Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Collection of sonnet modules that wrap useful distributions.""" # pylint: disable=missing-docstring, g-doc-args, g-short-docstring-punctuation # pylint: disable=g-space-before-docstring-summary # pylint: disable=g-no-space-after-docstring-summary import collections from iodine.modules.utils import get_act_func from iodine.modules.utils import get_distribution import shapeguard import sonnet as snt import tensorflow.compat.v1 as tf import tensorflow_probability as tfp tfd = tfp.distributions FlatParameters = collections.namedtuple("ParameterOut", ["params"]) MixtureParameters = collections.namedtuple("MixtureOut", ["pixel", "mask"]) class DistributionModule(snt.AbstractModule): """Distribution Base class supporting shape inference & default priors.""" def __init__(self, name="distribution"): super().__init__(name=name) self._output_shape = None def set_output_shape(self, shape): self._output_shape = shape @property def output_shape(self): return self._output_shape @property def input_shapes(self): raise NotImplementedError() def get_default_prior(self, batch_dim=(1,)): return self( tf.zeros(list(batch_dim) + self.input_shapes.params, dtype=tf.float32)) class BernoulliOutput(DistributionModule): def __init__(self, name="bernoulli_output"): super().__init__(name=name) @property def input_shapes(self): return FlatParameters(self.output_shape) def _build(self, params): return tfd.Independent( tfd.Bernoulli(logits=params, dtype=tf.float32), reinterpreted_batch_ndims=1) class LocScaleDistribution(DistributionModule): """Generic IID location / scale distribution. Input parameters are concatenation of location and scale (2*Z,) Args: dist: Distribution or str Kind of distribution used. Supports Normal, Logistic, Laplace, and StudentT distributions. dist_kwargs: dict custom keyword arguments for the distribution scale_act: function or str or None activation function to be applied to the scale input scale: str different modes for computing the scale: * stddev: scale is computed as scale_act(s) * var: scale is computed as sqrt(scale_act(s)) * prec: scale is computed as 1./scale_act(s) * fixed: scale is a global variable (same for all pixels) if scale_val==-1. then it is a trainable variable initialized to 0.1 else it is fixed to scale_val (input shape is only (Z,) in this case) scale_val: float determines the scale value (only used if scale=='fixed'). loc_act: function or str or None activation function to be applied to the location input. Supports optional activation functions for scale and location. 
Supports different "modes" for scaling: * stddev: """ def __init__( self, dist=tfd.Normal, dist_kwargs=None, scale_act=tf.exp, scale="stddev", scale_val=1.0, loc_act=None, name="loc_scale_dist", ): super().__init__(name=name) self._scale_act = get_act_func(scale_act) self._loc_act = get_act_func(loc_act) # supports Normal, Logstic, Laplace, StudentT self._dist = get_distribution(dist) self._dist_kwargs = dist_kwargs or {} assert scale in ["stddev", "var", "prec", "fixed"], scale self._scale = scale self._scale_val = scale_val @property def input_shapes(self): if self._scale == "fixed": param_shape = self.output_shape else: param_shape = self.output_shape[:-1] + [self.output_shape[-1] * 2] return FlatParameters(param_shape) def _build(self, params): if self._scale == "fixed": loc = params scale = None # set later else: n_channels = params.get_shape().as_list()[-1] assert n_channels % 2 == 0 assert n_channels // 2 == self.output_shape[-1] loc = params[..., :n_channels // 2] scale = params[..., n_channels // 2:] # apply activation functions if self._scale != "fixed": scale = self._scale_act(scale) loc = self._loc_act(loc) # apply the correct parametrization if self._scale == "var": scale = tf.sqrt(scale) elif self._scale == "prec": scale = tf.reciprocal(scale) elif self._scale == "fixed": if self._scale_val == -1.0: scale_val = tf.get_variable( "scale", initializer=tf.constant(0.1, dtype=tf.float32)) else: scale_val = self._scale_val scale = tf.ones_like(loc) * scale_val # else 'stddev' dist = self._dist(loc=loc, scale=scale, **self._dist_kwargs) return tfd.Independent(dist, reinterpreted_batch_ndims=1) class MaskedMixture(DistributionModule): def __init__( self, num_components, component_dist, mask_activation=None, name="masked_mixture", ): """ Spatial Mixture Model composed of a categorical masking distribution and a custom pixel-wise component distribution (usually logistic or gaussian). Args: num_components: int Number of mixture components >= 2 component_dist: the distribution to use for the individual components mask_activation: str or function or None activation function that should be applied to the mask before the softmax. name: str """ super().__init__(name=name) self._num_components = num_components self._dist = component_dist self._mask_activation = get_act_func(mask_activation) def set_output_shape(self, shape): super().set_output_shape(shape) self._dist.set_output_shape(shape) def _build(self, pixel, mask): sg = shapeguard.ShapeGuard() # MASKING sg.guard(mask, "B, K, H, W, 1") mask = tf.transpose(mask, perm=[0, 2, 3, 4, 1]) mask = sg.reshape(mask, "B, H, W, K") mask = self._mask_activation(mask) mask = mask[:, tf.newaxis] # add K=1 axis since K is removed by mixture mix_dist = tfd.Categorical(logits=mask) # COMPONENTS sg.guard(pixel, "B, K, H, W, Cp") params = tf.transpose(pixel, perm=[0, 2, 3, 1, 4]) params = params[:, tf.newaxis] # add K=1 axis since K is removed by mixture dist = self._dist(params) return tfd.MixtureSameFamily( mixture_distribution=mix_dist, components_distribution=dist) @property def input_shapes(self): pixel = [self._num_components] + self._dist.input_shapes.params mask = pixel[:-1] + [1] return MixtureParameters(pixel, mask) def get_default_prior(self, batch_dim=(1,)): pixel = tf.zeros( list(batch_dim) + self.input_shapes.pixel, dtype=tf.float32) mask = tf.zeros(list(batch_dim) + self.input_shapes.mask, dtype=tf.float32) return self(pixel, mask)
deepmind-research-master
iodine/modules/distributions.py
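A minimal sketch (added, not part of the original file) of LocScaleDistribution with illustrative settings: the module consumes a flat parameter vector that concatenates location and pre-activation scale along the last axis.

import tensorflow.compat.v1 as tf
from iodine.modules.distributions import LocScaleDistribution

dist_module = LocScaleDistribution(dist="normal", scale_act=tf.nn.softplus, scale="stddev")
dist_module.set_output_shape([3])        # event size Z = 3
params = tf.zeros([8, 6])                # loc (3) concatenated with raw scale (3)
dist = dist_module(params)               # tfd.Independent(Normal), event_shape [3]
sample = dist.sample()                   # shape [8, 3]
prior = dist_module.get_default_prior()  # distribution built from all-zero parameters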
# Lint as: python3 # Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Network modules.""" # pylint: disable=g-multiple-import, g-doc-args, g-short-docstring-punctuation # pylint: disable=g-no-space-after-docstring-summary from iodine.modules.distributions import FlatParameters from iodine.modules.utils import flatten_all_but_last, get_act_func import numpy as np import shapeguard import sonnet as snt import tensorflow.compat.v1 as tf class CNN(snt.AbstractModule): """ConvNet2D followed by an MLP. This is a typical encoder architecture for VAEs, and has been found to work well. One small improvement is to append coordinate channels on the input, though for most datasets the improvement obtained is negligible. """ def __init__(self, cnn_opt, mlp_opt, mode="flatten", name="cnn"): """Constructor. Args: cnn_opt: Dictionary. Kwargs for the cnn. See vae_lib.ConvNet2D for details. mlp_opt: Dictionary. Kwargs for the mlp. See vae_lib.MLP for details. name: String. Optional name. """ super().__init__(name=name) if "activation" in cnn_opt: cnn_opt["activation"] = get_act_func(cnn_opt["activation"]) self._cnn_opt = cnn_opt if "activation" in mlp_opt: mlp_opt["activation"] = get_act_func(mlp_opt["activation"]) self._mlp_opt = mlp_opt self._mode = mode def set_output_shapes(self, shape): # assert self._mlp_opt['output_sizes'][-1] is None, self._mlp_opt sg = shapeguard.ShapeGuard() sg.guard(shape, "1, Y") self._mlp_opt["output_sizes"][-1] = sg.Y def _build(self, image): """Connect model to TensorFlow graph.""" assert self._mlp_opt["output_sizes"][-1] is not None, "set output_shapes" sg = shapeguard.ShapeGuard() flat_image, unflatten = flatten_all_but_last(image, n_dims=3) sg.guard(flat_image, "B, H, W, C") cnn = snt.nets.ConvNet2D( activate_final=True, paddings=("SAME",), normalize_final=False, **self._cnn_opt) mlp = snt.nets.MLP(**self._mlp_opt) # run CNN net = cnn(flat_image) if self._mode == "flatten": # flatten net_shape = net.get_shape().as_list() flat_shape = net_shape[:-3] + [np.prod(net_shape[-3:])] net = tf.reshape(net, flat_shape) elif self._mode == "avg_pool": net = tf.reduce_mean(net, axis=[1, 2]) else: raise KeyError('Unknown mode "{}"'.format(self._mode)) # run MLP output = sg.guard(mlp(net), "B, Y") return FlatParameters(unflatten(output)) class MLP(snt.AbstractModule): """MLP.""" def __init__(self, name="mlp", **mlp_opt): super().__init__(name=name) if "activation" in mlp_opt: mlp_opt["activation"] = get_act_func(mlp_opt["activation"]) self._mlp_opt = mlp_opt assert mlp_opt["output_sizes"][-1] is None, mlp_opt def set_output_shapes(self, shape): sg = shapeguard.ShapeGuard() sg.guard(shape, "1, Y") self._mlp_opt["output_sizes"][-1] = sg.Y def _build(self, data): """Connect model to TensorFlow graph.""" assert self._mlp_opt["output_sizes"][-1] is not None, "set output_shapes" sg = shapeguard.ShapeGuard() flat_data, unflatten = flatten_all_but_last(data) sg.guard(flat_data, "B, N") mlp = snt.nets.MLP(**self._mlp_opt) # run MLP output = sg.guard(mlp(flat_data), 
"B, Y") return FlatParameters(unflatten(output)) class DeConv(snt.AbstractModule): """MLP followed by Deconv net. This decoder is commonly used by vanilla VAE models. However, in practice BroadcastConv (see below) seems to disentangle slightly better. """ def __init__(self, mlp_opt, cnn_opt, name="deconv"): """Constructor. Args: mlp_opt: Dictionary. Kwargs for vae_lib.MLP. cnn_opt: Dictionary. Kwargs for vae_lib.ConvNet2D for the CNN. name: Optional name. """ super().__init__(name=name) assert cnn_opt["output_channels"][-1] is None, cnn_opt if "activation" in cnn_opt: cnn_opt["activation"] = get_act_func(cnn_opt["activation"]) self._cnn_opt = cnn_opt if mlp_opt and "activation" in mlp_opt: mlp_opt["activation"] = get_act_func(mlp_opt["activation"]) self._mlp_opt = mlp_opt self._target_out_shape = None def set_output_shapes(self, shape): self._target_out_shape = shape self._cnn_opt["output_channels"][-1] = self._target_out_shape[-1] def _build(self, z): """Connect model to TensorFlow graph.""" sg = shapeguard.ShapeGuard() flat_z, unflatten = flatten_all_but_last(z) sg.guard(flat_z, "B, Z") sg.guard(self._target_out_shape, "H, W, C") mlp = snt.nets.MLP(**self._mlp_opt) cnn = snt.nets.ConvNet2DTranspose( paddings=("SAME",), normalize_final=False, **self._cnn_opt) net = mlp(flat_z) output = sg.guard(cnn(net), "B, H, W, C") return FlatParameters(unflatten(output)) class BroadcastConv(snt.AbstractModule): """MLP followed by a broadcast convolution. This decoder takes a latent vector z, (optionally) applies an MLP to it, then tiles the resulting vector across space to have dimension [B, H, W, C] i.e. tiles across H and W. Then coordinate channels are appended and a convolutional layer is applied. """ def __init__( self, cnn_opt, mlp_opt=None, coord_type="linear", coord_freqs=3, name="broadcast_conv", ): """Args: cnn_opt: dict Kwargs for vae_lib.ConvNet2D for the CNN. mlp_opt: None or dict If dictionary, then kwargs for snt.nets.MLP. If None, then the model will not process the latent vector by an mlp. coord_type: ["linear", "cos", None] type of coordinate channels to add. None: add no coordinate channels. linear: two channels with values linearly spaced from -1. to 1. in the H and W dimension respectively. cos: coord_freqs^2 many channels containing cosine basis functions. coord_freqs: int number of frequencies used to construct the cosine basis functions (only for coord_type=="cos") name: Optional name. 
""" super().__init__(name=name) assert cnn_opt["output_channels"][-1] is None, cnn_opt if "activation" in cnn_opt: cnn_opt["activation"] = get_act_func(cnn_opt["activation"]) self._cnn_opt = cnn_opt if mlp_opt and "activation" in mlp_opt: mlp_opt["activation"] = get_act_func(mlp_opt["activation"]) self._mlp_opt = mlp_opt self._target_out_shape = None self._coord_type = coord_type self._coord_freqs = coord_freqs def set_output_shapes(self, shape): self._target_out_shape = shape self._cnn_opt["output_channels"][-1] = self._target_out_shape[-1] def _build(self, z): """Connect model to TensorFlow graph.""" assert self._target_out_shape is not None, "Call set_output_shape" # reshape components into batch dimension before processing them sg = shapeguard.ShapeGuard() flat_z, unflatten = flatten_all_but_last(z) sg.guard(flat_z, "B, Z") sg.guard(self._target_out_shape, "H, W, C") if self._mlp_opt is None: mlp = tf.identity else: mlp = snt.nets.MLP(activate_final=True, **self._mlp_opt) mlp_output = sg.guard(mlp(flat_z), "B, hidden") # tile MLP output spatially and append coordinate channels broadcast_mlp_output = tf.tile( mlp_output[:, tf.newaxis, tf.newaxis], multiples=tf.constant(sg["1, H, W, 1"]), ) # B, H, W, Z dec_cnn_inputs = self.append_coordinate_channels(broadcast_mlp_output) cnn = snt.nets.ConvNet2D( paddings=("SAME",), normalize_final=False, **self._cnn_opt) cnn_outputs = cnn(dec_cnn_inputs) sg.guard(cnn_outputs, "B, H, W, C") return FlatParameters(unflatten(cnn_outputs)) def append_coordinate_channels(self, output): sg = shapeguard.ShapeGuard() sg.guard(output, "B, H, W, C") if self._coord_type is None: return output if self._coord_type == "linear": w_coords = tf.linspace(-1.0, 1.0, sg.W)[None, None, :, None] h_coords = tf.linspace(-1.0, 1.0, sg.H)[None, :, None, None] w_coords = tf.tile(w_coords, sg["B, H, 1, 1"]) h_coords = tf.tile(h_coords, sg["B, 1, W, 1"]) return tf.concat([output, h_coords, w_coords], axis=-1) elif self._coord_type == "cos": freqs = sg.guard(tf.range(0.0, self._coord_freqs), "F") valx = tf.linspace(0.0, np.pi, sg.W)[None, None, :, None, None] valy = tf.linspace(0.0, np.pi, sg.H)[None, :, None, None, None] x_basis = tf.cos(valx * freqs[None, None, None, :, None]) y_basis = tf.cos(valy * freqs[None, None, None, None, :]) xy_basis = tf.reshape(x_basis * y_basis, sg["1, H, W, F*F"]) coords = tf.tile(xy_basis, sg["B, 1, 1, 1"])[..., 1:] return tf.concat([output, coords], axis=-1) else: raise KeyError('Unknown coord_type: "{}"'.format(self._coord_type)) class LSTM(snt.RNNCore): """Wrapper around snt.LSTM that supports multi-layers and runs K components in parallel. Expects input data of shape (B, K, H) and outputs data of shape (B, K, Y) """ def __init__(self, hidden_sizes, name="lstm"): super().__init__(name=name) self._hidden_sizes = hidden_sizes with self._enter_variable_scope(): self._lstm_layers = [snt.LSTM(hidden_size=h) for h in self._hidden_sizes] def initial_state(self, batch_size, **kwargs): return [ lstm.initial_state(batch_size, **kwargs) for lstm in self._lstm_layers ] def _build(self, data, prev_states): assert not self._hidden_sizes or self._hidden_sizes[-1] is not None assert len(prev_states) == len(self._hidden_sizes) sg = shapeguard.ShapeGuard() sg.guard(data, "B, K, H") data = sg.reshape(data, "B*K, H") out = data new_states = [] for lstm, pstate in zip(self._lstm_layers, prev_states): out, nstate = lstm(out, pstate) new_states.append(nstate) sg.guard(out, "B*K, Y") out = sg.reshape(out, "B, K, Y") return out, new_states
deepmind-research-master
iodine/modules/networks.py
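A sketch of the BroadcastConv decoder (added; the option values are illustrative placeholders, not the published configuration). The latent is tiled over an H x W grid, coordinate channels are appended, and a CNN maps the result to the requested output shape.

import tensorflow.compat.v1 as tf
from iodine.modules.networks import BroadcastConv

decoder = BroadcastConv(
    cnn_opt={
        "output_channels": [32, 32, None],  # final entry is filled in by set_output_shapes
        "kernel_shapes": [3],
        "strides": [1],
        "activation": "elu",
    },
    mlp_opt={"output_sizes": [64]},
    coord_type="linear",
)
decoder.set_output_shapes([8, 8, 4])  # H, W, C of the decoded parameter map
out = decoder(tf.zeros([2, 7, 16]))   # e.g. (B, K, Z) latents
# out.params has shape (2, 7, 8, 8, 4): per-slot pixel/mask parameters.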
# Lint as: python3 # Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for IODINE.""" # pylint: disable=g-doc-bad-indent, g-doc-return-or-yield, g-doc-args # pylint: disable=missing-docstring import importlib import math from absl import logging from matplotlib.colors import hsv_to_rgb import numpy as np import shapeguard import sonnet as snt import tensorflow.compat.v1 as tf import tensorflow_probability as tfp tfd = tfp.distributions ACT_FUNCS = { "identity": tf.identity, "sigmoid": tf.nn.sigmoid, "tanh": tf.nn.tanh, "relu": tf.nn.relu, "elu": tf.nn.elu, "selu": tf.nn.selu, "softplus": tf.nn.softplus, "exp": tf.exp, "softmax": tf.nn.softmax, } def get_act_func(name_or_func): if name_or_func is None: return tf.identity if callable(name_or_func): return name_or_func elif isinstance(name_or_func, str): return ACT_FUNCS[name_or_func.lower()] else: raise KeyError( 'Unknown activation function "{}" of type {}"'.format( name_or_func, type(name_or_func) ) ) DISTS = { "normal": tfd.Normal, "log_normal": tfd.LogNormal, "laplace": tfd.Laplace, "logistic": tfd.Logistic, } def get_distribution(name_or_dist): if isinstance(name_or_dist, type(tfd.Normal)): return name_or_dist elif isinstance(name_or_dist, str): return DISTS[name_or_dist.lower()] raise KeyError( 'Unknown distribution "{}" of type {}"'.format(name_or_dist, type(name_or_dist))) def get_mask_plot_colors(nr_colors): """Get nr_colors uniformly spaced hues to plot mask values.""" hsv_colors = np.ones((nr_colors, 3), dtype=np.float32) hsv_colors[:, 0] = np.linspace(0, 1, nr_colors, endpoint=False) color_conv = hsv_to_rgb(hsv_colors) return color_conv def color_transform(masks): with tf.name_scope("color_transform"): n_components = masks.shape.as_list()[-1] colors = tf.constant(get_mask_plot_colors(n_components), name="mask_colors") return tf.tensordot(masks, colors, axes=1) def construct_diagnostic_image( images, recons, masks, components, border_width=2, nr_images=6, clip=True, mask_components=False, ): """Construct a single image containing image, recons., mask, and components. Args: images: (B, H, W, C) recons: (B, H, W, C) masks: (B, H, W, K) components: (B, H, W, K, C) border_width: int. width of the border in pixels. (default=2) nr_images: int. Number of images to include. (default=6) clip: bool. Whether to clip the final image to range [0, 1]. 
Returns: diag_images: (nr, H+border_width*2, (W+border_width*2) * (K+3), 3) """ with tf.name_scope("diagnostic_image"): # transform the masks into RGB images recolored_masks = color_transform(masks[:nr_images]) if images.get_shape().as_list()[-1] == 1: # deal with grayscale images images = tf.tile(images[:nr_images], [1, 1, 1, 3]) recons = tf.tile(recons[:nr_images], [1, 1, 1, 3]) components = tf.tile(components[:nr_images], [1, 1, 1, 1, 3]) if mask_components: components *= masks[:nr_images, ..., tf.newaxis] # Pad everything no_pad, pad = (0, 0), (border_width, border_width) paddings = tf.constant([no_pad, pad, pad, no_pad]) paddings_components = tf.constant([no_pad, pad, pad, no_pad, no_pad]) pad_images = tf.pad(images[:nr_images], paddings, constant_values=0.5) pad_recons = tf.pad(recons[:nr_images], paddings, constant_values=0.5) pad_masks = tf.pad(recolored_masks, paddings, constant_values=1.0) pad_components = tf.pad( components[:nr_images], paddings_components, constant_values=0.5 ) # reshape components into single wide image pad_components = tf.transpose(pad_components, [0, 1, 3, 2, 4]) pc_shape = pad_components.shape.as_list() pc_shape[2] = pc_shape[2] * pc_shape.pop(3) pad_components = tf.reshape(pad_components, pc_shape) # concatenate all parts along width diag_imgs = tf.concat( [pad_images, pad_recons, pad_masks, pad_components], axis=2 ) # concatenate all images along height diag_shape = diag_imgs.shape.as_list() final_img = tf.reshape(diag_imgs, [1, -1, diag_shape[2], diag_shape[3]]) if clip: final_img = tf.clip_by_value(final_img, 0.0, 1.0) return final_img def construct_reconstr_image(images, recons, border_width=2, nr_images=6, clip=True): """Construct a single image containing image, and recons. Args: images: (B, H, W, C) recons: (B, H, W, C) border_width: int. width of the border in pixels. (default=2) nr_images: int. Number of images to include. (default=6) clip: bool. Whether to clip the final image to range [0, 1]. Returns: rec_images: (nr, H+border_width*2, (W+border_width*2) * 2, 3) """ with tf.name_scope("diagnostic_image"): # Pad everything no_pad, pad = (0, 0), (border_width, border_width) paddings = tf.constant([no_pad, pad, pad, no_pad]) pad_images = tf.pad(images[:nr_images], paddings, constant_values=0.5) pad_recons = tf.pad(recons[:nr_images], paddings, constant_values=0.5) # concatenate all parts along width diag_imgs = tf.concat([pad_images, pad_recons], axis=2) # concatenate all images along height diag_shape = diag_imgs.shape.as_list() final_img = tf.reshape(diag_imgs, [1, -1, diag_shape[2], diag_shape[3]]) if clip: final_img = tf.clip_by_value(final_img, 0.0, 1.0) return final_img def construct_iterations_image( images, recons, masks, border_width=2, nr_seqs=2, clip=True ): """Construct a single image containing image, and recons. Args: images: (B, T, 1, H, W, C) recons: (B, T, 1, H, W, C) masks: (B, T, K, H, W, 1) border_width: int. width of the border in pixels. (default=2) nr_seqs: int. Number of sequences to include. (default=2) clip: bool. Whether to clip the final image to range [0, 1]. 
Returns: rec_images: (nr, H+border_width*2, (W+border_width*2) * 2, 3) """ sg = shapeguard.ShapeGuard() sg.guard(recons, "B, T, 1, H, W, C") if images.get_shape().as_list()[1] == 1: images = tf.tile(images, sg["1, T, 1, 1, 1, 1"]) sg.guard(images, "B, T, 1, H, W, C") sg.guard(masks, " B, T, K, H, W, 1") if sg.C == 1: # deal with grayscale images = tf.tile(images, [1, 1, 1, 1, 1, 3]) recons = tf.tile(recons, [1, 1, 1, 1, 1, 3]) sg.S = min(nr_seqs, sg.B) with tf.name_scope("diagnostic_image"): # convert masks to rgb masks_trans = tf.transpose(masks[:nr_seqs], [0, 1, 5, 3, 4, 2]) recolored_masks = color_transform(masks_trans) # Pad everything no_pad, pad = (0, 0), (border_width, border_width) paddings = tf.constant([no_pad, no_pad, no_pad, pad, pad, no_pad]) pad_images = tf.pad(images[:nr_seqs], paddings, constant_values=0.5) pad_recons = tf.pad(recons[:nr_seqs], paddings, constant_values=0.5) pad_masks = tf.pad(recolored_masks, paddings, constant_values=0.5) # concatenate all parts along width triples = tf.concat([pad_images, pad_recons, pad_masks], axis=3) triples = sg.guard(triples[:, :, 0], "S, T, 3*Hp, Wp, 3") # concatenate iterations along width and sequences along height final = tf.reshape( tf.transpose(triples, [0, 2, 1, 3, 4]), sg["1, S*3*Hp, Wp*T, 3"] ) if clip: final = tf.clip_by_value(final, 0.0, 1.0) return final def get_overview_image(image, output_dist, mask_components=False): recons = output_dist.mean()[:, 0] image = image[:, 0] if hasattr(output_dist, "mixture_distribution") and hasattr( output_dist, "components_distribution" ): mask = output_dist.mixture_distribution.probs[:, 0] components = output_dist.components_distribution.mean()[:, 0] return construct_diagnostic_image( image, recons, mask, components, mask_components=mask_components ) else: return construct_reconstr_image(image, recons) class OnlineMeanVarEstimator(snt.AbstractModule): """Online estimator for mean and variance using Welford's algorithm.""" def __init__(self, axis=None, ddof=0.0, name="online_mean_var"): super().__init__(name=name) self._axis = axis self._ddof = ddof def _build(self, x, weights=None): if weights is None: weights = tf.ones_like(x) if weights.get_shape().as_list() != x.get_shape().as_list(): weights = tf.broadcast_to(weights, x.get_shape().as_list()) sum_weights = tf.reduce_sum(weights, axis=self._axis) shape = sum_weights.get_shape().as_list() total = tf.get_variable( "total", shape=shape, dtype=weights.dtype, initializer=tf.zeros_initializer(), trainable=False, ) mean = tf.get_variable( "mean", shape=shape, dtype=x.dtype, initializer=tf.zeros_initializer(), trainable=False, ) m2 = tf.get_variable( "M2", shape=shape, dtype=x.dtype, initializer=tf.zeros_initializer(), trainable=False, ) total_update = tf.assign_add(total, sum_weights) with tf.control_dependencies([total_update]): delta = (x - mean) * weights mean_update = tf.assign_add( mean, tf.reduce_sum(delta, axis=self._axis) / total ) with tf.control_dependencies([mean_update]): delta2 = x - mean m2_update = tf.assign_add( m2, tf.reduce_sum(delta * delta2, axis=self._axis) ) with tf.control_dependencies([m2_update]): return tf.identity(mean), m2 / (total - self._ddof), tf.identity(total) def print_shapes(name, value, indent=""): if isinstance(value, dict): print("{}{}:".format(indent, name)) for k, v in sorted(value.items(), key=lambda x: (isinstance(x[1], dict), x[0])): print_shapes(k, v, indent + " ") elif isinstance(value, list): print( "{}{}[{}]: {} @ {}".format( indent, name, len(value), value[0].shape, value[0].dtype ) ) elif 
isinstance(value, np.ndarray): print("{}{}: {} @ {}".format(indent, name, value.shape, value.dtype)) elif isinstance(value, tf.Tensor): print( "{}{}: {} @ {}".format( indent, name, value.get_shape().as_list(), value.dtype ) ) elif np.isscalar(value): print("{}{}: {}".format(indent, name, value)) else: print("{}{}.type: {}".format(indent, name, type(value))) def _pad_images(images, image_border_value=0.5, border_width=2): """Pad images to create gray borders. Args: images: Tensor of shape [B, H], [B, H, W], or [B, H, W, C]. image_border_value: Scalar value of greyscale borderfor images. border_width: Int. Border width in pixels. Raises: ValueError: if the image provided is not {2,3,4} dimensional. Returns: Tensor of same shape as images, except H and W being H + border_width and W + border_width. """ image_rank = len(images.get_shape()) border_paddings = (border_width, border_width) if image_rank == 2: # [B, H] paddings = [(0, 0), border_paddings] elif image_rank == 3: # [B, H, W] paddings = [(0, 0), border_paddings, border_paddings] elif image_rank == 4: # [B, H, W, C] paddings = [(0, 0), border_paddings, border_paddings, (0, 0)] else: raise ValueError("expected image to be 2D, 3D or 4D, got %d" % image_rank) paddings = tf.constant(paddings) return tf.pad(images, paddings, "CONSTANT", constant_values=image_border_value) def images_to_grid( images, grid_height=None, grid_width=4, max_grid_height=4, max_grid_width=4, image_border_value=0.5, ): """Combine images and arrange them in a grid. Args: images: Tensor of shape [B, H], [B, H, W], or [B, H, W, C]. grid_height: Height of the grid of images to output, or None. Either `grid_width` or `grid_height` must be set to an integer value. If None, `grid_height` is set to ceil(B/`grid_width`), and capped at `max_grid_height` when provided. grid_width: Width of the grid of images to output, or None. Either `grid_width` or `grid_height` must be set to an integer value. If None, `grid_width` is set to ceil(B/`grid_height`), and capped at `max_grid_width` when provided. max_grid_height: Maximum allowable height of the grid of images to output or None. Only used when `grid_height` is None. max_grid_width: Maximum allowable width of the grid of images to output, or None. Only used when `grid_width` is None. image_border_value: None or scalar value of greyscale borderfor images. If None, then no border is rendered. Raises: ValueError: if neither of grid_width or grid_height are set to a positive integer. Returns: images: Tensor of shape [height*H, width*W, C]. C will be set to 1 if the input was provided with no channels. Contains all input images in a grid. """ # If only one dimension is set, infer how big the other one should be. if grid_height is None: if not isinstance(grid_width, int) or grid_width <= 0: raise ValueError( "if `grid_height` is None, `grid_width` must be " "a positive integer" ) grid_height = int(math.ceil(images.get_shape()[0].value / grid_width)) if max_grid_height is not None: grid_height = min(max_grid_height, grid_height) if grid_width is None: if not isinstance(grid_height, int) or grid_height <= 0: raise ValueError( "if `grid_width` is None, `grid_height` must be " "a positive integer" ) grid_width = int(math.ceil(images.get_shape()[0].value / grid_height)) if max_grid_width is not None: grid_width = min(max_grid_width, grid_width) images = images[: grid_height * grid_width, ...] # Pad with extra blank frames if grid_height x grid_width is less than the # number of frames provided. 
pre_images_shape = images.get_shape().as_list() if pre_images_shape[0] < grid_height * grid_width: pre_images_shape[0] = grid_height * grid_width - pre_images_shape[0] if image_border_value is not None: dummy_frames = image_border_value * tf.ones( shape=pre_images_shape, dtype=images.dtype ) else: dummy_frames = tf.zeros(shape=pre_images_shape, dtype=images.dtype) images = tf.concat([images, dummy_frames], axis=0) if image_border_value: images = _pad_images(images, image_border_value=image_border_value) images_shape = images.get_shape().as_list() images = tf.reshape(images, [grid_height, grid_width] + images_shape[1:]) if len(images_shape) == 2: images = tf.expand_dims(images, -1) if len(images_shape) <= 3: images = tf.expand_dims(images, -1) image_height, image_width, channels = images.get_shape().as_list()[2:] images = tf.transpose(images, perm=[0, 2, 1, 3, 4]) images = tf.reshape( images, [grid_height * image_height, grid_width * image_width, channels] ) return images def flatten_all_but_last(tensor, n_dims=1): shape = tensor.shape.as_list() batch_dims = shape[:-n_dims] flat_tensor = tf.reshape(tensor, [np.prod(batch_dims)] + shape[-n_dims:]) def unflatten(other_tensor): other_shape = other_tensor.shape.as_list() return tf.reshape(other_tensor, batch_dims + other_shape[1:]) return flat_tensor, unflatten def ensure_3d(tensor): if tensor.shape.ndims == 2: return tensor[..., None] assert tensor.shape.ndims == 3 return tensor built_element_cache = { "none": None, "global_step": tf.train.get_or_create_global_step(), } def build(plan, identifier): logging.debug("building %s", identifier) if identifier in built_element_cache: logging.debug("%s is already built, returning", identifier) return built_element_cache[identifier] elif not isinstance(plan, dict): return plan elif "constructor" in plan: ctor = _resolve_constructor(plan) kwargs = { k: build(v, identifier=k) for k, v in plan.items() if k != "constructor" } with tf.variable_scope(identifier): built_element_cache[identifier] = ctor(**kwargs) return built_element_cache[identifier] else: return {k: build(v, identifier=k) for k, v in plan.items()} def _resolve_constructor(plan_subsection): assert "constructor" in plan_subsection, plan_subsection if isinstance(plan_subsection["constructor"], str): module, _, ctor = plan_subsection["constructor"].rpartition(".") mod = importlib.import_module(module) return getattr(mod, ctor) else: return plan_subsection["constructor"]
deepmind-research-master
iodine/modules/utils.py
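Two of the helpers above can be tried in isolation; the sketch below is added for illustration (graph mode, so evaluating the estimator outputs needs a tf.Session and variable initialization).

import tensorflow.compat.v1 as tf
from iodine.modules import utils

elu = utils.get_act_func("elu")         # name -> tf.nn.elu
colors = utils.get_mask_plot_colors(5)  # (5, 3) RGB array of evenly spaced hues

estimator = utils.OnlineMeanVarEstimator(axis=[0])
mean, var, total = estimator(tf.random.normal([128, 3]))  # running stats per feature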
# Lint as: python3 # Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Stochastic Variational inference Auto-Encoder.""" # pylint: disable=unused-variable, g-bad-todo import collections from iodine.modules import utils from multi_object_datasets.segmentation_metrics import adjusted_rand_index import numpy as np import shapeguard import sonnet as snt import tensorflow.compat.v1 as tf import tensorflow_probability as tfp tfd = tfp.distributions logging = tf.logging DEFAULT_INPUTS = ( "image", "zp", "mask", "components", "dmask", "dzp", "dcomponents", "posterior", "log_prob", "pred_mask", "capacity", "flat_capacity", "coordinates", "counterfactual", ) DEFAULT_PREPROCESSING = [ "dcomponents", "dmask", "dzp", "log_prob", "counterfactual" ] DEFAULT_STOP_GRADIENT = ("dzp", "dmask", "dcomponents", "log_prob", "counterfactual") class IODINE(snt.AbstractModule): """Iterative Amortized Variational Autoencoder. Args: decoder (decoders.ComponentDecoder): The decoder. refinement_core (refinement.RefinementCore): The recurrent (refinement) encoder. latent_dist (distributions.Distribution): The distribution of the latent z variables. output_dist (distributions.MaskedMixture): The pixel-wise output distribution (a spatial mixture). n_z (int): Dimensionality of the per-object latents z_k. num_components (int): Number of available object slots (K). num_iters (int): Number of refinement iterations. sequential (bool): Whether the input data is sequential. factor_evaluator (factor_eval.FactorRegressor): The factor evaluation model that is trained to predict the true factors from the inferred latents. stop_gradients (List[str]): For which refinement inputs to stop gradients from backpropagating through the iterations. (see inputs for valid values) Default is: ["dcomponents", "dmask", "dzp", "log_prob", "counterfactual"] iter_loss_weight ("linspace" | float | List[float]): How to weigh the loss terms for each timestep. Can be: "linspace": Linearly increasing weights from 0 to 1.0. float: A fixed value for all steps. List[float]: Manually specify all weight. inputs (List[str]): list of inputs to use for the refinement network. Can include the following (default is to use all): ["image", "zp", "mask", "components", "dmask", "dzp", "dcomponents", "posterior", "log_prob", "pred_mask", "capacity", "flat_capacity", "coordinates", "counterfactual"] preprocess (List[str]): Specifies the subset of inputs that be preprocessed with layernorm. Default is: ["dcomponents", "dmask", "dzp", "log_prob", "counterfactual"] coord_type (str): Type of coordinate channels to append to the refinement inputs. Can be "linear" (default) or "cos". coord_freqs (int): If using cos based coordinate channels, then this specifies the number of frequencies used. name (str): Name of the module (within the tensorflow graph). 
""" def __init__( self, decoder, refinement_core, latent_dist, output_dist, n_z, num_components, num_iters, sequential=False, factor_evaluator=None, stop_gradients=DEFAULT_STOP_GRADIENT, iter_loss_weight="linspace", inputs=DEFAULT_INPUTS, preprocess=None, coord_type="linear", coord_freqs=3, name="iodine", ): super().__init__(name=name) self._sg = shapeguard.ShapeGuard(dims={"K": num_components}) self.decoder = decoder self.refinement_core = refinement_core self.latent_dist = latent_dist self.output_dist = output_dist self.n_z = n_z self.num_components = num_components self.num_iters = num_iters self.sequential = sequential self.iter_loss_weights = self._parse_iter_loss_weights(iter_loss_weight) self.factor_evaluator = factor_evaluator self.stop_gradients = stop_gradients self.inputs = inputs self.preprocess = DEFAULT_PREPROCESSING if preprocess is None else preprocess self.coord_type = coord_type self.coord_freqs = coord_freqs with self._enter_variable_scope(): self.latent_dist.set_output_shape([self.n_z]) logging.info("VAE: z shape: %s", [self.n_z]) with tf.name_scope("prior"): self.prior = self.latent_dist.get_default_prior((self.num_components,)) self._sg.guard(self.prior, "K, Z") with tf.variable_scope("preprocess"): self._layernorms = { name: snt.LayerNorm(name="layer_norm_" + name) for name in self.preprocess } def _build(self, data): data["image"] = data["image"][:, :self.num_iters + 1] if "mask" in data: data["mask"] = data["mask"][:, :self.num_iters + 1] x = self._sg.guard(data["image"], "B, T, H, W, C") self._propagate_shape_info(x.get_shape().as_list()) # run iterative encoder iterations = self.encode(x) z_dist = self._sg.guard(iterations["z_dist"][-1], "B, K, Z") z = self._sg.guard(iterations["z"][-1], "B, K, Z") # decode x_params, x_dist = self.decode(z) iterations["x_dist"].append(self._sg.guard(x_dist, "B, 1, H, W, C")) # compute loss kl = self._sg.guard(self._raw_kl(z_dist), "B, K") img = self._get_image_for_iter(x, self.num_iters) re = self._sg.guard(self._reconstruction_error(x_dist, img), "B") iterations["kl"].append(kl) iterations["re"].append(re) iterations["recons_loss"] = [tf.reduce_mean(re) for re in iterations["re"]] total_rec_loss = sum([ w * re for w, re in zip(self.iter_loss_weights, iterations["recons_loss"]) ]) total_kl_loss = sum([ w * tf.reduce_mean(tf.reduce_sum(kl, axis=1)) for w, kl in zip(self.iter_loss_weights, iterations["kl"]) ]) total_loss = total_rec_loss + total_kl_loss scalars = { "loss/kl": sum([ tf.reduce_mean(tf.reduce_sum(kl, axis=1)) for kl in iterations["kl"] ]), "loss/recons": total_rec_loss, } if self.factor_evaluator: pred_mask = self._sg.guard(x_dist.mixture_distribution.probs, "B, 1, H, W, K") pred_mask = tf.transpose(pred_mask, [0, 4, 2, 3, 1]) mask_true = self._sg.guard(data["mask"], "B, T, L, H, W, 1") mask_true = self._get_image_for_iter(mask_true, self.num_iters) mask_true = mask_true[:, 0] factor_loss, factor_scalars, _, _, _ = self.factor_evaluator( tf.stop_gradient(z), data["factors"], data["visibility"], tf.stop_gradient(pred_mask), mask_true, ) total_loss += factor_loss scalars["factor/loss"] = factor_loss scalars.update({"factor/" + k: v for k, v in factor_scalars.items()}) scalars["loss/total"] = total_loss scalars.update(self._get_monitored_scalars(x_dist, data)) logging.info(self._sg.dims) return total_loss, scalars, iterations @snt.reuse_variables def encode(self, images): sg = self._sg sg.guard(images, "B, T, H, W, C") zp, z_dist, z = self._get_initial_z() iterations = { "z": [z], "zp": [zp], "z_dist": [z_dist], 
"x_dist": [], "inputs": [], "kl": [], "re": [], } state = self.refinement_core.initial_state(sg["B*K"][0]) for t in range(self.num_iters): img = sg.guard(self._get_image_for_iter(images, t), "B, 1, H, W, C") x_params, x_dist = self.decode(z) # compute loss kl = self._sg.guard(self._raw_kl(z_dist), "B, K") re = self._sg.guard(self._reconstruction_error(x_dist, img), "B") loss = tf.reduce_mean(re) + tf.reduce_mean(tf.reduce_sum(kl, axis=1)) inputs = self._get_inputs_for(x_params, x_dist, img, z_dist, zp, loss) zp, state = self.refinement_core(inputs, state) sg.guard(zp, "B, K, Zp") z_dist = sg.guard(self.latent_dist(zp), "B, K, Z") z = z_dist.sample() # append local variables to iteration collections for v, name in zip( [z, zp, z_dist, x_dist, inputs, kl, re], ["z", "zp", "z_dist", "x_dist", "inputs", "kl", "re"], ): iterations[name].append(v) return iterations @snt.reuse_variables def decode(self, z): sg = shapeguard.ShapeGuard() sg.guard(z, "B, K, Z") # legacy z = tf.concat([z, 5.0 * tf.ones(sg["B, K, 1"], dtype=tf.float32)], axis=2) params = self.decoder(z) out_dist = self.output_dist(*params) return params, out_dist @snt.reuse_variables def eval(self, data): total_loss, scalars, iterations = self._build(data) sg = shapeguard.ShapeGuard() def get_components(dist): return tf.transpose(dist.components_distribution.mean()[:, 0, :, :, :, :], [0, 3, 1, 2, 4]) def get_mask(dist): return tf.transpose(dist.mixture_distribution.probs[:, :, :, :, :], [0, 4, 2, 3, 1]) def get_mask_logits(dist): return tf.transpose(dist.mixture_distribution.logits[:, :, :, :, :], [0, 4, 2, 3, 1]) def stack_iters(list_of_variables, pad_zero=False): if pad_zero: list_of_variables.insert(0, tf.zeros_like(list_of_variables[0])) return tf.stack(list_of_variables, axis=1) # data image = sg.guard(data["image"], "B, 1, H, W, C") true_mask = sg.guard(data["mask"], "B, 1, L, H, W, 1") visibility = sg.guard(data["visibility"], "B, L") factors = data["factors"] # inputs inputs_flat = { k: stack_iters([inp["flat"][k] for inp in iterations["inputs"]], pad_zero=True) for k in iterations["inputs"][0]["flat"].keys() } inputs_spatial = { k: stack_iters([inp["spatial"][k] for inp in iterations["inputs"]], pad_zero=True) for k in iterations["inputs"][0]["spatial"].keys() } # latent z = sg.guard(stack_iters(iterations["z"]), "B, T, K, Z") z_mean = stack_iters([zd.mean() for zd in iterations["z_dist"]]) z_std = stack_iters([zd.stddev() for zd in iterations["z_dist"]]) # outputs recons = stack_iters([xd.mean() for xd in iterations["x_dist"]]) pred_mask = stack_iters([get_mask(xd) for xd in iterations["x_dist"]]) pred_mask_logits = stack_iters( [get_mask_logits(xd) for xd in iterations["x_dist"]]) components = stack_iters( [get_components(xd) for xd in iterations["x_dist"]]) # metrics tm = tf.transpose(true_mask[..., 0], [0, 1, 3, 4, 2]) tm = tf.reshape(tf.tile(tm, sg["1, T, 1, 1, 1"]), sg["B * T, H * W, L"]) pm = tf.transpose(pred_mask[..., 0], [0, 1, 3, 4, 2]) pm = tf.reshape(pm, sg["B * T, H * W, K"]) ari = tf.reshape(adjusted_rand_index(tm, pm), sg["B, T"]) ari_nobg = tf.reshape(adjusted_rand_index(tm[..., 1:], pm), sg["B, T"]) mse = tf.reduce_mean(tf.square(recons - image[:, None]), axis=[2, 3, 4, 5]) # losses loss_recons = stack_iters(iterations["re"]) kl = stack_iters(iterations["kl"]) info = { "data": { "image": sg.guard(image, "B, 1, H, W, C"), "true_mask": sg.guard(true_mask, "B, 1, L, H, W, 1"), "visibility": sg.guard(visibility, "B, L"), "factors": factors, }, "inputs": { "flat": inputs_flat, "spatial": inputs_spatial }, 
"latent": { "z": sg.guard(z, "B, T, K, Z"), "z_mean": sg.guard(z_mean, "B, T, K, Z"), "z_std": sg.guard(z_std, "B, T, K, Z"), }, "outputs": { "recons": sg.guard(recons, "B, T, 1, H, W, C"), "pred_mask": sg.guard(pred_mask, "B, T, K, H, W, 1"), "pred_mask_logits": sg.guard(pred_mask_logits, "B, T, K, H, W, 1"), "components": sg.guard(components, "B, T, K, H, W, C"), }, "losses": { "total": total_loss, "recons": sg.guard(loss_recons, "B, T"), "kl": sg.guard(kl, "B, T, K"), }, "metrics": { "ari": ari, "ari_nobg": ari_nobg, "mse": mse }, } if self.factor_evaluator: # factor evaluation information factor_info = { "loss": [], "metrics": collections.defaultdict(list), "predictions": collections.defaultdict(list), "assignment": [], } for t in range(z.get_shape().as_list()[1]): floss, fscalars, _, fpred, fass = self.factor_evaluator( z[:, t], factors, visibility, pred_mask[:, t], true_mask[:, 0]) factor_info["loss"].append(floss) factor_info["assignment"].append(fass) for k in fpred: factor_info["predictions"][k].append( tf.reduce_sum(fpred[k] * fass[..., None], axis=2)) factor_info["metrics"][k].append(fscalars[k]) info["losses"]["factor"] = sg.guard(tf.stack(factor_info["loss"]), "T") info["factor_regressor"] = { "assignment": sg.guard(stack_iters(factor_info["assignment"]), "B, T, L, K"), "metrics": { k: tf.stack(factor_info["metrics"][k], axis=0) for k in factor_info["metrics"] }, "predictions": { k: stack_iters(factor_info["predictions"][k]) for k in factor_info["predictions"] }, } return info @snt.reuse_variables def get_sample_images(self, nr_samples=16): with tf.name_scope("prior_samples"): prior_z = self.prior.sample(nr_samples) _, prior_out = self.decode(prior_z) prior_out = tf.clip_by_value(prior_out.mean(), 0.0, 1.0) return utils.images_to_grid(prior_out[:, 0])[tf.newaxis] @snt.reuse_variables def get_overview_images(self, data, nr_images=4, mask_components=False): x = data["image"][:nr_images, :self.num_iters + 1] old_b, self._sg.B = self._sg.B, x.get_shape().as_list()[0] iterations = self.encode(x) z = iterations["z"][-1] _, x_dist = self.decode(z) self._sg.B = old_b t = min(self.num_iters, x.get_shape().as_list()[1]) - 1 # iterations view recons = tf.stack([x_dist.mean() for x_dist in iterations["x_dist"]], axis=1) masks = tf.stack( [ tf.transpose(x_dist.mixture_distribution.probs, [0, 4, 2, 3, 1]) for x_dist in iterations["x_dist"] ], axis=1, ) return { "overview": utils.get_overview_image( x[:, t:t + 1], x_dist, mask_components=mask_components), "sequence": utils.construct_iterations_image(x[:, :t + 1, tf.newaxis], recons, masks), "samples": self.get_sample_images(), } def _get_initial_z(self): # Initial z distribution zp_init = tf.get_variable( "initial_sample_distribution", shape=self.latent_dist.input_shapes.params, dtype=tf.float32, ) zp = tf.tile(zp_init[tf.newaxis, tf.newaxis], self._sg["B, K, 1"]) z_dist = self.latent_dist(zp) z = z_dist.sample() self._sg.guard(zp, "B, K, Zp") self._sg.guard(z_dist, "B, K, Z") self._sg.guard(z, "B, K, Z") return zp, z_dist, z def _parse_iter_loss_weights(self, iter_loss_weight): if iter_loss_weight == "linspace": iter_weights = np.linspace(0.0, 1.0, self.num_iters + 1).tolist() elif isinstance(iter_loss_weight, (float, int)): iter_weights = [float(iter_loss_weight)] * (self.num_iters + 1) elif isinstance(iter_loss_weight, (tuple, list)): iter_weights = [float(w) for w in iter_loss_weight] else: raise ValueError("Unknown iter_loss_weight type {}.".format( repr(iter_loss_weight))) assert len(iter_weights) == (self.num_iters + 1), 
iter_loss_weight return iter_weights def _propagate_shape_info(self, image_shape): image_shape = image_shape[-3:] # ignore batch dims logging.info("VAE: image shape: %s", image_shape) z_param_shape = self._sg.guard(self.latent_dist.input_shapes.params, "Zp") logging.info("VAE: z parameter shape: %s", z_param_shape) self.output_dist.set_output_shape(image_shape) out_param_shapes = self.output_dist.input_shapes logging.info("VAE: output parameter shapes: %s", out_param_shapes) self.decoder.set_output_shapes(*out_param_shapes) def _get_image_for_iter(self, images, t): """Return current frame or first image.""" if self.sequential: return images[:, t:t + 1] else: return images[:, :1] @staticmethod def _get_mask_posterior(out_dist, img): p_comp = out_dist.components_distribution.prob(img[..., tf.newaxis, :]) posterior = p_comp / (tf.reduce_sum(p_comp, axis=-1, keepdims=True) + 1e-6) return tf.transpose(posterior, [0, 4, 2, 3, 1]) def _get_inputs_for(self, out_params, out_dist, img, z_dist, zp, loss): sg = self._sg # gradients of loss wrt z, components and mask dzp, dxp, dmp = tf.gradients(loss, [zp, out_params.pixel, out_params.mask]) log_prob = sg.guard( out_dist.log_prob(img)[..., tf.newaxis], "B, 1, H, W, 1") counterfactual_log_probs = [] for k in range(0, self.num_components): mask = tf.concat([out_params.mask[:, :k], out_params.mask[:, k + 1:]], axis=1) pixel = tf.concat([out_params.pixel[:, :k], out_params.pixel[:, k + 1:]], axis=1) out_dist_k = self.output_dist(pixel, mask) log_prob_k = out_dist_k.log_prob(img)[..., tf.newaxis] counterfactual_log_probs.append(log_prob_k) counterfactual = log_prob - tf.concat(counterfactual_log_probs, axis=1) pred_mask = tf.transpose(out_dist.mixture_distribution.probs, [0, 4, 2, 3, 1]) potential_inputs = { # spatial "image": sg.guard(img, "B, 1, H, W, C"), "log_prob": sg.guard(log_prob, "B, 1, H, W, 1"), "mask": sg.guard(out_params.mask, "B, K, H, W, 1"), "pred_mask": sg.guard(pred_mask, "B, K, H, W, 1"), "components": sg.guard(out_params.pixel, "B, K, H, W, Cp"), "dmask": sg.guard(dmp, "B, K, H, W, Mp"), "dcomponents": sg.guard(dxp, "B, K, H, W, Cp"), "posterior": sg.guard(self._get_mask_posterior(out_dist, img), "B, K, H, W, 1"), "capacity": 0.5 * tf.ones(sg["B, K, H, W, 1"], dtype=tf.float32), # TODO: legacy "coordinates": self._get_coord_channels(), "counterfactual": self._sg.guard(counterfactual, "B, K, H, W, 1"), # flat "zp": sg.guard(zp, "B, K, Zp"), "dzp": sg.guard(dzp, "B, K, Zp"), "flat_capacity": 0.5 * tf.ones(sg["B, K, 1"], dtype=tf.float32), # TODO: legacy } # collect used inputs, stop gradients and preprocess where needed final_inputs = {"spatial": {}, "flat": {}} for k, v in potential_inputs.items(): # skip unused inputs if k not in self.inputs: continue # stop gradients if k in self.stop_gradients: v = tf.stop_gradient(v) # preprocess v = self._apply_preprocessing(k, v) # sort into flat / spatial according to their shape structure = "flat" if len(v.get_shape().as_list()) == 3 else "spatial" final_inputs[structure][k] = v return final_inputs def _apply_preprocessing(self, name, val): if name in self.preprocess: if self._sg.matches(val, "B, K, _z"): flat_val = tf.reshape(val, self._sg["B*K"] + [-1]) elif self._sg.matches(val, "B, 1, _z"): flat_val = val[:, 0, :] elif self._sg.matches(val, "B, K, H, W, _c"): flat_val = tf.reshape(val, self._sg["B*K, H*W"] + [-1]) elif self._sg.matches(val, "B, 1, H, W, _c"): flat_val = tf.reshape(val, self._sg["B, H*W"] + [-1]) else: raise ValueError("Cannot handle shape {}".format( 
val.get_shape().as_list())) ln = self._layernorms[name] norm_val = ln(flat_val) return tf.reshape(norm_val, val.shape.as_list()) else: return val def _get_coord_channels(self): if self.coord_type == "linear": x_coords = tf.linspace(-1.0, 1.0, self._sg.W)[None, None, None, :, None] y_coords = tf.linspace(-1.0, 1.0, self._sg.H)[None, None, :, None, None] x_coords = tf.tile(x_coords, self._sg["B, 1, H, 1, 1"]) y_coords = tf.tile(y_coords, self._sg["B, 1, 1, W, 1"]) return tf.concat([x_coords, y_coords], axis=-1) elif self.coord_type == "cos": freqs = self._sg.guard(tf.range(0.0, self.coord_freqs), "F") valx = tf.linspace(0.0, np.pi, self._sg.W)[None, None, None, :, None, None] valy = tf.linspace(0.0, np.pi, self._sg.H)[None, None, :, None, None, None] x_basis = tf.cos(valx * freqs[None, None, None, None, :, None]) y_basis = tf.cos(valy * freqs[None, None, None, None, None, :]) xy_basis = tf.reshape(x_basis * y_basis, self._sg["1, 1, H, W, F*F"]) coords = tf.tile(xy_basis, self._sg["B, 1, 1, 1, 1"])[..., 1:] return coords else: raise KeyError('Unknown coord_type: "{}"'.format(self.coord_type)) def _raw_kl(self, z_dist): return tfd.kl_divergence(z_dist, self.prior) def _reconstruction_error(self, x_dist, img): log_prob = self._sg.guard(x_dist.log_prob(img), "B, 1, H, W") return -tf.reduce_sum(log_prob, axis=[1, 2, 3]) def _get_monitored_scalars(self, out_dist, data): self._sg.guard(out_dist, "B, 1, H, W, C") img = self._get_image_for_iter(data["image"], self.num_iters) scalars = {} with tf.name_scope("monitored_scalars"): # ######### Loss Monitoring ######### scalars["loss/mse"] = tf.losses.mean_squared_error( img, out_dist.mean()) # ########## Mask Monitoring ####### if "mask" in data: true_mask = self._sg.guard(data["mask"], "B, T, L, H, W, 1") true_mask = tf.transpose(true_mask[:, -1, ..., 0], [0, 2, 3, 1]) true_mask = self._sg.reshape(true_mask, "B, H*W, L") else: true_mask = None pred_mask = self._sg.guard(out_dist.mixture_distribution.probs, "B, 1, H, W, K") pred_mask = self._sg.reshape(pred_mask, "B, H*W, K") if pred_mask is not None and true_mask is not None: self._sg.guard(pred_mask, "B, H*W, K") self._sg.guard(true_mask, "B, H*W, L") scalars["loss/ari"] = tf.reduce_mean( adjusted_rand_index(true_mask, pred_mask)) scalars["loss/ari_nobg"] = tf.reduce_mean( adjusted_rand_index(true_mask[..., 1:], pred_mask)) return scalars
deepmind-research-master
iodine/modules/iodine.py
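# --- Illustrative sketch, not part of the repository ---
# How IODINE's default iter_loss_weight="linspace" weighting behaves: one weight
# per refinement step plus the final decode, rising linearly from 0 to 1, so the
# total loss is dominated by the later, better-refined reconstructions.
# The per-iteration loss values below are made-up example numbers.
import numpy as np

num_iters = 5
iter_weights = np.linspace(0.0, 1.0, num_iters + 1)          # [0.0, 0.2, ..., 1.0]
recons_losses = np.array([4.0, 3.1, 2.5, 2.1, 1.9, 1.8])     # hypothetical values
kl_losses = np.array([0.2, 0.5, 0.7, 0.8, 0.8, 0.8])         # hypothetical values
total_loss = float(np.sum(iter_weights * (recons_losses + kl_losses)))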
# Lint as: python3 # Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data loading functionality for IODINE.""" # pylint: disable=g-multiple-import, missing-docstring, unused-import import os.path from iodine.modules.utils import flatten_all_but_last, ensure_3d from multi_object_datasets import ( clevr_with_masks, multi_dsprites, tetrominoes, objects_room, ) from shapeguard import ShapeGuard import sonnet as snt import tensorflow.compat.v1 as tf class IODINEDataset(snt.AbstractModule): num_true_objects = 1 num_channels = 3 factors = {} def __init__( self, path, batch_size, image_dim, crop_region=None, shuffle_buffer=1000, max_num_objects=None, min_num_objects=None, grayscale=False, name="dataset", **kwargs, ): super().__init__(name=name) self.path = os.path.abspath(os.path.expanduser(path)) self.batch_size = batch_size self.crop_region = crop_region self.image_dim = image_dim self.shuffle_buffer = shuffle_buffer self.max_num_objects = max_num_objects self.min_num_objects = min_num_objects self.grayscale = grayscale self.dataset = None def _build(self, subset="train"): dataset = self.dataset # filter by number of objects if self.max_num_objects is not None or self.min_num_objects is not None: dataset = self.dataset.filter(self.filter_by_num_objects) if subset == "train": # normal mode returns a shuffled dataset iterator if self.shuffle_buffer is not None: dataset = dataset.shuffle(self.shuffle_buffer) elif subset == "summary": # for generating summaries and overview images # returns a single fixed batch dataset = dataset.take(self.batch_size) # repeat and batch dataset = dataset.repeat().batch(self.batch_size, drop_remainder=True) iterator = dataset.make_one_shot_iterator() data = iterator.get_next() # preprocess the data to ensure correct format, scale images etc. 
data = self.preprocess(data) return data def filter_by_num_objects(self, d): if "visibility" not in d: return tf.constant(True) min_num_objects = self.max_num_objects or 0 max_num_objects = self.max_num_objects or 6 min_predicate = tf.greater_equal( tf.reduce_sum(d["visibility"]), tf.constant(min_num_objects - 1e-5, dtype=tf.float32), ) max_predicate = tf.less_equal( tf.reduce_sum(d["visibility"]), tf.constant(max_num_objects + 1e-5, dtype=tf.float32), ) return tf.logical_and(min_predicate, max_predicate) def preprocess(self, data): sg = ShapeGuard(dims={ "B": self.batch_size, "H": self.image_dim[0], "W": self.image_dim[1] }) image = sg.guard(data["image"], "B, h, w, C") mask = sg.guard(data["mask"], "B, L, h, w, 1") # to float image = tf.cast(image, tf.float32) / 255.0 mask = tf.cast(mask, tf.float32) / 255.0 # crop if self.crop_region is not None: height_slice = slice(self.crop_region[0][0], self.crop_region[0][1]) width_slice = slice(self.crop_region[1][0], self.crop_region[1][1]) image = image[:, height_slice, width_slice, :] mask = mask[:, :, height_slice, width_slice, :] flat_mask, unflatten = flatten_all_but_last(mask, n_dims=3) # rescale size = tf.constant( self.image_dim, dtype=tf.int32, shape=[2], verify_shape=True) image = tf.image.resize_images( image, size, method=tf.image.ResizeMethod.BILINEAR) mask = tf.image.resize_images( flat_mask, size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) if self.grayscale: image = tf.reduce_mean(image, axis=-1, keepdims=True) output = { "image": sg.guard(image[:, None], "B, T, H, W, C"), "mask": sg.guard(unflatten(mask)[:, None], "B, T, L, H, W, 1"), "factors": self.preprocess_factors(data, sg), } if "visibility" in data: output["visibility"] = sg.guard(data["visibility"], "B, L") else: output["visibility"] = tf.ones(sg["B, L"], dtype=tf.float32) return output def preprocess_factors(self, data, sg): return { name: sg.guard(ensure_3d(data[name]), "B, L, *") for name in self.factors } def get_placeholders(self, batch_size=None): batch_size = batch_size or self.batch_size sg = ShapeGuard( dims={ "B": batch_size, "H": self.image_dim[0], "W": self.image_dim[1], "L": self.num_true_objects, "C": 3, "T": 1, }) return { "image": tf.placeholder(dtype=tf.float32, shape=sg["B, T, H, W, C"]), "mask": tf.placeholder(dtype=tf.float32, shape=sg["B, T, L, H, W, 1"]), "visibility": tf.placeholder(dtype=tf.float32, shape=sg["B, L"]), "factors": { name: tf.placeholder(dtype=dtype, shape=sg["B, L, {}".format(size)]) for name, (dtype, size) in self.factors }, } class CLEVR(IODINEDataset): num_true_objects = 11 num_channels = 3 factors = { "color": (tf.uint8, 1), "shape": (tf.uint8, 1), "size": (tf.uint8, 1), "position": (tf.float32, 3), "rotation": (tf.float32, 1), } def __init__( self, path, crop_region=((29, 221), (64, 256)), image_dim=(128, 128), name="clevr", **kwargs, ): super().__init__( path=path, crop_region=crop_region, image_dim=image_dim, name=name, **kwargs) self.dataset = clevr_with_masks.dataset(self.path) def preprocess_factors(self, data, sg): return { "color": sg.guard(ensure_3d(data["color"]), "B, L, 1"), "shape": sg.guard(ensure_3d(data["shape"]), "B, L, 1"), "size": sg.guard(ensure_3d(data["color"]), "B, L, 1"), "position": sg.guard(ensure_3d(data["pixel_coords"]), "B, L, 3"), "rotation": sg.guard(ensure_3d(data["rotation"]), "B, L, 1"), } class MultiDSprites(IODINEDataset): num_true_objects = 6 num_channels = 3 factors = { "color": (tf.float32, 3), "shape": (tf.uint8, 1), "scale": (tf.float32, 1), "x": (tf.float32, 1), "y": (tf.float32, 1), 
"orientation": (tf.float32, 1), } def __init__( self, path, # variant from ['binarized', 'colored_on_grayscale', 'colored_on_colored'] dataset_variant="colored_on_grayscale", image_dim=(64, 64), name="multi_dsprites", **kwargs, ): super().__init__(path=path, name=name, image_dim=image_dim, **kwargs) self.dataset_variant = dataset_variant self.dataset = multi_dsprites.dataset(self.path, self.dataset_variant) class Tetrominoes(IODINEDataset): num_true_objects = 6 num_channels = 3 factors = { "color": (tf.uint8, 3), "shape": (tf.uint8, 1), "position": (tf.float32, 2), } def __init__(self, path, image_dim=(35, 35), name="tetrominoes", **kwargs): super().__init__(path=path, name=name, image_dim=image_dim, **kwargs) self.dataset = tetrominoes.dataset(self.path) def preprocess_factors(self, data, sg): pos_x = ensure_3d(data["x"]) pos_y = ensure_3d(data["y"]) position = tf.concat([pos_x, pos_y], axis=2) return { "color": sg.guard(ensure_3d(data["color"]), "B, L, 3"), "shape": sg.guard(ensure_3d(data["shape"]), "B, L, 1"), "position": sg.guard(ensure_3d(position), "B, L, 2"), }
deepmind-research-master
iodine/modules/data.py
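# --- Illustrative sketch, not part of the repository ---
# The visibility-based filtering used by IODINEDataset: the per-slot visibility
# indicators are summed to count visible objects, and only examples whose count
# falls inside [min_num_objects, max_num_objects] are kept. The bounds below are
# hypothetical example values.
import tensorflow.compat.v1 as tf

def keep_example(example, min_num_objects=3, max_num_objects=6):
  num_visible = tf.reduce_sum(example["visibility"])
  lower = tf.greater_equal(num_visible,
                           tf.constant(min_num_objects - 1e-5, dtype=tf.float32))
  upper = tf.less_equal(num_visible,
                        tf.constant(max_num_objects + 1e-5, dtype=tf.float32))
  return tf.logical_and(lower, upper)

# dataset = dataset.filter(keep_example)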
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Configuration parameters for MMV.""" def get_model_config(ckpt_path): """Returns the model configuration to be used with each checkpoint.""" config = { 'audio_backbone': 'resnet50', 'audio_model_kwargs': { 'bn_config': { 'create_offset': True, 'create_scale': True, 'decay_rate': 0.9, 'eps': 1.0e-5 } }, 'bn_config_proj': { 'create_offset': True, 'create_scale': True, 'decay_rate': 0.9, 'eps': 1.0e-5 }, 'config_audio_text': { 'embedding_dim': 512, 'toaud_bn_after_proj': False, 'toaud_head_mode': 'linear', 'totxt_bn_after_proj': False, 'totxt_head_mode': 'linear' }, 'config_video_audio': { 'embedding_dim': 512, 'toaud_bn_after_proj': True, 'toaud_head_mode': 'mlp@512', 'tovid_bn_after_proj': False, 'tovid_head_mode': 'linear' }, 'config_video_text': { 'embedding_dim': 256, 'totxt_bn_after_proj': True, 'totxt_head_mode': 'linear', 'tovid_bn_after_proj': False, 'tovid_head_mode': 'linear' }, 'mm_embedding_graph': 'fac_relu', 'name': 'text_audio_video', 'sentence_dim': 2048, 'use_xreplica_bn': True, 'vision_model_kwargs': { 'bn_config': { 'create_offset': True, 'create_scale': True, 'decay_rate': 0.9, 'eps': 1.0e-5 }, 'n_frames': 32, 'width_mult': 1, }, } if 's3d' in ckpt_path: config['visual_backbone'] = 's3d' if 'tsm_resnet_x1' in ckpt_path: config['visual_backbone'] = 'resnet50tsm' if 'tsm_resnet_x2' in ckpt_path: config['visual_backbone'] = 'resnet50tsm' config['vision_model_kwargs']['width_mult'] = 2 return config
deepmind-research-master
mmv/config.py
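# --- Illustrative usage sketch; the checkpoint filenames are hypothetical ---
# The visual backbone is inferred from the checkpoint path, so the same config
# function covers the S3D and TSM-ResNet variants of MMV.
from mmv import config

cfg_s3d = config.get_model_config('/tmp/mmv_s3d.pkl')
cfg_tsm_x2 = config.get_model_config('/tmp/mmv_tsm_resnet_x2.pkl')
assert cfg_s3d['visual_backbone'] == 's3d'
assert cfg_tsm_x2['visual_backbone'] == 'resnet50tsm'
assert cfg_tsm_x2['vision_model_kwargs']['width_mult'] == 2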
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """UCF101 linear evaluation.""" import functools from typing import Any, Dict, Optional from absl import app from absl import flags import haiku as hk import jax import jax.numpy as jnp import numpy as np import sklearn from sklearn import preprocessing import sklearn.linear_model import sklearn.svm import tensorflow as tf import tensorflow_datasets as tfds from mmv import config from mmv.models import mm_embeddings from mmv.utils import checkpoint from mmv.utils import ucf101_dataset flags.DEFINE_string('checkpoint_path', '~/tmp/mmv_s3d.pkl', 'The directory to load pre-trained weights from.') flags.DEFINE_string('dataset_folder', '/tmp/ucf101', 'The directory with the ucf101 dataset.') flags.DEFINE_integer('eval_batch_size', 1, 'The batch size for evaluation.') flags.DEFINE_integer('train_batch_size', 16, 'The batch size for training.') flags.DEFINE_integer('num_train_epochs', 10, 'How many epochs to collect features during training.') flags.DEFINE_integer('num_test_windows', 10, 'How many windows to average on during test.') flags.DEFINE_integer('min_resize', 224, 'Min value to resize images to during preprocessing.') flags.DEFINE_integer('crop_size', 224, 'Value to resize images to during preprocessing.') flags.DEFINE_integer('num_frames', 32, 'Number of video frames.') flags.DEFINE_integer('stride', 2, 'Stride for video frames.') flags.DEFINE_integer('ucf101_split', 1, 'Which split of ucf101 to use.') FLAGS = flags.FLAGS def get_sampling_offset(sequence: tf.Tensor, num_steps: Optional[int], is_training: bool, stride: int = 1, seed: Optional[int] = None) -> tf.Tensor: """Calculates the initial offset for a sequence where all steps will fit. Args: sequence: any tensor where the first dimension is timesteps. num_steps: The number of timesteps we will output. If None, deterministically start at the first frame. is_training: A boolean indicates whether the graph is for training or not. If False, the starting frame always the first frame. stride: distance to sample between timesteps. seed: a deterministic seed to use when sampling. Returns: The first index to begin sampling from. A best effort is made to provide a starting index such that all requested steps fit within the sequence (i.e. offset + 1 + (num_steps - 1) * stride < len(sequence)). If this is not satisfied, the starting index is chosen randomly from the full sequence. 
""" if num_steps is None or not is_training: return tf.constant(0) sequence_length = tf.shape(sequence)[0] max_offset = tf.cond( tf.greater(sequence_length, (num_steps - 1) * stride), lambda: sequence_length - (num_steps - 1) * stride, lambda: sequence_length) offset = tf.random.uniform( (), maxval=tf.cast(max_offset, tf.int32), dtype=tf.int32, seed=seed) return offset def sample_or_pad_sequence_indices(sequence: tf.Tensor, num_steps: Optional[int], is_training: bool, repeat_sequence: bool = True, stride: int = 1, offset: Optional[int] = None) -> tf.Tensor: """Returns indices to take for sampling or padding a sequence to fixed size. Samples num_steps from the sequence. If the sequence is shorter than num_steps, the sequence loops. If the sequence is longer than num_steps and is_training is True, then we seek to a random offset before sampling. If offset is provided, we use that deterministic offset. This method is appropriate for sampling from a tensor where you want every timestep between a start and end time. See sample_stacked_sequence_indices for more flexibility. Args: sequence: any tensor where the first dimension is timesteps. num_steps: how many steps (e.g. frames) to take. If None, all steps from start to end are considered and `is_training` has no effect. is_training: A boolean indicates whether the graph is for training or not. If False, the starting frame is deterministic. repeat_sequence: A boolean indicates whether the sequence will repeat to have enough steps for sampling. If False, a runtime error is thrown if num_steps * stride is longer than sequence length. stride: distance to sample between timesteps. offset: a deterministic offset to use regardless of the is_training value. Returns: Indices to gather from the sequence Tensor to get a fixed size sequence. """ sequence_length = tf.shape(sequence)[0] sel_idx = tf.range(sequence_length) if num_steps: if offset is None: offset = get_sampling_offset(sequence, num_steps, is_training, stride) if repeat_sequence: # Repeats sequence until num_steps are available in total. num_repeats = tf.cast( tf.math.ceil( tf.math.divide( tf.cast(num_steps * stride + offset, tf.float32), tf.cast(sequence_length, tf.float32) )), tf.int32) sel_idx = tf.tile(sel_idx, [num_repeats]) steps = tf.range(offset, offset + num_steps * stride, stride) else: steps = tf.range(0, sequence_length, stride) return tf.gather(sel_idx, steps) def random_sample_sequence(sequence: tf.Tensor, num_steps: int, stride: int = 1) -> tf.Tensor: """Randomly sample a segment of size num_steps from a given sequence.""" indices = sample_or_pad_sequence_indices( sequence=sequence, num_steps=num_steps, is_training=True, # Random sample. repeat_sequence=True, # Will repeat the sequence if request more. stride=stride, offset=None) indices.set_shape((num_steps,)) output = tf.gather(sequence, indices) return output def sample_linspace_sequence(sequence: tf.Tensor, num_windows: int, num_steps: int, stride: int = 1) -> tf.Tensor: """Samples num_windows segments from sequence with linearly spaced offsets. The samples are concatenated in a single Tensor in order to have the same format structure per timestep (e.g. a single frame). If num_steps * stride is bigger than the number of timesteps, the sequence is repeated. This function can be used in evaluation in order to extract enough segments in order to span the entire sequence. Args: sequence: Any tensor where the first dimension is timesteps. num_windows: Number of windows retrieved from the sequence. num_steps: Number of steps (e.g. 
frames) to take. stride: Distance to sample between timesteps. Returns: A single Tensor with first dimension num_windows * num_steps. The Tensor contains the concatenated list of num_windows tensors which offsets have been linearly spaced from input. """ sequence_length = tf.shape(sequence)[0] max_offset = tf.maximum(0, sequence_length - num_steps * stride) offsets = tf.linspace(0.0, tf.cast(max_offset, tf.float32), num_windows) offsets = tf.cast(offsets, tf.int32) all_indices = [] for i in range(num_windows): all_indices.append( sample_or_pad_sequence_indices( sequence=sequence, num_steps=num_steps, is_training=False, repeat_sequence=True, # Will repeat the sequence if request more. stride=stride, offset=offsets[i])) indices = tf.concat(all_indices, axis=0) indices.set_shape((num_windows * num_steps,)) output = tf.gather(sequence, indices) return output def resize_smallest(frames: tf.Tensor, min_resize: int) -> tf.Tensor: """Resizes frames so that min(height, width) is equal to min_resize. This function will not do anything if the min(height, width) is already equal to min_resize. This allows to save compute time. Args: frames: A Tensor of dimension [timesteps, input_h, input_w, channels]. min_resize: Minimum size of the final image dimensions. Returns: A Tensor of shape [timesteps, output_h, output_w, channels] of type frames.dtype where min(output_h, output_w) = min_resize. """ shape = tf.shape(frames) input_h = shape[1] input_w = shape[2] output_h = tf.maximum(min_resize, (input_h * min_resize) // input_w) output_w = tf.maximum(min_resize, (input_w * min_resize) // input_h) def resize_fn(): frames_resized = tf.image.resize(frames, (output_h, output_w)) return tf.cast(frames_resized, frames.dtype) should_resize = tf.math.logical_or(tf.not_equal(input_w, output_w), tf.not_equal(input_h, output_h)) frames = tf.cond(should_resize, resize_fn, lambda: frames) return frames def process_samples(features_dict, num_frames=32, stride=1, is_training=True, min_resize=224, crop_size=224, num_windows=1): """Process video frames.""" video = features_dict['video'] if is_training: assert num_windows == 1 video = random_sample_sequence(video, num_frames, stride) is_flipped = tf.random.uniform((), minval=0, maxval=2, dtype=tf.int32) video = tf.cond(tf.equal(is_flipped, 1), true_fn=lambda: tf.image.flip_left_right(video), false_fn=lambda: video) else: video = sample_linspace_sequence(video, num_windows, num_frames, stride) # Resize smallest side. video = resize_smallest(video, min_resize) if is_training: # Random crop. video = tf.image.random_crop(video, [num_frames, crop_size, crop_size, 3]) else: # Central crop. video = tf.image.resize_with_crop_or_pad(video, crop_size, crop_size) video = tf.cast(video, tf.float32) video /= 255.0 # Set between [0, 1]. 
features_dict['video'] = video return features_dict def space_to_depth_batch(features_dict): images = features_dict['video'] _, l, h, w, c = images.shape images = tf.reshape(images, [-1, l // 2, 2, h // 2, 2, w // 2, 2, c]) images = tf.transpose(images, [0, 1, 3, 5, 2, 4, 6, 7]) images = tf.reshape(images, [-1, l // 2, h // 2, w // 2, 8 * c]) features_dict['video'] = images return features_dict def reshape_windows(features_dict, num_frames): x = features_dict['video'] x = tf.reshape(x, (-1, num_frames, x.shape[2], x.shape[3], x.shape[4])) features_dict['video'] = x return features_dict def compute_accuracy_metrics(pred, gt, prefix=''): order_pred = np.argsort(pred, axis=1) assert len(gt.shape) == len(order_pred.shape) == 2 top1_pred = order_pred[:, -1:] top5_pred = order_pred[:, -5:] top1_acc = np.mean(top1_pred == gt) top5_acc = np.mean(np.max(top5_pred == gt, 1)) return {prefix + 'top1': top1_acc, prefix + 'top5': top5_acc} def forward_fn(images: jnp.ndarray, audio_spectrogram: jnp.ndarray, word_ids: jnp.ndarray, is_training: bool, model_config: Dict[str, Any]): """Forward pass of the model.""" # This should contain the pre-trained weights. We set it to zero because it # will be loaded from the checkpoint. language_model_vocab_size = 65536 word_embedding_dim = 300 dummy_embedding_matrix = jnp.zeros(shape=(language_model_vocab_size, word_embedding_dim)) module = mm_embeddings.AudioTextVideoEmbedding( **model_config, word_embedding_matrix=dummy_embedding_matrix) return module(images=images, audio_spectrogram=audio_spectrogram, word_ids=word_ids, is_training=is_training)['vid_repr'] def main(argv): del argv sklearn_reg = 0.001 model_config = config.get_model_config(FLAGS.checkpoint_path) forward = hk.without_apply_rng(hk.transform_with_state(forward_fn)) forward_apply = jax.jit(functools.partial(forward.apply, is_training=False, model_config=model_config)) # Get the UCF101 config. dset_config = tfds.video.ucf101.Ucf101.BUILDER_CONFIGS[FLAGS.ucf101_split] builder = ucf101_dataset.ModUcf101( data_dir=FLAGS.dataset_folder, config=dset_config) # Create the tfrecord files (no-op if already exists) dl_config = tfds.download.DownloadConfig(verify_ssl=False) builder.download_and_prepare(download_config=dl_config) # Generate the training dataset. train_ds = builder.as_dataset(split='train', shuffle_files=False) train_ds = train_ds.map(lambda x: process_samples( # pylint: disable=g-long-lambda x, num_frames=FLAGS.num_frames, stride=FLAGS.stride, is_training=True, min_resize=FLAGS.min_resize, crop_size=FLAGS.crop_size)) train_ds = train_ds.batch(batch_size=FLAGS.train_batch_size) if model_config['visual_backbone'] == 's3d': train_ds = train_ds.map(space_to_depth_batch) train_ds = train_ds.repeat(FLAGS.num_train_epochs) # Generate the test dataset. 
test_ds = builder.as_dataset(split='test', shuffle_files=False) test_ds = test_ds.map(lambda x: process_samples( # pylint: disable=g-long-lambda x, num_frames=FLAGS.num_frames, stride=FLAGS.stride, is_training=False, min_resize=FLAGS.min_resize, crop_size=FLAGS.crop_size, num_windows=FLAGS.num_test_windows)) test_ds = test_ds.batch(batch_size=FLAGS.eval_batch_size) test_ds = test_ds.map(lambda x: reshape_windows( # pylint: disable=g-long-lambda x, num_frames=FLAGS.num_frames)) if model_config['visual_backbone'] == 's3d': test_ds = test_ds.map(space_to_depth_batch) test_ds = test_ds.repeat(1) pretrained_weights = checkpoint.load_checkpoint(FLAGS.checkpoint_path) params = pretrained_weights['params'] state = pretrained_weights['state'] # Collect training samples. audio_frames = 96 mel_filters = 40 num_tokens = 16 dummy_audio = jnp.zeros( shape=(FLAGS.train_batch_size, audio_frames, mel_filters, 1)) dummy_word_ids = jnp.zeros( shape=(FLAGS.train_batch_size, num_tokens), dtype=jnp.int32) train_features = [] train_labels = [] print('Computing features on train') training_examples = iter(tfds.as_numpy(train_ds)) for train_ex in training_examples: vid_representation, _ = forward_apply(params=params, state=state, images=train_ex['video'], audio_spectrogram=dummy_audio, word_ids=dummy_word_ids) train_labels.append(train_ex['label']) train_features.append(vid_representation) if len(train_labels) % 50 == 0: print(f'Processed {len(train_labels)} examples.') train_labels = np.concatenate(train_labels, axis=0) train_features = np.concatenate(train_features, axis=0) print(f'Finish collecting train features of shape {train_features.shape}') # Collect test samples. dummy_audio = jnp.zeros( shape=(FLAGS.eval_batch_size, audio_frames, mel_filters, 1)) dummy_word_ids = jnp.zeros( shape=(FLAGS.eval_batch_size, num_tokens), dtype=jnp.int32) test_features = [] test_labels = [] print('Computing features on test') test_examples = iter(tfds.as_numpy(test_ds)) for test_ex in test_examples: vid_representation_test, _ = forward_apply(params=params, state=state, images=test_ex['video'], audio_spectrogram=dummy_audio, word_ids=dummy_word_ids) test_labels.append(test_ex['label']) test_features.append(vid_representation_test) if len(test_labels) % 50 == 0: print(f'Processed {len(test_labels)} examples.') test_features = np.concatenate(test_features, axis=0) test_labels = np.concatenate(test_labels, axis=0) print(f'Finish collecting test features of shape {test_features.shape}') # Train classifier print('Training linear classifier!') classifier = sklearn.svm.LinearSVC(C=sklearn_reg) scaler = preprocessing.StandardScaler().fit(train_features) train_features = scaler.transform(train_features) classifier.fit(train_features, train_labels.ravel()) print('Training done !') # Evaluation. test_features = scaler.transform(test_features) print('Running inference on train') pred_train = classifier.decision_function(train_features) print('Running inference on test') pred_test = classifier.decision_function(test_features) if FLAGS.num_test_windows > 1: pred_test = np.reshape( pred_test, (test_labels.shape[0], -1, pred_test.shape[1])) pred_test = pred_test.mean(axis=1) # Compute accuracies. metrics = compute_accuracy_metrics(pred_train, train_labels[:, None], prefix='train_') metrics.update( compute_accuracy_metrics(pred_test, test_labels[:, None], prefix='test_')) print(metrics) if __name__ == '__main__': app.run(main)
deepmind-research-master
mmv/eval_ucf101.py
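# --- Worked example with synthetic scores, not real model outputs ---
# The top-1 / top-5 metrics in eval_ucf101.compute_accuracy_metrics: argsort the
# class scores, keep the last one / last five columns, and check whether the
# ground-truth label appears among them.
import numpy as np

pred = np.array([[0.1, 0.3, 0.9, 0.2, 0.05, 0.4, 0.15, 0.0],
                 [0.8, 0.1, 0.2, 0.3, 0.60, 0.5, 0.70, 0.9]])
gt = np.array([[2], [4]])                        # labels, shape [batch, 1]

order_pred = np.argsort(pred, axis=1)
top1_pred = order_pred[:, -1:]                   # highest-scoring class
top5_pred = order_pred[:, -5:]                   # five highest-scoring classes
top1_acc = np.mean(top1_pred == gt)              # 0.5: only the first clip is right
top5_acc = np.mean(np.max(top5_pred == gt, 1))   # 1.0: label 4 is in clip 2's top 5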
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Checkpoint restoring utilities.""" from absl import logging import dill def load_checkpoint(checkpoint_path): try: with open(checkpoint_path, 'rb') as checkpoint_file: checkpoint_data = dill.load(checkpoint_file) logging.info('Loading checkpoint from %s', checkpoint_path) return checkpoint_data except FileNotFoundError: return None
deepmind-research-master
mmv/utils/checkpoint.py
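# --- Illustrative usage sketch; the checkpoint path is hypothetical ---
# The pickled checkpoint is a plain dict, and eval_ucf101 reads the Haiku
# parameters and state out of it. load_checkpoint returns None when the file
# does not exist.
from mmv.utils import checkpoint

pretrained = checkpoint.load_checkpoint('/tmp/mmv_s3d.pkl')
if pretrained is not None:
  params = pretrained['params']
  state = pretrained['state']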
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Ucf101 with custom decoding params.""" import tensorflow as tf import tensorflow_datasets as tfds # Utilities functions. tf.compat.v1.enable_eager_execution() _CITATION = """\ @article{DBLP:journals/corr/abs-1212-0402, author = {Khurram Soomro and Amir Roshan Zamir and Mubarak Shah}, title = {{UCF101:} {A} Dataset of 101 Human Actions Classes From Videos in The Wild}, journal = {CoRR}, volume = {abs/1212.0402}, year = {2012}, url = {http://arxiv.org/abs/1212.0402}, archivePrefix = {arXiv}, eprint = {1212.0402}, timestamp = {Mon, 13 Aug 2018 16:47:45 +0200}, biburl = {https://dblp.org/rec/bib/journals/corr/abs-1212-0402}, bibsource = {dblp computer science bibliography, https://dblp.org} } """ _LABELS_FNAME = 'video/ucf101_labels.txt' class ModUcf101(tfds.video.Ucf101): """Ucf101 action recognition dataset with better quality. """ def _info(self): ffmpeg_extra_args = ('-qscale:v', '2', '-r', '25', '-t', '00:00:20') video_shape = ( None, self.builder_config.height, self.builder_config.width, 3) labels_names_file = tfds.core.tfds_path(_LABELS_FNAME) features = tfds.features.FeaturesDict({ 'video': tfds.features.Video(video_shape, ffmpeg_extra_args=ffmpeg_extra_args, encoding_format='jpeg'), 'label': tfds.features.ClassLabel(names_file=labels_names_file), }) return tfds.core.DatasetInfo( builder=self, description='A 101-label video classification dataset.', features=features, homepage='https://www.crcv.ucf.edu/data-sets/ucf101/', citation=_CITATION, )
deepmind-research-master
mmv/utils/ucf101_dataset.py
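# --- Illustrative sketch mirroring how eval_ucf101 builds the data ---
# ModUcf101 is a drop-in tfds builder, so the usual download/prepare/as_dataset
# flow applies. The data directory and split index are hypothetical.
import tensorflow_datasets as tfds
from mmv.utils import ucf101_dataset

dset_config = tfds.video.ucf101.Ucf101.BUILDER_CONFIGS[1]
builder = ucf101_dataset.ModUcf101(data_dir='/tmp/ucf101', config=dset_config)
builder.download_and_prepare(
    download_config=tfds.download.DownloadConfig(verify_ssl=False))
train_ds = builder.as_dataset(split='train', shuffle_files=False)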
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3. """Model for text-video-audio embeddings.""" from typing import Any, Dict, Optional import haiku as hk import jax import jax.numpy as jnp from mmv.models import normalization from mmv.models import resnet from mmv.models import s3d from mmv.models import tsm_resnet _DEFAULT_CFG_AUDTXT = { "totxt_head_mode": "linear", "toaud_head_mode": "linear", "toaud_bn_after_proj": False, "totxt_bn_after_proj": False, "embedding_dim": 512} _DEFAULT_CFG_VIDAUD = { "tovid_head_mode": "linear", "toaud_head_mode": "mlp@512", "tovid_bn_after_proj": False, "toaud_bn_after_proj": True, "embedding_dim": 512} _DEFAULT_CFG_VIDTXT = { "tovid_head_mode": "linear", "totxt_head_mode": "mlp@512", "tovid_bn_after_proj": False, "totxt_bn_after_proj": True, "embedding_dim": 512} _DEFAULT_CFG_BN = {"decay_rate": 0.9, "eps": 1e-5, "create_scale": True, "create_offset": True} def _setkey_if_not_exists(d, key, value): if key not in d: d[key] = value class AudioTextVideoEmbedding(hk.Module): """Module to fuse audio, text and video for joint embedding learning.""" def __init__( self, # Language parameters. word_embedding_matrix, sentence_dim=2048, # Audio parameters. audio_backbone="resnet18", audio_model_kwargs=None, # Vision parameters. visual_backbone="s3d", vision_model_kwargs=None, # Common parameters. mm_embedding_graph="fac_relu", use_xreplica_bn=True, bn_config_proj=None, config_video_text=None, config_video_audio=None, config_audio_text=None, use_audio_text=False, name="audio_text_video_model"): """Initialize the AudioTextVideoEmbedding class. Args: word_embedding_matrix: 2d matrix [vocab_size, embed_size] to embed words. sentence_dim: The dimension of the sentence representation. audio_backbone: Backbone for audio. audio_model_kwargs: Other specific parameters to pass to the audio module. visual_backbone: The video backbone. vision_model_kwargs: Other specific parameters to pass to the vision module. mm_embedding_graph: Embedding graph merging strategy. Can be `shared`, `disjoint` or `fac` (fac can be followed by an activation function name e.g. `fac_relu`). use_xreplica_bn: Whether or not to use the cross replica batch norm. bn_config_proj: BN config of the projection heads. config_video_text: Config for the video and the text branches. config_video_audio: Config for the video and the audio branches. config_audio_text: Config for the audio and the text branches. use_audio_text: Whether or not the audio text branch is used during training. name: graph name. """ super(AudioTextVideoEmbedding, self).__init__(name=name) # Audio parameters. self._audio_backbone = audio_backbone self._audio_model_kwargs = audio_model_kwargs # Language parameters. self._sentence_dim = sentence_dim self._word_embedding_matrix = word_embedding_matrix # Vision parameters. self._visual_backbone = visual_backbone self._vision_model_kwargs = vision_model_kwargs # Joint parameters. 
self._use_xreplica_bn = use_xreplica_bn if self._use_xreplica_bn: self._normalizer_name = "cross_replica_batch_norm" else: self._normalizer_name = "batch_norm" # Projection head parameters. if config_video_text is None: config_video_text = _DEFAULT_CFG_VIDTXT for k, v in _DEFAULT_CFG_VIDTXT.items(): _setkey_if_not_exists(config_video_text, k, v) self._cfg_vid_txt = config_video_text if config_video_audio is None: config_video_audio = _DEFAULT_CFG_VIDAUD for k, v in _DEFAULT_CFG_VIDAUD.items(): _setkey_if_not_exists(config_video_audio, k, v) self._cfg_vid_aud = config_video_audio if config_audio_text is None: config_audio_text = _DEFAULT_CFG_AUDTXT for k, v in _DEFAULT_CFG_AUDTXT.items(): _setkey_if_not_exists(config_audio_text, k, v) self._cfg_aud_txt = config_audio_text self._use_audio_text = use_audio_text self._mm_embedding_graph = mm_embedding_graph self._use_separate_heads = ( mm_embedding_graph == "disjoint" or mm_embedding_graph.startswith("fac")) self._bn_config_proj = bn_config_proj or _DEFAULT_CFG_BN def _get_pair_embedding_heads(self, embedding_dim_1, embedding_dim_2, mode1, mode2, use_bn_out1, use_bn_out2, name1, name2): embd1_module = EmbeddingModule( embedding_dim_1, mode=mode1, use_bn_out=use_bn_out1, bn_config=self._bn_config_proj, use_xreplica_bn=self._use_xreplica_bn, name=name1) if self._use_separate_heads: embd2_module = EmbeddingModule( embedding_dim_2, mode=mode2, use_bn_out=use_bn_out2, use_xreplica_bn=self._use_xreplica_bn, bn_config=self._bn_config_proj, name=name2) else: assert embedding_dim_1 == embedding_dim_2, ( "Using shared heads but inconsistent embedding dims where provided.") assert mode1 == mode2, ( "Using shared heads but inconsistent modes where provided.") assert use_bn_out1 == use_bn_out2, ( "Using shared heads but inconsistent bn conf where provided.") embd2_module = embd1_module return embd1_module, embd2_module def _activate_interaction(self, inputs, activation_fn, is_training, activation_module=None): """Activation function for the interaction modules.""" if activation_fn == "relu": inputs = jax.nn.relu(inputs) elif activation_fn == "bnrelu": if activation_module is None: activation_module = normalization.get_normalize_fn( normalizer_name=self._normalizer_name, normalizer_kwargs=self._bn_config_proj) inputs = activation_module(inputs, is_training=is_training) inputs = jax.nn.relu(inputs) else: raise ValueError(f"{activation_fn} not supported.") return inputs, activation_module def __call__(self, images, audio_spectrogram, word_ids, is_training, return_intermediate_audio=False): """Computes video, text and audio embeddings. Args: images: The videos tensor of shape [B1, T, H, W, 3] where B1 is the batch size, T is the number of frames per clip, H the height, W the width and 3 the rgb channels. audio_spectrogram: The audio tensor of shape [B2, T', F] where B2 is the batch size, T' is the number of temporal frames, F is the number of frequency frames. word_ids: If words_embeddings is set to None, it will use the word indices input instead so that we can compute the word embeddings within the model graph. The expected shape is [B3, N, D] where B3 is the batch size and N the maximum number of words per sentence. is_training: Whether or not to activate the graph in training mode. return_intermediate_audio: Return audio intermediate representation. Returns: if return_intermediate_audio = True audio_representation: the 4-dim audio representation taken before averaging over spatial dims in the Resnet. 
else visual_embd: a dict containing the video embeddings in audio and text of shape [B1, d_embd]. audio_embd: a dict containing the audio embeddings in video and text of shape [B2, d_embd]. txt_embd: a dict containing the text embeddings in video and audio of shape[B3, d_embd]. visual_representation: the video rep of shape [B1, d_visual]. audio_representation: the audio rep of shape [B2, d_audio]. """ # Computes the visual representation. video_cnn = VisualModule(backbone=self._visual_backbone, use_xreplica_bn=self._use_xreplica_bn, model_kwargs=self._vision_model_kwargs) visual_representation = video_cnn(images, is_training=is_training) # Projection heads: Video -> Text and Video -> Audio. vid2txt_embd_module, vid2aud_embd_module = self._get_pair_embedding_heads( embedding_dim_1=self._cfg_vid_txt["embedding_dim"], embedding_dim_2=self._cfg_vid_aud["embedding_dim"], mode1=self._cfg_vid_txt["totxt_head_mode"], mode2=self._cfg_vid_aud["toaud_head_mode"], use_bn_out1=self._cfg_vid_txt["totxt_bn_after_proj"], use_bn_out2=self._cfg_vid_aud["toaud_bn_after_proj"], name1="vis_embd", name2="vid2audio_embd") video_embd = {} if self._mm_embedding_graph in ["shared", "disjoint"]: video_embd["toaud"] = vid2aud_embd_module(visual_representation, is_training=is_training) video_embd["totxt"] = vid2txt_embd_module(visual_representation, is_training=is_training) elif self._mm_embedding_graph.startswith("fac"): # Activation function if specificed in the name, e.g. fac_relu. activation_fn = None if len(self._mm_embedding_graph.split("_")) == 2: activation_fn = self._mm_embedding_graph.split("_")[1] video_embd["toaud"] = vid2aud_embd_module(visual_representation, is_training=is_training) fine_rep = video_embd["toaud"] # Eventually activate the fine grained representation. if activation_fn: fine_rep, activation_module = self._activate_interaction( inputs=fine_rep, activation_fn=activation_fn, is_training=is_training) video_embd["totxt"] = vid2txt_embd_module(fine_rep, is_training=is_training) else: raise ValueError( f"{self._mm_embedding_graph} is not a valid MM embedding graph.") # Computes the audio representation. audio_cnn = AudioModule(backbone=self._audio_backbone, use_xreplica_bn=self._use_xreplica_bn, model_kwargs=self._audio_model_kwargs) if return_intermediate_audio: return audio_cnn(audio_spectrogram, is_training=is_training, return_intermediate=True) audio_representation = audio_cnn(audio_spectrogram, is_training=is_training) # Projection heads: Audio -> Video and Audio -> Text. aud2vid_embd_module, aud2txt_embd_module = self._get_pair_embedding_heads( embedding_dim_1=self._cfg_vid_aud["embedding_dim"], embedding_dim_2=self._cfg_aud_txt["embedding_dim"], mode1=self._cfg_vid_aud["tovid_head_mode"], mode2=self._cfg_aud_txt["totxt_head_mode"], use_bn_out1=self._cfg_vid_aud["tovid_bn_after_proj"], use_bn_out2=self._cfg_aud_txt["totxt_bn_after_proj"], name1="audio_embd", name2="audio2txt_embd") audio_embd = {} audio_embd["tovid"] = aud2vid_embd_module(audio_representation, is_training=is_training) # Computes the projection to the text domain depending on the MM graph mode. if (self._mm_embedding_graph.startswith("fac") and (self._use_audio_text or (not is_training))): # In case the audio text branch is not used during training, we do that # only at eval time (is_training=False) in order to not pollute the BN # stats in vid2txt_embd_module with audio features during training. 
fine_rep_audio = audio_embd["tovid"] if activation_fn: fine_rep_audio, _ = self._activate_interaction( inputs=fine_rep_audio, activation_fn=activation_fn, is_training=is_training, activation_module=activation_module) audio_embd["totxt"] = vid2txt_embd_module(fine_rep_audio, is_training=is_training) else: audio_embd["totxt"] = aud2txt_embd_module(audio_representation, is_training=is_training) # Computes the text representation. txt_representation = TextModule( sentence_dim=self._sentence_dim, word_embedding_matrix=self._word_embedding_matrix)( word_ids, is_training=is_training) # Projection heads: Text -> Video and Text -> Audio. txt2vid_embd_module, txt2aud_embd_module = self._get_pair_embedding_heads( embedding_dim_1=self._cfg_vid_txt["embedding_dim"], embedding_dim_2=self._cfg_aud_txt["embedding_dim"], mode1=self._cfg_vid_txt["tovid_head_mode"], mode2=self._cfg_aud_txt["toaud_head_mode"], use_bn_out1=self._cfg_vid_txt["tovid_bn_after_proj"], use_bn_out2=self._cfg_aud_txt["toaud_bn_after_proj"], name1="txt_embd", name2="txt2audio_embd") txt_embd = {} txt_embd["tovid"] = txt2vid_embd_module(txt_representation, is_training=is_training) txt_embd["toaud"] = txt2aud_embd_module(txt_representation, is_training=is_training) return { "vid_embd": video_embd, "aud_embd": audio_embd, "txt_embd": txt_embd, "vid_repr": visual_representation, "aud_repr": audio_representation, } class EmbeddingModule(hk.Module): """Final Embedding module.""" def __init__(self, embedding_dim: int, mode: str = "linear", use_bn_out: bool = False, bn_config: Optional[Dict[str, Any]] = None, use_xreplica_bn: bool = True, name="embedding_module"): self._embedding_dim = embedding_dim self._use_bn_out = use_bn_out self._mode = mode # Set default BN config. bn_config = bn_config or _DEFAULT_CFG_BN if use_xreplica_bn: normalizer_name = "cross_replica_batch_norm" else: normalizer_name = "batch_norm" self._batch_norm = normalization.get_normalize_fn( normalizer_name=normalizer_name, normalizer_kwargs=bn_config) super(EmbeddingModule, self).__init__(name=name) def __call__(self, input_feature, is_training): if self._mode == "linear": proj = hk.Linear(self._embedding_dim, name="final_projection") embedding = proj(input_feature) elif self._mode.startswith("mlp"): if "@" not in self._mode: raise ValueError( ("Please specify the inner dimensions of the MLP with `@` symbol" "e.g. mlp@512 or mlp@512@256 for a 2 layer MLP.")) inner_dims = [int(dim) for dim in self._mode.split("@")[1:]] embedding = input_feature for inner_dim in inner_dims: embedding = hk.Linear(inner_dim, with_bias=True, name="final_projection_inner")(embedding) if not self._mode.startswith("mlp_nobn"): embedding = self._batch_norm(embedding, is_training=is_training) embedding = jax.nn.relu(embedding) # Final projection. 
embedding = hk.Linear(self._embedding_dim, name="final_projection", with_bias=not self._use_bn_out)(embedding) else: raise NotImplementedError if self._use_bn_out: embedding = self._batch_norm(embedding, is_training=is_training) return embedding class VisualModule(hk.Module): """The visual module selects which CNN backbone to connect to the graph.""" def __init__(self, use_xreplica_bn=True, backbone="s3d", model_kwargs=None, name="visual_module"): self._backbone = backbone super(VisualModule, self).__init__(name=name) if model_kwargs is None: model_kwargs = {} bn_config = model_kwargs.get("bn_config", _DEFAULT_CFG_BN) if use_xreplica_bn: normalizer_name = "cross_replica_batch_norm" else: normalizer_name = "batch_norm" normalize_fn = normalization.get_normalize_fn( normalizer_name=normalizer_name, normalizer_kwargs=bn_config) if backbone == "s3d": self._cnn = s3d.S3D(normalize_fn=normalize_fn) elif backbone == "resnet50tsm": width_mult = model_kwargs.get("width_mult", 1) self._cnn = tsm_resnet.TSMResNetV2( normalize_fn=normalize_fn, depth=50, num_frames=model_kwargs["n_frames"], width_mult=width_mult) else: raise NotImplementedError def __call__(self, images, is_training): """Connects graph to images.""" features = self._cnn(images, is_training=is_training) return features class AudioModule(hk.Module): """The audio module selects which CNN backbone to connect to the graph.""" def __init__(self, backbone="resnet18", use_xreplica_bn=True, model_kwargs=None, name="audio_module"): super(AudioModule, self).__init__(name=name) model_kwargs = model_kwargs or {} bn_config = model_kwargs.get("bn_config", _DEFAULT_CFG_BN) backbone_to_depth = { "resnet18": 18, "resnet34": 34, "resnet50": 50, "resnet101": 101 } assert backbone in backbone_to_depth, ( f"backbone should be in {backbone_to_depth.keys()}") if use_xreplica_bn: normalizer_name = "cross_replica_batch_norm" else: normalizer_name = "batch_norm" self._cnn = resnet.ResNetV2( depth=backbone_to_depth[backbone], normalize_fn=normalization.get_normalize_fn( normalizer_name=normalizer_name, normalizer_kwargs=bn_config), num_classes=None) def __call__(self, audio_spectrogram, is_training, return_intermediate=False): """Connects graph to audio spectrogram.""" final_endpoint = "output" if return_intermediate: final_endpoint = "last_conv" return self._cnn(audio_spectrogram, is_training=is_training, final_endpoint=final_endpoint) class TextModule(hk.Module): """Text module computes the sentences representation.""" def __init__(self, word_embedding_matrix, sentence_dim=1024, name="text_module"): """Initialize text module. Args: word_embedding_matrix: 2d matrix [vocab_size, embed_size] to embed words. sentence_dim: dimension of sentence representation. name: module name. """ super(TextModule, self).__init__(name=name) self._word_embedding_module = hk.Embed( embedding_matrix=word_embedding_matrix) self._conv1d_module = hk.Conv1D(sentence_dim, 1, name="text_conv1") def __call__(self, word_ids, is_training): """Connects graph to sentence representation.""" word_embeddings = self._word_embedding_module(word_ids) word_embeddings = jax.lax.stop_gradient(word_embeddings) output = self._conv1d_module(word_embeddings) output = jax.nn.relu(output) output = jnp.amax(output, axis=1) return output
deepmind-research-master
mmv/models/mm_embeddings.py
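A minimal usage sketch for the projection head above (not part of the original file): the configuration and shapes are illustrative, and plain batch norm is chosen so no cross-replica pmap axis is required.
import haiku as hk
import jax
import jax.numpy as jnp

def _embed(features, is_training):
  # Hypothetical configuration: 2048-d pooled features -> 256-d embedding
  # through a one-hidden-layer MLP head ("mlp@512").
  head = EmbeddingModule(
      embedding_dim=256,
      mode="mlp@512",
      use_bn_out=False,
      use_xreplica_bn=False)  # plain BatchNorm, so no cross-replica axis needed
  return head(features, is_training=is_training)

embed = hk.transform_with_state(_embed)
features = jnp.zeros((8, 2048))
params, state = embed.init(jax.random.PRNGKey(0), features, is_training=True)
embeddings, _ = embed.apply(params, state, None, features, is_training=False)
# embeddings.shape == (8, 256)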
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for TSM ResNet model.""" from absl.testing import absltest from absl.testing import parameterized import haiku as hk import jax import jax.numpy as jnp from mmv.models import tsm_resnet class TSMResNetTest(parameterized.TestCase): @parameterized.parameters( ('tsm_resnet_stem', (2 * 32, 56, 56, 64)), ('tsm_resnet_unit_0', (2 * 32, 56, 56, 256)), ('tsm_resnet_unit_1', (2 * 32, 28, 28, 512)), ('tsm_resnet_unit_2', (2 * 32, 14, 14, 1024)), ('tsm_resnet_unit_3', (2 * 32, 7, 7, 2048)), ('last_conv', (2 * 32, 7, 7, 2048)), ('Embeddings', (2, 2048)), ) def test_output_dimension(self, final_endpoint, expected_shape): input_shape = (2, 32, 224, 224, 3) def f(): data = jnp.zeros(input_shape) net = tsm_resnet.TSMResNetV2() return net(data, final_endpoint=final_endpoint) init_fn, apply_fn = hk.transform(f) out = apply_fn(init_fn(jax.random.PRNGKey(42)), None) self.assertEqual(out.shape, expected_shape) def test_tpu_mode(self): input_shape = (32 * 2, 224, 224, 3) def f(): data = jnp.zeros(input_shape) net = tsm_resnet.TSMResNetV2(num_frames=32) return net(data, final_endpoint='Embeddings') init_fn, apply_fn = hk.transform(f) out = apply_fn(init_fn(jax.random.PRNGKey(42)), None) self.assertEqual(out.shape, (2, 2048)) if __name__ == '__main__': absltest.main()
deepmind-research-master
mmv/models/tsm_resnet_test.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tsm_utils.""" from absl.testing import absltest from absl.testing import parameterized import jax.numpy as jnp import numpy as np from mmv.models import tsm_utils class TsmUtilsTest(parameterized.TestCase): @parameterized.parameters( ((2, 32, 224, 224, 3), 'gpu', (2 * 32, 224, 224, 3), 32), ((32, 224, 224, 3), 'tpu', (32, 224, 224, 3), None), ) def test_prepare_inputs(self, input_shape, expected_mode, expected_shape, expected_num_frames): data = jnp.zeros(input_shape) out, mode, num_frames = tsm_utils.prepare_inputs(data) self.assertEqual(out.shape, expected_shape) self.assertEqual(mode, expected_mode) self.assertEqual(num_frames, expected_num_frames) def test_prepare_outputs(self): data = jnp.concatenate([jnp.zeros(4), jnp.ones(4)]).reshape(4, 2) out_gpu = tsm_utils.prepare_outputs(data, 'gpu', 2) out_tpu = tsm_utils.prepare_outputs(data, 'tpu', 2) expected_gpu = np.concatenate([np.zeros(2), np.ones(2)]).reshape(2, 2) expected_tpu = 0.5 * jnp.ones((2, 2)) np.testing.assert_allclose(out_gpu, expected_gpu) np.testing.assert_allclose(out_tpu, expected_tpu) def test_apply_tsm(self): shape = (32, 224, 224, 16) data = jnp.zeros(shape) out_gpu = tsm_utils.apply_temporal_shift(data, 'gpu', 16) out_tpu = tsm_utils.apply_temporal_shift(data, 'tpu', 16) self.assertEqual(out_gpu.shape, shape) self.assertEqual(out_tpu.shape, shape) if __name__ == '__main__': absltest.main()
deepmind-research-master
mmv/models/tsm_utils_test.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utils functions for TSM.""" from typing import Tuple import jax import jax.numpy as jnp from mmv.models import types def prepare_inputs( inputs: types.TensorLike) -> Tuple[jnp.ndarray, str, int]: """Deduces input mode for TSM.""" # Deduce if we run on TPU based on input shape. if len(inputs.shape) == 5: # Input is given in the standard [B, T, H, W, 3] format. tsm_mode = 'gpu' num_frames = inputs.shape[1] inputs = jnp.reshape(inputs, [-1] + list(inputs.shape[2:])) else: # Input is given in the [T * B, H, W, 3] format. tsm_mode = 'tpu' num_frames = None return inputs, tsm_mode, num_frames def prepare_outputs(outputs: types.TensorLike, tsm_mode: str, num_frames: int) -> jnp.ndarray: """Processes output of TSM by averaging representations over time axis.""" n_channels = outputs.shape[-1] if tsm_mode == 'tpu': outputs = jnp.reshape(outputs, [num_frames, -1, n_channels]) outputs = jnp.mean(outputs, axis=0) elif tsm_mode == 'gpu': outputs = jnp.reshape(outputs, [-1, num_frames, n_channels]) outputs = jnp.mean(outputs, axis=1) else: raise ValueError( f'`tsm_mode` should be \'tpu\' or \'gpu\' ({tsm_mode} given)') return outputs def apply_temporal_shift( x: types.TensorLike, tsm_mode: str, num_frames: int, channel_shift_fraction: float = 0.125) -> jnp.ndarray: """Performs a temporal shift: https://arxiv.org/abs/1811.08383 with mode.""" if tsm_mode == 'tpu': outputs = temporal_shift_tpu(x, num_frames, channel_shift_fraction) elif tsm_mode == 'gpu': outputs = temporal_shift_gpu(x, num_frames, channel_shift_fraction) else: raise ValueError( f'`tsm_mode` should be \'tpu\' or \'gpu\' ({tsm_mode} given)') return outputs def temporal_shift_gpu( x: types.TensorLike, num_frames: int, channel_shift_fraction: float = 0.125) -> jnp.ndarray: """Performs a temporal shift: https://arxiv.org/abs/1811.08383.""" # B, T, H, W, C = batch_size, num_frames, im_height, im_width, channels # Input is (B * T, H, W, C) orig_shp = tuple(x.shape) reshaped_x = jnp.reshape(x, (-1, num_frames) + orig_shp[1:]) n_channels = orig_shp[-1] n_shift = int(n_channels * channel_shift_fraction) new_shp = tuple(reshaped_x.shape) # shifted_backward = reshaped_x[:, 1:, :, :, -n_shift:] shifted_backward = jax.lax.slice( reshaped_x, (0, 1, 0, 0, new_shp[4] - n_shift), (new_shp[0], new_shp[1], new_shp[2], new_shp[3], new_shp[4])) shifted_backward_padding = ((0, 0), (0, 1), (0, 0), (0, 0), (0, 0)) shifted_backward = jnp.pad(shifted_backward, shifted_backward_padding) # shifted_forward = reshaped_x[:, :-1, :, :, :n_shift] shifted_forward = jax.lax.slice( reshaped_x, (0, 0, 0, 0, 0), (new_shp[0], new_shp[1] - 1, new_shp[2], new_shp[3], n_shift)) shifted_forward_padding = ((0, 0), (1, 0), (0, 0), (0, 0), (0, 0)) shifted_forward = jnp.pad(shifted_forward, shifted_forward_padding) no_shift = reshaped_x[:, :, :, :, n_shift:-n_shift] shifted_x = jnp.concatenate([shifted_backward, no_shift, shifted_forward], axis=4) return jnp.reshape(shifted_x, (-1,) + orig_shp[1:]) def 
temporal_shift_tpu( x: types.TensorLike, num_frames: int, channel_shift_fraction: float = 0.125) -> jnp.ndarray: """Performs a temporal shift: https://arxiv.org/abs/1811.08383. TPU optimized version of TSM. Reshape is avoided by having the images reshaped in [T * B, :] so that frames corresponding to same time frame in videos are contiguous in memory. Thanks to cr/288510308 which allows to fuse pad->slice into convolution, we reformulate the slice pad into a pad then slice. Finally, to avoid concatenate that prevent some fusion from happening we simply sum masked version of the features. Args: x: Input expected to be [T * B, H, W, C] (where the batch has been reshaped from a time major version of the input). num_frames: number of frames T per video. channel_shift_fraction: fraction of the channel to shift forward and backward. Returns: The temporal shifted version of x. """ # B, T, H, W, C = batch_size, num_frames, im_height, im_width, channels # Input is (T * B, H, W, C) original_shape = list(x.shape) batch_size = int(original_shape[0] / num_frames) n_channels = int(original_shape[-1]) n_shift = int(n_channels * channel_shift_fraction) # Cast to bfloat16. x = x.astype(jnp.bfloat16) # For the following, assume that x has 3 channels [x1, x2, x3] and n_shift=1. # Shift backward, we first pad by zeros [x1, x2, x3, 0, 0]. orig_shp = list(x.shape) shifted_backward_padding = ((0, batch_size, 0), (0, 0, 0), (0, 0, 0), (0, n_channels - n_shift, 0)) x_backward_padding = jax.lax.pad( x, padding_value=jnp.bfloat16(0.), padding_config=shifted_backward_padding) # The following shift gets to [x3^+1, 0, 0] (where +1 means from the future). shifted_backward = jax.lax.slice(x_backward_padding, (batch_size, 0, 0, n_channels - n_shift), (orig_shp[0] + batch_size, orig_shp[1], orig_shp[2], 2 * n_channels - n_shift)) # Shift forward, we first pad by zeros [0, 0, x1, x2, x3]. shifted_forward_padding = ((batch_size, 0, 0), (0, 0, 0), (0, 0, 0), (n_channels - n_shift, 0, 0)) x_forward_padding = jax.lax.pad( x, padding_value=jnp.bfloat16(0.), padding_config=shifted_forward_padding) # The following shift gets to [0, 0, x1^-1] (where -1 means from the past). shifted_forward = jax.lax.slice( x_forward_padding, (0, 0, 0, 0), (orig_shp[0], orig_shp[1], orig_shp[2], n_channels)) # No shift is in the middle, this gets [0, x2, 0]. mask_noshift = (jnp.reshape((jnp.arange(n_channels) >= n_shift) & (jnp.arange(n_channels) < n_channels - n_shift), (1, 1, 1, -1))).astype(jnp.bfloat16) no_shift = mask_noshift * x # By summing everything together, we end up with [x3^+1, x2, x1^-1]. # Note: channels have been reordered but that doesn't matter for the model. shifted_x = shifted_backward + shifted_forward + no_shift return shifted_x.astype(jnp.float32)
deepmind-research-master
mmv/models/tsm_utils.py
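A small usage sketch of the helpers above (shapes are illustrative, not from the repo): prepare the inputs, apply the temporal shift to a feature map, pool spatially, then average over time.
import jax.numpy as jnp
from mmv.models import tsm_utils

videos = jnp.zeros((2, 8, 14, 14, 64))            # [B, T, H, W, C]
frames, tsm_mode, num_frames = tsm_utils.prepare_inputs(videos)
# frames: (16, 14, 14, 64), tsm_mode == 'gpu', num_frames == 8

shifted = tsm_utils.apply_temporal_shift(frames, tsm_mode, num_frames)
# shifted: (16, 14, 14, 64), with 12.5% of channels shifted forward/backward

pooled = jnp.mean(shifted, axis=(1, 2))           # spatial average -> [B*T, C]
embeddings = tsm_utils.prepare_outputs(pooled, tsm_mode, num_frames)
# embeddings: (2, 64), averaged over the 8 frames of each video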
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Temporal Shift Module w/ ResNet-50 and ResNet-101. Based on: TSM: Temporal Shift Module for Efficient Video Understanding Ji Lin, Chuang Gan, Song Han https://arxiv.org/pdf/1811.08383.pdf. """ from typing import Optional import haiku as hk import jax import jax.numpy as jnp from mmv.models import tsm_utils as tsmu from mmv.models import types class TSMResNetBlock(hk.Module): """A ResNet subblock with Temporal Channel Shifting. Combines a typical ResNetV2 block implementation (see https://arxiv.org/abs/1512.03385) with a pre-convolution Temporal Shift Module (see https://arxiv.org/pdf/1811.08383.pdf) in the residual. """ def __init__(self, output_channels: int, stride: int, use_projection: bool, tsm_mode: str, normalize_fn: Optional[types.NormalizeFn] = None, channel_shift_fraction: float = 0.125, num_frames: int = 8, name: str = 'TSMResNetBlock'): """Initializes the TSMResNetBlock module. Args: output_channels: Number of output channels. stride: Stride used in convolutions. use_projection: Whether to use a projection for the shortcut. tsm_mode: Mode for TSM ('gpu' or 'tpu'). normalize_fn: Function used for normalization. channel_shift_fraction: The fraction of temporally shifted channels. If `channel_shift_fraction` is 0, the block is the same as a normal ResNet block. num_frames: Size of frame dimension in a single batch example name: The name of the module. """ super().__init__(name=name) self._output_channels = output_channels self._bottleneck_channels = output_channels // 4 self._stride = stride self._use_projection = use_projection self._normalize_fn = normalize_fn self._tsm_mode = tsm_mode self._channel_shift_fraction = channel_shift_fraction self._num_frames = num_frames def __call__(self, inputs: types.TensorLike, is_training: bool = True) -> jnp.ndarray: """Connects the ResNetBlock module into the graph. Args: inputs: A 4-D float array of shape `[B, H, W, C]`. is_training: Whether to use training mode. Returns: A 4-D float array of shape `[B * num_frames, new_h, new_w, output_channels]`. """ # ResNet V2 uses pre-activation, where the batch norm and relu are before # convolutions, rather than after as in ResNet V1. preact = inputs if self._normalize_fn is not None: preact = self._normalize_fn(preact, is_training=is_training) preact = jax.nn.relu(preact) if self._use_projection: shortcut = hk.Conv2D( output_channels=self._output_channels, kernel_shape=1, stride=self._stride, with_bias=False, padding='SAME', name='shortcut_conv')( preact) else: shortcut = inputs # Eventually applies Temporal Shift Module. if self._channel_shift_fraction != 0: preact = tsmu.apply_temporal_shift( preact, tsm_mode=self._tsm_mode, num_frames=self._num_frames, channel_shift_fraction=self._channel_shift_fraction) # First convolution. residual = hk.Conv2D( self._bottleneck_channels, kernel_shape=1, stride=1, with_bias=False, padding='SAME', name='conv_0')( preact) # Second convolution. 
if self._normalize_fn is not None: residual = self._normalize_fn(residual, is_training=is_training) residual = jax.nn.relu(residual) residual = hk.Conv2D( output_channels=self._bottleneck_channels, kernel_shape=3, stride=self._stride, with_bias=False, padding='SAME', name='conv_1')( residual) # Third convolution. if self._normalize_fn is not None: residual = self._normalize_fn(residual, is_training=is_training) residual = jax.nn.relu(residual) residual = hk.Conv2D( output_channels=self._output_channels, kernel_shape=1, stride=1, with_bias=False, padding='SAME', name='conv_2')( residual) # NOTE: we do not use block multiplier. output = shortcut + residual return output class TSMResNetUnit(hk.Module): """Block group for TSM ResNet.""" def __init__(self, output_channels: int, num_blocks: int, stride: int, tsm_mode: str, num_frames: int, normalize_fn: Optional[types.NormalizeFn] = None, channel_shift_fraction: float = 0.125, name: str = 'tsm_resnet_unit'): """Creates a TSMResNet Unit. Args: output_channels: Number of output channels. num_blocks: Number of ResNet blocks in the unit. stride: Stride of the unit. tsm_mode: Which temporal shift module to use. num_frames: Size of frame dimension in a single batch example. normalize_fn: Function used for normalization. channel_shift_fraction: The fraction of temporally shifted channels. If `channel_shift_fraction` is 0, the block is the same as a normal ResNet block. name: The name of the module. """ super().__init__(name=name) self._output_channels = output_channels self._num_blocks = num_blocks self._normalize_fn = normalize_fn self._stride = stride self._tsm_mode = tsm_mode self._channel_shift_fraction = channel_shift_fraction self._num_frames = num_frames def __call__(self, inputs: types.TensorLike, is_training: bool) -> jnp.ndarray: """Connects the module to inputs. Args: inputs: A 4-D float array of shape `[B * num_frames, H, W, C]`. is_training: Whether to use training mode. Returns: A 4-D float array of shape `[B * num_frames, H // stride, W // stride, output_channels]`. """ net = inputs for idx_block in range(self._num_blocks): net = TSMResNetBlock( self._output_channels, stride=self._stride if idx_block == 0 else 1, use_projection=idx_block == 0, normalize_fn=self._normalize_fn, tsm_mode=self._tsm_mode, channel_shift_fraction=self._channel_shift_fraction, num_frames=self._num_frames, name=f'block_{idx_block}')( net, is_training=is_training) return net class TSMResNetV2(hk.Module): """TSM based on ResNet V2 as described in https://arxiv.org/abs/1603.05027.""" # Endpoints of the model in order. VALID_ENDPOINTS = ( 'tsm_resnet_stem', 'tsm_resnet_unit_0', 'tsm_resnet_unit_1', 'tsm_resnet_unit_2', 'tsm_resnet_unit_3', 'last_conv', 'Embeddings', ) def __init__(self, normalize_fn: Optional[types.NormalizeFn] = None, depth: int = 50, num_frames: int = 16, channel_shift_fraction: float = 0.125, width_mult: int = 1, name: str = 'TSMResNetV2'): """Constructs a ResNet model. Args: normalize_fn: Function used for normalization. depth: Depth of the desired ResNet. num_frames: Number of frames (used in TPU mode). channel_shift_fraction: Fraction of channels that are temporally shifted, if `channel_shift_fraction` is 0, a regular ResNet is returned. width_mult: Whether or not to use a width multiplier. name: The name of the module. Raises: ValueError: If `channel_shift_fraction` or `depth` has invalid value. """ super().__init__(name=name) if not 0. 
<= channel_shift_fraction <= 1.0: raise ValueError( f'channel_shift_fraction ({channel_shift_fraction})' ' has to be in [0, 1].') self._num_frames = num_frames self._channels = (256, 512, 1024, 2048) self._strides = (1, 2, 2, 2) num_blocks = { 50: (3, 4, 6, 3), 101: (3, 4, 23, 3), 152: (3, 8, 36, 3), 200: (3, 24, 36, 3), } if depth not in num_blocks: raise ValueError( f'`depth` should be in {list(num_blocks.keys())} ({depth} given).') self._num_blocks = num_blocks[depth] self._width_mult = width_mult self._channel_shift_fraction = channel_shift_fraction self._normalize_fn = normalize_fn def __call__( self, inputs: types.TensorLike, is_training: bool = True, final_endpoint: str = 'Embeddings') -> jnp.ndarray: """Connects the TSM ResNetV2 module into the graph. Args: inputs: A 4-D float array of shape `[B, H, W, C]`. is_training: Whether to use training mode. final_endpoint: Up to which endpoint to run / return. Returns: Network output at location `final_endpoint`. A float array which shape depends on `final_endpoint`. Raises: ValueError: If `final_endpoint` is not recognized. """ # Prepare inputs for TSM. inputs, tsm_mode, num_frames = tsmu.prepare_inputs(inputs) num_frames = num_frames or self._num_frames self._final_endpoint = final_endpoint if self._final_endpoint not in self.VALID_ENDPOINTS: raise ValueError(f'Unknown final endpoint {self._final_endpoint}') # Stem convolution. end_point = 'tsm_resnet_stem' net = hk.Conv2D( output_channels=64 * self._width_mult, kernel_shape=7, stride=2, with_bias=False, name=end_point, padding='SAME')( inputs) net = hk.MaxPool( window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')( net) if self._final_endpoint == end_point: return net # Residual block. for unit_id, (channels, num_blocks, stride) in enumerate( zip(self._channels, self._num_blocks, self._strides)): end_point = f'tsm_resnet_unit_{unit_id}' net = TSMResNetUnit( output_channels=channels * self._width_mult, num_blocks=num_blocks, stride=stride, normalize_fn=self._normalize_fn, channel_shift_fraction=self._channel_shift_fraction, num_frames=num_frames, tsm_mode=tsm_mode, name=end_point)( net, is_training=is_training) if self._final_endpoint == end_point: return net if self._normalize_fn is not None: net = self._normalize_fn(net, is_training=is_training) net = jax.nn.relu(net) end_point = 'last_conv' if self._final_endpoint == end_point: return net net = jnp.mean(net, axis=(1, 2)) # Prepare embedding outputs for TSM (temporal average of features). net = tsmu.prepare_outputs(net, tsm_mode, num_frames) assert self._final_endpoint == 'Embeddings' return net
deepmind-research-master
mmv/models/tsm_resnet.py
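A usage sketch mirroring the unit test above (assumed, not from the repo): once wrapped in hk.transform the model is a pure function, and in 'gpu' mode the frame count is inferred from the 5-D input.
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import tsm_resnet

def _forward(video):
  net = tsm_resnet.TSMResNetV2(depth=50)   # no normalize_fn, for brevity
  return net(video, is_training=False, final_endpoint='Embeddings')

forward = hk.transform(_forward)
video = jnp.zeros((2, 8, 224, 224, 3))     # [B, T, H, W, 3]
params = forward.init(jax.random.PRNGKey(0), video)
embeddings = forward.apply(params, None, video)
# embeddings.shape == (2, 2048)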
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A Haiku S3D model.""" import collections from typing import Optional, Sequence import haiku as hk import jax from jax import numpy as jnp from mmv.models import types class _MaxPool(hk.MaxPool): """A `hk.MaxPool` accepting (and discarding) an `is_training` argument.""" def __call__(self, x: types.TensorLike, is_training: bool = True) -> jnp.ndarray: del is_training # Unused. return super().__call__(x) def self_gating(inputs: types.TensorLike) -> jnp.ndarray: """Feature gating as used in S3D-G. Transforms the input features by aggregating features from all spatial and temporal locations, and applying gating conditioned on the aggregated features. More details can be found at: https://arxiv.org/abs/1712.04851. Args: inputs: A 5-D float array of shape `[B, T, H, W, C]`. Returns: A tensor with the same shape as input_tensor. Raises: ValueError: If `inputs` has the wrong shape. """ if inputs.ndim != 5: raise ValueError( f'Expected an input of shape `[B, T, H, W, C]` but got {inputs.shape}.') input_shape = inputs.shape num_channels = input_shape[4] spatiotemporal_average = jnp.mean(inputs, axis=(1, 2, 3)) weights = hk.Linear(num_channels, name='self_gating')(spatiotemporal_average) weights = jax.nn.sigmoid(weights) return jnp.multiply(weights[:, None, None, None, :], inputs) class SUnit3D(hk.Module): """Base 3d Unit combining Conv3d + Batch Norm + non-linearity.""" def __init__( self, output_channels: int, kernel_shape: Sequence[int] = (1, 1, 1), stride: Sequence[int] = (1, 1, 1), with_bias: bool = False, separable: bool = False, normalize_fn: Optional[types.NormalizeFn] = None, activation_fn: Optional[types.ActivationFn] = jax.nn.relu, self_gating_fn: Optional[types.GatingFn] = None, name='SUnit3D'): """Initializes the SUnit3D module. Args: output_channels: Number of output channels. kernel_shape: The shape of the kernel. A sequence of length 3. stride: Stride for the kernel. A sequence of length 3. with_bias: Whether to add a bias to the convolution. separable: Whether to use separable. normalize_fn: Function used for normalization. activation_fn: Function used as non-linearity. self_gating_fn: Function used for self-gating. name: The name of the module. Raises: ValueError: If `kernel_shape` or `stride` has the wrong shape. """ super().__init__(name=name) # Check args. 
if len(kernel_shape) != 3: raise ValueError( 'Given `kernel_shape` must have length 3 but has length ' f'{len(kernel_shape)}.') if len(stride) != 3: raise ValueError( f'Given `stride` must have length 3 but has length {len(stride)}.') self._normalize_fn = normalize_fn self._activation_fn = activation_fn self._self_gating_fn = self_gating_fn k0, k1, k2 = kernel_shape if separable and k1 != 1: spatial_kernel_shape = [1, k1, k2] temporal_kernel_shape = [k0, 1, 1] s0, s1, s2 = stride spatial_stride = [1, s1, s2] temporal_stride = [s0, 1, 1] self._convolutions = [ hk.Conv3D( output_channels=output_channels, kernel_shape=spatial_kernel_shape, stride=spatial_stride, padding='SAME', with_bias=with_bias), hk.Conv3D( output_channels=output_channels, kernel_shape=temporal_kernel_shape, stride=temporal_stride, padding='SAME', with_bias=with_bias) ] else: self._convolutions = [ hk.Conv3D( output_channels=output_channels, kernel_shape=kernel_shape, stride=stride, padding='SAME', with_bias=with_bias)] def __call__( self, inputs: types.TensorLike, is_training: bool) -> jnp.ndarray: """Connects the module to inputs. Args: inputs: A 5-D float array of shape `[B, T, H, W, C]`. is_training: Whether to use training mode. Returns: A 5-D float array of shape `[B, new_t, new_h, new_w, output_channels]`. """ x = inputs for conv in self._convolutions: x = conv(x) if self._normalize_fn is not None: x = self._normalize_fn(x, is_training=is_training) if self._activation_fn is not None: x = self._activation_fn(x) if self._self_gating_fn: x = self._self_gating_fn(x) return x class InceptionBlockV13D(hk.Module): """A 3D Inception v1 block. This allows use of separable 3D convolutions and self-gating, as described in: Rethinking Spatiotemporal Feature Learning For Video Understanding. Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy. https://arxiv.org/abs/1712.04851. """ def __init__(self, output_channels: Sequence[int], normalize_fn: Optional[types.NormalizeFn], temporal_kernel_size: int = 3, self_gating_fn: Optional[types.GatingFn] = None, name: str = 'InceptionBlockV13D'): """Initializes the InceptionBlockV13D module. Args: output_channels: The size of the output channels of each block, ordered as [Conv2d_0a_1x1, Conv2d_0a_1x1, Conv2d_0b_3x3, Conv2d_0a_1x1, Conv2d_0b_3x3, Conv2d_0b_1x1] normalize_fn: Function used for normalization. temporal_kernel_size: The size of the temporal convolutional filters in the conv3d_spatiotemporal blocks. self_gating_fn: Function which optionally performs self-gating. If `None`, no self-gating is applied. name: The name of the module. Raises: ValueError: If `output_channels` has the wrong shape. """ super().__init__(name=name) # Check args. if len(output_channels) != 6: raise ValueError( 'Given `output_channels` must have length 6 but has length ' f'{len(output_channels)}.') self._output_channels = output_channels self._normalize_fn = normalize_fn self._temporal_kernel_size = temporal_kernel_size if self_gating_fn is None: self._self_gating_fn = lambda x: x else: self._self_gating_fn = self_gating_fn def __call__( self, inputs: types.TensorLike, is_training: bool) -> jnp.ndarray: """Connects the module to inputs. Args: inputs: A 5-D float array of shape `[B, T, H, W, C]`. is_training: Whether to use training mode. Returns: A 5-D float array of shape `[B, new_t, new_h, new_w, sum(output_channels)]`. 
""" # Branch 0 branch_0 = SUnit3D( output_channels=self._output_channels[0], kernel_shape=(1, 1, 1), separable=False, normalize_fn=self._normalize_fn, self_gating_fn=self._self_gating_fn, name='Branch_0_Conv2d_0a_1x1')( inputs, is_training=is_training) # Branch 1 branch_1 = SUnit3D( output_channels=self._output_channels[1], kernel_shape=(1, 1, 1), separable=False, normalize_fn=self._normalize_fn, self_gating_fn=None, name='Branch_1_Conv2d_0a_1x1')( inputs, is_training=is_training) branch_1 = SUnit3D( output_channels=self._output_channels[2], kernel_shape=(self._temporal_kernel_size, 3, 3), separable=True, normalize_fn=self._normalize_fn, self_gating_fn=self._self_gating_fn, name='Branch_1_Conv2d_0b_3x3')( branch_1, is_training=is_training) # Branch 2 branch_2 = SUnit3D( output_channels=self._output_channels[3], kernel_shape=(1, 1, 1), separable=False, normalize_fn=self._normalize_fn, self_gating_fn=None, name='Branch_2_Conv2d_0a_1x1')( inputs, is_training=is_training) branch_2 = SUnit3D( output_channels=self._output_channels[4], kernel_shape=(self._temporal_kernel_size, 3, 3), separable=True, normalize_fn=self._normalize_fn, self_gating_fn=self._self_gating_fn, name='Branch_2_Conv2d_0b_3x3')( branch_2, is_training=is_training) # Branch 3 branch_3 = hk.MaxPool( window_shape=(1, 3, 3, 3, 1), strides=(1, 1, 1, 1, 1), padding='SAME', name='Branch_3_MaxPool_0a_3x3')( inputs) branch_3 = SUnit3D( output_channels=self._output_channels[5], kernel_shape=(1, 1, 1), separable=False, normalize_fn=self._normalize_fn, self_gating_fn=self._self_gating_fn, name='Branch_3_Conv2d_0b_1x1')( branch_3, is_training=is_training) return jnp.concatenate((branch_0, branch_1, branch_2, branch_3), axis=4) _Layer = collections.namedtuple('_Layer', ('name', 'module', 'kwargs')) class S3D(hk.Module): """S3D architecture. Any intermediary representation can be obtained by choosing one of the valid `final_endpoint`s. The final value returned by this model (when 'Embeddings' is used as `final_endpoint`) is a single 1-D representation for each video in the batch. Another layer can be externally added on top of that to obtain logits. """ # Endpoints of the model in order. VALID_ENDPOINTS = ( 'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c', 'Embeddings', ) def __init__(self, normalize_fn: Optional[types.NormalizeFn] = None, first_temporal_kernel_size: int = 7, temporal_conv_startat: Optional[str] = 'Conv2d_2c_3x3', gating_startat: Optional[str] = 'Conv2d_2c_3x3', name='S3D'): """Initializes the S3D module. Args: normalize_fn: Function used for normalization. first_temporal_kernel_size: Specifies the temporal kernel size for the first conv3d filter. A larger value slows down the model but provides little accuracy improvement. Must be set to one of 1, 3, 5 or 7. temporal_conv_startat: Specifies the first conv block to use separable 3D convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the first valid block to use separable 3D convs. If provided block name is not present, all valid blocks will use separable 3D convs. gating_startat: Specifies the first conv block to use self gating. 'Conv2d_2c_3x3' is the first valid block to use self gating. If provided block name is not present, all valid blocks will use separable 3D convs. name: The name of the module. 
Raises: ValueError: If `temporal_conv_startat`, `gating_startat` or `first_temporal_kernel_size` is not recognized. """ super().__init__(name=name) self._first_temporal_kernel_size = first_temporal_kernel_size self._temporal_conv_startat = temporal_conv_startat self._gating_startat = gating_startat self._normalize_fn = normalize_fn if (temporal_conv_startat not in self.VALID_ENDPOINTS and temporal_conv_startat is not None): raise ValueError( f'Provided `temporal_conv_startat`: {temporal_conv_startat} not ' f'valid. It must be one of: {self.VALID_ENDPOINTS}, or `None`.') if (gating_startat not in self.VALID_ENDPOINTS and gating_startat is not None): raise ValueError( f'Provided `gating_startat`: {gating_startat} not valid. ' f'It must be one of: {self.VALID_ENDPOINTS}, or `None`.') if first_temporal_kernel_size not in [1, 3, 5, 7]: raise ValueError('`first_temporal_kernel_size` can only be 1, 3, 5 or 7.') def __call__(self, inputs: types.TensorLike, is_training: bool, final_endpoint: str = 'Embeddings') -> jnp.ndarray: """Connects the model to inputs. Args: inputs: A 5-D float array of shape `[B, T, H, W, C]`. is_training: Whether to use training mode. final_endpoint: Up to which endpoint to run / return. Returns: A 5-D float array of shape `[B, new_t, new_h, new_w, sum(output_channels)]`. Returns: Network output at location `final_endpoint`. A float array which shape depends on `final_endpoint`. Raises: ValueError: If `final_endpoint` is not recognized. """ if final_endpoint not in self.VALID_ENDPOINTS: raise ValueError(f'Provided final_endpoint: {final_endpoint} not valid.' f' It must be one of: {self.VALID_ENDPOINTS}') x = inputs # We define layers with tuples (name, module, kwargs) # Not all kwargs are present, as we will need to fill in certain properties # as we move down the network. layers = [] # The first layer is conditional on the input data shape: the channel size # is used to identify whether the `space_to_depth` transformation has been # applied to the input. This is used to speed up computation on TPUs. 
if x.shape[-1] == 3: layers.append( _Layer('Conv2d_1a_7x7', SUnit3D, dict(output_channels=64, stride=(2, 2, 2), separable=False, kernel_shape=(self._first_temporal_kernel_size, 7, 7), normalize_fn=self._normalize_fn))) else: layers.append( _Layer('Conv2d_1a_7x7', SUnit3D, dict(output_channels=64, kernel_shape=(2, 4, 4), stride=(1, 1, 1), separable=False, normalize_fn=self._normalize_fn))) layers.extend([ _Layer('MaxPool_2a_3x3', _MaxPool, dict(window_shape=(1, 1, 3, 3, 1), strides=(1, 1, 2, 2, 1), padding='SAME')), _Layer('Conv2d_2b_1x1', SUnit3D, dict(output_channels=64, kernel_shape=(1, 1, 1), normalize_fn=self._normalize_fn)), _Layer('Conv2d_2c_3x3', SUnit3D, dict(output_channels=192, separable=True, normalize_fn=self._normalize_fn)), _Layer('MaxPool_3a_3x3', _MaxPool, dict(window_shape=(1, 1, 3, 3, 1), strides=(1, 1, 2, 2, 1), padding='SAME')), _Layer('Mixed_3b', InceptionBlockV13D, dict(output_channels=(64, 96, 128, 16, 32, 32), normalize_fn=self._normalize_fn)), _Layer('Mixed_3c', InceptionBlockV13D, dict(output_channels=(128, 128, 192, 32, 96, 64), normalize_fn=self._normalize_fn)), _Layer('MaxPool_4a_3x3', _MaxPool, dict(window_shape=(1, 3, 3, 3, 1), strides=(1, 2, 2, 2, 1), padding='SAME')), _Layer('Mixed_4b', InceptionBlockV13D, dict(output_channels=(192, 96, 208, 16, 48, 64), normalize_fn=self._normalize_fn)), _Layer('Mixed_4c', InceptionBlockV13D, dict(output_channels=(160, 112, 224, 24, 64, 64), normalize_fn=self._normalize_fn)), _Layer('Mixed_4d', InceptionBlockV13D, dict(output_channels=(128, 128, 256, 24, 64, 64), normalize_fn=self._normalize_fn)), _Layer('Mixed_4e', InceptionBlockV13D, dict(output_channels=(112, 144, 288, 32, 64, 64), normalize_fn=self._normalize_fn)), _Layer('Mixed_4f', InceptionBlockV13D, dict(output_channels=(256, 160, 320, 32, 128, 128), normalize_fn=self._normalize_fn)), _Layer('MaxPool_5a_2x2', _MaxPool, dict(window_shape=(1, 2, 2, 2, 1), strides=(1, 2, 2, 2, 1), padding='SAME')), _Layer('Mixed_5b', InceptionBlockV13D, dict(output_channels=(256, 160, 320, 32, 128, 128), normalize_fn=self._normalize_fn)), _Layer('Mixed_5c', InceptionBlockV13D, dict(output_channels=(384, 192, 384, 48, 128, 128), normalize_fn=self._normalize_fn)), ]) # These parameters may change thoughout the computation. self_gating_fn = None temporal_kernel_size = 1 # Iterate over layers. for layer in layers: # Update if layer.name == self._gating_startat: self_gating_fn = self_gating if layer.name == self._temporal_conv_startat: temporal_kernel_size = 3 kwargs = layer.kwargs if layer.module is SUnit3D: kwargs['self_gating_fn'] = self_gating_fn if 'kernel_shape' not in kwargs: kwargs['kernel_shape'] = (temporal_kernel_size, 3, 3) elif layer.module is InceptionBlockV13D: kwargs['self_gating_fn'] = self_gating_fn kwargs['temporal_kernel_size'] = temporal_kernel_size module = layer.module(name=layer.name, **kwargs) x = module(x, is_training=is_training) if final_endpoint == layer.name: return x assert final_endpoint == 'Embeddings' return jnp.mean(x, axis=(1, 2, 3))
deepmind-research-master
mmv/models/s3d.py
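A usage sketch (assumed, following the pattern of the S3D test below): because the batch-norm layers keep moving statistics, the model is wrapped with hk.transform_with_state.
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import normalization, s3d

def _forward(video, is_training):
  model = s3d.S3D(normalize_fn=normalization.get_normalize_fn('batch_norm'))
  return model(video, is_training=is_training, final_endpoint='Embeddings')

forward = hk.transform_with_state(_forward)
video = jnp.zeros((1, 16, 224, 224, 3))    # [B, T, H, W, 3]
params, state = forward.init(jax.random.PRNGKey(0), video, is_training=True)
embedding, _ = forward.apply(params, state, None, video, is_training=False)
# embedding.shape == (1, 1024)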
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Type Aliases.""" from typing import Callable, Tuple, Union import jax.numpy as jnp import numpy as np import optax TensorLike = Union[np.ndarray, jnp.DeviceArray] ActivationFn = Callable[[TensorLike], TensorLike] GatingFn = Callable[[TensorLike], TensorLike] NetworkFn = Callable[[TensorLike], TensorLike] # Callable doesn't allow kwargs to be used, and we often want to # pass in is_training=..., so ignore the arguments for the sake of pytype. NormalizeFn = Callable[..., TensorLike] OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
deepmind-research-master
mmv/models/types.py
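For illustration only: because NormalizeFn deliberately accepts arbitrary keyword arguments, any callable of the following shape can be passed as normalize_fn to the backbones above.
from mmv.models import types

def no_op_normalize(x: types.TensorLike, is_training: bool = True) -> types.TensorLike:
  """A trivial NormalizeFn that leaves its input unchanged."""
  del is_training  # Accepted but unused, matching the NormalizeFn contract.
  return x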
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3. """ResNet V2 modules. Equivalent to hk.Resnet except accepting a final_endpoint to return intermediate activations. """ from typing import Optional, Sequence, Text, Type, Union import haiku as hk import jax import jax.numpy as jnp from mmv.models import types class BottleneckBlock(hk.Module): """Implements a bottleneck residual block (ResNet50 and ResNet101).""" # pylint:disable=g-bare-generic def __init__(self, channels: int, stride: Union[int, Sequence[int]], use_projection: bool, normalize_fn: Optional[types.NormalizeFn] = None, name: Optional[Text] = None): super(BottleneckBlock, self).__init__(name=name) self._channels = channels self._stride = stride self._use_projection = use_projection self._normalize_fn = normalize_fn if self._use_projection: self._proj_conv = hk.Conv2D( output_channels=channels, kernel_shape=1, stride=stride, with_bias=False, padding='SAME', name='shortcut_conv') self._conv_0 = hk.Conv2D( output_channels=channels // 4, kernel_shape=1, stride=1, with_bias=False, padding='SAME', name='conv_0') self._conv_1 = hk.Conv2D( output_channels=channels // 4, kernel_shape=3, stride=stride, with_bias=False, padding='SAME', name='conv_1') self._conv_2 = hk.Conv2D( output_channels=channels, kernel_shape=1, stride=1, with_bias=False, padding='SAME', name='conv_2') def __call__(self, inputs, is_training): net = inputs shortcut = inputs for i, conv_i in enumerate([self._conv_0, self._conv_1, self._conv_2]): if self._normalize_fn is not None: net = self._normalize_fn(net, is_training=is_training) net = jax.nn.relu(net) if i == 0 and self._use_projection: shortcut = self._proj_conv(net) # Now do the convs. net = conv_i(net) return net + shortcut class BasicBlock(hk.Module): """Implements a basic residual block (ResNet18 and ResNet34).""" # pylint:disable=g-bare-generic def __init__(self, channels: int, stride: Union[int, Sequence[int]], use_projection: bool, normalize_fn: Optional[types.NormalizeFn] = None, name: Optional[Text] = None): super(BasicBlock, self).__init__(name=name) self._channels = channels self._stride = stride self._use_projection = use_projection self._normalize_fn = normalize_fn if self._use_projection: self._proj_conv = hk.Conv2D( output_channels=channels, kernel_shape=1, stride=stride, with_bias=False, padding='SAME', name='shortcut_conv') self._conv_0 = hk.Conv2D( output_channels=channels, kernel_shape=1, stride=1, with_bias=False, padding='SAME', name='conv_0') self._conv_1 = hk.Conv2D( output_channels=channels, kernel_shape=3, stride=stride, with_bias=False, padding='SAME', name='conv_1') def __call__(self, inputs, is_training): net = inputs shortcut = inputs for i, conv_i in enumerate([self._conv_0, self._conv_1]): if self._normalize_fn is not None: net = self._normalize_fn(net, is_training=is_training) net = jax.nn.relu(net) if i == 0 and self._use_projection: shortcut = self._proj_conv(net) # Now do the convs. 
net = conv_i(net) return net + shortcut class ResNetUnit(hk.Module): """Unit (group of blocks) for ResNet.""" # pylint:disable=g-bare-generic def __init__(self, channels: int, num_blocks: int, stride: Union[int, Sequence[int]], block_module: Type[BottleneckBlock], normalize_fn: Optional[types.NormalizeFn] = None, name: Optional[Text] = None, remat: bool = False): super(ResNetUnit, self).__init__(name=name) self._channels = channels self._num_blocks = num_blocks self._stride = stride self._normalize_fn = normalize_fn self._block_module = block_module self._remat = remat def __call__(self, inputs, is_training): input_channels = inputs.shape[-1] self._blocks = [] for id_block in range(self._num_blocks): use_projection = id_block == 0 and self._channels != input_channels self._blocks.append( self._block_module( channels=self._channels, stride=self._stride if id_block == 0 else 1, use_projection=use_projection, normalize_fn=self._normalize_fn, name='block_%d' % id_block)) net = inputs for block in self._blocks: if self._remat: # Note: we can ignore cell-var-from-loop because the lambda is evaluated # inside every iteration of the loop. This is needed to go around the # way variables are passed to jax.remat. net = hk.remat(lambda x: block(x, is_training=is_training))(net) # pylint: disable=cell-var-from-loop else: net = block(net, is_training=is_training) return net class ResNetV2(hk.Module): """ResNetV2 model.""" # Endpoints of the model in order. VALID_ENDPOINTS = ( 'resnet_stem', 'resnet_unit_0', 'resnet_unit_1', 'resnet_unit_2', 'resnet_unit_3', 'last_conv', 'output', ) # pylint:disable=g-bare-generic def __init__(self, depth=50, num_classes: Optional[int] = 1000, width_mult: int = 1, normalize_fn: Optional[types.NormalizeFn] = None, name: Optional[Text] = None, remat: bool = False): """Creates ResNetV2 Haiku module. Args: depth: depth of the desired ResNet (18, 34, 50, 101, 152 or 202). num_classes: (int) Number of outputs in final layer. If None will not add a classification head and will return the output embedding. width_mult: multiplier for channel width. normalize_fn: normalization function, see helpers/utils.py name: Name of the module. remat: Whether to rematerialize intermediate activations (saves memory). 
""" super(ResNetV2, self).__init__(name=name) self._normalize_fn = normalize_fn self._num_classes = num_classes self._width_mult = width_mult self._strides = [1, 2, 2, 2] num_blocks = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3], } if depth not in num_blocks: raise ValueError( f'`depth` should be in {list(num_blocks.keys())} ({depth} given).') self._num_blocks = num_blocks[depth] if depth >= 50: self._block_module = BottleneckBlock self._channels = [256, 512, 1024, 2048] else: self._block_module = BasicBlock self._channels = [64, 128, 256, 512] self._initial_conv = hk.Conv2D( output_channels=64 * self._width_mult, kernel_shape=7, stride=2, with_bias=False, padding='SAME', name='initial_conv') if remat: self._initial_conv = hk.remat(self._initial_conv) self._block_groups = [] for i in range(4): self._block_groups.append( ResNetUnit( channels=self._channels[i] * self._width_mult, num_blocks=self._num_blocks[i], block_module=self._block_module, stride=self._strides[i], normalize_fn=self._normalize_fn, name='block_group_%d' % i, remat=remat)) if num_classes is not None: self._logits_layer = hk.Linear( output_size=num_classes, w_init=jnp.zeros, name='logits') def __call__(self, inputs, is_training, final_endpoint='output'): self._final_endpoint = final_endpoint net = self._initial_conv(inputs) net = hk.max_pool( net, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME') end_point = 'resnet_stem' if self._final_endpoint == end_point: return net for i_group, block_group in enumerate(self._block_groups): net = block_group(net, is_training=is_training) end_point = f'resnet_unit_{i_group}' if self._final_endpoint == end_point: return net end_point = 'last_conv' if self._final_endpoint == end_point: return net if self._normalize_fn is not None: net = self._normalize_fn(net, is_training=is_training) net = jax.nn.relu(net) # The actual representation net = jnp.mean(net, axis=[1, 2]) assert self._final_endpoint == 'output' if self._num_classes is None: # If num_classes was None, we just return the output # of the last block, without fully connected layer. return net return self._logits_layer(net)
deepmind-research-master
mmv/models/resnet.py
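A usage sketch (assumed): a ResNet-18 feature extractor with num_classes=None returning the pre-logits embedding, the same way AudioModule uses ResNetV2 above. Input shape and hyperparameters are illustrative.
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import normalization, resnet

def _forward(spectrogram, is_training):
  model = resnet.ResNetV2(
      depth=18,
      num_classes=None,
      normalize_fn=normalization.get_normalize_fn('batch_norm'))
  return model(spectrogram, is_training=is_training, final_endpoint='output')

forward = hk.transform_with_state(_forward)
x = jnp.zeros((4, 80, 100, 1))  # e.g. a batch of log-mel spectrograms
params, state = forward.init(jax.random.PRNGKey(0), x, is_training=True)
features, _ = forward.apply(params, state, None, x, is_training=False)
# features.shape == (4, 512) for the BasicBlock (depth-18) variant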
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Normalize functions constructors.""" from typing import Any, Dict, Optional, Sequence, Union import haiku as hk from jax import numpy as jnp from mmv.models import types class _BatchNorm(hk.BatchNorm): """A `hk.BatchNorm` with adapted default arguments.""" def __init__(self, create_scale: bool = True, create_offset: bool = True, decay_rate: float = 0.9, eps: float = 1e-5, test_local_stats: bool = False, **kwargs): # Check args. if kwargs.get('cross_replica_axis', None) is not None: raise ValueError( 'Attempting to use \'batch_norm\' normalizer, but specifying ' '`cross_replica_axis`. If you want this behavior use ' '`normalizer=\'cross_replica_batch_norm\'` directly.') self._test_local_stats = test_local_stats super().__init__(create_scale=create_scale, create_offset=create_offset, decay_rate=decay_rate, eps=eps, **kwargs) def __call__(self, x: types.TensorLike, is_training: bool) -> jnp.ndarray: return super().__call__(x, is_training, test_local_stats=self._test_local_stats) class _CrossReplicaBatchNorm(hk.BatchNorm): """A `hk.BatchNorm` with adapted default arguments for cross replica.""" def __init__(self, create_scale: bool = True, create_offset: bool = True, decay_rate: float = 0.9, eps: float = 1e-5, test_local_stats: bool = False, **kwargs): # Check args. if 'cross_replica_axis' in kwargs and kwargs['cross_replica_axis'] is None: raise ValueError( 'Attempting to use \'cross_replica_batch_norm\' normalizer, but ' 'specifying `cross_replica_axis` to be None. If you want this ' 'behavior use `normalizer=\'batch_norm\'` directly.') self._test_local_stats = test_local_stats kwargs['cross_replica_axis'] = kwargs.get('cross_replica_axis', 'i') super().__init__(create_scale=create_scale, create_offset=create_offset, decay_rate=decay_rate, eps=eps, **kwargs) def __call__(self, x: types.TensorLike, is_training: bool) -> jnp.ndarray: return super().__call__(x, is_training, test_local_stats=self._test_local_stats) class _LayerNorm(hk.LayerNorm): """A `hk.LayerNorm` accepting (and discarding) an `is_training` argument.""" def __init__(self, axis: Union[int, Sequence[int]] = (1, 2), create_scale: bool = True, create_offset: bool = True, **kwargs): super().__init__(axis=axis, create_scale=create_scale, create_offset=create_offset, **kwargs) def __call__(self, x: types.TensorLike, is_training: bool) -> jnp.ndarray: del is_training # Unused. return super().__call__(x) _NORMALIZER_NAME_TO_CLASS = { 'batch_norm': _BatchNorm, 'cross_replica_batch_norm': _CrossReplicaBatchNorm, 'layer_norm': _LayerNorm, } def get_normalize_fn( normalizer_name: str = 'batch_norm', normalizer_kwargs: Optional[Dict[str, Any]] = None, ) -> types.NormalizeFn: """Handles NormalizeFn creation. These functions are expected to be used as part of Haiku model. On each application of the returned normalization_fn, a new Haiku layer will be added to the model. Args: normalizer_name: The name of the normalizer to be constructed. 
normalizer_kwargs: The kwargs passed to the normalizer constructor. Returns: A `types.NormalizeFn` that when applied will create a new layer. Raises: ValueError: If `normalizer_name` is unknown. """ # Check args. if normalizer_name not in _NORMALIZER_NAME_TO_CLASS: raise ValueError(f'Unrecognized `normalizer_name` {normalizer_name}.') normalizer_class = _NORMALIZER_NAME_TO_CLASS[normalizer_name] normalizer_kwargs = normalizer_kwargs or dict() return lambda *a, **k: normalizer_class(**normalizer_kwargs)(*a, **k) # pylint: disable=unnecessary-lambda
deepmind-research-master
mmv/models/normalization.py
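A usage sketch (assumed): every application of the returned function adds a fresh normalization layer to the enclosing Haiku module; layer norm is shown here because it is stateless and works with plain hk.transform.
import haiku as hk
import jax
import jax.numpy as jnp
from mmv.models import normalization

normalize_fn = normalization.get_normalize_fn(
    'layer_norm', normalizer_kwargs={'axis': -1})

def _forward(x, is_training):
  x = hk.Linear(16)(x)
  return normalize_fn(x, is_training=is_training)  # LayerNorm ignores the flag

forward = hk.transform(_forward)
x = jnp.zeros((2, 8))
params = forward.init(jax.random.PRNGKey(0), x, is_training=True)
out = forward.apply(params, None, x, is_training=True)
# out.shape == (2, 16)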
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for s3d.""" from absl.testing import absltest from absl.testing import parameterized import haiku as hk import jax import numpy as np from mmv.models import normalization from mmv.models import s3d class _CallableS3D: """Wrapper around S3D that take care of parameter book keeping.""" def __init__(self, *args, **kwargs): self._model = hk.transform_with_state( lambda *a, **k: # pylint: disable=g-long-lambda,unnecessary-lambda s3d.S3D( normalize_fn=normalization.get_normalize_fn(), *args, **kwargs)(*a, **k)) self._rng = jax.random.PRNGKey(42) self._params, self._state = None, None def init(self, inputs, **kwargs): self._params, self._state = self._model.init( self._rng, inputs, is_training=True, **kwargs) def __call__(self, inputs, **kwargs): if self._params is None: self.init(inputs) output, _ = self._model.apply( self._params, self._state, self._rng, inputs, **kwargs) return output class S3DTest(parameterized.TestCase): # Testing all layers is quite slow, added in comments for completeness. @parameterized.parameters( # dict(endpoint='Conv2d_1a_7x7', expected_size=(2, 8, 112, 112, 64)), # dict(endpoint='MaxPool_2a_3x3', expected_size=(2, 8, 56, 56, 64)), # dict(endpoint='Conv2d_2b_1x1', expected_size=(2, 8, 56, 56, 64)), # dict(endpoint='Conv2d_2c_3x3', expected_size=(2, 8, 56, 56, 192)), # dict(endpoint='MaxPool_3a_3x3', expected_size=(2, 8, 28, 28, 192)), # dict(endpoint='Mixed_3b', expected_size=(2, 8, 28, 28, 256)), # dict(endpoint='Mixed_3c', expected_size=(2, 8, 28, 28, 480)), # dict(endpoint='MaxPool_4a_3x3', expected_size=(2, 4, 14, 14, 480)), # dict(endpoint='Mixed_4b', expected_size=(2, 4, 14, 14, 512)), # dict(endpoint='Mixed_4c', expected_size=(2, 4, 14, 14, 512)), # dict(endpoint='Mixed_4d', expected_size=(2, 4, 14, 14, 512)), # dict(endpoint='Mixed_4e', expected_size=(2, 4, 14, 14, 528)), # dict(endpoint='Mixed_4f', expected_size=(2, 4, 14, 14, 832)), # dict(endpoint='MaxPool_5a_2x2', expected_size=(2, 2, 7, 7, 832)), # dict(endpoint='Mixed_5b', expected_size=(2, 2, 7, 7, 832)), # dict(endpoint='Mixed_5c', expected_size=(2, 2, 7, 7, 1024)), dict(endpoint='Embeddings', expected_size=(2, 1024)), ) def test_endpoint_expected_output_dimensions(self, endpoint, expected_size): inputs = np.random.normal(size=(2, 16, 224, 224, 3)) model = _CallableS3D() output = model(inputs, is_training=False, final_endpoint=endpoint) self.assertSameElements(output.shape, expected_size) def test_space_to_depth(self): inputs = np.random.normal(size=(2, 16//2, 224//2, 224//2, 3*2*2*2)) model = _CallableS3D() output = model(inputs, is_training=False, final_endpoint='Conv2d_1a_7x7') self.assertSameElements(output.shape, (2, 8, 112, 112, 64)) if __name__ == '__main__': absltest.main()
deepmind-research-master
mmv/models/s3d_test.py
# Copyright 2019 DeepMind Technologies Limited and Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Evaluation metrics.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf import tensorflow_gan as tfgan import tensorflow_hub as hub def fid(generated_sentences, real_sentences): """Compute FID rn sentences using pretrained universal sentence encoder. Args: generated_sentences: list of N strings. real_sentences: list of N strings. Returns: Frechet distance between activations. """ embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder/2") real_embed = embed(real_sentences) generated_embed = embed(generated_sentences) distance = tfgan.eval.frechet_classifier_distance_from_activations( real_embed, generated_embed) # Restrict the thread pool size to prevent excessive CPU usage. config = tf.ConfigProto() config.intra_op_parallelism_threads = 16 config.inter_op_parallelism_threads = 16 with tf.Session(config=config) as session: session.run(tf.global_variables_initializer()) session.run(tf.tables_initializer()) distance_np = session.run(distance) return distance_np
deepmind-research-master
scratchgan/eval_metrics.py
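A usage sketch (assumed): fid builds its own TF1 graph and session internally and downloads the universal sentence encoder from TF Hub on first use, so a TF1-style graph context is required; the sentences below are illustrative.
import tensorflow.compat.v1 as tf
from scratchgan.eval_metrics import fid

tf.disable_eager_execution()  # Only needed when running on top of TF2.

real_sentences = ["the cat sat on the mat .", "birds fly south in winter ."]
generated_sentences = ["the cat sat on a chair .", "birds fly north in summer ."]
distance = fid(generated_sentences, real_sentences)
print("Frechet distance:", distance)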
# Copyright 2019 DeepMind Technologies Limited and Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
deepmind-research-master
scratchgan/__init__.py
# Lint as: python3 # Copyright 2019 DeepMind Technologies Limited and Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generators for text data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import logging import sonnet as snt import tensorflow.compat.v1 as tf import tensorflow_probability as tfp from scratchgan import utils class LSTMGen(snt.AbstractModule): """A multi-layer LSTM language model. Uses tied input/output embedding weights. """ def __init__(self, vocab_size, feature_sizes, max_sequence_length, batch_size, use_layer_norm, trainable_embedding_size, input_dropout, output_dropout, pad_token, embedding_source=None, vocab_file=None, name='lstm_gen'): super(LSTMGen, self).__init__(name=name) self._feature_sizes = feature_sizes self._max_sequence_length = max_sequence_length self._vocab_size = vocab_size self._batch_size = batch_size self._use_layer_norm = use_layer_norm self._trainable_embedding_size = trainable_embedding_size self._embedding_source = embedding_source self._vocab_file = vocab_file self._input_dropout = input_dropout self._output_dropout = output_dropout self._pad_token = pad_token if self._embedding_source: assert vocab_file def _build(self, is_training=True, temperature=1.0): input_keep_prob = (1. - self._input_dropout) if is_training else 1.0 output_keep_prob = (1. - self._output_dropout) if is_training else 1.0 batch_size = self._batch_size max_sequence_length = self._max_sequence_length if self._embedding_source: all_embeddings = utils.make_partially_trainable_embeddings( self._vocab_file, self._embedding_source, self._vocab_size, self._trainable_embedding_size) else: all_embeddings = tf.get_variable( 'trainable_embeddings', shape=[self._vocab_size, self._trainable_embedding_size], trainable=True) _, self._embedding_size = all_embeddings.shape.as_list() input_embeddings = tf.nn.dropout(all_embeddings, keep_prob=input_keep_prob) output_embeddings = tf.nn.dropout( all_embeddings, keep_prob=output_keep_prob) out_bias = tf.get_variable( 'out_bias', shape=[1, self._vocab_size], dtype=tf.float32) in_proj = tf.get_variable( 'in_proj', shape=[self._embedding_size, self._feature_sizes[0]]) # If more than 1 layer, then output has dim sum(self._feature_sizes), # which is different from input dim == self._feature_sizes[0] # So we need a different projection matrix for input and output. if len(self._feature_sizes) > 1: out_proj = tf.get_variable( 'out_proj', shape=[self._embedding_size, sum(self._feature_sizes)]) else: out_proj = in_proj encoder_cells = [] for feature_size in self._feature_sizes: encoder_cells += [ snt.LSTM(feature_size, use_layer_norm=self._use_layer_norm) ] encoder_cell = snt.DeepRNN(encoder_cells) state = encoder_cell.initial_state(batch_size) # Manual unrolling. 
samples_list, logits_list, logprobs_list, embeddings_list = [], [], [], [] sample = tf.tile( tf.constant(self._pad_token, dtype=tf.int32)[None], [batch_size]) logging.info('Unrolling over %d steps.', max_sequence_length) for _ in range(max_sequence_length): # Input is sampled word at t-1. embedding = tf.nn.embedding_lookup(input_embeddings, sample) embedding.shape.assert_is_compatible_with( [batch_size, self._embedding_size]) embedding_proj = tf.matmul(embedding, in_proj) embedding_proj.shape.assert_is_compatible_with( [batch_size, self._feature_sizes[0]]) outputs, state = encoder_cell(embedding_proj, state) outputs_proj = tf.matmul(outputs, out_proj, transpose_b=True) logits = tf.matmul( outputs_proj, output_embeddings, transpose_b=True) + out_bias categorical = tfp.distributions.Categorical(logits=logits/temperature) sample = categorical.sample() logprobs = categorical.log_prob(sample) samples_list.append(sample) logits_list.append(logits) logprobs_list.append(logprobs) embeddings_list.append(embedding) # Create an op to retrieve embeddings for full sequence, useful for testing. embeddings = tf.stack( # pylint: disable=unused-variable embeddings_list, axis=1, name='embeddings') sequence = tf.stack(samples_list, axis=1) logprobs = tf.stack(logprobs_list, axis=1) # The sequence stops after the first occurrence of a PAD token. sequence_length = utils.get_first_occurrence_indices( sequence, self._pad_token) mask = utils.get_mask_past_symbol(sequence, self._pad_token) masked_sequence = sequence * tf.cast(mask, tf.int32) masked_logprobs = logprobs * tf.cast(mask, tf.float32) return { 'sequence': masked_sequence, 'sequence_length': sequence_length, 'logprobs': masked_logprobs }
deepmind-research-master
scratchgan/generators.py
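The following is a minimal, untested sketch (not part of the repository) of how the `LSTMGen` module above can be driven, mirroring the wiring in scratchgan/experiment.py; the vocabulary size, batch size and feature sizes are illustrative assumptions rather than recommended settings.

import tensorflow.compat.v1 as tf
from scratchgan import generators
from scratchgan import reader

tf.disable_v2_behavior()
gen = generators.LSTMGen(
    vocab_size=20000,                 # illustrative; matches the default vocab truncation
    feature_sizes=[512, 512],         # two LSTM layers
    max_sequence_length=reader.MAX_TOKENS_SEQUENCE["emnlp2017"],
    batch_size=16,
    use_layer_norm=False,
    trainable_embedding_size=64,
    input_dropout=0.0,
    output_dropout=0.0,
    pad_token=reader.PAD_INT,
)
gen_outputs = gen()  # dict with "sequence", "sequence_length" and "logprobs"

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  samples = sess.run(gen_outputs["sequence"])  # [16, 52] int32 token ids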
# Lint as: python3 # Copyright 2019 DeepMind Technologies Limited and Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for parsing text files.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import os from absl import logging import numpy as np from tensorflow.compat.v1.io import gfile # sequences: [N, MAX_TOKENS_SEQUENCE] array of int32 # lengths: [N, 2] array of int32, such that # lengths[i, 0] is the number of non-pad tokens in sequences[i, :] FILENAMES = { "emnlp2017": ("train.json", "valid.json", "test.json"), } # EMNLP2017 sentences have max length 50, add one for a PAD token so that all # sentences end with PAD. MAX_TOKENS_SEQUENCE = {"emnlp2017": 52} UNK = "<unk>" PAD = " " PAD_INT = 0 def tokenize(sentence): """Split a string into words.""" return sentence.split(" ") + [PAD] def _build_vocab(json_data): """Builds full vocab from json data.""" vocab = collections.Counter() for sentence in json_data: tokens = tokenize(sentence["s"]) vocab.update(tokens) for title in sentence["t"]: title_tokens = tokenize(title) vocab.update(title_tokens) # Most common words first. count_pairs = sorted(list(vocab.items()), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) words = list(words) if UNK not in words: words = [UNK] + words word_to_id = dict(list(zip(words, list(range(len(words)))))) # Tokens are now sorted by frequency. There's no guarantee that `PAD` will # end up at `PAD_INT` index. Enforce it by swapping whatever token is # currently at the `PAD_INT` index with the `PAD` token. word = list(word_to_id.keys())[list(word_to_id.values()).index(PAD_INT)] word_to_id[PAD], word_to_id[word] = word_to_id[word], word_to_id[PAD] assert word_to_id[PAD] == PAD_INT return word_to_id def string_sequence_to_sequence(string_sequence, word_to_id): result = [] for word in string_sequence: if word in word_to_id: result.append(word_to_id[word]) else: result.append(word_to_id[UNK]) return result def _integerize(json_data, word_to_id, dataset): """Transform words into integers.""" sequences = np.full((len(json_data), MAX_TOKENS_SEQUENCE[dataset]), word_to_id[PAD], np.int32) sequence_lengths = np.zeros(shape=(len(json_data)), dtype=np.int32) for i, sentence in enumerate(json_data): sequence_i = string_sequence_to_sequence( tokenize(sentence["s"]), word_to_id) sequence_lengths[i] = len(sequence_i) sequences[i, :sequence_lengths[i]] = np.array(sequence_i) return { "sequences": sequences, "sequence_lengths": sequence_lengths, } def get_raw_data(data_path, dataset, truncate_vocab=20000): """Load raw data from data directory "data_path". Reads text files, converts strings to integer ids, and performs mini-batching of the inputs. Args: data_path: string path to the directory where simple-examples.tgz has been extracted. dataset: one of ["emnlp2017"] truncate_vocab: int, number of words to keep in the vocabulary. 
Returns: tuple (train_data, valid_data, vocabulary) where each of the data objects can be passed to iterator. Raises: ValueError: dataset not in ["emnlp2017"]. """ if dataset not in FILENAMES: raise ValueError("Invalid dataset {}. Valid datasets: {}".format( dataset, list(FILENAMES.keys()))) train_file, valid_file, _ = FILENAMES[dataset] train_path = os.path.join(data_path, train_file) valid_path = os.path.join(data_path, valid_file) with gfile.GFile(train_path, "r") as json_file: json_data_train = json.load(json_file) with gfile.GFile(valid_path, "r") as json_file: json_data_valid = json.load(json_file) word_to_id = _build_vocab(json_data_train) logging.info("Full vocab length: %d", len(word_to_id)) # Assume the vocab is sorted by frequency. word_to_id_truncated = { k: v for k, v in word_to_id.items() if v < truncate_vocab } logging.info("Truncated vocab length: %d", len(word_to_id_truncated)) train_data = _integerize(json_data_train, word_to_id_truncated, dataset) valid_data = _integerize(json_data_valid, word_to_id_truncated, dataset) return train_data, valid_data, word_to_id_truncated def iterator(raw_data, batch_size, random=False): """Looping iterators on the raw data.""" sequences = raw_data["sequences"] sequence_lengths = raw_data["sequence_lengths"] num_examples = sequences.shape[0] indice_range = np.arange(num_examples) if random: while True: indices = np.random.choice(indice_range, size=batch_size, replace=True) yield { "sequence": sequences[indices, :], "sequence_length": sequence_lengths[indices], } else: start = 0 while True: sequence = sequences[start:(start + batch_size), :] sequence_length = sequence_lengths[start:(start + batch_size)] start += batch_size if start + batch_size > num_examples: start = (start + batch_size) % num_examples yield { "sequence": sequence, "sequence_length": sequence_length, }
deepmind-research-master
scratchgan/reader.py
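A small usage sketch for the reader module above; the data directory is a placeholder and is assumed to contain the EMNLP2017 train.json and valid.json files.

from scratchgan import reader

train_data, valid_data, word_to_id = reader.get_raw_data(
    data_path="/tmp/emnlp2017", dataset="emnlp2017", truncate_vocab=20000)

batches = reader.iterator(raw_data=train_data, batch_size=4, random=True)
batch = next(batches)
# batch["sequence"]: int32 [4, MAX_TOKENS_SEQUENCE["emnlp2017"]], PAD-padded.
# batch["sequence_length"]: int32 [4], token count up to and including the PAD.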
# Lint as: python3 # Copyright 2019 DeepMind Technologies Limited and Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training script for ScratchGAN.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time from absl import app from absl import flags from absl import logging import numpy as np import tensorflow.compat.v1 as tf from tensorflow.compat.v1.io import gfile from scratchgan import discriminator_nets from scratchgan import eval_metrics from scratchgan import generators from scratchgan import losses from scratchgan import reader from scratchgan import utils flags.DEFINE_string("dataset", "emnlp2017", "Dataset.") flags.DEFINE_integer("batch_size", 512, "Batch size") flags.DEFINE_string("gen_type", "lstm", "Generator type.") flags.DEFINE_string("disc_type", "lstm", "Discriminator type.") flags.DEFINE_string("disc_loss_type", "ce", "Loss type.") flags.DEFINE_integer("gen_feature_size", 512, "Generator feature size.") flags.DEFINE_integer("disc_feature_size", 512, "Discriminator feature size.") flags.DEFINE_integer("num_layers_gen", 2, "Number of generator layers.") flags.DEFINE_integer("num_layers_disc", 1, "Number of discriminator layers.") flags.DEFINE_bool("layer_norm_gen", False, "Layer norm generator.") flags.DEFINE_bool("layer_norm_disc", True, "Layer norm discriminator.") flags.DEFINE_float("gen_input_dropout", 0.0, "Input dropout generator.") flags.DEFINE_float("gen_output_dropout", 0.0, "Input dropout discriminator.") flags.DEFINE_float("l2_gen", 0.0, "L2 regularization generator.") flags.DEFINE_float("l2_disc", 1e-6, "L2 regularization discriminator.") flags.DEFINE_float("disc_dropout", 0.1, "Dropout discriminator") flags.DEFINE_integer("trainable_embedding_size", 64, "Size of trainable embedding.") flags.DEFINE_bool("use_pretrained_embedding", True, "Use pretrained embedding.") flags.DEFINE_integer("num_steps", int(200 * 1000), "Number of training steps.") flags.DEFINE_integer("num_disc_updates", 1, "Number of discriminator updates.") flags.DEFINE_integer("num_gen_updates", 1, "Number of generator updates.") flags.DEFINE_string("data_dir", "/tmp/emnlp2017", "Directory where data is.") flags.DEFINE_float("gen_lr", 9.59e-5, "Learning rate generator.") flags.DEFINE_float("disc_lr", 9.38e-3, "Learning rate discriminator.") flags.DEFINE_float("gen_beta1", 0.5, "Beta1 for generator.") flags.DEFINE_float("disc_beta1", 0.5, "Beta1 for discriminator.") flags.DEFINE_float("gamma", 0.23, "Discount factor.") flags.DEFINE_float("baseline_decay", 0.08, "Baseline decay rate.") flags.DEFINE_string("mode", "train", "train or evaluate_pair.") flags.DEFINE_string("checkpoint_dir", "/tmp/emnlp2017/checkpoints/", "Directory for checkpoints.") flags.DEFINE_integer("export_every", 1000, "Frequency of checkpoint exports.") flags.DEFINE_integer("num_examples_for_eval", int(1e4), "Number of examples for evaluation") EVALUATOR_SLEEP_PERIOD = 60 # Seconds evaluator sleeps if nothing to do. 
def main(_): config = flags.FLAGS gfile.makedirs(config.checkpoint_dir) if config.mode == "train": train(config) elif config.mode == "evaluate_pair": while True: checkpoint_path = utils.maybe_pick_models_to_evaluate( checkpoint_dir=config.checkpoint_dir) if checkpoint_path: evaluate_pair( config=config, batch_size=config.batch_size, checkpoint_path=checkpoint_path, data_dir=config.data_dir, dataset=config.dataset, num_examples_for_eval=config.num_examples_for_eval) else: logging.info("No models to evaluate found, sleeping for %d seconds", EVALUATOR_SLEEP_PERIOD) time.sleep(EVALUATOR_SLEEP_PERIOD) else: raise Exception( "Unexpected mode %s, supported modes are \"train\" or \"evaluate_pair\"" % (config.mode)) def train(config): """Train.""" logging.info("Training.") tf.reset_default_graph() np.set_printoptions(precision=4) # Get data. raw_data = reader.get_raw_data( data_path=config.data_dir, dataset=config.dataset) train_data, valid_data, word_to_id = raw_data id_to_word = {v: k for k, v in word_to_id.items()} vocab_size = len(word_to_id) max_length = reader.MAX_TOKENS_SEQUENCE[config.dataset] logging.info("Vocabulary size: %d", vocab_size) iterator = reader.iterator(raw_data=train_data, batch_size=config.batch_size) iterator_valid = reader.iterator( raw_data=valid_data, batch_size=config.batch_size) real_sequence = tf.placeholder( dtype=tf.int32, shape=[config.batch_size, max_length], name="real_sequence") real_sequence_length = tf.placeholder( dtype=tf.int32, shape=[config.batch_size], name="real_sequence_length") first_batch_np = next(iterator) valid_batch_np = next(iterator_valid) test_real_batch = {k: tf.constant(v) for k, v in first_batch_np.items()} test_fake_batch = { "sequence": tf.constant( np.random.choice( vocab_size, size=[config.batch_size, max_length]).astype(np.int32)), "sequence_length": tf.constant( np.random.choice(max_length, size=[config.batch_size]).astype(np.int32)), } valid_batch = {k: tf.constant(v) for k, v in valid_batch_np.items()} # Create generator. if config.use_pretrained_embedding: embedding_source = utils.get_embedding_path(config.data_dir, config.dataset) vocab_file = "/tmp/vocab.txt" with gfile.GFile(vocab_file, "w") as f: for i in range(len(id_to_word)): f.write(id_to_word[i] + "\n") logging.info("Temporary vocab file: %s", vocab_file) else: embedding_source = None vocab_file = None gen = generators.LSTMGen( vocab_size=vocab_size, feature_sizes=[config.gen_feature_size] * config.num_layers_gen, max_sequence_length=reader.MAX_TOKENS_SEQUENCE[config.dataset], batch_size=config.batch_size, use_layer_norm=config.layer_norm_gen, trainable_embedding_size=config.trainable_embedding_size, input_dropout=config.gen_input_dropout, output_dropout=config.gen_output_dropout, pad_token=reader.PAD_INT, embedding_source=embedding_source, vocab_file=vocab_file, ) gen_outputs = gen() # Create discriminator. disc = discriminator_nets.LSTMEmbedDiscNet( vocab_size=vocab_size, feature_sizes=[config.disc_feature_size] * config.num_layers_disc, trainable_embedding_size=config.trainable_embedding_size, embedding_source=embedding_source, use_layer_norm=config.layer_norm_disc, pad_token=reader.PAD_INT, vocab_file=vocab_file, dropout=config.disc_dropout, ) disc_logits_real = disc( sequence=real_sequence, sequence_length=real_sequence_length) disc_logits_fake = disc( sequence=gen_outputs["sequence"], sequence_length=gen_outputs["sequence_length"]) # Loss of the discriminator. 
if config.disc_loss_type == "ce": targets_real = tf.ones( [config.batch_size, reader.MAX_TOKENS_SEQUENCE[config.dataset]]) targets_fake = tf.zeros( [config.batch_size, reader.MAX_TOKENS_SEQUENCE[config.dataset]]) loss_real = losses.sequential_cross_entropy_loss(disc_logits_real, targets_real) loss_fake = losses.sequential_cross_entropy_loss(disc_logits_fake, targets_fake) disc_loss = 0.5 * loss_real + 0.5 * loss_fake # Loss of the generator. gen_loss, cumulative_rewards, baseline = losses.reinforce_loss( disc_logits=disc_logits_fake, gen_logprobs=gen_outputs["logprobs"], gamma=config.gamma, decay=config.baseline_decay) # Optimizers disc_optimizer = tf.train.AdamOptimizer( learning_rate=config.disc_lr, beta1=config.disc_beta1) gen_optimizer = tf.train.AdamOptimizer( learning_rate=config.gen_lr, beta1=config.gen_beta1) # Get losses and variables. disc_vars = disc.get_all_variables() gen_vars = gen.get_all_variables() l2_disc = tf.reduce_sum(tf.add_n([tf.nn.l2_loss(v) for v in disc_vars])) l2_gen = tf.reduce_sum(tf.add_n([tf.nn.l2_loss(v) for v in gen_vars])) scalar_disc_loss = tf.reduce_mean(disc_loss) + config.l2_disc * l2_disc scalar_gen_loss = tf.reduce_mean(gen_loss) + config.l2_gen * l2_gen # Update ops. global_step = tf.train.get_or_create_global_step() disc_update = disc_optimizer.minimize( scalar_disc_loss, var_list=disc_vars, global_step=global_step) gen_update = gen_optimizer.minimize( scalar_gen_loss, var_list=gen_vars, global_step=global_step) # Saver. saver = tf.train.Saver() # Metrics test_disc_logits_real = disc(**test_real_batch) test_disc_logits_fake = disc(**test_fake_batch) valid_disc_logits = disc(**valid_batch) disc_predictions_real = tf.nn.sigmoid(disc_logits_real) disc_predictions_fake = tf.nn.sigmoid(disc_logits_fake) valid_disc_predictions = tf.reduce_mean( tf.nn.sigmoid(valid_disc_logits), axis=0) test_disc_predictions_real = tf.reduce_mean( tf.nn.sigmoid(test_disc_logits_real), axis=0) test_disc_predictions_fake = tf.reduce_mean( tf.nn.sigmoid(test_disc_logits_fake), axis=0) # Only log results for the first element of the batch. metrics = { "scalar_gen_loss": scalar_gen_loss, "scalar_disc_loss": scalar_disc_loss, "disc_predictions_real": tf.reduce_mean(disc_predictions_real), "disc_predictions_fake": tf.reduce_mean(disc_predictions_fake), "test_disc_predictions_real": tf.reduce_mean(test_disc_predictions_real), "test_disc_predictions_fake": tf.reduce_mean(test_disc_predictions_fake), "valid_disc_predictions": tf.reduce_mean(valid_disc_predictions), "cumulative_rewards": tf.reduce_mean(cumulative_rewards), "baseline": tf.reduce_mean(baseline), } # Training. logging.info("Starting training") with tf.Session() as sess: sess.run(tf.global_variables_initializer()) latest_ckpt = tf.train.latest_checkpoint(config.checkpoint_dir) if latest_ckpt: saver.restore(sess, latest_ckpt) for step in range(config.num_steps): real_data_np = next(iterator) train_feed = { real_sequence: real_data_np["sequence"], real_sequence_length: real_data_np["sequence_length"], } # Update generator and discriminator. 
for _ in range(config.num_disc_updates): sess.run(disc_update, feed_dict=train_feed) for _ in range(config.num_gen_updates): sess.run(gen_update, feed_dict=train_feed) # Reporting if step % config.export_every == 0: gen_sequence_np, metrics_np = sess.run( [gen_outputs["sequence"], metrics], feed_dict=train_feed) metrics_np["gen_sentence"] = utils.sequence_to_sentence( gen_sequence_np[0, :], id_to_word) saver.save( sess, save_path=config.checkpoint_dir + "scratchgan", global_step=global_step) metrics_np["model_path"] = tf.train.latest_checkpoint( config.checkpoint_dir) logging.info(metrics_np) # After training, export models. saver.save( sess, save_path=config.checkpoint_dir + "scratchgan", global_step=global_step) logging.info("Saved final model at %s.", tf.train.latest_checkpoint(config.checkpoint_dir)) def evaluate_pair(config, batch_size, checkpoint_path, data_dir, dataset, num_examples_for_eval): """Evaluates a pair generator discriminator. This function loads a discriminator from disk, a generator, and evaluates the discriminator against the generator. It returns the mean probability of the discriminator against several batches, and the FID of the generator against the validation data. It also writes evaluation samples to disk. Args: config: dict, the config file. batch_size: int, size of the batch. checkpoint_path: string, full path to the TF checkpoint on disk. data_dir: string, path to a directory containing the dataset. dataset: string, "emnlp2017", to select the right dataset. num_examples_for_eval: int, number of examples for evaluation. """ tf.reset_default_graph() logging.info("Evaluating checkpoint %s.", checkpoint_path) # Build graph. train_data, valid_data, word_to_id = reader.get_raw_data( data_dir, dataset=dataset) id_to_word = {v: k for k, v in word_to_id.items()} vocab_size = len(word_to_id) train_iterator = reader.iterator(raw_data=train_data, batch_size=batch_size) valid_iterator = reader.iterator(raw_data=valid_data, batch_size=batch_size) train_sequence = tf.placeholder( dtype=tf.int32, shape=[batch_size, reader.MAX_TOKENS_SEQUENCE[dataset]], name="train_sequence") train_sequence_length = tf.placeholder( dtype=tf.int32, shape=[batch_size], name="train_sequence_length") valid_sequence = tf.placeholder( dtype=tf.int32, shape=[batch_size, reader.MAX_TOKENS_SEQUENCE[dataset]], name="valid_sequence") valid_sequence_length = tf.placeholder( dtype=tf.int32, shape=[batch_size], name="valid_sequence_length") disc_inputs_train = { "sequence": train_sequence, "sequence_length": train_sequence_length, } disc_inputs_valid = { "sequence": valid_sequence, "sequence_length": valid_sequence_length, } if config.use_pretrained_embedding: embedding_source = utils.get_embedding_path(config.data_dir, config.dataset) vocab_file = "/tmp/vocab.txt" with gfile.GFile(vocab_file, "w") as f: for i in range(len(id_to_word)): f.write(id_to_word[i] + "\n") logging.info("Temporary vocab file: %s", vocab_file) else: embedding_source = None vocab_file = None gen = generators.LSTMGen( vocab_size=vocab_size, feature_sizes=[config.gen_feature_size] * config.num_layers_gen, max_sequence_length=reader.MAX_TOKENS_SEQUENCE[config.dataset], batch_size=config.batch_size, use_layer_norm=config.layer_norm_gen, trainable_embedding_size=config.trainable_embedding_size, input_dropout=config.gen_input_dropout, output_dropout=config.gen_output_dropout, pad_token=reader.PAD_INT, embedding_source=embedding_source, vocab_file=vocab_file, ) gen_outputs = gen() disc = discriminator_nets.LSTMEmbedDiscNet( 
vocab_size=vocab_size, feature_sizes=[config.disc_feature_size] * config.num_layers_disc, trainable_embedding_size=config.trainable_embedding_size, embedding_source=embedding_source, use_layer_norm=config.layer_norm_disc, pad_token=reader.PAD_INT, vocab_file=vocab_file, dropout=config.disc_dropout, ) disc_inputs = { "sequence": gen_outputs["sequence"], "sequence_length": gen_outputs["sequence_length"], } gen_logits = disc(**disc_inputs) train_logits = disc(**disc_inputs_train) valid_logits = disc(**disc_inputs_valid) # Saver. saver = tf.train.Saver() # Reduce over time and batch. train_probs = tf.reduce_mean(tf.nn.sigmoid(train_logits)) valid_probs = tf.reduce_mean(tf.nn.sigmoid(valid_logits)) gen_probs = tf.reduce_mean(tf.nn.sigmoid(gen_logits)) outputs = { "train_probs": train_probs, "valid_probs": valid_probs, "gen_probs": gen_probs, "gen_sequences": gen_outputs["sequence"], "valid_sequences": valid_sequence } # Get average discriminator score and store generated sequences. all_valid_sentences = [] all_gen_sentences = [] all_gen_sequences = [] mean_train_prob = 0.0 mean_valid_prob = 0.0 mean_gen_prob = 0.0 logging.info("Graph constructed, generating batches.") num_batches = num_examples_for_eval // batch_size + 1 # Restrict the thread pool size to prevent excessive GCU usage on Borg. tf_config = tf.ConfigProto() tf_config.intra_op_parallelism_threads = 16 tf_config.inter_op_parallelism_threads = 16 with tf.Session(config=tf_config) as sess: # Restore variables from checkpoints. logging.info("Restoring variables.") saver.restore(sess, checkpoint_path) for i in range(num_batches): logging.info("Batch %d / %d", i, num_batches) train_data_np = next(train_iterator) valid_data_np = next(valid_iterator) feed_dict = { train_sequence: train_data_np["sequence"], train_sequence_length: train_data_np["sequence_length"], valid_sequence: valid_data_np["sequence"], valid_sequence_length: valid_data_np["sequence_length"], } outputs_np = sess.run(outputs, feed_dict=feed_dict) all_gen_sequences.extend(outputs_np["gen_sequences"]) gen_sentences = utils.batch_sequences_to_sentences( outputs_np["gen_sequences"], id_to_word) valid_sentences = utils.batch_sequences_to_sentences( outputs_np["valid_sequences"], id_to_word) all_valid_sentences.extend(valid_sentences) all_gen_sentences.extend(gen_sentences) mean_train_prob += outputs_np["train_probs"] / batch_size mean_valid_prob += outputs_np["valid_probs"] / batch_size mean_gen_prob += outputs_np["gen_probs"] / batch_size logging.info("Evaluating FID.") # Compute FID fid = eval_metrics.fid( generated_sentences=all_gen_sentences[:num_examples_for_eval], real_sentences=all_valid_sentences[:num_examples_for_eval]) utils.write_eval_results(config.checkpoint_dir, all_gen_sentences, os.path.basename(checkpoint_path), mean_train_prob, mean_valid_prob, mean_gen_prob, fid) if __name__ == "__main__": app.run(main)
deepmind-research-master
scratchgan/experiment.py
# Lint as: python3 # Copyright 2019 DeepMind Technologies Limited and Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os from absl import logging import numpy as np import tensorflow.compat.v1 as tf from tensorflow.compat.v1.io import gfile from scratchgan import reader EVAL_FILENAME = "evaluated_checkpoints.csv" GLOVE_DIM = 300 GLOVE_STD = 0.3836 # Standard dev. of GloVe embeddings. def _get_embedding_initializer(vocab_file, embedding_source, vocab_size): """Loads pretrained embeddings from a file in GloVe format.""" with gfile.GFile(embedding_source, "r") as f: embedding_lines = f.readlines() # First line contains embedding dim. _, embedding_dim = list(map(int, embedding_lines[0].split())) # Get the tokens as strings. tokens = [line.split()[0] for line in embedding_lines[1:]] # Get the actual embedding matrix. unsorted_emb = np.array( [[float(x) for x in line.split()[1:]] for line in embedding_lines[1:]]) # Get the expected vocab order. with gfile.GFile(vocab_file, "r") as f: tokens_order = [l.strip() for l in f.readlines()] assert vocab_size == len(tokens_order) # Put the embeddings in the order. sorted_emb = np.zeros((vocab_size, embedding_dim)) for i, token in enumerate(tokens_order): if token in tokens: sorted_emb[i, :] = unsorted_emb[tokens.index(token), :] else: # If we don't have a pretrained embedding, initialize randomly. sorted_emb[i, :] = np.random.normal( loc=0.0, scale=GLOVE_STD, size=(GLOVE_DIM,)) return sorted_emb.astype(np.float32) def append_position_signal(embeddings, position_dim=8): """Append position signal. See get_position_signal.""" batch_size, sequence_length, embedding_dim = embeddings.get_shape().as_list() positions = get_position_signal(sequence_length, position_dim) # Append to embeddings. position_inputs = tf.tile(positions[None, :, :], [batch_size, 1, 1]) embeddings_pos = tf.concat([embeddings, position_inputs], axis=2) embeddings_pos.shape.assert_is_compatible_with( [batch_size, sequence_length, embedding_dim + position_dim]) return embeddings_pos def get_position_signal(sequence_length, position_dim=8): """Return fixed position signal as sine waves. Sine waves frequencies are linearly spaced so that shortest is 2 and longest is half the maximum length. That way the longest frequency is long enough to be monotonous over the whole sequence length. Sine waves are also shifted so that they don't all start with the same value. We don't use learned positional embeddings because these embeddings are projected linearly along with the original embeddings, and the projection is learned. Args: sequence_length: int, T, length of the sequence.. position_dim: int, P, number of sine waves. Returns: A [T, P] tensor, position embeddings. """ # Compute the frequencies. periods = tf.exp( tf.lin_space( tf.log(2.0), tf.log(tf.to_float(sequence_length)), position_dim)) frequencies = 1.0 / periods # Shape [T, P]. # Compute the sine waves. 
xs = frequencies[None, :] * tf.to_float(tf.range(sequence_length)[:, None]) shifts = tf.lin_space(0.0, 2.0, position_dim)[None, :] # [1, P] positions = tf.math.cos(math.pi * (xs + shifts)) # [T, P] positions.shape.assert_is_compatible_with([sequence_length, position_dim]) return positions def get_mask_by_length(lengths, max_length): """Returns a mask where x[i , j] = (j < lengths[i]). Args: lengths: [B] tensor of int32 such that 0 <= lengths[i] <= max_length. max_length: scalar tensor of int32. Returns: [B, max_length] tensor of booleans such that x[i, j] is True if and only if j < lengths[i]. """ batch_size = lengths.get_shape().as_list()[0] indices = tf.range(start=0, limit=max_length) all_indices = tf.tile(indices[None, :], [batch_size, 1]) all_lengths = tf.tile(lengths[:, None], [1, max_length]) mask = (all_indices < all_lengths) mask_boolean = tf.cast(mask, tf.bool) return mask_boolean def get_mask_past_symbol(reference, symbol, optimize_for_tpu=False): """For each row, mask is True before and at the first occurrence of symbol.""" batch_size, max_length = reference.get_shape().as_list() symbol = tf.convert_to_tensor(symbol) symbol.shape.assert_is_compatible_with([]) first_indices = get_first_occurrence_indices(reference, symbol, optimize_for_tpu) first_indices.shape.assert_is_compatible_with([batch_size]) keep_lengths = tf.minimum(first_indices, max_length) mask = get_mask_by_length(keep_lengths, max_length) mask.shape.assert_is_compatible_with([batch_size, max_length]) mask.set_shape([batch_size, max_length]) return mask def get_first_occurrence_indices(reference, symbol, optimize_for_tpu=False): """For each row in reference, get index after the first occurrence of symbol. If symbol is not present on a row, return reference.shape[1] instead. Args: reference: [B, T] tensor of elements of the same type as symbol. symbol: int or [] scalar tensor of the same dtype as symbol. optimize_for_tpu: bool, whether to use a TPU-capable variant. Returns: A [B] reference of tf.int32 where x[i] is such that reference[i, x[i]-1] == symbol, and reference[i, j] != symbol for j<i-1. If symbol is not present on row i then x[i] = T. """ if optimize_for_tpu: # Run code which can be compiled on TPU. # Transpose refernce to [T, B] reference = tf.transpose(reference, [1, 0]) range_tensor = tf.range(reference.shape.as_list()[0]) indexes = tf.stack([range_tensor] * reference.shape.as_list()[1], 1) symbol = tf.stack([symbol] * reference.shape.as_list()[1], 0) initial_indices = tf.constant( reference.shape.as_list()[0], shape=[reference.shape.as_list()[1]], dtype=tf.int32) # We want a function which moves backwards. def fn(current_index, elems): ref, ind = elems return tf.where(tf.equal(ref, symbol), ind + 1, current_index) min_indexes = tf.scan( fn, (reference, indexes), initializer=initial_indices, parallel_iterations=1, reverse=True) return min_indexes[0] batch_size, max_length = reference.get_shape().as_list() symbol = tf.convert_to_tensor(symbol) symbol.shape.assert_is_compatible_with([]) # Add symbol at the end of each row, to make sure tf.where works. tensor = tf.concat( [reference, tf.tile(symbol[None, None], [batch_size, 1])], axis=1) index_all_occurrences = tf.where(tf.equal(tensor, symbol)) index_all_occurrences = tf.cast(index_all_occurrences, tf.int32) # `index_all_occurrences` is a [N, 2] tensor with coordinates of all positions # of `symbol` in `tensor`. So N will be >= batch size since there can be # several `symbol` in one row of tensor. 
We need to take only the position # of the first occurrence for each row. `segment_min` does that, taking the # lowest column index for each row index. index_first_occurrences = tf.segment_min(index_all_occurrences[:, 1], index_all_occurrences[:, 0]) index_first_occurrences.set_shape([batch_size]) index_first_occurrences = tf.minimum(index_first_occurrences + 1, max_length) return index_first_occurrences def sequence_to_sentence(sequence, id_to_word): """Turn a sequence into a sentence , inverse of sentence_to_sequence.""" words = [] for token_index in sequence: if token_index in id_to_word: words.append(id_to_word[token_index]) else: words.append(reader.UNK) return " ".join(words) def batch_sequences_to_sentences(sequences, id_to_word): return [sequence_to_sentence(sequence, id_to_word) for sequence in sequences] def write_eval_results(checkpoint_dir, all_gen_sentences, checkpoint_name, mean_train_prob, mean_valid_prob, mean_gen_prob, fid): """Write evaluation results to disk.""" to_write = ",".join( map(str, [ checkpoint_name, mean_train_prob, mean_valid_prob, mean_gen_prob, fid ])) eval_filepath = os.path.join(checkpoint_dir, EVAL_FILENAME) previous_eval_content = "" if gfile.exists(eval_filepath): with gfile.GFile(eval_filepath, "r") as f: previous_eval_content = f.read() with gfile.GFile(eval_filepath, "w") as f: f.write(previous_eval_content + to_write + "\n") with gfile.GFile( os.path.join(checkpoint_dir, checkpoint_name + "_sentences.txt"), "w") as f: f.write("\n".join(all_gen_sentences)) def maybe_pick_models_to_evaluate(checkpoint_dir): """Pick a checkpoint to evaluate that has not been evaluated already.""" logging.info("Picking checkpoint to evaluate from %s.", checkpoint_dir) filenames = gfile.listdir(checkpoint_dir) filenames = [f[:-5] for f in filenames if f[-5:] == ".meta"] logging.info("Found existing checkpoints: %s", filenames) evaluated_filenames = [] if gfile.exists(os.path.join(checkpoint_dir, EVAL_FILENAME)): with gfile.GFile(os.path.join(checkpoint_dir, EVAL_FILENAME), "r") as f: evaluated_filenames = [l.strip().split(",")[0] for l in f.readlines()] logging.info("Found already evaluated checkpoints: %s", evaluated_filenames) checkpoints_to_evaluate = [ f for f in filenames if f not in evaluated_filenames ] logging.info("Remaining potential checkpoints: %s", checkpoints_to_evaluate) if checkpoints_to_evaluate: return os.path.join(checkpoint_dir, checkpoints_to_evaluate[0]) else: return None def get_embedding_path(data_dir, dataset): """By convention, this is where we store the embedding.""" return os.path.join(data_dir, "glove_%s.txt" % dataset) def make_partially_trainable_embeddings(vocab_file, embedding_source, vocab_size, trainable_embedding_size): """Makes embedding matrix with pretrained GloVe [1] part and trainable part. [1] Pennington, J., Socher, R., & Manning, C. (2014, October). Glove: Global vectors for word representation. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP) (pp. 1532-1543). Args: vocab_file: vocabulary file. embedding_source: path to the actual embeddings. vocab_size: number of words in vocabulary. trainable_embedding_size: size of the trainable part of the embeddings. Returns: A matrix of partially pretrained embeddings. """ # Our embeddings have 2 parts: a pre-trained, frozen, GloVe part, # and a trainable, randomly initialized part. # The standard deviation of the GloVe part is used to initialize # the trainable part, so that both part have roughly the same distribution. 
# # Let g_ij be the j-th coordinates of the GloVe embedding of the i-th word. # So that 0 < i < |vocab| and 0 < j < 300. # Then sum_ij (g_ij - sum_kl g_kl)^2 = (0.3836)^2 # # In reality g_ij follows a truncated normal distribution # min(max(N(0, s), -4.2), 4.2) but we approximate it by N(0, 0.3836). embedding_initializer = _get_embedding_initializer( vocab_file=vocab_file, embedding_source=embedding_source, vocab_size=vocab_size) pretrained_embedding = tf.get_variable( "pretrained_embedding", initializer=embedding_initializer, dtype=tf.float32) trainable_embedding = tf.get_variable( "trainable_embedding", shape=[vocab_size, trainable_embedding_size], initializer=tf.initializers.random_normal(mean=0.0, stddev=GLOVE_STD)) # We just concatenate embeddings, they will pass through a projection # matrix afterwards. embedding = tf.concat([pretrained_embedding, trainable_embedding], axis=1) return embedding
deepmind-research-master
scratchgan/utils.py
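A tiny worked example (added for illustration, not from the repository) of the PAD-masking helpers in scratchgan/utils.py: `get_first_occurrence_indices` returns the index just past the first PAD token, and `get_mask_past_symbol` keeps every position up to and including it.

import tensorflow.compat.v1 as tf
from scratchgan import utils

tf.disable_v2_behavior()
reference = tf.constant([[4, 7, 0, 9],
                         [4, 7, 9, 5]], dtype=tf.int32)  # 0 plays the role of PAD_INT
first = utils.get_first_occurrence_indices(reference, 0)
mask = utils.get_mask_past_symbol(reference, 0)
with tf.Session() as sess:
  print(sess.run(first))  # [3 4]: index just past the first 0, or row length if absent
  print(sess.run(mask))   # [[ True  True  True False], [ True  True  True  True]]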
# Lint as: python3 # Copyright 2019 DeepMind Technologies Limited and Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Losses for sequential GANs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow.compat.v1 as tf def sequential_cross_entropy_loss(logits, expected): """The cross entropy loss for binary classification. Used to train the discriminator when not using WGAN loss. Assume logits is the log probability of classifying as 1. (real). Args: logits: a `tf.Tensor`, the model produced logits, shape [batch_size, sequence_length]. expected: a `tf.Tensor`, the expected output, shape [batch_size, sequence_length]. Returns: A scalar `tf.Tensor`, the average loss obtained on the given inputs. """ batch_size, sequence_length = logits.shape.as_list() expected = tf.cast(expected, tf.float32) ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=expected, logits=logits) return tf.reshape(ce, [batch_size, sequence_length]) def reinforce_loss(disc_logits, gen_logprobs, gamma, decay): """The REINFORCE loss. Args: disc_logits: float tensor, shape [batch_size, sequence_length]. gen_logprobs: float32 tensor, shape [batch_size, sequence_length] gamma: a float, discount factor for cumulative reward. decay: a float, decay rate for the EWMA baseline of REINFORCE. Returns: Float tensor, shape [batch_size, sequence_length], the REINFORCE loss for each timestep. """ # Assume 1 logit for each timestep. batch_size, sequence_length = disc_logits.shape.as_list() gen_logprobs.shape.assert_is_compatible_with([batch_size, sequence_length]) disc_predictions = tf.nn.sigmoid(disc_logits) # MaskGAN uses log(D), but this is more stable empirically. rewards = 2.0 * disc_predictions - 1 # Compute cumulative rewards. rewards_list = tf.unstack(rewards, axis=1) cumulative_rewards = [] for t in range(sequence_length): cum_value = tf.zeros(shape=[batch_size]) for s in range(t, sequence_length): cum_value += np.power(gamma, (s - t)) * rewards_list[s] cumulative_rewards.append(cum_value) cumulative_rewards = tf.stack(cumulative_rewards, axis=1) cumulative_rewards.shape.assert_is_compatible_with( [batch_size, sequence_length]) with tf.variable_scope("reinforce", reuse=tf.AUTO_REUSE): ewma_reward = tf.get_variable("ewma_reward", initializer=0.0) mean_reward = tf.reduce_mean(cumulative_rewards) new_ewma_reward = decay * ewma_reward + (1.0 - decay) * mean_reward update_op = tf.assign(ewma_reward, new_ewma_reward) # REINFORCE with tf.control_dependencies([update_op]): advantage = cumulative_rewards - ewma_reward loss = -tf.stop_gradient(advantage) * gen_logprobs loss.shape.assert_is_compatible_with([batch_size, sequence_length]) return loss, cumulative_rewards, ewma_reward
deepmind-research-master
scratchgan/losses.py
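To make the reward shaping in `reinforce_loss` concrete, here is a NumPy-only sketch of the discounted cumulative reward it builds per timestep; the reward values are made up, and 0.23 is simply the default --gamma in scratchgan/experiment.py.

import numpy as np

gamma = 0.23
rewards = np.array([0.2, -0.5, 0.8, 0.1])     # per-timestep 2 * sigmoid(disc_logits) - 1
T = len(rewards)
cumulative = np.array(
    [sum(gamma ** (s - t) * rewards[s] for s in range(t, T)) for t in range(T)])
print(cumulative)  # same quantity as the nested loop over timesteps in reinforce_loss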
# Copyright 2019 DeepMind Technologies Limited and Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Discriminator networks for text data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sonnet as snt import tensorflow.compat.v1 as tf from scratchgan import utils class LSTMEmbedDiscNet(snt.AbstractModule): """An LSTM discriminator that operates on word indexes.""" def __init__(self, feature_sizes, vocab_size, use_layer_norm, trainable_embedding_size, dropout, pad_token, embedding_source=None, vocab_file=None, name='LSTMEmbedDiscNet'): super(LSTMEmbedDiscNet, self).__init__(name=name) self._feature_sizes = feature_sizes self._vocab_size = vocab_size self._use_layer_norm = use_layer_norm self._trainable_embedding_size = trainable_embedding_size self._embedding_source = embedding_source self._vocab_file = vocab_file self._dropout = dropout self._pad_token = pad_token if self._embedding_source: assert vocab_file def _build(self, sequence, sequence_length, is_training=True): """Connect to the graph. Args: sequence: A [batch_size, max_sequence_length] tensor of int. For example the indices of words as sampled by the generator. sequence_length: A [batch_size] tensor of int. Length of the sequence. is_training: Boolean, False to disable dropout. Returns: A [batch_size, max_sequence_length, feature_size] tensor of floats. For each sequence in the batch, the features should (hopefully) allow to distinguish if the value at each timestep is real or generated. 
""" batch_size, max_sequence_length = sequence.shape.as_list() keep_prob = (1.0 - self._dropout) if is_training else 1.0 if self._embedding_source: all_embeddings = utils.make_partially_trainable_embeddings( self._vocab_file, self._embedding_source, self._vocab_size, self._trainable_embedding_size) else: all_embeddings = tf.get_variable( 'trainable_embedding', shape=[self._vocab_size, self._trainable_embedding_size], trainable=True) _, self._embedding_size = all_embeddings.shape.as_list() input_embeddings = tf.nn.dropout(all_embeddings, keep_prob=keep_prob) embeddings = tf.nn.embedding_lookup(input_embeddings, sequence) embeddings.shape.assert_is_compatible_with( [batch_size, max_sequence_length, self._embedding_size]) position_dim = 8 embeddings_pos = utils.append_position_signal(embeddings, position_dim) embeddings_pos = tf.reshape( embeddings_pos, [batch_size * max_sequence_length, self._embedding_size + position_dim]) lstm_inputs = snt.Linear(self._feature_sizes[0])(embeddings_pos) lstm_inputs = tf.reshape( lstm_inputs, [batch_size, max_sequence_length, self._feature_sizes[0]]) lstm_inputs.shape.assert_is_compatible_with( [batch_size, max_sequence_length, self._feature_sizes[0]]) encoder_cells = [] for feature_size in self._feature_sizes: encoder_cells += [ snt.LSTM(feature_size, use_layer_norm=self._use_layer_norm) ] encoder_cell = snt.DeepRNN(encoder_cells) initial_state = encoder_cell.initial_state(batch_size) hidden_states, _ = tf.nn.dynamic_rnn( cell=encoder_cell, inputs=lstm_inputs, sequence_length=sequence_length, initial_state=initial_state, swap_memory=True) hidden_states.shape.assert_is_compatible_with( [batch_size, max_sequence_length, sum(self._feature_sizes)]) logits = snt.BatchApply(snt.Linear(1))(hidden_states) logits.shape.assert_is_compatible_with([batch_size, max_sequence_length, 1]) logits_flat = tf.reshape(logits, [batch_size, max_sequence_length]) # Mask past first PAD symbol # # Note that we still rely on tf.nn.bidirectional_dynamic_rnn taking # into account the sequence_length properly, because otherwise # the logits at a given timestep will depend on the inputs for all other # timesteps, including the ones that should be masked. mask = utils.get_mask_past_symbol(sequence, self._pad_token) masked_logits_flat = logits_flat * tf.cast(mask, tf.float32) return masked_logits_flat
deepmind-research-master
scratchgan/discriminator_nets.py
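A hedged construction sketch for `LSTMEmbedDiscNet`, analogous to its use in scratchgan/experiment.py; the sizes and the all-PAD input batch are placeholders chosen only to show the expected shapes.

import tensorflow.compat.v1 as tf
from scratchgan import discriminator_nets
from scratchgan import reader

tf.disable_v2_behavior()
disc = discriminator_nets.LSTMEmbedDiscNet(
    feature_sizes=[512],
    vocab_size=20000,
    use_layer_norm=True,
    trainable_embedding_size=64,
    dropout=0.1,
    pad_token=reader.PAD_INT,
)
# An all-PAD batch of 8 sequences, just to exercise the shapes.
sequence = tf.zeros([8, reader.MAX_TOKENS_SEQUENCE["emnlp2017"]], dtype=tf.int32)
sequence_length = tf.fill([8], 1)
logits = disc(sequence=sequence, sequence_length=sequence_length)
# logits: [8, 52], one logit per timestep, zeroed past the first PAD token.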
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Utilities for reading open sourced Learning Complex Physics data.""" import functools import numpy as np import tensorflow.compat.v1 as tf # Create a description of the features. _FEATURE_DESCRIPTION = { 'position': tf.io.VarLenFeature(tf.string), } _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT = _FEATURE_DESCRIPTION.copy() _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT['step_context'] = tf.io.VarLenFeature( tf.string) _FEATURE_DTYPES = { 'position': { 'in': np.float32, 'out': tf.float32 }, 'step_context': { 'in': np.float32, 'out': tf.float32 } } _CONTEXT_FEATURES = { 'key': tf.io.FixedLenFeature([], tf.int64, default_value=0), 'particle_type': tf.io.VarLenFeature(tf.string) } def convert_to_tensor(x, encoded_dtype): if len(x) == 1: out = np.frombuffer(x[0].numpy(), dtype=encoded_dtype) else: out = [] for el in x: out.append(np.frombuffer(el.numpy(), dtype=encoded_dtype)) out = tf.convert_to_tensor(np.array(out)) return out def parse_serialized_simulation_example(example_proto, metadata): """Parses a serialized simulation tf.SequenceExample. Args: example_proto: A string encoding of the tf.SequenceExample proto. metadata: A dict of metadata for the dataset. Returns: context: A dict, with features that do not vary over the trajectory. parsed_features: A dict of tf.Tensors representing the parsed examples across time, where axis zero is the time axis. """ if 'context_mean' in metadata: feature_description = _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT else: feature_description = _FEATURE_DESCRIPTION context, parsed_features = tf.io.parse_single_sequence_example( example_proto, context_features=_CONTEXT_FEATURES, sequence_features=feature_description) for feature_key, item in parsed_features.items(): convert_fn = functools.partial( convert_to_tensor, encoded_dtype=_FEATURE_DTYPES[feature_key]['in']) parsed_features[feature_key] = tf.py_function( convert_fn, inp=[item.values], Tout=_FEATURE_DTYPES[feature_key]['out']) # There is an extra frame at the beginning so we can calculate pos change # for all frames used in the paper. position_shape = [metadata['sequence_length'] + 1, -1, metadata['dim']] # Reshape positions to correct dim: parsed_features['position'] = tf.reshape(parsed_features['position'], position_shape) # Set correct shapes of the remaining tensors. 
sequence_length = metadata['sequence_length'] + 1 if 'context_mean' in metadata: context_feat_len = len(metadata['context_mean']) parsed_features['step_context'] = tf.reshape( parsed_features['step_context'], [sequence_length, context_feat_len]) # Decode particle type explicitly context['particle_type'] = tf.py_function( functools.partial(convert_fn, encoded_dtype=np.int64), inp=[context['particle_type'].values], Tout=[tf.int64]) context['particle_type'] = tf.reshape(context['particle_type'], [-1]) return context, parsed_features def split_trajectory(context, features, window_length=7): """Splits trajectory into sliding windows.""" # Our strategy is to make sure all the leading dimensions are the same size, # then we can use from_tensor_slices. trajectory_length = features['position'].get_shape().as_list()[0] # We then stack window_length position changes so the final # trajectory length will be - window_length +1 (the 1 to make sure we get # the last split). input_trajectory_length = trajectory_length - window_length + 1 model_input_features = {} # Prepare the context features per step. model_input_features['particle_type'] = tf.tile( tf.expand_dims(context['particle_type'], axis=0), [input_trajectory_length, 1]) if 'step_context' in features: global_stack = [] for idx in range(input_trajectory_length): global_stack.append(features['step_context'][idx:idx + window_length]) model_input_features['step_context'] = tf.stack(global_stack) pos_stack = [] for idx in range(input_trajectory_length): pos_stack.append(features['position'][idx:idx + window_length]) # Get the corresponding positions model_input_features['position'] = tf.stack(pos_stack) return tf.data.Dataset.from_tensor_slices(model_input_features)
deepmind-research-master
learning_to_simulate/reading_utils.py
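The sketch below is an assumption about how these helpers are meant to be combined, not code from the repository: it builds a tf.data pipeline with `parse_serialized_simulation_example` and `split_trajectory`. The dataset directory, file names and the metadata.json layout are placeholders.

import functools
import json
import tensorflow.compat.v1 as tf
from learning_to_simulate import reading_utils

# Placeholder paths; metadata.json is assumed to hold "sequence_length" and "dim".
with open("/tmp/WaterRamps/metadata.json") as f:
  metadata = json.load(f)

ds = tf.data.TFRecordDataset(["/tmp/WaterRamps/train.tfrecord"])
ds = ds.map(functools.partial(
    reading_utils.parse_serialized_simulation_example, metadata=metadata))
# Each element is (context, features); cut every trajectory into sliding windows
# of 7 frames (6 input frames plus the target frame).
ds = ds.flat_map(lambda context, features: reading_utils.split_trajectory(
    context, features, window_length=7))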
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Simple matplotlib rendering of a rollout prediction against ground truth. Usage (from parent directory): `python -m learning_to_simulate.render_rollout --rollout_path={OUTPUT_PATH}/rollout_test_1.pkl` Where {OUTPUT_PATH} is the output path passed to `train.py` in "eval_rollout" mode. It may require installing Tkinter with `sudo apt-get install python3.7-tk`. """ # pylint: disable=line-too-long import pickle from absl import app from absl import flags from matplotlib import animation import matplotlib.pyplot as plt import numpy as np flags.DEFINE_string("rollout_path", None, help="Path to rollout pickle file") flags.DEFINE_integer("step_stride", 3, help="Stride of steps to skip.") flags.DEFINE_boolean("block_on_show", True, help="For test purposes.") FLAGS = flags.FLAGS TYPE_TO_COLOR = { 3: "black", # Boundary particles. 0: "green", # Rigid solids. 7: "magenta", # Goop. 6: "gold", # Sand. 5: "blue", # Water. } def main(unused_argv): if not FLAGS.rollout_path: raise ValueError("A `rollout_path` must be passed.") with open(FLAGS.rollout_path, "rb") as file: rollout_data = pickle.load(file) fig, axes = plt.subplots(1, 2, figsize=(10, 5)) plot_info = [] for ax_i, (label, rollout_field) in enumerate( [("Ground truth", "ground_truth_rollout"), ("Prediction", "predicted_rollout")]): # Append the initial positions to get the full trajectory. trajectory = np.concatenate([ rollout_data["initial_positions"], rollout_data[rollout_field]], axis=0) ax = axes[ax_i] ax.set_title(label) bounds = rollout_data["metadata"]["bounds"] ax.set_xlim(bounds[0][0], bounds[0][1]) ax.set_ylim(bounds[1][0], bounds[1][1]) ax.set_xticks([]) ax.set_yticks([]) ax.set_aspect(1.) points = { particle_type: ax.plot([], [], "o", ms=2, color=color)[0] for particle_type, color in TYPE_TO_COLOR.items()} plot_info.append((ax, trajectory, points)) num_steps = trajectory.shape[0] def update(step_i): outputs = [] for _, trajectory, points in plot_info: for particle_type, line in points.items(): mask = rollout_data["particle_types"] == particle_type line.set_data(trajectory[step_i, mask, 0], trajectory[step_i, mask, 1]) outputs.append(line) return outputs unused_animation = animation.FuncAnimation( fig, update, frames=np.arange(0, num_steps, FLAGS.step_stride), interval=10) plt.show(block=FLAGS.block_on_show) if __name__ == "__main__": app.run(main)
deepmind-research-master
learning_to_simulate/render_rollout.py
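For reference, a short snippet (added here, with a placeholder directory) showing the fields render_rollout.py expects inside a rollout pickle produced by train.py in eval_rollout mode.

import pickle

with open("/tmp/rollouts/rollout_test_1.pkl", "rb") as f:
  rollout_data = pickle.load(f)

print(rollout_data["metadata"]["bounds"])          # axis limits used for plotting
print(rollout_data["initial_positions"].shape)     # seed frames of the trajectory
print(rollout_data["predicted_rollout"].shape)     # [num_steps, num_particles, num_dims]
print(rollout_data["ground_truth_rollout"].shape)
print(rollout_data["particle_types"].shape)        # [num_particles]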
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Full model implementation accompanying ICML 2020 submission. "Learning to Simulate Complex Physics with Graph Networks" Alvaro Sanchez-Gonzalez*, Jonathan Godwin*, Tobias Pfaff*, Rex Ying, Jure Leskovec, Peter W. Battaglia https://arxiv.org/abs/2002.09405 """ import graph_nets as gn import sonnet as snt import tensorflow.compat.v1 as tf from learning_to_simulate import connectivity_utils from learning_to_simulate import graph_network STD_EPSILON = 1e-8 class LearnedSimulator(snt.AbstractModule): """Learned simulator from https://arxiv.org/pdf/2002.09405.pdf.""" def __init__( self, num_dimensions, connectivity_radius, graph_network_kwargs, boundaries, normalization_stats, num_particle_types, particle_type_embedding_size, name="LearnedSimulator"): """Inits the model. Args: num_dimensions: Dimensionality of the problem. connectivity_radius: Scalar with the radius of connectivity. graph_network_kwargs: Keyword arguments to pass to the learned part of the graph network `model.EncodeProcessDecode`. boundaries: List of 2-tuples, containing the lower and upper boundaries of the cuboid containing the particles along each dimensions, matching the dimensionality of the problem. normalization_stats: Dictionary with statistics with keys "acceleration" and "velocity", containing a named tuple for each with mean and std fields, matching the dimensionality of the problem. num_particle_types: Number of different particle types. particle_type_embedding_size: Embedding size for the particle type. name: Name of the Sonnet module. """ super().__init__(name=name) self._connectivity_radius = connectivity_radius self._num_particle_types = num_particle_types self._boundaries = boundaries self._normalization_stats = normalization_stats with self._enter_variable_scope(): self._graph_network = graph_network.EncodeProcessDecode( output_size=num_dimensions, **graph_network_kwargs) if self._num_particle_types > 1: self._particle_type_embedding = tf.get_variable( "particle_embedding", [self._num_particle_types, particle_type_embedding_size], trainable=True, use_resource=True) def _build(self, position_sequence, n_particles_per_example, global_context=None, particle_types=None): """Produces a model step, outputting the next position for each particle. Args: position_sequence: Sequence of positions for each node in the batch, with shape [num_particles_in_batch, sequence_length, num_dimensions] n_particles_per_example: Number of particles for each graph in the batch with shape [batch_size] global_context: Tensor of shape [batch_size, context_size], with global context. particle_types: Integer tensor of shape [num_particles_in_batch] with the integer types of the particles, from 0 to `num_particle_types - 1`. If None, we assume all particles are the same type. 
Returns: Next position with shape [num_particles_in_batch, num_dimensions] for one step into the future from the input sequence. """ input_graphs_tuple = self._encoder_preprocessor( position_sequence, n_particles_per_example, global_context, particle_types) normalized_acceleration = self._graph_network(input_graphs_tuple) next_position = self._decoder_postprocessor( normalized_acceleration, position_sequence) return next_position def _encoder_preprocessor( self, position_sequence, n_node, global_context, particle_types): # Extract important features from the position_sequence. most_recent_position = position_sequence[:, -1] velocity_sequence = time_diff(position_sequence) # Finite-difference. # Get connectivity of the graph. (senders, receivers, n_edge ) = connectivity_utils.compute_connectivity_for_batch_pyfunc( most_recent_position, n_node, self._connectivity_radius) # Collect node features. node_features = [] # Normalized velocity sequence, merging spatial an time axis. velocity_stats = self._normalization_stats["velocity"] normalized_velocity_sequence = ( velocity_sequence - velocity_stats.mean) / velocity_stats.std flat_velocity_sequence = snt.MergeDims(start=1, size=2)( normalized_velocity_sequence) node_features.append(flat_velocity_sequence) # Normalized clipped distances to lower and upper boundaries. # boundaries are an array of shape [num_dimensions, 2], where the second # axis, provides the lower/upper boundaries. boundaries = tf.constant(self._boundaries, dtype=tf.float32) distance_to_lower_boundary = ( most_recent_position - tf.expand_dims(boundaries[:, 0], 0)) distance_to_upper_boundary = ( tf.expand_dims(boundaries[:, 1], 0) - most_recent_position) distance_to_boundaries = tf.concat( [distance_to_lower_boundary, distance_to_upper_boundary], axis=1) normalized_clipped_distance_to_boundaries = tf.clip_by_value( distance_to_boundaries / self._connectivity_radius, -1., 1.) node_features.append(normalized_clipped_distance_to_boundaries) # Particle type. if self._num_particle_types > 1: particle_type_embeddings = tf.nn.embedding_lookup( self._particle_type_embedding, particle_types) node_features.append(particle_type_embeddings) # Collect edge features. edge_features = [] # Relative displacement and distances normalized to radius normalized_relative_displacements = ( tf.gather(most_recent_position, senders) - tf.gather(most_recent_position, receivers)) / self._connectivity_radius edge_features.append(normalized_relative_displacements) normalized_relative_distances = tf.norm( normalized_relative_displacements, axis=-1, keepdims=True) edge_features.append(normalized_relative_distances) # Normalize the global context. if global_context is not None: context_stats = self._normalization_stats["context"] # Context in some datasets are all zero, so add an epsilon for numerical # stability. global_context = (global_context - context_stats.mean) / tf.math.maximum( context_stats.std, STD_EPSILON) return gn.graphs.GraphsTuple( nodes=tf.concat(node_features, axis=-1), edges=tf.concat(edge_features, axis=-1), globals=global_context, # self._graph_net will appending this to nodes. n_node=n_node, n_edge=n_edge, senders=senders, receivers=receivers, ) def _decoder_postprocessor(self, normalized_acceleration, position_sequence): # The model produces the output in normalized space so we apply inverse # normalization. 
acceleration_stats = self._normalization_stats["acceleration"] acceleration = ( normalized_acceleration * acceleration_stats.std ) + acceleration_stats.mean # Use an Euler integrator to go from acceleration to position, assuming # a dt=1 corresponding to the size of the finite difference. most_recent_position = position_sequence[:, -1] most_recent_velocity = most_recent_position - position_sequence[:, -2] new_velocity = most_recent_velocity + acceleration # * dt = 1 new_position = most_recent_position + new_velocity # * dt = 1 return new_position def get_predicted_and_target_normalized_accelerations( self, next_position, position_sequence_noise, position_sequence, n_particles_per_example, global_context=None, particle_types=None): # pylint: disable=g-doc-args """Produces normalized and predicted acceleration targets. Args: next_position: Tensor of shape [num_particles_in_batch, num_dimensions] with the positions the model should output given the inputs. position_sequence_noise: Tensor of the same shape as `position_sequence` with the noise to apply to each particle. position_sequence, n_node, global_context, particle_types: Inputs to the model as defined by `_build`. Returns: Tensors of shape [num_particles_in_batch, num_dimensions] with the predicted and target normalized accelerations. """ # Add noise to the input position sequence. noisy_position_sequence = position_sequence + position_sequence_noise # Perform the forward pass with the noisy position sequence. input_graphs_tuple = self._encoder_preprocessor( noisy_position_sequence, n_particles_per_example, global_context, particle_types) predicted_normalized_acceleration = self._graph_network(input_graphs_tuple) # Calculate the target acceleration, using an `adjusted_next_position `that # is shifted by the noise in the last input position. next_position_adjusted = next_position + position_sequence_noise[:, -1] target_normalized_acceleration = self._inverse_decoder_postprocessor( next_position_adjusted, noisy_position_sequence) # As a result the inverted Euler update in the `_inverse_decoder` produces: # * A target acceleration that does not explicitly correct for the noise in # the input positions, as the `next_position_adjusted` is different # from the true `next_position`. # * A target acceleration that exactly corrects noise in the input velocity # since the target next velocity calculated by the inverse Euler update # as `next_position_adjusted - noisy_position_sequence[:,-1]` # matches the ground truth next velocity (noise cancels out). return predicted_normalized_acceleration, target_normalized_acceleration def _inverse_decoder_postprocessor(self, next_position, position_sequence): """Inverse of `_decoder_postprocessor`.""" previous_position = position_sequence[:, -1] previous_velocity = previous_position - position_sequence[:, -2] next_velocity = next_position - previous_position acceleration = next_velocity - previous_velocity acceleration_stats = self._normalization_stats["acceleration"] normalized_acceleration = ( acceleration - acceleration_stats.mean) / acceleration_stats.std return normalized_acceleration def time_diff(input_sequence): return input_sequence[:, 1:] - input_sequence[:, :-1]
deepmind-research-master
learning_to_simulate/learned_simulator.py
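Not part of the repository: a minimal NumPy sketch of why `_inverse_decoder_postprocessor` exactly inverts `_decoder_postprocessor` (de-normalize, Euler-integrate with dt = 1, then difference and re-normalize); every value below is made up.

import numpy as np

# Toy normalization stats and a short position sequence for 3 particles in 2-D.
mean, std = 0.1, 2.0
position_sequence = np.random.randn(3, 6, 2)        # [particles, steps, dims]
normalized_acceleration = np.random.randn(3, 2)

# Forward (decoder): de-normalize and Euler-integrate with dt = 1.
acceleration = normalized_acceleration * std + mean
most_recent_position = position_sequence[:, -1]
most_recent_velocity = most_recent_position - position_sequence[:, -2]
new_velocity = most_recent_velocity + acceleration
next_position = most_recent_position + new_velocity

# Inverse (used to build training targets): finite differences, then normalize.
next_velocity = next_position - most_recent_position
recovered_acceleration = next_velocity - most_recent_velocity
recovered_normalized = (recovered_acceleration - mean) / std

assert np.allclose(recovered_normalized, normalized_acceleration)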
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # pylint: disable=line-too-long """Training script for https://arxiv.org/pdf/2002.09405.pdf. Example usage (from parent directory): `python -m learning_to_simulate.train --data_path={DATA_PATH} --model_path={MODEL_PATH}` Evaluate model from checkpoint (from parent directory): `python -m learning_to_simulate.train --data_path={DATA_PATH} --model_path={MODEL_PATH} --mode=eval` Produce rollouts (from parent directory): `python -m learning_to_simulate.train --data_path={DATA_PATH} --model_path={MODEL_PATH} --output_path={OUTPUT_PATH} --mode=eval_rollout` """ # pylint: enable=line-too-long import collections import functools import json import os import pickle from absl import app from absl import flags from absl import logging import numpy as np import tensorflow.compat.v1 as tf import tree from learning_to_simulate import learned_simulator from learning_to_simulate import noise_utils from learning_to_simulate import reading_utils flags.DEFINE_enum( 'mode', 'train', ['train', 'eval', 'eval_rollout'], help='Train model, one step evaluation or rollout evaluation.') flags.DEFINE_enum('eval_split', 'test', ['train', 'valid', 'test'], help='Split to use when running evaluation.') flags.DEFINE_string('data_path', None, help='The dataset directory.') flags.DEFINE_integer('batch_size', 2, help='The batch size.') flags.DEFINE_integer('num_steps', int(2e7), help='Number of steps of training.') flags.DEFINE_float('noise_std', 6.7e-4, help='The std deviation of the noise.') flags.DEFINE_string('model_path', None, help=('The path for saving checkpoints of the model. ' 'Defaults to a temporary directory.')) flags.DEFINE_string('output_path', None, help='The path for saving outputs (e.g. rollouts).') FLAGS = flags.FLAGS Stats = collections.namedtuple('Stats', ['mean', 'std']) INPUT_SEQUENCE_LENGTH = 6 # So we can calculate the last 5 velocities. NUM_PARTICLE_TYPES = 9 KINEMATIC_PARTICLE_ID = 3 def get_kinematic_mask(particle_types): """Returns a boolean mask, set to true for kinematic (obstacle) particles.""" return tf.equal(particle_types, KINEMATIC_PARTICLE_ID) def prepare_inputs(tensor_dict): """Prepares a single stack of inputs by calculating inputs and targets. Computes n_particles_per_example, which is a tensor that contains information about how to partition the axis - i.e. which nodes belong to which graph. Adds a batch axis to `n_particles_per_example` and `step_context` so they can later be batched using `batch_concat`. This batch will be the same as if the elements had been batched via stacking. Note that all other tensors have a variable size particle axis, and in this case they will simply be concatenated along that axis. Args: tensor_dict: A dict of tensors containing positions, and step context ( if available). 
Returns: A tuple of input features and target positions. """ # Position is encoded as [sequence_length, num_particles, dim] but the model # expects [num_particles, sequence_length, dim]. pos = tensor_dict['position'] pos = tf.transpose(pos, perm=[1, 0, 2]) # The target position is the final step of the stack of positions. target_position = pos[:, -1] # Remove the target from the input. tensor_dict['position'] = pos[:, :-1] # Compute the number of particles per example. num_particles = tf.shape(pos)[0] # Add an extra dimension for stacking via concat. tensor_dict['n_particles_per_example'] = num_particles[tf.newaxis] if 'step_context' in tensor_dict: # Take the input global context. We have a stack of global contexts, # and we take the penultimate since the final is the target. tensor_dict['step_context'] = tensor_dict['step_context'][-2] # Add an extra dimension for stacking via concat. tensor_dict['step_context'] = tensor_dict['step_context'][tf.newaxis] return tensor_dict, target_position def prepare_rollout_inputs(context, features): """Prepares an inputs trajectory for rollout.""" out_dict = {**context} # Position is encoded as [sequence_length, num_particles, dim] but the model # expects [num_particles, sequence_length, dim]. pos = tf.transpose(features['position'], [1, 0, 2]) # The target position is the final step of the stack of positions. target_position = pos[:, -1] # Remove the target from the input. out_dict['position'] = pos[:, :-1] # Compute the number of nodes out_dict['n_particles_per_example'] = [tf.shape(pos)[0]] if 'step_context' in features: out_dict['step_context'] = features['step_context'] out_dict['is_trajectory'] = tf.constant([True], tf.bool) return out_dict, target_position def batch_concat(dataset, batch_size): """We implement batching as concatenating on the leading axis.""" # We create a dataset of datasets of length batch_size. windowed_ds = dataset.window(batch_size) # The plan is then to reduce every nested dataset by concatenating. We can # do this using tf.data.Dataset.reduce. This requires an initial state, and # then incrementally reduces by running through the dataset # Get initial state. In this case this will be empty tensors of the # correct shape. initial_state = tree.map_structure( lambda spec: tf.zeros( # pylint: disable=g-long-lambda shape=[0] + spec.shape.as_list()[1:], dtype=spec.dtype), dataset.element_spec) # We run through the nest and concatenate each entry with the previous state. def reduce_window(initial_state, ds): return ds.reduce(initial_state, lambda x, y: tf.concat([x, y], axis=0)) return windowed_ds.map( lambda *x: tree.map_structure(reduce_window, initial_state, x)) def get_input_fn(data_path, batch_size, mode, split): """Gets the learning simulation input function for tf.estimator.Estimator. Args: data_path: the path to the dataset directory. batch_size: the number of graphs in a batch. mode: either 'one_step_train', 'one_step' or 'rollout' split: either 'train', 'valid' or 'test. Returns: The input function for the learning simulation model. """ def input_fn(): """Input function for learning simulation.""" # Loads the metadata of the dataset. metadata = _read_metadata(data_path) # Create a tf.data.Dataset from the TFRecord. ds = tf.data.TFRecordDataset([os.path.join(data_path, f'{split}.tfrecord')]) ds = ds.map(functools.partial( reading_utils.parse_serialized_simulation_example, metadata=metadata)) if mode.startswith('one_step'): # Splits an entire trajectory into chunks of 7 steps. 
# Previous 5 velocities, current velocity and target. split_with_window = functools.partial( reading_utils.split_trajectory, window_length=INPUT_SEQUENCE_LENGTH + 1) ds = ds.flat_map(split_with_window) # Splits a chunk into input steps and target steps ds = ds.map(prepare_inputs) # If in train mode, repeat dataset forever and shuffle. if mode == 'one_step_train': ds = ds.repeat() ds = ds.shuffle(512) # Custom batching on the leading axis. ds = batch_concat(ds, batch_size) elif mode == 'rollout': # Rollout evaluation only available for batch size 1 assert batch_size == 1 ds = ds.map(prepare_rollout_inputs) else: raise ValueError(f'mode: {mode} not recognized') return ds return input_fn def rollout(simulator, features, num_steps): """Rolls out a trajectory by applying the model in sequence.""" initial_positions = features['position'][:, 0:INPUT_SEQUENCE_LENGTH] ground_truth_positions = features['position'][:, INPUT_SEQUENCE_LENGTH:] global_context = features.get('step_context') def step_fn(step, current_positions, predictions): if global_context is None: global_context_step = None else: global_context_step = global_context[ step + INPUT_SEQUENCE_LENGTH - 1][tf.newaxis] next_position = simulator( current_positions, n_particles_per_example=features['n_particles_per_example'], particle_types=features['particle_type'], global_context=global_context_step) # Update kinematic particles from prescribed trajectory. kinematic_mask = get_kinematic_mask(features['particle_type']) next_position_ground_truth = ground_truth_positions[:, step] next_position = tf.where(kinematic_mask, next_position_ground_truth, next_position) updated_predictions = predictions.write(step, next_position) # Shift `current_positions`, removing the oldest position in the sequence # and appending the next position at the end. next_positions = tf.concat([current_positions[:, 1:], next_position[:, tf.newaxis]], axis=1) return (step + 1, next_positions, updated_predictions) predictions = tf.TensorArray(size=num_steps, dtype=tf.float32) _, _, predictions = tf.while_loop( cond=lambda step, state, prediction: tf.less(step, num_steps), body=step_fn, loop_vars=(0, initial_positions, predictions), back_prop=False, parallel_iterations=1) output_dict = { 'initial_positions': tf.transpose(initial_positions, [1, 0, 2]), 'predicted_rollout': predictions.stack(), 'ground_truth_rollout': tf.transpose(ground_truth_positions, [1, 0, 2]), 'particle_types': features['particle_type'], } if global_context is not None: output_dict['global_context'] = global_context return output_dict def _combine_std(std_x, std_y): return np.sqrt(std_x**2 + std_y**2) def _get_simulator(model_kwargs, metadata, acc_noise_std, vel_noise_std): """Instantiates the simulator.""" # Cast statistics to numpy so they are arrays when entering the model. 
cast = lambda v: np.array(v, dtype=np.float32) acceleration_stats = Stats( cast(metadata['acc_mean']), _combine_std(cast(metadata['acc_std']), acc_noise_std)) velocity_stats = Stats( cast(metadata['vel_mean']), _combine_std(cast(metadata['vel_std']), vel_noise_std)) normalization_stats = {'acceleration': acceleration_stats, 'velocity': velocity_stats} if 'context_mean' in metadata: context_stats = Stats( cast(metadata['context_mean']), cast(metadata['context_std'])) normalization_stats['context'] = context_stats simulator = learned_simulator.LearnedSimulator( num_dimensions=metadata['dim'], connectivity_radius=metadata['default_connectivity_radius'], graph_network_kwargs=model_kwargs, boundaries=metadata['bounds'], num_particle_types=NUM_PARTICLE_TYPES, normalization_stats=normalization_stats, particle_type_embedding_size=16) return simulator def get_one_step_estimator_fn(data_path, noise_std, latent_size=128, hidden_size=128, hidden_layers=2, message_passing_steps=10): """Gets one step model for training simulation.""" metadata = _read_metadata(data_path) model_kwargs = dict( latent_size=latent_size, mlp_hidden_size=hidden_size, mlp_num_hidden_layers=hidden_layers, num_message_passing_steps=message_passing_steps) def estimator_fn(features, labels, mode): target_next_position = labels simulator = _get_simulator(model_kwargs, metadata, vel_noise_std=noise_std, acc_noise_std=noise_std) # Sample the noise to add to the inputs to the model during training. sampled_noise = noise_utils.get_random_walk_noise_for_position_sequence( features['position'], noise_std_last_step=noise_std) non_kinematic_mask = tf.logical_not( get_kinematic_mask(features['particle_type'])) noise_mask = tf.cast( non_kinematic_mask, sampled_noise.dtype)[:, tf.newaxis, tf.newaxis] sampled_noise *= noise_mask # Get the predictions and target accelerations. pred_target = simulator.get_predicted_and_target_normalized_accelerations( next_position=target_next_position, position_sequence=features['position'], position_sequence_noise=sampled_noise, n_particles_per_example=features['n_particles_per_example'], particle_types=features['particle_type'], global_context=features.get('step_context')) pred_acceleration, target_acceleration = pred_target # Calculate the loss and mask out loss on kinematic particles/ loss = (pred_acceleration - target_acceleration)**2 num_non_kinematic = tf.reduce_sum( tf.cast(non_kinematic_mask, tf.float32)) loss = tf.where(non_kinematic_mask, loss, tf.zeros_like(loss)) loss = tf.reduce_sum(loss) / tf.reduce_sum(num_non_kinematic) global_step = tf.train.get_global_step() # Set learning rate to decay from 1e-4 to 1e-6 exponentially. min_lr = 1e-6 lr = tf.train.exponential_decay(learning_rate=1e-4 - min_lr, global_step=global_step, decay_steps=int(5e6), decay_rate=0.1) + min_lr opt = tf.train.AdamOptimizer(learning_rate=lr) train_op = opt.minimize(loss, global_step) # Calculate next position and add some additional eval metrics (only eval). 
predicted_next_position = simulator( position_sequence=features['position'], n_particles_per_example=features['n_particles_per_example'], particle_types=features['particle_type'], global_context=features.get('step_context')) predictions = {'predicted_next_position': predicted_next_position} eval_metrics_ops = { 'loss_mse': tf.metrics.mean_squared_error( pred_acceleration, target_acceleration), 'one_step_position_mse': tf.metrics.mean_squared_error( predicted_next_position, target_next_position) } return tf.estimator.EstimatorSpec( mode=mode, train_op=train_op, loss=loss, predictions=predictions, eval_metric_ops=eval_metrics_ops) return estimator_fn def get_rollout_estimator_fn(data_path, noise_std, latent_size=128, hidden_size=128, hidden_layers=2, message_passing_steps=10): """Gets the model function for tf.estimator.Estimator.""" metadata = _read_metadata(data_path) model_kwargs = dict( latent_size=latent_size, mlp_hidden_size=hidden_size, mlp_num_hidden_layers=hidden_layers, num_message_passing_steps=message_passing_steps) def estimator_fn(features, labels, mode): del labels # Labels to conform to estimator spec. simulator = _get_simulator(model_kwargs, metadata, acc_noise_std=noise_std, vel_noise_std=noise_std) num_steps = metadata['sequence_length'] - INPUT_SEQUENCE_LENGTH rollout_op = rollout(simulator, features, num_steps=num_steps) squared_error = (rollout_op['predicted_rollout'] - rollout_op['ground_truth_rollout']) ** 2 loss = tf.reduce_mean(squared_error) eval_ops = {'rollout_error_mse': tf.metrics.mean_squared_error( rollout_op['predicted_rollout'], rollout_op['ground_truth_rollout'])} # Add a leading axis, since Estimator's predict method insists that all # tensors have a shared leading batch axis fo the same dims. rollout_op = tree.map_structure(lambda x: x[tf.newaxis], rollout_op) return tf.estimator.EstimatorSpec( mode=mode, train_op=None, loss=loss, predictions=rollout_op, eval_metric_ops=eval_ops) return estimator_fn def _read_metadata(data_path): with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp: return json.loads(fp.read()) def main(_): """Train or evaluates the model.""" if FLAGS.mode in ['train', 'eval']: estimator = tf.estimator.Estimator( get_one_step_estimator_fn(FLAGS.data_path, FLAGS.noise_std), model_dir=FLAGS.model_path) if FLAGS.mode == 'train': # Train all the way through. estimator.train( input_fn=get_input_fn(FLAGS.data_path, FLAGS.batch_size, mode='one_step_train', split='train'), max_steps=FLAGS.num_steps) else: # One-step evaluation from checkpoint. eval_metrics = estimator.evaluate(input_fn=get_input_fn( FLAGS.data_path, FLAGS.batch_size, mode='one_step', split=FLAGS.eval_split)) logging.info('Evaluation metrics:') logging.info(eval_metrics) elif FLAGS.mode == 'eval_rollout': if not FLAGS.output_path: raise ValueError('A rollout path must be provided.') rollout_estimator = tf.estimator.Estimator( get_rollout_estimator_fn(FLAGS.data_path, FLAGS.noise_std), model_dir=FLAGS.model_path) # Iterate through rollouts saving them one by one. 
metadata = _read_metadata(FLAGS.data_path) rollout_iterator = rollout_estimator.predict( input_fn=get_input_fn(FLAGS.data_path, batch_size=1, mode='rollout', split=FLAGS.eval_split)) for example_index, example_rollout in enumerate(rollout_iterator): example_rollout['metadata'] = metadata filename = f'rollout_{FLAGS.eval_split}_{example_index}.pkl' filename = os.path.join(FLAGS.output_path, filename) logging.info('Saving: %s.', filename) if not os.path.exists(FLAGS.output_path): os.mkdir(FLAGS.output_path) with open(filename, 'wb') as file: pickle.dump(example_rollout, file) if __name__ == '__main__': tf.disable_v2_behavior() app.run(main)
deepmind-research-master
learning_to_simulate/train.py
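A small NumPy sketch (assuming the default, non-staircase behaviour of `tf.train.exponential_decay`) of the learning-rate schedule built in `get_one_step_estimator_fn`, which decays from 1e-4 towards the 1e-6 floor:

import numpy as np

def learning_rate(step, min_lr=1e-6, max_lr=1e-4, decay_steps=5e6,
                  decay_rate=0.1):
  """Mirrors tf.train.exponential_decay(max_lr - min_lr, ...) + min_lr."""
  return (max_lr - min_lr) * decay_rate ** (step / decay_steps) + min_lr

for step in [0, 5_000_000, 10_000_000, 20_000_000]:
  print(step, learning_rate(step))
# 0 -> 1e-4, 5M -> ~1.09e-5, 10M -> ~1.99e-6, 20M -> ~1.01e-6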
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tools to compute the connectivity of the graph.""" import functools import numpy as np from sklearn import neighbors import tensorflow.compat.v1 as tf def _compute_connectivity(positions, radius, add_self_edges): """Get the indices of connected edges with radius connectivity. Args: positions: Positions of nodes in the graph. Shape: [num_nodes_in_graph, num_dims]. radius: Radius of connectivity. add_self_edges: Whether to include self edges or not. Returns: senders indices [num_edges_in_graph] receiver indices [num_edges_in_graph] """ tree = neighbors.KDTree(positions) receivers_list = tree.query_radius(positions, r=radius) num_nodes = len(positions) senders = np.repeat(range(num_nodes), [len(a) for a in receivers_list]) receivers = np.concatenate(receivers_list, axis=0) if not add_self_edges: # Remove self edges. mask = senders != receivers senders = senders[mask] receivers = receivers[mask] return senders, receivers def _compute_connectivity_for_batch( positions, n_node, radius, add_self_edges): """`compute_connectivity` for a batch of graphs. Args: positions: Positions of nodes in the batch of graphs. Shape: [num_nodes_in_batch, num_dims]. n_node: Number of nodes for each graph in the batch. Shape: [num_graphs in batch]. radius: Radius of connectivity. add_self_edges: Whether to include self edges or not. Returns: senders indices [num_edges_in_batch] receiver indices [num_edges_in_batch] number of edges per graph [num_graphs_in_batch] """ # TODO(alvarosg): Consider if we want to support batches here or not. # Separate the positions corresponding to particles in different graphs. positions_per_graph_list = np.split(positions, np.cumsum(n_node[:-1]), axis=0) receivers_list = [] senders_list = [] n_edge_list = [] num_nodes_in_previous_graphs = 0 # Compute connectivity for each graph in the batch. for positions_graph_i in positions_per_graph_list: senders_graph_i, receivers_graph_i = _compute_connectivity( positions_graph_i, radius, add_self_edges) num_edges_graph_i = len(senders_graph_i) n_edge_list.append(num_edges_graph_i) # Because the inputs will be concatenated, we need to add offsets to the # sender and receiver indices according to the number of nodes in previous # graphs in the same batch. receivers_list.append(receivers_graph_i + num_nodes_in_previous_graphs) senders_list.append(senders_graph_i + num_nodes_in_previous_graphs) num_nodes_graph_i = len(positions_graph_i) num_nodes_in_previous_graphs += num_nodes_graph_i # Concatenate all of the results. 
senders = np.concatenate(senders_list, axis=0).astype(np.int32) receivers = np.concatenate(receivers_list, axis=0).astype(np.int32) n_edge = np.stack(n_edge_list).astype(np.int32) return senders, receivers, n_edge def compute_connectivity_for_batch_pyfunc( positions, n_node, radius, add_self_edges=True): """`_compute_connectivity_for_batch` wrapped in a pyfunc.""" partial_fn = functools.partial( _compute_connectivity_for_batch, add_self_edges=add_self_edges) senders, receivers, n_edge = tf.py_function( partial_fn, [positions, n_node, radius], [tf.int32, tf.int32, tf.int32]) senders.set_shape([None]) receivers.set_shape([None]) n_edge.set_shape(n_node.get_shape()) return senders, receivers, n_edge
deepmind-research-master
learning_to_simulate/connectivity_utils.py
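A standalone sketch of the radius connectivity computed by `_compute_connectivity`, using the same `sklearn` KDTree call on a handful of made-up 2-D points:

import numpy as np
from sklearn import neighbors

# Four points on a line, radius 1.5: each point connects to its near neighbours.
positions = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [5.0, 0.0]])
tree = neighbors.KDTree(positions)
receivers_list = tree.query_radius(positions, r=1.5)
senders = np.repeat(range(len(positions)), [len(a) for a in receivers_list])
receivers = np.concatenate(receivers_list, axis=0)
mask = senders != receivers  # drop self edges, as add_self_edges=False does
print(list(zip(senders[mask], receivers[mask])))
# e.g. [(0, 1), (1, 0), (1, 2), (2, 1)]; point 3 ends up isolated.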
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Methods to calculate input noise."""

import tensorflow.compat.v1 as tf

from learning_to_simulate import learned_simulator


def get_random_walk_noise_for_position_sequence(
    position_sequence, noise_std_last_step):
  """Returns random-walk noise in the velocity applied to the position."""

  velocity_sequence = learned_simulator.time_diff(position_sequence)

  # We want the noise scale in the velocity at the last step to be fixed.
  # Because we are going to compose noise at each step using a random walk:
  # std_last_step**2 = num_velocities * std_each_step**2
  # so to keep `std_last_step` fixed, we apply at each step:
  # std_each_step = std_last_step / np.sqrt(num_input_velocities)
  # TODO(alvarosg): Make sure this is consistent with the value and
  # description provided in the paper.
  num_velocities = velocity_sequence.shape.as_list()[1]
  velocity_sequence_noise = tf.random.normal(
      tf.shape(velocity_sequence),
      stddev=noise_std_last_step / num_velocities ** 0.5,
      dtype=position_sequence.dtype)

  # Apply the random walk.
  velocity_sequence_noise = tf.cumsum(velocity_sequence_noise, axis=1)

  # Integrate the noise in the velocity to the positions, assuming
  # an Euler integrator and a dt = 1, and adding no noise to the very first
  # position (since that will only be used to calculate the first position
  # change).
  position_sequence_noise = tf.concat([
      tf.zeros_like(velocity_sequence_noise[:, 0:1]),
      tf.cumsum(velocity_sequence_noise, axis=1)], axis=1)

  return position_sequence_noise
deepmind-research-master
learning_to_simulate/noise_utils.py
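A quick NumPy check (sample count chosen arbitrarily) of the variance argument in the comments above: composing per-step noise with std `noise_std_last_step / sqrt(num_velocities)` through a random walk yields the requested std at the last step.

import numpy as np

noise_std_last_step = 6.7e-4
num_velocities = 5
samples = np.random.normal(
    scale=noise_std_last_step / num_velocities ** 0.5,
    size=(200000, num_velocities))
random_walk = np.cumsum(samples, axis=1)
print(random_walk[:, -1].std())  # ~6.7e-4: the per-step variances add up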
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Example script accompanying ICML 2020 submission. "Learning to Simulate Complex Physics with Graph Networks" Alvaro Sanchez-Gonzalez*, Jonathan Godwin*, Tobias Pfaff*, Rex Ying, Jure Leskovec, Peter W. Battaglia https://arxiv.org/abs/2002.09405 Here we provide the utility function `sample_random_position_sequence()` which returns a sequence of positions for a variable number of particles, similar to what a real dataset would provide, and connect the model to it, in both, single step inference and training mode. Dependencies include Tensorflow 1.x, Sonnet 1.x and the Graph Nets 1.1 library. """ import collections from learning_to_simulate import learned_simulator from learning_to_simulate import noise_utils import numpy as np import tensorflow.compat.v1 as tf INPUT_SEQUENCE_LENGTH = 6 SEQUENCE_LENGTH = INPUT_SEQUENCE_LENGTH + 1 # add one target position. NUM_DIMENSIONS = 3 NUM_PARTICLE_TYPES = 6 BATCH_SIZE = 5 GLOBAL_CONTEXT_SIZE = 6 Stats = collections.namedtuple("Stats", ["mean", "std"]) DUMMY_STATS = Stats( mean=np.zeros([NUM_DIMENSIONS], dtype=np.float32), std=np.ones([NUM_DIMENSIONS], dtype=np.float32)) DUMMY_CONTEXT_STATS = Stats( mean=np.zeros([GLOBAL_CONTEXT_SIZE], dtype=np.float32), std=np.ones([GLOBAL_CONTEXT_SIZE], dtype=np.float32)) DUMMY_BOUNDARIES = [(-1., 1.)] * NUM_DIMENSIONS def sample_random_position_sequence(): """Returns mock data mimicking the input features collected by the encoder.""" num_particles = tf.random_uniform( shape=(), minval=50, maxval=1000, dtype=tf.int32) position_sequence = tf.random.normal( shape=[num_particles, SEQUENCE_LENGTH, NUM_DIMENSIONS]) return position_sequence def main(): # Build the model. learnable_model = learned_simulator.LearnedSimulator( num_dimensions=NUM_DIMENSIONS, connectivity_radius=0.05, graph_network_kwargs=dict( latent_size=128, mlp_hidden_size=128, mlp_num_hidden_layers=2, num_message_passing_steps=10, ), boundaries=DUMMY_BOUNDARIES, normalization_stats={"acceleration": DUMMY_STATS, "velocity": DUMMY_STATS, "context": DUMMY_CONTEXT_STATS,}, num_particle_types=NUM_PARTICLE_TYPES, particle_type_embedding_size=16, ) # Sample a batch of particle sequences with shape: # [TOTAL_NUM_PARTICLES, SEQUENCE_LENGTH, NUM_DIMENSIONS] sampled_position_sequences = [ sample_random_position_sequence() for _ in range(BATCH_SIZE)] position_sequence_batch = tf.concat(sampled_position_sequences, axis=0) # Count how many particles are present in each element in the batch. # [BATCH_SIZE] n_particles_per_example = tf.stack( [tf.shape(seq)[0] for seq in sampled_position_sequences], axis=0) # Sample particle types. # [TOTAL_NUM_PARTICLES] particle_types = tf.random_uniform( [tf.shape(position_sequence_batch)[0]], 0, NUM_PARTICLE_TYPES, dtype=tf.int32) # Sample global context. 
global_context = tf.random_uniform( [BATCH_SIZE, GLOBAL_CONTEXT_SIZE], -1., 1., dtype=tf.float32) # Separate input sequence from target sequence. # [TOTAL_NUM_PARTICLES, INPUT_SEQUENCE_LENGTH, NUM_DIMENSIONS] input_position_sequence = position_sequence_batch[:, :-1] # [TOTAL_NUM_PARTICLES, NUM_DIMENSIONS] target_next_position = position_sequence_batch[:, -1] # Single step of inference with the model to predict next position for each # particle [TOTAL_NUM_PARTICLES, NUM_DIMENSIONS]. predicted_next_position = learnable_model( input_position_sequence, n_particles_per_example, global_context, particle_types) print(f"Per-particle output tensor: {predicted_next_position}") # Obtaining predicted and target normalized accelerations for training. position_sequence_noise = ( noise_utils.get_random_walk_noise_for_position_sequence( input_position_sequence, noise_std_last_step=6.7e-4)) # Both with shape [TOTAL_NUM_PARTICLES, NUM_DIMENSIONS] predicted_normalized_acceleration, target_normalized_acceleration = ( learnable_model.get_predicted_and_target_normalized_accelerations( target_next_position, position_sequence_noise, input_position_sequence, n_particles_per_example, global_context, particle_types)) print(f"Predicted norm. acceleration: {predicted_normalized_acceleration}") print(f"Target norm. acceleration: {target_normalized_acceleration}") with tf.train.SingularMonitoredSession() as sess: sess.run([predicted_next_position, predicted_normalized_acceleration, target_normalized_acceleration]) if __name__ == "__main__": tf.disable_v2_behavior() main()
deepmind-research-master
learning_to_simulate/model_demo.py
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Graph network implementation accompanying ICML 2020 submission. "Learning to Simulate Complex Physics with Graph Networks" Alvaro Sanchez-Gonzalez*, Jonathan Godwin*, Tobias Pfaff*, Rex Ying, Jure Leskovec, Peter W. Battaglia https://arxiv.org/abs/2002.09405 The Sonnet `EncodeProcessDecode` module provided here implements the learnable parts of the model. It assumes an encoder preprocessor has already built a graph with connectivity and features as described in the paper, with features normalized to zero-mean unit-variance. Dependencies include Tensorflow 1.x, Sonnet 1.x and the Graph Nets 1.1 library. """ from typing import Callable import graph_nets as gn import sonnet as snt import tensorflow as tf Reducer = Callable[[tf.Tensor, tf.Tensor, tf.Tensor], tf.Tensor] def build_mlp( hidden_size: int, num_hidden_layers: int, output_size: int) -> snt.Module: """Builds an MLP.""" return snt.nets.MLP( output_sizes=[hidden_size] * num_hidden_layers + [output_size]) class EncodeProcessDecode(snt.AbstractModule): """Encode-Process-Decode function approximator for learnable simulator.""" def __init__( self, latent_size: int, mlp_hidden_size: int, mlp_num_hidden_layers: int, num_message_passing_steps: int, output_size: int, reducer: Reducer = tf.math.unsorted_segment_sum, name: str = "EncodeProcessDecode"): """Inits the model. Args: latent_size: Size of the node and edge latent representations. mlp_hidden_size: Hidden layer size for all MLPs. mlp_num_hidden_layers: Number of hidden layers in all MLPs. num_message_passing_steps: Number of message passing steps. output_size: Output size of the decode node representations as required by the downstream update function. reducer: Reduction to be used when aggregating the edges in the nodes in the interaction network. This should be a callable whose signature matches tf.math.unsorted_segment_sum. name: Name of the model. """ super().__init__(name=name) self._latent_size = latent_size self._mlp_hidden_size = mlp_hidden_size self._mlp_num_hidden_layers = mlp_num_hidden_layers self._num_message_passing_steps = num_message_passing_steps self._output_size = output_size self._reducer = reducer with self._enter_variable_scope(): self._networks_builder() def _build(self, input_graph: gn.graphs.GraphsTuple) -> tf.Tensor: """Forward pass of the learnable dynamics model.""" # Encode the input_graph. latent_graph_0 = self._encode(input_graph) # Do `m` message passing steps in the latent graphs. latent_graph_m = self._process(latent_graph_0) # Decode from the last latent graph. 
return self._decode(latent_graph_m) def _networks_builder(self): """Builds the networks.""" def build_mlp_with_layer_norm(): mlp = build_mlp( hidden_size=self._mlp_hidden_size, num_hidden_layers=self._mlp_num_hidden_layers, output_size=self._latent_size) return snt.Sequential([mlp, snt.LayerNorm()]) # The encoder graph network independently encodes edge and node features. encoder_kwargs = dict( edge_model_fn=build_mlp_with_layer_norm, node_model_fn=build_mlp_with_layer_norm) self._encoder_network = gn.modules.GraphIndependent(**encoder_kwargs) # Create `num_message_passing_steps` graph networks with unshared parameters # that update the node and edge latent features. # Note that we can use `modules.InteractionNetwork` because # it also outputs the messages as updated edge latent features. self._processor_networks = [] for _ in range(self._num_message_passing_steps): self._processor_networks.append( gn.modules.InteractionNetwork( edge_model_fn=build_mlp_with_layer_norm, node_model_fn=build_mlp_with_layer_norm, reducer=self._reducer)) # The decoder MLP decodes node latent features into the output size. self._decoder_network = build_mlp( hidden_size=self._mlp_hidden_size, num_hidden_layers=self._mlp_num_hidden_layers, output_size=self._output_size) def _encode( self, input_graph: gn.graphs.GraphsTuple) -> gn.graphs.GraphsTuple: """Encodes the input graph features into a latent graph.""" # Copy the globals to all of the nodes, if applicable. if input_graph.globals is not None: broadcasted_globals = gn.blocks.broadcast_globals_to_nodes(input_graph) input_graph = input_graph.replace( nodes=tf.concat([input_graph.nodes, broadcasted_globals], axis=-1), globals=None) # Encode the node and edge features. latent_graph_0 = self._encoder_network(input_graph) return latent_graph_0 def _process( self, latent_graph_0: gn.graphs.GraphsTuple) -> gn.graphs.GraphsTuple: """Processes the latent graph with several steps of message passing.""" # Do `m` message passing steps in the latent graphs. # (In the shared parameters case, just reuse the same `processor_network`) latent_graph_prev_k = latent_graph_0 latent_graph_k = latent_graph_0 for processor_network_k in self._processor_networks: latent_graph_k = self._process_step( processor_network_k, latent_graph_prev_k) latent_graph_prev_k = latent_graph_k latent_graph_m = latent_graph_k return latent_graph_m def _process_step( self, processor_network_k: snt.Module, latent_graph_prev_k: gn.graphs.GraphsTuple) -> gn.graphs.GraphsTuple: """Single step of message passing with node/edge residual connections.""" # One step of message passing. latent_graph_k = processor_network_k(latent_graph_prev_k) # Add residuals. latent_graph_k = latent_graph_k.replace( nodes=latent_graph_k.nodes+latent_graph_prev_k.nodes, edges=latent_graph_k.edges+latent_graph_prev_k.edges) return latent_graph_k def _decode(self, latent_graph: gn.graphs.GraphsTuple) -> tf.Tensor: """Decodes from the latent graph.""" return self._decoder_network(latent_graph.nodes)
deepmind-research-master
learning_to_simulate/graph_network.py
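A minimal sketch, assuming Sonnet 1.x and Graph Nets 1.1 as stated in the module docstring, of running `EncodeProcessDecode` on a toy `GraphsTuple`; the graph sizes and feature widths below are arbitrary.

import graph_nets as gn
import numpy as np
import tensorflow.compat.v1 as tf

from learning_to_simulate import graph_network

tf.disable_v2_behavior()

model = graph_network.EncodeProcessDecode(
    latent_size=32, mlp_hidden_size=32, mlp_num_hidden_layers=2,
    num_message_passing_steps=2, output_size=3)

# A single toy graph: 4 nodes with 8 features, 3 directed edges, no globals.
graph = gn.graphs.GraphsTuple(
    nodes=tf.constant(np.random.randn(4, 8), tf.float32),
    edges=tf.constant(np.random.randn(3, 4), tf.float32),
    senders=tf.constant([0, 1, 2], tf.int32),
    receivers=tf.constant([1, 2, 3], tf.int32),
    globals=None,
    n_node=tf.constant([4], tf.int32),
    n_edge=tf.constant([3], tf.int32))

per_node_output = model(graph)  # decoded node representations, shape [4, 3]
with tf.train.SingularMonitoredSession() as sess:
  print(sess.run(per_node_output).shape)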
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Sqlalchemy schema for the metadata db.""" import sqlalchemy from sqlalchemy.ext import declarative Column = sqlalchemy.Column Integer = sqlalchemy.Integer String = sqlalchemy.String LargeBinary = sqlalchemy.LargeBinary ForeignKey = sqlalchemy.ForeignKey # pylint: disable=invalid-name # https://docs.sqlalchemy.org/en/13/orm/tutorial.html Base = declarative.declarative_base() EpisodeTag = sqlalchemy.Table( 'EpisodeTags', Base.metadata, Column( 'EpisodeId', String, ForeignKey('Episodes.EpisodeId'), primary_key=True), Column('Tag', String, ForeignKey('Tags.Name'), primary_key=True)) """Table relating episodes and tags. Attributes: EpisodeId: A string of digits that uniquely identifies the episode. Tag: Human readable tag name. """ class Episode(Base): """Table describing individual episodes. Attributes: EpisodeId: A string of digits that uniquely identifies the episode. TaskId: A human readable name for the task corresponding to the behavior that generated the episode. DataPath: The name of the episode file holding the data for this episode. Timestamp: A unix timestamp recording when the episode was generated. EpisodeType: A string describing the type of policy that generated the episode. Possible values are: - `EPISODE_ROBOT_AGENT`: The behavior policy is a learned or scripted controller. - `EPISODE_ROBOT_TELEOPERATION`: The behavior policy is a human teleoperating the robot. - `EPISODE_ROBOT_DAGGER`: The behavior policy is a mix of controller and human generated actions. Tags: A list of tags attached to this episode. Rewards: A list of `RewardSequence`s containing sketched rewards for this episode. """ __tablename__ = 'Episodes' EpisodeId = Column(String, primary_key=True) TaskId = Column(String) DataPath = Column(String) Timestamp = Column(Integer) EpisodeType = Column(String) Tags = sqlalchemy.orm.relationship( 'Tag', secondary=EpisodeTag, back_populates='Episodes') Rewards = sqlalchemy.orm.relationship( 'RewardSequence', backref='Episode') class Tag(Base): """Table of tags that can be attached to episodes. Attributes: Name: Human readable tag name. Episodes: The epsidoes that have been annotated with this tag. """ __tablename__ = 'Tags' Name = Column(String, primary_key=True) Episodes = sqlalchemy.orm.relationship( 'Episode', secondary=EpisodeTag, back_populates='Tags') class RewardSequence(Base): """Table describing reward sequences for episodes. Attributes: EpisodeId: Foreign key into the `Episodes` table. RewardSequenceId: Distinguishes multiple rewards for the same episode. RewardTaskId: A human readable name of the task for this reward signal. Typically the same as the corresponding `TaskId` in the `Episodes` table. Type: A string describing the type of reward signal. Currently the only value is `REWARD_SKETCH`. User: The name of the user who produced this reward sequence. Values: A sequence of float32 values, packed as a binary blob. 
There is one float value for each frame of the episode, corresponding to the annotated reward. """ __tablename__ = 'RewardSequences' EpisodeId = Column( 'EpisodeId', String, ForeignKey('Episodes.EpisodeId'), primary_key=True) RewardSequenceId = Column(String, primary_key=True) RewardTaskId = Column('RewardTaskId', String) Type = Column(String) User = Column(String) Values = Column(LargeBinary) class ArchiveFile(Base): """Table describing where episodes are stored in archives. This information is relevant if you want to download or extract a specific episode from the archives they are distributed in. Attributes: EpisodeId: Foreign key into the `Episodes` table. ArchiveFile: Name of the archive file containing the corresponding episode. """ __tablename__ = 'ArchiveFiles' EpisodeId = Column( 'EpisodeId', String, ForeignKey('Episodes.EpisodeId'), primary_key=True) ArchiveFile = Column(String) # pylint: enable=invalid-name
deepmind-research-master
sketchy/metadata_schema.py
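A hypothetical query sketch building on `reward_example.py`: listing tag usage and filtering episodes by `EpisodeType`. The database path and the tag names returned depend on the downloaded metadata file.

import sqlalchemy

from sketchy import metadata_schema

engine = sqlalchemy.create_engine('sqlite:////tmp/metadata.sqlite')
session = sqlalchemy.orm.sessionmaker(bind=engine)()

# Count episodes per tag.
for tag in session.query(metadata_schema.Tag):
  print(tag.Name, len(tag.Episodes))

# Episodes produced by human teleoperation, newest first.
teleop = (session.query(metadata_schema.Episode)
          .filter(metadata_schema.Episode.EpisodeType ==
                  'EPISODE_ROBOT_TELEOPERATION')
          .order_by(metadata_schema.Episode.Timestamp.desc())
          .limit(3))
for episode in teleop:
  print(episode.EpisodeId, episode.TaskId, episode.DataPath)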
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of loading rewards from the metadata file.""" from absl import app from absl import flags import numpy as np import sqlalchemy from sketchy import metadata_schema flags.DEFINE_string( 'metadata', '/tmp/metadata.sqlite', 'Path to metadata file.') FLAGS = flags.FLAGS def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') engine = sqlalchemy.create_engine('sqlite:///' + FLAGS.metadata) session = sqlalchemy.orm.sessionmaker(bind=engine)() episodes = session.query(metadata_schema.Episode).join( metadata_schema.RewardSequence).limit(5) for episode in episodes: rewards = np.frombuffer(episode.Rewards[0].Values, dtype=np.float32) print('---') print(f'Episode: {episode.EpisodeId}') print(f'Episode file: {episode.DataPath}') print(f'Reward type: {episode.Rewards[0].Type}') print(f'Reward values: {rewards}') if __name__ == '__main__': app.run(main)
deepmind-research-master
sketchy/reward_example.py
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
deepmind-research-master
sketchy/__init__.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of loading sketchy data in tensorflow.""" from absl import app from absl import flags import matplotlib.pyplot as plt import tensorflow.compat.v2 as tf from sketchy import sketchy flags.DEFINE_boolean('show_images', False, 'Enable to show example images.') FLAGS = flags.FLAGS def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') tf.enable_v2_behavior() # The example file contains only a few timesteps from a single episode. dataset = sketchy.load_frames('sketchy/example_data.tfrecords') dataset = dataset.prefetch(5) for example in dataset: print('---') for name, value in sorted(example.items()): print(name, value.dtype, value.shape) if FLAGS.show_images: plt.imshow(example['pixels/basket_front_left']) plt.show() if __name__ == '__main__': app.run(main)
deepmind-research-master
sketchy/dataset_example.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Interface for loading sketchy data into tensorflow.""" import tensorflow.compat.v2 as tf def load_frames(filenames, num_parallel_reads=1, num_map_threads=None): if not num_map_threads: num_map_threads = num_parallel_reads dataset = tf.data.TFRecordDataset( filenames, num_parallel_reads=num_parallel_reads) return dataset.map(_parse_example, num_parallel_calls=num_map_threads) _FEATURES = { # Actions 'actions': tf.io.FixedLenFeature(shape=7, dtype=tf.float32), # Observations 'gripper/joints/velocity': tf.io.FixedLenFeature(shape=1, dtype=tf.float32), 'gripper/joints/torque': tf.io.FixedLenFeature(shape=1, dtype=tf.float32), 'gripper/grasp': tf.io.FixedLenFeature(shape=1, dtype=tf.int64), 'gripper/joints/angle': tf.io.FixedLenFeature(shape=1, dtype=tf.float32), 'sawyer/joints/velocity': tf.io.FixedLenFeature(shape=7, dtype=tf.float32), 'sawyer/pinch/pose': tf.io.FixedLenFeature(shape=7, dtype=tf.float32), 'sawyer/tcp/pose': tf.io.FixedLenFeature(shape=7, dtype=tf.float32), 'sawyer/tcp/effort': tf.io.FixedLenFeature(shape=6, dtype=tf.float32), 'sawyer/joints/torque': tf.io.FixedLenFeature(shape=7, dtype=tf.float32), 'sawyer/tcp/velocity': tf.io.FixedLenFeature(shape=6, dtype=tf.float32), 'sawyer/joints/angle': tf.io.FixedLenFeature(shape=7, dtype=tf.float32), 'wrist/torque': tf.io.FixedLenFeature(shape=3, dtype=tf.float32), 'wrist/force': tf.io.FixedLenFeature(shape=3, dtype=tf.float32), 'pixels/basket_front_left': tf.io.FixedLenFeature(shape=1, dtype=tf.string), 'pixels/basket_back_left': tf.io.FixedLenFeature(shape=1, dtype=tf.string), 'pixels/basket_front_right': tf.io.FixedLenFeature(shape=1, dtype=tf.string), 'pixels/royale_camera_driver_depth': tf.io.FixedLenFeature(shape=(171, 224, 1), dtype=tf.float32), 'pixels/royale_camera_driver_gray': tf.io.FixedLenFeature(shape=1, dtype=tf.string), 'pixels/usbcam0': tf.io.FixedLenFeature(shape=1, dtype=tf.string), 'pixels/usbcam1': tf.io.FixedLenFeature(shape=1, dtype=tf.string), } def _parse_example(example): return _decode_images(tf.io.parse_single_example(example, _FEATURES)) def _decode_images(record): for name, value in list(record.items()): if value.dtype == tf.string: record[name] = tf.io.decode_jpeg(value[0]) return record
deepmind-research-master
sketchy/sketchy.py
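Another usage sketch, assuming the bundled `sketchy/example_data.tfrecords` used by `dataset_example.py`; it reads a couple of frames and mixes proprioceptive features with the camera images that `load_frames` has already JPEG-decoded.

import tensorflow.compat.v2 as tf

from sketchy import sketchy

tf.enable_v2_behavior()

for frame in sketchy.load_frames('sketchy/example_data.tfrecords').take(2):
  print(frame['sawyer/joints/angle'].numpy())     # 7 joint angles
  print(frame['pixels/basket_front_left'].shape)  # decoded RGB image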
# Copyright 2018 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Defines some `predicates` for the predicate_task.""" import abc import colorsys import numpy as np HSV_SATURATION = 0.5 HSV_ACTIVATED_SATURATION = 0.75 HSV_VALUE = 1.0 WALKER_GOAL_RGBA = [0, 0, 0] WALKER_GOAL_PRESSED_RGBA = [128, 128, 128] INACTIVE_OBSERVATION_VALUE = [-1] * 5 # Define globals for the special encoding. MOVABLE_TYPES = {'walker': 0, 'box': 1} TARGET_TYPES = {'box': 0, 'target': 1} PREDICATE_TYPES = {'on': 0, 'close_to': 1, 'far_from': 2} class BasePredicate(object, metaclass=abc.ABCMeta): """Base class for all predicates.""" def __init__(self, walker): self._walker = walker @abc.abstractmethod def reinitialize(self, random_state): """Reinitializes a new, potentially random, predicate state. The reinitialize method should reset to a new predicate state which could update the `objects_in_use` by the `Predicate`. This method could be called multiple times before a finally binding predicate set has been found. Therefore no changes to the model that are not reversible should be made here (setting colors etc). Any changes affecting the Mujoco model should instead be made in the `activate_predicate` method call. Args: random_state: An instance of `np.RandomState` which may be seeded to ensure a deterministic environment. """ pass @abc.abstractmethod def activate_predicate(self): """Activates the current predicate configuration. Any changes that are non-reversible like setting object properties or affinities *must* only be done in this method. At this point, the `predicate_task` logic has confirmed that a valid predicate configuration has been found. """ pass @property def objects_in_use(self): """Returns the `set` of objects used for this episode.""" return set() @abc.abstractproperty def observation_value(self): """Returns a `dict` to be used as the predicate observable.""" pass @abc.abstractmethod def is_active(self, physics): """Boolean method indicating whether the predicate has been activated. If `True`, it implies the condition for the predicate has been satisfied and the walker can be rewarded. Args: physics: An instance of `control.Physics`. """ pass @property def inactive_observation_value(self): """observation_value indicating a `Predicate` is inactive. The `PredicateTask` randomly samples the number of active predicates to be used on each episode. For a consistent `observation_spec`, the predicates that are not active need a special observation_value that cannot be used anywhere else. Returns: A special value indicating that the predicate is inactive and is not used by any other predicate in the task. """ return INACTIVE_OBSERVATION_VALUE class MoveWalkerToTarget(BasePredicate): """Predicate to move a walker to a specific target.""" def __init__(self, walker, target, target_index=0): """Predicate to move a walker or box to a target. Args: walker: An locomotion `Walker` instance to use for this predicate. target: `locomotion.prop` instance containing an `activated` property. 
target_index: An 'int' argument to add to the observable to indicate the index of the target. """ super(MoveWalkerToTarget, self).__init__(walker) self._target = target self._target_id = target_index def reinitialize(self, random_state): self._target.deregister_entities() def activate_predicate(self): self._target.register_entities(self._walker) self._target.set_colors(WALKER_GOAL_RGBA, WALKER_GOAL_PRESSED_RGBA) @property def objects_in_use(self): return set([self._walker, self._target]) @property def observation_value(self): return np.array([ MOVABLE_TYPES['walker'], 0, TARGET_TYPES['target'], self._target_id, PREDICATE_TYPES['close_to'] ]) def is_active(self, physics): return self._target.activated class MoveWalkerToRandomTarget(BasePredicate): """Predicate to move a walker to a random target.""" def __init__(self, walker, targets=None): """Predicate to move a walker or box to a target. Args: walker: An locomotion `Walker` instance to use for this predicate. targets: An optional list of `locomotion.prop` instances each of which contains an `activated` property. """ super(MoveWalkerToRandomTarget, self).__init__(walker) self._targets = targets self._target_to_move_to = None def reinitialize(self, random_state): if self._target_to_move_to is not None: self._target_to_move_to.deregister_entities() self._target_to_move_to = random_state.choice(self._targets) self._target_idx = self._targets.index(self._target_to_move_to) def activate_predicate(self): self._target_to_move_to.register_entities(self._walker) self._target_to_move_to.set_colors(WALKER_GOAL_RGBA, WALKER_GOAL_PRESSED_RGBA) @property def objects_in_use(self): return set([self._walker, self._target_to_move_to]) @property def observation_value(self): return np.array([ MOVABLE_TYPES['walker'], 0, TARGET_TYPES['target'], self._target_idx, PREDICATE_TYPES['close_to'] ]) def is_active(self, physics): return self._target_to_move_to.activated class MoveWalkerToBox(BasePredicate): """Predicate to move a walker to a specific box.""" def __init__(self, walker, box, box_index=0, detection_region=None): """Predicate to move a walker to a specific box. Args: walker: An locomotion `Walker` instance to use for this predicate. box: A `manipulation.prop` instance to move. box_index: An integer index to use for the observable to identify the `box`. detection_region: A 2-tuple indicating the tolerances in x and y for the walker to be deemed `close_to` the box. If `None`, contact based detection is used. 
""" super(MoveWalkerToBox, self).__init__(walker) self._box = box self._detection_region = detection_region self._box_index = box_index self._walker_geoms = None def reinitialize(self, random_state): if self._walker_geoms is None: # pylint: disable=protected-access self._walker_geoms = set(self._walker._mjcf_root.find_all('geom')) def activate_predicate(self): self._box.geom.rgba[:3] = WALKER_GOAL_RGBA @property def objects_in_use(self): return set([self._walker, self._box]) @property def observation_value(self): return np.array([ MOVABLE_TYPES['walker'], 0, TARGET_TYPES['box'], self._box_index, PREDICATE_TYPES['close_to'] ]) def is_active(self, physics): if self._detection_region is None: return self._is_walker_contacting_box(physics) else: return np.all( np.abs( physics.bind(self._walker.root_body).xpos - physics.bind(self._box.geom).xpos)[:2] < self._detection_region) def _is_walker_contacting_box(self, physics): walker_geom_ids = [ physics.bind(geom).element_id for geom in self._walker_geoms ] for contact in physics.data.contact: contact_geoms = set([contact.geom1, contact.geom2]) if (physics.bind(self._box.geom).element_id in contact_geoms and contact_geoms.intersection(walker_geom_ids)): return True return False class MoveBoxToBox(BasePredicate): """Predicate to move a walker to a specific box.""" def __init__(self, walker, first_box, second_box, first_box_index=0, second_box_index=1, detection_region=None): """Predicate to move a walker to a specific box. Args: walker: An locomotion `Walker` instance to use for this predicate. first_box: A `manipulation.prop` instance to move. second_box: A `manipulation.prop` instance to move. first_box_index: An integer index to use for the observable to identify the `box`. second_box_index: An integer index to use for the observable to identify the `box`. detection_region: A 2-tuple indicating the tolerances in x and y for the walker to be deemed `close_to` the box. If `None`, contact based detection is used. """ super(MoveBoxToBox, self).__init__(walker) self._first_box = first_box self._second_box = second_box self._detection_region = detection_region self._first_box_index = first_box_index self._second_box_index = second_box_index self._walker_geoms = None def reinitialize(self, random_state): if self._walker_geoms is None: # pylint: disable=protected-access self._walker_geoms = set(self._walker._mjcf_root.find_all('geom')) def activate_predicate(self): self._first_box.geom.rgba[:3] = WALKER_GOAL_RGBA @property def objects_in_use(self): return set([self._first_box, self._second_box]) @property def observation_value(self): return np.array([ MOVABLE_TYPES['box'], self._first_box_index, TARGET_TYPES['box'], self._second_box_index, PREDICATE_TYPES['close_to'] ]) def is_active(self, physics): if self._detection_region is None: return self._are_boxes_in_contact(physics) else: return np.all( np.abs( physics.bind(self._first_box.geom).xpos - physics.bind(self._second_box.geom).xpos)[:2] < self._detection_region) def _are_boxes_in_contact(self, physics): for contact in physics.data.contact: contact_geoms = set([contact.geom1, contact.geom2]) if (physics.bind(self._first_box.geom).element_id in contact_geoms and physics.bind(self._second_box.geom).element_id in contact_geoms): return True return False class MoveBoxToTarget(BasePredicate): """Predicate to move a walker to a specific target.""" def __init__(self, walker, box, target, box_index=0, target_index=0): """Predicate to move a walker or box to a target. 
Args: walker: An locomotion `Walker` instance to use for this predicate. box: A `manipulation.prop` to move to the target. target: `locomotion.prop` instance containing an `activated` property. box_index: An 'int' argument to add to the observable to indicate the index of the box. target_index: An 'int' argument to add to the observable to indicate the index of the target. """ super(MoveBoxToTarget, self).__init__(walker) self._box = box self._target = target self._box_id = box_index self._target_id = target_index self._original_box_size = np.copy(box.geom.size) self._rgb = None self._activated_rgb = None def reinitialize(self, random_state): self._target.deregister_entities() self._get_box_properties(random_state) def _get_box_properties(self, random_state): hue0 = random_state.uniform() hue = (hue0 + self._target_id) % 1.0 self._rgb = colorsys.hsv_to_rgb(hue, HSV_SATURATION, HSV_VALUE) self._activated_rgb = colorsys.hsv_to_rgb(hue, HSV_ACTIVATED_SATURATION, HSV_VALUE) def activate_predicate(self): self._target.set_colors(self._rgb, self._activated_rgb) self._box.geom.rgba[:3] = self._rgb self._target.register_entities(self._box) @property def objects_in_use(self): return set([self._box, self._target]) @property def observation_value(self): return np.array([ MOVABLE_TYPES['box'], self._box_id, TARGET_TYPES['target'], self._target_id, PREDICATE_TYPES['close_to'] ]) def is_active(self, physics): return self._target.activated class MoveBoxToRandomTarget(BasePredicate): """Predicate to move a walker to a random target.""" def __init__(self, walker, box, box_index=0, targets=None): """Predicate to move a walker or box to a target. Args: walker: An locomotion `Walker` instance to use for this predicate. box: A `manipulation.prop` to move to the target. box_index: An optional 'int' argument to add to the observable to indicate the index of the box. targets: An optional list of `locomotion.prop` instances each of which contains an `activated` property. """ super(MoveBoxToRandomTarget, self).__init__(walker) self._targets = targets self._box_to_move = box self._box_index = box_index self._target_to_move_to = None self._original_box_size = np.copy(box.geom.size) self._rgb = None self._activated_rgb = None def reinitialize(self, random_state): if self._target_to_move_to is not None: self._target_to_move_to.deregister_entities() self._target_to_move_to = random_state.choice(self._targets) self._target_idx = self._targets.index(self._target_to_move_to) self._get_box_properties(random_state) def _get_box_properties(self, random_state): hue0 = random_state.uniform() hue = (hue0 + (self._target_idx / len(self._targets))) % 1.0 self._rgb = colorsys.hsv_to_rgb(hue, HSV_SATURATION, HSV_VALUE) self._activated_rgb = colorsys.hsv_to_rgb(hue, HSV_ACTIVATED_SATURATION, HSV_VALUE) def activate_predicate(self): self._target_to_move_to.set_colors(self._rgb, self._activated_rgb) self._box_to_move.geom.rgba[:3] = self._rgb self._target_to_move_to.register_entities(self._box_to_move) @property def objects_in_use(self): return set([self._box_to_move, self._target_to_move_to]) @property def observation_value(self): return np.array([ MOVABLE_TYPES['box'], self._box_index, TARGET_TYPES['target'], self._target_idx, PREDICATE_TYPES['close_to'] ]) def is_active(self, physics): return self._target_to_move_to.activated
deepmind-research-master
box_arrangement/predicates.py
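A minimal sketch (not part of the original file) of instantiating one of these predicates outside a full task. It assumes the module-level type tables (MOVABLE_TYPES, TARGET_TYPES, PREDICATE_TYPES) defined earlier in predicates.py, and mirrors the entity construction used in predicate_task_test.py.

from dm_control.entities import props
from dm_control.locomotion import walkers

from box_arrangement import predicates

walker = walkers.Ant()
target = props.PositionDetector(
    pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False, visible=True)

# Ask the walker to reach this single target; `target_index` only affects the
# integer observation the predicate emits.
goto = predicates.MoveWalkerToTarget(walker, target, target_index=0)

# 5-vector: (movable type, movable id, target type, target id, predicate type).
print(goto.observation_value)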
# Copyright 2018 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A task where different `Predicate`s need to be solved. In each episode a spiking reward is given for each `Predicate` solved with an extra reward bonus added when all of the predicates are solved. On each episode the number of predicates are sampled randomly. This provides a common interface to specify distributions over tasks ranging in difficulty levels but with common components. Each `Predicate` involves some manipulation of the walker, props and targets which thus allows for rich configurations of tasks to be defined. """ import colorsys import functools from dm_control import composer from dm_control.composer.observation import observable from dm_env import specs import numpy as np _FLOOR_GAP_CHAR = '#' _AMBIENT_HEADLIGHT = 0.8 _HSV_SATURATION = 0.5 _HSV_ACTIVATED_SATURATION = 0.75 _HSV_VALUE = 1.0 _PROP_SIZE = 0.5 _MAX_ITERATIONS = 1000 def _generate_target_permutation(num_targets, random_state): targets = list(range(num_targets)) random_state.shuffle(targets) return targets class PredicateTask(composer.Task): """Requires objects to be moved onto targets.""" def __init__(self, walker, maze_arena, predicates, props=None, targets=None, max_num_predicates=1, randomize_num_predicates=False, predicate_prob=None, reward_scale=1.0, terminating_reward_bonus=5.0, regenerate_predicates=False, physics_timestep=0.001, control_timestep=0.025, alive_threshold=-0.5): """Initializes a task with multiple sub-components(predicates) to be solved. This task essentially contains different flavors of go to target. The task contains a walker, props and target positions. To solve the entire task, the walker would need to solve a certain number of 'predicates' or sub-tasks. For instance, the task could contain 2 predicates for the walker going to a target position and the walker moving a box to a target position. In such a case, there is an implicit ordering of the way the walker needs to solve things to achieve the net task. Args: walker: A `Walker` instance. maze_arena: An `Entity` that defines a maze-like arena. predicates: A list of `Predicate` instances for ths task. props: An optional list of `manipulation.prop` instances for the task. These are used to generate observables for the task. targets: An optional list of `locomotion.prop` instances for the task. These are used to generate observables for the task. max_num_predicates: The maximum number of predicates to use in each episode of the task. randomize_num_predicates: A `bool` flag indicating whether the number of `valid` predicates should be randomized for each task. If set to `True`, then on each episode, between 1 and `num_predicates` are chosen as valid predicates and `predicate.invalid_observation_value` is output for the remaining slots in the observation. predicate_prob: An optional `list` containing the probabilities for each of the `predicates`. If not `None`, must have the same length as `predicates. reward_scale: `float` to scale the reward. 
terminating_reward_bonus: A bonus added to the reward when all predicates have been solved. regenerate_predicates: A `bool` flag indicating which when set, spawns a new set of predicates when the previous set is successful instead of terminating. physics_timestep: The time step of the physics simulation. control_timestep: Should be an integer multiple of the physics time step. alive_threshold: Aliveness in [-1., 0.]. Raises: ValueError: If `num_props` is greater than `num_targets` or if `num_predicates` is greater than `num_targets`. """ if max_num_predicates > len(predicates): raise ValueError('Not enough predicates for task. The maximum number of ' 'predicates can be ' '{} but only {} predicates provided.'.format( max_num_predicates, len(predicates))) self._arena = maze_arena self._walker = walker self._reward_scale = reward_scale self._alive_threshold = alive_threshold self._terminating_reward_bonus = terminating_reward_bonus self._arena.mjcf_model.visual.headlight.ambient = [_AMBIENT_HEADLIGHT] * 3 maze_arena.text_maze_regenerated_hook = self._regenerate_positions self._max_num_predicates = max_num_predicates self._predicates = predicates self._predicate_prob = predicate_prob self._randomize_num_predicates = randomize_num_predicates self._active_predicates = [] self._regen_predicates = regenerate_predicates self._reward = 0 # Targets. self._targets = targets for target in targets: self._arena.attach(target) if props is None: props = [] # Props. self._props = props # M Props + 1 Walker and we choose 'N' predicates as the task. for prop in props: prop.geom.rgba = [0, 0, 0, 1] # Will be randomized for each episode. self._arena.add_free_entity(prop) # Create walkers and corresponding observables. walker.create_root_joints(self._arena.attach(walker)) self._create_per_walker_observables(walker) self._generate_target_permutation = None maze_arena.text_maze_regenerated_hook = self._regenerate_positions # Set time steps. self.set_timesteps( physics_timestep=physics_timestep, control_timestep=control_timestep) def _create_per_walker_observables(self, walker): # Enable proprioceptive observables. for obs in (walker.observables.proprioception + walker.observables.kinematic_sensors + [walker.observables.position, walker.observables.orientation]): obs.enabled = True xpos_origin_callable = lambda phys: phys.bind(walker.root_body).xpos # Egocentric prop positions. # For each prop, we add the positions for the 8 corners using the sites. for prop_id, prop in enumerate(self._props): def _prop_callable(physics, prop=prop): return [physics.bind(s).xpos for s in prop.corner_sites] if len(self._props) > 1: observable_name = 'prop_{}_position'.format(prop_id) else: observable_name = 'prop_position' walker.observables.add_egocentric_vector( observable_name, observable.Generic(_prop_callable), origin_callable=xpos_origin_callable) # Egocentric target positions. def _target_callable(physics): target_list = [] for target in self._targets: target_list.append(target.site_pos(physics)) return np.array(target_list) walker.observables.add_egocentric_vector( 'target_positions', observable.Generic(_target_callable), origin_callable=xpos_origin_callable) # Whether targets are activated. 
def _predicate_activated_callable(physics): predicate_activated_list = np.full(self._max_num_predicates, True) for i, predicate in enumerate(self._active_predicates): predicate_activated_list[i] = predicate.is_active(physics) return predicate_activated_list walker.observables.add_observable( 'predicates_activated', observable.Generic(_predicate_activated_callable)) self._observables = self._walker.observables.as_dict() # Predicate observables. for pred_idx in range(self._max_num_predicates): def _predicate_callable(_, pred_idx=pred_idx): """Callable for the predicate observation.""" if pred_idx in range(len(self._active_predicates)): predicate = self._active_predicates[pred_idx] return predicate.observation_value else: # Use any predicates inactive observation to fill the rest. predicate = self._predicates[0] return predicate.inactive_observation_value predicate_name = 'predicate_{}'.format(pred_idx) self._observables[predicate_name] = observable.Generic( _predicate_callable) self._observables[predicate_name].enabled = True @property def observables(self): return self._observables @property def name(self): return 'predicate_task' @property def root_entity(self): return self._arena def _regenerate_positions(self): target_permutation = self._generate_target_permutation( len(self._arena.target_positions)) num_permutations = len(self._props) + len(self._targets) target_permutation = target_permutation[:num_permutations] if len(self._props) + len(self._targets) > len( self._arena.target_positions): raise RuntimeError( 'The generated maze does not contain enough target positions ' 'for the requested number of props ({}) and targets ({}): got {}.' .format( len(self._props), len(self._targets), len(self._arena.target_positions))) self._prop_positions = [] for i in range(len(self._props)): self._prop_positions.append( self._arena.target_positions[target_permutation[i]]) self._target_positions = [] for i in range(len(self._targets)): idx = i + len(self._props) self._target_positions.append( self._arena.target_positions[target_permutation[idx]]) def initialize_episode_mjcf(self, random_state): self._generate_target_permutation = functools.partial( _generate_target_permutation, random_state=random_state) self._arena.regenerate() # Set random colors for the props and targets. self._set_random_colors(random_state) self._set_active_predicates(random_state) def _set_active_predicates(self, random_state): # Reinitialize predicates to set any properties they want. 
iteration = 0 valid_set_found = False while not valid_set_found and iteration < _MAX_ITERATIONS: for predicate in self._predicates: predicate.reinitialize(random_state) if self._randomize_num_predicates and self._max_num_predicates > 1: num_predicates = random_state.choice( list(range(1, self._max_num_predicates + 1)), size=1)[0] else: num_predicates = self._max_num_predicates valid_set_found = self._choose_random_predicates(random_state, num_predicates) iteration += 1 if not valid_set_found: raise ValueError( 'Could not find set of active predicates with ' 'unique objects are after {} iterations.'.format(_MAX_ITERATIONS)) for predicate in self._active_predicates: predicate.activate_predicate() def _choose_random_predicates(self, random_state, num_predicates): self._active_predicates = random_state.choice( self._predicates, replace=False, size=num_predicates, p=self._predicate_prob) objects_in_common = self._active_predicates[0].objects_in_use for predicate in self._active_predicates[1:]: new_objects = predicate.objects_in_use if objects_in_common.intersection(new_objects): return False objects_in_common.union(new_objects) return True def _set_random_colors(self, random_state): hue0 = random_state.uniform() hues = [(hue0 + i / len(self._targets)) % 1.0 for i in range(len(self._targets))] rgbs = [ colorsys.hsv_to_rgb(hue, _HSV_SATURATION, _HSV_VALUE) for hue in hues ] activated_rgbs = [ colorsys.hsv_to_rgb(hue, _HSV_ACTIVATED_SATURATION, _HSV_VALUE) for hue in hues ] # There are fewer props than targets. # Pick as far apart colors for each prop as possible. if self._props: targets_per_prop = len(self._targets) // len(self._props) else: targets_per_prop = len(self._targets) for prop_id in range(len(self._props)): # The first few targets have to match the props' color. rgb_id = prop_id * targets_per_prop self._props[prop_id].geom.rgba[:3] = rgbs[rgb_id] self._targets[prop_id].set_colors(rgbs[rgb_id], activated_rgbs[rgb_id]) # Assign colors not used by any prop to decoy targets. for decoy_target_offset in range(targets_per_prop - 1): target_id = len( self._props) + prop_id * targets_per_prop + decoy_target_offset rgb_id = prop_id * targets_per_prop + decoy_target_offset self._targets[target_id].set_colors(rgbs[rgb_id], rgbs[rgb_id]) # Remainder loop for targets. for target_id in range(targets_per_prop * len(self._props), len(self._targets)): self._targets[target_id].set_colors(rgbs[target_id], rgbs[target_id]) def initialize_episode(self, physics, random_state): self._first_step = True self._was_active = [False] * len(self._active_predicates) walker = self._walker spawn_indices = random_state.permutation(len(self._arena.spawn_positions)) spawn_index = spawn_indices[0] walker.reinitialize_pose(physics, random_state) spawn_position = self._arena.spawn_positions[spawn_index] spawn_rotation = random_state.uniform(-np.pi, np.pi) spawn_quat = np.array( [np.cos(spawn_rotation / 2), 0, 0, np.sin(spawn_rotation / 2)]) walker.shift_pose( physics, [spawn_position[0], spawn_position[1], 0.0], spawn_quat, rotate_velocity=True) for prop, prop_xy_position in zip(self._props, self._prop_positions): # Position at the middle of a maze cell. prop_position = np.array( [prop_xy_position[0], prop_xy_position[1], prop.geom.size[2]]) # Randomly rotate the prop around the z-axis. 
prop_rotation = random_state.uniform(-np.pi, np.pi) prop_quat = np.array( [np.cos(prop_rotation / 2), 0, 0, np.sin(prop_rotation / 2)]) # Taking into account the prop's orientation, first calculate how much we # can displace the prop from the center of a maze cell without any part of # it sticking out of the cell. x, y, _ = prop.geom.size cos = np.cos(prop_rotation) sin = np.sin(prop_rotation) x_max = max([np.abs(x * cos - y * sin), np.abs(x * cos + y * sin)]) y_max = max([np.abs(y * cos + x * sin), np.abs(y * cos - x * sin)]) prop_max_displacement = self._arena.xy_scale / 2 - np.array( [x_max, y_max]) assert np.all(prop_max_displacement >= 0) prop_max_displacement *= 0.99 # Safety factor. # Then randomly displace the prop from the center of the maze cell. prop_position[:2] += prop_max_displacement * random_state.uniform( -1, 1, 2) # Commit the prop's final pose. prop.set_pose(physics, position=prop_position, quaternion=prop_quat) for target, target_position in zip(self._targets, self._target_positions): target_position[2] = _PROP_SIZE target.set_position(physics, target_position) def before_step(self, physics, actions, random_state): if isinstance(actions, list): actions = np.concatenate(actions) super(PredicateTask, self).before_step(physics, actions, random_state) if self._first_step: self._first_step = False else: self._was_active = [ predicate.is_active(physics) for predicate in self._active_predicates ] def after_step(self, physics, random_state): if self._all_predicates_satisfied() and self._regen_predicates: self._set_random_colors(random_state) self._set_active_predicates(random_state) super(PredicateTask, self).after_step(physics, random_state) def get_reward(self, physics): reward = 0.0 for predicate, was_active in zip(self._active_predicates, self._was_active): if predicate.is_active(physics) and not was_active: reward += 1.0 elif was_active and not predicate.is_active(physics): reward -= 1.0 if self._all_predicates_satisfied(): reward += self._terminating_reward_bonus self._reward = reward return reward * self._reward_scale def _all_predicates_satisfied(self): return sum(self._was_active) == len(self._active_predicates) def should_terminate_episode(self, physics): return ((self._all_predicates_satisfied() and not self._regen_predicates) or self._walker.aliveness(physics) < self._alive_threshold) def get_discount(self, physics): if self.should_terminate_episode(physics): return 0.0 return 1.0 def get_reward_spec(self): return specs.Array(shape=[], dtype=np.float32) def get_discount_spec(self): return specs.Array(shape=[], dtype=np.float32)
deepmind-research-master
box_arrangement/predicate_task.py
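A short usage sketch, closely following predicate_task_test.py, showing how a PredicateTask is assembled into a composer.Environment; the maze size and random seed are arbitrary choices for illustration.

import numpy as np
from dm_control import composer
from dm_control.entities import props
from dm_control.locomotion import arenas
from dm_control.locomotion import walkers

from box_arrangement import predicate_task
from box_arrangement import predicates

walker = walkers.Ant()
maze = arenas.padded_room.PaddedRoom(
    room_size=8, num_objects=2, pad_with_walls=True)
maze_arena = arenas.MazeWithTargets(maze=maze)
target = props.PositionDetector(
    pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False, visible=True)

task = predicate_task.PredicateTask(
    walker=walker,
    maze_arena=maze_arena,
    predicates=[predicates.MoveWalkerToRandomTarget(walker, [target])],
    targets=[target],
    randomize_num_predicates=False)
env = composer.Environment(task, random_state=np.random.RandomState(0))

timestep = env.reset()
# Each `predicate_i` observation encodes the currently active sub-goal.
print(timestep.observation['predicate_0'])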
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
deepmind-research-master
box_arrangement/__init__.py
# Copyright 2020 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Setup for pip package.""" from setuptools import find_packages from setuptools import setup REQUIRED_PACKAGES = ['absl-py', 'dm_control', 'numpy', 'dm_env'] setup( name='box_arrangement', version='0.1', description=('Sparse reward tasks involving moving and pushing boxes to' ' targets.'), url='https://github.com/deepmind/deepmind-research/box_arrangement', author='DeepMind', author_email='[email protected]', # Contained modules and scripts. packages=find_packages(), install_requires=REQUIRED_PACKAGES, platforms=['any'], license='Apache 2.0', )
deepmind-research-master
box_arrangement/setup.py
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for box_arrangement.predicate_task.""" from absl.testing import absltest from dm_control import composer from dm_control.entities import props from dm_control.locomotion import arenas from dm_control.locomotion import walkers import numpy as np from box_arrangement import predicate_task from box_arrangement import predicates _EGOCENTRIC_OBSERVABLES = [ "walker/body_height", "walker/end_effectors_pos", "walker/joints_pos", "walker/joints_vel", "walker/sensors_accelerometer", "walker/sensors_gyro", "walker/sensors_velocimeter", "walker/world_zaxis", ] class PredicateTaskTest(absltest.TestCase): def _setup_basic_gtt_task(self, num_targets=1, reward_scale=1.0): walker = walkers.Ant() text_maze = arenas.padded_room.PaddedRoom( room_size=8, num_objects=2, pad_with_walls=True) maze_arena = arenas.MazeWithTargets(maze=text_maze) targets = [] for _ in range(num_targets): targets.append( props.PositionDetector( pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False, visible=True)) test_predicates = [predicates.MoveWalkerToRandomTarget(walker, targets)] self._task = predicate_task.PredicateTask( walker=walker, maze_arena=maze_arena, predicates=test_predicates, targets=targets, randomize_num_predicates=False, reward_scale=reward_scale, terminating_reward_bonus=2.0, ) random_state = np.random.RandomState(12345) self._env = composer.Environment(self._task, random_state=random_state) self._walker = walker self._targets = targets def test_observables(self): self._setup_basic_gtt_task() timestep = self._env.reset() self.assertIn("predicate_0", timestep.observation) self.assertIn("walker/target_positions", timestep.observation) for observable in _EGOCENTRIC_OBSERVABLES: self.assertIn(observable, timestep.observation) def test_termination_and_discount(self): self._setup_basic_gtt_task() self._env.reset() target_pos = (0, 0, 0.5) # Initialize the walker away from the target. self._walker.set_pose( self._env.physics, position=(-2, 0, 0.0), quaternion=(1, 0, 0, 0)) self._targets[0].set_position( self._env.physics, target_pos) self._env.physics.forward() zero_action = np.zeros_like(self._env.physics.data.ctrl) for _ in range(10): timestep = self._env.step(zero_action) self.assertEqual(timestep.discount, 1.0) self.assertEqual(timestep.reward, 0.0) walker_pos = (0, 0, 0.0) self._walker.set_pose( self._env.physics, position=walker_pos) self._env.physics.forward() # For a single predicate, first the reward is +1.0 for activating the # predicate timestep = self._env.step(zero_action) self.assertEqual(timestep.discount, 1.0) self.assertEqual(timestep.reward, 1.0) # If the predicate is active and *remains* active, the discount gets to 0.0 # and the terminating reward bonus is given. timestep = self._env.step(zero_action) self.assertEqual(timestep.discount, 0.0) self.assertEqual(timestep.reward, 2.0) # Make sure this is a termination step. 
self.assertTrue(timestep.last()) def test_reward_scaling(self): self._setup_basic_gtt_task(reward_scale=10.0) self._env.reset() zero_action = np.zeros_like(self._env.physics.data.ctrl) target_pos = (0, 0, 0.5) walker_pos = (0, 0, 0.0) self._targets[0].set_position(self._env.physics, target_pos) self._walker.set_pose(self._env.physics, position=walker_pos) self._env.physics.forward() # For a single predicate, first the reward is +1.0 for activating the # predicate timestep = self._env.step(zero_action) self.assertEqual(timestep.discount, 1.0) self.assertEqual(timestep.reward, 10.0) # If the predicate is active and *remains* active, the discount gets to 0.0 # and the terminating reward bonus is given. timestep = self._env.step(zero_action) self.assertEqual(timestep.discount, 0.0) self.assertEqual(timestep.reward, 20.0) # Make sure this is a termination step. self.assertTrue(timestep.last()) def test_too_few_predicates_raises_exception(self): walker = walkers.Ant() num_targets = 1 text_maze = arenas.padded_room.PaddedRoom( room_size=8, num_objects=2, pad_with_walls=True) maze_arena = arenas.MazeWithTargets(maze=text_maze) targets = [] for _ in range(num_targets): targets.append( props.PositionDetector( pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False, visible=True)) test_predicates = [] with self.assertRaisesWithLiteralMatch( ValueError, "Not enough predicates for task." " The maximum number of " "predicates can be " "1 but only 0 predicates provided."): predicate_task.PredicateTask( walker=walker, maze_arena=maze_arena, predicates=test_predicates, targets=targets, randomize_num_predicates=False, reward_scale=1.0, terminating_reward_bonus=2.0, ) def test_error_too_few_targets(self): walker = walkers.Ant() num_targets = 5 text_maze = arenas.padded_room.PaddedRoom( room_size=8, num_objects=2, pad_with_walls=True) maze_arena = arenas.MazeWithTargets(maze=text_maze) targets = [] for _ in range(num_targets): targets.append( props.PositionDetector( pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False, visible=True)) test_predicates = [predicates.MoveWalkerToRandomTarget(walker, targets)] task = predicate_task.PredicateTask( walker=walker, maze_arena=maze_arena, predicates=test_predicates, targets=targets, randomize_num_predicates=False, reward_scale=1.0, terminating_reward_bonus=2.0, ) random_state = np.random.RandomState(12345) env = composer.Environment(task, random_state=random_state) with self.assertRaisesWithLiteralMatch( RuntimeError, "The generated maze does not contain enough target " "positions for the requested number of props (0) and targets (5): " "got 2." ): env.reset() def test_error_if_no_predicates_found(self): walker = walkers.Ant() num_targets = 2 text_maze = arenas.padded_room.PaddedRoom( room_size=8, num_objects=6, pad_with_walls=True) maze_arena = arenas.MazeWithTargets(maze=text_maze) targets = [] for _ in range(num_targets): targets.append( props.PositionDetector( pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False, visible=True)) # Moving the walker to two targets is not possible since the walker is a # shared object in use. 
test_predicates = [predicates.MoveWalkerToTarget(walker, targets[0]), predicates.MoveWalkerToTarget(walker, targets[1])] task = predicate_task.PredicateTask( walker=walker, maze_arena=maze_arena, predicates=test_predicates, targets=targets[1:], randomize_num_predicates=False, max_num_predicates=2, reward_scale=1.0, terminating_reward_bonus=2.0, ) random_state = np.random.RandomState(12345) env = composer.Environment(task, random_state=random_state) with self.assertRaisesWithLiteralMatch( ValueError, "Could not find set of active predicates" " with unique objects are after 1000 iterations."): env.reset() # However moving to one of the two targets is fine. walker = walkers.Ant() num_targets = 2 text_maze = arenas.padded_room.PaddedRoom( room_size=8, num_objects=6, pad_with_walls=True) maze_arena = arenas.MazeWithTargets(maze=text_maze) targets = [] for _ in range(num_targets): targets.append( props.PositionDetector( pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False, visible=True)) test_predicates = [predicates.MoveWalkerToTarget(walker, targets[0]), predicates.MoveWalkerToTarget(walker, targets[1])] task = predicate_task.PredicateTask( walker=walker, maze_arena=maze_arena, predicates=test_predicates, targets=targets[1:], randomize_num_predicates=False, max_num_predicates=1, reward_scale=1.0, terminating_reward_bonus=2.0, ) random_state = np.random.RandomState(12345) env = composer.Environment(task, random_state=random_state) env.reset() if __name__ == "__main__": absltest.main()
deepmind-research-master
box_arrangement/predicate_task_test.py
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple script to launch viewer with an example environment.""" from absl import app from absl import flags from dm_control import viewer from box_arrangement import task_examples FLAGS = flags.FLAGS flags.DEFINE_enum('task', 'go_to_target', [ 'go_to_target', 'move_box', 'move_box_or_go_to_target', 'move_box_and_go_to_target' ], 'The task to visualize.') TASKS = { 'go_to_target': task_examples.go_to_k_targets, 'move_box': task_examples.move_box, 'move_box_or_go_to_target': task_examples.move_box_or_gtt, 'move_box_and_go_to_target': task_examples.move_box_and_gtt, } def main(unused_argv): viewer.launch(environment_loader=TASKS[FLAGS.task]) if __name__ == '__main__': app.run(main)
deepmind-research-master
box_arrangement/explore.py
# Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example tasks used in publications.""" from dm_control import composer from dm_control.entities import props from dm_control.locomotion import arenas as locomotion_arenas from dm_control.locomotion import walkers from dm_control.manipulation import props as manipulation_props from box_arrangement import dmlab_assets from box_arrangement import predicates as predicates_module from box_arrangement.predicate_task import PredicateTask DEFAULT_TIME_LIMIT = 20.0 DEFAULT_CONTROL_TIMESTEP = 0.05 MIN_ROOM_SIZE = 3 def _make_predicate_task(n_boxes, n_targets, include_gtt_predicates, include_move_box_predicates, max_num_predicates, control_timestep, time_limit): """Auxiliary function to construct different predicates tasks.""" walker = walkers.Ant() skybox = dmlab_assets.SkyBox(style='sky_03') wall = dmlab_assets.WallTextures(style='style_03') floor = dmlab_assets.FloorTextures(style='style_03') # Make room size become bigger once the number of objects become larger. num_objects = n_boxes + n_targets room_size = max(MIN_ROOM_SIZE, num_objects) text_maze = locomotion_arenas.padded_room.PaddedRoom( room_size=room_size, num_objects=num_objects, pad_with_walls=True) arena = locomotion_arenas.MazeWithTargets( maze=text_maze, skybox_texture=skybox, wall_textures=wall, floor_textures=floor) boxes = [] for _ in range(n_boxes): boxes.append( manipulation_props.BoxWithSites(mass=1.5, half_lengths=[0.5, 0.5, 0.5])) targets = [] for _ in range(n_targets): targets.append( props.PositionDetector( pos=[0, 0, 0.5], size=[0.5, 0.5, 0.5], inverted=False, visible=True)) predicates = [] if include_gtt_predicates: predicates.append( predicates_module.MoveWalkerToRandomTarget( walker=walker, targets=targets)) if include_move_box_predicates: for box_idx in range(len(boxes)): predicates.append( predicates_module.MoveBoxToRandomTarget( walker=walker, box=boxes[box_idx], box_index=box_idx, targets=targets)) task = PredicateTask( walker=walker, maze_arena=arena, predicates=predicates, props=boxes, targets=targets, max_num_predicates=max_num_predicates, randomize_num_predicates=False, reward_scale=10., regenerate_predicates=False, physics_timestep=0.005, control_timestep=control_timestep) env = composer.Environment(task=task, time_limit=time_limit) return env def go_to_k_targets(n_targets=3, time_limit=DEFAULT_TIME_LIMIT, control_timestep=DEFAULT_CONTROL_TIMESTEP): """Loads `go_to_k_targets` task.""" return _make_predicate_task( n_boxes=0, n_targets=n_targets, include_gtt_predicates=True, include_move_box_predicates=False, max_num_predicates=1, control_timestep=control_timestep, time_limit=time_limit) def move_box(n_targets=3, time_limit=DEFAULT_TIME_LIMIT, control_timestep=DEFAULT_CONTROL_TIMESTEP): """Loads `move_box` task.""" return _make_predicate_task( n_boxes=1, n_targets=n_targets, include_gtt_predicates=False, include_move_box_predicates=True, max_num_predicates=1, control_timestep=control_timestep, time_limit=time_limit) def 
move_box_or_gtt(n_targets=3, time_limit=DEFAULT_TIME_LIMIT, control_timestep=DEFAULT_CONTROL_TIMESTEP): """Loads `move_box_or_gtt` task.""" return _make_predicate_task( n_boxes=1, n_targets=n_targets, include_gtt_predicates=True, include_move_box_predicates=True, max_num_predicates=1, control_timestep=control_timestep, time_limit=time_limit) def move_box_and_gtt(n_targets=3, time_limit=DEFAULT_TIME_LIMIT, control_timestep=DEFAULT_CONTROL_TIMESTEP): """Loads `move_box_or_gtt` task.""" return _make_predicate_task( n_boxes=1, n_targets=n_targets, include_gtt_predicates=True, include_move_box_predicates=True, max_num_predicates=2, control_timestep=control_timestep, time_limit=time_limit)
deepmind-research-master
box_arrangement/task_examples.py
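A small sketch of driving one of the example environments with uniform random actions; the loop and spec handling are standard dm_env usage rather than anything specific to this file.

import numpy as np

from box_arrangement import task_examples

env = task_examples.go_to_k_targets(n_targets=3)
spec = env.action_spec()

timestep = env.reset()
while not timestep.last():
  action = np.random.uniform(spec.minimum, spec.maximum, size=spec.shape)
  timestep = env.step(action)
  # Sparse reward: +1 whenever a predicate becomes satisfied, with a bonus
  # on the terminating step.
  print(timestep.reward)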
# Copyright 2018 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DeepMind Lab textures.""" from dm_control import composer from dm_control import mjcf from labmaze import assets as labmaze_assets class SkyBox(composer.Entity): """Represents a texture asset for the sky box.""" def _build(self, style): labmaze_textures = labmaze_assets.get_sky_texture_paths(style) self._mjcf_root = mjcf.RootElement(model='dmlab_' + style) self._texture = self._mjcf_root.asset.add( 'texture', type='skybox', name='texture', fileleft=labmaze_textures.left, fileright=labmaze_textures.right, fileup=labmaze_textures.up, filedown=labmaze_textures.down, filefront=labmaze_textures.front, fileback=labmaze_textures.back) @property def mjcf_model(self): return self._mjcf_root @property def texture(self): return self._texture class WallTextures(composer.Entity): """Represents wall texture assets.""" def _build(self, style): labmaze_textures = labmaze_assets.get_wall_texture_paths(style) self._mjcf_root = mjcf.RootElement(model='dmlab_' + style) self._textures = [] for texture_name, texture_path in labmaze_textures.items(): self._textures.append(self._mjcf_root.asset.add( 'texture', type='2d', name=texture_name, file=texture_path.format(texture_name))) @property def mjcf_model(self): return self._mjcf_root @property def textures(self): return self._textures class FloorTextures(composer.Entity): """Represents floor texture assets.""" def _build(self, style): labmaze_textures = labmaze_assets.get_floor_texture_paths(style) self._mjcf_root = mjcf.RootElement(model='dmlab_' + style) self._textures = [] for texture_name, texture_path in labmaze_textures.items(): self._textures.append(self._mjcf_root.asset.add( 'texture', type='2d', name=texture_name, file=texture_path.format(texture_name))) @property def mjcf_model(self): return self._mjcf_root @property def textures(self): return self._textures
deepmind-research-master
box_arrangement/dmlab_assets.py
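A brief sketch of how these texture entities are consumed, mirroring _make_predicate_task in task_examples.py; it assumes the labmaze texture assets for the named styles are installed.

from dm_control.locomotion import arenas as locomotion_arenas

from box_arrangement import dmlab_assets

skybox = dmlab_assets.SkyBox(style='sky_03')
walls = dmlab_assets.WallTextures(style='style_03')
floor = dmlab_assets.FloorTextures(style='style_03')

text_maze = locomotion_arenas.padded_room.PaddedRoom(
    room_size=4, num_objects=2, pad_with_walls=True)
arena = locomotion_arenas.MazeWithTargets(
    maze=text_maze,
    skybox_texture=skybox,
    wall_textures=walls,
    floor_textures=floor)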
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ResNet (post-activation) with FixUp.""" # pylint: disable=invalid-name import functools import haiku as hk import jax import jax.numpy as jnp from nfnets import base nonlinearities = { 'swish': jax.nn.silu, 'relu': jax.nn.relu, 'identity': lambda x: x} class FixUp_ResNet(hk.Module): """Fixup based ResNet.""" variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]}, 'ResNet101': {'depth': [3, 4, 23, 3]}, 'ResNet152': {'depth': [3, 8, 36, 3]}, 'ResNet200': {'depth': [3, 24, 36, 3]}, 'ResNet288': {'depth': [24, 24, 24, 24]}, 'ResNet600': {'depth': [50, 50, 50, 50]}, } def __init__(self, num_classes, variant='ResNet50', width=4, stochdepth_rate=0.1, drop_rate=None, activation='relu', fc_init=jnp.zeros, name='FixUp_ResNet'): super().__init__(name=name) self.num_classes = num_classes self.variant = variant self.width = width # Get variant info block_params = self.variant_dict[self.variant] self.width_pattern = [item * self.width for item in [64, 128, 256, 512]] self.depth_pattern = block_params['depth'] self.activation = nonlinearities[activation] if drop_rate is None: self.drop_rate = block_params['drop_rate'] else: self.drop_rate = drop_rate self.which_conv = functools.partial(hk.Conv2D, with_bias=False) # Stem ch = int(16 * self.width) self.initial_conv = self.which_conv(ch, kernel_shape=7, stride=2, padding='SAME', name='initial_conv') # Body self.blocks = [] num_blocks = sum(self.depth_pattern) index = 0 # Overall block index block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2]) for block_width, stage_depth, stride in zip(*block_args): for block_index in range(stage_depth): # Block stochastic depth drop-rate block_stochdepth_rate = stochdepth_rate * index / num_blocks self.blocks += [ResBlock(ch, block_width, num_blocks, stride=stride if block_index == 0 else 1, activation=self.activation, which_conv=self.which_conv, stochdepth_rate=block_stochdepth_rate, )] ch = block_width index += 1 # Head self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True) def __call__(self, x, is_training=True, return_metrics=False): """Return the output of the final layer without any [log-]softmax.""" # Stem outputs = {} out = self.initial_conv(x) bias1 = hk.get_parameter('bias1', (), x.dtype, init=jnp.zeros) out = self.activation(out + bias1) out = hk.max_pool(out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME') if return_metrics: outputs.update(base.signal_metrics(out, 0)) # Blocks for i, block in enumerate(self.blocks): out, res_avg_var = block(out, is_training=is_training) if return_metrics: outputs.update(base.signal_metrics(out, i + 1)) outputs[f'res_avg_var_{i}'] = res_avg_var # Final-conv->activation, pool, dropout, classify pool = jnp.mean(out, [1, 2]) outputs['pool'] = pool # Optionally apply dropout if self.drop_rate > 0.0 and is_training: pool = hk.dropout(hk.next_rng_key(), 
self.drop_rate, pool) bias2 = hk.get_parameter('bias2', (), pool.dtype, init=jnp.zeros) outputs['logits'] = self.fc(pool + bias2) return outputs def count_flops(self, h, w): flops = [] flops += [base.count_conv_flops(3, self.initial_conv, h, w)] h, w = h / 2, w / 2 # Body FLOPs for block in self.blocks: flops += [block.count_flops(h, w)] if block.stride > 1: h, w = h / block.stride, w / block.stride # Head module FLOPs out_ch = self.blocks[-1].out_ch flops += [base.count_conv_flops(out_ch, self.final_conv, h, w)] # Count flops for classifier flops += [self.final_conv.output_channels * self.fc.output_size] return flops, sum(flops) class ResBlock(hk.Module): """Post-activation Fixup Block.""" def __init__(self, in_ch, out_ch, num_blocks, bottleneck_ratio=0.25, kernel_size=3, stride=1, which_conv=hk.Conv2D, activation=jax.nn.relu, stochdepth_rate=None, name=None): super().__init__(name=name) self.in_ch, self.out_ch = in_ch, out_ch self.kernel_size = kernel_size self.activation = activation # Bottleneck width self.width = int(self.out_ch * bottleneck_ratio) self.stride = stride # Conv 0 (typically expansion conv) conv0_init = hk.initializers.RandomNormal( stddev=((2 / self.width)**0.5) * (num_blocks**(-0.25))) self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME', name='conv0', w_init=conv0_init) # Grouped NxN conv conv1_init = hk.initializers.RandomNormal( stddev=((2 / (self.width * (kernel_size**2)))**0.5) * (num_blocks**(-0.25))) self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride, padding='SAME', name='conv1', w_init=conv1_init) # Conv 2, typically projection conv self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME', name='conv2', w_init=hk.initializers.Constant(0)) # Use shortcut conv on channel change or downsample. self.use_projection = stride > 1 or self.in_ch != self.out_ch if self.use_projection: shortcut_init = hk.initializers.RandomNormal( stddev=(2 / self.out_ch) ** 0.5) self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1, stride=stride, padding='SAME', name='conv_shortcut', w_init=shortcut_init) # Are we using stochastic depth? self._has_stochdepth = (stochdepth_rate is not None and stochdepth_rate > 0. and stochdepth_rate < 1.0) if self._has_stochdepth: self.stoch_depth = base.StochDepth(stochdepth_rate) def __call__(self, x, is_training): bias1a = hk.get_parameter('bias1a', (), x.dtype, init=jnp.zeros) bias1b = hk.get_parameter('bias1b', (), x.dtype, init=jnp.zeros) bias2a = hk.get_parameter('bias2a', (), x.dtype, init=jnp.zeros) bias2b = hk.get_parameter('bias2b', (), x.dtype, init=jnp.zeros) bias3a = hk.get_parameter('bias3a', (), x.dtype, init=jnp.zeros) bias3b = hk.get_parameter('bias3b', (), x.dtype, init=jnp.zeros) scale = hk.get_parameter('scale', (), x.dtype, init=jnp.ones) out = x + bias1a shortcut = out if self.use_projection: # Downsample with conv1x1 shortcut = self.conv_shortcut(shortcut) out = self.conv0(out) out = self.activation(out + bias1b) out = self.conv1(out + bias2a) out = self.activation(out + bias2b) out = self.conv2(out + bias3a) out = out * scale + bias3b # Get average residual variance for reporting metrics. res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2])) # Apply stochdepth if applicable. 
if self._has_stochdepth: out = self.stoch_depth(out, is_training) # SkipInit Gain out = out + shortcut return self.activation(out), res_avg_var def count_flops(self, h, w): # Count conv FLOPs based on input HW expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w) # If block is strided we decrease resolution here. dw_flops = base.count_conv_flops(self.width, self.conv1, h, w) if self.stride > 1: h, w = h / self.stride, w / self.stride if self.use_projection: sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w) else: sc_flops = 0 contract_flops = base.count_conv_flops(self.width, self.conv2, h, w) return sum([expand_flops, dw_flops, contract_flops, sc_flops])
deepmind-research-master
nfnets/fixup_resnet.py
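A forward-pass sketch for the FixUp ResNet under Haiku. drop_rate is passed explicitly because the variant table above only stores depths, and the width and batch size are kept small for illustration; the sketch only exercises the forward pass (count_flops here refers to a final_conv attribute this variant does not define).

import haiku as hk
import jax
import jax.numpy as jnp

from nfnets import fixup_resnet


def forward(images, is_training):
  model = fixup_resnet.FixUp_ResNet(
      num_classes=10, variant='ResNet50', width=1, drop_rate=0.0)
  return model(images, is_training=is_training)['logits']


forward_fn = hk.transform(forward)
rng = jax.random.PRNGKey(0)
images = jnp.zeros([2, 224, 224, 3])  # NHWC, as expected by hk.Conv2D defaults.
params = forward_fn.init(rng, images, is_training=True)
logits = forward_fn.apply(params, rng, images, is_training=True)
print(logits.shape)  # (2, 10)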
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""ImageNet experiment with NF-RegNets."""

from ml_collections import config_dict

from nfnets import experiment


def get_config():
  """Return config object for training."""
  config = experiment.get_config()

  # Experiment config.
  train_batch_size = 1024  # Global batch size.
  images_per_epoch = 1281167
  num_epochs = 360
  steps_per_epoch = images_per_epoch / train_batch_size
  config.training_steps = ((images_per_epoch * num_epochs) // train_batch_size)
  config.random_seed = 0

  config.experiment_kwargs = config_dict.ConfigDict(
      dict(
          config=dict(
              lr=0.4,
              num_epochs=num_epochs,
              label_smoothing=0.1,
              model='NF_RegNet',
              image_size=224,
              use_ema=True,
              ema_decay=0.99999,  # Five nines of EMA decay.
              ema_start=0,
              augment_name='mixup_cutmix',
              train_batch_size=train_batch_size,
              eval_batch_size=50,
              eval_subset='test',
              num_classes=1000,
              which_dataset='imagenet',
              which_loss='softmax_cross_entropy',  # One of softmax or sigmoid
              bfloat16=False,
              lr_schedule=dict(
                  name='WarmupCosineDecay',
                  kwargs=dict(num_steps=config.training_steps,
                              start_val=0,
                              min_val=0.001,
                              warmup_steps=5*steps_per_epoch),
              ),
              lr_scale_by_bs=False,
              optimizer=dict(
                  name='SGD',
                  kwargs={'momentum': 0.9, 'nesterov': True,
                          'weight_decay': 5e-5,},
              ),
              model_kwargs=dict(
                  variant='B0',
                  width=0.75,
                  expansion=2.25,
                  se_ratio=0.5,
                  alpha=0.2,
                  stochdepth_rate=0.1,
                  drop_rate=None,
                  activation='silu',
              ),
          )))

  # Set weight decay based on variant (scaled as 5e-5 + 1e-5 * level)
  variant = config.experiment_kwargs.config.model_kwargs.variant
  weight_decay = {'B0': 5e-5, 'B1': 6e-5, 'B2': 7e-5, 'B3': 8e-5,
                  'B4': 9e-5, 'B5': 1e-4}[variant]
  config.experiment_kwargs.config.optimizer.kwargs.weight_decay = weight_decay
  return config


Experiment = experiment.Experiment
deepmind-research-master
nfnets/experiment_nf_regnets.py
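A quick sketch of inspecting the config this file produces; both printed fields are set in get_config above.

from nfnets import experiment_nf_regnets

config = experiment_nf_regnets.get_config()
exp_config = config.experiment_kwargs.config
print(config.training_steps)                      # Total optimisation steps.
print(exp_config.optimizer.kwargs.weight_decay)   # Variant-dependent value.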
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ResNetV2 (Pre-activation) with SkipInit.""" # pylint: disable=invalid-name import haiku as hk import jax import jax.numpy as jnp from nfnets import base # Nonlinearities nonlinearities = { 'swish': jax.nn.silu, 'relu': jax.nn.relu, 'identity': lambda x: x} class SkipInit_ResNet(hk.Module): """Skip-Init based ResNet.""" variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]}, 'ResNet101': {'depth': [3, 4, 23, 3]}, 'ResNet152': {'depth': [3, 8, 36, 3]}, 'ResNet200': {'depth': [3, 24, 36, 3]}, 'ResNet288': {'depth': [24, 24, 24, 24]}, 'ResNet600': {'depth': [50, 50, 50, 50]}, } def __init__(self, num_classes, variant='ResNet50', width=4, stochdepth_rate=0.1, drop_rate=None, activation='relu', fc_init=jnp.zeros, name='SkipInit_ResNet'): super().__init__(name=name) self.num_classes = num_classes self.variant = variant self.width = width # Get variant info block_params = self.variant_dict[self.variant] self.width_pattern = [item * self.width for item in [64, 128, 256, 512]] self.depth_pattern = block_params['depth'] self.activation = nonlinearities[activation] if drop_rate is None: self.drop_rate = block_params['drop_rate'] else: self.drop_rate = drop_rate self.which_conv = hk.Conv2D # Stem ch = int(16 * self.width) self.initial_conv = self.which_conv(ch, kernel_shape=7, stride=2, padding='SAME', with_bias=False, name='initial_conv') # Body self.blocks = [] num_blocks = sum(self.depth_pattern) index = 0 # Overall block index block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2]) for block_width, stage_depth, stride in zip(*block_args): for block_index in range(stage_depth): # Block stochastic depth drop-rate block_stochdepth_rate = stochdepth_rate * index / num_blocks self.blocks += [NFResBlock(ch, block_width, stride=stride if block_index == 0 else 1, activation=self.activation, which_conv=self.which_conv, stochdepth_rate=block_stochdepth_rate, )] ch = block_width index += 1 # Head self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True) def __call__(self, x, is_training=True, return_metrics=False): """Return the output of the final layer without any [log-]softmax.""" # Stem outputs = {} out = self.initial_conv(x) out = hk.max_pool(out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME') if return_metrics: outputs.update(base.signal_metrics(out, 0)) # Blocks for i, block in enumerate(self.blocks): out, res_avg_var = block(out, is_training=is_training) if return_metrics: outputs.update(base.signal_metrics(out, i + 1)) outputs[f'res_avg_var_{i}'] = res_avg_var # Final-conv->activation, pool, dropout, classify pool = jnp.mean(self.activation(out), [1, 2]) outputs['pool'] = pool # Optionally apply dropout if self.drop_rate > 0.0 and is_training: pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool) outputs['logits'] = self.fc(pool) return outputs def count_flops(self, h, w): 
flops = [] flops += [base.count_conv_flops(3, self.initial_conv, h, w)] h, w = h / 2, w / 2 # Body FLOPs for block in self.blocks: flops += [block.count_flops(h, w)] if block.stride > 1: h, w = h / block.stride, w / block.stride # Head module FLOPs out_ch = self.blocks[-1].out_ch flops += [base.count_conv_flops(out_ch, self.final_conv, h, w)] # Count flops for classifier flops += [self.final_conv.output_channels * self.fc.output_size] return flops, sum(flops) class NFResBlock(hk.Module): """Normalizer-Free pre-activation ResNet Block.""" def __init__(self, in_ch, out_ch, bottleneck_ratio=0.25, kernel_size=3, stride=1, which_conv=hk.Conv2D, activation=jax.nn.relu, stochdepth_rate=None, name=None): super().__init__(name=name) self.in_ch, self.out_ch = in_ch, out_ch self.kernel_size = kernel_size self.activation = activation # Bottleneck width self.width = int(self.out_ch * bottleneck_ratio) self.stride = stride # Conv 0 (typically expansion conv) self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME', name='conv0') # Grouped NxN conv self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride, padding='SAME', name='conv1') # Conv 2, typically projection conv self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME', name='conv2') # Use shortcut conv on channel change or downsample. self.use_projection = stride > 1 or self.in_ch != self.out_ch if self.use_projection: self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1, stride=stride, padding='SAME', name='conv_shortcut') # Are we using stochastic depth? self._has_stochdepth = (stochdepth_rate is not None and stochdepth_rate > 0. and stochdepth_rate < 1.0) if self._has_stochdepth: self.stoch_depth = base.StochDepth(stochdepth_rate) def __call__(self, x, is_training): out = self.activation(x) shortcut = x if self.use_projection: # Downsample with conv1x1 shortcut = self.conv_shortcut(out) out = self.conv0(out) out = self.conv1(self.activation(out)) out = self.conv2(self.activation(out)) # Get average residual standard deviation for reporting metrics. res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2])) # Apply stochdepth if applicable. if self._has_stochdepth: out = self.stoch_depth(out, is_training) # SkipInit Gain out = out * hk.get_parameter('skip_gain', (), out.dtype, init=jnp.zeros) return out + shortcut, res_avg_var def count_flops(self, h, w): # Count conv FLOPs based on input HW expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w) # If block is strided we decrease resolution here. dw_flops = base.count_conv_flops(self.width, self.conv1, h, w) if self.stride > 1: h, w = h / self.stride, w / self.stride if self.use_projection: sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w) else: sc_flops = 0 # SE flops happen on avg-pooled activations contract_flops = base.count_conv_flops(self.width, self.conv2, h, w) return sum([expand_flops, dw_flops, contract_flops, sc_flops])
deepmind-research-master
nfnets/skipinit_resnet.py
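A sketch highlighting the SkipInit property that every residual branch starts at zero: after initialization each block's skip_gain parameter is 0, so the network initially reduces to the stem plus shortcuts. drop_rate is again passed explicitly since the variant table stores only depths.

import haiku as hk
import jax
import jax.numpy as jnp

from nfnets import skipinit_resnet


def forward(images, is_training):
  model = skipinit_resnet.SkipInit_ResNet(
      num_classes=10, variant='ResNet50', width=1, drop_rate=0.0)
  return model(images, is_training=is_training)['logits']


forward_fn = hk.transform(forward)
params = forward_fn.init(jax.random.PRNGKey(0), jnp.zeros([1, 224, 224, 3]),
                         is_training=True)
skip_gains = [p['skip_gain'] for p in params.values() if 'skip_gain' in p]
print(len(skip_gains), float(skip_gains[0]))  # One zero-initialized gain per block.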
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Normalizer-Free RegNets.""" # pylint: disable=invalid-name import haiku as hk import jax import jax.numpy as jnp from nfnets import base class NF_RegNet(hk.Module): """Normalizer-Free RegNets.""" variant_dict = base.nf_regnet_params def __init__(self, num_classes, variant='B0', width=0.75, expansion=2.25, group_size=8, se_ratio=0.5, alpha=0.2, stochdepth_rate=0.1, drop_rate=None, activation='swish', fc_init=jnp.zeros, name='NF_RegNet'): super().__init__(name=name) self.num_classes = num_classes self.variant = variant self.width = width self.expansion = expansion self.group_size = group_size self.se_ratio = se_ratio # Get variant info block_params = self.variant_dict[self.variant] self.train_imsize = block_params['train_imsize'] self.test_imsize = block_params['test_imsize'] self.width_pattern = block_params['width'] self.depth_pattern = block_params['depth'] self.activation = base.nonlinearities[activation] if drop_rate is None: self.drop_rate = block_params['drop_rate'] else: self.drop_rate = drop_rate self.which_conv = base.WSConv2D # Stem ch = int(self.width_pattern[0] * self.width) self.initial_conv = self.which_conv(ch, kernel_shape=3, stride=2, padding='SAME', name='initial_conv') # Body self.blocks = [] expected_std = 1.0 num_blocks = sum(self.depth_pattern) index = 0 # Overall block index for block_width, stage_depth in zip(self.width_pattern, self.depth_pattern): for block_index in range(stage_depth): # Scalar pre-multiplier so each block sees an N(0,1) input at init beta = 1./ expected_std # Block stochastic depth drop-rate block_stochdepth_rate = stochdepth_rate * index / num_blocks # Use a bottleneck expansion ratio of 1 for first block following EffNet expand_ratio = 1 if index == 0 else expansion out_ch = (int(block_width * self.width)) self.blocks += [NFBlock(ch, out_ch, expansion=expand_ratio, se_ratio=se_ratio, group_size=self.group_size, stride=2 if block_index == 0 else 1, beta=beta, alpha=alpha, activation=self.activation, which_conv=self.which_conv, stochdepth_rate=block_stochdepth_rate, )] ch = out_ch index += 1 # Reset expected std but still give it 1 block of growth if block_index == 0: expected_std = 1.0 expected_std = (expected_std **2 + alpha**2)**0.5 # Head with final conv mimicking EffNets self.final_conv = self.which_conv(int(1280 * ch // 440), kernel_shape=1, padding='SAME', name='final_conv') self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True) def __call__(self, x, is_training=True, return_metrics=False): """Return the output of the final layer without any [log-]softmax.""" # Stem outputs = {} out = self.initial_conv(x) if return_metrics: outputs.update(base.signal_metrics(out, 0)) # Blocks for i, block in enumerate(self.blocks): out, res_avg_var = block(out, is_training=is_training) if return_metrics: outputs.update(base.signal_metrics(out, i + 1)) 
outputs[f'res_avg_var_{i}'] = res_avg_var # Final-conv->activation, pool, dropout, classify out = self.activation(self.final_conv(out)) pool = jnp.mean(out, [1, 2]) outputs['pool'] = pool # Optionally apply dropout if self.drop_rate > 0.0 and is_training: pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool) outputs['logits'] = self.fc(pool) return outputs def count_flops(self, h, w): flops = [] flops += [base.count_conv_flops(3, self.initial_conv, h, w)] h, w = h / 2, w / 2 # Body FLOPs for block in self.blocks: flops += [block.count_flops(h, w)] if block.stride > 1: h, w = h / block.stride, w / block.stride # Head module FLOPs out_ch = self.blocks[-1].out_ch flops += [base.count_conv_flops(out_ch, self.final_conv, h, w)] # Count flops for classifier flops += [self.final_conv.output_channels * self.fc.output_size] return flops, sum(flops) class NFBlock(hk.Module): """Normalizer-Free RegNet Block.""" def __init__(self, in_ch, out_ch, expansion=2.25, se_ratio=0.5, kernel_size=3, group_size=8, stride=1, beta=1.0, alpha=0.2, which_conv=base.WSConv2D, activation=jax.nn.relu, stochdepth_rate=None, name=None): super().__init__(name=name) self.in_ch, self.out_ch = in_ch, out_ch self.expansion = expansion self.se_ratio = se_ratio self.kernel_size = kernel_size self.activation = activation self.beta, self.alpha = beta, alpha # Round expanded with based on group count width = int(self.in_ch * expansion) self.groups = width // group_size self.width = group_size * self.groups self.stride = stride # Conv 0 (typically expansion conv) self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME', name='conv0') # Grouped NxN conv self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride, padding='SAME', feature_group_count=self.groups, name='conv1') # Conv 2, typically projection conv self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME', name='conv2') # Use shortcut conv on channel change or downsample. self.use_projection = stride > 1 or self.in_ch != self.out_ch if self.use_projection: self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1, padding='SAME', name='conv_shortcut') # Squeeze + Excite Module self.se = base.SqueezeExcite(self.width, self.width, self.se_ratio) # Are we using stochastic depth? self._has_stochdepth = (stochdepth_rate is not None and stochdepth_rate > 0. and stochdepth_rate < 1.0) if self._has_stochdepth: self.stoch_depth = base.StochDepth(stochdepth_rate) def __call__(self, x, is_training): out = self.activation(x) * self.beta if self.stride > 1: # Average-pool downsample. shortcut = hk.avg_pool(out, window_shape=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME') if self.use_projection: shortcut = self.conv_shortcut(shortcut) elif self.use_projection: shortcut = self.conv_shortcut(out) else: shortcut = x out = self.conv0(out) out = self.conv1(self.activation(out)) out = 2 * self.se(out) * out # Multiply by 2 for rescaling out = self.conv2(self.activation(out)) # Get average residual standard deviation for reporting metrics. res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2])) # Apply stochdepth if applicable. if self._has_stochdepth: out = self.stoch_depth(out, is_training) # SkipInit Gain out = out * hk.get_parameter('skip_gain', (), out.dtype, init=jnp.zeros) return out * self.alpha + shortcut, res_avg_var def count_flops(self, h, w): # Count conv FLOPs based on input HW expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w) # If block is strided we decrease resolution here. 
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w) if self.stride > 1: h, w = h / self.stride, w / self.stride if self.use_projection: sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w) else: sc_flops = 0 # SE flops happen on avg-pooled activations se_flops = self.se.fc0.output_size * self.width se_flops += self.se.fc0.output_size * self.se.fc1.output_size contract_flops = base.count_conv_flops(self.width, self.conv2, h, w) return sum([expand_flops, dw_flops, se_flops, contract_flops, sc_flops])
deepmind-research-master
nfnets/nf_regnet.py
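A minimal usage sketch for the model above, assuming the repository layout `nfnets/nf_regnet.py` (plus its `nfnets/base.py` dependency) is importable and that Haiku/JAX are installed; the variant, batch size, and image size are illustrative only, not the paper settings.

import haiku as hk
import jax
import jax.numpy as jnp

from nfnets.nf_regnet import NF_RegNet  # assumed import path


def forward(images, is_training):
  # Hedged sketch: num_classes/variant are illustrative defaults.
  model = NF_RegNet(num_classes=1000, variant='B0')
  return model(images, is_training=is_training)


net = hk.transform_with_state(forward)
rng = jax.random.PRNGKey(0)
images = jnp.zeros((2, 224, 224, 3))  # NHWC batch of two images
params, state = net.init(rng, images, is_training=True)
outputs, _ = net.apply(params, state, rng, images, is_training=False)
print(outputs['logits'].shape)  # (2, 1000)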
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Quick script to test that experiment can import and run.""" import jax import jax.numpy as jnp from nfnets import experiment from nfnets import experiment_nfnets def test_experiment(): """Tests the main experiment.""" config = experiment.get_config() exp_config = config.experiment_kwargs.config exp_config.train_batch_size = 2 exp_config.eval_batch_size = 2 exp_config.lr = 0.1 exp_config.fake_data = True exp_config.model_kwargs.width = 2 print(exp_config.model_kwargs) xp = experiment.Experiment('train', exp_config, jax.random.PRNGKey(0)) bcast = jax.pmap(lambda x: x) global_step = bcast(jnp.zeros(jax.local_device_count())) rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count())) print('Taking a single experiment step for test purposes!') result = xp.step(global_step, rng) print(f'Step successfully taken, resulting metrics are {result}') def test_nfnet_experiment(): """Tests the NFNet experiment.""" config = experiment_nfnets.get_config() exp_config = config.experiment_kwargs.config exp_config.train_batch_size = 2 exp_config.eval_batch_size = 2 exp_config.lr = 0.1 exp_config.fake_data = True exp_config.model_kwargs.width = 2 print(exp_config.model_kwargs) xp = experiment_nfnets.Experiment('train', exp_config, jax.random.PRNGKey(0)) bcast = jax.pmap(lambda x: x) global_step = bcast(jnp.zeros(jax.local_device_count())) rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count())) print('Taking a single NFNet experiment step for test purposes!') result = xp.step(global_step, rng) print(f'NFNet Step successfully taken, resulting metrics are {result}') test_experiment() test_nfnet_experiment()
deepmind-research-master
nfnets/test.py
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Adaptive gradient clipping transform for Optax.""" import jax import jax.numpy as jnp import optax def compute_norm(x, axis, keepdims): """Axis-wise euclidean norm.""" return jnp.sum(x ** 2, axis=axis, keepdims=keepdims) ** 0.5 def unitwise_norm(x): """Compute norms of each output unit separately, also for linear layers.""" if len(jnp.squeeze(x).shape) <= 1: # Scalars and vectors axis = None keepdims = False elif len(x.shape) in [2, 3]: # Linear layers of shape IO or multihead linear axis = 0 keepdims = True elif len(x.shape) == 4: # Conv kernels of shape HWIO axis = [0, 1, 2,] keepdims = True else: raise ValueError(f'Got a parameter with shape not in [1, 2, 4]! {x}') return compute_norm(x, axis, keepdims) def my_clip(g_norm, max_norm, grad): """Applies my gradient clipping unit-wise.""" trigger = g_norm < max_norm # This little max(., 1e-6) is distinct from the normal eps and just prevents # division by zero. It technically should be impossible to engage. clipped_grad = grad * (max_norm / jnp.maximum(g_norm, 1e-6)) return jnp.where(trigger, grad, clipped_grad) def adaptive_grad_clip(clip, eps=1e-3) -> optax.GradientTransformation: """Clip updates to be at most clipping * parameter_norm. References: [Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image Recognition Without Normalization. Args: clip: Maximum allowed ratio of update norm to parameter norm. eps: epsilon term to prevent clipping of zero-initialized params. Returns: An (init_fn, update_fn) tuple. """ def init_fn(_): return optax.ClipByGlobalNormState() def update_fn(updates, state, params): g_norm = jax.tree_map(unitwise_norm, updates) p_norm = jax.tree_map(unitwise_norm, params) # Maximum allowable norm max_norm = jax.tree_map(lambda x: clip * jnp.maximum(x, eps), p_norm) # If grad norm > clipping * param_norm, rescale updates = jax.tree_multimap(my_clip, g_norm, max_norm, updates) return updates, state return optax.GradientTransformation(init_fn, update_fn)
deepmind-research-master
nfnets/agc_optax.py
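A minimal sketch of composing the transform above with a plain SGD step via `optax.chain`, assuming the file is importable as `nfnets.agc_optax`; note that `update` must be given `params`, since the clipping threshold is relative to each parameter's norm.

import jax
import jax.numpy as jnp
import optax

from nfnets.agc_optax import adaptive_grad_clip  # assumed import path

# Hedged sketch: toy parameters and stand-in gradients; clip=0.01 mirrors the NFNet config.
params = {'w': jnp.ones((3, 3, 8, 16)), 'b': jnp.zeros((16,))}
grads = jax.tree_map(lambda p: 0.5 * p, params)

tx = optax.chain(adaptive_grad_clip(clip=0.01), optax.sgd(learning_rate=0.1))
opt_state = tx.init(params)
updates, opt_state = tx.update(grads, opt_state, params)  # params are required by AGC
params = optax.apply_updates(params, updates)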
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ImageNet dataset with typical pre-processing and advanced augs.""" # pylint: disable=logging-format-interpolation import enum import itertools as it import logging import re from typing import Generator, Mapping, Optional, Sequence, Text, Tuple import jax import jax.numpy as jnp import numpy as np import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds import tensorflow_probability as tfp from nfnets import autoaugment Batch = Mapping[Text, np.ndarray] MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255) STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255) AUTOTUNE = tf.data.experimental.AUTOTUNE class Split(enum.Enum): """Imagenet dataset split.""" TRAIN = 1 TRAIN_AND_VALID = 2 VALID = 3 TEST = 4 @classmethod def from_string(cls, name: Text) -> 'Split': return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID, 'VALID': Split.VALID, 'VALIDATION': Split.VALID, 'TEST': Split.TEST}[name.upper()] @property def num_examples(self): return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167, Split.VALID: 10000, Split.TEST: 50000}[self] def load( split: Split, *, is_training: bool, batch_dims: Sequence[int], name: str = 'imagenet', dtype: jnp.dtype = jnp.float32, transpose: bool = False, fake_data: bool = False, image_size: Tuple[int, int] = (224, 224), augment_name: Optional[str] = None, eval_preproc: str = 'crop_resize', augment_before_mix: bool = True, ) -> Generator[Batch, None, None]: """Loads the given split of the dataset. Args: split: Dataset split to use. is_training: If true, use training preproc and augmentation. batch_dims: List indicating how to batch the dataset (typically expected to be of shape (num_devices, bs_per_device) name: Which dataset to use, (must be 'imagenet') dtype: One of float32 or bfloat16 (bf16 may not be supported fully) transpose: If true, employs double transpose trick. fake_data: Return batches of fake data for debugging purposes. image_size: Final image size returned by dataset pipeline. Note that the exact procedure to arrive at this size will depend on the chosen preproc. augment_name: Optional additional aug strategy (applied atop the default of distorted bboxes and random L/R flips). Specified with a string such as 'cutmix_mixup_0.4_randaugment_415'. See README for deets. eval_preproc: Eval preproc method, either 'crop_resize' (crop on the long edge then resize) or `resize_crop_{pct}`, which will resize the image to `image_size / pct` on each side then take a center crop. augment_before_mix: Apply augs like RA/AA before or after cutmix/mixup. Yields: A TFDS numpy iterator. 
""" start, end = _shard(split, jax.host_id(), jax.host_count()) if fake_data: print('Using fake data!') images = np.zeros(tuple(batch_dims) + image_size + (3,), dtype=dtype) labels = np.zeros(tuple(batch_dims), dtype=np.int32) if transpose: axes = tuple(range(images.ndim)) axes = axes[:-4] + axes[-3:] + (axes[-4],) # NHWC -> HWCN images = np.transpose(images, axes) yield from it.repeat({'images': images, 'labels': labels}, end - start) return total_batch_size = np.prod(batch_dims) if name.lower() == 'imagenet': tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split), from_=start, to=end, unit='abs') ds = tfds.load('imagenet2012:5.*.*', split=tfds_split, decoders={'image': tfds.decode.SkipDecoding()}) else: raise ValueError('Only imagenet is presently supported for this dataset.') options = ds.options() options.experimental_threading.private_threadpool_size = 48 options.experimental_threading.max_intra_op_parallelism = 1 options.experimental_optimization.map_parallelization = True options.experimental_optimization.parallel_batch = True options.experimental_optimization.hoist_random_uniform = True if is_training: options.experimental_deterministic = False if is_training: if jax.host_count() > 1: # Only cache if we are reading a subset of the dataset. ds = ds.cache() ds = ds.repeat() ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=None) else: if split.num_examples % total_batch_size != 0: raise ValueError(f'Test/valid must be divisible by {total_batch_size}') def augment_normalize(batch): """Optionally augment, then normalize an image.""" batch = dict(**batch) image = _augment_image(batch['images'], is_training, augment_name) batch['images'] = _normalize_image(image) return batch def preprocess(example): image = _preprocess_image(example['image'], is_training, image_size, eval_preproc) label = tf.cast(example['label'], tf.int32) out = {'images': image, 'labels': label} if augment_name is not None and 'cutmix' in augment_name: out['mask'] = cutmix_padding(*image_size) out['cutmix_ratio'] = tf.reduce_mean(out['mask']) if augment_name is not None and 'mixup' in augment_name: mixup_alpha = 0.2 # default to alpha=0.2 # If float provided, get it if 'mixup_' in augment_name: alpha = augment_name.split('mixup_')[1].split('_') if any(alpha) and re.match(r'^-?\d+(?:\.\d+)?$', alpha[0]) is not None: mixup_alpha = float(alpha[0]) beta = tfp.distributions.Beta(mixup_alpha, mixup_alpha) out['mixup_ratio'] = beta.sample() # Apply augs before mixing? if augment_before_mix or augment_name is None: out = augment_normalize(out) return out ds = ds.map(preprocess, num_parallel_calls=AUTOTUNE) ds = ds.prefetch(AUTOTUNE) def transpose_fn(batch): # Applies the double-transpose trick for TPU. 
batch = dict(**batch) batch['images'] = tf.transpose(batch['images'], (1, 2, 3, 0)) return batch def cast_fn(batch): batch = dict(**batch) batch['images'] = tf.cast(batch['images'], _to_tf_dtype(dtype)) return batch for i, batch_size in enumerate(reversed(batch_dims)): if i == 0: # Deal with vectorized MixUp + CutMix ops if augment_name is not None: if 'mixup' in augment_name or 'cutmix' in augment_name: ds = ds.batch(batch_size * 2) else: ds = ds.map(augment_normalize, num_parallel_calls=AUTOTUNE) ds = ds.batch(batch_size) # Apply mixup, cutmix, or mixup + cutmix if 'mixup' in augment_name and 'cutmix' not in augment_name: logging.info('Applying MixUp!') ds = ds.map(my_mixup, num_parallel_calls=AUTOTUNE) elif 'cutmix' in augment_name and 'mixup' not in augment_name: logging.info('Applying CutMix!') ds = ds.map(my_cutmix, num_parallel_calls=AUTOTUNE) elif 'mixup' in augment_name and 'cutmix' in augment_name: logging.info('Applying MixUp and CutMix!') ds = ds.map(my_mixup_cutmix, num_parallel_calls=AUTOTUNE) # If applying augs after mixing, unbatch, map, and rebatch if (not augment_before_mix and ('mixup' in augment_name or 'cutmix' in augment_name)): ds = ds.unbatch().map(augment_normalize, num_parallel_calls=AUTOTUNE) ds = ds.batch(batch_size) else: ds = ds.batch(batch_size) # Transpose and cast as needbe if transpose: ds = ds.map(transpose_fn) # NHWC -> HWCN # NOTE: You may be tempted to move the casting earlier on in the pipeline, # but for bf16 some operations will end up silently placed on the TPU and # this causes stalls while TF and JAX battle for the accelerator. ds = ds.map(cast_fn) else: ds = ds.batch(batch_size) ds = ds.prefetch(AUTOTUNE) ds = tfds.as_numpy(ds) if dtype == jnp.bfloat16: # JAX and TF disagree on the NumPy bfloat16 type so we need to reinterpret # tf_bfloat16->jnp.bfloat16. for batch in ds: batch['images'] = batch['images'].view(jnp.bfloat16) yield batch else: yield from ds def cutmix_padding(h, w): """Returns image mask for CutMix. Taken from (https://github.com/google/edward2/blob/master/experimental /marginalization_mixup/data_utils.py#L367) Args: h: image height. w: image width. """ r_x = tf.random.uniform([], 0, w, tf.int32) r_y = tf.random.uniform([], 0, h, tf.int32) # Beta dist in paper, but they used Beta(1,1) which is just uniform. image1_proportion = tf.random.uniform([]) patch_length_ratio = tf.math.sqrt(1 - image1_proportion) r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32) r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32) bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w) bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h) bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w) bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h) # Create the binary mask. pad_left = bbx1 pad_top = bby1 pad_right = tf.maximum(w - bbx2, 0) pad_bottom = tf.maximum(h - bby2, 0) r_h = bby2 - bby1 r_w = bbx2 - bbx1 mask = tf.pad( tf.ones((r_h, r_w)), paddings=[[pad_top, pad_bottom], [pad_left, pad_right]], mode='CONSTANT', constant_values=0) mask.set_shape((h, w)) return mask[..., None] # Add channel dim. 
def my_cutmix(batch): """Cutmix.""" batch = dict(**batch) bs = tf.shape(batch['images'])[0] // 2 mask = batch['mask'][:bs] images = (mask * batch['images'][:bs] + (1.0 - mask) * batch['images'][bs:]) mix_labels = batch['labels'][bs:] labels = batch['labels'][:bs] ratio = batch['cutmix_ratio'][:bs] return {'images': images, 'labels': labels, 'mix_labels': mix_labels, 'ratio': ratio} def my_mixup(batch): """Mixup.""" batch = dict(**batch) bs = tf.shape(batch['images'])[0] // 2 ratio = batch['mixup_ratio'][:bs, None, None, None] images = (ratio * batch['images'][:bs] + (1.0 - ratio) * batch['images'][bs:]) mix_labels = batch['labels'][bs:] labels = batch['labels'][:bs] ratio = ratio[..., 0, 0, 0] # Unsqueeze return {'images': images, 'labels': labels, 'mix_labels': mix_labels, 'ratio': ratio} def mixup_or_cutmix(batch): """Randomly applies one of cutmix or mixup to a batch.""" logging.info('Randomly applying cutmix or mixup with 50% chance!') return tf.cond( tf.cast(tf.random.uniform([], maxval=2, dtype=tf.int32), tf.bool), lambda: my_mixup(batch), lambda: my_cutmix(batch)) def my_mixup_cutmix(batch): """Apply mixup to half the batch, and cutmix to the other.""" batch = dict(**batch) bs = tf.shape(batch['images'])[0] // 4 mixup_ratio = batch['mixup_ratio'][:bs, None, None, None] mixup_images = (mixup_ratio * batch['images'][:bs] + (1.0 - mixup_ratio) * batch['images'][bs:2*bs]) mixup_labels = batch['labels'][:bs] mixup_mix_labels = batch['labels'][bs:2*bs] cutmix_mask = batch['mask'][2*bs:3*bs] cutmix_images = (cutmix_mask * batch['images'][2*bs:3*bs] + (1.0 - cutmix_mask) * batch['images'][-bs:]) cutmix_labels = batch['labels'][2*bs:3*bs] cutmix_mix_labels = batch['labels'][-bs:] cutmix_ratio = batch['cutmix_ratio'][2*bs : 3*bs] return {'images': tf.concat([mixup_images, cutmix_images], axis=0), 'labels': tf.concat([mixup_labels, cutmix_labels], axis=0), 'mix_labels': tf.concat([mixup_mix_labels, cutmix_mix_labels], 0), 'ratio': tf.concat([mixup_ratio[..., 0, 0, 0], cutmix_ratio], axis=0)} def _to_tf_dtype(jax_dtype: jnp.dtype) -> tf.DType: if jax_dtype == jnp.bfloat16: return tf.bfloat16 else: return tf.dtypes.as_dtype(jax_dtype) def _to_tfds_split(split: Split) -> tfds.Split: """Returns the TFDS split appropriately sharded.""" if split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID): return tfds.Split.TRAIN else: assert split == Split.TEST return tfds.Split.VALIDATION def _shard(split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]: """Returns [start, end) for the given shard index.""" assert shard_index < num_shards arange = np.arange(split.num_examples) shard_range = np.array_split(arange, num_shards)[shard_index] start, end = shard_range[0], (shard_range[-1] + 1) if split == Split.TRAIN: # Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000]. offset = Split.VALID.num_examples start += offset end += offset return start, end def _preprocess_image( image_bytes: tf.Tensor, is_training: bool, image_size: Sequence[int], eval_preproc: str = 'crop_resize' ) -> tf.Tensor: """Returns processed and resized images.""" # NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without # clamping overshoots. This means values returned will be outside the range # [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]). 
if is_training: image = _decode_and_random_crop(image_bytes, image_size) image = tf.image.random_flip_left_right(image) assert image.dtype == tf.uint8 image = tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC) else: if eval_preproc == 'crop_resize': image = _decode_and_center_crop(image_bytes, image_size=image_size) assert image.dtype == tf.uint8 image = tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC) elif 'resize_crop' in eval_preproc: # Pass in crop percent crop_pct = float(eval_preproc.split('_')[-1]) image = _decode_and_resize_then_crop(image_bytes, image_size=image_size, crop_pct=crop_pct) else: raise ValueError(f'Unknown Eval Preproc {eval_preproc} provided!') return image def _augment_image( image: tf.Tensor, is_training: bool, augment_name: Optional[str] = None, ) -> tf.Tensor: """Applies AA/RA to an image.""" if is_training and augment_name: if 'autoaugment' in augment_name or 'randaugment' in augment_name: input_image_type = image.dtype image = tf.clip_by_value(image, 0.0, 255.0) # Autoaugment requires a uint8 image; we cast here and then cast back image = tf.cast(image, dtype=tf.uint8) if 'autoaugment' in augment_name: logging.info(f'Applying AutoAugment policy {augment_name}') image = autoaugment.distort_image_with_autoaugment(image, 'v0') elif 'randaugment' in augment_name: magnitude = int(augment_name.split('_')[-1]) # pytype: disable=attribute-error # Allow passing in num_layers as a magnitude > 100 if magnitude > 100: num_layers = magnitude // 100 magnitude = magnitude - int(num_layers * 100) else: num_layers = 2 logging.info(f'Applying RA {num_layers} x {magnitude}') image = autoaugment.distort_image_with_randaugment( image, num_layers=num_layers, magnitude=magnitude) image = tf.cast(image, dtype=input_image_type) return image def _normalize_image(image: tf.Tensor) -> tf.Tensor: """Normalize the image to zero mean and unit variance.""" image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype) image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype) return image def _distorted_bounding_box_crop( image_bytes: tf.Tensor, *, jpeg_shape: tf.Tensor, bbox: tf.Tensor, min_object_covered: float, aspect_ratio_range: Tuple[float, float], area_range: Tuple[float, float], max_attempts: int, ) -> tf.Tensor: """Generates cropped_image using one of the bboxes randomly distorted.""" bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box( jpeg_shape, bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) # Crop the image to the specified bounding box. offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) crop_window = [offset_y, offset_x, target_height, target_width] image = crop(image_bytes, crop_window) return image def _decode_and_random_crop(image_bytes: tf.Tensor, image_size: Sequence[int] = (224, 224), jpeg_shape: Optional[tf.Tensor] = None ) -> tf.Tensor: """Make a random crop of chosen size.""" if jpeg_shape is None: jpeg_shape = get_shape(image_bytes) bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) image = _distorted_bounding_box_crop( image_bytes, jpeg_shape=jpeg_shape, bbox=bbox, min_object_covered=0.1, aspect_ratio_range=(3 / 4, 4 / 3), area_range=(0.08, 1.0), max_attempts=10) if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))): # If the random crop failed fall back to center crop. 
image = _decode_and_center_crop(image_bytes, jpeg_shape, image_size) return image def _decode_and_center_crop( image_bytes: tf.Tensor, jpeg_shape: Optional[tf.Tensor] = None, image_size: Sequence[int] = (224, 224), ) -> tf.Tensor: """Crops to center of image with padding then scales.""" if jpeg_shape is None: jpeg_shape = get_shape(image_bytes) image_height = jpeg_shape[0] image_width = jpeg_shape[1] # Pad the image with at least 32px on the short edge and take a # crop that maintains aspect ratio. scale = tf.minimum(tf.cast(image_height, tf.float32) / (image_size[0] + 32), tf.cast(image_width, tf.float32) / (image_size[1] + 32)) padded_center_crop_height = tf.cast(scale * image_size[0], tf.int32) padded_center_crop_width = tf.cast(scale * image_size[1], tf.int32) offset_height = ((image_height - padded_center_crop_height) + 1) // 2 offset_width = ((image_width - padded_center_crop_width) + 1) // 2 crop_window = [offset_height, offset_width, padded_center_crop_height, padded_center_crop_width] image = crop(image_bytes, crop_window) return image def get_shape(image_bytes): """Gets the image shape for jpeg bytes or a uint8 decoded image.""" if image_bytes.dtype == tf.dtypes.string: image_shape = tf.image.extract_jpeg_shape(image_bytes) else: image_shape = tf.shape(image_bytes) return image_shape def crop(image_bytes, crop_window): """Helper function to crop a jpeg or a decoded image.""" if image_bytes.dtype == tf.dtypes.string: image = tf.image.decode_and_crop_jpeg(image_bytes, tf.stack(crop_window), channels=3) else: image = tf.image.crop_to_bounding_box(image_bytes, *crop_window) return image def _decode_and_resize_then_crop( image_bytes: tf.Tensor, image_size: Sequence[int] = (224, 224), crop_pct: float = 1.0, ) -> tf.Tensor: """Rescales an image to image_size / crop_pct, then center crops.""" image = tf.image.decode_jpeg(image_bytes, channels=3) # Scale image to "scaled size" before taking a center crop if crop_pct > 1.0: # If crop_pct is >1, treat it as num pad pixels (like VGG) scale_size = tuple([int(x + crop_pct) for x in image_size]) else: scale_size = tuple([int(float(x) / crop_pct) for x in image_size]) image = tf.image.resize(image, scale_size, tf.image.ResizeMethod.BICUBIC) crop_height = tf.cast(image_size[0], tf.int32) crop_width = tf.cast(image_size[1], tf.int32) offset_height = ((scale_size[0] - crop_height) + 1) // 2 offset_width = ((scale_size[1] - crop_width) + 1) // 2 crop_window = [offset_height, offset_width, crop_height, crop_width] image = crop(image, crop_window) return image
deepmind-research-master
nfnets/dataset.py
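A minimal sketch of pulling one batch from the pipeline above using fake data (so no TFDS download is required), assuming the file is importable as `nfnets.dataset`; the batch dimensions are illustrative.

import jax
from nfnets import dataset  # assumed import path

# Hedged sketch: fake_data short-circuits TFDS and yields zero-filled batches.
it = dataset.load(
    dataset.Split.TRAIN_AND_VALID,
    is_training=True,
    batch_dims=[jax.local_device_count(), 4],
    image_size=(224, 224),
    fake_data=True)
batch = next(it)
print(batch['images'].shape, batch['labels'].shape)
# e.g. (1, 4, 224, 224, 3) and (1, 4) on a single-device host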
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Basic Jaxline ImageNet experiment.""" import importlib import sys from absl import flags from absl import logging import haiku as hk import jax import jax.numpy as jnp from jaxline import base_config from jaxline import experiment from jaxline import platform from jaxline import utils as jl_utils from ml_collections import config_dict import numpy as np from nfnets import dataset from nfnets import optim from nfnets import utils # pylint: disable=logging-format-interpolation FLAGS = flags.FLAGS # We define the experiment launch config in the same file as the experiment to # keep things self-contained in a single file, but one might consider moving the # config and/or sweep functions to a separate file, if necessary. def get_config(): """Return config object for training.""" config = base_config.get_base_config() # Experiment config. train_batch_size = 1024 # Global batch size. images_per_epoch = 1281167 num_epochs = 90 steps_per_epoch = images_per_epoch / train_batch_size config.training_steps = ((images_per_epoch * num_epochs) // train_batch_size) config.random_seed = 0 config.experiment_kwargs = config_dict.ConfigDict( dict( config=dict( lr=0.1, num_epochs=num_epochs, label_smoothing=0.1, model='ResNet', image_size=224, use_ema=False, ema_decay=0.9999, # Quatros nuevos amigos ema_start=0, which_ema='tf1_ema', augment_name=None, # 'mixup_cutmix', augment_before_mix=True, eval_preproc='crop_resize', train_batch_size=train_batch_size, eval_batch_size=50, eval_subset='test', num_classes=1000, which_dataset='imagenet', fake_data=False, which_loss='softmax_cross_entropy', # For now, must be softmax transpose=True, # Use the double-transpose trick? bfloat16=False, lr_schedule=dict( name='WarmupCosineDecay', kwargs=dict(num_steps=config.training_steps, start_val=0, min_val=0, warmup_steps=5*steps_per_epoch), ), lr_scale_by_bs=True, optimizer=dict( name='SGD', kwargs={'momentum': 0.9, 'nesterov': True, 'weight_decay': 1e-4,}, ), model_kwargs=dict( width=4, which_norm='BatchNorm', norm_kwargs=dict(create_scale=True, create_offset=True, decay_rate=0.9, ), # cross_replica_axis='i'), variant='ResNet50', activation='relu', drop_rate=0.0, ), ),)) # Training loop config: log and checkpoint every minute config.log_train_data_interval = 60 config.log_tensors_interval = 60 config.save_checkpoint_interval = 60 config.eval_specific_checkpoint_dir = '' return config class Experiment(experiment.AbstractExperiment): """Imagenet experiment.""" CHECKPOINT_ATTRS = { '_params': 'params', '_state': 'state', '_ema_params': 'ema_params', '_ema_state': 'ema_state', '_opt_state': 'opt_state', } def __init__(self, mode, config, init_rng): super().__init__(mode=mode) self.mode = mode self.config = config self.init_rng = init_rng # Checkpointed experiment state. 
self._params = None self._state = None self._ema_params = None self._ema_state = None self._opt_state = None # Input pipelines. self._train_input = None self._eval_input = None # Get model, loaded in from the zoo self.model_module = importlib.import_module( ('nfnets.'+ self.config.model.lower())) self.net = hk.transform_with_state(self._forward_fn) # Assign image sizes if self.config.get('override_imsize', False): self.train_imsize = self.config.image_size self.test_imsize = self.config.get('eval_image_size', self.train_imsize) else: variant_dict = getattr(self.model_module, self.config.model).variant_dict variant_dict = variant_dict[self.config.model_kwargs.variant] self.train_imsize = variant_dict.get('train_imsize', self.config.image_size) # Test imsize defaults to model-specific value, then to config imsize test_imsize = self.config.get('eval_image_size', self.config.image_size) self.test_imsize = variant_dict.get('test_imsize', test_imsize) donate_argnums = (0, 1, 2, 6, 7) if self.config.use_ema else (0, 1, 2) self.train_fn = jax.pmap(self._train_fn, axis_name='i', donate_argnums=donate_argnums) self.eval_fn = jax.pmap(self._eval_fn, axis_name='i') def _initialize_train(self): self._train_input = self._build_train_input() # Initialize net and EMA copy of net if no params available. if self._params is None: inputs = next(self._train_input) init_net = jax.pmap(lambda *a: self.net.init(*a, is_training=True), axis_name='i') init_rng = jl_utils.bcast_local_devices(self.init_rng) self._params, self._state = init_net(init_rng, inputs) if self.config.use_ema: self._ema_params, self._ema_state = init_net(init_rng, inputs) num_params = hk.data_structures.tree_size(self._params) logging.info(f'Net parameters: {num_params / jax.local_device_count()}') self._make_opt() def _make_opt(self): # Separate conv params and gains/biases def pred(mod, name, val): # pylint:disable=unused-argument return (name in ['scale', 'offset', 'b'] or 'gain' in name or 'bias' in name) gains_biases, weights = hk.data_structures.partition(pred, self._params) # Lr schedule with batch-based LR scaling if self.config.lr_scale_by_bs: max_lr = (self.config.lr * self.config.train_batch_size) / 256 else: max_lr = self.config.lr lr_sched_fn = getattr(optim, self.config.lr_schedule.name) lr_schedule = lr_sched_fn(max_val=max_lr, **self.config.lr_schedule.kwargs) # Optimizer; no need to broadcast! 
opt_kwargs = {key: val for key, val in self.config.optimizer.kwargs.items()} opt_kwargs['lr'] = lr_schedule opt_module = getattr(optim, self.config.optimizer.name) self.opt = opt_module([{'params': gains_biases, 'weight_decay': None}, {'params': weights}], **opt_kwargs) if self._opt_state is None: self._opt_state = self.opt.states() else: self.opt.plugin(self._opt_state) def _forward_fn(self, inputs, is_training): net_kwargs = {'num_classes': self.config.num_classes, **self.config.model_kwargs} net = getattr(self.model_module, self.config.model)(**net_kwargs) if self.config.get('transpose', False): images = jnp.transpose(inputs['images'], (3, 0, 1, 2)) # HWCN -> NHWC else: images = inputs['images'] if self.config.bfloat16 and self.mode == 'train': images = utils.to_bf16(images) return net(images, is_training=is_training)['logits'] def _one_hot(self, value): """One-hot encoding potentially over a sequence of labels.""" y = jax.nn.one_hot(value, self.config.num_classes) return y def _loss_fn(self, params, state, inputs, rng): logits, state = self.net.apply(params, state, rng, inputs, is_training=True) y = self._one_hot(inputs['labels']) if 'mix_labels' in inputs: # Handle cutmix/mixup label mixing logging.info('Using mixup or cutmix!') y1 = self._one_hot(inputs['mix_labels']) y = inputs['ratio'][:, None] * y + (1. - inputs['ratio'][:, None]) * y1 if self.config.label_smoothing > 0: # get smoothy spositives = 1. - self.config.label_smoothing snegatives = self.config.label_smoothing / self.config.num_classes y = spositives * y + snegatives if self.config.bfloat16: # Cast logits to float32 logits = logits.astype(jnp.float32) which_loss = getattr(utils, self.config.which_loss) loss = which_loss(logits, y, reduction='mean') metrics = utils.topk_correct(logits, inputs['labels'], prefix='train_') # Average top-1 and top-5 correct labels metrics = jax.tree_map(jnp.mean, metrics) metrics['train_loss'] = loss # Metrics will be pmeaned so don't divide here scaled_loss = loss / jax.device_count() # Grads get psummed so do divide return scaled_loss, (metrics, state) def _train_fn(self, params, states, opt_states, inputs, rng, global_step, ema_params, ema_states): """Runs one batch forward + backward and run a single opt step.""" grad_fn = jax.grad(self._loss_fn, argnums=0, has_aux=True) if self.config.bfloat16: in_params, states = jax.tree_map(utils.to_bf16, (params, states)) else: in_params = params grads, (metrics, states) = grad_fn(in_params, states, inputs, rng) if self.config.bfloat16: states, metrics, grads = jax.tree_map(utils.from_bf16, (states, metrics, grads)) # Sum gradients and average losses for pmap grads = jax.lax.psum(grads, 'i') metrics = jax.lax.pmean(metrics, 'i') # Compute updates and update parameters metrics['learning_rate'] = self.opt._hyperparameters['lr'](global_step) # pylint: disable=protected-access params, opt_states = self.opt.step(params, grads, opt_states, global_step) if ema_params is not None: ema_fn = getattr(utils, self.config.get('which_ema', 'tf1_ema')) ema = lambda x, y: ema_fn(x, y, self.config.ema_decay, global_step) ema_params = jax.tree_multimap(ema, ema_params, params) ema_states = jax.tree_multimap(ema, ema_states, states) return {'params': params, 'states': states, 'opt_states': opt_states, 'ema_params': ema_params, 'ema_states': ema_states, 'metrics': metrics} # _ _ # | |_ _ __ __ _(_)_ __ # | __| '__/ _` | | '_ \ # | |_| | | (_| | | | | | # \__|_| \__,_|_|_| |_| # def step(self, global_step, rng, *unused_args, **unused_kwargs): if self._train_input is None: 
self._initialize_train() inputs = next(self._train_input) out = self.train_fn(params=self._params, states=self._state, opt_states=self._opt_state, inputs=inputs, rng=rng, global_step=global_step, ema_params=self._ema_params, ema_states=self._ema_state) self._params, self._state = out['params'], out['states'] self._opt_state = out['opt_states'] self._ema_params, self._ema_state = out['ema_params'], out['ema_states'] self.opt.plugin(self._opt_state) return jl_utils.get_first(out['metrics']) def _build_train_input(self): num_devices = jax.device_count() global_batch_size = self.config.train_batch_size bs_per_device, ragged = divmod(global_batch_size, num_devices) if ragged: raise ValueError( f'Global batch size {global_batch_size} must be divisible by ' f'num devices {num_devices}') return dataset.load( dataset.Split.TRAIN_AND_VALID, is_training=True, batch_dims=[jax.local_device_count(), bs_per_device], transpose=self.config.get('transpose', False), image_size=(self.train_imsize,) * 2, augment_name=self.config.augment_name, augment_before_mix=self.config.get('augment_before_mix', True), name=self.config.which_dataset, fake_data=self.config.get('fake_data', False)) # _ # _____ ____ _| | # / _ \ \ / / _` | | # | __/\ V / (_| | | # \___| \_/ \__,_|_| # def evaluate(self, global_step, **unused_args): metrics = self._eval_epoch(self._params, self._state) if self.config.use_ema: ema_metrics = self._eval_epoch(self._ema_params, self._ema_state) metrics.update({f'ema_{key}': val for key, val in ema_metrics.items()}) logging.info(f'[Step {global_step}] Eval scalars: {metrics}') return metrics def _eval_epoch(self, params, state): """Evaluates an epoch.""" num_samples = 0. summed_metrics = None for inputs in self._build_eval_input(): num_samples += np.prod(inputs['labels'].shape[:2]) # Account for pmaps metrics = self.eval_fn(params, state, inputs) # Accumulate the sum of metrics for each step. metrics = jax.tree_map(lambda x: jnp.sum(x[0], axis=0), metrics) if summed_metrics is None: summed_metrics = metrics else: summed_metrics = jax.tree_multimap(jnp.add, summed_metrics, metrics) mean_metrics = jax.tree_map(lambda x: x / num_samples, summed_metrics) return jax.device_get(mean_metrics) def _eval_fn(self, params, state, inputs): """Evaluate a single batch and return loss and top-k acc.""" logits, _ = self.net.apply(params, state, None, inputs, is_training=False) y = self._one_hot(inputs['labels']) which_loss = getattr(utils, self.config.which_loss) loss = which_loss(logits, y, reduction=None) metrics = utils.topk_correct(logits, inputs['labels'], prefix='eval_') metrics['eval_loss'] = loss return jax.lax.psum(metrics, 'i') def _build_eval_input(self): """Builds the evaluation input pipeline.""" bs_per_device = (self.config.eval_batch_size // jax.local_device_count()) split = dataset.Split.from_string(self.config.eval_subset) eval_preproc = self.config.get('eval_preproc', 'crop_resize') return dataset.load(split, is_training=False, batch_dims=[jax.local_device_count(), bs_per_device], transpose=self.config.get('transpose', False), image_size=(self.test_imsize,) * 2, name=self.config.which_dataset, eval_preproc=eval_preproc, fake_data=self.config.get('fake_data', False)) if __name__ == '__main__': flags.mark_flag_as_required('config') platform.main(Experiment, sys.argv[1:])
deepmind-research-master
nfnets/experiment.py
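Two small details of the training step above that are easy to check in isolation: the batch-size LR scaling in `_make_opt` and the label smoothing in `_loss_fn`. A minimal sketch with illustrative numbers, not part of the experiment code:

import jax
import jax.numpy as jnp

# lr_scale_by_bs: peak LR follows the linear scaling rule lr * batch_size / 256.
max_lr = 0.1 * 1024 / 256  # -> 0.4

# Label smoothing as in _loss_fn: blend one-hot targets towards the uniform distribution.
num_classes, smoothing = 1000, 0.1
y = jax.nn.one_hot(jnp.array([3, 7]), num_classes)
y = (1.0 - smoothing) * y + smoothing / num_classes  # each row still sums to 1
print(max_lr, float(y.sum(axis=-1)[0]))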
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""ImageNet experiment with NFNets.""" import haiku as hk from ml_collections import config_dict from nfnets import experiment from nfnets import optim def get_config(): """Return config object for training.""" config = experiment.get_config() # Experiment config. train_batch_size = 4096 # Global batch size. images_per_epoch = 1281167 num_epochs = 360 steps_per_epoch = images_per_epoch / train_batch_size config.training_steps = ((images_per_epoch * num_epochs) // train_batch_size) config.random_seed = 0 config.experiment_kwargs = config_dict.ConfigDict( dict( config=dict( lr=0.1, num_epochs=num_epochs, label_smoothing=0.1, model='NFNet', image_size=224, use_ema=True, ema_decay=0.99999, ema_start=0, augment_name=None, augment_before_mix=False, eval_preproc='resize_crop_32', train_batch_size=train_batch_size, eval_batch_size=50, eval_subset='test', num_classes=1000, which_dataset='imagenet', which_loss='softmax_cross_entropy', # One of softmax or sigmoid bfloat16=True, lr_schedule=dict( name='WarmupCosineDecay', kwargs=dict(num_steps=config.training_steps, start_val=0, min_val=0.0, warmup_steps=5*steps_per_epoch), ), lr_scale_by_bs=True, optimizer=dict( name='SGD_AGC', kwargs={'momentum': 0.9, 'nesterov': True, 'weight_decay': 2e-5, 'clipping': 0.01, 'eps': 1e-3}, ), model_kwargs=dict( variant='F0', width=1.0, se_ratio=0.5, alpha=0.2, stochdepth_rate=0.25, drop_rate=None, # Use native drop-rate activation='gelu', final_conv_mult=2, final_conv_ch=None, use_two_convs=True, ), ))) # Unlike NF-RegNets, use the same weight decay for all, but vary RA levels variant = config.experiment_kwargs.config.model_kwargs.variant # RandAugment levels (e.g. 405 = 4 layers, magnitude 5, 205 = 2 layers, mag 5) augment = {'F0': '405', 'F1': '410', 'F2': '410', 'F3': '415', 'F4': '415', 'F5': '415', 'F6': '415', 'F7': '415'}[variant] aug_base_name = 'cutmix_mixup_randaugment' config.experiment_kwargs.config.augment_name = f'{aug_base_name}_{augment}' return config class Experiment(experiment.Experiment): """Experiment with correct parameter filtering for applying AGC.""" def _make_opt(self): # Separate conv params and gains/biases def pred_gb(mod, name, val): del mod, val return (name in ['scale', 'offset', 'b'] or 'gain' in name or 'bias' in name) gains_biases, weights = hk.data_structures.partition(pred_gb, self._params) def pred_fc(mod, name, val): del name, val return 'linear' in mod and 'squeeze_excite' not in mod fc_weights, weights = hk.data_structures.partition(pred_fc, weights) # Lr schedule with batch-based LR scaling if self.config.lr_scale_by_bs: max_lr = (self.config.lr * self.config.train_batch_size) / 256 else: max_lr = self.config.lr lr_sched_fn = getattr(optim, self.config.lr_schedule.name) lr_schedule = lr_sched_fn(max_val=max_lr, **self.config.lr_schedule.kwargs) # Optimizer; no need to broadcast! 
opt_kwargs = {key: val for key, val in self.config.optimizer.kwargs.items()} opt_kwargs['lr'] = lr_schedule opt_module = getattr(optim, self.config.optimizer.name) self.opt = opt_module([{'params': gains_biases, 'weight_decay': None,}, {'params': fc_weights, 'clipping': None}, {'params': weights}], **opt_kwargs) if self._opt_state is None: self._opt_state = self.opt.states() else: self.opt.plugin(self._opt_state)
deepmind-research-master
nfnets/experiment_nfnets.py
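A minimal sketch of the three-way parameter split done in `_make_opt` above (gains/biases get no weight decay, the classifier weights are exempt from clipping), using `hk.data_structures.partition` on a toy parameter tree; the module names here are invented for illustration.

import haiku as hk
import jax.numpy as jnp

# Hedged sketch with made-up module names; the real tree comes from net.init.
params = {
    'nf_net/initial_conv': {'w': jnp.zeros((3, 3, 3, 16)), 'gain': jnp.ones((16,))},
    'nf_net/linear': {'w': jnp.zeros((16, 10)), 'b': jnp.zeros((10,))},
}

def pred_gb(mod, name, val):
  del mod, val
  return name in ('scale', 'offset', 'b') or 'gain' in name or 'bias' in name

gains_biases, weights = hk.data_structures.partition(pred_gb, params)

def pred_fc(mod, name, val):
  del name, val
  return 'linear' in mod and 'squeeze_excite' not in mod

fc_weights, weights = hk.data_structures.partition(pred_fc, weights)
print(sorted(gains_biases), sorted(fc_weights), sorted(weights))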
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ResNet model family.""" import functools import haiku as hk import jax import jax.numpy as jnp from nfnets import base class ResNet(hk.Module): """ResNetv2 Models.""" variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]}, 'ResNet101': {'depth': [3, 4, 23, 3]}, 'ResNet152': {'depth': [3, 8, 36, 3]}, 'ResNet200': {'depth': [3, 24, 36, 3]}, 'ResNet288': {'depth': [24, 24, 24, 24]}, 'ResNet600': {'depth': [50, 50, 50, 50]}, } def __init__(self, width, num_classes, variant='ResNet50', which_norm='BatchNorm', norm_kwargs=None, activation='relu', drop_rate=0.0, fc_init=jnp.zeros, conv_kwargs=None, preactivation=True, use_se=False, se_ratio=0.25, name='ResNet'): super().__init__(name=name) self.width = width self.num_classes = num_classes self.variant = variant self.depth_pattern = self.variant_dict[variant]['depth'] self.activation = getattr(jax.nn, activation) self.drop_rate = drop_rate self.which_norm = getattr(hk, which_norm) if norm_kwargs is not None: self.which_norm = functools.partial(self.which_norm, **norm_kwargs) if conv_kwargs is not None: self.which_conv = functools.partial(hk.Conv2D, **conv_kwargs) else: self.which_conv = hk.Conv2D self.preactivation = preactivation # Stem self.initial_conv = self.which_conv(16 * self.width, kernel_shape=7, stride=2, padding='SAME', with_bias=False, name='initial_conv') if not self.preactivation: self.initial_bn = self.which_norm(name='initial_bn') which_block = ResBlockV2 if self.preactivation else ResBlockV1 # Body self.blocks = [] for multiplier, blocks_per_stage, stride in zip([64, 128, 256, 512], self.depth_pattern, [1, 2, 2, 2]): for block_index in range(blocks_per_stage): self.blocks += [which_block(multiplier * self.width, use_projection=block_index == 0, stride=stride if block_index == 0 else 1, activation=self.activation, which_norm=self.which_norm, which_conv=self.which_conv, use_se=use_se, se_ratio=se_ratio)] # Head self.final_bn = self.which_norm(name='final_bn') self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True) def __call__(self, x, is_training, test_local_stats=False, return_metrics=False): """Return the output of the final layer without any [log-]softmax.""" outputs = {} # Stem out = self.initial_conv(x) if not self.preactivation: out = self.activation(self.initial_bn(out, is_training, test_local_stats)) out = hk.max_pool(out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME') if return_metrics: outputs.update(base.signal_metrics(out, 0)) # Blocks for i, block in enumerate(self.blocks): out, res_var = block(out, is_training, test_local_stats) if return_metrics: outputs.update(base.signal_metrics(out, i + 1)) outputs[f'res_avg_var_{i}'] = res_var if self.preactivation: out = self.activation(self.final_bn(out, is_training, test_local_stats)) # Pool, dropout, classify pool = jnp.mean(out, axis=[1, 2]) # Return pool before 
dropout in case we want to regularize it separately. outputs['pool'] = pool # Optionally apply dropout if self.drop_rate > 0.0 and is_training: pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool) outputs['logits'] = self.fc(pool) return outputs class ResBlockV2(hk.Module): """ResNet preac block, 1x1->3x3->1x1 with strides and shortcut downsample.""" def __init__(self, out_ch, stride=1, use_projection=False, activation=jax.nn.relu, which_norm=hk.BatchNorm, which_conv=hk.Conv2D, use_se=False, se_ratio=0.25, name=None): super().__init__(name=name) self.out_ch = out_ch self.stride = stride self.use_projection = use_projection self.activation = activation self.which_norm = which_norm self.which_conv = which_conv self.use_se = use_se self.se_ratio = se_ratio self.width = self.out_ch // 4 self.bn0 = which_norm(name='bn0') self.conv0 = which_conv(self.width, kernel_shape=1, with_bias=False, padding='SAME', name='conv0') self.bn1 = which_norm(name='bn1') self.conv1 = which_conv(self.width, stride=self.stride, kernel_shape=3, with_bias=False, padding='SAME', name='conv1') self.bn2 = which_norm(name='bn2') self.conv2 = which_conv(self.out_ch, kernel_shape=1, with_bias=False, padding='SAME', name='conv2') if self.use_projection: self.conv_shortcut = which_conv(self.out_ch, stride=stride, kernel_shape=1, with_bias=False, padding='SAME', name='conv_shortcut') if self.use_se: self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio) def __call__(self, x, is_training, test_local_stats): bn_args = (is_training, test_local_stats) out = self.activation(self.bn0(x, *bn_args)) if self.use_projection: shortcut = self.conv_shortcut(out) else: shortcut = x out = self.conv0(out) out = self.conv1(self.activation(self.bn1(out, *bn_args))) out = self.conv2(self.activation(self.bn2(out, *bn_args))) if self.use_se: out = self.se(out) * out # Get average residual standard deviation for reporting metrics. res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2])) return out + shortcut, res_avg_var class ResBlockV1(ResBlockV2): """Post-Ac Residual Block.""" def __call__(self, x, is_training, test_local_stats): bn_args = (is_training, test_local_stats) if self.use_projection: shortcut = self.conv_shortcut(x) shortcut = self.which_norm(name='shortcut_bn')(shortcut, *bn_args) else: shortcut = x out = self.activation(self.bn0(self.conv0(x), *bn_args)) out = self.activation(self.bn1(self.conv1(out), *bn_args)) out = self.bn2(self.conv2(out), *bn_args) if self.use_se: out = self.se(out) * out res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2])) return self.activation(out + shortcut), res_avg_var
deepmind-research-master
nfnets/resnet.py
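A minimal usage sketch for the ResNet above, assuming it is importable as `nfnets.resnet`; BatchNorm keeps moving statistics in Haiku state, so `transform_with_state` is needed. The width, variant, and norm kwargs are illustrative (they mirror the defaults in the experiment config).

import haiku as hk
import jax
import jax.numpy as jnp

from nfnets.resnet import ResNet  # assumed import path


def forward(images, is_training):
  # Hedged sketch: hk.BatchNorm needs create_scale/create_offset/decay_rate kwargs.
  net = ResNet(width=4, num_classes=1000, variant='ResNet50',
               norm_kwargs=dict(create_scale=True, create_offset=True,
                                decay_rate=0.9))
  return net(images, is_training=is_training)['logits']


model = hk.transform_with_state(forward)
rng = jax.random.PRNGKey(0)
images = jnp.zeros((2, 224, 224, 3))  # NHWC
params, state = model.init(rng, images, is_training=True)
logits, state = model.apply(params, state, rng, images, is_training=True)
print(logits.shape)  # (2, 1000)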
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils.""" import dill import jax import jax.numpy as jnp import tree def reduce_fn(x, mode): """Reduce fn for various losses.""" if mode == 'none' or mode is None: return jnp.asarray(x) elif mode == 'sum': return jnp.sum(x) elif mode == 'mean': return jnp.mean(x) else: raise ValueError('Unsupported reduction option.') def softmax_cross_entropy(logits, labels, reduction='sum'): """Computes softmax cross entropy given logits and one-hot class labels. Args: logits: Logit output values. labels: Ground truth one-hot-encoded labels. reduction: Type of reduction to apply to loss. Returns: Loss value. If `reduction` is `none`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the type of `reduction` is unsupported. """ loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1) return reduce_fn(loss, reduction) def topk_correct(logits, labels, mask=None, prefix='', topk=(1, 5)): """Calculate top-k error for multiple k values.""" metrics = {} argsorted_logits = jnp.argsort(logits) for k in topk: pred_labels = argsorted_logits[..., -k:] # Get the number of examples where the label is in the top-k predictions correct = any_in(pred_labels, labels).any(axis=-1).astype(jnp.float32) if mask is not None: correct *= mask metrics[f'{prefix}top_{k}_acc'] = correct return metrics @jax.vmap def any_in(prediction, target): """For each row in a and b, checks if any element of a is in b.""" return jnp.isin(prediction, target) def tf1_ema(ema_value, current_value, decay, step): """Implements EMA with TF1-style decay warmup.""" decay = jnp.minimum(decay, (1.0 + step) / (10.0 + step)) return ema_value * decay + current_value * (1 - decay) def ema(ema_value, current_value, decay, step): """Implements EMA without any warmup.""" del step return ema_value * decay + current_value * (1 - decay) to_bf16 = lambda x: x.astype(jnp.bfloat16) if x.dtype == jnp.float32 else x from_bf16 = lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x def _replicate(x, devices=None): """Replicate an object on each device.""" x = jax.numpy.array(x) if devices is None: devices = jax.local_devices() return jax.device_put_sharded(len(devices) * [x], devices) def broadcast(obj): """Broadcasts an object to all devices.""" if obj is not None and not isinstance(obj, bool): return _replicate(obj) else: return obj def split_tree(tuple_tree, base_tree, n): """Splits tuple_tree with n-tuple leaves into n trees.""" return [tree.map_structure_up_to(base_tree, lambda x: x[i], tuple_tree) # pylint: disable=cell-var-from-loop for i in range(n)] def load_haiku_file(filename): """Loads a haiku parameter tree, using dill.""" with open(filename, 'rb') as in_file: output = dill.load(in_file) return output def flatten_haiku_tree(haiku_dict): """Flattens a haiku parameter tree into a flat dictionary.""" out = {} for module in 
haiku_dict.keys(): out_module = module.replace('/~/', '.').replace('/', '.') for key in haiku_dict[module]: out_key = f'{out_module}.{key}' out[out_key] = haiku_dict[module][key] return out
deepmind-research-master
nfnets/utils.py
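A minimal sketch exercising two helpers above, `topk_correct` and `tf1_ema`, assuming the file is importable as `nfnets.utils` (which also needs `dill` and `dm-tree` installed); the values are illustrative.

import jax.numpy as jnp
from nfnets import utils  # assumed import path

# Hedged sketch: tiny 3-class problem, so only top-1 accuracy is requested.
logits = jnp.array([[2.0, 1.0, 0.1], [0.2, 0.3, 3.0]])
labels = jnp.array([0, 1])
metrics = utils.topk_correct(logits, labels, topk=(1,))
print(metrics['top_1_acc'])  # [1., 0.]: first example correct, second not

# TF1-style EMA warm-up: early steps cap the decay at (1 + step) / (10 + step).
ema0 = utils.tf1_ema(ema_value=0.0, current_value=1.0, decay=0.9999, step=0)
print(float(ema0))  # 0.9, because the decay is capped at 0.1 when step == 0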
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """AutoAugment and RandAugment policies for enhanced image preprocessing. AutoAugment Reference: https://arxiv.org/abs/1805.09501 RandAugment Reference: https://arxiv.org/abs/1909.13719 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect import math from ml_collections import config_dict import tensorflow.compat.v1 as tf from tensorflow_addons import image as contrib_image # pylint: disable=deprecated-method # This signifies the max integer that the controller RNN could predict for the # augmentation scheme. _MAX_LEVEL = 10. def policy_v0(): """Autoaugment policy that was used in AutoAugment Paper.""" # Each tuple is an augmentation operation of the form # (operation, probability, magnitude). Each element in policy is a # sub-policy that will be applied sequentially on the image. policy = [ [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], [('Color', 0.4, 9), ('Equalize', 0.6, 3)], [('Color', 0.4, 1), ('Rotate', 0.6, 8)], [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], [('Color', 0.2, 0), ('Equalize', 0.8, 8)], [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], [('Color', 0.6, 1), ('Equalize', 1.0, 2)], [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], [('Color', 0.4, 7), ('Equalize', 0.6, 0)], [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], [('Solarize', 0.6, 8), ('Color', 0.6, 9)], [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], [('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)], [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], [('ShearY', 0.8, 0), ('Color', 0.6, 4)], [('Color', 1.0, 0), ('Rotate', 0.6, 2)], [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], [('Color', 0.8, 6), ('Rotate', 0.4, 5)], ] return policy def policy_vtest(): """Autoaugment test policy for debugging.""" # Each tuple is an augmentation operation of the form # (operation, probability, magnitude). Each element in policy is a # sub-policy that will be applied sequentially on the image. policy = [ [('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)], ] return policy def blend(image1, image2, factor): """Blend image1 and image2 using 'factor'. Factor can be above 0.0. A value of 0.0 means only image1 is used. A value of 1.0 means only image2 is used. A value between 0.0 and 1.0 means we linearly interpolate the pixel values between the two images. A value greater than 1.0 "extrapolates" the difference between the two pixel values, and we clip the results to values between 0 and 255. Args: image1: An image Tensor of type uint8. image2: An image Tensor of type uint8. 
factor: A floating point value above 0.0. Returns: A blended image Tensor of type uint8. """ if factor == 0.0: return tf.convert_to_tensor(image1) if factor == 1.0: return tf.convert_to_tensor(image2) image1 = tf.to_float(image1) image2 = tf.to_float(image2) difference = image2 - image1 scaled = factor * difference # Do addition in float. temp = tf.to_float(image1) + scaled # Interpolate if factor > 0.0 and factor < 1.0: # Interpolation means we always stay within 0 and 255. return tf.cast(temp, tf.uint8) # Extrapolate: # # We need to clip and then cast. return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8) def cutout(image, pad_size, replace=0): """Apply cutout (https://arxiv.org/abs/1708.04552) to image. This operation applies a (2*pad_size x 2*pad_size) mask of zeros to a random location within `img`. The pixel values filled in will be of the value `replace`. The located where the mask will be applied is randomly chosen uniformly over the whole image. Args: image: An image Tensor of type uint8. pad_size: Specifies how big the zero mask that will be generated is that is applied to the image. The mask will be of size (2*pad_size x 2*pad_size). replace: What pixel value to fill in the image in the area that has the cutout mask applied to it. Returns: An image Tensor that is of type uint8. """ image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] # Sample the center location in the image where the zero mask will be applied. cutout_center_height = tf.random_uniform( shape=[], minval=0, maxval=image_height, dtype=tf.int32) cutout_center_width = tf.random_uniform( shape=[], minval=0, maxval=image_width, dtype=tf.int32) lower_pad = tf.maximum(0, cutout_center_height - pad_size) upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size) left_pad = tf.maximum(0, cutout_center_width - pad_size) right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size) cutout_shape = [image_height - (lower_pad + upper_pad), image_width - (left_pad + right_pad)] padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]] mask = tf.pad( tf.zeros(cutout_shape, dtype=image.dtype), padding_dims, constant_values=1) mask = tf.expand_dims(mask, -1) mask = tf.tile(mask, [1, 1, 3]) image = tf.where( tf.equal(mask, 0), tf.ones_like(image, dtype=image.dtype) * replace, image) return image def solarize(image, threshold=128): # For each pixel in the image, select the pixel # if the value is less than the threshold. # Otherwise, subtract the pixel from 255. return tf.where(image < threshold, image, 255 - image) def solarize_add(image, addition=0, threshold=128): # For each pixel in the image less than threshold # we add 'addition' amount to it and then clip the # pixel value to be between 0 and 255. The value # of 'addition' is between -128 and 128. added_image = tf.cast(image, tf.int64) + addition added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8) return tf.where(image < threshold, added_image, image) def color(image, factor): """Equivalent of PIL Color.""" degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image)) return blend(degenerate, image, factor) def contrast(image, factor): """Equivalent of PIL Contrast.""" degenerate = tf.image.rgb_to_grayscale(image) # Cast before calling tf.histogram. degenerate = tf.cast(degenerate, tf.int32) # Compute the grayscale histogram, then compute the mean pixel value, # and create a constant image size of that value. Use that as the # blending degenerate target of the original image. 
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256) mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0 degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean degenerate = tf.clip_by_value(degenerate, 0.0, 255.0) degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8)) return blend(degenerate, image, factor) def brightness(image, factor): """Equivalent of PIL Brightness.""" degenerate = tf.zeros_like(image) return blend(degenerate, image, factor) def posterize(image, bits): """Equivalent of PIL Posterize.""" shift = 8 - bits return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) def rotate(image, degrees, replace): """Rotates the image by degrees either clockwise or counterclockwise. Args: image: An image Tensor of type uint8. degrees: Float, a scalar angle in degrees to rotate all images by. If degrees is positive the image will be rotated clockwise otherwise it will be rotated counterclockwise. replace: A one or three value 1D tensor to fill empty pixels caused by the rotate operation. Returns: The rotated version of image. """ # Convert from degrees to radians. degrees_to_radians = math.pi / 180.0 radians = degrees * degrees_to_radians # In practice, we should randomize the rotation degrees by flipping # it negatively half the time, but that's done on 'degrees' outside # of the function. image = contrib_image.rotate(wrap(image), radians) return unwrap(image, replace) def translate_x(image, pixels, replace): """Equivalent of PIL Translate in X dimension.""" image = contrib_image.translate(wrap(image), [-pixels, 0]) return unwrap(image, replace) def translate_y(image, pixels, replace): """Equivalent of PIL Translate in Y dimension.""" image = contrib_image.translate(wrap(image), [0, -pixels]) return unwrap(image, replace) def shear_x(image, level, replace): """Equivalent of PIL Shearing in X dimension.""" # Shear parallel to x axis is a projective transform # with a matrix form of: # [1 level # 0 1]. image = contrib_image.transform( wrap(image), [1., level, 0., 0., 1., 0., 0., 0.]) return unwrap(image, replace) def shear_y(image, level, replace): """Equivalent of PIL Shearing in Y dimension.""" # Shear parallel to y axis is a projective transform # with a matrix form of: # [1 0 # level 1]. image = contrib_image.transform( wrap(image), [1., 0., 0., level, 1., 0., 0., 0.]) return unwrap(image, replace) def autocontrast(image): """Implements Autocontrast function from PIL using TF ops. Args: image: A 3D uint8 tensor. Returns: The image after it has had autocontrast applied to it and will be of type uint8. """ def scale_channel(image): """Scale the 2D image using the autocontrast rule.""" # A possibly cheaper version can be done using cumsum/unique_with_counts # over the histogram values, rather than iterating over the entire image. # to compute mins and maxes. lo = tf.to_float(tf.reduce_min(image)) hi = tf.to_float(tf.reduce_max(image)) # Scale the image, making the lowest value 0 and the highest value 255. def scale_values(im): scale = 255.0 / (hi - lo) offset = -lo * scale im = tf.to_float(im) * scale + offset im = tf.clip_by_value(im, 0.0, 255.0) return tf.cast(im, tf.uint8) result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image) return result # Assumes RGB for now. Scales each channel independently # and then stacks the result. 
s1 = scale_channel(image[:, :, 0]) s2 = scale_channel(image[:, :, 1]) s3 = scale_channel(image[:, :, 2]) image = tf.stack([s1, s2, s3], 2) return image def sharpness(image, factor): """Implements Sharpness function from PIL using TF ops.""" orig_image = image image = tf.cast(image, tf.float32) # Make image 4D for conv operation. image = tf.expand_dims(image, 0) # SMOOTH PIL Kernel. kernel = tf.constant( [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]) / 13. # Tile across channel dimension. kernel = tf.tile(kernel, [1, 1, 3, 1]) strides = [1, 1, 1, 1] with tf.device('/cpu:0'): # Some augmentation that uses depth-wise conv will cause crashing when # training on GPU. See (b/156242594) for details. degenerate = tf.nn.depthwise_conv2d( image, kernel, strides, padding='VALID', rate=[1, 1]) degenerate = tf.clip_by_value(degenerate, 0.0, 255.0) degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0]) # For the borders of the resulting image, fill in the values of the # original image. mask = tf.ones_like(degenerate) padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]]) padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]]) result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image) # Blend the final result. return blend(result, orig_image, factor) def equalize(image): """Implements Equalize function from PIL using TF ops.""" def scale_channel(im, c): """Scale the data in the channel to implement equalize.""" im = tf.cast(im[:, :, c], tf.int32) # Compute the histogram of the image channel. histo = tf.histogram_fixed_width(im, [0, 255], nbins=256) # For the purposes of computing the step, filter out the nonzeros. nonzero = tf.where(tf.not_equal(histo, 0)) nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1]) step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255 def build_lut(histo, step): # Compute the cumulative sum, shifting by step // 2 # and then normalization by step. lut = (tf.cumsum(histo) + (step // 2)) // step # Shift lut, prepending with 0. lut = tf.concat([[0], lut[:-1]], 0) # Clip the counts to be in range. This is done # in the C code for image.point. return tf.clip_by_value(lut, 0, 255) # If step is zero, return the original image. Otherwise, build # lut from the full histogram and step and then index from it. result = tf.cond(tf.equal(step, 0), lambda: im, lambda: tf.gather(build_lut(histo, step), im)) return tf.cast(result, tf.uint8) # Assumes RGB for now. Scales each channel independently # and then stacks the result. s1 = scale_channel(image, 0) s2 = scale_channel(image, 1) s3 = scale_channel(image, 2) image = tf.stack([s1, s2, s3], 2) return image def invert(image): """Inverts the image pixels.""" image = tf.convert_to_tensor(image) return 255 - image def wrap(image): """Returns 'image' with an extra channel set to all 1s.""" shape = tf.shape(image) extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype) extended = tf.concat([image, extended_channel], 2) return extended def unwrap(image, replace): """Unwraps an image produced by wrap. Where there is a 0 in the last channel for every spatial position, the rest of the three channels in that spatial dimension are grayed (set to 128). Operations like translate and shear on a wrapped Tensor will leave 0s in empty locations. Some transformations look at the intensity of values to do preprocessing, and we want these empty pixels to assume the 'average' value, rather than pure black. Args: image: A 3D Image Tensor with 4 channels. 
replace: A one or three value 1D tensor to fill empty pixels. Returns: image: A 3D image Tensor with 3 channels. """ image_shape = tf.shape(image) # Flatten the spatial dimensions. flattened_image = tf.reshape(image, [-1, image_shape[2]]) # Find all pixels where the last channel is zero. alpha_channel = flattened_image[:, 3] replace = tf.concat([replace, tf.ones([1], image.dtype)], 0) # Where they are zero, fill them in with 'replace'. flattened_image = tf.where( tf.equal(alpha_channel, 0), tf.ones_like(flattened_image, dtype=image.dtype) * replace, flattened_image) image = tf.reshape(flattened_image, image_shape) image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3]) return image NAME_TO_FUNC = { 'AutoContrast': autocontrast, 'Equalize': equalize, 'Invert': invert, 'Rotate': rotate, 'Posterize': posterize, 'Solarize': solarize, 'SolarizeAdd': solarize_add, 'Color': color, 'Contrast': contrast, 'Brightness': brightness, 'Sharpness': sharpness, 'ShearX': shear_x, 'ShearY': shear_y, 'TranslateX': translate_x, 'TranslateY': translate_y, 'Cutout': cutout, } def _randomly_negate_tensor(tensor): """With 50% prob turn the tensor negative.""" should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool) final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor) return final_tensor def _rotate_level_to_arg(level): level = (level/_MAX_LEVEL) * 30. level = _randomly_negate_tensor(level) return (level,) def _shrink_level_to_arg(level): """Converts level to ratio by which we shrink the image content.""" if level == 0: return (1.0,) # if level is zero, do not shrink the image # Maximum shrinking ratio is 2.9. level = 2. / (_MAX_LEVEL / level) + 0.9 return (level,) def _enhance_level_to_arg(level): return ((level/_MAX_LEVEL) * 1.8 + 0.1,) def _shear_level_to_arg(level): level = (level/_MAX_LEVEL) * 0.3 # Flip level to negative with 50% chance. level = _randomly_negate_tensor(level) return (level,) def _translate_level_to_arg(level, translate_const): level = (level/_MAX_LEVEL) * float(translate_const) # Flip level to negative with 50% chance. level = _randomly_negate_tensor(level) return (level,) def level_to_arg(hparams): return { 'AutoContrast': lambda level: (), 'Equalize': lambda level: (), 'Invert': lambda level: (), 'Rotate': _rotate_level_to_arg, 'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),), 'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),), 'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),), 'Color': _enhance_level_to_arg, 'Contrast': _enhance_level_to_arg, 'Brightness': _enhance_level_to_arg, 'Sharpness': _enhance_level_to_arg, 'ShearX': _shear_level_to_arg, 'ShearY': _shear_level_to_arg, 'Cutout': lambda level: (int((level/_MAX_LEVEL) * hparams.cutout_const),), # pylint:disable=g-long-lambda 'TranslateX': lambda level: _translate_level_to_arg( level, hparams.translate_const), 'TranslateY': lambda level: _translate_level_to_arg( level, hparams.translate_const), # pylint:enable=g-long-lambda } def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams): """Return the function that corresponds to `name` and update `level` param.""" func = NAME_TO_FUNC[name] args = level_to_arg(augmentation_hparams)[name](level) # Check to see if prob is passed into function. This is used for operations # where we alter bboxes independently. 
# pytype:disable=wrong-arg-types if 'prob' in inspect.getargspec(func)[0]: args = tuple([prob] + list(args)) # pytype:enable=wrong-arg-types # Add in replace arg if it is required for the function that is being called. # pytype:disable=wrong-arg-types if 'replace' in inspect.getargspec(func)[0]: # Make sure replace is the final argument assert 'replace' == inspect.getargspec(func)[0][-1] args = tuple(list(args) + [replace_value]) # pytype:enable=wrong-arg-types return (func, prob, args) def _apply_func_with_prob(func, image, args, prob): """Apply `func` to image w/ `args` as input with probability `prob`.""" assert isinstance(args, tuple) # If prob is a function argument, then this randomness is being handled # inside the function, so make sure it is always called. # pytype:disable=wrong-arg-types if 'prob' in inspect.getargspec(func)[0]: prob = 1.0 # pytype:enable=wrong-arg-types # Apply the function with probability `prob`. should_apply_op = tf.cast( tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool) augmented_image = tf.cond( should_apply_op, lambda: func(image, *args), lambda: image) return augmented_image def select_and_apply_random_policy(policies, image): """Select a random policy from `policies` and apply it to `image`.""" policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32) # Note that using tf.case instead of tf.conds would result in significantly # larger graphs and would even break export for some larger policies. for (i, policy) in enumerate(policies): image = tf.cond( tf.equal(i, policy_to_select), lambda selected_policy=policy: selected_policy(image), lambda: image) return image def build_and_apply_nas_policy(policies, image, augmentation_hparams): """Build a policy from the given policies passed in and apply to image. Args: policies: list of lists of tuples in the form `(func, prob, level)`, `func` is a string name of the augmentation function, `prob` is the probability of applying the `func` operation, `level` is the input argument for `func`. image: tf.Tensor that the resulting policy will be applied to. augmentation_hparams: Hparams associated with the NAS learned policy. Returns: A version of image that now has data augmentation applied to it based on the `policies` pass into the function. """ replace_value = [128, 128, 128] # func is the string name of the augmentation function, prob is the # probability of applying the operation and level is the parameter associated # with the tf op. # tf_policies are functions that take in an image and return an augmented # image. tf_policies = [] for policy in policies: tf_policy = [] # Link string name to the correct python function and make sure the correct # argument is passed into that function. for policy_info in policy: policy_info = list(policy_info) + [replace_value, augmentation_hparams] tf_policy.append(_parse_policy_info(*policy_info)) # Now build the tf policy that will apply the augmentation procedue # on image. def make_final_policy(tf_policy_): def final_policy(image_): for func, prob, args in tf_policy_: image_ = _apply_func_with_prob( func, image_, args, prob) return image_ return final_policy tf_policies.append(make_final_policy(tf_policy)) augmented_image = select_and_apply_random_policy( tf_policies, image) return augmented_image def distort_image_with_autoaugment(image, augmentation_name): """Applies the AutoAugment policy to `image`. AutoAugment is from the paper: https://arxiv.org/abs/1805.09501. Args: image: `Tensor` of shape [height, width, 3] representing an image. 
augmentation_name: The name of the AutoAugment policy to use. The available options are `v0` and `test`. `v0` is the policy used for all of the results in the paper and was found to achieve the best results on the COCO dataset. `v1`, `v2` and `v3` are additional good policies found on the COCO dataset that have slight variation in what operations were used during the search procedure along with how many operations are applied in parallel to a single image (2 vs 3). Returns: A tuple containing the augmented versions of `image`. """ available_policies = {'v0': policy_v0, 'test': policy_vtest} if augmentation_name not in available_policies: raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name)) policy = available_policies[augmentation_name]() # Hparams that will be used for AutoAugment. augmentation_hparams = config_dict.ConfigDict(dict( cutout_const=100, translate_const=250)) return build_and_apply_nas_policy(policy, image, augmentation_hparams) def distort_image_with_randaugment(image, num_layers, magnitude): """Applies the RandAugment policy to `image`. RandAugment is from the paper https://arxiv.org/abs/1909.13719, Args: image: `Tensor` of shape [height, width, 3] representing an image. num_layers: Integer, the number of augmentation transformations to apply sequentially to an image. Represented as (N) in the paper. Usually best values will be in the range [1, 3]. magnitude: Integer, shared magnitude across all augmentation operations. Represented as (M) in the paper. Usually best values are in the range [5, 30]. Returns: The augmented version of `image`. """ replace_value = [128] * 3 tf.logging.info('Using RandAug.') augmentation_hparams = config_dict.ConfigDict(dict( cutout_const=40, translate_const=100)) available_ops = [ 'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize', 'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd'] for layer_num in range(num_layers): op_to_select = tf.random_uniform( [], maxval=len(available_ops), dtype=tf.int32) random_magnitude = float(magnitude) with tf.name_scope('randaug_layer_{}'.format(layer_num)): for (i, op_name) in enumerate(available_ops): prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32) func, _, args = _parse_policy_info(op_name, prob, random_magnitude, replace_value, augmentation_hparams) image = tf.cond( tf.equal(i, op_to_select), # pylint:disable=g-long-lambda lambda selected_func=func, selected_args=args: selected_func( image, *selected_args), # pylint:enable=g-long-lambda lambda: image) return image
deepmind-research-master
nfnets/autoaugment.py
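A minimal usage sketch for the two entry points defined in the file above, distort_image_with_autoaugment and distort_image_with_randaugment. The import path, the eager-mode toggle, and the input sizes are illustrative assumptions; the ops themselves expect a single uint8 [H, W, 3] image and TF1-style graph execution.

import numpy as np
import tensorflow.compat.v1 as tf

# Assumed import path, matching the file location above.
from nfnets.autoaugment import (distort_image_with_autoaugment,
                                distort_image_with_randaugment)

tf.disable_eager_execution()  # The ops above are written for graph mode.

# Hypothetical uint8 input image.
image = tf.placeholder(tf.uint8, shape=[224, 224, 3])

# AutoAugment with the published 'v0' policy.
auto_image = distort_image_with_autoaugment(image, 'v0')

# RandAugment with N=2 layers at magnitude M=15, values inside the ranges
# suggested by the docstring.
rand_image = distort_image_with_randaugment(image, num_layers=2, magnitude=15)

with tf.Session() as sess:
  dummy = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
  augmented = sess.run([auto_image, rand_image], feed_dict={image: dummy})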
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Norm-Free Residual Networks.""" # pylint: disable=invalid-name import haiku as hk import jax import jax.numpy as jnp from nfnets import base class NF_ResNet(hk.Module): """Norm-Free preactivation ResNet.""" variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]}, 'ResNet101': {'depth': [3, 4, 23, 3]}, 'ResNet152': {'depth': [3, 8, 36, 3]}, 'ResNet200': {'depth': [3, 24, 36, 3]}, 'ResNet288': {'depth': [24, 24, 24, 24]}, 'ResNet600': {'depth': [50, 50, 50, 50]}, } def __init__(self, num_classes, variant='ResNet50', width=4, alpha=0.2, stochdepth_rate=0.1, drop_rate=None, activation='relu', fc_init=None, skipinit_gain=jnp.zeros, use_se=False, se_ratio=0.25, name='NF_ResNet'): super().__init__(name=name) self.num_classes = num_classes self.variant = variant self.width = width # Get variant info block_params = self.variant_dict[self.variant] self.width_pattern = [item * self.width for item in [64, 128, 256, 512]] self.depth_pattern = block_params['depth'] self.activation = base.nonlinearities[activation] if drop_rate is None: self.drop_rate = block_params['drop_rate'] else: self.drop_rate = drop_rate self.which_conv = base.WSConv2D # Stem ch = int(16 * self.width) self.initial_conv = self.which_conv(ch, kernel_shape=7, stride=2, padding='SAME', with_bias=False, name='initial_conv') # Body self.blocks = [] expected_std = 1.0 num_blocks = sum(self.depth_pattern) index = 0 # Overall block index block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2]) for block_width, stage_depth, stride in zip(*block_args): for block_index in range(stage_depth): # Scalar pre-multiplier so each block sees an N(0,1) input at init beta = 1./ expected_std # Block stochastic depth drop-rate block_stochdepth_rate = stochdepth_rate * index / num_blocks self.blocks += [NFResBlock(ch, block_width, stride=stride if block_index == 0 else 1, beta=beta, alpha=alpha, activation=self.activation, which_conv=self.which_conv, stochdepth_rate=block_stochdepth_rate, skipinit_gain=skipinit_gain, use_se=use_se, se_ratio=se_ratio, )] ch = block_width index += 1 # Reset expected std but still give it 1 block of growth if block_index == 0: expected_std = 1.0 expected_std = (expected_std **2 + alpha**2)**0.5 # Head. 
By default, initialize with N(0, 0.01) if fc_init is None: fc_init = hk.initializers.RandomNormal(0.01, 0) self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True) def __call__(self, x, is_training=True, return_metrics=False): """Return the output of the final layer without any [log-]softmax.""" # Stem outputs = {} out = self.initial_conv(x) out = hk.max_pool(out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME') if return_metrics: outputs.update(base.signal_metrics(out, 0)) # Blocks for i, block in enumerate(self.blocks): out, res_avg_var = block(out, is_training=is_training) if return_metrics: outputs.update(base.signal_metrics(out, i + 1)) outputs[f'res_avg_var_{i}'] = res_avg_var # Final-conv->activation, pool, dropout, classify pool = jnp.mean(self.activation(out), [1, 2]) outputs['pool'] = pool # Optionally apply dropout if self.drop_rate > 0.0 and is_training: pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool) outputs['logits'] = self.fc(pool) return outputs def count_flops(self, h, w): flops = [] flops += [base.count_conv_flops(3, self.initial_conv, h, w)] h, w = h / 2, w / 2 # Body FLOPs for block in self.blocks: flops += [block.count_flops(h, w)] if block.stride > 1: h, w = h / block.stride, w / block.stride # Head module FLOPs out_ch = self.blocks[-1].out_ch flops += [base.count_conv_flops(out_ch, self.final_conv, h, w)] # Count flops for classifier flops += [self.final_conv.output_channels * self.fc.output_size] return flops, sum(flops) class NFResBlock(hk.Module): """Normalizer-Free pre-activation ResNet Block.""" def __init__(self, in_ch, out_ch, bottleneck_ratio=0.25, kernel_size=3, stride=1, beta=1.0, alpha=0.2, which_conv=base.WSConv2D, activation=jax.nn.relu, skipinit_gain=jnp.zeros, stochdepth_rate=None, use_se=False, se_ratio=0.25, name=None): super().__init__(name=name) self.in_ch, self.out_ch = in_ch, out_ch self.kernel_size = kernel_size self.activation = activation self.beta, self.alpha = beta, alpha self.skipinit_gain = skipinit_gain self.use_se, self.se_ratio = use_se, se_ratio # Bottleneck width self.width = int(self.out_ch * bottleneck_ratio) self.stride = stride # Conv 0 (typically expansion conv) self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME', name='conv0') # Grouped NxN conv self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride, padding='SAME', name='conv1') # Conv 2, typically projection conv self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME', name='conv2') # Use shortcut conv on channel change or downsample. self.use_projection = stride > 1 or self.in_ch != self.out_ch if self.use_projection: self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1, stride=stride, padding='SAME', name='conv_shortcut') # Are we using stochastic depth? self._has_stochdepth = (stochdepth_rate is not None and stochdepth_rate > 0. and stochdepth_rate < 1.0) if self._has_stochdepth: self.stoch_depth = base.StochDepth(stochdepth_rate) if self.use_se: self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio) def __call__(self, x, is_training): out = self.activation(x) * self.beta shortcut = x if self.use_projection: # Downsample with conv1x1 shortcut = self.conv_shortcut(out) out = self.conv0(out) out = self.conv1(self.activation(out)) out = self.conv2(self.activation(out)) if self.use_se: out = 2 * self.se(out) * out # Get average residual standard deviation for reporting metrics. res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2])) # Apply stochdepth if applicable. 
if self._has_stochdepth: out = self.stoch_depth(out, is_training) # SkipInit Gain out = out * hk.get_parameter('skip_gain', (), out.dtype, init=self.skipinit_gain) return out * self.alpha + shortcut, res_avg_var def count_flops(self, h, w): # Count conv FLOPs based on input HW expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w) # If block is strided we decrease resolution here. dw_flops = base.count_conv_flops(self.width, self.conv1, h, w) if self.stride > 1: h, w = h / self.stride, w / self.stride if self.use_projection: sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w) else: sc_flops = 0 # SE flops happen on avg-pooled activations se_flops = self.se.fc0.output_size * self.width se_flops += self.se.fc0.output_size * self.se.fc1.output_size contract_flops = base.count_conv_flops(self.width, self.conv2, h, w) return sum([expand_flops, dw_flops, se_flops, contract_flops, sc_flops])
deepmind-research-master
nfnets/nf_resnet.py
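A minimal sketch of constructing and applying the Norm-Free ResNet above with Haiku. The import path, batch size, and hyperparameter values are assumptions; drop_rate is passed explicitly here because the variant table in this file only stores per-stage depths.

import haiku as hk
import jax
import jax.numpy as jnp

from nfnets.nf_resnet import NF_ResNet  # Assumed import path.

def forward(images, is_training):
  model = NF_ResNet(num_classes=1000, variant='ResNet50', drop_rate=0.2)
  return model(images, is_training=is_training)

net = hk.transform_with_state(forward)

images = jnp.zeros([2, 224, 224, 3])  # NHWC, as expected by hk.max_pool above.
rng = jax.random.PRNGKey(0)
params, state = net.init(rng, images, is_training=True)
outputs, state = net.apply(params, state, rng, images, is_training=True)
print(outputs['logits'].shape)  # (2, 1000)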
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Optimizers and Schedulers, inspired by the PyTorch API.""" from collections import ChainMap # pylint:disable=g-importing-member from typing import Callable import haiku as hk import jax import jax.numpy as jnp import tree from nfnets import utils class Optimizer(object): """Optimizer base class.""" def __init__(self, params, defaults): # Flag indicating if parameters have been broadcasted self._broadcasted = False # Optimizer hyperparameters; this is a dict to support using param_groups self._hyperparameters = {} # Mapping from model parameters to optimizer hyperparameters self._params2hyperparams = {} # Assign defaults self._hyperparameters = dict(**defaults) # Prepare parameter groups and mappings self.create_param_groups(params, defaults) # Join params at top-level if params is a list of groups if isinstance(params, list): flatmap = type(hk.data_structures.to_immutable_dict({})) if any([isinstance(group['params'], flatmap) for group in params]): params = hk.data_structures.merge(*[group['params'] for group in params]) else: params = dict(ChainMap(*[group['params'] for group in params])) # Prepare states create_buffers = lambda k, v: self.create_buffers('/'.join(k), v) self._states = tree.map_structure_with_path(create_buffers, params) def add_hyperparam_group(self, group, suffix, defaults): """Adds new hyperparameters to the hyperparams dict.""" # Use default hyperparams unless overridden by group hyperparams group_dict = {key: key for key in defaults if key not in group} for key in group: if key != 'params': # Reserved keyword 'params' group_dict[key] = '%s_%s' % (key, suffix) self._hyperparameters[group_dict[key]] = group[key] # Set up params2hyperparams def set_p2h(k, _): self._params2hyperparams['/'.join(k)] = group_dict tree.map_structure_with_path(set_p2h, group['params']) def create_param_groups(self, params, defaults): """Creates param-hyperparam mappings.""" if isinstance(params, list): for group_index, group in enumerate(params): # Add group to hyperparams and get this group's full hyperparameters self.add_hyperparam_group(group, group_index, defaults) else: mapping = {key: key for key in self._hyperparameters} def set_p2h(k, _): self._params2hyperparams['/'.join(k)] = mapping tree.map_structure_with_path(set_p2h, params) def create_buffers(self, name, params): """Method to be overridden by child classes.""" pass def get_opt_params(self, param_name, itr): """Returns hyperparams corresponding to param_name.""" mapping = self._params2hyperparams[param_name] output = {} for key in mapping: hyper = self._hyperparameters[mapping[key]] # Handle the case where a hyper is a class, for hybrids if isinstance(hyper, Callable) and not isinstance(hyper, type): output[key] = hyper(itr) else: output[key] = hyper return output def get_hyper(self, param_name, hyper_name): """Get an individual hyperparam for a 
given param.""" mapping = self._params2hyperparams[param_name] return self._hyperparameters[mapping[hyper_name]] def plugin(self, states): self._states = states def states(self): return self._states def broadcast(self): """Brodcasts all buffers and parameters.""" self._broadcasted = True for name, state in self._states.items(): self._states[name] = {key: utils.broadcast(state[key]) for key in state} def gather(self): """Gathers state (if broadcasted) for saving.""" states = {} for name in self._states: state = self._states[name] states[name] = {key: state[key] if state[key] is None else state[key][0] for key in state} return states def __setattr__(self, name, value): """Overrides the object's set-attribute function to register states, etc.""" if '_hyperparameters' in self.__dict__ and name in self._hyperparameters: self._hyperparameters[name] = value elif '_states' in self.__dict__ and name in self._states: self._states[name] = value else: object.__setattr__(self, name, value) def __getattr__(self, name): """Override the object's get-attribute function to return states, etc.""" if '_hyperparameters' in self.__dict__ and name in self._hyperparameters: return self._hyperparameters[name] elif '_states' in self.__dict__ and name in self._states: return self._states[name] else: object.__getattr__(self, name) def step(self, params, grads, states, itr=None): """Takes a single optimizer step. Args: params: a dict containing the parameters to be updated. grads: a dict containing the gradients for each parameter in params. states: a dict containing any optimizer buffers (momentum, etc) for each parameter in params. itr: an optional integer indicating the current step, for scheduling. Returns: The updated params and optimizer buffers. """ get_hyper = lambda k, v: self.get_opt_params('/'.join(k), itr) hypers = tree.map_structure_with_path(get_hyper, params) outs = tree.map_structure_up_to(params, self.update_param, params, grads, states, hypers) return utils.split_tree(outs, params, 2) class Schedule(object): """Hyperparameter scheduling objects.""" class CosineDecay(Schedule): """Cosine decay.""" def __init__(self, min_val, max_val, num_steps): self.min_val = min_val self.max_val = max_val self.num_steps = num_steps def __call__(self, itr): cos = (1 + jnp.cos(jnp.pi * itr / self.num_steps)) return 0.5 * (self.max_val - self.min_val) * cos + self.min_val class WarmupCosineDecay(Schedule): """Cosine decay with linear warmup.""" def __init__(self, start_val, min_val, max_val, num_steps, warmup_steps): self.start_val = start_val self.min_val = min_val self.max_val = max_val self.num_steps = num_steps self.warmup_steps = warmup_steps def __call__(self, itr): warmup_val = ((self.max_val - self.start_val) * (itr / self.warmup_steps) + self.start_val) cos_itr = (itr - self.warmup_steps) / (self.num_steps - self.warmup_steps) cos = 1 + jnp.cos(jnp.pi * cos_itr) cos_val = 0.5 * (self.max_val - self.min_val) * cos + self.min_val # Select warmup_val if itr < warmup, else cosine val values = jnp.array([warmup_val, cos_val]) index = jnp.sum(jnp.array(self.warmup_steps) < itr) return jnp.take(values, index) class WarmupExpDecay(Schedule): """Exponential step decay with linear warmup.""" def __init__(self, start_val, max_val, warmup_steps, decay_factor, decay_interval): self.start_val = start_val self.max_val = max_val self.warmup_steps = warmup_steps self.decay_factor = decay_factor self.decay_interval = decay_interval def __call__(self, itr): warmup_val = ((self.max_val - self.start_val) * (itr / 
self.warmup_steps) + self.start_val) # How many decay steps have we taken? num_decays = jnp.floor((itr - self.warmup_steps) / self.decay_interval) exp_val = self.max_val * (self.decay_factor ** num_decays) # Select warmup_val if itr < warmup, else exp_val values = jnp.array([warmup_val, exp_val]) index = jnp.sum(jnp.array(self.warmup_steps) < itr) return jnp.take(values, index) class SGD(Optimizer): """Standard SGD with (nesterov) momentum and weight decay. Attributes: params: Either a dict mapping param names to JAX tensors, or a list where each member of the list is a dict containing parameters and hyperparameters, allowing one to specify param-specific hyperparams. lr: Learning rate. weight_decay: Weight decay parameter. Note that this is decay, not L2 reg. momentum: Momentum parameter dampening: Dampening parameter nesterov: Bool indicating this optimizer will use the NAG formulation. """ defaults = {'weight_decay': None, 'momentum': None, 'dampening': 0, 'nesterov': None} def __init__(self, params, lr, weight_decay=None, momentum=None, dampening=0, nesterov=None): super().__init__( params, defaults={'lr': lr, 'weight_decay': weight_decay, 'momentum': momentum, 'dampening': dampening, 'nesterov': nesterov}) def create_buffers(self, name, param): """Prepares all momentum buffers for each parameter.""" state = {'step': jnp.zeros(jax.local_device_count())} if self.get_hyper(name, 'momentum') is not None: state['momentum'] = jnp.zeros_like(param) return state def update_param(self, param, grad, state, opt_params): """The actual update step for this optimizer.""" if param is None: return param, state # Apply weight decay if opt_params.get('weight_decay') is not None: grad = grad + param * opt_params['weight_decay'] # Update momentum buffers if needed if 'momentum' in state: state['momentum'] = (opt_params['momentum'] * state['momentum'] + (1 - opt_params['dampening']) * grad) if opt_params['nesterov'] is not None: grad = grad + opt_params['momentum'] * state['momentum'] else: grad = state['momentum'] state['step'] += 1 return param - opt_params['lr'] * grad, state class Adam(Optimizer): """Adam optimizer, Kingma & Ba, arxiv.org/abs/1412.6980. Args: params (iterable): nested list of params to optimize lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (default: 0) use_adamw (bool, optional): If not None, use decoupled weight decay as in arxiv.org/abs/1711.05101. The paper version adds an additional "schedule" hyperparameter eta, which we instead just replace with the learning rate following the PyTorch implementation. Note that this implementation will not instantiate a buffer if the beta term for that buffer is passed in as None, thus conserving memory. 
""" defaults = {'beta1': 0.9, 'beta2': 0.999, 'weight_decay': None, 'eps': 1e-8, 'use_adamw': None} def __init__(self, params, lr, beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=None, use_adamw=None): super().__init__(params=params, defaults={'lr': lr, 'beta1': beta1, 'beta2': beta2, 'eps': eps, 'weight_decay': weight_decay, 'use_adamw': use_adamw}) def create_buffers(self, name, param): """Prepare exp_avg and exp_avg_sq buffers.""" state = {'step': jnp.zeros(jax.local_device_count())} if self.get_hyper(name, 'beta1') is not None: state['exp_avg'] = jnp.zeros_like(param) if self.get_hyper(name, 'beta2') is not None: state['exp_avg_sq'] = jnp.zeros_like(param) return state def update_param(self, param, grad, state, opt_params): """The actual update step for this optimizer.""" if param is None: return param, state state['step'] = state['step'] + 1 # Apply weight decay if opt_params.get('weight_decay') is not None: if opt_params.get('use_adamw') is not None: param = param * (1 - opt_params['lr'] * opt_params['weight_decay']) else: grad = grad + param * opt_params['weight_decay'] # First moment if 'exp_avg' in state: bias_correction1 = 1 - opt_params['beta1'] ** state['step'] state['exp_avg'] = (state['exp_avg'] * opt_params['beta1'] + (1 - opt_params['beta1']) * grad) step_size = opt_params['lr'] * state['exp_avg'] / bias_correction1 else: step_size = opt_params['lr'] * grad # Second moment if 'exp_avg_sq' in state: bias_correction2 = 1 - opt_params['beta2'] ** state['step'] state['exp_avg_sq'] = (state['exp_avg_sq'] * opt_params['beta2'] + (1 - opt_params['beta2']) * grad * grad) denom = jnp.sqrt(state['exp_avg_sq']) * jax.lax.rsqrt(bias_correction2) denom = denom + opt_params['eps'] else: denom = jnp.abs(grad) + opt_params['eps'] # Add eps to avoid divide-by-0 return param - step_size / denom, state class RMSProp(Optimizer): """RMSProp optimizer, Tieleman and Hinton, ref: powerpoint slides. Implements RMSProp as rms = decay * rms{t-1} + (1-decay) * gradient ** 2 mom = momentum * mom{t-1} + learning_rate * g_t / sqrt(rms + epsilon) param -= mom Note that the rms buffer is initialized with ones as in TF, as opposed to zeros as in all other implementations. Args: params (iterable): nested list of params to optimize lr (float): learning rate (default: 1e-3) decay (float): EMA decay rate for running estimate of squared gradient. momentum (float or None): Use heavy ball momentum instead of instant grad. 
eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (NOT ADAMW (default: 0)) """ defaults = {'weight_decay': None, 'eps': 1e-8} def __init__(self, params, lr, decay, momentum, weight_decay=None, eps=1e-8): super().__init__(params=params, defaults={'lr': lr, 'decay': decay, 'momentum': momentum, 'eps': eps, 'weight_decay': weight_decay}) def create_buffers(self, name, param): """Prepare exp_avg and exp_avg_sq buffers.""" state = {'step': jnp.zeros(jax.local_device_count())} state['rms'] = jnp.ones_like(param) if self.get_hyper(name, 'momentum') is not None: state['momentum'] = jnp.zeros_like(param) return state def update_param(self, param, grad, state, opt_params): """The actual update step for this optimizer.""" if param is None: return param, state state['step'] = state['step'] + 1 # Apply weight decay if opt_params.get('weight_decay') is not None: grad = grad + param * opt_params['weight_decay'] # EMA of the squared gradient state['rms'] = (state['rms'] * opt_params['decay'] + (1 - opt_params['decay']) * (grad ** 2)) scaled_grad = (opt_params['lr'] * grad / (state['rms'] + opt_params['eps']) ** 0.5) if state['momentum'] is not None: state['momentum'] = (state['momentum'] * opt_params['momentum'] + scaled_grad) step_size = state['momentum'] else: step_size = scaled_grad return param - step_size, state class Fromage(Optimizer): """Fromage optimizer, Bernstein et al. arXiv.org/abs/2002.03432. This version optionally includes weight decay. Attributes: params (iterable): nested list of params to optimize lr (float): learning rate. eps (float, optional): Minimum allowable norm. This term is required for in case parameters are zero-initialized (default: 1e-5). weight_decay (float, optional): weight decay (default: 0). """ defaults = {'weight_decay': None, 'eps': 1e-5} def __init__(self, params, lr, weight_decay=None, eps=1e-5): super().__init__( params, defaults={'lr': lr, 'weight_decay': weight_decay, 'eps': eps}) def create_buffers(self, name, param): # pylint: disable=unused-argument """Prepares all momentum buffers for each parameter.""" return {'step': jnp.zeros(1)} def update_param(self, param, grad, state, opt_params): """The actual update step for this optimizer.""" if param is None: return param, state if opt_params['weight_decay'] is not None: grad = grad + param * opt_params['weight_decay'] grad_norm = jnp.maximum(jnp.linalg.norm(grad), opt_params['eps']) param_norm = jnp.maximum(jnp.linalg.norm(param), opt_params['eps']) mult = jax.lax.rsqrt(1 + opt_params['lr'] ** 2) out = (param - opt_params['lr'] * grad * (param_norm / grad_norm)) * mult return out, state def compute_norm(x, axis, keepdims): """Returns norm over arbitrary axis.""" norm = jnp.sum(x ** 2, axis=axis, keepdims=keepdims) ** 0.5 return norm def unitwise_norm(x): """Computes norms of each output unit separately, assuming (HW)IO weights.""" if len(jnp.squeeze(x).shape) <= 1: # Scalars and vectors axis = None keepdims = False elif len(x.shape) in [2, 3]: # Linear layers of shape IO axis = 0 keepdims = True elif len(x.shape) == 4: # Conv kernels of shape HWIO axis = [0, 1, 2,] keepdims = True else: raise ValueError(f'Got a parameter with shape not in [1, 2, 3, 4]! {x}') return compute_norm(x, axis, keepdims) class SGD_AGC(Optimizer): # pylint:disable=invalid-name """SGD with Unit-Adaptive Gradient-Clipping. References: [Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image Recognition Without Normalization. 
""" defaults = {'weight_decay': None, 'momentum': None, 'dampening': 0, 'nesterov': None, 'clipping': 0.01, 'eps': 1e-3} def __init__(self, params, lr, weight_decay=None, momentum=None, dampening=0, nesterov=None, clipping=0.01, eps=1e-3): super().__init__( params, defaults={'lr': lr, 'weight_decay': weight_decay, 'momentum': momentum, 'dampening': dampening, 'clipping': clipping, 'nesterov': nesterov, 'eps': eps}) def create_buffers(self, name, param): return SGD.create_buffers(self, name, param) def update_param(self, param, grad, state, opt_params): """Clips grads if necessary, then applies the optimizer update.""" if param is None: return param, state if opt_params['clipping'] is not None: param_norm = jnp.maximum(unitwise_norm(param), opt_params['eps']) grad_norm = unitwise_norm(grad) max_norm = param_norm * opt_params['clipping'] # If grad norm > clipping * param_norm, rescale trigger = grad_norm > max_norm # Note the max(||G||, 1e-6) is technically unnecessary here, as # the clipping shouldn't trigger if the grad norm is zero, # but we include it in practice as a "just-in-case". clipped_grad = grad * (max_norm / jnp.maximum(grad_norm, 1e-6)) grad = jnp.where(trigger, clipped_grad, grad) return SGD.update_param(self, param, grad, state, opt_params) class Hybrid(Optimizer): """Optimizer which permits passing param groups with different base opts. The API for this class follows the case for any other optimizer where one specifies a list of dicts with separate hyperparams, but in this case it requires the user to also specify an 'opt' key for each group, such as e.g. [{'params': params0, 'opt': optim.Adam, 'lr': 0.1}]. The user must also provide values for any arg in the selected optimizers which does not have a default value associated """ def __init__(self, param_groups): if any(['opt' not in group for group in param_groups]): raise ValueError('All parameter groups must have an opt key!') self.defaults = ChainMap(*[group['opt'].defaults for group in param_groups]) super().__init__(param_groups, defaults=dict(self.defaults)) def create_buffers(self, name, param): return self.get_hyper(name, 'opt').create_buffers(self, name, param) def update_param(self, param, grad, state, opt_params): return opt_params['opt'].update_param(self, param, grad, state, opt_params)
deepmind-research-master
nfnets/optim.py
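A minimal sketch of driving the optimizer API defined above on a toy Haiku model. The model, loss, and hyperparameter values are illustrative assumptions; per the step() docstring, the call returns the updated parameters and optimizer buffers.

import haiku as hk
import jax
import jax.numpy as jnp

from nfnets import optim  # Assumed import path.

def forward(x):
  return hk.Linear(4)(x)

net = hk.without_apply_rng(hk.transform(forward))
x = jnp.ones([8, 16])
params = net.init(jax.random.PRNGKey(0), x)

def loss_fn(params, x):
  return jnp.mean(net.apply(params, x) ** 2)

# SGD with unit-wise adaptive gradient clipping; hyperparameter values follow
# the defaults suggested in the class definition above.
opt = optim.SGD_AGC(params, lr=0.1, momentum=0.9, clipping=0.01)

grads = jax.grad(loss_fn)(params, x)
new_params, new_states = opt.step(params, grads, opt.states(), itr=0)
opt.plugin(new_states)  # Store the updated buffers back on the optimizer.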
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Architecture definitions for different models.""" import haiku as hk import jax import jax.numpy as jnp import numpy as np # Model settings for NF-RegNets nf_regnet_params = { 'B0': {'width': [48, 104, 208, 440], 'depth': [1, 3, 6, 6], 'train_imsize': 192, 'test_imsize': 224, 'drop_rate': 0.2}, 'B1': {'width': [48, 104, 208, 440], 'depth': [2, 4, 7, 7], 'train_imsize': 224, 'test_imsize': 256, 'drop_rate': 0.2}, 'B2': {'width': [56, 112, 232, 488], 'depth': [2, 4, 8, 8], 'train_imsize': 240, 'test_imsize': 272, 'drop_rate': 0.3}, 'B3': {'width': [56, 128, 248, 528], 'depth': [2, 5, 9, 9], 'train_imsize': 288, 'test_imsize': 320, 'drop_rate': 0.3}, 'B4': {'width': [64, 144, 288, 616], 'depth': [2, 6, 11, 11], 'train_imsize': 320, 'test_imsize': 384, 'drop_rate': 0.4}, 'B5': {'width': [80, 168, 336, 704], 'depth': [3, 7, 14, 14], 'train_imsize': 384, 'test_imsize': 456, 'drop_rate': 0.4}, 'B6': {'width': [88, 184, 376, 792], 'depth': [3, 8, 16, 16], 'train_imsize': 448, 'test_imsize': 528, 'drop_rate': 0.5}, 'B7': {'width': [96, 208, 416, 880], 'depth': [4, 10, 19, 19], 'train_imsize': 512, 'test_imsize': 600, 'drop_rate': 0.5}, 'B8': {'width': [104, 232, 456, 968], 'depth': [4, 11, 22, 22], 'train_imsize': 600, 'test_imsize': 672, 'drop_rate': 0.5}, } nfnet_params = {} # F-series models nfnet_params.update(**{ 'F0': { 'width': [256, 512, 1536, 1536], 'depth': [1, 2, 6, 3], 'train_imsize': 192, 'test_imsize': 256, 'RA_level': '405', 'drop_rate': 0.2}, 'F1': { 'width': [256, 512, 1536, 1536], 'depth': [2, 4, 12, 6], 'train_imsize': 224, 'test_imsize': 320, 'RA_level': '410', 'drop_rate': 0.3}, 'F2': { 'width': [256, 512, 1536, 1536], 'depth': [3, 6, 18, 9], 'train_imsize': 256, 'test_imsize': 352, 'RA_level': '410', 'drop_rate': 0.4}, 'F3': { 'width': [256, 512, 1536, 1536], 'depth': [4, 8, 24, 12], 'train_imsize': 320, 'test_imsize': 416, 'RA_level': '415', 'drop_rate': 0.4}, 'F4': { 'width': [256, 512, 1536, 1536], 'depth': [5, 10, 30, 15], 'train_imsize': 384, 'test_imsize': 512, 'RA_level': '415', 'drop_rate': 0.5}, 'F5': { 'width': [256, 512, 1536, 1536], 'depth': [6, 12, 36, 18], 'train_imsize': 416, 'test_imsize': 544, 'RA_level': '415', 'drop_rate': 0.5}, 'F6': { 'width': [256, 512, 1536, 1536], 'depth': [7, 14, 42, 21], 'train_imsize': 448, 'test_imsize': 576, 'RA_level': '415', 'drop_rate': 0.5}, 'F7': { 'width': [256, 512, 1536, 1536], 'depth': [8, 16, 48, 24], 'train_imsize': 480, 'test_imsize': 608, 'RA_level': '415', 'drop_rate': 0.5}, }) # Minor variants FN+, slightly wider nfnet_params.update(**{ **{f'{key}+': {**nfnet_params[key], 'width': [384, 768, 2048, 2048],} for key in nfnet_params} }) # Nonlinearities with magic constants (gamma) baked in. # Note that not all nonlinearities will be stable, especially if they are # not perfectly monotonic. Good choices include relu, silu, and gelu. 
nonlinearities = { 'identity': lambda x: x, 'celu': lambda x: jax.nn.celu(x) * 1.270926833152771, 'elu': lambda x: jax.nn.elu(x) * 1.2716004848480225, 'gelu': lambda x: jax.nn.gelu(x) * 1.7015043497085571, 'glu': lambda x: jax.nn.glu(x) * 1.8484294414520264, 'leaky_relu': lambda x: jax.nn.leaky_relu(x) * 1.70590341091156, 'log_sigmoid': lambda x: jax.nn.log_sigmoid(x) * 1.9193484783172607, 'log_softmax': lambda x: jax.nn.log_softmax(x) * 1.0002083778381348, 'relu': lambda x: jax.nn.relu(x) * 1.7139588594436646, 'relu6': lambda x: jax.nn.relu6(x) * 1.7131484746932983, 'selu': lambda x: jax.nn.selu(x) * 1.0008515119552612, 'sigmoid': lambda x: jax.nn.sigmoid(x) * 4.803835391998291, 'silu': lambda x: jax.nn.silu(x) * 1.7881293296813965, 'soft_sign': lambda x: jax.nn.soft_sign(x) * 2.338853120803833, 'softplus': lambda x: jax.nn.softplus(x) * 1.9203323125839233, 'tanh': lambda x: jnp.tanh(x) * 1.5939117670059204, } class WSConv2D(hk.Conv2D): """2D Convolution with Scaled Weight Standardization and affine gain+bias.""" @hk.transparent def standardize_weight(self, weight, eps=1e-4): """Apply scaled WS with affine gain.""" mean = jnp.mean(weight, axis=(0, 1, 2), keepdims=True) var = jnp.var(weight, axis=(0, 1, 2), keepdims=True) fan_in = np.prod(weight.shape[:-1]) # Get gain gain = hk.get_parameter('gain', shape=(weight.shape[-1],), dtype=weight.dtype, init=jnp.ones) # Manually fused normalization, eq. to (w - mean) * gain / sqrt(N * var) scale = jax.lax.rsqrt(jnp.maximum(var * fan_in, eps)) * gain shift = mean * scale return weight * scale - shift def __call__(self, inputs: jnp.ndarray, eps: float = 1e-4) -> jnp.ndarray: w_shape = self.kernel_shape + ( inputs.shape[self.channel_index] // self.feature_group_count, self.output_channels) # Use fan-in scaled init, but WS is largely insensitive to this choice. w_init = hk.initializers.VarianceScaling(1.0, 'fan_in', 'normal') w = hk.get_parameter('w', w_shape, inputs.dtype, init=w_init) weight = self.standardize_weight(w, eps) out = jax.lax.conv_general_dilated( inputs, weight, window_strides=self.stride, padding=self.padding, lhs_dilation=self.lhs_dilation, rhs_dilation=self.kernel_dilation, dimension_numbers=self.dimension_numbers, feature_group_count=self.feature_group_count) # Always add bias bias_shape = (self.output_channels,) bias = hk.get_parameter('bias', bias_shape, inputs.dtype, init=jnp.zeros) return out + bias def signal_metrics(x, i): """Things to measure about a NCHW tensor activation.""" metrics = {} # Average channel-wise mean-squared metrics[f'avg_sq_mean_{i}'] = jnp.mean(jnp.mean(x, axis=[0, 1, 2])**2) # Average channel variance metrics[f'avg_var_{i}'] = jnp.mean(jnp.var(x, axis=[0, 1, 2])) return metrics def count_conv_flops(in_ch, conv, h, w): """For a conv layer with in_ch inputs, count the FLOPS.""" # How many outputs are we producing? Note this is wrong for VALID padding. 
output_shape = conv.output_channels * (h * w) / np.prod(conv.stride) # At each OHW location we do computation equal to (I//G) * kh * kw flop_per_loc = (in_ch / conv.feature_group_count) flop_per_loc *= np.prod(conv.kernel_shape) return output_shape * flop_per_loc class SqueezeExcite(hk.Module): """Simple Squeeze+Excite module.""" def __init__(self, in_ch, out_ch, se_ratio=0.5, hidden_ch=None, activation=jax.nn.relu, name=None): super().__init__(name=name) self.in_ch, self.out_ch = in_ch, out_ch if se_ratio is None: if hidden_ch is None: raise ValueError('Must provide one of se_ratio or hidden_ch') self.hidden_ch = hidden_ch else: self.hidden_ch = max(1, int(self.in_ch * se_ratio)) self.activation = activation self.fc0 = hk.Linear(self.hidden_ch, with_bias=True) self.fc1 = hk.Linear(self.out_ch, with_bias=True) def __call__(self, x): h = jnp.mean(x, axis=[1, 2]) # Mean pool over HW extent h = self.fc1(self.activation(self.fc0(h))) h = jax.nn.sigmoid(h)[:, None, None] # Broadcast along H, W return h class StochDepth(hk.Module): """Batchwise Dropout used in EfficientNet, optionally sans rescaling.""" def __init__(self, drop_rate, scale_by_keep=False, name=None): super().__init__(name=name) self.drop_rate = drop_rate self.scale_by_keep = scale_by_keep def __call__(self, x, is_training) -> jnp.ndarray: if not is_training: return x batch_size = x.shape[0] r = jax.random.uniform(hk.next_rng_key(), [batch_size, 1, 1, 1], dtype=x.dtype) keep_prob = 1. - self.drop_rate binary_tensor = jnp.floor(keep_prob + r) if self.scale_by_keep: x = x / keep_prob return x * binary_tensor
deepmind-research-master
nfnets/base.py
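The nonlinearity gains above are chosen so that each activation is approximately variance-preserving under a unit Gaussian input. A short sketch of how such a gamma can be estimated empirically (the Monte-Carlo procedure and sample count here are assumptions; the table's constants were presumably computed with a similar but higher-precision procedure):

import jax
import jax.numpy as jnp

def estimate_gamma(fn, num_samples=2 ** 24, seed=0):
  """Estimates gamma such that gamma * fn(x) has unit variance for x ~ N(0, 1)."""
  x = jax.random.normal(jax.random.PRNGKey(seed), [num_samples])
  return 1.0 / jnp.std(fn(x))

print(estimate_gamma(jax.nn.relu))  # ~1.71, close to the 'relu' constant above.
print(estimate_gamma(jax.nn.silu))  # ~1.79, close to the 'silu' constant above.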
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Norm-Free Nets.""" # pylint: disable=unused-import # pylint: disable=invalid-name import functools import haiku as hk import jax import jax.numpy as jnp import jax.random as jrandom import numpy as np from nfnets import base class NFNet(hk.Module): """Normalizer-Free Networks with an improved architecture. References: [Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image Recognition Without Normalization. """ variant_dict = base.nfnet_params def __init__(self, num_classes, variant='F0', width=1.0, se_ratio=0.5, alpha=0.2, stochdepth_rate=0.1, drop_rate=None, activation='gelu', fc_init=None, final_conv_mult=2, final_conv_ch=None, use_two_convs=True, name='NFNet'): super().__init__(name=name) self.num_classes = num_classes self.variant = variant self.width = width self.se_ratio = se_ratio # Get variant info block_params = self.variant_dict[self.variant] self.train_imsize = block_params['train_imsize'] self.test_imsize = block_params['test_imsize'] self.width_pattern = block_params['width'] self.depth_pattern = block_params['depth'] self.bneck_pattern = block_params.get('expansion', [0.5] * 4) self.group_pattern = block_params.get('group_width', [128] * 4) self.big_pattern = block_params.get('big_width', [True] * 4) self.activation = base.nonlinearities[activation] if drop_rate is None: self.drop_rate = block_params['drop_rate'] else: self.drop_rate = drop_rate self.which_conv = base.WSConv2D # Stem ch = self.width_pattern[0] // 2 self.stem = hk.Sequential([ self.which_conv(16, kernel_shape=3, stride=2, padding='SAME', name='stem_conv0'), self.activation, self.which_conv(32, kernel_shape=3, stride=1, padding='SAME', name='stem_conv1'), self.activation, self.which_conv(64, kernel_shape=3, stride=1, padding='SAME', name='stem_conv2'), self.activation, self.which_conv(ch, kernel_shape=3, stride=2, padding='SAME', name='stem_conv3'), ]) # Body self.blocks = [] expected_std = 1.0 num_blocks = sum(self.depth_pattern) index = 0 # Overall block index stride_pattern = [1, 2, 2, 2] block_args = zip(self.width_pattern, self.depth_pattern, self.bneck_pattern, self.group_pattern, self.big_pattern, stride_pattern) for (block_width, stage_depth, expand_ratio, group_size, big_width, stride) in block_args: for block_index in range(stage_depth): # Scalar pre-multiplier so each block sees an N(0,1) input at init beta = 1./ expected_std # Block stochastic depth drop-rate block_stochdepth_rate = stochdepth_rate * index / num_blocks out_ch = (int(block_width * self.width)) self.blocks += [NFBlock(ch, out_ch, expansion=expand_ratio, se_ratio=se_ratio, group_size=group_size, stride=stride if block_index == 0 else 1, beta=beta, alpha=alpha, activation=self.activation, which_conv=self.which_conv, stochdepth_rate=block_stochdepth_rate, big_width=big_width, use_two_convs=use_two_convs, )] ch = out_ch index += 1 # Reset 
expected std but still give it 1 block of growth if block_index == 0: expected_std = 1.0 expected_std = (expected_std **2 + alpha**2)**0.5 # Head if final_conv_mult is None: if final_conv_ch is None: raise ValueError('Must provide one of final_conv_mult or final_conv_ch') ch = final_conv_ch else: ch = int(final_conv_mult * ch) self.final_conv = self.which_conv(ch, kernel_shape=1, padding='SAME', name='final_conv') # By default, initialize with N(0, 0.01) if fc_init is None: fc_init = hk.initializers.RandomNormal(mean=0, stddev=0.01) self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True) def __call__(self, x, is_training=True, return_metrics=False): """Return the output of the final layer without any [log-]softmax.""" # Stem outputs = {} out = self.stem(x) if return_metrics: outputs.update(base.signal_metrics(out, 0)) # Blocks for i, block in enumerate(self.blocks): out, res_avg_var = block(out, is_training=is_training) if return_metrics: outputs.update(base.signal_metrics(out, i + 1)) outputs[f'res_avg_var_{i}'] = res_avg_var # Final-conv->activation, pool, dropout, classify out = self.activation(self.final_conv(out)) pool = jnp.mean(out, [1, 2]) outputs['pool'] = pool # Optionally apply dropout if self.drop_rate > 0.0 and is_training: pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool) outputs['logits'] = self.fc(pool) return outputs def count_flops(self, h, w): flops = [] ch = 3 for module in self.stem.layers: if isinstance(module, hk.Conv2D): flops += [base.count_conv_flops(ch, module, h, w)] if any([item > 1 for item in module.stride]): h, w = h / module.stride[0], w / module.stride[1] ch = module.output_channels # Body FLOPs for block in self.blocks: flops += [block.count_flops(h, w)] if block.stride > 1: h, w = h / block.stride, w / block.stride # Head module FLOPs out_ch = self.blocks[-1].out_ch flops += [base.count_conv_flops(out_ch, self.final_conv, h, w)] # Count flops for classifier flops += [self.final_conv.output_channels * self.fc.output_size] return flops, sum(flops) class NFBlock(hk.Module): """Normalizer-Free Net Block.""" def __init__(self, in_ch, out_ch, expansion=0.5, se_ratio=0.5, kernel_shape=3, group_size=128, stride=1, beta=1.0, alpha=0.2, which_conv=base.WSConv2D, activation=jax.nn.gelu, big_width=True, use_two_convs=True, stochdepth_rate=None, name=None): super().__init__(name=name) self.in_ch, self.out_ch = in_ch, out_ch self.expansion = expansion self.se_ratio = se_ratio self.kernel_shape = kernel_shape self.activation = activation self.beta, self.alpha = beta, alpha # Mimic resnet style bigwidth scaling? width = int((self.out_ch if big_width else self.in_ch) * expansion) # Round expanded with based on group count self.groups = width // group_size self.width = group_size * self.groups self.stride = stride self.use_two_convs = use_two_convs # Conv 0 (typically expansion conv) self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME', name='conv0') # Grouped NxN conv self.conv1 = which_conv(self.width, kernel_shape=kernel_shape, stride=stride, padding='SAME', feature_group_count=self.groups, name='conv1') if self.use_two_convs: self.conv1b = which_conv(self.width, kernel_shape=kernel_shape, stride=1, padding='SAME', feature_group_count=self.groups, name='conv1b') # Conv 2, typically projection conv self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME', name='conv2') # Use shortcut conv on channel change or downsample. 
self.use_projection = stride > 1 or self.in_ch != self.out_ch if self.use_projection: self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1, padding='SAME', name='conv_shortcut') # Squeeze + Excite Module self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio) # Are we using stochastic depth? self._has_stochdepth = (stochdepth_rate is not None and stochdepth_rate > 0. and stochdepth_rate < 1.0) if self._has_stochdepth: self.stoch_depth = base.StochDepth(stochdepth_rate) def __call__(self, x, is_training): out = self.activation(x) * self.beta if self.stride > 1: # Average-pool downsample. shortcut = hk.avg_pool(out, window_shape=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME') if self.use_projection: shortcut = self.conv_shortcut(shortcut) elif self.use_projection: shortcut = self.conv_shortcut(out) else: shortcut = x out = self.conv0(out) out = self.conv1(self.activation(out)) if self.use_two_convs: out = self.conv1b(self.activation(out)) out = self.conv2(self.activation(out)) out = (self.se(out) * 2) * out # Multiply by 2 for rescaling # Get average residual standard deviation for reporting metrics. res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2])) # Apply stochdepth if applicable. if self._has_stochdepth: out = self.stoch_depth(out, is_training) # SkipInit Gain out = out * hk.get_parameter('skip_gain', (), out.dtype, init=jnp.zeros) return out * self.alpha + shortcut, res_avg_var def count_flops(self, h, w): # Count conv FLOPs based on input HW expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w) # If block is strided we decrease resolution here. dw_flops = base.count_conv_flops(self.width, self.conv1, h, w) if self.stride > 1: h, w = h / self.stride, w / self.stride if self.use_two_convs: dw_flops += base.count_conv_flops(self.width, self.conv1b, h, w) if self.use_projection: sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w) else: sc_flops = 0 # SE flops happen on avg-pooled activations se_flops = self.se.fc0.output_size * self.out_ch se_flops += self.se.fc0.output_size * self.se.fc1.output_size contract_flops = base.count_conv_flops(self.width, self.conv2, h, w) return sum([expand_flops, dw_flops, se_flops, contract_flops, sc_flops])
deepmind-research-master
nfnets/nfnet.py
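A minimal standalone sketch (not part of the repo; the depth pattern and alpha below are illustrative) of the expected-std bookkeeping used above to set each block's beta pre-multiplier:

def nf_beta_schedule(depth_pattern=(1, 2, 6, 3), alpha=0.2):
  """Reproduces the beta = 1 / expected_std schedule from the NFNet body."""
  betas = []
  expected_std = 1.0
  for stage_depth in depth_pattern:
    for block_index in range(stage_depth):
      betas.append(1.0 / expected_std)  # Block input is ~unit variance at init.
      if block_index == 0:
        expected_std = 1.0  # Reset at each transition (strided) block...
      expected_std = (expected_std ** 2 + alpha ** 2) ** 0.5  # ...then grow.
  return betas


print(['%.3f' % b for b in nf_beta_schedule()])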
# Copyright 2021 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Default config, focused on model evaluation.""" from ml_collections import config_dict def get_config(filter_time_intervals=None): """Return config object for training.""" config = config_dict.ConfigDict() config.eval_strategy = config_dict.ConfigDict() config.eval_strategy.class_name = 'OneDeviceConfig' config.eval_strategy.kwargs = config_dict.ConfigDict( dict(device_type='v100')) ## Experiment config. config.experiment_kwargs = config_dict.ConfigDict(dict( resnet_kwargs=dict( blocks_per_group_list=[3, 4, 6, 3], # This choice is ResNet50. bn_config=dict( decay_rate=0.9, eps=1e-5), resnet_v2=False, additional_features_mode='mlp', ), optimizer_config=dict( class_name='Momentum', kwargs={'momentum': 0.9}, # Set up the learning rate schedule. lr_init=0.025, lr_factor=0.1, lr_schedule=(50e3, 100e3, 150e3), gradient_clip=5., ), l2_regularization=1e-4, total_train_batch_size=128, train_net_args={'is_training': True}, eval_batch_size=128, eval_net_args={'is_training': True}, data_config=dict( # dataset loading dataset_path=None, num_val_splits=10, val_split=0, # image cropping image_size=(80, 80, 7), train_crop_type='crop_fixed', test_crop_type='crop_fixed', n_crop_repeat=1, train_augmentations=dict( rotation_and_flip=True, rescaling=True, translation=True, ), test_augmentations=dict( rotation_and_flip=False, rescaling=False, translation=False, ), test_time_ensembling='sum', num_eval_buckets=5, eval_confidence_interval=95, task='grounded_unnormalized_regression', loss_config=dict( loss='mse', mse_normalize=False, ), model_uncertainty=True, additional_features='', time_filter_intervals=filter_time_intervals, class_boundaries={ '0': [[-1., 0]], '1': [[0, 1.]] }, frequencies_to_use='all', ), n_train_epochs=100 )) return config
deepmind-research-master
galaxy_mergers/config.py
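A usage sketch, assuming the galaxy_mergers package is importable; the dataset path and override values below are placeholders:

from galaxy_mergers import config as merger_config

cfg = merger_config.get_config(filter_time_intervals=[[-0.2, 0.0], [0.5, 1.0]])
with cfg.ignore_type():
  # Same override mechanism the evaluator uses further below.
  cfg.update_from_flattened_dict({
      'experiment_kwargs.data_config.dataset_path': '/tmp/mergers.tfrecord',
      'experiment_kwargs.eval_batch_size': 32,
  })
print(cfg.experiment_kwargs.data_config.task)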
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to visualize gradients and other interpretability analysis."""

import numpy as np
import tensorflow.compat.v2 as tf


def rotate_by_right_angle_multiple(image, rot=90):
  """Rotate an image by right angles.

  Negative angles invert the corresponding positive rotation.
  """
  if rot not in [0, 90, 180, 270, -90, -180, -270]:
    raise ValueError(f"Cannot rotate by non-90 degree angle {rot}")

  if rot in [90, -270]:
    image = np.transpose(image, (1, 0, 2))
    image = image[::-1]
  elif rot in [180, -180]:
    image = image[::-1, ::-1]
  elif rot in [270, -90]:
    image = np.transpose(image, (1, 0, 2))
    image = image[:, ::-1]
  return image


def compute_gradient(images, evaluator, is_training=False):
  inputs = tf.Variable(images[None], dtype=tf.float32)
  with tf.GradientTape() as tape:
    tape.watch(inputs)
    time_sigma = evaluator.model(inputs, None, is_training)
  grad_time = tape.gradient(time_sigma[:, 0], inputs)
  return grad_time, time_sigma


def compute_grads_for_rotations(images, evaluator, is_training=False):
  test_gradients, test_outputs = [], []
  for rotation in np.arange(0, 360, 90):
    images_rot = rotate_by_right_angle_multiple(images, rotation)
    grads, time_sigma = compute_gradient(images_rot, evaluator, is_training)
    grads = np.squeeze(grads.numpy())
    inv_grads = rotate_by_right_angle_multiple(grads, -rotation)
    test_gradients.append(inv_grads)
    test_outputs.append(time_sigma.numpy())
  return np.squeeze(test_gradients), np.squeeze(test_outputs)


def compute_grads_for_rotations_and_flips(images, evaluator):
  grads, time_sigma = compute_grads_for_rotations(images, evaluator)
  grads_f, time_sigma_f = compute_grads_for_rotations(images[::-1], evaluator)
  grads_f = grads_f[:, ::-1]
  all_grads = np.concatenate([grads, grads_f], 0)
  model_outputs = np.concatenate((time_sigma, time_sigma_f), 0)
  return all_grads, model_outputs
deepmind-research-master
galaxy_mergers/interpretability_helpers.py
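A self-contained check (illustrative only, using NumPy directly rather than the repo) that the transpose-and-flip in the rot=90 branch above is a counter-clockwise quarter turn, and that four applications recover the original image:

import numpy as np

image = np.random.RandomState(0).rand(8, 8, 3)
quarter_turn = np.transpose(image, (1, 0, 2))[::-1]   # The rot=90 branch.
assert np.allclose(quarter_turn, np.rot90(image))

out = image
for _ in range(4):
  out = np.transpose(out, (1, 0, 2))[::-1]
assert np.allclose(out, image)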
# Copyright 2021 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fork of a generic ResNet to incorporate additional cosmological features.""" from typing import Mapping, Optional, Sequence, Text import sonnet.v2 as snt import tensorflow.compat.v2 as tf class ResNet(snt.Module): """ResNet model.""" def __init__(self, n_repeats: int, blocks_per_group_list: Sequence[int], num_classes: int, bn_config: Optional[Mapping[Text, float]] = None, resnet_v2: bool = False, channels_per_group_list: Sequence[int] = (256, 512, 1024, 2048), use_additional_features: bool = False, additional_features_mode: Optional[Text] = "per_block", name: Optional[Text] = None): """Constructs a ResNet model. Args: n_repeats: The batch dimension for the input is expected to have the form `B = b * n_repeats`. After the conv stack, the logits for the `n_repeats` replicas are reduced, leading to an output batch dimension of `b`. blocks_per_group_list: A sequence of length 4 that indicates the number of blocks created in each group. num_classes: The number of classes to classify the inputs into. bn_config: A dictionary of two elements, `decay_rate` and `eps` to be passed on to the `BatchNorm` layers. By default the `decay_rate` is `0.9` and `eps` is `1e-5`. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to False. channels_per_group_list: A sequence of length 4 that indicates the number of channels used for each block in each group. use_additional_features: If true, additional vector features will be concatenated to the residual stack before logits are computed. additional_features_mode: Mode for processing additional features. Supported modes: 'mlp' and 'per_block'. name: Name of the module. """ super(ResNet, self).__init__(name=name) self._n_repeats = n_repeats if bn_config is None: bn_config = {"decay_rate": 0.9, "eps": 1e-5} self._bn_config = bn_config self._resnet_v2 = resnet_v2 # Number of blocks in each group for ResNet. if len(blocks_per_group_list) != 4: raise ValueError( "`blocks_per_group_list` must be of length 4 not {}".format( len(blocks_per_group_list))) self._blocks_per_group_list = blocks_per_group_list # Number of channels in each group for ResNet. 
if len(channels_per_group_list) != 4: raise ValueError( "`channels_per_group_list` must be of length 4 not {}".format( len(channels_per_group_list))) self._channels_per_group_list = channels_per_group_list self._use_additional_features = use_additional_features self._additional_features_mode = additional_features_mode self._initial_conv = snt.Conv2D( output_channels=64, kernel_shape=7, stride=2, with_bias=False, padding="SAME", name="initial_conv") if not self._resnet_v2: self._initial_batchnorm = snt.BatchNorm( create_scale=True, create_offset=True, name="initial_batchnorm", **bn_config) self._block_groups = [] strides = [1, 2, 2, 2] for i in range(4): self._block_groups.append( snt.nets.resnet.BlockGroup( channels=self._channels_per_group_list[i], num_blocks=self._blocks_per_group_list[i], stride=strides[i], bn_config=bn_config, resnet_v2=resnet_v2, name="block_group_%d" % (i))) if self._resnet_v2: self._final_batchnorm = snt.BatchNorm( create_scale=True, create_offset=True, name="final_batchnorm", **bn_config) self._logits = snt.Linear( output_size=num_classes, w_init=snt.initializers.VarianceScaling(scale=2.0), name="logits") if self._use_additional_features: self._embedding = LinearBNReLU(output_size=16, name="embedding", **bn_config) if self._additional_features_mode == "mlp": self._feature_repr = LinearBNReLU( output_size=self._channels_per_group_list[-1], name="features_repr", **bn_config) elif self._additional_features_mode == "per_block": self._feature_repr = [] for i, ch in enumerate(self._channels_per_group_list): self._feature_repr.append( LinearBNReLU(output_size=ch, name=f"features_{i}", **bn_config)) else: raise ValueError(f"Unsupported addiitonal features mode: " f"{additional_features_mode}") def __call__(self, inputs, features, is_training): net = inputs net = self._initial_conv(net) if not self._resnet_v2: net = self._initial_batchnorm(net, is_training=is_training) net = tf.nn.relu(net) net = tf.nn.max_pool2d( net, ksize=3, strides=2, padding="SAME", name="initial_max_pool") if self._use_additional_features: assert features is not None features = self._embedding(features, is_training=is_training) for i, block_group in enumerate(self._block_groups): net = block_group(net, is_training) if (self._use_additional_features and self._additional_features_mode == "per_block"): features_i = self._feature_repr[i](features, is_training=is_training) # support for n_repeats > 1 features_i = tf.repeat(features_i, self._n_repeats, axis=0) net += features_i[:, None, None, :] # expand to spacial resolution if self._resnet_v2: net = self._final_batchnorm(net, is_training=is_training) net = tf.nn.relu(net) net = tf.reduce_mean(net, axis=[1, 2], name="final_avg_pool") # Re-split the batch dimension net = tf.reshape(net, [-1, self._n_repeats] + net.shape.as_list()[1:]) # Average over the various repeats of the input (e.g. those could have # corresponded to different crops). net = tf.reduce_mean(net, axis=1) if (self._use_additional_features and self._additional_features_mode == "mlp"): net += self._feature_repr(features, is_training=is_training) return self._logits(net) class LinearBNReLU(snt.Module): """Wrapper class for Linear layer with Batch Norm and ReLU activation.""" def __init__(self, output_size=64, w_init=snt.initializers.VarianceScaling(scale=2.0), name="linear", **bn_config): """Constructs a LinearBNReLU module. Args: output_size: Output dimension. w_init: weight Initializer for snt.Linear. name: Name of the module. **bn_config: Optional parameters to be passed to snt.BatchNorm. 
""" super(LinearBNReLU, self).__init__(name=name) self._linear = snt.Linear(output_size=output_size, w_init=w_init, name=f"{name}_linear") self._bn = snt.BatchNorm(create_scale=True, create_offset=True, name=f"{name}_bn", **bn_config) def __call__(self, x, is_training): x = self._linear(x) x = self._bn(x, is_training=is_training) return tf.nn.relu(x)
deepmind-research-master
galaxy_mergers/model.py
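As a small illustration of how the n_repeats axis is handled above (repeated crops are folded into the batch and averaged back out before the logits layer), here is a NumPy sketch with made-up shapes:

import numpy as np

batch, n_repeats, features = 2, 3, 4
# Hypothetical per-crop embeddings, shaped [batch * n_repeats, features],
# i.e. after the conv stack and global average pooling.
embeddings = np.arange(batch * n_repeats * features, dtype=np.float32)
embeddings = embeddings.reshape(batch * n_repeats, features)
# Re-split the repeat dimension and average over it, mirroring the reshape
# and reduce_mean applied before the final linear layer above.
merged = embeddings.reshape(batch, n_repeats, features).mean(axis=1)
print(merged.shape)  # (2, 4)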
# Copyright 2021 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helpers to pre-process Antennae galaxy images.""" import collections import os from astropy.io import fits import numpy as np from scipy import ndimage import tensorflow.compat.v2 as tf def norm_antennae_images(images, scale=1000): return tf.math.asinh(images/scale) def renorm_antennae(images): median = np.percentile(images.numpy().flatten(), 50) img_range = np.ptp(images.numpy().flatten()) return (images - median) / (img_range / 2) def get_antennae_images(antennae_fits_dir): """Load the raw Antennae galaxy images.""" all_fits_files = [ os.path.join(antennae_fits_dir, f) for f in os.listdir(antennae_fits_dir) ] freq_mapping = {'red': 160, 'blue': 850} paired_fits_files = collections.defaultdict(list) for f in all_fits_files: redshift = float(f[-8:-5]) paired_fits_files[redshift].append(f) for redshift, files in paired_fits_files.items(): paired_fits_files[redshift] = sorted( files, key=lambda f: freq_mapping[f.split('/')[-1].split('_')[0]]) print('Reading files:', paired_fits_files) print('Redshifts:', sorted(paired_fits_files.keys())) galaxy_views = collections.defaultdict(list) for redshift in paired_fits_files: for view_path in paired_fits_files[redshift]: with open(view_path, 'rb') as f: fits_data = fits.open(f) galaxy_views[redshift].append(np.array(fits_data[0].data)) batched_images = [] for redshift in paired_fits_files: img = tf.constant(np.array(galaxy_views[redshift])) img = tf.transpose(img, (1, 2, 0)) img = tf.image.resize(img, size=(60, 60)) batched_images.append(img) return tf.stack(batched_images) def preprocess_antennae_images(antennae_images): """Pre-process the Antennae galaxy images into a reasonable range.""" rotated_antennae_images = [ ndimage.rotate(img, 10, reshape=True, cval=-1)[10:-10, 10:-10] for img in antennae_images ] rotated_antennae_images = [ np.clip(img, 0, 1e9) for img in rotated_antennae_images ] rotated_antennae_images = tf.stack(rotated_antennae_images) normed_antennae_images = norm_antennae_images(rotated_antennae_images) normed_antennae_images = tf.clip_by_value(normed_antennae_images, 1, 4.5) renormed_antennae_images = renorm_antennae(normed_antennae_images) return renormed_antennae_images
deepmind-research-master
galaxy_mergers/antennae_helpers.py
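The asinh stretch above compresses the large dynamic range of the flux maps: roughly linear for flux well below the scale, roughly logarithmic above it. A tiny standalone illustration with example values:

import numpy as np

scale = 1000.0
flux = np.array([10.0, 1e3, 1e6, 1e9])
print(np.arcsinh(flux / scale))   # ~[0.01, 0.88, 7.6, 14.5]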
# Copyright 2021 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pre-processing functions for input data.""" import functools from absl import logging import tensorflow.compat.v2 as tf from galaxy_mergers import losses CROP_TYPE_NONE = 'crop_none' CROP_TYPE_FIXED = 'crop_fixed' CROP_TYPE_RANDOM = 'crop_random' DATASET_FREQUENCY_MEAN = 4.0 DATASET_FREQUENCY_RANGE = 8.0 PHYSICAL_FEATURES_MIN_MAX = { 'redshift': (0.572788, 2.112304), 'mass': (9.823963, 10.951282) } ALL_FREQUENCIES = [105, 125, 160, 435, 606, 775, 850] VALID_ADDITIONAL_FEATURES = ['redshift', 'sequence_average_redshift', 'mass'] def _make_padding_sizes(pad_size, random_centering): if random_centering: pad_size_left = tf.random.uniform( shape=[], minval=0, maxval=pad_size+1, dtype=tf.int32) else: pad_size_left = pad_size // 2 pad_size_right = pad_size - pad_size_left return pad_size_left, pad_size_right def resize_and_pad(image, target_size, random_centering): """Resize image to target_size (<= image.size) and pad to original size.""" original_shape = image.shape size = tf.reshape(target_size, [1]) size = tf.concat([size, size], axis=0) image = tf.image.resize(image, size=size) pad_size = original_shape[1] - target_size pad_size_left, pad_size_right = _make_padding_sizes( pad_size, random_centering) padding = [[pad_size_left, pad_size_right], [pad_size_left, pad_size_right], [0, 0]] if len(original_shape) == 4: padding = [[0, 0]] + padding image = tf.pad(image, padding) image.set_shape(original_shape) return image def resize_and_extract(image, target_size, random_centering): """Upscale image to target_size (>image.size), extract original size crop.""" original_shape = image.shape size = tf.reshape(target_size, [1]) size = tf.concat([size, size], axis=0) image = tf.image.resize(image, size=size) pad_size = target_size - original_shape[1] pad_size_left, pad_size_right = _make_padding_sizes( pad_size, random_centering) if len(original_shape) == 3: image = tf.expand_dims(image, 0) image = tf.cond(pad_size_right > 0, lambda: image[:, pad_size_left:-pad_size_right, :, :], lambda: image[:, pad_size_left:, :, :]) image = tf.cond(pad_size_right > 0, lambda: image[:, :, pad_size_left:-pad_size_right, :], lambda: image[:, :, pad_size_left:, :]) if len(original_shape) == 3: image = tf.squeeze(image, 0) image.set_shape(original_shape) return image def resize_and_center(image, target_size, random_centering): return tf.cond( tf.math.less_equal(target_size, image.shape[1]), lambda: resize_and_pad(image, target_size, random_centering), lambda: resize_and_extract(image, target_size, random_centering)) def random_rotation_and_flip(image): angle = tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32) return tf.image.random_flip_left_right(tf.image.rot90(image, angle)) def get_all_rotations_and_flips(images): assert isinstance(images, list) new_images = [] for image in images: for rotation in range(4): new_images.append(tf.image.rot90(image, rotation)) flipped_image = tf.image.flip_left_right(image) 
new_images.append(tf.image.rot90(flipped_image, rotation)) return new_images def random_rescaling(image, random_centering): assert image.shape.as_list()[0] == image.shape.as_list()[1] original_size = image.shape.as_list()[1] min_size = 2 * (original_size // 4) max_size = original_size * 2 target_size = tf.random.uniform( shape=[], minval=min_size, maxval=max_size // 2, dtype=tf.int32) * 2 return resize_and_center(image, target_size, random_centering) def get_all_rescalings(images, image_width, random_centering): """Get a uniform sample of rescalings of all images in input.""" assert isinstance(images, list) min_size = 2 * (image_width // 4) max_size = image_width * 2 delta_size = (max_size + 2 - min_size) // 5 sizes = range(min_size, max_size + 2, delta_size) new_images = [] for image in images: for size in sizes: new_images.append(resize_and_center(image, size, random_centering)) return new_images def move_repeats_to_batch(image, n_repeats): width, height, n_channels = image.shape.as_list()[1:] image = tf.reshape(image, [-1, width, height, n_channels, n_repeats]) image = tf.transpose(image, [0, 4, 1, 2, 3]) # [B, repeats, x, y, c] return tf.reshape(image, [-1, width, height, n_channels]) def get_classification_label(dataset_row, class_boundaries): merge_time = dataset_row['grounded_normalized_time'] label = tf.dtypes.cast(0, tf.int64) for category, intervals in class_boundaries.items(): for interval in intervals: if merge_time > interval[0] and merge_time < interval[1]: label = tf.dtypes.cast(int(category), tf.int64) return label def get_regression_label(dataset_row, task_type): """Returns time-until-merger regression target given desired modeling task.""" if task_type == losses.TASK_NORMALIZED_REGRESSION: return tf.dtypes.cast(dataset_row['normalized_time'], tf.float32) elif task_type == losses.TASK_GROUNDED_UNNORMALIZED_REGRESSION: return tf.dtypes.cast(dataset_row['grounded_normalized_time'], tf.float32) elif task_type == losses.TASK_UNNORMALIZED_REGRESSION: return tf.dtypes.cast(dataset_row['unnormalized_time'], tf.float32) elif task_type == losses.TASK_CLASSIFICATION: return tf.dtypes.cast(dataset_row['grounded_normalized_time'], tf.float32) else: raise ValueError def get_normalized_time_target(dataset_row): return tf.dtypes.cast(dataset_row['normalized_time'], tf.float32) def apply_time_filter(dataset_row, time_interval): """Returns True if data is within the given time intervals.""" merge_time = dataset_row['grounded_normalized_time'] lower_time, upper_time = time_interval return merge_time > lower_time and merge_time < upper_time def normalize_physical_feature(name, dataset_row): min_feat, max_feat = PHYSICAL_FEATURES_MIN_MAX[name] value = getattr(dataset_row, name) return 2 * (value - min_feat) / (max_feat - min_feat) - 1 def prepare_dataset(ds, target_size, crop_type, n_repeats, augmentations, task_type, additional_features, class_boundaries, time_intervals=None, frequencies_to_use='all', additional_lambdas=None): """Prepare a zipped dataset of image, classification/regression labels.""" def _prepare_image(dataset_row): """Transpose, crop and cast an image.""" image = tf.dtypes.cast(dataset_row['image'], tf.float32) image = tf.reshape(image, tf.cast(dataset_row['image_shape'], tf.int32)) image = tf.transpose(image, perm=[1, 2, 0]) # Convert to NHWC freqs = ALL_FREQUENCIES if frequencies_to_use == 'all' else frequencies_to_use idxs_to_keep = [ALL_FREQUENCIES.index(f) for f in freqs] image = tf.gather(params=image, indices=idxs_to_keep, axis=-1) # Based on offline computation on 
the empirical frequency range: # Converts [0, 8.] ~~> [-1, 1] image = (image - DATASET_FREQUENCY_MEAN)/(DATASET_FREQUENCY_RANGE/2.0) def crop(image): if crop_type == CROP_TYPE_FIXED: crop_loc = tf.cast(dataset_row['proposed_crop'][0], tf.int32) crop_size = tf.cast(dataset_row['proposed_crop'][1], tf.int32) image = image[ crop_loc[0]:crop_loc[0] + crop_size[0], crop_loc[1]:crop_loc[1] + crop_size[1], :] image = tf.image.resize(image, target_size[0:2]) image.set_shape([target_size[0], target_size[1], target_size[2]]) elif crop_type == CROP_TYPE_RANDOM: image = tf.image.random_crop(image, target_size) image.set_shape([target_size[0], target_size[1], target_size[2]]) elif crop_type != CROP_TYPE_NONE: raise NotImplementedError return image repeated_images = [] for _ in range(n_repeats): repeated_images.append(crop(image)) image = tf.concat(repeated_images, axis=-1) if augmentations['rotation_and_flip']: image = random_rotation_and_flip(image) if augmentations['rescaling']: image = random_rescaling(image, augmentations['translation']) return image def get_regression_label_wrapper(dataset_row): return get_regression_label(dataset_row, task_type=task_type) def get_classification_label_wrapper(dataset_row): return get_classification_label(dataset_row, class_boundaries=class_boundaries) if time_intervals: for time_interval in time_intervals: filter_fn = functools.partial(apply_time_filter, time_interval=time_interval) ds = ds.filter(filter_fn) datasets = [ds.map(_prepare_image)] if additional_features: additional_features = additional_features.split(',') assert all([f in VALID_ADDITIONAL_FEATURES for f in additional_features]) logging.info('Running with additional features: %s.', ', '.join(additional_features)) def _prepare_additional_features(dataset_row): features = [] for f in additional_features: features.append(normalize_physical_feature(f, dataset_row)) features = tf.convert_to_tensor(features, dtype=tf.float32) features.set_shape([len(additional_features)]) return features datasets += [ds.map(_prepare_additional_features)] datasets += [ ds.map(get_classification_label_wrapper), ds.map(get_regression_label_wrapper), ds.map(get_normalized_time_target)] if additional_lambdas: for process_fn in additional_lambdas: datasets += [ds.map(process_fn)] return tf.data.Dataset.zip(tuple(datasets))
deepmind-research-master
galaxy_mergers/preprocessing.py
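A standalone arithmetic sketch (using the 80-pixel image width from the default config) of the size grid that get_all_rescalings above enumerates:

image_width = 80
min_size = 2 * (image_width // 4)              # 40
max_size = image_width * 2                     # 160
delta_size = (max_size + 2 - min_size) // 5    # 24
sizes = list(range(min_size, max_size + 2, delta_size))
print(sizes)   # [40, 64, 88, 112, 136, 160]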
# Copyright 2021 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helpers to compute loss metrics.""" import scipy.stats import tensorflow.compat.v2 as tf import tensorflow_probability as tfp TASK_CLASSIFICATION = 'classification' TASK_NORMALIZED_REGRESSION = 'normalized_regression' TASK_UNNORMALIZED_REGRESSION = 'unnormalized_regression' TASK_GROUNDED_UNNORMALIZED_REGRESSION = 'grounded_unnormalized_regression' REGRESSION_TASKS = [TASK_NORMALIZED_REGRESSION, TASK_UNNORMALIZED_REGRESSION, TASK_GROUNDED_UNNORMALIZED_REGRESSION] ALL_TASKS = [TASK_CLASSIFICATION] + REGRESSION_TASKS LOSS_MSE = 'mse' LOSS_SOFTMAX_CROSS_ENTROPY = 'softmax_cross_entropy' ALL_LOSSES = [LOSS_SOFTMAX_CROSS_ENTROPY, LOSS_MSE] def normalize_regression_loss(regression_loss, predictions): # Normalize loss such that: # 1) E_{x uniform}[loss(x, prediction)] does not depend on prediction # 2) E_{x uniform, prediction uniform}[loss(x, prediction)] is as before. # Divides MSE regression loss by E[(prediction-x)^2]; assumes x=[-1,1] normalization = 2./3. normalized_loss = regression_loss / ((1./3 + predictions**2) / normalization) return normalized_loss def equal32(x, y): return tf.cast(tf.equal(x, y), tf.float32) def mse_loss(predicted, targets): return (predicted - targets) ** 2 def get_std_factor_from_confidence_percent(percent): dec = percent/100. inv_dec = 1 - dec return scipy.stats.norm.ppf(dec+inv_dec/2) def get_all_metric_names(task_type, model_uncertainty, loss_config, # pylint: disable=unused-argument mode='eval', return_dict=True): """Get all the scalar fields produced by compute_loss_and_metrics.""" names = ['regularization_loss', 'prediction_accuracy', str(mode)+'_loss'] if task_type == TASK_CLASSIFICATION: names += ['classification_loss'] else: names += ['regression_loss', 'avg_mu', 'var_mu'] if model_uncertainty: names += ['uncertainty_loss', 'scaled_regression_loss', 'uncertainty_plus_scaled_regression', 'avg_sigma', 'var_sigma', 'percent_in_conf_interval', 'error_sigma_correlation', 'avg_prob'] if return_dict: return {name: 0. 
for name in names} else: return names def compute_loss_and_metrics(mu, log_sigma_sq, regression_targets, labels, task_type, model_uncertainty, loss_config, regularization_loss=0., confidence_interval=95, mode='train'): """Computes loss statistics and other metrics.""" scalars_to_log = dict() vectors_to_log = dict() scalars_to_log['regularization_loss'] = regularization_loss vectors_to_log['mu'] = mu if task_type == TASK_CLASSIFICATION: cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=mu, labels=labels, name='cross_entropy') classification_loss = tf.reduce_mean(cross_entropy, name='class_loss') total_loss = classification_loss sigma = None scalars_to_log['classification_loss'] = classification_loss predicted_labels = tf.argmax(mu, axis=1) correct_predictions = equal32(predicted_labels, labels) else: regression_loss = mse_loss(mu, regression_targets) if 'mse_normalize' in loss_config and loss_config['mse_normalize']: assert task_type in [TASK_GROUNDED_UNNORMALIZED_REGRESSION, TASK_NORMALIZED_REGRESSION] regression_loss = normalize_regression_loss(regression_loss, mu) avg_regression_loss = tf.reduce_mean(regression_loss) vectors_to_log['regression_loss'] = regression_loss scalars_to_log['regression_loss'] = avg_regression_loss scalars_to_log['avg_mu'] = tf.reduce_mean(mu) scalars_to_log['var_mu'] = tf.reduce_mean(mse_loss(mu, tf.reduce_mean(mu))) predicted_labels = tf.cast(mu > 0, tf.int64) correct_predictions = equal32(predicted_labels, labels) if model_uncertainty: # This implements Eq. (1) in https://arxiv.org/pdf/1612.01474.pdf inv_sigma_sq = tf.math.exp(-log_sigma_sq) scaled_regression_loss = regression_loss * inv_sigma_sq scaled_regression_loss = tf.reduce_mean(scaled_regression_loss) uncertainty_loss = tf.reduce_mean(log_sigma_sq) total_loss = uncertainty_loss + scaled_regression_loss scalars_to_log['uncertainty_loss'] = uncertainty_loss scalars_to_log['scaled_regression_loss'] = scaled_regression_loss scalars_to_log['uncertainty_plus_scaled_regression'] = total_loss sigma = tf.math.exp(log_sigma_sq / 2.) vectors_to_log['sigma'] = sigma scalars_to_log['avg_sigma'] = tf.reduce_mean(sigma) var_sigma = tf.reduce_mean(mse_loss(sigma, tf.reduce_mean(sigma))) scalars_to_log['var_sigma'] = var_sigma # Compute # of labels that fall into the confidence interval. 
std_factor = get_std_factor_from_confidence_percent(confidence_interval) lower_bound = mu - std_factor * sigma upper_bound = mu + std_factor * sigma preds = tf.logical_and(tf.greater(regression_targets, lower_bound), tf.less(regression_targets, upper_bound)) percent_in_conf_interval = tf.reduce_mean(tf.cast(preds, tf.float32)) scalars_to_log['percent_in_conf_interval'] = percent_in_conf_interval*100 error_sigma_corr = tfp.stats.correlation(x=regression_loss, y=sigma, event_axis=None) scalars_to_log['error_sigma_correlation'] = error_sigma_corr dists = tfp.distributions.Normal(mu, sigma) probs = dists.prob(regression_targets) scalars_to_log['avg_prob'] = tf.reduce_mean(probs) else: total_loss = avg_regression_loss loss_name = str(mode)+'_loss' total_loss = tf.add(total_loss, regularization_loss, name=loss_name) scalars_to_log[loss_name] = total_loss vectors_to_log['correct_predictions'] = correct_predictions scalars_to_log['prediction_accuracy'] = tf.reduce_mean(correct_predictions) # Validate that metrics outputted are exactly what is expected expected = get_all_metric_names(task_type, model_uncertainty, loss_config, mode, False) assert set(expected) == set(scalars_to_log.keys()) return scalars_to_log, vectors_to_log
deepmind-research-master
galaxy_mergers/losses.py
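A minimal NumPy sketch, with made-up numbers, of the uncertainty-weighted regression objective assembled above when model_uncertainty is enabled:

import numpy as np

mu = np.array([0.1, -0.4])
log_sigma_sq = np.array([-1.0, 0.5])
targets = np.array([0.0, -0.2])

regression_loss = (mu - targets) ** 2
scaled = np.mean(regression_loss * np.exp(-log_sigma_sq))
uncertainty = np.mean(log_sigma_sq)
total = uncertainty + scaled   # Before adding the regularization term.
print(total)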
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to run model evaluation on a checkpoint and dataset."""

import ast

from absl import app
from absl import flags
from absl import logging

from galaxy_mergers import evaluator

flags.DEFINE_string('checkpoint_path', '', 'Path to TF2 checkpoint to eval.')
flags.DEFINE_string('data_path', '', 'Path to TFRecord(s) with data.')
flags.DEFINE_string('filter_time_intervals', None,
                    'Merger time intervals on which to perform regression. '
                    'Specify None for the default time interval [-1,1], or'
                    ' a custom list of intervals, e.g. [[-0.2,0], [0.5,1]].')

FLAGS = flags.FLAGS


def main(_) -> None:
  if FLAGS.filter_time_intervals is not None:
    filter_time_intervals = ast.literal_eval(FLAGS.filter_time_intervals)
  else:
    filter_time_intervals = None

  config, ds, experiment = evaluator.get_config_dataset_evaluator(
      filter_time_intervals,
      FLAGS.checkpoint_path,
      config_override={
          'experiment_kwargs.data_config.dataset_path': FLAGS.data_path,
      })
  metrics, _, _ = evaluator.run_model_on_dataset(experiment, ds, config)
  logging.info('Evaluation complete. Metrics: %s', metrics)


if __name__ == '__main__':
  app.run(main)
deepmind-research-master
galaxy_mergers/main.py
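For illustration, the --filter_time_intervals flag above accepts a Python-literal string that ast.literal_eval turns into nested lists:

import ast

intervals = ast.literal_eval('[[-0.2, 0], [0.5, 1]]')
print(intervals)   # [[-0.2, 0], [0.5, 1]]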
# Copyright 2021 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helpers for a galaxy merger model evaluation.""" import glob import os from astropy import cosmology from astropy.io import fits import matplotlib.pyplot as plt import numpy as np from PIL import Image import tensorflow.compat.v2 as tf def restore_checkpoint(checkpoint_dir, experiment): checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir) global_step = tf.Variable( 0, dtype=tf.int32, trainable=False, name='global_step') checkpoint = tf.train.Checkpoint( _global_step_=global_step, **experiment.checkpoint_items) checkpoint.restore(checkpoint_path) def sum_average_transformed_mu_and_sigma(mu, log_sigma_sq): """Computes <mu>, var(mu) + <var> in transformed representation. This corresponds to assuming that the output distribution is a sum of Gaussian and computing the mean and variance of the resulting (non-Gaussian) distribution. Args: mu: Tensor of shape [B, ...] representing the means of the input distributions. log_sigma_sq: Tensor of shape [B, ...] representing log(sigma**2) of the input distributions. Can be None, in which case the variance is assumed to be zero. Returns: mu: Tensor of shape [...] representing the means of the output distributions. log_sigma_sq: Tensor of shape [...] representing log(sigma**2) of the output distributions. 
""" av_mu = tf.reduce_mean(mu, axis=0) var_mu = tf.math.reduce_std(mu, axis=0)**2 if log_sigma_sq is None: return av_mu, tf.math.log(var_mu) max_log_sigma_sq = tf.reduce_max(log_sigma_sq, axis=0) log_sigma_sq -= max_log_sigma_sq # (sigma/sigma_0)**2 sigma_sq = tf.math.exp(log_sigma_sq) # (<sigma**2>)/sigma_0**2 (<1) av_sigma_sq = tf.reduce_mean(sigma_sq, axis=0) # (<sigma**2> + var(mu))/sigma_0**2 av_sigma_sq += var_mu * tf.math.exp(-max_log_sigma_sq) # log(<sigma**2> + var(mu)) log_av_sigma_sq = tf.math.log(av_sigma_sq) + max_log_sigma_sq return av_mu, log_av_sigma_sq def aggregate_regression_ensemble(logits_or_times, ensemble_size, use_uncertainty, test_time_ensembling): """Aggregate output of model ensemble.""" out_shape = logits_or_times.shape.as_list()[1:] logits_or_times = tf.reshape(logits_or_times, [ensemble_size, -1] + out_shape) mus = logits_or_times[..., 0] log_sigma_sqs = logits_or_times[..., -1] if use_uncertainty else None if test_time_ensembling == 'sum': mu, log_sigma_sq = sum_average_transformed_mu_and_sigma(mus, log_sigma_sqs) elif test_time_ensembling == 'none': mu = mus[0] log_sigma_sq = log_sigma_sqs[0] if use_uncertainty else None else: raise ValueError('Unexpected test_time_ensembling') return mu, log_sigma_sq def aggregate_classification_ensemble(logits_or_times, ensemble_size, test_time_ensembling): """Averages the output logits across models in the ensemble.""" out_shape = logits_or_times.shape.as_list()[1:] logits = tf.reshape(logits_or_times, [ensemble_size, -1] + out_shape) if test_time_ensembling == 'sum': logits = tf.reduce_mean(logits, axis=0) return logits, None elif test_time_ensembling == 'none': return logits, None else: raise ValueError('Unexpected test_time_ensembling') def unpack_evaluator_output(data, return_seq_info=False, return_redshift=False): """Unpack evaluator.run_model_on_dataset output.""" mus = np.array(data[1]['mu']).flatten() sigmas = np.array(data[1]['sigma']).flatten() regression_targets = np.array(data[1]['regression_targets']).flatten() outputs = [mus, sigmas, regression_targets] if return_seq_info: seq_ids = np.array(data[2][0]).flatten() seq_ids = np.array([seq_id.decode('UTF-8') for seq_id in seq_ids]) time_idxs = np.array(data[2][1]).flatten() axes = np.array(data[2][2]).flatten() outputs += [seq_ids, axes, time_idxs] if return_redshift: redshifts = np.array(data[2][6]).flatten() outputs += [redshifts] return outputs def process_data_into_myrs(redshifts, *data_lists): """Converts normalized time to virial time using Planck cosmology.""" # small hack to avoid build tools not recognizing non-standard trickery # done in the astropy library: # https://github.com/astropy/astropy/blob/master/astropy/cosmology/core.py#L3290 # that dynamically generates and imports new classes. 
planck13 = getattr(cosmology, 'Plank13') hubble_constants = planck13.H(redshifts) # (km/s)/megaparsec inv_hubble_constants = 1/hubble_constants # (megaparsec*s) / km megaparsec_to_km = 1e19*3.1 seconds_to_gigayears = 1e-15/31.556 conversion_factor = megaparsec_to_km * seconds_to_gigayears hubble_time_gigayears = conversion_factor * inv_hubble_constants hubble_to_virial_time = 0.14 # approximate simulation-based conversion factor virial_dyn_time = hubble_to_virial_time*hubble_time_gigayears.value return [data_list*virial_dyn_time for data_list in data_lists] def print_rmse_and_class_accuracy(mus, regression_targets, redshifts): """Convert to virial dynamical time and print stats.""" time_pred, time_gt = process_data_into_myrs( redshifts, mus, regression_targets) time_sq_errors = (time_pred-time_gt)**2 rmse = np.sqrt(np.mean(time_sq_errors)) labels = regression_targets > 0 class_preds = mus > 0 accuracy = sum((labels == class_preds).astype(np.int8)) / len(class_preds) print(f'95% Error: {np.percentile(np.sqrt(time_sq_errors), 95)}') print(f'RMSE: {rmse}') print(f'Classification Accuracy: {accuracy}') def print_stats(vec, do_print=True): fvec = vec.flatten() if do_print: print(len(fvec), min(fvec), np.mean(fvec), np.median(fvec), max(fvec)) return (len(fvec), min(fvec), np.mean(fvec), np.median(fvec), max(fvec)) def get_image_from_fits(base_dir, seq='475_31271', time='497', axis=2): """Read *.fits galaxy image from directory.""" axis_map = {0: 'x', 1: 'y', 2: 'z'} fits_glob = f'{base_dir}/{seq}/fits_of_flux_psf/{time}/*_{axis_map[axis]}_*.fits' def get_freq_from_path(p): return int(p.split('/')[-1].split('_')[2][1:]) fits_image_paths = sorted(glob.glob(fits_glob), key=get_freq_from_path) assert len(fits_image_paths) == 7 combined_frequencies = [] for fit_path in fits_image_paths: with open(fit_path, 'rb') as f: fits_data = np.array(fits.open(f)[0].data.astype(np.float32)) combined_frequencies.append(fits_data) fits_image = np.transpose(np.array(combined_frequencies), (1, 2, 0)) return fits_image def stack_desired_galaxy_images(base_dir, seq, n_time_slices): """Searth through galaxy image directory gathering images.""" fits_sequence_dir = os.path.join(base_dir, seq, 'fits_of_flux_psf') all_times_for_seq = os.listdir(fits_sequence_dir) hop = (len(all_times_for_seq)-1)//(n_time_slices-1) desired_time_idxs = [k*hop for k in range(n_time_slices)] all_imgs = [] for j in desired_time_idxs: time = all_times_for_seq[j] img = get_image_from_fits(base_dir=base_dir, seq=seq, time=time, axis=2) all_imgs.append(img) min_img_size = min([img.shape[0] for img in all_imgs]) return all_imgs, min_img_size def draw_galaxy_image(image, target_size=None, color_map='viridis'): normalized_image = image / max(image.flatten()) color_map = plt.get_cmap(color_map) colored_image = color_map(normalized_image)[:, :, :3] colored_image = (colored_image * 255).astype(np.uint8) colored_image = Image.fromarray(colored_image, mode='RGB') if target_size: colored_image = colored_image.resize(target_size, Image.ANTIALIAS) return colored_image def collect_merger_sequence(ds, seq=b'370_11071', n_examples_to_sift=5000): images, targets, redshifts = [], [], [] for i, all_inputs in enumerate(ds): if all_inputs[4][0].numpy() == seq: images.append(all_inputs[0][0].numpy()) targets.append(all_inputs[2][0].numpy()) redshifts.append(all_inputs[10][0].numpy()) if i > n_examples_to_sift: break return np.squeeze(images), np.squeeze(targets), np.squeeze(redshifts) def take_samples(sample_idxs, *data_lists): return [np.take(l, sample_idxs, axis=0) 
for l in data_lists]
deepmind-research-master
galaxy_mergers/helpers.py
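A NumPy sketch (illustrative values; omitting the max-subtraction trick used above for numerical stability) of how sum_average_transformed_mu_and_sigma combines ensemble members as a mixture of Gaussians:

import numpy as np

mus = np.array([0.10, 0.14, 0.06])            # One entry per ensemble member.
log_sigma_sqs = np.array([-3.0, -2.5, -2.8])

av_mu = mus.mean()
# Combined variance = spread of the means + average per-member variance.
av_var = mus.var() + np.exp(log_sigma_sqs).mean()
print(av_mu, np.log(av_var))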
# Copyright 2021 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Evaluation runner.""" import collections from absl import logging import tensorflow.compat.v2 as tf from galaxy_mergers import config as tp_config from galaxy_mergers import helpers from galaxy_mergers import losses from galaxy_mergers import model from galaxy_mergers import preprocessing class GalaxyMergeClassifierEvaluator(): """Galaxy Merge Rate Prediction Evaluation Runner.""" def __init__(self, strategy, optimizer_config, total_train_batch_size, train_net_args, eval_batch_size, eval_net_args, l2_regularization, data_config, resnet_kwargs, n_train_epochs): """Initializes evaluator/experiment.""" logging.info('Initializing evaluator...') self._strategy = strategy self._data_config = data_config self._use_additional_features = bool(data_config['additional_features']) self._eval_batch_size = eval_batch_size self._eval_net_args = eval_net_args self._num_buckets = data_config['num_eval_buckets'] self._n_repeats = data_config['n_crop_repeat'] self._image_size = data_config['image_size'] self._task_type = data_config['task'] self._loss_config = data_config['loss_config'] self._model_uncertainty = data_config['model_uncertainty'] del l2_regularization, optimizer_config, train_net_args del total_train_batch_size, n_train_epochs logging.info('Creating model...') num_classes = 2 if self._model_uncertainty else 1 if self._task_type == losses.TASK_CLASSIFICATION: num_classes = len(self._data_config['class_boundaries']) self.model = model.ResNet( n_repeats=self._data_config['n_crop_repeat'], num_classes=num_classes, use_additional_features=self._use_additional_features, **resnet_kwargs) self._eval_input = None def build_eval_input(self, additional_lambdas=None): """Create the galaxy merger evaluation dataset.""" def decode_fn(record_bytes): parsed_example = tf.io.parse_single_example( record_bytes, { 'image': tf.io.VarLenFeature(tf.float32), 'image_shape': tf.io.FixedLenFeature([3], dtype=tf.int64), 'axis': tf.io.FixedLenFeature([], dtype=tf.int64), 'proposed_crop': tf.io.FixedLenFeature([2, 2], dtype=tf.int64), 'normalized_time': tf.io.FixedLenFeature([], dtype=tf.float32), 'unnormalized_time': tf.io.FixedLenFeature([], dtype=tf.float32), 'grounded_normalized_time': tf.io.FixedLenFeature([], dtype=tf.float32), 'redshift': tf.io.FixedLenFeature([], dtype=tf.float32), 'sequence_average_redshift': tf.io.FixedLenFeature([], dtype=tf.float32), 'mass': tf.io.FixedLenFeature([], dtype=tf.float32), 'time_index': tf.io.FixedLenFeature([], dtype=tf.int64), 'sequence_id': tf.io.FixedLenFeature([], dtype=tf.string), }) parsed_example['image'] = tf.sparse.to_dense( parsed_example['image'], default_value=0) dataset_row = parsed_example return dataset_row def build_eval_pipeline(_): """Generate the processed input evaluation data.""" logging.info('Building evaluation input pipeline...') ds_path = self._data_config['dataset_path'] ds = tf.data.TFRecordDataset([ds_path]).map(decode_fn) augmentations = dict( 
rotation_and_flip=False, rescaling=False, translation=False ) ds = preprocessing.prepare_dataset( ds=ds, target_size=self._image_size, crop_type=self._data_config['test_crop_type'], n_repeats=self._n_repeats, augmentations=augmentations, task_type=self._task_type, additional_features=self._data_config['additional_features'], class_boundaries=self._data_config['class_boundaries'], time_intervals=self._data_config['time_filter_intervals'], frequencies_to_use=self._data_config['frequencies_to_use'], additional_lambdas=additional_lambdas) batched_ds = ds.cache().batch(self._eval_batch_size).prefetch(128) logging.info('Finished building input pipeline...') return batched_ds return self._strategy.experimental_distribute_datasets_from_function( build_eval_pipeline) def run_test_model_ensemble(self, images, physical_features, augmentations): """Run evaluation on input images.""" image_variations = [images] image_shape = images.shape.as_list() if augmentations['rotation_and_flip']: image_variations = preprocessing.get_all_rotations_and_flips( image_variations) if augmentations['rescaling']: image_variations = preprocessing.get_all_rescalings( image_variations, image_shape[1], augmentations['translation']) # Put all augmented images into the batch: batch * num_augmented augmented_images = tf.stack(image_variations, axis=0) augmented_images = tf.reshape(augmented_images, [-1] + image_shape[1:]) if self._use_additional_features: physical_features = tf.concat( [physical_features] * len(image_variations), axis=0) n_reps = self._data_config['n_crop_repeat'] augmented_images = preprocessing.move_repeats_to_batch(augmented_images, n_reps) logits_or_times = self.model(augmented_images, physical_features, **self._eval_net_args) if self._task_type == losses.TASK_CLASSIFICATION: mu, log_sigma_sq = helpers.aggregate_classification_ensemble( logits_or_times, len(image_variations), self._data_config['test_time_ensembling']) else: assert self._task_type in losses.REGRESSION_TASKS mu, log_sigma_sq = helpers.aggregate_regression_ensemble( logits_or_times, len(image_variations), self._model_uncertainty, self._data_config['test_time_ensembling']) return mu, log_sigma_sq @property def checkpoint_items(self): return {'model': self.model} def run_model_on_dataset(evaluator, dataset, config, n_batches=16): """Runs the model against a dataset, aggregates model output.""" scalar_metrics_to_log = collections.defaultdict(list) model_outputs_to_log = collections.defaultdict(list) dataset_features_to_log = collections.defaultdict(list) batch_count = 1 for all_inputs in dataset: if config.experiment_kwargs.data_config['additional_features']: images = all_inputs[0] physical_features = all_inputs[1] labels, regression_targets, _ = all_inputs[2:5] other_dataset_features = all_inputs[5:] else: images, physical_features = all_inputs[0], None labels, regression_targets, _ = all_inputs[1:4] other_dataset_features = all_inputs[4:] mu, log_sigma_sq = evaluator.run_test_model_ensemble( images, physical_features, config.experiment_kwargs.data_config['test_augmentations']) loss_config = config.experiment_kwargs.data_config['loss_config'] task_type = config.experiment_kwargs.data_config['task'] uncertainty = config.experiment_kwargs.data_config['model_uncertainty'] conf = config.experiment_kwargs.data_config['eval_confidence_interval'] scalar_metrics, vector_metrics = losses.compute_loss_and_metrics( mu, log_sigma_sq, regression_targets, labels, task_type, uncertainty, loss_config, 0, conf, mode='eval') for i, dataset_feature in 
enumerate(other_dataset_features): dataset_features_to_log[i].append(dataset_feature.numpy()) for scalar_metric in scalar_metrics: v = scalar_metrics[scalar_metric] val = v if isinstance(v, int) or isinstance(v, float) else v.numpy() scalar_metrics_to_log[scalar_metric].append(val) for vector_metric in vector_metrics: val = vector_metrics[vector_metric].numpy() model_outputs_to_log[vector_metric].append(val) regression_targets_np = regression_targets.numpy() labels_np = labels.numpy() model_outputs_to_log['regression_targets'].append(regression_targets_np) model_outputs_to_log['labels'].append(labels_np) model_outputs_to_log['model_input_images'].append(images.numpy()) if n_batches and batch_count >= n_batches: break batch_count += 1 return scalar_metrics_to_log, model_outputs_to_log, dataset_features_to_log def get_config_dataset_evaluator(filter_time_intervals, ckpt_path, config_override=None, setup_dataset=True): """Set-up a default config, evaluation dataset, and evaluator.""" config = tp_config.get_config(filter_time_intervals=filter_time_intervals) if config_override: with config.ignore_type(): config.update_from_flattened_dict(config_override) strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0') experiment = GalaxyMergeClassifierEvaluator( strategy=strategy, **config.experiment_kwargs) helpers.restore_checkpoint(ckpt_path, experiment) if setup_dataset: additional_lambdas = [ lambda ds: ds['sequence_id'], lambda ds: ds['time_index'], lambda ds: ds['axis'], lambda ds: ds['normalized_time'], lambda ds: ds['grounded_normalized_time'], lambda ds: ds['unnormalized_time'], lambda ds: ds['redshift'], lambda ds: ds['mass'] ] ds = experiment.build_eval_input(additional_lambdas=additional_lambdas) else: ds = None return config, ds, experiment
deepmind-research-master
galaxy_mergers/evaluator.py
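A hedged sketch of a tf.train.Example matching the feature spec in decode_fn above; every value below is a placeholder, intended only for writing a tiny TFRecord to smoke-test the pipeline:

import numpy as np
import tensorflow.compat.v2 as tf


def _floats(values):
  return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))


def _ints(values):
  return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))


def _bytes(value):
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


# Placeholder image stored channel-first; _prepare_image transposes to HWC.
image = np.zeros((7, 80, 80), dtype=np.float32)
example = tf.train.Example(features=tf.train.Features(feature={
    'image': _floats(image.ravel().tolist()),
    'image_shape': _ints(image.shape),
    'axis': _ints([2]),
    'proposed_crop': _ints([0, 0, 80, 80]),        # Flattened [2, 2].
    'normalized_time': _floats([0.3]),
    'unnormalized_time': _floats([120.0]),
    'grounded_normalized_time': _floats([0.3]),
    'redshift': _floats([1.0]),
    'sequence_average_redshift': _floats([1.0]),
    'mass': _floats([10.2]),
    'time_index': _ints([5]),
    'sequence_id': _bytes(b'370_11071'),
}))

with tf.io.TFRecordWriter('/tmp/mergers_test.tfrecord') as writer:
  writer.write(example.SerializeToString())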
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A prop-carry task that transition between multiple phases.""" import collections import colorsys import enum from absl import logging from dm_control import composer from dm_control import mjcf from dm_control.composer.observation import observable from dm_control.locomotion.arenas import floors from dm_control.locomotion.mocap import loader as mocap_loader from dm_control.mujoco.wrapper import mjbindings import numpy as np from catch_carry import arm_opener from catch_carry import mocap_data from catch_carry import props from catch_carry import trajectories _PHYSICS_TIMESTEP = 0.005 # Maximum number of physics steps to run when settling props onto pedestals # during episode initialization. _MAX_SETTLE_STEPS = 1000 # Maximum velocity for prop to be considered settled. # Used during episode initialization only. _SETTLE_QVEL_TOL = 1e-5 # Magnitude of the sparse reward. _SPARSE_REWARD = 1.0 # Maximum distance for walkers to be considered to be "near" a pedestal/target. _TARGET_TOL = 0.65 # Defines how pedestals are placed around the arena. # Pedestals are placed at constant angle intervals around the arena's center. _BASE_PEDESTAL_DIST = 3 # Base distance from center. _PEDESTAL_DIST_DELTA = 0.5 # Maximum variation on the base distance. # Base hue-luminosity-saturation of the pedestal colors. # We rotate through the hue for each pedestal created in the environment. _BASE_PEDESTAL_H = 0.1 _BASE_PEDESTAL_L = 0.3 _BASE_PEDESTAL_S = 0.7 # Pedestal luminosity when active. 
_ACTIVATED_PEDESTAL_L = 0.8 _PEDESTAL_SIZE = (0.2, 0.2, 0.02) _SINGLE_PEDESTAL_COLOR = colorsys.hls_to_rgb(.3, .15, .35) + (1.0,) WALKER_PEDESTAL = 'walker_pedestal' WALKER_PROP = 'walker_prop' PROP_PEDESTAL = 'prop_pedestal' TARGET_STATE = 'target_state/' CURRENT_STATE = 'meta/current_state/' def _is_same_state(state_1, state_2): if state_1.keys() != state_2.keys(): return False for k in state_1: if not np.all(state_1[k] == state_2[k]): return False return True def _singleton_or_none(iterable): iterator = iter(iterable) try: return next(iterator) except StopIteration: return None def _generate_pedestal_colors(num_pedestals): """Function to get colors for pedestals.""" colors = [] for i in range(num_pedestals): h = _BASE_PEDESTAL_H + i / num_pedestals while h > 1: h -= 1 colors.append( colorsys.hls_to_rgb(h, _BASE_PEDESTAL_L, _BASE_PEDESTAL_S) + (1.0,)) return colors InitializationParameters = collections.namedtuple( 'InitializationParameters', ('clip_segment', 'prop_id', 'pedestal_id')) def _rotate_vector_by_quaternion(vec, quat): result = np.empty(3) mjbindings.mjlib.mju_rotVecQuat(result, np.asarray(vec), np.asarray(quat)) return result @enum.unique class WarehousePhase(enum.Enum): TERMINATED = 0 GOTOTARGET = 1 PICKUP = 2 CARRYTOTARGET = 3 PUTDOWN = 4 def _find_random_free_pedestal_id(target_state, random_state): free_pedestals = ( np.where(np.logical_not(np.any(target_state, axis=0)))[0]) return random_state.choice(free_pedestals) def _find_random_occupied_pedestal_id(target_state, random_state): occupied_pedestals = ( np.where(np.any(target_state, axis=0))[0]) return random_state.choice(occupied_pedestals) def one_hot(values, num_unique): return np.squeeze(np.eye(num_unique)[np.array(values).reshape(-1)]) class SinglePropFourPhases(object): """A phase manager that transitions between four phases for a single prop.""" def __init__(self, fixed_initialization_phase=None): self._phase = WarehousePhase.TERMINATED self._fixed_initialization_phase = fixed_initialization_phase def initialize_episode(self, target_state, random_state): """Randomly initializes an episode into one of the four phases.""" if self._fixed_initialization_phase is None: self._phase = random_state.choice([ WarehousePhase.GOTOTARGET, WarehousePhase.PICKUP, WarehousePhase.CARRYTOTARGET, WarehousePhase.PUTDOWN ]) else: self._phase = self._fixed_initialization_phase self._prop_id = random_state.randint(len(target_state[PROP_PEDESTAL])) self._pedestal_id = np.nonzero( target_state[PROP_PEDESTAL][self._prop_id])[0][0] pedestal_id_for_initialization = self._pedestal_id if self._phase == WarehousePhase.GOTOTARGET: clip_segment = trajectories.ClipSegment.APPROACH target_state[WALKER_PROP][:] = 0 target_state[WALKER_PEDESTAL][self._pedestal_id] = 1 elif self._phase == WarehousePhase.PICKUP: clip_segment = trajectories.ClipSegment.PICKUP target_state[WALKER_PROP][self._prop_id] = 1 target_state[WALKER_PEDESTAL][self._pedestal_id] = 1 # Set self._pedestal_id to the next pedestal after pickup is successful. 
self._pedestal_id = _find_random_free_pedestal_id( target_state[PROP_PEDESTAL], random_state) target_state[PROP_PEDESTAL][self._prop_id, :] = 0 elif self._phase == WarehousePhase.CARRYTOTARGET: clip_segment = random_state.choice([ trajectories.ClipSegment.CARRY1, trajectories.ClipSegment.CARRY2]) self._pedestal_id = _find_random_free_pedestal_id( target_state[PROP_PEDESTAL], random_state) if clip_segment == trajectories.ClipSegment.CARRY2: pedestal_id_for_initialization = self._pedestal_id target_state[WALKER_PROP][self._prop_id] = 1 target_state[WALKER_PEDESTAL][self._pedestal_id] = 1 target_state[PROP_PEDESTAL][self._prop_id, :] = 0 elif self._phase == WarehousePhase.PUTDOWN: clip_segment = trajectories.ClipSegment.PUTDOWN target_state[WALKER_PROP][:] = 0 target_state[WALKER_PEDESTAL][self._pedestal_id] = 1 return InitializationParameters( clip_segment, self._prop_id, pedestal_id_for_initialization) def on_success(self, target_state, random_state): """Transitions into the next phase upon success of current phase.""" if self._phase == WarehousePhase.GOTOTARGET: if self._prop_id is not None: self._phase = WarehousePhase.PICKUP # Set self._pedestal_id to the next pedestal after pickup is successful. self._pedestal_id = ( _find_random_free_pedestal_id( target_state[PROP_PEDESTAL], random_state)) target_state[WALKER_PROP][self._prop_id] = 1 target_state[PROP_PEDESTAL][self._prop_id, :] = 0 else: # If you go to an empty pedestal, go to pedestal with a prop. self._pedestal_id = ( _find_random_occupied_pedestal_id( target_state[PROP_PEDESTAL], random_state)) target_state[WALKER_PEDESTAL][:] = 0 target_state[WALKER_PEDESTAL][self._pedestal_id] = 1 self._prop_id = np.argwhere( target_state[PROP_PEDESTAL][:, self._pedestal_id])[0, 0] elif self._phase == WarehousePhase.PICKUP: self._phase = WarehousePhase.CARRYTOTARGET target_state[WALKER_PEDESTAL][:] = 0 target_state[WALKER_PEDESTAL][self._pedestal_id] = 1 elif self._phase == WarehousePhase.CARRYTOTARGET: self._phase = WarehousePhase.PUTDOWN target_state[WALKER_PROP][:] = 0 target_state[PROP_PEDESTAL][self._prop_id, self._pedestal_id] = 1 elif self._phase == WarehousePhase.PUTDOWN: self._phase = WarehousePhase.GOTOTARGET # Set self._pedestal_id to the next pedestal after putdown is successful. self._pedestal_id = ( _find_random_free_pedestal_id( target_state[PROP_PEDESTAL], random_state)) self._prop_id = None target_state[WALKER_PEDESTAL][:] = 0 target_state[WALKER_PEDESTAL][self._pedestal_id] = 1 return self._phase @property def phase(self): return self._phase @property def prop_id(self): return self._prop_id @property def pedestal_id(self): return self._pedestal_id class PhasedBoxCarry(composer.Task): """A prop-carry task that transitions between multiple phases.""" def __init__( self, walker, num_props, num_pedestals, proto_modifier=None, transition_class=SinglePropFourPhases, min_prop_gap=0.05, pedestal_height_range=(0.45, 0.75), log_transitions=False, negative_reward_on_failure_termination=True, use_single_pedestal_color=True, priority_friction=False, fixed_initialization_phase=None): """Initialize phased/instructed box-carrying ("warehouse") task. Args: walker: the walker to be used in this task. num_props: the number of props in the task scene. num_pedestals: the number of floating shelves (pedestals) in the task scene. proto_modifier: function to modify trajectory proto. transition_class: the object that handles the transition logic. 
min_prop_gap: arms are automatically opened to leave a gap around the prop to avoid problematic collisions upon initialization. pedestal_height_range: range of heights for the pedestal. log_transitions: logging/printing of transitions. negative_reward_on_failure_termination: boolean for whether to provide negative sparse rewards on failure termination. use_single_pedestal_color: boolean option for pedestals being the same color or different colors. priority_friction: sets friction priority thereby making prop objects have higher friction. fixed_initialization_phase: an instance of the `WarehousePhase` enum that specifies the phase in which to always initialize the task, or `None` if the initial task phase should be chosen randomly for each episode. """ self._num_props = num_props self._num_pedestals = num_pedestals self._proto_modifier = proto_modifier self._transition_manager = transition_class( fixed_initialization_phase=fixed_initialization_phase) self._min_prop_gap = min_prop_gap self._pedestal_height_range = pedestal_height_range self._log_transitions = log_transitions self._target_state = collections.OrderedDict([ (WALKER_PEDESTAL, np.zeros(num_pedestals)), (WALKER_PROP, np.zeros(num_props)), (PROP_PEDESTAL, np.zeros([num_props, num_pedestals])) ]) self._current_state = collections.OrderedDict([ (WALKER_PEDESTAL, np.zeros(num_pedestals)), (WALKER_PROP, np.zeros(num_props)), (PROP_PEDESTAL, np.zeros([num_props, num_pedestals])) ]) self._negative_reward_on_failure_termination = ( negative_reward_on_failure_termination) self._priority_friction = priority_friction clips = sorted( set(mocap_data.medium_pedestal()) & (set(mocap_data.small_box()) | set(mocap_data.large_box()))) loader = mocap_loader.HDF5TrajectoryLoader( mocap_data.H5_PATH, trajectories.SinglePropCarrySegmentedTrajectory) self._trajectories = [ loader.get_trajectory(clip.clip_identifier) for clip in clips] self._arena = floors.Floor() self._walker = walker self._feet_geoms = ( walker.mjcf_model.find('body', 'lfoot').find_all('geom') + walker.mjcf_model.find('body', 'rfoot').find_all('geom')) self._lhand_geoms = ( walker.mjcf_model.find('body', 'lhand').find_all('geom')) self._rhand_geoms = ( walker.mjcf_model.find('body', 'rhand').find_all('geom')) self._trajectories[0].configure_walkers([self._walker]) walker.create_root_joints(self._arena.attach(walker)) control_timestep = self._trajectories[0].dt for i, trajectory in enumerate(self._trajectories): if trajectory.dt != control_timestep: raise ValueError( 'Inconsistent control timestep: ' 'trajectories[{}].dt == {} but trajectories[0].dt == {}' .format(i, trajectory.dt, control_timestep)) self.set_timesteps(control_timestep, _PHYSICS_TIMESTEP) if use_single_pedestal_color: self._pedestal_colors = [_SINGLE_PEDESTAL_COLOR] * num_pedestals else: self._pedestal_colors = _generate_pedestal_colors(num_pedestals) self._pedestals = [props.Pedestal(_PEDESTAL_SIZE, rgba) for rgba in self._pedestal_colors] for pedestal in self._pedestals: self._arena.attach(pedestal) self._props = [ self._trajectories[0].create_props( priority_friction=self._priority_friction)[0] for _ in range(num_props) ] for prop in self._props: self._arena.add_free_entity(prop) self._task_observables = collections.OrderedDict() self._task_observables['target_phase'] = observable.Generic( lambda _: one_hot(self._transition_manager.phase.value, num_unique=5)) def ego_prop_xpos(physics): prop_id = self._focal_prop_id if prop_id is None: return np.zeros((3,)) prop = self._props[prop_id] prop_xpos, _ = 
prop.get_pose(physics) walker_xpos = physics.bind(self._walker.root_body).xpos return self._walker.transform_vec_to_egocentric_frame( physics, prop_xpos - walker_xpos) self._task_observables['target_prop/xpos'] = ( observable.Generic(ego_prop_xpos)) def prop_zaxis(physics): prop_id = self._focal_prop_id if prop_id is None: return np.zeros((3,)) prop = self._props[prop_id] prop_xmat = physics.bind( mjcf.get_attachment_frame(prop.mjcf_model)).xmat return prop_xmat[[2, 5, 8]] self._task_observables['target_prop/zaxis'] = ( observable.Generic(prop_zaxis)) def ego_pedestal_xpos(physics): pedestal_id = self._focal_pedestal_id if pedestal_id is None: return np.zeros((3,)) pedestal = self._pedestals[pedestal_id] pedestal_xpos, _ = pedestal.get_pose(physics) walker_xpos = physics.bind(self._walker.root_body).xpos return self._walker.transform_vec_to_egocentric_frame( physics, pedestal_xpos - walker_xpos) self._task_observables['target_pedestal/xpos'] = ( observable.Generic(ego_pedestal_xpos)) for obs in (self._walker.observables.proprioception + self._walker.observables.kinematic_sensors + self._walker.observables.dynamic_sensors + list(self._task_observables.values())): obs.enabled = True self._focal_prop_id = None self._focal_pedestal_id = None @property def root_entity(self): return self._arena @property def task_observables(self): return self._task_observables @property def name(self): return 'warehouse' def initialize_episode_mjcf(self, random_state): self._reward = 0.0 self._discount = 1.0 self._should_terminate = False self._before_step_success = False for target_value in self._target_state.values(): target_value[:] = 0 for pedestal_id, pedestal in enumerate(self._pedestals): angle = 2 * np.pi * pedestal_id / len(self._pedestals) dist = (_BASE_PEDESTAL_DIST + _PEDESTAL_DIST_DELTA * random_state.uniform(-1, 1)) height = random_state.uniform(*self._pedestal_height_range) pedestal_pos = [dist * np.cos(angle), dist * np.sin(angle), height - pedestal.geom.size[2]] mjcf.get_attachment_frame(pedestal.mjcf_model).pos = pedestal_pos for prop in self._props: prop.detach() self._props = [] self._trajectory_for_prop = [] for prop_id in range(self._num_props): trajectory = random_state.choice(self._trajectories) if self._proto_modifier: trajectory = trajectory.get_modified_trajectory( self._proto_modifier, random_state=random_state) prop = trajectory.create_props( priority_friction=self._priority_friction)[0] prop.mjcf_model.model = 'prop_{}'.format(prop_id) self._arena.add_free_entity(prop) self._props.append(prop) self._trajectory_for_prop.append(trajectory) def _settle_props(self, physics): prop_freejoints = [mjcf.get_attachment_frame(prop.mjcf_model).freejoint for prop in self._props] physics.bind(prop_freejoints).qvel = 0 physics.forward() for _ in range(_MAX_SETTLE_STEPS): self._update_current_state(physics) success = self._evaluate_target_state() stopped = max(abs(physics.bind(prop_freejoints).qvel)) < _SETTLE_QVEL_TOL if success and stopped: break else: physics.step() physics.data.time = 0 def initialize_episode(self, physics, random_state): self._ground_geomid = physics.bind( self._arena.mjcf_model.worldbody.geom[0]).element_id self._feet_geomids = set(physics.bind(self._feet_geoms).element_id) self._lhand_geomids = set(physics.bind(self._lhand_geoms).element_id) self._rhand_geomids = set(physics.bind(self._rhand_geoms).element_id) for prop_id in range(len(self._props)): pedestal_id = _find_random_free_pedestal_id( self._target_state[PROP_PEDESTAL], random_state) pedestal = 
self._pedestals[pedestal_id] self._target_state[PROP_PEDESTAL][prop_id, pedestal_id] = 1 for prop_id, prop in enumerate(self._props): trajectory = self._trajectory_for_prop[prop_id] pedestal_id = np.nonzero( self._target_state[PROP_PEDESTAL][prop_id])[0][0] pedestal = self._pedestals[pedestal_id] pedestal_pos, _ = pedestal.get_pose(physics) pedestal_delta = np.array( pedestal_pos - trajectory.infer_pedestal_positions()[0]) pedestal_delta[2] += pedestal.geom.size[2] prop_timestep = trajectory.get_timestep_data(0).props[0] prop_pos = prop_timestep.position + np.array(pedestal_delta) prop_quat = prop_timestep.quaternion prop_pos[:2] += random_state.uniform( -pedestal.geom.size[:2] / 2, pedestal.geom.size[:2] / 2) prop.set_pose(physics, prop_pos, prop_quat) self._settle_props(physics) init_params = self._transition_manager.initialize_episode( self._target_state, random_state) if self._log_transitions: logging.info(init_params) self._on_transition(physics) init_prop = self._props[init_params.prop_id] init_pedestal = self._pedestals[init_params.pedestal_id] self._init_prop_id = init_params.prop_id self._init_pedestal_id = init_params.pedestal_id init_trajectory = self._trajectory_for_prop[init_params.prop_id] init_timestep = init_trajectory.get_random_timestep_in_segment( init_params.clip_segment, random_state) trajectory_pedestal_pos = init_trajectory.infer_pedestal_positions()[0] init_pedestal_pos = np.array(init_pedestal.get_pose(physics)[0]) delta_pos = init_pedestal_pos - trajectory_pedestal_pos delta_pos[2] = 0 delta_angle = np.pi + np.arctan2(init_pedestal_pos[1], init_pedestal_pos[0]) delta_quat = (np.cos(delta_angle / 2), 0, 0, np.sin(delta_angle / 2)) trajectory_pedestal_to_walker = ( init_timestep.walkers[0].position - trajectory_pedestal_pos) rotated_pedestal_to_walker = _rotate_vector_by_quaternion( trajectory_pedestal_to_walker, delta_quat) self._walker.set_pose( physics, position=trajectory_pedestal_pos + rotated_pedestal_to_walker, quaternion=init_timestep.walkers[0].quaternion) self._walker.set_velocity( physics, velocity=init_timestep.walkers[0].velocity, angular_velocity=init_timestep.walkers[0].angular_velocity) self._walker.shift_pose( physics, position=delta_pos, quaternion=delta_quat, rotate_velocity=True) physics.bind(self._walker.mocap_joints).qpos = ( init_timestep.walkers[0].joints) physics.bind(self._walker.mocap_joints).qvel = ( init_timestep.walkers[0].joints_velocity) if init_params.clip_segment in (trajectories.ClipSegment.CARRY1, trajectories.ClipSegment.CARRY2, trajectories.ClipSegment.PUTDOWN): trajectory_pedestal_to_prop = ( init_timestep.props[0].position - trajectory_pedestal_pos) rotated_pedestal_to_prop = _rotate_vector_by_quaternion( trajectory_pedestal_to_prop, delta_quat) init_prop.set_pose( physics, position=trajectory_pedestal_pos + rotated_pedestal_to_prop, quaternion=init_timestep.props[0].quaternion) init_prop.set_velocity( physics, velocity=init_timestep.props[0].velocity, angular_velocity=init_timestep.props[0].angular_velocity) init_prop.shift_pose( physics, position=delta_pos, quaternion=delta_quat, rotate_velocity=True) # If we have moved the pedestal upwards during height initialization, # the prop may now be lodged inside it. We fix that here. 
if init_pedestal_pos[2] > trajectory_pedestal_pos[2]: init_prop_geomid = physics.bind(init_prop.geom).element_id init_pedestal_geomid = physics.bind(init_pedestal.geom).element_id disallowed_contact = sorted((init_prop_geomid, init_pedestal_geomid)) def has_disallowed_contact(): physics.forward() for contact in physics.data.contact: if sorted((contact.geom1, contact.geom2)) == disallowed_contact: return True return False while has_disallowed_contact(): init_prop.shift_pose(physics, (0, 0, 0.001)) self._move_arms_if_necessary(physics) self._update_current_state(physics) self._previous_step_success = self._evaluate_target_state() self._focal_prop_id = self._init_prop_id self._focal_pedestal_id = self._init_pedestal_id def _move_arms_if_necessary(self, physics): if self._min_prop_gap is not None: for entity in self._props + self._pedestals: try: arm_opener.open_arms_for_prop( physics, self._walker.left_arm_root, self._walker.right_arm_root, entity.mjcf_model, self._min_prop_gap) except RuntimeError as e: raise composer.EpisodeInitializationError(e) def after_step(self, physics, random_state): # First we check for failure termination. for contact in physics.data.contact: if ((contact.geom1 == self._ground_geomid and contact.geom2 not in self._feet_geomids) or (contact.geom2 == self._ground_geomid and contact.geom1 not in self._feet_geomids)): if self._negative_reward_on_failure_termination: self._reward = -_SPARSE_REWARD else: self._reward = 0.0 self._should_terminate = True self._discount = 0.0 return # Then check for normal reward and state transitions. self._update_current_state(physics) success = self._evaluate_target_state() if success and not self._previous_step_success: self._reward = _SPARSE_REWARD new_phase = ( self._transition_manager.on_success(self._target_state, random_state)) self._should_terminate = (new_phase == WarehousePhase.TERMINATED) self._on_transition(physics) self._previous_step_success = self._evaluate_target_state() else: self._reward = 0.0 def _on_transition(self, physics): self._focal_prop_id = self._transition_manager.prop_id self._focal_pedestal_id = self._transition_manager.pedestal_id if self._log_transitions: logging.info('target_state:\n%s', self._target_state) for pedestal_id, pedestal_active in enumerate( self._target_state[WALKER_PEDESTAL]): r, g, b, a = self._pedestal_colors[pedestal_id] if pedestal_active: h, _, s = colorsys.rgb_to_hls(r, g, b) r, g, b = colorsys.hls_to_rgb(h, _ACTIVATED_PEDESTAL_L, s) physics.bind(self._pedestals[pedestal_id].geom).rgba = (r, g, b, a) def get_reward(self, physics): return self._reward def get_discount(self, physics): return self._discount def should_terminate_episode(self, physics): return self._should_terminate def _update_current_state(self, physics): for current_state_value in self._current_state.values(): current_state_value[:] = 0 # Check if the walker is near each pedestal. 
walker_pos, _ = self._walker.get_pose(physics) for pedestal_id, pedestal in enumerate(self._pedestals): target_pos, _ = pedestal.get_pose(physics) walker_to_target_dist = np.linalg.norm(walker_pos[:2] - target_pos[:2]) if walker_to_target_dist <= _TARGET_TOL: self._current_state[WALKER_PEDESTAL][pedestal_id] = 1 prop_geomids = { physics.bind(prop.geom).element_id: prop_id for prop_id, prop in enumerate(self._props)} pedestal_geomids = { physics.bind(pedestal.geom).element_id: pedestal_id for pedestal_id, pedestal in enumerate(self._pedestals)} prop_pedestal_contact_counts = np.zeros( [self._num_props, self._num_pedestals]) prop_lhand_contact = [False] * self._num_props prop_rhand_contact = [False] * self._num_props for contact in physics.data.contact: prop_id = prop_geomids.get(contact.geom1, prop_geomids.get(contact.geom2)) pedestal_id = pedestal_geomids.get( contact.geom1, pedestal_geomids.get(contact.geom2)) has_lhand = (contact.geom1 in self._lhand_geomids or contact.geom2 in self._lhand_geomids) has_rhand = (contact.geom1 in self._rhand_geomids or contact.geom2 in self._rhand_geomids) if prop_id is not None and pedestal_id is not None: prop_pedestal_contact_counts[prop_id, pedestal_id] += 1 if prop_id is not None and has_lhand: prop_lhand_contact[prop_id] = True if prop_id is not None and has_rhand: prop_rhand_contact[prop_id] = True for prop_id in range(self._num_props): if prop_lhand_contact[prop_id] and prop_rhand_contact[prop_id]: self._current_state[WALKER_PROP][prop_id] = 1 pedestal_contact_counts = prop_pedestal_contact_counts[prop_id] for pedestal_id in range(self._num_pedestals): if pedestal_contact_counts[pedestal_id] >= 4: self._current_state[PROP_PEDESTAL][prop_id, pedestal_id] = 1 def _evaluate_target_state(self): return _is_same_state(self._current_state, self._target_state)
deepmind-research-master
catch_carry/warehouse.py
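# A minimal numpy-only sketch (illustrative, not part of the original file) of
# the symbolic state bookkeeping used by PhasedBoxCarry above: the task keeps
# binary "target" and "current" states (walker-near-pedestal, prop-in-hands,
# prop-on-pedestal) and pays a sparse reward when the two match. The sizes and
# the chosen target below are assumed values, not taken from a real episode.
import collections

import numpy as np

num_props, num_pedestals = 2, 4
target = collections.OrderedDict([
    ('walker_pedestal', np.zeros(num_pedestals)),
    ('walker_prop', np.zeros(num_props)),
    ('prop_pedestal', np.zeros([num_props, num_pedestals])),
])
current = collections.OrderedDict((k, np.array(v)) for k, v in target.items())

# Example target: stand at pedestal 1 while holding prop 0 (a pick-up style
# goal of the kind SinglePropFourPhases sets up).
target['walker_pedestal'][1] = 1
target['walker_prop'][0] = 1


def is_same_state(state_1, state_2):
  # Mirrors _is_same_state in warehouse.py: keys and all values must agree.
  if state_1.keys() != state_2.keys():
    return False
  return all(np.all(state_1[k] == state_2[k]) for k in state_1)

print(is_same_state(current, target))  # False: the walker is not there yet.
current['walker_pedestal'][1] = 1
current['walker_prop'][0] = 1
print(is_same_state(current, target))  # True: the sparse reward would fire.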
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility for opening arms until they are not in contact with a prop.""" import contextlib from dm_control.mujoco.wrapper import mjbindings import numpy as np _MAX_IK_ATTEMPTS = 100 _IK_MAX_CORRECTION_WEIGHT = 0.1 _JOINT_LIMIT_TOLERANCE = 1e-4 _GAP_TOLERANCE = 0.1 class _ArmPropContactRemover(object): """Helper class for removing contacts between an arm and a prop via IK.""" def __init__(self, physics, arm_root, prop, gap): arm_geoms = arm_root.find_all('geom') self._arm_geom_ids = set(physics.bind(arm_geoms).element_id) arm_joints = arm_root.find_all('joint') self._arm_joint_ids = list(physics.bind(arm_joints).element_id) self._arm_qpos_indices = physics.model.jnt_qposadr[self._arm_joint_ids] self._arm_dof_indices = physics.model.jnt_dofadr[self._arm_joint_ids] self._prop_geoms = prop.find_all('geom') self._prop_geom_ids = set(physics.bind(self._prop_geoms).element_id) self._arm_joint_min = np.full(len(self._arm_joint_ids), float('-inf'), dtype=physics.model.jnt_range.dtype) self._arm_joint_max = np.full(len(self._arm_joint_ids), float('inf'), dtype=physics.model.jnt_range.dtype) for i, joint_id in enumerate(self._arm_joint_ids): if physics.model.jnt_limited[joint_id]: self._arm_joint_min[i], self._arm_joint_max[i] = ( physics.model.jnt_range[joint_id]) self._gap = gap def _contact_pair_is_relevant(self, contact): set1 = self._arm_geom_ids set2 = self._prop_geom_ids return ((contact.geom1 in set1 and contact.geom2 in set2) or (contact.geom2 in set1 and contact.geom1 in set2)) def _forward_and_find_next_contact(self, physics): """Forwards the physics and finds the next contact to handle.""" physics.forward() next_contact = None for contact in physics.data.contact: if (self._contact_pair_is_relevant(contact) and (next_contact is None or contact.dist < next_contact.dist)): next_contact = contact return next_contact def _remove_contact_ik_iteration(self, physics, contact): """Performs one linearized IK iteration to remove the specified contact.""" if contact.geom1 in self._arm_geom_ids: sign = -1 geom_id = contact.geom1 else: sign = 1 geom_id = contact.geom2 body_id = physics.model.geom_bodyid[geom_id] normal = sign * contact.frame[:3] jac_dtype = physics.data.qpos.dtype jac = np.empty((6, physics.model.nv), dtype=jac_dtype) jac_pos, jac_rot = jac[:3], jac[3:] mjbindings.mjlib.mj_jacPointAxis( physics.model.ptr, physics.data.ptr, jac_pos, jac_rot, contact.pos + (contact.dist / 2) * normal, normal, body_id) # Calculate corrections w.r.t. all joints, disregarding joint limits. delta_xpos = normal * max(0, self._gap - contact.dist) jac_all_joints = jac_pos[:, self._arm_dof_indices] update_unfiltered = np.linalg.lstsq( jac_all_joints, delta_xpos, rcond=None)[0] # Filter out joints at limit that are corrected in the "wrong" direction. 
initial_qpos = np.array(physics.data.qpos[self._arm_qpos_indices]) min_filter = np.logical_and( initial_qpos - self._arm_joint_min < _JOINT_LIMIT_TOLERANCE, update_unfiltered < 0) max_filter = np.logical_and( self._arm_joint_max - initial_qpos < _JOINT_LIMIT_TOLERANCE, update_unfiltered > 0) active_joints = np.where( np.logical_not(np.logical_or(min_filter, max_filter)))[0] # Calculate corrections w.r.t. valid joints only. active_dof_indices = self._arm_dof_indices[active_joints] jac_joints = jac_pos[:, active_dof_indices] update_filtered = np.linalg.lstsq(jac_joints, delta_xpos, rcond=None)[0] update_nv = np.zeros(physics.model.nv, dtype=jac_dtype) update_nv[active_dof_indices] = update_filtered # Calculate maximum correction weight that does not violate joint limits. weights = np.full_like(update_filtered, _IK_MAX_CORRECTION_WEIGHT) active_initial_qpos = initial_qpos[active_joints] active_joint_min = self._arm_joint_min[active_joints] active_joint_max = self._arm_joint_max[active_joints] for i in range(len(weights)): proposed_update = update_filtered[i] if proposed_update > 0: max_allowed_update = active_joint_max[i] - active_initial_qpos[i] weights[i] = min(max_allowed_update / proposed_update, weights[i]) elif proposed_update < 0: min_allowed_update = active_joint_min[i] - active_initial_qpos[i] weights[i] = min(min_allowed_update / proposed_update, weights[i]) weight = min(weights) # Integrate the correction into `qpos`. mjbindings.mjlib.mj_integratePos( physics.model.ptr, physics.data.qpos, update_nv, weight) # "Paranoid" clip the modified joint `qpos` to within joint limits. active_qpos_indices = self._arm_qpos_indices[active_joints] physics.data.qpos[active_qpos_indices] = np.clip( physics.data.qpos[active_qpos_indices], active_joint_min, active_joint_max) @contextlib.contextmanager def _override_margins_and_gaps(self, physics): """Context manager that overrides geom margins and gaps to `self._gap`.""" prop_geom_bindings = physics.bind(self._prop_geoms) original_margins = np.array(prop_geom_bindings.margin) original_gaps = np.array(prop_geom_bindings.gap) prop_geom_bindings.margin = self._gap * (1 - _GAP_TOLERANCE) prop_geom_bindings.gap = self._gap * (1 - _GAP_TOLERANCE) yield prop_geom_bindings.margin = original_margins prop_geom_bindings.gap = original_gaps physics.forward() def remove_contacts(self, physics): with self._override_margins_and_gaps(physics): for _ in range(_MAX_IK_ATTEMPTS): contact = self._forward_and_find_next_contact(physics) if contact is None: return self._remove_contact_ik_iteration(physics, contact) contact = self._forward_and_find_next_contact(physics) if contact and contact.dist < 0: raise RuntimeError( 'Failed to remove contact with prop after {} iterations. ' 'Final contact distance is {}.'.format( _MAX_IK_ATTEMPTS, contact.dist)) def open_arms_for_prop(physics, left_arm_root, right_arm_root, prop, gap): """Opens left and right arms so as to leave a specified gap with the prop.""" left_arm_opener = _ArmPropContactRemover(physics, left_arm_root, prop, gap) left_arm_opener.remove_contacts(physics) right_arm_opener = _ArmPropContactRemover(physics, right_arm_root, prop, gap) right_arm_opener.remove_contacts(physics)
deepmind-research-master
catch_carry/arm_opener.py
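# A standalone numpy sketch (toy numbers, no MuJoCo) of the joint-limit-aware
# least-squares step performed by
# _ArmPropContactRemover._remove_contact_ik_iteration above: solve for a joint
# correction that moves the contact point by `delta_xpos`, drop joints that
# sit at a limit and would be pushed further past it, then cap the step size
# so no joint leaves its range.
import numpy as np

jac = np.array([[0.9, 0.1, 0.0],   # 3x3 toy Jacobian: d(contact pos)/d(q)
                [0.0, 0.8, 0.2],
                [0.1, 0.0, 0.7]])
qpos = np.array([0.0, 1.2, -0.3])          # current arm joint angles
joint_min = np.array([-1.6, -1.6, -1.6])   # joint range lower bounds
joint_max = np.array([1.6, 1.6, 1.6])      # joint range upper bounds
delta_xpos = np.array([0.0, 0.05, 0.0])    # open a 5 cm gap at the contact

# Unfiltered correction, then mask out limit-violating directions
# (cf. min_filter / max_filter in the original).
update = np.linalg.lstsq(jac, delta_xpos, rcond=None)[0]
tol = 1e-4
at_min = (qpos - joint_min < tol) & (update < 0)
at_max = (joint_max - qpos < tol) & (update > 0)
active = ~(at_min | at_max)
update_filtered = np.linalg.lstsq(jac[:, active], delta_xpos, rcond=None)[0]

# Cap the correction weight so every active joint stays inside its range
# (cf. the per-joint weight loop and _IK_MAX_CORRECTION_WEIGHT).
weights = np.full_like(update_filtered, 0.1)
room = np.where(update_filtered > 0,
                joint_max[active] - qpos[active],
                joint_min[active] - qpos[active])
nonzero = update_filtered != 0
weights[nonzero] = np.minimum(weights[nonzero],
                              room[nonzero] / update_filtered[nonzero])
qpos_new = qpos.copy()
qpos_new[active] += min(weights) * update_filtered
print(np.clip(qpos_new, joint_min, joint_max))  # "paranoid" clip, as above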
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Metadata for mocap clips that correspond to a walker carrying a prop.""" import collections import enum import os from dm_control.locomotion.mocap import loader as mocap_loader from catch_carry import trajectories H5_DIR = os.path.dirname(__file__) H5_PATH = os.path.join(H5_DIR, 'mocap_data.h5') IDENTIFIER_PREFIX = 'DeepMindCatchCarry' IDENTIFIER_TEMPLATE = IDENTIFIER_PREFIX + '-{:03d}' ClipInfo = collections.namedtuple( 'ClipInfo', ('clip_identifier', 'num_steps', 'dt', 'flags')) class Flag(enum.IntEnum): BOX = 1 << 0 BALL = 1 << 1 LIGHT_PROP = 1 << 2 HEAVY_PROP = 1 << 3 SMALL_PROP = 1 << 4 LARGE_PROP = 1 << 5 FLOOR_LEVEL = 1 << 6 MEDIUM_PEDESTAL = 1 << 7 HIGH_PEDESTAL = 1 << 8 _ALL_CLIPS = None def _get_clip_info(loader, clip_number, flags): clip = loader.get_trajectory(IDENTIFIER_TEMPLATE.format(clip_number)) return ClipInfo( clip_identifier=clip.identifier, num_steps=clip.num_steps, dt=clip.dt, flags=flags) def _get_all_clip_infos_if_necessary(): """Creates the global _ALL_CLIPS list if it has not already been created.""" global _ALL_CLIPS if _ALL_CLIPS is None: loader = mocap_loader.HDF5TrajectoryLoader( H5_PATH, trajectories.WarehouseTrajectory) clip_numbers = (1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53) clip_infos = [] for i, clip_number in enumerate(clip_numbers): flags = 0 if i in _FLOOR_LEVEL: flags |= Flag.FLOOR_LEVEL elif i in _MEDIUM_PEDESTAL: flags |= Flag.MEDIUM_PEDESTAL elif i in _HIGH_PEDESTAL: flags |= Flag.HIGH_PEDESTAL if i in _LIGHT_PROP: flags |= Flag.LIGHT_PROP elif i in _HEAVY_PROP: flags |= Flag.HEAVY_PROP if i in _SMALL_BOX: flags |= Flag.SMALL_PROP flags |= Flag.BOX elif i in _LARGE_BOX: flags |= Flag.LARGE_PROP flags |= Flag.BOX elif i in _SMALL_BALL: flags |= Flag.SMALL_PROP flags |= Flag.BALL elif i in _LARGE_BALL: flags |= Flag.LARGE_PROP flags |= Flag.BALL clip_infos.append(_get_clip_info(loader, clip_number, flags)) _ALL_CLIPS = tuple(clip_infos) def _assert_partitions_all_clips(*args): """Asserts that a given set of subcollections partitions ALL_CLIPS.""" sets = tuple(set(arg) for arg in args) # Check that the union of all the sets is ALL_CLIPS. union = set() for subset in sets: union = union | set(subset) assert union == set(range(48)) # Check that the sets are pairwise disjoint. 
for i in range(len(sets)): for j in range(i + 1, len(sets)): assert sets[i] & sets[j] == set() _FLOOR_LEVEL = tuple(range(0, 16)) _MEDIUM_PEDESTAL = tuple(range(16, 32)) _HIGH_PEDESTAL = tuple(range(32, 48)) _assert_partitions_all_clips(_FLOOR_LEVEL, _MEDIUM_PEDESTAL, _HIGH_PEDESTAL) _LIGHT_PROP = (0, 1, 2, 3, 8, 9, 12, 13, 16, 17, 18, 19, 24, 25, 26, 27, 34, 35, 38, 39, 42, 43, 46, 47) _HEAVY_PROP = (4, 5, 6, 7, 10, 11, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31, 32, 33, 36, 37, 40, 41, 44, 45) _assert_partitions_all_clips(_LIGHT_PROP, _HEAVY_PROP) _SMALL_BOX = (0, 1, 4, 5, 16, 17, 20, 21, 34, 35, 36, 37) _LARGE_BOX = (2, 3, 6, 7, 18, 19, 22, 23, 32, 33, 38, 39) _SMALL_BALL = (8, 9, 10, 11, 24, 25, 30, 31, 40, 41, 46, 47) _LARGE_BALL = (12, 13, 14, 15, 26, 27, 28, 29, 42, 43, 44, 45) _assert_partitions_all_clips(_SMALL_BOX, _LARGE_BOX, _SMALL_BALL, _LARGE_BALL) def all_clips(): _get_all_clip_infos_if_necessary() return _ALL_CLIPS def floor_level(): clips = all_clips() return tuple(clips[i] for i in _FLOOR_LEVEL) def medium_pedestal(): clips = all_clips() return tuple(clips[i] for i in _MEDIUM_PEDESTAL) def high_pedestal(): clips = all_clips() return tuple(clips[i] for i in _HIGH_PEDESTAL) def light_prop(): clips = all_clips() return tuple(clips[i] for i in _LIGHT_PROP) def heavy_prop(): clips = all_clips() return tuple(clips[i] for i in _HEAVY_PROP) def small_box(): clips = all_clips() return tuple(clips[i] for i in _SMALL_BOX) def large_box(): clips = all_clips() return tuple(clips[i] for i in _LARGE_BOX) def small_ball(): clips = all_clips() return tuple(clips[i] for i in _SMALL_BALL) def large_ball(): clips = all_clips() return tuple(clips[i] for i in _LARGE_BALL)
deepmind-research-master
catch_carry/mocap_data.py
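# A short sketch of selecting clips with the helpers above, plus the
# equivalent per-clip test on the Flag bitmask. Assumes the packaged
# mocap_data.h5 referenced by H5_PATH is available locally.
from catch_carry import mocap_data

# Medium-pedestal box clips, the same filter used by the warehouse task.
box_clips = sorted(
    set(mocap_data.medium_pedestal())
    & (set(mocap_data.small_box()) | set(mocap_data.large_box())))

for clip in box_clips:
  assert clip.flags & mocap_data.Flag.BOX
  assert clip.flags & mocap_data.Flag.MEDIUM_PEDESTAL
  print(clip.clip_identifier, clip.num_steps, clip.dt)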
# Copyright 2020 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
deepmind-research-master
catch_carry/__init__.py
# Copyright 2020 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Setup for pip package.""" from setuptools import find_packages from setuptools import setup REQUIRED_PACKAGES = ['absl-py', 'dm_control', 'numpy'] setup( name='catch_carry', version='0.1', description='Whole-body object manipulation tasks and motion capture data.', url='https://github.com/deepmind/deepmind-research/catch_carry', author='DeepMind', author_email='[email protected]', # Contained modules and scripts. packages=find_packages(), install_requires=REQUIRED_PACKAGES, platforms=['any'], license='Apache 2.0', )
deepmind-research-master
catch_carry/setup.py
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A ball-tossing task.""" import collections from dm_control import composer from dm_control import mjcf from dm_control.composer import variation from dm_control.composer.observation import observable from dm_control.locomotion.arenas import floors from dm_control.locomotion.mocap import loader as mocap_loader import numpy as np from catch_carry import mocap_data from catch_carry import props from catch_carry import trajectories _PHYSICS_TIMESTEP = 0.005 _BUCKET_SIZE = (0.2, 0.2, 0.02) # Magnitude of the sparse reward. _SPARSE_REWARD = 1.0 class BallToss(composer.Task): """A task involving catching and throwing a ball.""" def __init__(self, walker, proto_modifier=None, negative_reward_on_failure_termination=True, priority_friction=False, bucket_offset=1., y_range=0.5, toss_delay=0.5, randomize_init=False, ): """Initialize ball tossing task. Args: walker: the walker to be used in this task. proto_modifier: function to modify trajectory proto. negative_reward_on_failure_termination: flag to provide negative reward as task fails. priority_friction: sets friction priority thereby making prop objects have higher friction. bucket_offset: distance in meters to push bucket (away from walker) y_range: range (uniformly sampled) of distance in meters the ball is thrown left/right of the walker. toss_delay: time in seconds to delay after catching before changing reward to encourage throwing the ball. randomize_init: flag to randomize initial pose. 
""" self._proto_modifier = proto_modifier self._negative_reward_on_failure_termination = ( negative_reward_on_failure_termination) self._priority_friction = priority_friction self._bucket_rewarded = False self._bucket_offset = bucket_offset self._y_range = y_range self._toss_delay = toss_delay self._randomize_init = randomize_init # load a clip to grab a ball prop and initializations loader = mocap_loader.HDF5TrajectoryLoader( mocap_data.H5_PATH, trajectories.WarehouseTrajectory) clip_number = 54 self._trajectory = loader.get_trajectory( mocap_data.IDENTIFIER_TEMPLATE.format(clip_number)) # create the floor arena self._arena = floors.Floor() self._walker = walker self._walker_geoms = tuple(self._walker.mjcf_model.find_all('geom')) self._feet_geoms = ( walker.mjcf_model.find('body', 'lfoot').find_all('geom') + walker.mjcf_model.find('body', 'rfoot').find_all('geom')) self._lhand_geoms = ( walker.mjcf_model.find('body', 'lhand').find_all('geom')) self._rhand_geoms = ( walker.mjcf_model.find('body', 'rhand').find_all('geom')) # resize the humanoid based on the motion capture data subject self._trajectory.configure_walkers([self._walker]) walker.create_root_joints(self._arena.attach(walker)) control_timestep = self._trajectory.dt self.set_timesteps(control_timestep, _PHYSICS_TIMESTEP) # build and attach the bucket to the arena self._bucket = props.Bucket(_BUCKET_SIZE) self._arena.attach(self._bucket) self._prop = self._trajectory.create_props( priority_friction=self._priority_friction)[0] self._arena.add_free_entity(self._prop) self._task_observables = collections.OrderedDict() # define feature based observations (agent may or may not use these) def ego_prop_xpos(physics): prop_xpos, _ = self._prop.get_pose(physics) walker_xpos = physics.bind(self._walker.root_body).xpos return self._walker.transform_vec_to_egocentric_frame( physics, prop_xpos - walker_xpos) self._task_observables['prop_{}/xpos'.format(0)] = ( observable.Generic(ego_prop_xpos)) def prop_zaxis(physics): prop_xmat = physics.bind( mjcf.get_attachment_frame(self._prop.mjcf_model)).xmat return prop_xmat[[2, 5, 8]] self._task_observables['prop_{}/zaxis'.format(0)] = ( observable.Generic(prop_zaxis)) def ego_bucket_xpos(physics): bucket_xpos, _ = self._bucket.get_pose(physics) walker_xpos = physics.bind(self._walker.root_body).xpos return self._walker.transform_vec_to_egocentric_frame( physics, bucket_xpos - walker_xpos) self._task_observables['bucket_{}/xpos'.format(0)] = ( observable.Generic(ego_bucket_xpos)) for obs in (self._walker.observables.proprioception + self._walker.observables.kinematic_sensors + self._walker.observables.dynamic_sensors + list(self._task_observables.values())): obs.enabled = True @property def root_entity(self): return self._arena @property def task_observables(self): return self._task_observables @property def name(self): return 'ball_toss' def initialize_episode_mjcf(self, random_state): self._reward = 0.0 self._discount = 1.0 self._should_terminate = False self._prop.detach() if self._proto_modifier: trajectory = self._trajectory.get_modified_trajectory( self._proto_modifier) self._prop = trajectory.create_props( priority_friction=self._priority_friction)[0] self._arena.add_free_entity(self._prop) # set the bucket position for this episode bucket_distance = 1.*random_state.rand()+self._bucket_offset mjcf.get_attachment_frame(self._bucket.mjcf_model).pos = [bucket_distance, 0, 0] def initialize_episode(self, physics, random_state): self._ground_geomid = physics.bind( 
self._arena.mjcf_model.worldbody.geom[0]).element_id self._feet_geomids = set(physics.bind(self._feet_geoms).element_id) self._lhand_geomids = set(physics.bind(self._lhand_geoms).element_id) self._rhand_geomids = set(physics.bind(self._rhand_geoms).element_id) self._walker_geomids = set(physics.bind(self._walker_geoms).element_id) self._bucket_rewarded = False if self._randomize_init: timestep_ind = random_state.randint( len(self._trajectory._proto.timesteps)) # pylint: disable=protected-access else: timestep_ind = 0 walker_init_timestep = self._trajectory._proto.timesteps[timestep_ind] # pylint: disable=protected-access prop_init_timestep = self._trajectory._proto.timesteps[0] # pylint: disable=protected-access self._walker.set_pose( physics, position=walker_init_timestep.walkers[0].position, quaternion=walker_init_timestep.walkers[0].quaternion) self._walker.set_velocity( physics, velocity=walker_init_timestep.walkers[0].velocity, angular_velocity=walker_init_timestep.walkers[0].angular_velocity) physics.bind(self._walker.mocap_joints).qpos = ( walker_init_timestep.walkers[0].joints) physics.bind(self._walker.mocap_joints).qvel = ( walker_init_timestep.walkers[0].joints_velocity) initial_prop_pos = np.copy(prop_init_timestep.props[0].position) initial_prop_pos[0] += 1. # move ball (from mocap) relative to origin initial_prop_pos[1] = 0 # align ball with walker along y-axis self._prop.set_pose( physics, position=initial_prop_pos, quaternion=prop_init_timestep.props[0].quaternion) # specify the distributions of ball velocity componentwise x_vel_mag = 4.5*random_state.rand()+1.5 # m/s x_dist = 3 # approximate initial distance from walker to ball self._t_dist = x_dist/x_vel_mag # target time at which to hit the humanoid z_offset = .4*random_state.rand()+.1 # height at which to hit person # compute velocity to satisfy desired projectile trajectory z_vel_mag = (4.9*(self._t_dist**2) + z_offset)/self._t_dist y_range = variation.evaluate(self._y_range, random_state=random_state) y_vel_mag = y_range*random_state.rand()-y_range/2 trans_vel = [-x_vel_mag, y_vel_mag, z_vel_mag] ang_vel = 1.5*random_state.rand(3)-0.75 self._prop.set_velocity( physics, velocity=trans_vel, angular_velocity=ang_vel) def after_step(self, physics, random_state): # First we check for failure termination (walker or ball touches ground). 
ground_failure = False for contact in physics.data.contact: if ((contact.geom1 == self._ground_geomid and contact.geom2 not in self._feet_geomids) or (contact.geom2 == self._ground_geomid and contact.geom1 not in self._feet_geomids)): ground_failure = True break contact_features = self._evaluate_contacts(physics) prop_lhand, prop_rhand, bucket_prop, bucket_walker, walker_prop = contact_features # or also fail if walker hits bucket if ground_failure or bucket_walker: if self._negative_reward_on_failure_termination: self._reward = -_SPARSE_REWARD else: self._reward = 0.0 self._should_terminate = True self._discount = 0.0 return self._reward = 0.0 # give reward if prop is in bucket (prop touching bottom surface of bucket) if bucket_prop: self._reward += _SPARSE_REWARD/10 # shaping reward for being closer to bucket if physics.data.time > (self._t_dist + self._toss_delay): bucket_xy = physics.bind(self._bucket.geom).xpos[0][:2] prop_xy = self._prop.get_pose(physics)[0][:2] xy_dist = np.sum(np.array(np.abs(bucket_xy - prop_xy))) self._reward += np.exp(-xy_dist/3.)*_SPARSE_REWARD/50 else: # bonus for hands touching ball if prop_lhand: self._reward += _SPARSE_REWARD/100 if prop_rhand: self._reward += _SPARSE_REWARD/100 # combined with penalty for other body parts touching the ball if walker_prop: self._reward -= _SPARSE_REWARD/100 def get_reward(self, physics): return self._reward def get_discount(self, physics): return self._discount def should_terminate_episode(self, physics): return self._should_terminate def _evaluate_contacts(self, physics): prop_elem_id = physics.bind(self._prop.geom).element_id bucket_bottom_elem_id = physics.bind(self._bucket.geom[0]).element_id bucket_any_elem_id = set(physics.bind(self._bucket.geom).element_id) prop_lhand_contact = False prop_rhand_contact = False bucket_prop_contact = False bucket_walker_contact = False walker_prop_contact = False for contact in physics.data.contact: has_prop = (contact.geom1 == prop_elem_id or contact.geom2 == prop_elem_id) has_bucket_bottom = (contact.geom1 == bucket_bottom_elem_id or contact.geom2 == bucket_bottom_elem_id) has_bucket_any = (contact.geom1 in bucket_any_elem_id or contact.geom2 in bucket_any_elem_id) has_lhand = (contact.geom1 in self._lhand_geomids or contact.geom2 in self._lhand_geomids) has_rhand = (contact.geom1 in self._rhand_geomids or contact.geom2 in self._rhand_geomids) has_walker = (contact.geom1 in self._walker_geomids or contact.geom2 in self._walker_geomids) if has_prop and has_bucket_bottom: bucket_prop_contact = True if has_walker and has_bucket_any: bucket_walker_contact = True if has_walker and has_prop: walker_prop_contact = True if has_prop and has_lhand: prop_lhand_contact = True if has_prop and has_rhand: prop_rhand_contact = True return (prop_lhand_contact, prop_rhand_contact, bucket_prop_contact, bucket_walker_contact, walker_prop_contact)
deepmind-research-master
catch_carry/ball_toss.py
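# A quick numerical check (illustrative values only) of the projectile
# computation in BallToss.initialize_episode above: the vertical launch speed
# is chosen so that, with gravity g = 9.8 m/s^2, the ball has gained exactly
# `z_offset` of height by the time it has covered the `x_dist` = 3 m to the
# walker.
import numpy as np

x_dist = 3.0      # approximate initial walker-to-ball distance (m)
x_vel_mag = 4.0   # one assumed draw of the horizontal speed (m/s)
z_offset = 0.3    # one assumed draw of the desired height gain at impact (m)

t_dist = x_dist / x_vel_mag
z_vel_mag = (4.9 * t_dist ** 2 + z_offset) / t_dist

# Vertical displacement after t_dist seconds: v_z * t - 0.5 * g * t^2.
dz = z_vel_mag * t_dist - 4.9 * t_dist ** 2
np.testing.assert_allclose(dz, z_offset)
print(t_dist, z_vel_mag, dz)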
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple script to launch viewer with an example environment.""" from absl import app from absl import flags from dm_control import viewer from catch_carry import task_examples FLAGS = flags.FLAGS flags.DEFINE_enum('task', 'warehouse', ['warehouse', 'toss'], 'The task to visualize.') TASKS = { 'warehouse': task_examples.build_vision_warehouse, 'toss': task_examples.build_vision_toss, } def main(unused_argv): viewer.launch(environment_loader=TASKS[FLAGS.task]) if __name__ == '__main__': app.run(main)
deepmind-research-master
catch_carry/explore.py
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A rectangular pedestal.""" from dm_control import composer from dm_control import mjcf class Pedestal(composer.Entity): """A rectangular pedestal.""" def _build(self, size=(.2, .3, .05), rgba=(0, .5, 0, 1), name='pedestal'): self._mjcf_root = mjcf.RootElement(model=name) self._geom = self._mjcf_root.worldbody.add( 'geom', type='box', size=size, name='geom', rgba=rgba) @property def mjcf_model(self): return self._mjcf_root @property def geom(self): return self._geom def after_compile(self, physics, unused_random_state): super(Pedestal, self).after_compile(physics, unused_random_state) self._body_geom_ids = set( physics.bind(geom).element_id for geom in self.mjcf_model.find_all('geom')) @property def body_geom_ids(self): return self._body_geom_ids class Bucket(composer.Entity): """A rectangular bucket.""" def _build(self, size=(.2, .3, .05), rgba=(0, .5, 0, 1), name='pedestal'): self._mjcf_root = mjcf.RootElement(model=name) self._geoms = [] self._geoms.append(self._mjcf_root.worldbody.add( 'geom', type='box', size=size, name='geom_bottom', rgba=rgba)) self._geoms.append(self._mjcf_root.worldbody.add( 'geom', type='box', size=(size[2], size[1], size[0]), name='geom_s1', rgba=rgba, pos=[size[0], 0, size[0]])) self._geoms.append(self._mjcf_root.worldbody.add( 'geom', type='box', size=(size[2], size[1], size[0]), name='geom_s2', rgba=rgba, pos=[-size[0], 0, size[0]])) self._geoms.append(self._mjcf_root.worldbody.add( 'geom', type='box', size=(size[0], size[2], size[0]), name='geom_s3', rgba=rgba, pos=[0, size[1], size[0]])) self._geoms.append(self._mjcf_root.worldbody.add( 'geom', type='box', size=(size[0], size[2], size[0]), name='geom_s4', rgba=rgba, pos=[0, -size[1], size[0]])) @property def mjcf_model(self): return self._mjcf_root @property def geom(self): return self._geoms def after_compile(self, physics, unused_random_state): super(Bucket, self).after_compile(physics, unused_random_state) self._body_geom_ids = set( physics.bind(geom).element_id for geom in self.mjcf_model.find_all('geom')) @property def body_geom_ids(self): return self._body_geom_ids
deepmind-research-master
catch_carry/props.py
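# A minimal sketch of using the props above outside of a task: attach a
# Pedestal and a Bucket to a flat arena and compile the model directly. Only
# APIs already used elsewhere in this package are assumed.
from dm_control import mjcf
from dm_control.locomotion.arenas import floors

from catch_carry import props

arena = floors.Floor()
pedestal = props.Pedestal(size=(0.2, 0.2, 0.02), rgba=(0, 0.5, 0, 1))
bucket = props.Bucket(size=(0.2, 0.2, 0.02), rgba=(0.5, 0, 0, 1),
                      name='bucket')
arena.attach(pedestal)
arena.attach(bucket)

physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
print(physics.bind(pedestal.geom).size)  # the pedestal's single box geom
print(len(bucket.geom))                  # bucket = bottom plate + 4 walls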
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mocap trajectory that assumes props start stationary on pedestals.""" import copy import enum import itertools from dm_control.locomotion.mocap import mocap_pb2 from dm_control.locomotion.mocap import trajectory from dm_control.utils import transformations import numpy as np _PEDESTAL_SIZE = (0.2, 0.2, 0.02) _MAX_SETTLE_STEPS = 100 @enum.unique class ClipSegment(enum.Enum): """Annotations for subsegments within a warehouse clips.""" # Clip segment corresponding to a walker approaching an object APPROACH = 1 # Clip segment corresponding to a walker picking up an object. PICKUP = 2 # Clip segment corresponding to the "first half" of the walker carrying an # object, beginning from the walker backing away from a pedestal with # object in hand. CARRY1 = 3 # Clip segment corresponding to the "second half" of the walker carrying an # object, ending in the walker approaching a pedestal the object in hand. CARRY2 = 4 # Clip segment corresponding to a walker putting down an object on a pedestal. PUTDOWN = 5 # Clip segment corresponding to a walker backing off after successfully # placing an object on a pedestal. BACKOFF = 6 def _get_rotated_bounding_box(size, quaternion): """Calculates the bounding box of a rotated 3D box. Args: size: An array of length 3 specifying the half-lengths of a box. quaternion: A unit quaternion specifying the box's orientation. Returns: An array of length 3 specifying the half-lengths of the bounding box of the rotated box. """ corners = ((size[0], size[1], size[2]), (size[0], size[1], -size[2]), (size[0], -size[1], size[2]), (-size[0], size[1], size[2])) rotated_corners = tuple( transformations.quat_rotate(quaternion, corner) for corner in corners) return np.amax(np.abs(rotated_corners), axis=0) def _get_prop_z_extent(prop_proto, quaternion): """Calculates the "z-extent" of the prop in given orientation. This is the distance from the centre of the prop to its lowest point in the world frame, taking into account the prop's orientation. Args: prop_proto: A `mocap_pb2.Prop` protocol buffer defining a prop. quaternion: A unit quaternion specifying the prop's orientation. Returns: the distance from the centre of the prop to its lowest point in the world frame in the specified orientation. 
""" if prop_proto.shape == mocap_pb2.Prop.BOX: return _get_rotated_bounding_box(prop_proto.size, quaternion)[2] elif prop_proto.shape == mocap_pb2.Prop.SPHERE: return prop_proto.size[0] else: raise NotImplementedError( 'Unsupported prop shape: {}'.format(prop_proto.shape)) class WarehouseTrajectory(trajectory.Trajectory): """Mocap trajectory that assumes props start stationary on pedestals.""" def infer_pedestal_positions(self, num_averaged_steps=30, ground_height_tolerance=0.1, proto_modifier=None): proto = self._proto if proto_modifier is not None: proto = copy.copy(proto) proto_modifier(proto) if not proto.props: return [] positions = [] for timestep in itertools.islice(proto.timesteps, num_averaged_steps): positions_for_timestep = [] for prop_proto, prop_timestep in zip(proto.props, timestep.props): z_extent = _get_prop_z_extent(prop_proto, prop_timestep.quaternion) positions_for_timestep.append([prop_timestep.position[0], prop_timestep.position[1], prop_timestep.position[2] - z_extent]) positions.append(positions_for_timestep) median_positions = np.median(positions, axis=0) median_positions[:, 2][median_positions[:, 2] < ground_height_tolerance] = 0 return median_positions def get_props_z_extent(self, physics): timestep = self._proto.timesteps[self._get_step_id(physics.time())] out = [] for prop_proto, prop_timestep in zip(self._proto.props, timestep.props): z_extent = _get_prop_z_extent(prop_proto, prop_timestep.quaternion) out.append(z_extent) return out class SinglePropCarrySegmentedTrajectory(WarehouseTrajectory): """A mocap trajectory class that automatically segments prop-carry clips. The algorithm implemented in the class only works if the trajectory consists of exactly one walker and one prop. The value of `pedestal_zone_distance` the exact nature of zone crossings are determined empirically from the DeepMindCatchCarry dataset, and are likely to not work well outside of this setting. """ def __init__(self, proto, start_time=None, end_time=None, pedestal_zone_distance=0.65, start_step=None, end_step=None, zero_out_velocities=True): super(SinglePropCarrySegmentedTrajectory, self).__init__( proto, start_time, end_time, start_step=start_step, end_step=end_step, zero_out_velocities=zero_out_velocities) self._pedestal_zone_distance = pedestal_zone_distance self._generate_segments() def _generate_segments(self): pedestal_position = self.infer_pedestal_positions()[0] # First we find the timesteps at which the walker cross the pedestal's # vicinity zone. This should happen exactly 4 times: enter it to pick up, # leave it, enter it again to put down, and leave it again. was_in_pedestal_zone = False crossings = [] for i, timestep in enumerate(self._proto.timesteps): pedestal_dist = np.linalg.norm( timestep.walkers[0].position[:2] - pedestal_position[:2]) if pedestal_dist > self._pedestal_zone_distance and was_in_pedestal_zone: crossings.append(i) was_in_pedestal_zone = False elif (pedestal_dist <= self._pedestal_zone_distance and not was_in_pedestal_zone): crossings.append(i) was_in_pedestal_zone = True if len(crossings) < 3: raise RuntimeError( 'Failed to segment the given trajectory: ' 'walker should cross the pedestal zone\'s boundary >= 3 times ' 'but got {}'.format(len(crossings))) elif len(crossings) == 3: crossings.append(len(self._proto.timesteps) - 1) elif len(crossings) > 4: crossings = [crossings[0], crossings[1], crossings[-2], crossings[-1]] # Identify the pick up event during the first in-zone interval. 
start_position = np.array(self._proto.timesteps[0].props[0].position) end_position = np.array(self._proto.timesteps[-1].props[0].position) pick_up_step = crossings[1] - 1 while pick_up_step > crossings[0]: prev_position = self._proto.timesteps[pick_up_step - 1].props[0].position if np.linalg.norm(start_position[2] - prev_position[2]) < 0.001: break pick_up_step -= 1 # Identify the put down event during the second in-zone interval. put_down_step = crossings[2] while put_down_step <= crossings[3]: next_position = self._proto.timesteps[put_down_step + 1].props[0].position if np.linalg.norm(end_position[2] - next_position[2]) < 0.001: break put_down_step += 1 carry_halfway_step = int((crossings[1] + crossings[2]) / 2) self._segment_intervals = { ClipSegment.APPROACH: (0, crossings[0]), ClipSegment.PICKUP: (crossings[0], pick_up_step), ClipSegment.CARRY1: (pick_up_step, carry_halfway_step), ClipSegment.CARRY2: (carry_halfway_step, crossings[2]), ClipSegment.PUTDOWN: (crossings[2], put_down_step), ClipSegment.BACKOFF: (put_down_step, len(self._proto.timesteps)) } def segment_interval(self, segment): start_step, end_step = self._segment_intervals[segment] return (start_step * self._proto.dt, (end_step - 1) * self._proto.dt) def get_random_timestep_in_segment(self, segment, random_step): return self._proto.timesteps[ random_step.randint(*self._segment_intervals[segment])]
deepmind-research-master
catch_carry/trajectories.py
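# A brief sketch of loading one of the segmented carry clips handled above and
# drawing an initialization timestep from a chosen segment, mirroring how the
# warehouse task does it. Assumes the packaged mocap_data.h5 is present.
import numpy as np
from dm_control.locomotion.mocap import loader as mocap_loader

from catch_carry import mocap_data
from catch_carry import trajectories

loader = mocap_loader.HDF5TrajectoryLoader(
    mocap_data.H5_PATH, trajectories.SinglePropCarrySegmentedTrajectory)
clips = sorted(set(mocap_data.medium_pedestal()) & set(mocap_data.small_box()))
traj = loader.get_trajectory(clips[0].clip_identifier)

# Time interval (seconds) covered by the pick-up segment of this clip.
print(traj.segment_interval(trajectories.ClipSegment.PICKUP))

# A random timestep within the first carry segment, e.g. for initialization.
random_state = np.random.RandomState(0)
timestep = traj.get_random_timestep_in_segment(
    trajectories.ClipSegment.CARRY1, random_state)
print(timestep.walkers[0].position)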
# Copyright 2020 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions that build representative tasks.""" from dm_control import composer from dm_control.composer.variation import distributions from dm_control.locomotion.mocap import loader as mocap_loader from dm_control.locomotion.walkers import cmu_humanoid from catch_carry import ball_toss from catch_carry import warehouse def build_vision_warehouse(random_state=None): """Build canonical 4-pedestal, 2-prop task.""" # Build a position-controlled CMU humanoid walker. walker = cmu_humanoid.CMUHumanoidPositionControlled( observable_options={'egocentric_camera': dict(enabled=True)}) # Build the task. size_distribution = distributions.Uniform(low=0.75, high=1.25) mass_distribution = distributions.Uniform(low=2, high=7) prop_resizer = mocap_loader.PropResizer(size_factor=size_distribution, mass=mass_distribution) task = warehouse.PhasedBoxCarry( walker=walker, num_props=2, num_pedestals=4, proto_modifier=prop_resizer, negative_reward_on_failure_termination=True) # return the environment return composer.Environment( time_limit=15, task=task, random_state=random_state, strip_singleton_obs_buffer_dim=True, max_reset_attempts=float('inf')) def build_vision_toss(random_state=None): """Build canonical ball tossing task.""" # Build a position-controlled CMU humanoid walker. walker = cmu_humanoid.CMUHumanoidPositionControlled( observable_options={'egocentric_camera': dict(enabled=True)}) # Build the task. size_distribution = distributions.Uniform(low=0.95, high=1.5) mass_distribution = distributions.Uniform(low=2, high=4) prop_resizer = mocap_loader.PropResizer(size_factor=size_distribution, mass=mass_distribution) task = ball_toss.BallToss( walker=walker, proto_modifier=prop_resizer, negative_reward_on_failure_termination=True, priority_friction=True, bucket_offset=3., y_range=0.5, toss_delay=1.5, randomize_init=True) # return the environment return composer.Environment( time_limit=6, task=task, random_state=random_state, strip_singleton_obs_buffer_dim=True, max_reset_attempts=float('inf'))
deepmind-research-master
catch_carry/task_examples.py
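# A minimal run-loop sketch for the example environments built above, driving
# the warehouse task with uniform random actions. Requires MuJoCo, dm_control
# and the packaged mocap data; the step count is an arbitrary choice.
import numpy as np

from catch_carry import task_examples

env = task_examples.build_vision_warehouse()
action_spec = env.action_spec()
timestep = env.reset()
for _ in range(100):
  if timestep.last():
    break
  action = np.random.uniform(
      action_spec.minimum, action_spec.maximum, size=action_spec.shape)
  timestep = env.step(action)
  print(timestep.reward)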
# Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the Hierarchical Probabilistic U-Net open-source version.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from model import HierarchicalProbUNet import tensorflow.compat.v1 as tf _NUM_CLASSES = 2 _BATCH_SIZE = 2 _SPATIAL_SHAPE = [32, 32] _CHANNELS_PER_BLOCK = [5, 7, 9, 11, 13] _IMAGE_SHAPE = [_BATCH_SIZE] + _SPATIAL_SHAPE + [1] _BOTTLENECK_SIZE = _SPATIAL_SHAPE[0] // 2 ** (len(_CHANNELS_PER_BLOCK) - 1) _SEGMENTATION_SHAPE = [_BATCH_SIZE] + _SPATIAL_SHAPE + [_NUM_CLASSES] _LATENT_DIMS = [3, 2, 1] _INITIALIZERS = {'w': tf.orthogonal_initializer(gain=1.0, seed=None), 'b': tf.truncated_normal_initializer(stddev=0.001)} def _get_placeholders(): """Returns placeholders for the image and segmentation.""" img = tf.placeholder(dtype=tf.float32, shape=_IMAGE_SHAPE) seg = tf.placeholder(dtype=tf.float32, shape=_SEGMENTATION_SHAPE) return img, seg class HierarchicalProbUNetTest(tf.test.TestCase): def test_shape_of_sample(self): hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS, channels_per_block=_CHANNELS_PER_BLOCK, num_classes=_NUM_CLASSES, initializers=_INITIALIZERS) img, _ = _get_placeholders() sample = hpu_net.sample(img) self.assertEqual(sample.shape.as_list(), _SEGMENTATION_SHAPE) def test_shape_of_reconstruction(self): hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS, channels_per_block=_CHANNELS_PER_BLOCK, num_classes=_NUM_CLASSES, initializers=_INITIALIZERS) img, seg = _get_placeholders() reconstruction = hpu_net.reconstruct(img, seg) self.assertEqual(reconstruction.shape.as_list(), _SEGMENTATION_SHAPE) def test_shapes_in_prior(self): hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS, channels_per_block=_CHANNELS_PER_BLOCK, num_classes=_NUM_CLASSES, initializers=_INITIALIZERS) img, _ = _get_placeholders() prior_out = hpu_net._prior(img) distributions = prior_out['distributions'] latents = prior_out['used_latents'] encoder_features = prior_out['encoder_features'] decoder_features = prior_out['decoder_features'] # Test number of latent disctributions. self.assertEqual(len(distributions), len(_LATENT_DIMS)) # Test shapes of latent scales. for level in range(len(_LATENT_DIMS)): latent_spatial_shape = _BOTTLENECK_SIZE * 2 ** level latent_shape = [_BATCH_SIZE, latent_spatial_shape, latent_spatial_shape, _LATENT_DIMS[level]] self.assertEqual(latents[level].shape.as_list(), latent_shape) # Test encoder shapes. for level in range(len(_CHANNELS_PER_BLOCK)): spatial_shape = _SPATIAL_SHAPE[0] // 2 ** level feature_shape = [_BATCH_SIZE, spatial_shape, spatial_shape, _CHANNELS_PER_BLOCK[level]] self.assertEqual(encoder_features[level].shape.as_list(), feature_shape) # Test decoder shape. 
start_level = len(_LATENT_DIMS) latent_spatial_shape = _BOTTLENECK_SIZE * 2 ** start_level latent_shape = [_BATCH_SIZE, latent_spatial_shape, latent_spatial_shape, _CHANNELS_PER_BLOCK[::-1][start_level]] self.assertEqual(decoder_features.shape.as_list(), latent_shape) def test_shape_of_kl(self): hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS, channels_per_block=_CHANNELS_PER_BLOCK, num_classes=_NUM_CLASSES, initializers=_INITIALIZERS) img, seg = _get_placeholders() kl_dict = hpu_net.kl(img, seg) self.assertEqual(len(kl_dict), len(_LATENT_DIMS)) if __name__ == '__main__': tf.test.main()
deepmind-research-master
hierarchical_probabilistic_unet/model_test.py
# Copyright 2019 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility Functions for the GECO-objective. (GECO is described in `Taming VAEs`, see https://arxiv.org/abs/1810.00597). """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf class MovingAverage(snt.AbstractModule): """A thin wrapper around snt.MovingAverage. The module adds the option not to differentiate through the last element that is added to the moving average, specified by means of the kwarg `differentiable`. """ def __init__(self, decay, local=True, differentiable=False, name='snt_moving_average'): super(MovingAverage, self).__init__(name=name) self._differentiable = differentiable self._moving_average = snt.MovingAverage( decay=decay, local=local, name=name) def _build(self, inputs): if not self._differentiable: inputs = tf.stop_gradient(inputs) return self._moving_average(inputs) class LagrangeMultiplier(snt.AbstractModule): """A lagrange multiplier sonnet module.""" def __init__(self, rate=1e-2, name='snt_lagrange_multiplier'): """Initializer for the sonnet module. Args: rate: Scalar used to scale the magnitude of gradients of the Lagrange multipliers, defaulting to 1e-2. name: Name of the Lagrange multiplier sonnet module. """ super(LagrangeMultiplier, self).__init__(name=name) self._rate = rate def _build(self, ma_constraint): """Connects the module to the graph. Args: ma_constraint: A loss minus a target value, denoting a constraint that shall be less or equal than zero. Returns: An op, which when added to a loss and calling minimize on the loss results in the optimizer minimizing w.r.t. to the model's parameters and maximizing w.r.t. the Lagrande multipliers, hence enforcing the constraints. """ lagmul = snt.get_lagrange_multiplier( shape=ma_constraint.shape, rate=self._rate, initializer=np.ones(ma_constraint.shape)) return lagmul def _sample_gumbel(shape, eps=1e-20): """Transforms a uniform random variable to be standard Gumbel distributed.""" return -tf.log( -tf.log(tf.random_uniform(shape, minval=0, maxval=1) + eps) + eps) def _topk_mask(score, k): """Returns a mask for the top-k elements in score.""" _, indices = tf.nn.top_k(score, k=k) return tf.scatter_nd(tf.expand_dims(indices, -1), tf.ones(k), tf.squeeze(score).shape.as_list()) def ce_loss(logits, labels, mask=None, top_k_percentage=None, deterministic=False): """Computes the cross-entropy loss. Optionally a mask and a top-k percentage for the used pixels can be specified. The top-k mask can be produced deterministically or sampled. Args: logits: A tensor of shape (b,h,w,num_classes) labels: A tensor of shape (b,h,w,num_classes) mask: None or a tensor of shape (b,h,w). top_k_percentage: None or a float in (0.,1.]. If None, a standard cross-entropy loss is calculated. deterministic: A Boolean indicating whether or not to produce the prospective top-k mask deterministically. 
Returns: A dictionary holding the mean and the pixelwise sum of the loss for the batch as well as the employed loss mask. """ num_classes = logits.shape.as_list()[-1] y_flat = tf.reshape(logits, (-1, num_classes), name='reshape_y') t_flat = tf.reshape(labels, (-1, num_classes), name='reshape_t') if mask is None: mask = tf.ones(shape=(t_flat.shape.as_list()[0],)) else: assert mask.shape.as_list()[:3] == labels.shape.as_list()[:3],\ 'The loss mask shape differs from the target shape: {} vs. {}.'.format( mask.shape.as_list(), labels.shape.as_list()[:3]) mask = tf.reshape(mask, (-1,), name='reshape_mask') n_pixels_in_batch = y_flat.shape.as_list()[0] xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=t_flat, logits=y_flat) if top_k_percentage is not None: assert 0.0 < top_k_percentage <= 1.0 k_pixels = tf.cast(tf.floor(n_pixels_in_batch * top_k_percentage), tf.int32) stopgrad_xe = tf.stop_gradient(xe) norm_xe = stopgrad_xe / tf.reduce_sum(stopgrad_xe) if deterministic: score = tf.log(norm_xe) else: # Use the Gumbel trick to sample the top-k pixels, equivalent to sampling # from a categorical distribution over pixels whose probabilities are # given by the normalized cross-entropy loss values. This is done by # adding Gumbel noise to the logarithmic normalized cross-entropy loss # (followed by choosing the top-k pixels). score = tf.log(norm_xe) + _sample_gumbel(norm_xe.shape.as_list()) score = score + tf.log(mask) top_k_mask = _topk_mask(score, k_pixels) mask = mask * top_k_mask # Calculate batch-averages for the sum and mean of the loss batch_size = labels.shape.as_list()[0] xe = tf.reshape(xe, shape=(batch_size, -1)) mask = tf.reshape(mask, shape=(batch_size, -1)) ce_sum_per_instance = tf.reduce_sum(mask * xe, axis=1) ce_sum = tf.reduce_mean(ce_sum_per_instance, axis=0) ce_mean = tf.reduce_sum(mask * xe) / tf.reduce_sum(mask) return {'mean': ce_mean, 'sum': ce_sum, 'mask': mask}
deepmind-research-master
hierarchical_probabilistic_unet/geco_utils.py
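A minimal usage sketch (not part of the repository) of the pixel-wise top-k cross-entropy from geco_utils above. It assumes TF 1.x graph mode as pinned in setup.py; the random inputs, batch/spatial sizes, and the 2% top-k fraction are illustrative choices only.

import numpy as np
import tensorflow.compat.v1 as tf

import geco_utils

# Random logits and one-hot labels with shape (b, h, w, num_classes).
rng = np.random.RandomState(0)
logits_np = rng.randn(2, 32, 32, 2).astype(np.float32)
labels_np = np.eye(2, dtype=np.float32)[rng.randint(2, size=(2, 32, 32))]

logits = tf.placeholder(tf.float32, [2, 32, 32, 2])
labels = tf.placeholder(tf.float32, [2, 32, 32, 2])

# Only the top 2% hardest pixels contribute to the loss.
loss_dict = geco_utils.ce_loss(
    logits, labels, mask=None, top_k_percentage=0.02, deterministic=True)

with tf.Session() as sess:
  out = sess.run(loss_dict, {logits: logits_np, labels: labels_np})
  print('mean:', out['mean'], 'sum:', out['sum'], 'mask:', out['mask'].shape)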
# Copyright 2019 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Open Source Version of the Hierarchical Probabilistic U-Net.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import geco_utils import sonnet as snt import tensorflow as tf from tensorflow_probability import distributions as tfd import unet_utils class _HierarchicalCore(snt.AbstractModule): """A U-Net encoder-decoder with a full encoder and a truncated decoder. The truncated decoder is interleaved with the hierarchical latent space and has as many levels as there are levels in the hierarchy plus one additional level. """ def __init__(self, latent_dims, channels_per_block, down_channels_per_block=None, activation_fn=tf.nn.relu, initializers=None, regularizers=None, convs_per_block=3, blocks_per_level=3, name='HierarchicalDecoderDist'): """Initializes a HierarchicalCore. Args: latent_dims: List of integers specifying the dimensions of the latents at each scale. The length of the list indicates the number of U-Net decoder scales that have latents. channels_per_block: A list of integers specifying the number of output channels for each encoder block. down_channels_per_block: A list of integers specifying the number of intermediate channels for each encoder block or None. If None, the intermediate channels are chosen equal to channels_per_block. activation_fn: A callable activation function. initializers: Optional dict containing ops to initialize the filters (with key 'w') or biases (with key 'b'). The default initializer for the weights is a truncated normal initializer, which is commonly used when the inputs are zero centered (see https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for the bias is a zero initializer. regularizers: Optional dict containing regularizers for the filters (with key 'w') and the biases (with key 'b'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. convs_per_block: An integer specifying the number of convolutional layers. blocks_per_level: An integer specifying the number of residual blocks per level. name: A string specifying the name of the module. """ super(_HierarchicalCore, self).__init__(name=name) self._latent_dims = latent_dims self._channels_per_block = channels_per_block self._activation_fn = activation_fn self._initializers = initializers self._regularizers = regularizers self._convs_per_block = convs_per_block self._blocks_per_level = blocks_per_level if down_channels_per_block is None: self._down_channels_per_block = channels_per_block else: self._down_channels_per_block = down_channels_per_block self._name = name def _build(self, inputs, mean=False, z_q=None): """A build-method allowing to sample from the module as specified. Args: inputs: A tensor of shape (b,h,w,c). 
When using the module as a prior the `inputs` tensor should be a batch of images. When using it as a posterior the tensor should be a (batched) concatentation of images and segmentations. mean: A boolean or a list of booleans. If a boolean, it specifies whether or not to use the distributions' means in ALL latent scales. If a list, each bool therein specifies whether or not to use the scale's mean. If False, the latents of the scale are sampled. z_q: None or a list of tensors. If not None, z_q provides external latents to be used instead of sampling them. This is used to employ posterior latents in the prior during training. Therefore, if z_q is not None, the value of `mean` is ignored. If z_q is None, either the distributions mean is used (in case `mean` for the respective scale is True) or else a sample from the distribution is drawn. Returns: A Dictionary holding the output feature map of the truncated U-Net decoder under key 'decoder_features', a list of the U-Net encoder features produced at the end of each encoder scale under key 'encoder_outputs', a list of the predicted distributions at each scale under key 'distributions', a list of the used latents at each scale under the key 'used_latents'. """ encoder_features = inputs encoder_outputs = [] num_levels = len(self._channels_per_block) num_latent_levels = len(self._latent_dims) if isinstance(mean, bool): mean = [mean] * num_latent_levels distributions = [] used_latents = [] # Iterate the descending levels in the U-Net encoder. for level in range(num_levels): # Iterate the residual blocks in each level. for _ in range(self._blocks_per_level): encoder_features = unet_utils.res_block( input_features=encoder_features, n_channels=self._channels_per_block[level], n_down_channels=self._down_channels_per_block[level], activation_fn=self._activation_fn, initializers=self._initializers, regularizers=self._regularizers, convs_per_block=self._convs_per_block) encoder_outputs.append(encoder_features) if level != num_levels - 1: encoder_features = unet_utils.resize_down(encoder_features, scale=2) # Iterate the ascending levels in the (truncated) U-Net decoder. decoder_features = encoder_outputs[-1] for level in range(num_latent_levels): # Predict a Gaussian distribution for each pixel in the feature map. latent_dim = self._latent_dims[level] mu_logsigma = snt.Conv2D( 2 * latent_dim, (1, 1), padding='SAME', initializers=self._initializers, regularizers=self._regularizers, )(decoder_features) mu = mu_logsigma[..., :latent_dim] logsigma = mu_logsigma[..., latent_dim:] dist = tfd.MultivariateNormalDiag(loc=mu, scale_diag=tf.exp(logsigma)) distributions.append(dist) # Get the latents to condition on. if z_q is not None: z = z_q[level] elif mean[level]: z = dist.loc else: z = dist.sample() used_latents.append(z) # Concat and upsample the latents with the previous features. decoder_output_lo = tf.concat([z, decoder_features], axis=-1) decoder_output_hi = unet_utils.resize_up(decoder_output_lo, scale=2) decoder_features = tf.concat( [decoder_output_hi, encoder_outputs[::-1][level + 1]], axis=-1) # Iterate the residual blocks in each level. 
for _ in range(self._blocks_per_level): decoder_features = unet_utils.res_block( input_features=decoder_features, n_channels=self._channels_per_block[::-1][level + 1], n_down_channels=self._down_channels_per_block[::-1][level + 1], activation_fn=self._activation_fn, initializers=self._initializers, regularizers=self._regularizers, convs_per_block=self._convs_per_block) return {'decoder_features': decoder_features, 'encoder_features': encoder_outputs, 'distributions': distributions, 'used_latents': used_latents} class _StitchingDecoder(snt.AbstractModule): """A module that completes the truncated U-Net decoder. Using the output of the HierarchicalCore this module fills in the missing decoder levels such that together the two form a symmetric U-Net. """ def __init__(self, latent_dims, channels_per_block, num_classes, down_channels_per_block=None, activation_fn=tf.nn.relu, initializers=None, regularizers=None, convs_per_block=3, blocks_per_level=3, name='StitchingDecoder'): """Initializes a StichtingDecoder. Args: latent_dims: List of integers specifying the dimensions of the latents at each scale. The length of the list indicates the number of U-Net decoder scales that have latents. channels_per_block: A list of integers specifying the number of output channels for each encoder block. num_classes: An integer specifying the number of segmentation classes. down_channels_per_block: A list of integers specifying the number of intermediate channels for each encoder block. If None, the intermediate channels are chosen equal to channels_per_block. activation_fn: A callable activation function. initializers: Optional dict containing ops to initialize the filters (with key 'w') or biases (with key 'b'). The default initializer for the weights is a truncated normal initializer, which is commonly used when the inputs are zero centered (see https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for the bias is a zero initializer. regularizers: Optional dict containing regularizers for the filters (with key 'w') and the biases (with key 'b'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. convs_per_block: An integer specifying the number of convolutional layers. blocks_per_level: An integer specifying the number of residual blocks per level. name: A string specifying the name of the module. """ super(_StitchingDecoder, self).__init__(name=name) self._latent_dims = latent_dims self._channels_per_block = channels_per_block self._num_classes = num_classes self._activation_fn = activation_fn self._initializers = initializers self._regularizers = regularizers self._convs_per_block = convs_per_block self._blocks_per_level = blocks_per_level if down_channels_per_block is None: down_channels_per_block = channels_per_block self._down_channels_per_block = down_channels_per_block def _build(self, encoder_features, decoder_features): """Build-method that returns the segmentation logits. Args: encoder_features: A list of tensors of shape (b,h_i,w_i,c_i). decoder_features: A tensor of shape (b,h,w,c). Returns: Logits, i.e. a tensor of shape (b,h,w,num_classes). 
""" num_latents = len(self._latent_dims) start_level = num_latents + 1 num_levels = len(self._channels_per_block) for level in range(start_level, num_levels, 1): decoder_features = unet_utils.resize_up(decoder_features, scale=2) decoder_features = tf.concat([decoder_features, encoder_features[::-1][level]], axis=-1) for _ in range(self._blocks_per_level): decoder_features = unet_utils.res_block( input_features=decoder_features, n_channels=self._channels_per_block[::-1][level], n_down_channels=self._down_channels_per_block[::-1][level], activation_fn=self._activation_fn, initializers=self._initializers, regularizers=self._regularizers, convs_per_block=self._convs_per_block) return snt.Conv2D(output_channels=self._num_classes, kernel_shape=(1, 1), padding='SAME', initializers=self._initializers, regularizers=self._regularizers, name='logits')(decoder_features) class HierarchicalProbUNet(snt.AbstractModule): """A Hierarchical Probabilistic U-Net.""" def __init__(self, latent_dims=(1, 1, 1, 1), channels_per_block=None, num_classes=2, down_channels_per_block=None, activation_fn=tf.nn.relu, initializers=None, regularizers=None, convs_per_block=3, blocks_per_level=3, loss_kwargs=None, name='HPUNet'): """Initializes a HierarchicalProbUNet. The default values are set as for the LIDC-IDRI experiments in `A Hierarchical Probabilistic U-Net for Modeling Multi-Scale Ambiguities', see https://arxiv.org/abs/1905.13077. Args: latent_dims: List of integers specifying the dimensions of the latents at each scales. The length of the list indicates the number of U-Net decoder scales that have latents. channels_per_block: A list of integers specifying the number of output channels for each encoder block. num_classes: An integer specifying the number of segmentation classes. down_channels_per_block: A list of integers specifying the number of intermediate channels for each encoder block. If None, the intermediate channels are chosen equal to channels_per_block. activation_fn: A callable activation function. initializers: Optional dict containing ops to initialize the filters (with key 'w') or biases (with key 'b'). regularizers: Optional dict containing regularizers for the filters (with key 'w') and the biases (with key 'b'). convs_per_block: An integer specifying the number of convolutional layers. blocks_per_level: An integer specifying the number of residual blocks per level. loss_kwargs: None or dictionary specifying the loss setup. name: A string specifying the name of the module. 
""" super(HierarchicalProbUNet, self).__init__(name=name) base_channels = 24 default_channels_per_block = ( base_channels, 2 * base_channels, 4 * base_channels, 8 * base_channels, 8 * base_channels, 8 * base_channels, 8 * base_channels, 8 * base_channels ) if channels_per_block is None: channels_per_block = default_channels_per_block if down_channels_per_block is None: down_channels_per_block =\ tuple([i / 2 for i in default_channels_per_block]) if initializers is None: initializers = { 'w': tf.orthogonal_initializer(gain=1.0, seed=None), 'b': tf.truncated_normal_initializer(stddev=0.001) } if regularizers is None: regularizers = { 'w': tf.keras.regularizers.l2(1e-5), 'b': tf.keras.regularizers.l2(1e-5) } if loss_kwargs is None: self._loss_kwargs = { 'type': 'geco', 'top_k_percentage': 0.02, 'deterministic_top_k': False, 'kappa': 0.05, 'decay': 0.99, 'rate': 1e-2, 'beta': None } else: self._loss_kwargs = loss_kwargs if down_channels_per_block is None: down_channels_per_block = channels_per_block with self._enter_variable_scope(): self._prior = _HierarchicalCore( latent_dims=latent_dims, channels_per_block=channels_per_block, down_channels_per_block=down_channels_per_block, activation_fn=activation_fn, initializers=initializers, regularizers=regularizers, convs_per_block=convs_per_block, blocks_per_level=blocks_per_level, name='prior') self._posterior = _HierarchicalCore( latent_dims=latent_dims, channels_per_block=channels_per_block, down_channels_per_block=down_channels_per_block, activation_fn=activation_fn, initializers=initializers, regularizers=regularizers, convs_per_block=convs_per_block, blocks_per_level=blocks_per_level, name='posterior') self._f_comb = _StitchingDecoder( latent_dims=latent_dims, channels_per_block=channels_per_block, num_classes=num_classes, down_channels_per_block=down_channels_per_block, activation_fn=activation_fn, initializers=initializers, regularizers=regularizers, convs_per_block=convs_per_block, blocks_per_level=blocks_per_level, name='f_comb') if self._loss_kwargs['type'] == 'geco': self._moving_average = geco_utils.MovingAverage( decay=self._loss_kwargs['decay'], differentiable=True, name='ma_test') self._lagmul = geco_utils.LagrangeMultiplier( rate=self._loss_kwargs['rate']) self._cache = () def _build(self, seg, img): """Inserts all ops used during training into the graph exactly once. The first time this method is called given the input pair (seg, img) all ops relevant for training are inserted into the graph. Calling this method more than once does not re-insert the modules into the graph (memoization), thus preventing multiple forward passes of submodules for the same inputs. The method is private and called when setting up the loss. Args: seg: A tensor of shape (b, h, w, num_classes). img: A tensor of shape (b, h, w, c) Returns: None """ inputs = (seg, img) if self._cache == inputs: return else: self._q_sample = self._posterior( tf.concat([seg, img], axis=-1), mean=False) self._q_sample_mean = self._posterior( tf.concat([seg, img], axis=-1), mean=True) self._p_sample = self._prior( img, mean=False, z_q=None) self._p_sample_z_q = self._prior( img, z_q=self._q_sample['used_latents']) self._p_sample_z_q_mean = self._prior( img, z_q=self._q_sample_mean['used_latents']) self._cache = inputs return def sample(self, img, mean=False, z_q=None): """Sample a segmentation from the prior, given an input image. Args: img: A tensor of shape (b, h, w, c). mean: A boolean or a list of booleans. 
If a boolean, it specifies whether or not to use the distributions' means in ALL latent scales. If a list, each bool therein specifies whether or not to use the scale's mean. If False, the latents of the scale are sampled. z_q: None or a list of tensors. If not None, z_q provides external latents to be used instead of sampling them. This is used to employ posterior latents in the prior during training. Therefore, if z_q is not None, the value of `mean` is ignored. If z_q is None, either the distributions mean is used (in case `mean` for the respective scale is True) or else a sample from the distribution is drawn Returns: A segmentation tensor of shape (b, h, w, num_classes). """ prior_out = self._prior(img, mean, z_q) encoder_features = prior_out['encoder_features'] decoder_features = prior_out['decoder_features'] return self._f_comb(encoder_features=encoder_features, decoder_features=decoder_features) def reconstruct(self, seg, img, mean=False): """Reconstruct a segmentation using the posterior. Args: seg: A tensor of shape (b, h, w, num_classes). img: A tensor of shape (b, h, w, c). mean: A boolean, specifying whether to sample from the full hierarchy of the posterior or use the posterior means at each scale of the hierarchy. Returns: A segmentation tensor of shape (b,h,w,num_classes). """ self._build(seg, img) if mean: prior_out = self._p_sample_z_q_mean else: prior_out = self._p_sample_z_q encoder_features = prior_out['encoder_features'] decoder_features = prior_out['decoder_features'] return self._f_comb(encoder_features=encoder_features, decoder_features=decoder_features) def rec_loss(self, seg, img, mask=None, top_k_percentage=None, deterministic=True): """Cross-entropy reconstruction loss employed in the ELBO-/ GECO-objective. Args: seg: A tensor of shape (b, h, w, num_classes). img: A tensor of shape (b, h, w, c). mask: A mask of shape (b, h, w) or None. If None no pixels are masked in the loss. top_k_percentage: None or a float in (0.,1.]. If None, a standard cross-entropy loss is calculated. deterministic: A Boolean indicating whether or not to produce the prospective top-k mask deterministically. Returns: A dictionary holding the mean and the pixelwise sum of the loss for the batch as well as the employed loss mask. """ reconstruction = self.reconstruct(seg, img, mean=False) return geco_utils.ce_loss( reconstruction, seg, mask, top_k_percentage, deterministic) def kl(self, seg, img): """Kullback-Leibler divergence between the posterior and the prior. Args: seg: A tensor of shape (b, h, w, num_classes). img: A tensor of shape (b, h, w, c). Returns: A dictionary with keys indexing the hierarchy's levels and corresponding values holding the KL-term for each level (per batch). """ self._build(seg, img) posterior_out = self._q_sample prior_out = self._p_sample_z_q q_dists = posterior_out['distributions'] p_dists = prior_out['distributions'] kl = {} for level, (q, p) in enumerate(zip(q_dists, p_dists)): # Shape (b, h, w). kl_per_pixel = tfd.kl_divergence(q, p) # Shape (b,). kl_per_instance = tf.reduce_sum(kl_per_pixel, axis=[1, 2]) # Shape (1,). kl[level] = tf.reduce_mean(kl_per_instance) return kl def loss(self, seg, img, mask): """The full training objective, either ELBO or GECO. Args: seg: A tensor of shape (b, h, w, num_classes). img: A tensor of shape (b, h, w, c). mask: A mask of shape (b, h, w) or None. If None no pixels are masked in the loss. Returns: A dictionary holding the loss (with key 'loss') and the tensorboard summaries (with key 'summaries'). 
""" summaries = {} top_k_percentage = self._loss_kwargs['top_k_percentage'] deterministic = self._loss_kwargs['deterministic_top_k'] rec_loss = self.rec_loss(seg, img, mask, top_k_percentage, deterministic) kl_dict = self.kl(seg, img) kl_sum = tf.reduce_sum( tf.stack([kl for _, kl in kl_dict.iteritems()], axis=-1)) summaries['rec_loss_mean'] = rec_loss['mean'] summaries['rec_loss_sum'] = rec_loss['sum'] summaries['kl_sum'] = kl_sum for level, kl in kl_dict.iteritems(): summaries['kl_{}'.format(level)] = kl # Set up a regular ELBO objective. if self._loss_kwargs['type'] == 'elbo': loss = rec_loss['sum'] + self._loss_kwargs['beta'] * kl_sum summaries['elbo_loss'] = loss # Set up a GECO objective (ELBO with a reconstruction constraint). elif self._loss_kwargs['type'] == 'geco': ma_rec_loss = self._moving_average(rec_loss['sum']) mask_sum_per_instance = tf.reduce_sum(rec_loss['mask'], axis=-1) num_valid_pixels = tf.reduce_mean(mask_sum_per_instance) reconstruction_threshold = self._loss_kwargs['kappa'] * num_valid_pixels rec_constraint = ma_rec_loss - reconstruction_threshold lagmul = self._lagmul(rec_constraint) loss = lagmul * rec_constraint + kl_sum summaries['geco_loss'] = loss summaries['ma_rec_loss_mean'] = ma_rec_loss / num_valid_pixels summaries['num_valid_pixels'] = num_valid_pixels summaries['lagmul'] = lagmul else: raise NotImplementedError('Loss type {} not implemeted!'.format( self._loss_kwargs['type'])) return dict(supervised_loss=loss, summaries=summaries) if __name__ == '__main__': hpu_net = HierarchicalProbUNet()
deepmind-research-master
hierarchical_probabilistic_unet/model.py
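A minimal training-graph sketch (not part of the repository) for the HierarchicalProbUNet above. The small channel and latent configuration mirrors model_test.py, down_channels_per_block is given explicitly, TF 1.x graph mode is assumed, and the Adam learning rate is an arbitrary choice; the default loss_kwargs select the GECO objective.

import tensorflow.compat.v1 as tf

from model import HierarchicalProbUNet

hpu_net = HierarchicalProbUNet(
    latent_dims=(3, 2, 1),
    channels_per_block=(5, 7, 9, 11, 13),
    down_channels_per_block=(5, 7, 9, 11, 13),
    num_classes=2)

img = tf.placeholder(tf.float32, [2, 32, 32, 1])
seg = tf.placeholder(tf.float32, [2, 32, 32, 2])

# Build the GECO loss and an optimizer step on it.
loss_dict = hpu_net.loss(seg, img, mask=None)
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss_dict['supervised_loss'])

# Prior samples reuse the variables created while building the loss.
sample = hpu_net.sample(img)
print(sample.shape.as_list())  # [2, 32, 32, 2]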
# Copyright 2019 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Setup for pip package.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from setuptools import find_packages from setuptools import setup REQUIRED_PACKAGES = ['numpy', 'dm-sonnet==1.35', 'tensorflow==1.14', 'tensorflow-probability==0.7.0'] setup( name='hpu_net', version='0.1', description='A library for the Hierarchical Probabilistic U-Net model.', url='https://github.com/deepmind/deepmind-research/hierarchical_probabilistic_unet', author='DeepMind', author_email='[email protected]', # Contained modules and scripts. packages=find_packages(), install_requires=REQUIRED_PACKAGES, platforms=['any'], license='Apache 2.0', )
deepmind-research-master
hierarchical_probabilistic_unet/setup.py
# Copyright 2019 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Architectural blocks and utility functions of the U-Net.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sonnet as snt import tensorflow.compat.v1 as tf def res_block(input_features, n_channels, n_down_channels=None, activation_fn=tf.nn.relu, initializers=None, regularizers=None, convs_per_block=3): """A pre-activated residual block. Args: input_features: A tensor of shape (b, h, w, c). n_channels: An integer specifying the number of output channels. n_down_channels: An integer specifying the number of intermediate channels. activation_fn: A callable activation function. initializers: Initializers for the weights and biases. regularizers: Regularizers for the weights and biases. convs_per_block: An Integer specifying the number of convolutional layers. Returns: A tensor of shape (b, h, w, c). """ # Pre-activate the inputs. skip = input_features residual = activation_fn(input_features) # Set the number of intermediate channels that we compress to. if n_down_channels is None: n_down_channels = n_channels for c in range(convs_per_block): residual = snt.Conv2D(n_down_channels, (3, 3), padding='SAME', initializers=initializers, regularizers=regularizers)(residual) if c < convs_per_block - 1: residual = activation_fn(residual) incoming_channels = input_features.shape[-1] if incoming_channels != n_channels: skip = snt.Conv2D(n_channels, (1, 1), padding='SAME', initializers=initializers, regularizers=regularizers)(skip) if n_down_channels != n_channels: residual = snt.Conv2D(n_channels, (1, 1), padding='SAME', initializers=initializers, regularizers=regularizers)(residual) return skip + residual def resize_up(input_features, scale=2): """Nearest neighbor rescaling-operation for the input features. Args: input_features: A tensor of shape (b, h, w, c). scale: An integer specifying the scaling factor. Returns: A tensor of shape (b, scale * h, scale * w, c). """ assert scale >= 1 _, size_x, size_y, _ = input_features.shape.as_list() new_size_x = int(round(size_x * scale)) new_size_y = int(round(size_y * scale)) return tf.image.resize( input_features, [new_size_x, new_size_y], align_corners=True, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) def resize_down(input_features, scale=2): """Average pooling rescaling-operation for the input features. Args: input_features: A tensor of shape (b, h, w, c). scale: An integer specifying the scaling factor. Returns: A tensor of shape (b, h / scale, w / scale, c). """ assert scale >= 1 return tf.nn.avg_pool2d( input_features, ksize=(1, scale, scale, 1), strides=(1, scale, scale, 1), padding='VALID')
deepmind-research-master
hierarchical_probabilistic_unet/unet_utils.py
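A small, self-contained shape check (not from the repository) for the building blocks above, assuming TF 1.x graph mode; the channel counts and spatial size are arbitrary.

import numpy as np
import tensorflow.compat.v1 as tf

import unet_utils

x = tf.constant(np.random.randn(1, 8, 8, 4).astype(np.float32))
h = unet_utils.res_block(x, n_channels=6, n_down_channels=3)   # (1, 8, 8, 6)
down = unet_utils.resize_down(h, scale=2)                      # (1, 4, 4, 6)
up = unet_utils.resize_up(down, scale=2)                       # (1, 8, 8, 6)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(up).shape)  # (1, 8, 8, 6)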
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Episodic Memory and Synthetic Returns Core Wrapper modules.""" import collections import haiku as hk import jax import jax.numpy as jnp SRCoreWrapperOutput = collections.namedtuple( "SRCoreWrapperOutput", ["output", "synthetic_return", "augmented_return", "sr_loss"]) class EpisodicMemory(hk.RNNCore): """Episodic Memory module.""" def __init__(self, memory_size, capacity, name="episodic_memory"): """Constructor. Args: memory_size: Integer. The size of the vectors to be stored. capacity: Integer. The maximum number of memories to store before it becomes necessary to overwrite old memories. name: String. A name for this Haiku module instance. """ super().__init__(name=name) self._memory_size = memory_size self._capacity = capacity def __call__(self, inputs, prev_state): """Writes a new memory into the episodic memory. Args: inputs: A Tensor of shape ``[batch_size, memory_size]``. prev_state: The previous state of the episodic memory, which is a tuple with a (i) counter of shape ``[batch_size, 1]`` indicating how many memories have been written so far, and (ii) a tensor of shape ``[batch_size, capacity, memory_size]`` with the full content of the episodic memory. Returns: A tuple with (i) a tensor of shape ``[batch_size, capacity, memory_size]`` with the full content of the episodic memory, including the newly written memory, and (ii) the new state of the episodic memory. """ inputs = jax.lax.stop_gradient(inputs) counter, memories = prev_state counter_mod = jnp.mod(counter, self._capacity) slot_selector = jnp.expand_dims( jax.nn.one_hot(counter_mod, self._capacity), axis=2) memories = memories * (1 - slot_selector) + ( slot_selector * jnp.expand_dims(inputs, 1)) counter = counter + 1 return memories, (counter, memories) def initial_state(self, batch_size): """Creates the initial state of the episodic memory. Args: batch_size: Integer. The batch size of the episodic memory. Returns: A tuple with (i) a counter of shape ``[batch_size, 1]`` and (ii) a tensor of shape ``[batch_size, capacity, memory_size]`` with the full content of the episodic memory. """ if batch_size is None: shape = [] else: shape = [batch_size] counter = jnp.zeros(shape) memories = jnp.zeros(shape + [self._capacity, self._memory_size]) return (counter, memories) class SyntheticReturnsCoreWrapper(hk.RNNCore): """Synthetic Returns core wrapper.""" def __init__(self, core, memory_size, capacity, hidden_layers, alpha, beta, loss_func=(lambda x, y: 0.5 * jnp.square(x - y)), apply_core_to_input=False, name="synthetic_returns_wrapper"): """Constructor. Args: core: hk.RNNCore. The recurrent core of the agent. E.g. an LSTM. memory_size: Integer. The size of the vectors to be stored in the episodic memory. capacity: Integer. The maximum number of memories to store before it becomes necessary to overwrite old memories. 
hidden_layers: Tuple or list of integers, indicating the size of the hidden layers of the MLPs used to produce synthetic returns, current state bias, and gate. alpha: The multiplier of the synthetic returns term in the augmented return. beta: The multiplier of the environment returns term in the augmented return. loss_func: A function of two arguments (predictions and targets) to compute the SR loss. apply_core_to_input: Boolean. Whether to apply the core on the inputs. If true, the synthetic returns will be computed from the outputs of the RNN core passed to the constructor. If false, the RNN core will be applied only at the output of this wrapper, and the synthetic returns will be computed from the inputs. name: String. A name for this Haiku module instance. """ super().__init__(name=name) self._em = EpisodicMemory(memory_size, capacity) self._capacity = capacity hidden_layers = list(hidden_layers) self._synthetic_return = hk.nets.MLP(hidden_layers + [1]) self._bias = hk.nets.MLP(hidden_layers + [1]) self._gate = hk.Sequential([ hk.nets.MLP(hidden_layers + [1]), jax.nn.sigmoid, ]) self._apply_core_to_input = apply_core_to_input self._core = core self._alpha = alpha self._beta = beta self._loss = loss_func def initial_state(self, batch_size): return ( self._em.initial_state(batch_size), self._core.initial_state(batch_size) ) def __call__(self, inputs, prev_state): current_input, return_target = inputs em_state, core_state = prev_state (counter, memories) = em_state if self._apply_core_to_input: current_input, core_state = self._core(current_input, core_state) # Synthetic return for the current state synth_return = jnp.squeeze(self._synthetic_return(current_input), -1) # Current state bias term bias = self._bias(current_input) # Gate computed from current state gate = self._gate(current_input) # When counter > capacity, mask will be all ones mask = 1 - jnp.cumsum(jax.nn.one_hot(counter, self._capacity), axis=1) mask = jnp.expand_dims(mask, axis=2) # Synthetic returns for each state in memory past_synth_returns = hk.BatchApply(self._synthetic_return)(memories) # Sum of synthetic returns from previous states sr_sum = jnp.sum(past_synth_returns * mask, axis=1) prediction = jnp.squeeze(sr_sum * gate + bias, -1) sr_loss = self._loss(prediction, return_target) augmented_return = jax.lax.stop_gradient( self._alpha * synth_return + self._beta * return_target) # Write current state to memory _, em_state = self._em(current_input, em_state) if not self._apply_core_to_input: output, core_state = self._core(current_input, core_state) else: output = current_input output = SRCoreWrapperOutput( output=output, synthetic_return=synth_return, augmented_return=augmented_return, sr_loss=sr_loss, ) return output, (em_state, core_state)
deepmind-research-master
synthetic_returns/synthetic_returns.py
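A minimal single-step sketch (not from the repository) of wrapping an LSTM with the SyntheticReturnsCoreWrapper above. The sizes and hyper-parameters are illustrative, zero inputs stand in for real observations and returns, and the module is assumed to be importable as synthetic_returns.

import haiku as hk
import jax
import jax.numpy as jnp

from synthetic_returns import SyntheticReturnsCoreWrapper

def forward(observation, return_target):
  wrapper = SyntheticReturnsCoreWrapper(
      core=hk.LSTM(16), memory_size=observation.shape[-1], capacity=10,
      hidden_layers=(32,), alpha=0.1, beta=1.0)
  state = wrapper.initial_state(batch_size=observation.shape[0])
  output, _ = wrapper((observation, return_target), state)
  return output

forward_fn = hk.transform(forward)
observation = jnp.zeros((2, 8))   # [batch_size, memory_size]
return_target = jnp.zeros((2,))   # [batch_size]
params = forward_fn.init(jax.random.PRNGKey(0), observation, return_target)
output = forward_fn.apply(params, jax.random.PRNGKey(1), observation, return_target)
print(output.synthetic_return.shape, output.sr_loss.shape)  # (2,) (2,)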
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Memory & Planning Game environment.""" import string import dm_env import matplotlib.pyplot as plt import networkx as nx import numpy as np class MemoryPlanningGame(dm_env.Environment): """Memory & Planning Game environment.""" ACTION_NAMES = ['Up', 'Down', 'Left', 'Right', 'Collect'] NUM_ACTIONS = len(ACTION_NAMES) DIRECTIONS = [ (0, 1), # Up (0, -1), # Down (-1, 0), # Left (1, 0), # Right (0, 0), # Collect ] def __init__(self, maze_size=4, max_episode_steps=100, target_reward=1., per_step_reward=0., random_respawn=False, seed=None): """The Memory & Planning Game environment. Args: maze_size: (int) size of the maze dimension. max_episode_steps: (int) number of steps per episode. target_reward: (float) reward value of the target. per_step_reward: (float) reward/cost of taking a step. random_respawn: (bool) whether the agent respawns in a random location upon collecting the goal. seed: (int or None) seed for random number generator. """ self._maze_size = maze_size self._num_labels = maze_size * maze_size # The graph itself is the same across episodes, but the node labels will be # randomly sampled in each episode. self._graph = nx.grid_2d_graph( self._maze_size, self._maze_size, periodic=True) self._max_episode_steps = max_episode_steps self._target_reward = target_reward self._per_step_reward = per_step_reward self._random_respawn = random_respawn self._rng = np.random.RandomState(seed) def _one_hot(self, node): one_hot_vector = np.zeros([self._num_labels], dtype=np.int32) one_hot_vector[self._labels[node]] = 1 return one_hot_vector def step(self, action): # If previous step was the last step of an episode, reset. if self._needs_reset: return self.reset() # Increment step count and check if it's the last step of the episode. self._episode_steps += 1 if self._episode_steps >= self._max_episode_steps: self._needs_reset = True transition = dm_env.termination else: transition = dm_env.transition # Recompute agent's position given the selected action. direction = self.DIRECTIONS[action] self._position = tuple( (np.array(self._position) + np.array(direction)) % self._maze_size) self._previous_action = self.ACTION_NAMES[action] # Get reward if agent is over the goal location and the selected action is # `collect`. 
if self._position == self._goal and self.ACTION_NAMES[action] == 'Collect': reward = self._target_reward self._set_new_goal() else: reward = self._per_step_reward self._episode_reward += reward return transition(reward, self._observation()) def _observation(self): return { 'position': np.array(self._one_hot(self.position), dtype=np.int32), 'goal': np.array(self._one_hot(self.goal), dtype=np.int32), } def observation_spec(self): return { 'position': dm_env.specs.Array( shape=(self._num_labels,), dtype=np.int32, name='position'), 'goal': dm_env.specs.Array( shape=(self._num_labels,), dtype=np.int32, name='goal'), } def action_spec(self): return dm_env.specs.DiscreteArray(self.NUM_ACTIONS) def take_random_action(self): return self.step(self._rng.randint(self.NUM_ACTIONS)) def reset(self): self._previous_action = '' self._episode_reward = 0. self._episode_steps = 0 self._needs_reset = False random_labels = self._rng.permutation(self._num_labels) self._labels = {n: random_labels[i] for i, n in enumerate(self._graph.nodes())} self._respawn() self._set_new_goal() return dm_env.restart(self._observation()) def _respawn(self): random_idx = self._rng.randint(self._num_labels) self._position = list(self._graph.nodes())[random_idx] def _set_new_goal(self): if self._random_respawn: self._respawn() goal = self._position while goal == self._position: random_idx = self._rng.randint(self._num_labels) goal = list(self._graph.nodes())[random_idx] self._goal = goal @property def position(self): return self._position @property def goal(self): return self._goal @property def previous_action(self): return self._previous_action @property def episode_reward(self): return self._episode_reward def draw_maze(self, ax=None): if ax is None: plt.figure() ax = plt.gca() node_positions = {(x, y): (x, y) for x, y in self._graph.nodes()} letters = string.ascii_uppercase + string.ascii_lowercase labels = {n: letters[self._labels[n]] for n in self._graph.nodes()} node_list = list(self._graph.nodes()) colors = [] for n in node_list: if n == self.position: colors.append('lightblue') elif n == self.goal: colors.append('lightgreen') else: colors.append('pink') nx.draw(self._graph, pos=node_positions, nodelist=node_list, ax=ax, node_color=colors, with_labels=True, node_size=200, labels=labels) ax.set_title('{}\nEpisode reward={:.1f}'.format( self.previous_action, self.episode_reward)) ax.margins(.1) return plt.gcf(), ax
deepmind-research-master
rapid_task_solving/memory_planning_game.py
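A short, illustrative random-walk episode (not part of the repository) on the environment above; the maze size and step budget are arbitrary.

from memory_planning_game import MemoryPlanningGame

env = MemoryPlanningGame(maze_size=4, max_episode_steps=20, seed=1)
timestep = env.reset()
while not timestep.last():
  timestep = env.take_random_action()
print('Episode reward:', env.episode_reward)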
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """One-shot StreetLearn environment.""" import dm_env import matplotlib.pyplot as plt import networkx as nx import numpy as np def deg_to_rad(x): """Convert degrees to radians.""" return x / 180. * np.pi def rad_to_deg(x): """Convert radians to degrees.""" return x * 180. / np.pi class OneShotStreetLearn(dm_env.Environment): """One-shot Streetlearn environment.""" ACTION_NAMES = [ 'Forward', 'Left', 'Right', 'Collect', ] NUM_ACTIONS = len(ACTION_NAMES) def __init__(self, dataset_path, max_episode_steps, num_junctions=8, target_reward=1., per_step_reward=0., observation_length=60, seed=None): self._graph = nx.read_gexf(dataset_path) self._node_attrs = self._graph.nodes(data=True) self._num_junctions = num_junctions self._observation_length = observation_length self._max_episode_steps = max_episode_steps self._target_reward = target_reward self._per_step_reward = per_step_reward self._rng = np.random.RandomState(seed) self.reset() def reset(self): self._previous_action = '' self._episode_reward = 0. self._episode_steps = 0 self._needs_reset = False self._subgraph = self.get_random_subgraph() self._observation_map = self.randomize_observations(self._subgraph) self._position = self._rng.choice(list(self._subgraph.nodes())) neighbours = self._neighbors_bearings(self._subgraph, self._position) self._neighbour = neighbours[self._rng.randint(len(neighbours))] self._set_new_goal() return dm_env.restart(self._observation()) @property def _current_edge(self): return (self._position, self._neighbour['neighbour']) def _set_new_goal(self): goal = None edges = list(self._observation_map.keys()) while goal is None or goal == self._current_edge: goal = edges[self._rng.randint(len(edges))] self._goal = goal def _one_hot(self, edge): one_hot_vector = np.zeros([self._observation_length], dtype=np.int32) one_hot_vector[self._observation_map[edge]] = 1 return one_hot_vector def _observation(self): return { 'position': np.array(self._one_hot(self._current_edge), dtype=np.int32), 'goal': np.array(self._one_hot(self._goal), dtype=np.int32), } def observation_spec(self): return { 'position': dm_env.specs.Array( shape=(self._observation_length,), dtype=np.int32, name='position'), 'goal': dm_env.specs.Array( shape=(self._observation_length,), dtype=np.int32, name='goal'), } def action_spec(self): return dm_env.specs.DiscreteArray(self.NUM_ACTIONS) def step(self, action): # If previous step was the last step of an episode, reset. if self._needs_reset: return self.reset() # Increment step count and check if it's the last step of the episode. self._episode_steps += 1 if self._episode_steps >= self._max_episode_steps: self._needs_reset = True transition = dm_env.termination else: transition = dm_env.transition # Recompute agent's position self._move(action) self._previous_action = self.ACTION_NAMES[action] # Get reward if agent is at the goal location and the selected action is # `collect`. 
if (self._current_edge == self._goal and self.ACTION_NAMES[action] == 'Collect'): reward = self._target_reward self._set_new_goal() else: reward = self._per_step_reward self._episode_reward += reward return transition(reward, self._observation()) def randomize_observations(self, subgraph): edges = list(subgraph.edges()) edges.extend([(y, x) for (x, y) in edges]) obs_permutation = self._rng.permutation(self._observation_length) return {e: obs_permutation[i] for i, e in enumerate(edges)} def _calculate_bearing(self, node, neighbor): lat1 = deg_to_rad(self._node_attrs[node]['lat']) lng1 = deg_to_rad(self._node_attrs[node]['lng']) lat2 = deg_to_rad(self._node_attrs[neighbor]['lat']) lng2 = deg_to_rad(self._node_attrs[neighbor]['lng']) delta_lng = lng2 - lng1 theta = np.arctan2( np.sin(delta_lng) * np.cos(lat2), np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(delta_lng)) return theta def _neighbors_bearings(self, subgraph, node): bearings = [] for neighbor in list(subgraph[node]): orientation = self._calculate_bearing(node, neighbor) bearings.append({'neighbour': neighbor, 'orientation': orientation}) bearings.sort(key=lambda x: x['orientation']) return bearings def _sort_neighbors(self, node, neighbour): bearings = self._neighbors_bearings(self._subgraph, node) bs = [x['orientation'] for x in bearings] idx = np.argmin(np.abs(bs - neighbour['orientation'])) return { 'forward': bearings[idx], 'right': bearings[idx-1], 'left': bearings[(idx+1) % len(bearings)], } def _move(self, action): neighbours = self._sort_neighbors(self._position, self._neighbour) if action == 0: new_node = self._neighbour['neighbour'] neighbours = self._sort_neighbors(new_node, neighbours['forward']) new_neighbour = neighbours['forward'] else: new_node = self._position if action == 1: new_neighbour = neighbours['left'] elif action == 2: new_neighbour = neighbours['right'] else: new_neighbour = self._neighbour self._position = new_node self._neighbour = new_neighbour def _all_next_junctions(self, subgraph, node): neighbors = list(subgraph[node]) edges = [self._get_next_junction(subgraph, node, nb) for nb in neighbors] nodes = [y for (_, y) in edges] return nodes, edges def _get_next_junction(self, subgraph, initial_node, next_node): node = initial_node while subgraph.degree(next_node) == 2: neighbours = list(subgraph.neighbors(next_node)) neighbours.remove(node) node = next_node next_node = neighbours.pop() return (initial_node, next_node) def get_random_subgraph(self): graph = self._graph num_nodes = len(graph) rnd_index = self._rng.randint(num_nodes) center_node = list(graph.nodes())[rnd_index] while graph.degree(center_node) <= 2: rnd_index = self._rng.randint(num_nodes) center_node = list(graph.nodes())[rnd_index] to_visit = [center_node] visited = [] subgraph = nx.Graph() while to_visit: node = to_visit.pop(0) visited.append(node) new_nodes, new_edges = self._all_next_junctions(graph, node) subgraph.add_edges_from(new_edges) node_degrees = [subgraph.degree(n) for n in subgraph.nodes()] count_junctions = len(list(filter(lambda x: x > 2, node_degrees))) if count_junctions >= self._num_junctions: break new_nodes = filter(lambda x: x not in visited + to_visit, new_nodes) to_visit.extend(new_nodes) return subgraph def draw_subgraph(self, ax=None): if ax is None: _ = plt.figure(figsize=(3, 3)) ax = plt.gca() node_ids = list(self._subgraph.nodes()) pos = { x: (self._node_attrs[x]['lat'], self._node_attrs[x]['lng']) for x in node_ids } labels = {} nc = 'pink' ec = 'black' ns = 50 nshape = 'o' # Draw the 
current subgraph nx.draw(self._subgraph, pos=pos, node_color=nc, with_labels=False, node_size=ns, labels=labels, edgecolors=ec, node_shape=nshape, ax=ax) max_xy = np.array([np.array(x) for x in pos.values()]).max(0) min_xy = np.array([np.array(x) for x in pos.values()]).min(0) delta_xy = (max_xy - min_xy) / 6. ax.set_xlim([min_xy[0] - delta_xy[0], max_xy[0] + delta_xy[0]]) ax.set_ylim([min_xy[1] - delta_xy[1], max_xy[1] + delta_xy[1]]) # Draw goal position and orientation x = self._node_attrs[self._goal[0]]['lat'] y = self._node_attrs[self._goal[0]]['lng'] rotation = rad_to_deg(self._calculate_bearing(*self._goal)) _ = ax.plot(x, y, marker=(3, 0, rotation - 90), color=(0, 0, 0), markersize=14, markerfacecolor='white') _ = ax.plot(x, y, marker=(2, 0, rotation - 90), color=(0, 0, 0), markersize=12, markerfacecolor='None') # Draw current position and orientation x = self._node_attrs[self._position]['lat'] y = self._node_attrs[self._position]['lng'] rotation = rad_to_deg(self._neighbour['orientation']) _ = ax.plot(x, y, marker=(3, 0, rotation - 90), color=(0, 0, 0), markersize=14, markerfacecolor='lightgreen') _ = ax.plot(x, y, marker=(2, 0, rotation - 90), color=(0, 0, 0), markersize=12, markerfacecolor='None') ax.set_title('{}\nEpisode reward = {}'.format( self._previous_action, self._episode_reward)) return plt.gcf(), ax
deepmind-research-master
rapid_task_solving/one_shot_streetlearn.py
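A hypothetical usage sketch (not from the repository): 'my_region.gexf' stands in for a real StreetLearn graph file whose nodes carry 'lat' and 'lng' attributes, which the environment requires; actions are indices into ACTION_NAMES.

import numpy as np

from one_shot_streetlearn import OneShotStreetLearn

env = OneShotStreetLearn(dataset_path='my_region.gexf',  # hypothetical file
                         max_episode_steps=50, num_junctions=8, seed=0)
timestep = env.reset()
rng = np.random.RandomState(0)
total_reward = 0.
while not timestep.last():
  action = rng.randint(env.NUM_ACTIONS)  # Forward / Left / Right / Collect
  timestep = env.step(action)
  total_reward += timestep.reward
print('Episode reward:', total_reward)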
# Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Submission to Unrestricted Adversarial Challenge.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf import tensorflow_hub as hub from unrestricted_advex import eval_kit def _preprocess_image(image): image = tf.image.central_crop(image, central_fraction=0.875) image = tf.image.resize_bilinear(image, [224, 224], align_corners=False) return image def test_preprocess(image): image = _preprocess_image(image) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image def main(): g = tf.Graph() with g.as_default(): input_tensor = tf.placeholder(tf.float32, (None, 224, 224, 3)) x_np = test_preprocess(input_tensor) raw_module_1 = hub.Module( "https://tfhub.dev/deepmind/llr-pretrain-adv/latents/1") raw_module_2 = hub.Module( "https://tfhub.dev/deepmind/llr-pretrain-adv/linear/1") latents = raw_module_1(dict(inputs=x_np, decay_rate=0.1)) logits = raw_module_2(dict(inputs=latents)) logits = tf.squeeze(logits, axis=[1, 2]) two_class_logits = tf.concat([tf.nn.relu(-logits[:, 1:]), tf.nn.relu(logits[:, 1:])], axis=1) sess = tf.train.SingularMonitoredSession() def model(x_np): return sess.run(two_class_logits, feed_dict={input_tensor: x_np}) eval_kit.evaluate_bird_or_bicycle_model(model, model_name="llr_resnet") if __name__ == "__main__": main()
deepmind-research-master
unrestricted_advx/main.py
# Lint as: python2, python3 # pylint: disable=g-bad-file-header # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """nest utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import range from tensorflow.contrib import framework as contrib_framework nest = contrib_framework.nest def _nest_apply_over_list(list_of_nests, fn): """Equivalent to fn, but works on list-of-nests. Transforms a list-of-nests to a nest-of-lists, then applies `fn` to each of the inner lists. It is assumed that all nests have the same structure. Elements of the nest may be None, in which case they are ignored, i.e. they do not form part of the stack. This is useful when stacking agent states where parts of the state nest have been filtered. Args: list_of_nests: A Python list of nests. fn: the function applied on the list of leaves. Returns: A nest-of-arrays, where the arrays are formed by `fn`ing a list. """ list_of_flat_nests = [nest.flatten(n) for n in list_of_nests] flat_nest_of_stacks = [] for position in range(len(list_of_flat_nests[0])): new_list = [flat_nest[position] for flat_nest in list_of_flat_nests] new_list = [x for x in new_list if x is not None] flat_nest_of_stacks.append(fn(new_list)) return nest.pack_sequence_as( structure=list_of_nests[0], flat_sequence=flat_nest_of_stacks) def _take_indices(inputs, indices): return nest.map_structure(lambda t: np.take(t, indices, axis=0), inputs) def nest_stack(list_of_nests, axis=0): """Equivalent to np.stack, but works on list-of-nests. Transforms a list-of-nests to a nest-of-lists, then applies `np.stack` to each of the inner lists. It is assumed that all nests have the same structure. Elements of the nest may be None, in which case they are ignored, i.e. they do not form part of the stack. This is useful when stacking agent states where parts of the state nest have been filtered. Args: list_of_nests: A Python list of nests. axis: Optional, the `axis` argument for `np.stack`. Returns: A nest-of-arrays, where the arrays are formed by `np.stack`ing a list. """ return _nest_apply_over_list(list_of_nests, lambda l: np.stack(l, axis=axis)) def nest_unstack(batched_inputs, batch_size): """Splits a sequence of numpy arrays along 0th dimension.""" return [_take_indices(batched_inputs, idx) for idx in range(batch_size)]
deepmind-research-master
tvt/nest_utils.py
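A small numpy-only illustration (not from the repository) of stacking and unstacking nests of agent-state arrays. It assumes a TF 1.x installation with tf.contrib available, since nest_utils imports tensorflow.contrib.framework, and that the module is importable as nest_utils.

import numpy as np

import nest_utils

steps = [{'obs': np.full(3, i, dtype=np.float32), 'reward': np.float32(i)}
         for i in range(4)]
batched = nest_utils.nest_stack(steps)
print(batched['obs'].shape, batched['reward'].shape)   # (4, 3) (4,)

unbatched = nest_utils.nest_unstack(batched, batch_size=4)
print(unbatched[2]['obs'], unbatched[2]['reward'])     # [2. 2. 2.] 2.0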
# Lint as: python2, python3 # pylint: disable=g-bad-file-header # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Memory Reader/Writer for RMA.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import sonnet as snt import tensorflow.compat.v1 as tf ReadInformation = collections.namedtuple( 'ReadInformation', ('weights', 'indices', 'keys', 'strengths')) class MemoryWriter(snt.RNNCore): """Memory Writer Module.""" def __init__(self, mem_shape, name='memory_writer'): """Initializes the `MemoryWriter`. Args: mem_shape: The shape of the memory `(num_rows, memory_width)`. name: The name to use for the Sonnet module. """ super(MemoryWriter, self).__init__(name=name) self._mem_shape = mem_shape def _build(self, inputs, state): """Inserts z into the argmin row of usage markers and updates all rows. Returns an operation that, when executed, correctly updates the internal state and usage markers. Args: inputs: A tuple consisting of: * z, the value to write at this timestep * mem_state, the state of the memory at this timestep before writing state: The state is just the write_counter. Returns: A tuple of the new memory state and a tuple containing the next state. """ z, mem_state = inputs # Stop gradient on writes to memory. z = tf.stop_gradient(z) prev_write_counter = state new_row_value = z # Find the index to insert the next row into. num_mem_rows = self._mem_shape[0] write_index = tf.cast(prev_write_counter, dtype=tf.int32) % num_mem_rows one_hot_row = tf.one_hot(write_index, num_mem_rows) write_counter = prev_write_counter + 1 # Insert state variable to new row. # First you need to size it up to the full size. insert_new_row = lambda mem, o_hot, z: mem - (o_hot * mem) + (o_hot * z) new_mem = insert_new_row(mem_state, tf.expand_dims(one_hot_row, axis=-1), tf.expand_dims(new_row_value, axis=-2)) new_state = write_counter return new_mem, new_state @property def state_size(self): """Returns a description of the state size, without batch dimension.""" return tf.TensorShape([]) @property def output_size(self): """Returns a description of the output size, without batch dimension.""" return self._mem_shape class MemoryReader(snt.AbstractModule): """Memory Reader Module.""" def __init__(self, memory_word_size, num_read_heads, top_k=0, memory_size=None, name='memory_reader'): """Initializes the `MemoryReader`. Args: memory_word_size: The dimension of the 1-D read keys this memory reader should produce. Each row of the memory is of length `memory_word_size`. num_read_heads: The number of reads to perform. top_k: Softmax and summation when reading is only over top k most similar entries in memory. top_k=0 (default) means dense reads, i.e. no top_k. memory_size: Number of rows in memory. name: The name for this Sonnet module. 
""" super(MemoryReader, self).__init__(name=name) self._memory_word_size = memory_word_size self._num_read_heads = num_read_heads self._top_k = top_k # This is not an RNNCore but it is useful to expose the output size. self._output_size = num_read_heads * memory_word_size num_read_weights = top_k if top_k > 0 else memory_size self._read_info_size = ReadInformation( weights=tf.TensorShape([num_read_heads, num_read_weights]), indices=tf.TensorShape([num_read_heads, num_read_weights]), keys=tf.TensorShape([num_read_heads, memory_word_size]), strengths=tf.TensorShape([num_read_heads]), ) with self._enter_variable_scope(): # Transforms to value-based read for each read head. output_dim = (memory_word_size + 1) * num_read_heads self._keys_and_read_strengths_generator = snt.Linear(output_dim) def _build(self, inputs): """Looks up rows in memory. In the args list, we have the following conventions: B: batch size M: number of slots in a row of the memory matrix R: number of rows in the memory matrix H: number of read heads in the memory controller Args: inputs: A tuple of * read_inputs, a tensor of shape [B, ...] that will be flattened and passed through a linear layer to get read keys/read_strengths for each head. * mem_state, the primary memory tensor. Of shape [B, R, M]. Returns: The read from the memory (concatenated across read heads) and read information. """ # Assert input shapes are compatible and separate inputs. _assert_compatible_memory_reader_input(inputs) read_inputs, mem_state = inputs # Determine the read weightings for each key. flat_outputs = self._keys_and_read_strengths_generator( snt.BatchFlatten()(read_inputs)) # Separate the read_strengths from the rest of the weightings. h = self._num_read_heads flat_keys = flat_outputs[:, :-h] read_strengths = tf.nn.softplus(flat_outputs[:, -h:]) # Reshape the weights. read_shape = (self._num_read_heads, self._memory_word_size) read_keys = snt.BatchReshape(read_shape)(flat_keys) # Read from memory. memory_reads, read_weights, read_indices, read_strengths = ( read_from_memory(read_keys, read_strengths, mem_state, self._top_k)) concatenated_reads = snt.BatchFlatten()(memory_reads) return concatenated_reads, ReadInformation( weights=read_weights, indices=read_indices, keys=read_keys, strengths=read_strengths) @property def output_size(self): """Returns a description of the output size, without batch dimension.""" return self._output_size, self._read_info_size def read_from_memory(read_keys, read_strengths, mem_state, top_k): """Function for cosine similarity content based reading from memory matrix. In the args list, we have the following conventions: B: batch size M: number of slots in a row of the memory matrix R: number of rows in the memory matrix H: number of read heads (of the controller or the policy) K: top_k if top_k>0 Args: read_keys: the read keys of shape [B, H, M]. read_strengths: the coefficients used to compute the normalised weighting vector of shape [B, H]. mem_state: the primary memory tensor. Of shape [B, R, M]. top_k: only use top k read matches, other reads do not go into softmax and are zeroed out in the output. top_k=0 (default) means use dense reads. Returns: The memory reads [B, H, M], read weights [B, H, top k], read indices [B, H, top k], and read strengths [B, H, 1]. 
""" _assert_compatible_read_from_memory_inputs(read_keys, read_strengths, mem_state) batch_size = read_keys.shape[0] num_read_heads = read_keys.shape[1] with tf.name_scope('memory_reading'): # Scale such that all rows are L2-unit vectors, for memory and read query. scaled_read_keys = tf.math.l2_normalize(read_keys, axis=-1) # [B, H, M] scaled_mem = tf.math.l2_normalize(mem_state, axis=-1) # [B, R, M] # The cosine distance is then their dot product. # Find the cosine distance between each read head and each row of memory. cosine_distances = tf.matmul( scaled_read_keys, scaled_mem, transpose_b=True) # [B, H, R] # The rank must match cosine_distances for broadcasting to work. read_strengths = tf.expand_dims(read_strengths, axis=-1) # [B, H, 1] weighted_distances = read_strengths * cosine_distances # [B, H, R] if top_k: # Get top k indices (row indices with top k largest weighted distances). top_k_output = tf.nn.top_k(weighted_distances, top_k, sorted=False) read_indices = top_k_output.indices # [B, H, K] # Create a sub-memory for each read head with only the top k rows. # Each batch_gather is [B, K, M] and the list stacks to [B, H, K, M]. topk_mem_per_head = [tf.batch_gather(mem_state, ri_this_head) for ri_this_head in tf.unstack(read_indices, axis=1)] topk_mem = tf.stack(topk_mem_per_head, axis=1) # [B, H, K, M] topk_scaled_mem = tf.math.l2_normalize(topk_mem, axis=-1) # [B, H, K, M] # Calculate read weights for each head's top k sub-memory. expanded_scaled_read_keys = tf.expand_dims( scaled_read_keys, axis=2) # [B, H, 1, M] topk_cosine_distances = tf.reduce_sum( expanded_scaled_read_keys * topk_scaled_mem, axis=-1) # [B, H, K] topk_weighted_distances = ( read_strengths * topk_cosine_distances) # [B, H, K] read_weights = tf.nn.softmax( topk_weighted_distances, axis=-1) # [B, H, K] # For each head, read using the sub-memories and corresponding weights. expanded_weights = tf.expand_dims(read_weights, axis=-1) # [B, H, K, 1] memory_reads = tf.reduce_sum( expanded_weights * topk_mem, axis=2) # [B, H, M] else: read_weights = tf.nn.softmax(weighted_distances, axis=-1) num_rows_memory = mem_state.shape[1] all_indices = tf.range(num_rows_memory, dtype=tf.int32) all_indices = tf.reshape(all_indices, [1, 1, num_rows_memory]) read_indices = tf.tile(all_indices, [batch_size, num_read_heads, 1]) # This is the actual memory access. # Note that matmul automatically batch applies for us. memory_reads = tf.matmul(read_weights, mem_state) read_keys.shape.assert_is_compatible_with(memory_reads.shape) read_strengths = tf.squeeze(read_strengths, axis=-1) # [B, H, 1] -> [B, H] return memory_reads, read_weights, read_indices, read_strengths def _assert_compatible_read_from_memory_inputs(read_keys, read_strengths, mem_state): read_keys.shape.assert_has_rank(3) b_shape, h_shape, m_shape = read_keys.shape mem_state.shape.assert_has_rank(3) r_shape = mem_state.shape[1] read_strengths.shape.assert_is_compatible_with( tf.TensorShape([b_shape, h_shape])) mem_state.shape.assert_is_compatible_with( tf.TensorShape([b_shape, r_shape, m_shape])) def _assert_compatible_memory_reader_input(input_tensors): """Asserts MemoryReader's _build has been given the correct shapes.""" assert len(input_tensors) == 2 _, mem_state = input_tensors mem_state.shape.assert_has_rank(3)
deepmind-research-master
tvt/memory.py
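To make the writer/reader contract above concrete, here is a small graph-mode sketch (not from the repository) that writes one timestep into an empty memory and then reads from it. The batch size, memory shape and feature sizes are arbitrary choices, and it assumes TensorFlow 1.x with Sonnet 1.

import tensorflow.compat.v1 as tf

from tvt import memory as tvt_memory

batch_size, num_rows, word_size, num_heads, top_k = 2, 8, 5, 3, 4

writer = tvt_memory.MemoryWriter(mem_shape=(num_rows, word_size))
reader = tvt_memory.MemoryReader(
    memory_word_size=word_size, num_read_heads=num_heads,
    top_k=top_k, memory_size=num_rows)

# Empty memory and a zero write counter for every batch element.
mem_state = tf.zeros([batch_size, num_rows, word_size])
write_counter = tf.zeros([batch_size])

# Write one timestep's embedding z into the next row of each memory.
z = tf.random.normal([batch_size, word_size])
mem_state, write_counter = writer((z, mem_state), write_counter)

# Read with keys generated from some controller features.
controller_features = tf.random.normal([batch_size, 16])
reads, read_info = reader((controller_features, mem_state))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  reads_np, weights_np = sess.run([reads, read_info.weights])
  print(reads_np.shape)    # (batch_size, num_heads * word_size) = (2, 15)
  print(weights_np.shape)  # (batch_size, num_heads, top_k) = (2, 3, 4)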
# Lint as: python2, python3 # pylint: disable=g-bad-file-header # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Temporal Value Transport implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from concurrent import futures import numpy as np from six.moves import range from six.moves import zip def _unstack(array, axis): """Opposite of np.stack.""" split_array = np.split(array, array.shape[axis], axis=axis) return [np.squeeze(a, axis=axis) for a in split_array] def _top_k_args(array, k): """Return top k arguments or all arguments if array size is less than k.""" if len(array) <= k: return np.arange(len(array)) return np.argpartition(array, kth=-k)[-k:] def _threshold_read_event_times(read_strengths, threshold): """Return the times of max read strengths within one threshold read event.""" chosen_times = [] over_threshold = False max_read_strength = 0. # Wait until the threshold is crossed then keep track of max read strength and # time of max read strength until the read strengths go back under the # threshold, then add that max read strength time to the chosen times. Wait # until threshold is crossed again and then repeat the process. for time, strength in enumerate(read_strengths): if strength > threshold: over_threshold = True if strength > max_read_strength: max_read_strength = strength max_read_strength_time = time else: # If coming back under threshold, add the time of the last max read. if over_threshold: chosen_times.append(max_read_strength_time) max_read_strength = 0. over_threshold = False # Add max read strength time if episode finishes before going under threshold. if over_threshold: chosen_times.append(max_read_strength_time) return np.array(chosen_times) def _tvt_rewards_single_head(read_weights, read_strengths, read_times, baselines, alpha, top_k_t1, read_strength_threshold, no_transport_period): """Compute TVT rewards for a single read head, no batch dimension. This performs the updates for one read head. `t1` and `t2` refer to times to where and from where the value is being transported, respectively. I.e. the rewards at `t1` times are being modified based on values at times `t2`. Args: read_weights: shape (ep_length, top_k). read_strengths: shape (ep_length,). read_times: shape (ep_length, top_k). baselines: shape (ep_length,). alpha: The multiplier for the temporal value transport rewards. top_k_t1: For each read event time, this determines how many time points to send tvt reward to. read_strength_threshold: Read strengths below this value are ignored. no_transport_period: Length of no_transport_period. Returns: An array of TVT rewards with shape (ep_length,). """ tvt_rewards = np.zeros_like(baselines) # Mask read_weights for reads that read back to times within # no_transport_period of current time. 
ep_length = read_times.shape[0] times = np.arange(ep_length) # Expand dims for correct broadcasting when subtracting read_times. times = np.expand_dims(times, -1) read_past_no_transport_period = (times - read_times) > no_transport_period read_weights_masked = np.where(read_past_no_transport_period, read_weights, np.zeros_like(read_weights)) # Find t2 times with maximum read weights. Ignore t2 times whose maximum # read weights fall inside the no_transport_period. max_read_weight_args = np.argmax(read_weights, axis=1) # (ep_length,) times = np.arange(ep_length) max_read_weight_times = read_times[times, max_read_weight_args] # (ep_length,) read_strengths_cut = np.where( times - max_read_weight_times > no_transport_period, read_strengths, np.zeros_like(read_strengths)) # Filter t2 candidates to perform value transport on local maximums # above a threshold. t2_times_with_largest_reads = _threshold_read_event_times( read_strengths_cut, read_strength_threshold) # Loop through all t2 candidates and transport value to top_k_t1 read times. for t2 in t2_times_with_largest_reads: try: baseline_value_when_reading = baselines[t2] except IndexError: raise RuntimeError("Attempting to access baselines array with length {}" " at index {}. Make sure output_baseline is set in" " the agent config.".format(len(baselines), t2)) read_times_from_t2 = read_times[t2] read_weights_from_t2 = read_weights_masked[t2] # Find the top_k_t1 read times for this t2 and their corresponding read # weights. The call to _top_k_args() here gives the array indices for the # times and weights of the top_k_t1 reads from t2. top_t1_indices = _top_k_args(read_weights_from_t2, top_k_t1) top_t1_read_times = np.take(read_times_from_t2, top_t1_indices) top_t1_read_weights = np.take(read_weights_from_t2, top_t1_indices) # For each of the top_k_t1 read times t and corresponding read weight w, # find the trajectory that contains step_num (t + shift) and modify the # reward at step_num (t + shift) using w and the baseline value at t2. # We ignore any read times t >= t2. These can emerge because if nothing # in memory matches positively with the read query, the top reads may be # in the empty region of the memory. for step_num, read_weight in zip(top_t1_read_times, top_t1_read_weights): if step_num >= t2: # Skip this step_num as it is not really a memory time. continue # Compute the tvt reward and add it on. tvt_reward = alpha * read_weight * baseline_value_when_reading tvt_rewards[step_num] += tvt_reward return tvt_rewards def _compute_tvt_rewards_from_read_info( read_weights, read_strengths, read_times, baselines, gamma, alpha=0.9, top_k_t1=50, read_strength_threshold=2., no_transport_period_when_gamma_1=25): """Compute TVT rewards given supplied read information, no batch dimension. Args: read_weights: shape (ep_length, num_read_heads, top_k). read_strengths: shape (ep_length, num_read_heads). read_times: shape (ep_length, num_read_heads, top_k). baselines: shape (ep_length,). gamma: Scalar discount factor used to calculate the no_transport_period. alpha: The multiplier for the temporal value transport rewards. top_k_t1: For each read event time, this determines how many time points to send tvt reward to. read_strength_threshold: Read strengths below this value are ignored. no_transport_period_when_gamma_1: no transport period when gamma == 1. Returns: An array of TVT rewards with shape (ep_length,). 
""" if gamma < 1: no_transport_period = int(1 / (1 - gamma)) else: if no_transport_period_when_gamma_1 is None: raise ValueError("No transport period must be defined when gamma == 1.") no_transport_period = no_transport_period_when_gamma_1 # Split read infos by read head. num_read_heads = read_weights.shape[1] read_weights = _unstack(read_weights, axis=1) read_strengths = _unstack(read_strengths, axis=1) read_times = _unstack(read_times, axis=1) # Calcuate TVT rewards for each read head separately and add to total. tvt_rewards = np.zeros_like(baselines) for i in range(num_read_heads): tvt_rewards += _tvt_rewards_single_head( read_weights[i], read_strengths[i], read_times[i], baselines, alpha, top_k_t1, read_strength_threshold, no_transport_period) return tvt_rewards def compute_tvt_rewards(read_infos, baselines, gamma=.96): """Compute TVT rewards from EpisodeOutputs. Args: read_infos: A memory_reader.ReadInformation namedtuple, where each element has shape (ep_length, batch_size, num_read_heads, ...). baselines: A numpy float array with shape (ep_length, batch_size). gamma: Discount factor. Returns: An array of TVT rewards with shape (ep_length,). """ if not read_infos: return np.zeros_like(baselines) # TVT reward computation is without batch dimension. so we need to process # read_infos and baselines into batchwise components. batch_size = baselines.shape[1] # Split each element of read info on batch dim. read_weights = _unstack(read_infos.weights, axis=1) read_strengths = _unstack(read_infos.strengths, axis=1) read_indices = _unstack(read_infos.indices, axis=1) # Split baselines on batch dim. baselines = _unstack(baselines, axis=1) # Comute TVT rewards for each element in the batch (threading over batch). tvt_rewards = [] with futures.ThreadPoolExecutor(max_workers=batch_size) as executor: for i in range(batch_size): tvt_rewards.append( executor.submit( _compute_tvt_rewards_from_read_info, read_weights[i], read_strengths[i], read_indices[i], baselines[i], gamma) ) tvt_rewards = [f.result() for f in tvt_rewards] # Process TVT rewards back into an array of shape (ep_length, batch_size). return np.stack(tvt_rewards, axis=1)
deepmind-research-master
tvt/tvt_rewards.py
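The reward-transport logic above can be exercised directly with numpy. In the toy sketch below (not from the repository), a single batch element with one read head reads strongly at step 35 from rows written around step 5, far enough in the past to clear the no-transport period of roughly 1 / (1 - gamma) steps. The ReadInformation namedtuple is re-declared locally only to avoid importing TensorFlow (the real definition lives in tvt/memory.py), and all numbers are made up.

import collections

import numpy as np

from tvt import tvt_rewards

ReadInformation = collections.namedtuple(
    'ReadInformation', ('weights', 'indices', 'keys', 'strengths'))

ep_length, batch, heads, top_k = 40, 1, 1, 3

weights = np.zeros((ep_length, batch, heads, top_k))
indices = np.zeros((ep_length, batch, heads, top_k), dtype=np.int32)
strengths = np.zeros((ep_length, batch, heads))
baselines = np.ones((ep_length, batch))

# At t2 = 35 the read strength exceeds the default threshold of 2. and the
# read attends to memory rows written at t1 = 5, 6, 7.
weights[35, 0, 0] = [0.7, 0.2, 0.1]
indices[35, 0, 0] = [5, 6, 7]
strengths[35, 0, 0] = 3.0

read_infos = ReadInformation(
    weights=weights, indices=indices, keys=None, strengths=strengths)
rewards = tvt_rewards.compute_tvt_rewards(read_infos, baselines, gamma=0.96)

print(rewards.shape)  # (ep_length, batch) = (40, 1)
print(rewards[5, 0])  # alpha * read_weight * baseline = 0.9 * 0.7 * 1.0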
# Lint as: python2, python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Threaded batch environment wrapper."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from concurrent import futures

from six.moves import range
from six.moves import zip

from tvt import nest_utils


class BatchEnv(object):
  """Wrapper that steps multiple environments in separate threads.

  The threads are stepped in lock step, so all threads progress by one step
  before any move to the next step.
  """

  def __init__(self, batch_size, env_builder, **env_kwargs):
    self.batch_size = batch_size
    self._envs = [env_builder(**env_kwargs) for _ in range(batch_size)]
    self._num_actions = self._envs[0].num_actions
    self._observation_shape = self._envs[0].observation_shape
    self._episode_length = self._envs[0].episode_length

    self._executor = futures.ThreadPoolExecutor(max_workers=self.batch_size)

  def reset(self):
    """Reset the entire batch of environments."""

    def reset_environment(env):
      return env.reset()

    try:
      output_list = []
      for env in self._envs:
        output_list.append(self._executor.submit(reset_environment, env))
      output_list = [env_output.result() for env_output in output_list]
    except KeyboardInterrupt:
      self._executor.shutdown(wait=True)
      raise

    observations, rewards = nest_utils.nest_stack(output_list)
    return observations, rewards

  def step(self, action_list):
    """Step batch of envs.

    Args:
      action_list: A list of actions, one per environment in the batch. Each
        one should be a Python scalar int or a numpy scalar int.

    Returns:
      A tuple (observations, rewards):
        observations: A nest of observations, each one a numpy array where the
          first dimension has size equal to the number of environments in the
          batch.
        rewards: An array of rewards with size equal to the number of
          environments in the batch.
    """

    def step_environment(env, action):
      return env.step(action)

    try:
      output_list = []
      for env, action in zip(self._envs, action_list):
        output_list.append(
            self._executor.submit(step_environment, env, action))
      output_list = [env_output.result() for env_output in output_list]
    except KeyboardInterrupt:
      self._executor.shutdown(wait=True)
      raise

    observations, rewards = nest_utils.nest_stack(output_list)
    return observations, rewards

  @property
  def observation_shape(self):
    """Observation shape per environment, i.e. with no batch dimension."""
    return self._observation_shape

  @property
  def num_actions(self):
    return self._num_actions

  @property
  def episode_length(self):
    return self._episode_length

  def last_phase_rewards(self):
    return [env.last_phase_reward() for env in self._envs]
deepmind-research-master
tvt/batch_env.py
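A toy driver for BatchEnv (a sketch, not from the repository). DummyEnv below only mimics the interface the wrapper relies on: num_actions, observation_shape, episode_length, reset(), step() and last_phase_reward(). Running it still needs a TensorFlow 1.x environment, because batch_env imports the nest utilities built on tf.contrib.

import numpy as np

from tvt import batch_env


class DummyEnv(object):
  """Minimal stand-in environment returning toy observations and rewards."""

  num_actions = 4
  observation_shape = (5, 5, 3)
  episode_length = 10

  def reset(self):
    observation = np.zeros(self.observation_shape, dtype=np.float32)
    return observation, 0.0

  def step(self, action):
    observation = np.random.rand(*self.observation_shape).astype(np.float32)
    reward = float(action == 0)  # arbitrary toy reward
    return observation, reward

  def last_phase_reward(self):
    return 0.0


envs = batch_env.BatchEnv(batch_size=3, env_builder=DummyEnv)

observations, rewards = envs.reset()
print(observations.shape)  # (3, 5, 5, 3): batch of initial observations

observations, rewards = envs.step([0, 1, 2])
print(rewards)             # one reward per environment, shape (3,)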