repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses, 1 value) |
---|---|---|---|---|---|---|
BenchPress | BenchPress-master/deeplearning/benchpress/models/telemetry.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines telemetry data gathers."""
import pathlib
import re
import json
import typing
import datetime
import glob
from absl import flags
from deeplearning.benchpress.proto import telemetry_pb2
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
class TrainingLogger(object):
"""A TrainingLogger produces telemetry data of a BenchPress model as it is trained.
Telemetry data is gathered after every epoch of training. It includes a
timestamp, the model's loss, and the time spent training the epoch.
See the Keras callback docs: https://keras.io/callbacks/#lambdacallback
"""
def __init__(self, logdir: pathlib.Path):
logdir.mkdir(exist_ok = True, parents = True)
self.logdir = logdir
self.last_epoch_begin_timestamp = None
self.telemetry = None
def EpochBeginCallback(self) -> None:
self.last_epoch_begin_timestamp = datetime.datetime.utcnow()
def EpochEndCallback(self, epoch: int, loss: float):
now = datetime.datetime.utcnow()
epoch_time_ms = now - self.last_epoch_begin_timestamp
telemetry = telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = now.strftime("%m/%d/%Y, %H:%M:%S"),
epoch_num = epoch,
epoch_wall_time_ms = int(round(epoch_time_ms.total_seconds()*1000)),
loss = loss,
)
pbutil.ToFile(telemetry, self.logdir / f"epoch_{epoch:03d}_telemetry.pbtxt")
def KerasEpochBeginCallback(self, epoch: int, logs: typing.Union[typing.List[typing.Any], typing.Dict[str, typing.Any]]) -> None:
"""A Keras "on_epoch_end" callback."""
del epoch
del logs
self.EpochBeginCallback()
def KerasEpochEndCallback(self, epoch: int, logs: typing.Union[typing.List[typing.Any], typing.Dict[str, typing.Any]]) -> None:
"""A Keras "on_epoch_end" callback."""
# Keras epoch numbers are zero indexed.
self.EpochEndCallback(epoch + 1, logs["loss"])
def KerasCallback(self, keras):
"""Returns the keras callback to passed to a model's fit() function."""
return keras.callbacks.LambdaCallback(
on_epoch_begin=self.KerasEpochBeginCallback,
on_epoch_end=self.KerasEpochEndCallback,
)
def TfRecordEpochs(self) -> None:
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
event_acc = EventAccumulator(str(self.logdir))
event_acc.Reload()
self.tfAccumulateLoss(event_acc)
for key in event_acc.Tags()['scalars']:
_, step, value = zip(*event_acc.Scalars(key))
key_str = str(pathlib.Path(key).stem)
plt.linesSingleAxis(
{key_str: {'y': value, 'x': step}},
y_name = key_str,
x_name = "Train step",
plot_title = key_str,
path = self.logdir,
)
return
def tfAccumulateLoss(self, event_acc):
"""Open accumulator and read total_loss scalar"""
try:
self.telemetry = []
wall_time, step_num, loss = zip(*event_acc.Scalars('training/total_loss'))
for (indx, (wt, st, ls)) in enumerate(zip(wall_time, step_num, loss)):
round_wt = int(round(wt, 0))
if indx == 0:
current_time = round_wt
continue
else:
self.telemetry.append(telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = str(round_wt),
epoch_num = st,
epoch_wall_time_ms = round_wt - current_time,
loss = ls,
)
)
current_time = round_wt
except KeyError as e:
l.logger().warn("Model loss log not found! Available Tags: {}".format(event_acc.Tags()))
self.telemetry = [
telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = str(0),
epoch_num = 0,
epoch_wall_time_ms = 0,
loss = -1,
)
]
return
def EpochTelemetry(self) -> typing.List[telemetry_pb2.ModelEpochTelemetry]:
"""Return the epoch telemetry files."""
if self.telemetry is None:
if len(glob.glob(str(self.logdir / "epoch_*_telemetry.pbtxt"))) > 0:
return [
pbutil.FromFile(self.logdir / p, telemetry_pb2.ModelEpochTelemetry())
for p in sorted(self.logdir.iterdir())
if re.match(r"epoch_\d\d+_telemetry\.pbtxt", str(p.name))
]
elif len(glob.glob(str(self.logdir / "events.out.tfevents*"))) > 0:
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
event_acc = EventAccumulator(str(self.logdir))
event_acc.Reload()
self.tfAccumulateLoss(event_acc)
elif len(glob.glob(str(self.logdir / "training.json"))) == 1:
with open(self.logdir / "training.json", 'r') as jsf:
data = json.load(jsf)
self.telemetry = [
telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = '0',
epoch_num = x['step'],
epoch_wall_time_ms = int(round(x['batch_execution_time_ms'])) if "batch_execution_time_ms" in x else -1,
loss = x['total_loss'] if "total_loss" in x else -1.0,
) for x in data
]
else:
l.logger().warn("Training logs have not been found. Invalid reported loss.")
self.telemetry = [
telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = str(0),
epoch_num = 0,
epoch_wall_time_ms = 0,
loss = -1,
)
]
return self.telemetry
| 6,194 | 37.006135 | 131 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/bert_flags.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shared absl flags between Pytorch and Tensorflow
BERT models.
"""
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"monitor_frequency",
5000,
"Choose frequency (in steps) in which tensors will be logged during training. "
"Default: 5000"
)
flags.DEFINE_integer(
"select_checkpoint_step",
-1,
"Select step checkpoint for sample. Re-training with this flag is not supported. "
"To restart from specific checkpoint, you have to manually remove the checkpoints after the desired one."
"Default: -1, flag ignored and latest checkpoint is loaded."
)
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_boolean("force_eval", False, "Run Validation no matter what.")
flags.DEFINE_integer("sample_per_epoch", 3, "Set above zero to sample model after every epoch.")
flags.DEFINE_boolean("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_boolean("categorical_sampling", True, "Use categorical distribution on logits when sampling.")
| 1,617 | 33.425532 | 107 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/builders.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file builds Keras models from BenchPress Model config protos."""
from deeplearning.benchpress.proto import model_pb2
from absl import flags
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.models import lm_data_generator
FLAGS = flags.FLAGS
def AssertIsBuildable(config: model_pb2.Model) -> model_pb2.Model:
"""Assert that a model configuration is buildable.
Args:
config: A model proto.
Returns:
The input model proto, unmodified.
Raises:
UserError: If the model is not buildable.
InternalError: If the value of the training.optimizer field is not
understood.
"""
# Any change to the Model proto schema will require a change to this function.
try:
pbutil.AssertFieldIsSet(config, "corpus")
pbutil.AssertFieldIsSet(config, "architecture")
pbutil.AssertFieldIsSet(config, "training")
pbutil.AssertFieldIsSet(config.architecture, "backend")
if config.architecture.backend == model_pb2.NetworkArchitecture.KERAS_SEQ:
pbutil.AssertFieldIsSet(config.architecture, "neuron_type")
pbutil.AssertFieldConstraint(
config.architecture,
"embedding_size",
lambda x: 0 < x,
"NetworkArchitecture.embedding_size must be > 0",
)
elif config.architecture.backend == model_pb2.NetworkArchitecture.TENSORFLOW_SEQ:
pbutil.AssertFieldIsSet(config.architecture, "neuron_type")
pbutil.AssertFieldConstraint(
config.architecture,
"neurons_per_layer",
lambda x: 0 < x,
"NetworkArchitecture.neurons_per_layer must be > 0",
)
pbutil.AssertFieldConstraint(
config.architecture,
"num_layers",
lambda x: 0 < x,
"NetworkArchitecture.num_layers must be > 0",
)
pbutil.AssertFieldConstraint(
config.architecture,
"post_layer_dropout_micros",
lambda x: 0 <= x <= 1000000,
"NetworkArchitecture.post_layer_dropout_micros "
"must be >= 0 and <= 1000000",
)
pbutil.AssertFieldConstraint(
config.training,
"num_epochs",
lambda x: 0 < x,
"TrainingOptions.num_epochs must be > 0",
)
elif config.architecture.backend == model_pb2.NetworkArchitecture.TENSORFLOW_BERT\
or config.architecture.backend == model_pb2.NetworkArchitecture.TORCH_BERT\
or config.architecture.backend == model_pb2.NetworkArchitecture.INCODER_1B\
or config.architecture.backend == model_pb2.NetworkArchitecture.INCODER_6B:
# Data generator is needed when using bert.
pbutil.AssertFieldIsSet(config.training, "data_generator")
# Parse data_generator params.
_ = lm_data_generator.AssertConfigIsValid(config.training.data_generator)
if config.architecture.backend != model_pb2.NetworkArchitecture.INCODER_1B and config.architecture.backend != model_pb2.NetworkArchitecture.INCODER_6B:
## .architecture params
pbutil.AssertFieldIsSet(
config.architecture,
"hidden_size",
)
pbutil.AssertFieldIsSet(
config.architecture,
"num_hidden_layers",
)
pbutil.AssertFieldIsSet(
config.architecture,
"num_attention_heads",
)
pbutil.AssertFieldIsSet(
config.architecture,
"intermediate_size",
)
pbutil.AssertFieldConstraint(
config.architecture,
"hidden_size",
lambda x: x % config.architecture.num_attention_heads == 0,
"The hidden size is not a multiple of the number of attention "
"heads."
)
pbutil.AssertFieldIsSet(
config.architecture,
"hidden_act",
)
pbutil.AssertFieldIsSet(
config.architecture,
"hidden_dropout_prob",
)
pbutil.AssertFieldIsSet(
config.architecture,
"attention_probs_dropout_prob",
)
pbutil.AssertFieldIsSet(
config.architecture,
"type_vocab_size",
)
pbutil.AssertFieldIsSet(
config.architecture,
"initializer_range",
)
pbutil.AssertFieldIsSet(
config.architecture,
"layer_norm_eps",
)
## Optional feature encoder attributes
if config.architecture.HasField("feature_encoder") and config.architecture.feature_encoder == True:
pbutil.AssertFieldIsSet(
config.architecture,
"feature_sequence_length"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_embedding_size"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_dropout_prob"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_singular_token_thr"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_max_value_token"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_token_range"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_num_attention_heads"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_transformer_feedforward"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_layer_norm_eps"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_num_hidden_layers"
)
## .training params
pbutil.AssertFieldIsSet(
config.training,
"max_predictions_per_seq",
)
pbutil.AssertFieldIsSet(
config.training,
"num_train_steps",
)
pbutil.AssertFieldIsSet(
config.training,
"num_warmup_steps",
)
if config.HasField("pre_train_corpus"):
pbutil.AssertFieldIsSet(
config.training,
"num_pretrain_steps",
)
pbutil.AssertFieldIsSet(
config.training,
"num_prewarmup_steps",
)
pbutil.AssertFieldIsSet(
config.training,
"dupe_factor",
)
pbutil.AssertFieldIsSet(
config.training,
"masked_lm_prob",
)
pbutil.AssertFieldConstraint(
config.training,
"random_seed",
lambda x: 0 <= x,
"TrainingOptions.random_seed must be >= 0",
)
pbutil.AssertFieldConstraint(
config.training,
"sequence_length",
lambda x: 1 <= x,
"TrainingOptions.sequence_length must be >= 1",
)
pbutil.AssertFieldIsSet(
config.training, "shuffle_corpus_contentfiles_between_epochs"
)
pbutil.AssertFieldConstraint(
config.training,
"batch_size",
lambda x: 0 < x,
"TrainingOptions.batch_size must be > 0",
)
pbutil.AssertFieldIsSet(config.training, "optimizer")
if config.training.HasField("adam_optimizer"):
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"initial_learning_rate_micros",
lambda x: 0 <= x,
"AdamOptimizer.initial_learning_rate_micros must be >= 0",
)
if config.architecture.backend == model_pb2.NetworkArchitecture.KERAS_SEQ or \
config.architecture.backend == model_pb2.NetworkArchitecture.TENSORFLOW_SEQ:
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"learning_rate_decay_per_epoch_micros",
lambda x: 0 <= x,
"AdamOptimizer.learning_rate_decay_per_epoch_micros must be >= 0",
)
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"beta_1_micros",
lambda x: 0 <= x <= 1000000,
"AdamOptimizer.beta_1_micros must be >= 0 and <= 1000000",
)
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"beta_2_micros",
lambda x: 0 <= x <= 1000000,
"AdamOptimizer.beta_2_micros must be >= 0 and <= 1000000",
)
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"normalized_gradient_clip_micros",
lambda x: 0 <= x,
"AdamOptimizer.normalized_gradient_clip_micros must be >= 0",
)
elif config.training.HasField("rmsprop_optimizer"):
pbutil.AssertFieldConstraint(
config.training.rmsprop_optimizer,
"initial_learning_rate_micros",
lambda x: 0 <= x,
"RmsPropOptimizer.initial_learning_rate_micros must be >= 0",
)
pbutil.AssertFieldConstraint(
config.training.rmsprop_optimizer,
"learning_rate_decay_per_epoch_micros",
lambda x: 0 <= x,
"RmsPropOptimizer.learning_rate_decay_per_epoch_micros must be >= 0",
)
else:
raise SystemError(
"Unrecognized value: 'TrainingOptions.optimizer'"
)
except Exception as e:
raise e
return config
def BuildOptimizer(config: model_pb2.Model) -> "keras.optimizers.Optimizer":
"""Construct the training optimizer from config.
Args:
config: A Model config proto.
Raises:
InternalError: If the value of the optimizer field is not understood.
"""
# Deferred importing of Keras so that we don't have to activate the
# TensorFlow backend every time we import this module.
import keras
# We do not use *any* default values for arguments, in case for whatever
# reason the Keras API changes a default arg.
if config.training.HasField("adam_optimizer"):
opts = {}
opt = config.training.adam_optimizer
if opt.normalized_gradient_clip_micros:
opts["clipnorm"] = opt.normalized_gradient_clip_micros / 1e6
return keras.optimizers.Adam(
lr=opt.initial_learning_rate_micros / 1e6,
beta_1=opt.beta_1_micros / 1e6,
beta_2=opt.beta_2_micros / 1e6,
epsilon=None,
decay=opt.learning_rate_decay_per_epoch_micros / 1e6,
amsgrad=False,
**opts,
)
elif config.training.HasField("rmsprop_optimizer"):
opt = config.training.rmsprop_optimizer
return keras.optimizers.RMSprop(
lr=opt.initial_learning_rate_micros / 1e6,
decay=opt.initial_learning_rate_micros / 1e6,
rho=0.9,
epsilon=None,
)
else:
raise SystemError(
"Unrecognized value: 'TrainingOptions.optimizer'"
)
def BuildKerasModel(
config: model_pb2.Model, vocabulary_size: int
) -> "keras.models.Sequential":
"""Build a Keras model from a Model proto.
Args:
config: A Model proto instance.
vocabulary_size: The number of tokens in the vocabulary.
Returns:
A Sequential model instance.
"""
# Deferred importing of Keras so that we don't have to activate the
# TensorFlow backend every time we import this module.
import keras
dropout = (config.architecture.post_layer_dropout_micros or 0) / 1e6
model = keras.models.Sequential()
layer = {
model_pb2.NetworkArchitecture.LSTM: keras.layers.LSTM,
model_pb2.NetworkArchitecture.RNN: keras.layers.RNN,
model_pb2.NetworkArchitecture.GRU: keras.layers.GRU,
}[config.architecture.neuron_type]
# The input layer.
model.add(
keras.layers.Embedding(
vocabulary_size,
config.architecture.embedding_size,
batch_input_shape=(
config.training.batch_size,
config.training.sequence_length,
),
)
)
model.add(keras.layers.Dropout(dropout))
# The recurrent network layers.
for _ in range(config.architecture.num_layers):
model.add(
layer(
config.architecture.neurons_per_layer,
return_sequences=True,
stateful=True,
)
)
model.add(keras.layers.Dropout(dropout))
# The output layer.
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(vocabulary_size, activation="softmax")
)
)
return model
| 12,534 | 31.72846 | 157 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/lm_data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generator specifically used for Mask LM models (namely BERT)."""
import sys
import json
import time
import tqdm
import random
import progressbar
import copy
import glob
import humanize
import typing
import multiprocessing
import functools
import pathlib
import pickle
import numpy as np
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.models import sequence_masking
from deeplearning.benchpress.models import lm_database
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"write_text_dataset",
False,
"Set True for MaskLM data generator to write dataset in text format, along with the dataset record."
)
flags.DEFINE_integer(
"memory_limit",
4,
"Set maximum amount of available memory used for masking sequences in Gb. [Default]: 4",
)
flags.DEFINE_boolean(
"force_remake_dataset",
False,
"Force data generator to re-mask encoded dataset and store dataset record."
)
flags.DEFINE_boolean(
"store_datasets_to_DB",
False,
"Set True to store masked datasets to SQL Database for observation."
)
def AssertConfigIsValid(config: model_pb2.DataGenerator) -> model_pb2.DataGenerator:
"""
Parse data generator protobuf message.
Raise Exception if format is wrong.
"""
pbutil.AssertFieldConstraint(
config,
"datapoint_type",
lambda x: x == "kernel" or x == "statement",
"Valid options for datapoint_type are 'kernel' and 'statement'",
)
pbutil.AssertFieldConstraint(
config,
"datapoint_time",
lambda x: x == "online" or x == "pre",
"Valid options for datapoint_time are 'online' and 'pre'",
)
pbutil.AssertFieldIsSet(
config,
"use_start_end",
)
pbutil.AssertFieldIsSet(
config,
"steps_per_epoch",
)
pbutil.AssertFieldConstraint(
config,
"validation_split",
lambda x : 0 <= x <= 100,
"Validation split is expressed in [0-100]%."
)
if config.datapoint_type == "kernel":
pbutil.AssertFieldIsSet(
config,
"truncate_large_kernels",
)
if len(config.validation_set) > 0:
for val_opt in config.validation_set:
if val_opt.HasField("mask"):
pbutil.AssertFieldIsSet(
val_opt.mask,
"random_placed_mask",
)
elif val_opt.HasField("hole"):
if val_opt.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
val_opt.hole,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a hole's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
val_opt.hole,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if val_opt.hole.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
val_opt.hole.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
val_opt.hole.normal_distribution,
"variance",
)
elif not val_opt.hole.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
elif val_opt.HasField("mask_seq"):
if val_opt.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
val_opt.mask_seq,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a mask_seq's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
val_opt.mask_seq,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if val_opt.mask_seq.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
val_opt.mask_seq.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
val_opt.mask_seq.normal_distribution,
"variance",
)
elif not val_opt.mask_seq.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
# Parse masking technique for bert's data generator
pbutil.AssertFieldIsSet(config, "mask_technique")
if config.HasField("mask"):
pbutil.AssertFieldIsSet(
config.mask,
"random_placed_mask",
)
elif config.HasField("hole"):
if config.hole.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
config.hole,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a hole's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
config.hole,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if config.hole.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
config.hole.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
config.hole.normal_distribution,
"variance",
)
elif not config.hole.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
pbutil.AssertFieldIsSet(
config.hole,
"stage_training",
)
elif config.HasField("mask_seq"):
if config.mask_seq.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
config.mask_seq,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a mask_seq's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
config.mask_seq,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if config.mask_seq.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
config.mask_seq.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
config.mask_seq.normal_distribution,
"variance",
)
elif not config.mask_seq.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
pbutil.AssertFieldIsSet(
config.mask_seq,
"stage_training",
)
return config
def _addStartEndPadToken(inp: typing.Union[list, tuple], tokenizer, trunc: int = None, seq_len: int = None) -> typing.Tuple[int, np.array]:
"""
Inserts [START] and [END] tokens at the beginning and end of a sequence.
Arguments:
inp: input_sequence
Returns:
[START] + input_sequence + [END]
"""
tokenizer = pickle.loads(tokenizer)
features = None
if isinstance(inp, tuple):
inp, features = inp
try:
if trunc is not None:
inp = inp[:trunc]
assert len(inp) != 0, "Empty list provided."
assert tokenizer.padToken not in inp, "Use this function before padding a sequence!"
start = [tokenizer.startToken] if inp[0] != tokenizer.startToken else []
end = [tokenizer.endToken ] if inp[-1] != tokenizer.endToken else []
if isinstance(inp, np.ndarray):
inp = list(inp)
ret = start + inp + end
rlen = len(ret)
if seq_len is not None:
ret += [tokenizer.padToken] * (seq_len - len(ret))
if features:
return rlen, np.array(ret), features
else:
return rlen, np.array(ret)
except AssertionError:
return None
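# Illustrative behaviour (token ids are made up): for inp = [5, 6, 7],
# seq_len = 6 and a tokenizer with startToken = 1, endToken = 2, padToken = 0,
# the function returns (5, np.array([1, 5, 6, 7, 2, 0])) - the first element is
# the sequence length before padding.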
class MaskLMDataGenerator(object):
"""Abstract class, shared among TORCH and TF BERT data generators."""
@property
def is_torch(self):
if self.file_extension == "pt_record":
return True
return False
def __init__(self, file_extension: str):
self.file_extension = file_extension
self.mask_func = sequence_masking.MPMaskSequence
self.hole_func = sequence_masking.MPHoleSequence
self.mask_seq_func = sequence_masking.HoleSequenceSeqMasks
self.dataset = None
self.corpus = None
self.tokenizer = None
self.config = None
self.cache = None
self.training_opts = None
self.pre_train = None
self.num_train_steps = None
self.steps_per_epoch = None
self.sample_batch_size = None
self.max_position_embeddings = None
self.num_epochs = None
self.feature_encoder = None
self.feature_tokenizer = None
self.feature_sequence_length = None
self.sampler = None
self.rngen = None
return
def TrainMaskLMBatchGenerator(self,
corpus : "corpuses.Corpus",
training_opts : model_pb2.TrainingOptions,
cache_path : pathlib.Path,
num_train_steps : int = None,
pre_train : bool = False,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> "data_generator.MaskLMDataGenerator":
"""Initializes data generator for training."""
self.cache = cache.mkcache(cache_path, "dataset")
self.cache.path.mkdir(exist_ok = True, parents = True)
self.dataset = {}
self.corpus = corpus
self.tokenizer = corpus.tokenizer
self.config = training_opts.data_generator
self.training_opts = training_opts
self.rngen = np.random # random.Random(training_opts.random_seed)
self.pre_train = pre_train
self.feature_encoder = feature_encoder
self.feature_tokenizer = feature_tokenizer
self.feature_sequence_length = feature_sequence_length
if num_train_steps:
self.num_train_steps = num_train_steps
else:
self.num_train_steps = self.training_opts.num_train_steps
shaped_corpus = self.createCorpus(self.cache.path)
distrib.barrier()
if self.config.datapoint_time == "pre":
if self.feature_encoder:
raise NotImplementedError("Pre masking corpus does not work with feature encoding model.")
# 'pre' pre-processes/masks training/validation/sampling corpus for the model to use.
# 'online' stores the raw data and masks them on the fly.
self.configDataset(shaped_corpus)
return self
def SampleMaskLMBatchGenerator(self,
model_opts : model_pb2.TrainingOptions,
sampler : "samplers.Sampler",
tokenizer : tokenizers.TokenizerBase,
seed : int,
sample_batch_size : int,
max_position_embeddings : int,
cache_path : pathlib.Path,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> "data_generator.MaskLMBatchGenerator":
"""Initializes data generator for inference."""
self.cache = cache.mkcache(cache_path, "dataset")
self.cache.path.mkdir(exist_ok = True, parents = True)
self.dataset = {}
self.sampler = sampler
self.corpus = sampler.sample_corpus
self.tokenizer = tokenizer
self.config = model_opts.data_generator
self.rngen = np.random
self.seed = seed
self.sample_batch_size = sample_batch_size
if sampler.is_active and FLAGS.sample_workload_size < (self.sample_batch_size * environment.WORLD_SIZE):
throttled_batch_size = int(FLAGS.sample_workload_size // environment.WORLD_SIZE)
l.logger().warn("Too many GPU devices for workload size. Throttling batch size from {} to {}".format(
self.sample_batch_size,
throttled_batch_size,
)
)
self.sample_batch_size = throttled_batch_size
self.max_position_embeddings = max_position_embeddings
self.feature_encoder = feature_encoder
self.feature_tokenizer = feature_tokenizer
self.feature_sequence_length = feature_sequence_length
self.training_opts = model_opts
self.training_opts.sequence_length = sampler.sequence_length
self.training_opts.batch_size = sampler.batch_size
return self
def configDataset(self,
shaped_corpus: np.array,
) -> None:
"""
Configs all necessary training and validation sets described in the model protobuf.
First constructs training set and optionally splits it into validation set, if selected in config.
Then configValidationSets is called which constructs any additional validation_set elements
provided in the model's config.
"""
if FLAGS.force_remake_dataset:
l.logger().warn("Force remaking datasets can cause lots of problems on an already trained model. Are you sure you want to proceed ? [y/n]")
a = input()
if a.lower() != "yes" and a.lower() != "y":
l.logger().warn("Overwriting dataset process was aborted. Good call.")
exit()
if len(glob.glob(str(self.cache.path / "train_dataset_*.{}".format(self.file_extension)))) == 0 or FLAGS.force_remake_dataset:
if self.config.validation_split == 0:
self._maskCorpus(
shaped_corpus, set_name = "train_dataset", path = self.cache.path, train_set = True
)
else:
split_index = (len(shaped_corpus) // 100) * self.config.validation_split
self._maskCorpus(
shaped_corpus[split_index:], set_name = "train_dataset", path = self.cache.path, train_set = True
)
self._maskCorpus(
shaped_corpus[:split_index], set_name = "validation_dataset", path = self.cache.path, train_set = False
)
else:
self.dataset["train_dataset"] = {
"file": sorted(glob.glob(str(self.cache.path / "train_dataset_*.{}".format(self.file_extension)))),
"txt" : sorted(glob.glob(str(self.cache.path / "train_dataset_*.txt"))),
}
if len(glob.glob(str(self.cache.path / "validation_dataset_*.{}".format(self.file_extension)))) != 0:
self.dataset["validation_dataset"] = {
"file": sorted(glob.glob(str(self.cache.path / "validation_dataset_*.{}".format(self.file_extension)))),
"txt" : sorted(glob.glob(str(self.cache.path / "validation_dataset_*.txt"))),
}
self.configValidationSets(self.config.validation_set, shaped_corpus, self.cache.path)
return
def configValidationSets(self,
valset_list: typing.List,
shaped_corpus: np.array,
path: pathlib.Path,
) -> None:
"""
Mask and store any extra validation datasets defined into
model protobuf.
Example:
validation_set {
max_predictions_per_seq: 10
hole {
hole_length: 15
uniform_distribution: true
}
}
Arguments:
valset_list: list of validation_set items
Returns:
None
"""
for valset in valset_list:
set_name = "pred_{}_{}".format(
valset.max_predictions_per_seq,
"mask" if valset.HasField("mask") else "hole_{}".format(valset.hole.absolute_length
if valset.hole.HasField("absolute_length")
else valset.hole.relative_length
)
)
if set_name in self.dataset or len(glob.glob(str(path / "{}_*.{}".format(set_name, self.file_extension)))) > 0:
continue
self._maskCorpus(
shaped_corpus, train_set = False, set_name = set_name, path = path, config = valset
)
return
def configSampleSets(self) -> typing.List[pathlib.Path]:
"""
Parses the types of datasets requested by the sampler.
These can be training, validation or a custom sample set
(defined by type of target and hole/mask specs).
If the set does not exist, it is constructed.
Returns:
A list of paths for the requested datasets.
Raises:
FileNotFoundError:
In case sampler asks for validation set,
but this had not been constructed during training.
"""
if self.sampler.config.HasField("train_set"):
path = self.cache.path
sampledDataset = "train_dataset"
elif self.sampler.config.HasField("validation_set"):
path = self.cache.path
sampledDataset = "validation_dataset"
elif self.sampler.config.HasField("sample_set"):
path = self.cache.path
sampledDataset = "pred_{}_{}".format(
self.sampler.config.sample_set.max_predictions_per_seq,
"mask" if self.sampler.config.sample_set.HasField("mask")
else "hole_{}".format(self.sampler.config.sample_set.hole.absolute_length
if self.sampler.config.sample_set.hole.HasField("absolute_length")
else self.sampler.config.sample_set.hole.relative_length)
)
elif self.sampler.config.HasField("sample_corpus"):
path = self.sampler.corpus_directory
sampledDataset = "pred_{}_{}".format(
self.sampler.config.sample_corpus.corpus_config.max_predictions_per_seq,
"mask" if self.sampler.config.sample_corpus.corpus_config.HasField("mask")
else "hole_{}".format(self.sampler.config.sample_corpus.corpus_config.hole.absolute_length
if self.sampler.config.sample_corpus.hole.HasField("absolute_length")
else self.sampler.config.sample_corpus.hole.relative_length)
)
else:
raise ValueError("Unknown dataset type")
return self.getDatasetPath(sampledDataset, path)
def getDatasetPath(self,
set_name: str,
path: pathlib.Path,
) -> typing.List[pathlib.Path]:
"""
Based on a set name, search cache path for its existence.
If not existing, get original pickled corpus, do the masking
and save dataset in pt/tf_record format.
Returns list of created datasets.
"""
path_search = lambda: sorted(
glob.glob(
str(path / "{}_*.{}".format(set_name, self.file_extension))
)
)
path_list = path_search()
if len(path_list) == 0:
if set_name == "validation_dataset":
raise FileNotFoundError("Corpus had not been split in train-val, therefore validation dataset is not found.")
elif set_name == "train_dataset":
raise FileNotFoundError("Trying to sample training dataset, but it doesn't exist!")
shaped_corpus = self.createCorpus(path)
distrib.barrier()
if self.sampler.config.HasField("sample_set"):
config_list = [self.sampler.config.sample_set]
elif self.sampler.config.HasField("sample_corpus"):
config_list = [self.sampler.config.sample_corpus.corpus_config]
else:
raise ValueError("sampler sets can either be sample_set or sample_corpus")
self.configValidationSets(config_list, shaped_corpus, path)
return path_search()
def createCorpus(self, path: pathlib.Path) -> np.array:
"""
Constructs training corpus in text format, stores it in
shaped_corpus
Each corpus datapoint is either a single kernel or a random
sequence of size sequence_length (legacy).
If corpus has been previously pickled and stored, then it is loaded.
"""
start_time = time.time()
# Set corpus dimension parameters
sequence_length = self.training_opts.sequence_length
effect_seq_length = sequence_length - (2 if self.config.use_start_end else 0)
batch_size = self.training_opts.batch_size
dupe_factor = self.training_opts.dupe_factor
shuffle = self.training_opts.shuffle_corpus_contentfiles_between_epochs
pad = [self.tokenizer.padToken ]
start = [self.tokenizer.startToken ]
end = [self.tokenizer.endToken ]
shaped_corpus = None
corpus_file = "{}corpus.pkl".format("pre_" if self.pre_train else "")
# Monitor counts actual length distribution of kernel instances.
kernel_length_monitor = monitors.FrequencyMonitor(path, "{}kernel_length".format("pre_" if self.pre_train else ""))
# Token frequency distribution monitor.
if not self.pre_train:
feature_monitors = {
ftype: monitors.CategoricalDistribMonitor(
path,
"{}{}_distribution".format("pre_" if self.pre_train else "", ftype)
)
for ftype in extractor.extractors.keys()
}
if (path / corpus_file).exists():
with open(path / corpus_file, 'rb') as infile:
shaped_corpus = pickle.load(infile)
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.config.steps_per_epoch
self.steps_per_epoch = self.config.steps_per_epoch
l.logger().info(
"Loaded from file corpus of {} examples in {} ms.".format(
humanize.intcomma(len(shaped_corpus)),
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
return shaped_corpus
# generate a kernel corpus
if (path / "text_corpus.pkl").exists():
# Only sampler writes a text_corpus.pkl, to do online or active sampling.
# The reason is, corpus is saved in text format, to be picked up with the
# right tokenizer. And that is the model's tokenizer.
with open(path / "text_corpus.pkl", 'rb') as infile:
encoded_corpus = [self.tokenizer.TokenizeString(x) for x in pickle.load(infile)]
else:
if self.pre_train:
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.config.steps_per_epoch
self.steps_per_epoch = self.config.steps_per_epoch
if environment.WORLD_RANK == 0:
if len(glob.glob(str(path / "pre_corpus_*.pkl"))) > 0:
return []
encoded_corpus = []
cache_lengths = {}
chunk_size = 250000
i, ch_idx = 0, 0
bar = tqdm.tqdm(total = self.corpus.encoded.size, desc = "Chunk pre-train corpus")
pool = multiprocessing.Pool()
l.logger().info("Processing pre-train corpus chunks...")
l.logger().warn("The routine below does not provide the features.")
for dp in pool.imap_unordered(
functools.partial(
_addStartEndPadToken,
tokenizer = pickle.dumps(self.tokenizer),
trunc = effect_seq_length,
seq_len = sequence_length),
self.corpus.GetTrainingDataGenerator()):
if dp:
input_features = None
if self.feature_encoder:
rlen, enc_kernel, input_features = dp
else:
rlen, enc_kernel = dp
kernel_length_monitor.register(rlen)
if self.feature_encoder:
for fspace in extractor.extractors.keys():
if fspace in input_features:
encoded_features = self.feature_tokenizer.TokenizeFeatureVector(input_features[fspace], fspace = fspace, seq_len = self.feature_sequence_length)
encoded_corpus.append((enc_kernel, encoded_features))
else:
encoded_corpus.append(enc_kernel)
i += 1
if i % chunk_size == 0:
encoded_corpus = np.array(encoded_corpus)
corpus_file = "pre_corpus_{}.pkl".format(ch_idx)
cache_lengths[corpus_file] = len(encoded_corpus)
l.logger().info("Storing chunk {}, len: {}".format(ch_idx, encoded_corpus.shape))
with open(path / corpus_file, 'wb') as outf:
pickle.dump(encoded_corpus, outf, protocol = 4)
with open(path / "pre_lengths_cache.json", 'w') as outf:
json.dump(cache_lengths, outf)
ch_idx += 1
encoded_corpus = []
bar.update(1)
if encoded_corpus:
if self.feature_encoder:
encoded_corpus = np.array(encoded_corpus, dtype = object)
else:
encoded_corpus = np.array(encoded_corpus)
l.logger().info("Storing chunk {}, len: {}".format(ch_idx, encoded_corpus.shape))
corpus_file = "pre_corpus_{}.pkl".format(ch_idx)
cache_lengths[corpus_file] = len(encoded_corpus)
with open(path / corpus_file, 'wb') as outf:
pickle.dump(encoded_corpus, outf, protocol = 4)
with open(path / "pre_lengths_cache.json", 'w') as outf:
json.dump(cache_lengths, outf)
kernel_length_monitor.plot()
pool.close()
else:
return []
return encoded_corpus
else:
if environment.WORLD_RANK == 0:
if not self.feature_encoder:
encoded_corpus = self.corpus.GetTrainingData(sequence_length = effect_seq_length if not self.config.truncate_large_kernels else None)
else:
encoded_corpus = self.corpus.GetTrainingDataWFeatures(sequence_length = effect_seq_length if not self.config.truncate_large_kernels else None)
if self.config.datapoint_type == "kernel":
if environment.WORLD_RANK == 0:
# Reject larger than sequence length
initial_length = copy.deepcopy(len(encoded_corpus))
if not self.pre_train:
# Get features of fitting dataset within sequence length
for feature in self.corpus.GetTrainingFeatures(effect_seq_length):
for ftype, fvector in feature.items():
feature_monitors[ftype].register(fvector)
if self.feature_encoder:
training_features = [x for _, x in encoded_corpus]
encoded_corpus = [x for x, _ in encoded_corpus]
idx, t = set(), []
if self.config.truncate_large_kernels:
for i, x in enumerate(encoded_corpus):
if len(x[:effect_seq_length]) <= effect_seq_length:
t.append(list(x[:effect_seq_length]))
else:
idx.add(i)
else:
for i, x in enumerate(encoded_corpus):
if len(x) <= effect_seq_length:
t.append(list(x))
else:
idx.add(i)
encoded_corpus = t
if self.feature_encoder:
training_features = [x for i, x in enumerate(training_features) if i not in idx]
reduced_length = copy.deepcopy(len(encoded_corpus))
# Add start/end tokens
if self.config.use_start_end:
encoded_corpus = [self._addStartEndToken(kf) for kf in encoded_corpus]
# Register the actual lengths before padding.
kernel_length_monitor.register([len(x) for x in encoded_corpus])
# pad sequences to sequence length
encoded_corpus = np.array([x + pad * (sequence_length - len(x)) for x in encoded_corpus])
if self.feature_encoder:
expanded_corpus = []
encoded_features = []
for dp, fvec in zip(encoded_corpus, training_features):
for fspace in extractor.extractors.keys():
if fspace in fvec and fvec[fspace]:
expanded_corpus.append(dp)
encoded_features.append(self.feature_tokenizer.TokenizeFeatureVector(fvec[fspace], fspace, self.feature_sequence_length))
shaped_corpus = [[src, feats] for src, feats in zip(expanded_corpus, encoded_features)]
else:
shaped_corpus = encoded_corpus
# Shuffle
if shuffle:
self.rngen.shuffle(shaped_corpus)
assert len(shaped_corpus) != 0, "Not enought data. All kernels have been rejected."
# Set corpus epoch parameters
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.config.steps_per_epoch
self.steps_per_epoch = self.config.steps_per_epoch
l.logger().info("{} kernels were rejected (larger than sequence_length)".format(initial_length - reduced_length))
l.logger().info(
"Loaded corpus of shape {}x{} multiplied by dupe factor: {} in {} ms.".format(
len(shaped_corpus),
sequence_length,
dupe_factor,
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
else:
# Set corpus epoch parameters
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.config.steps_per_epoch
self.steps_per_epoch = self.config.steps_per_epoch
elif self.config.datapoint_type == "statement":
## This branch is legacy data processing and does not support DDP.
if shuffle:
self.rngen.shuffle(encoded_corpus)
encoded_corpus = np.concatenate(encoded_corpus)
# encoded_corpus = np.tile(encoded_corpus, dupe_factor)
# Set corpus dimension parameters
self.steps_per_epoch = len(encoded_corpus) // (batch_size * sequence_length * dupe_factor)
assert self.steps_per_epoch != 0, "Not enought data. Use smaller sequence_length and/or batch_size"
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.steps_per_epoch
# clipped_corpus_length = dupe_factor * self.steps_per_epoch * batch_size * sequence_length
clipped_corpus_length = self.steps_per_epoch * batch_size * sequence_length
clipped_corpus = encoded_corpus[:clipped_corpus_length]
# shaped_corpus = np.split(clipped_corpus, batch_size * self.steps_per_epoch * dupe_factor, 0)
shaped_corpus = np.split(clipped_corpus, batch_size * self.steps_per_epoch, 0)
# Register the actual lengths before padding.
kernel_length_monitor.register([len(x) for x in shaped_corpus])
np_corpus = np.asarray(shaped_corpus)
assert np_corpus.ndim == 2, "Wrong dimensions for shaped_corpus: {}".format(np_corpus.shape)
assert np_corpus.shape[1] == sequence_length, "Second dimension is not equal to sequence length: {}".format(np_corpus.shape[1])
l.logger().info(
"Loaded corpus of {} tokens (clipped last {} tokens) in {} ms.".format(
humanize.intcomma(clipped_corpus_length),
humanize.intcomma(len(encoded_corpus) - clipped_corpus_length),
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
else:
raise ValueError("Unrecognized datapoint_type: {}".format(self.config.datapoint_type))
if environment.WORLD_RANK == 0:
kernel_length_monitor.plot()
if not self.pre_train:
for fm in feature_monitors.values():
fm.plot()
with open(path / corpus_file, 'wb') as outf:
pickle.dump(shaped_corpus, outf)
return shaped_corpus
def _maskCorpus(self,
corpus: np.array,
train_set: bool,
set_name: str,
path: pathlib.Path,
config = None,
)-> None:
"""
Entrypoint function that inserts masks or holes to the corpus.
Arguments:
corpus: [num_datapoints, sequence_length],
where num_datapoints = num_batches * dupe_factor * batch_size
Returns:
The masked corpus
"""
# Set-up self.dataset entry
self.dataset[set_name] = {
'file': [],
'txt' : [],
}
# Set up max predictions
if config is None:
config = self.config
max_predictions = self.training_opts.max_predictions_per_seq
else:
max_predictions = config.max_predictions_per_seq
# Apply dupe factor in stages to avoid stressing RAM.
# Limit has been set to 4GB.
single_item_bytes = self.estimatedSize(
1, self.training_opts.sequence_length, self.training_opts.max_predictions_per_seq
)
corpus_bytes = single_item_bytes * len(corpus) + sys.getsizeof(corpus)
# max_dupe is how many times (dupes) the corpus can fit into a dataset record file.
max_dupe = min((FLAGS.memory_limit * (1024**3)) // corpus_bytes, self.training_opts.dupe_factor)
assert max_dupe != 0, "Increase RAM limit to fit corpus."
iterations = self.training_opts.dupe_factor // max_dupe
remaining = self.training_opts.dupe_factor % max_dupe
def apply_dupe_factor(arr: np.array, iters: int) -> np.array:
if iters == 0:
return np.asarray([], dtype = arr.dtype)
start_len = len(arr)
arr = np.expand_dims(arr, 0) # 2D->3D
arr = np.repeat(arr, iters, 0) # -> Repeat 2D blocks over 3D space
arr = arr.reshape(iters * start_len, -1) # Flatten repetitive 2D blocks, into 2D array
return arr
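    # e.g. a (3, seq_len) corpus with iters = 2 becomes a (6, seq_len) array:
    # the three original rows followed by an identical copy of them.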
extended_corpus = apply_dupe_factor(corpus, iterations)
remaining_corpus = apply_dupe_factor(corpus, remaining)
l.logger().info("Estimated element size: {}. Dupe factor {} split into {} iterations of {} (plus {} remaining)".format(
humanize.naturalsize(single_item_bytes), self.training_opts.dupe_factor, iterations, max_dupe, remaining
)
)
pool = multiprocessing.Pool()
distribution = None
# Specify the desired masking routine
if config.HasField("hole"):
distribution = distributions.Distribution.FromHoleConfig(
config.hole, path, "hole_length_{}".format(set_name)
)
maskedSeq = lambda c: pool.imap_unordered(
functools.partial(self.hole_func,
train_set = train_set,
max_predictions = max_predictions,
pickled_distribution = pickle.dumps(distribution),
pickled_tokenizer = pickle.dumps(self.tokenizer),
training_opts = self.training_opts,
is_torch = self.is_torch,
),
c
)
elif config.HasField("mask_seq"):
distribution = distributions.Distribution.FromHoleConfig(
config.mask_seq, path, "mask_seq_length_{}".format(set_name)
)
maskedSeq = lambda c: pool.imap_unordered(
functools.partial(self.mask_seq_func,
train_set = train_set,
max_predictions = max_predictions,
pickled_distribution = pickle.dumps(distribution),
pickled_tokenizer = pickle.dumps(self.tokenizer),
training_opts = self.training_opts,
is_torch = self.is_torch,
),
c
)
elif config.HasField("mask"):
maskedSeq = lambda c: pool.imap_unordered(
functools.partial(self.mask_func,
train_set = train_set,
max_predictions = max_predictions,
config = config,
pickled_tokenizer = pickle.dumps(self.tokenizer),
training_opts = self.training_opts,
is_torch = self.is_torch,
),
c
)
else:
raise AttributeError("target predictions can only be mask or hole {}".format(self.config))
# Token frequency distribution monitor.
token_monitor = monitors.NormalizedFrequencyMonitor(path, "{}_token_distribution".format(set_name))
# Monitor counts target idxs of a hole as absolute index value.
abs_start_idx_monitor = monitors.FrequencyMonitor(path, "{}_abs_target_mask_idx".format(set_name))
# Monitors count of target indices (in percentile) that were hidden by a hole.
start_idx_monitor = monitors.FrequencyMonitor(path, "{}_target_mask_idx".format(set_name))
# Monitor counts all absolute indices hidden by a hole.
abs_idx_monitor = monitors.FrequencyMonitor(path, "{}_abs_target_mask_idx".format(set_name))
# Monitors count of indices (in percentile) that were hidden by a hole.
idx_monitor = monitors.FrequencyMonitor(path, "{}_mask_idx".format(set_name))
# Monitors if left or right direction was picked for a hole expansion.
direction_monitor = monitors.FrequencyMonitor(path, "{}_masking_direction".format(set_name))
if FLAGS.store_datasets_to_DB:
lm_db = lm_database.LMDatabase("sqlite:///{}".format(self.cache.path / "{}.db".format(set_name)))
## Core loop of masking.
masked_corpus = []
bar = tqdm.tqdm(total = len(corpus) * self.training_opts.dupe_factor, desc = "Masking datapoints")
kernel_idx = 0
try:
for iteration in range(iterations + 1):
masked_corpus = []
# Select between normal iterations or dupe factor residual and shuffle
if iteration != iterations:
multiproc_corpus = maskedSeq(extended_corpus)
if self.training_opts.shuffle_corpus_contentfiles_between_epochs:
self.rngen.shuffle(extended_corpus)
elif remaining != 0:
multiproc_corpus = maskedSeq(remaining_corpus)
if self.training_opts.shuffle_corpus_contentfiles_between_epochs:
self.rngen.shuffle(remaining_corpus)
else:
continue
# Do parallel masking over corpus
for kernel, masked_idxs in multiproc_corpus:
if distribution:
distribution.register([mid.hole_length for mid in masked_idxs])
try:
if self.is_torch:
actual_length = np.where(kernel['original_input'] == self.tokenizer.padToken)[0][0]
else:
actual_length = np.where(kernel.original_input == self.tokenizer.padToken)[0][0]
except IndexError:
actual_length = len(kernel['original_input'])
token_monitor.register([
self.tokenizer.decoder[int(x)]
for x in kernel['input_ids'] if x != self.tokenizer.padToken]
)
for hole in masked_idxs:
hole_idx = hole.pos_index
selected_idx = hole.pos_index
if hole.extend_left:
selected_idx += hole.hole_length - 1 if hole.hole_length != 0 else 0
abs_start_idx_monitor.register(selected_idx)
start_idx_monitor.register(int(2 * round(100.0 * (selected_idx / actual_length) / 2.0)))
abs_idx_monitor.register([hole_idx + i for i in range(hole.hole_length)])
idx_monitor.register([int(2 * round(100.0 * ((hole_idx + i) / actual_length) / 2.0)) for i in range(hole.hole_length)])
direction_monitor.register(1 if hole.extend_left else 0)
masked_corpus.append(kernel)
bar.update(1)
kernel_idx += 1
if kernel_idx == 1:
self.LogBatchTelemetry(
self.training_opts.batch_size, self.training_opts.sequence_length,
max_predictions, self.steps_per_epoch, self.num_epochs
)
if FLAGS.store_datasets_to_DB:
with lm_db.Session(commit = True) as s:
count = lm_db.count
for idx, kernel in enumerate(masked_corpus):
s.add(
lm_database.LMInstance(**lm_database.LMInstance.FromArgs(
id = count + idx,
original_input = self.tokenizer.tokensToString(kernel['original_input'], ignore_token = self.tokenizer.padToken),
input_ids = self.tokenizer.tokensToString(kernel['input_ids'], ignore_token = self.tokenizer.padToken),
masked_lm_lengths = kernel['masked_lm_lengths'],
masked_lm_predictions = [self.tokenizer.tokensToString([x]) for x in kernel['mask_labels'] if x != -100],
))
)
# write masked_corpus before flushing the list
self.dataset[set_name]['file'].append(
path / "{}_{}.{}".format(set_name, iteration, self.file_extension)
)
self.dataset[set_name]['txt'].append(
path / "{}_{}.txt".format(set_name, iteration)
)
self._saveCorpusRecord({
'corpus': masked_corpus,
'file' : path / "{}_{}.{}".format(set_name, iteration, self.file_extension),
'txt' : path / "{}_{}.txt".format(set_name, iteration)
})
pool.close()
except KeyboardInterrupt as e:
pool.terminate()
raise e
except Exception as e:
pool.terminate()
raise e
if distribution:
distribution.plot()
token_monitor.plot()
start_idx_monitor.plot()
idx_monitor.plot()
direction_monitor.plot()
return
def estimatedSize(self, batch_size, sequence_length, max_predictions_per_seq):
"""
Calculate estimated size of single training example as a dictionary.
"""
return (
2 * np.zeros([batch_size, 1], dtype = np.int64).nbytes +
5 * np.zeros([batch_size, sequence_length], dtype = np.int64).nbytes +
2 * np.zeros([batch_size, max_predictions_per_seq], dtype = np.int64).nbytes
)
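  # Worked example (illustrative sizes): with batch_size = 1,
  # sequence_length = 512 and max_predictions_per_seq = 20, int64 elements are
  # 8 bytes each, so the estimate is 2*8 + 5*512*8 + 2*20*8 = 20,816 bytes
  # (~20 KB) per training example.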
def LogBatchTelemetry(self,
batch_size: int,
sequence_length: int,
max_predictions_per_seq: int,
steps_per_epoch: int,
num_epochs: int,
) -> None:
"""Log analytics about the batch."""
if steps_per_epoch is not None and num_epochs is not None:
l.logger().info(
"Memory: {} per batch, {} per epoch, {} total.".format(
humanize.naturalsize(self.estimatedSize(1, sequence_length, max_predictions_per_seq) * batch_size, binary = True),
humanize.naturalsize(self.estimatedSize(1, sequence_length, max_predictions_per_seq) * batch_size * steps_per_epoch, binary = True),
humanize.naturalsize(self.estimatedSize(1, sequence_length, max_predictions_per_seq) * batch_size * steps_per_epoch * num_epochs, binary = True),
)
)
else:
l.logger().info(
"Memory: {} per batch.".format(
humanize.naturalsize(self.estimatedSize(1, sequence_length, max_predictions_per_seq) * batch_size, binary = True),
)
)
def _padToMaxPosition(self, input_sample):
"""
Pads a given sequence to the maximum allowed sequence length, which is max_position_embeddings
Arguments:
input_sample: np.array or list that represents a sequence
Returns:
padded sequence in np.array format
"""
return np.concatenate([input_sample,
np.array([self.tokenizer.padToken] *
(self.max_position_embeddings - len(input_sample)), dtype = np.int64)
])
def _addStartEndToken(self, inp: list) -> list:
"""
Inserts [START] and [END] tokens at the beginning and end of a sequence.
Arguments:
inp: input_sequence
Returns:
[START] + input_sequence + [END]
"""
assert len(inp) != 0, "Empty list provided."
assert self.tokenizer.padToken not in inp, "Use this function before padding a sequence!"
start = [self.tokenizer.startToken] if inp[0] != self.tokenizer.startToken else []
end = [self.tokenizer.endToken ] if inp[-1] != self.tokenizer.endToken else []
if isinstance(inp, np.ndarray):
inp = list(inp)
return start + inp + end
def GetShortSummary(self) -> str:
return (
"Data Generator: "
"\n"
f" dupe_factor: {self.training_opts.dupe_factor}"
"\n"
f" sequence_length: {self.training_opts.sequence_length}"
"\n"
f" batch_size: {self.training_opts.batch_size}"
"\n"
"LM config:"
"\n"
f" {self.config.hole if True else self.config.mask}"
"\n"
)
| 46,191 | 40.689531 | 164 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/from_pretrained.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pre-trained BenchPress instances.
In this mode, a checkpoint is fetched online and the model is only used
for interactive sampling.
"""
import os
import typing
import gdown
import shutil
import threading
import pathlib
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.samplers import sample_observers
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.proto import sampler_pb2
from deeplearning.benchpress.proto import benchpress_pb2
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import logging as l
from absl import app, flags
FLAGS = flags.FLAGS
PRETRAINED_MODELS = {
"base_opencl": {
'config' : "1Cr9I4b5mSZJgX9LqtC_38WRfEDkyJ9WO",
'tokenizer' : "14ZPYFgL-XT_Fknwmgp6nOatvLFS67QM1",
'checkpoint' : "1ncwxzR23_a6IQqt4F4gIgTeduggD_N9w",
}
}
class PreTrainedModel(object):
"""
Pre-trained instance wrapper for online checkpoint fetching
and sampling.
"""
@classmethod
def FromID(cls, name: str = "base_opencl") -> "PreTrainedModel":
if name not in PRETRAINED_MODELS:
raise ValueError("Pre-trained model {} does not exist. Available models: {}".format(name, ', '.join([x for x in PRETRAINED_MODELS.keys()])))
tdir = "/tmp/"
if FLAGS.local_filesystem:
tdir = FLAGS.local_filesystem
config_path = pathlib.Path(tdir) / "from_pretrained" / name/ "config.pbtxt"
tokenizer_path = pathlib.Path(tdir) / "from_pretrained" / name/ "tokenizer.pkl"
checkpoint_path = pathlib.Path(tdir) / "from_pretrained" / name/ "model-0.pt"
if environment.WORLD_RANK == 0:
config_path.parent.mkdir(exist_ok = True, parents = True)
if not config_path.exists():
gdown.download("https://drive.google.com/uc?id={}".format(PRETRAINED_MODELS[name]['config']), str(config_path))
if not tokenizer_path.exists():
gdown.download("https://drive.google.com/uc?id={}".format(PRETRAINED_MODELS[name]['tokenizer']), str(tokenizer_path))
if not checkpoint_path.exists():
gdown.download("https://drive.google.com/uc?id={}".format(PRETRAINED_MODELS[name]['checkpoint']), str(checkpoint_path))
model_config = pbutil.FromFile(config_path, benchpress_pb2.Instance()).language_model
os.environ["PWD"] = str(config_path.parent)
FLAGS.override_preprocessing = True
FLAGS.override_encoding = True
return PreTrainedModel(model_config, tokenizer_path, checkpoint_path)
@property
def tokenizer(self) -> tokenizers.TokenizerBase:
return self.language_model.tokenizer
def __init__(self,
config : model_pb2.Model,
tokenizer_path : tokenizers.TokenizerBase,
checkpoint : pathlib.Path,
):
"""
Instantiate a model.
Args:
config: A Model message.
Raises:
TypeError: If the config argument is not a Model proto.
      UserError: In case of an invalid config.
"""
self.language_model = language_models.Model(config)
if environment.WORLD_RANK == 0:
if not self.language_model.corpus.tokenizer_path.exists():
shutil.copyfile(tokenizer_path, self.language_model.corpus.tokenizer_path)
if not (self.language_model.cache.path / "checkpoints" / "backup_tokenizer.pkl").exists():
shutil.copyfile(tokenizer_path, self.language_model.cache.path / "checkpoints" / "backup_tokenizer.pkl")
if not (self.language_model.cache.path / "checkpoints" / "model-0.pt").exists():
shutil.copyfile(checkpoint, self.language_model.cache.path / "checkpoints" / "model-0.pt")
if not (self.language_model.cache.path / "checkpoints" / "checkpoint.meta").exists():
with open(self.language_model.cache.path / "checkpoints" / "checkpoint.meta", 'w') as outf:
outf.write("train_step: 0")
if environment.WORLD_SIZE > 1:
distrib.barrier()
if pytorch.num_gpus == 0:
l.logger().warn("No GPUs detected. This process is going to be *very* slow on the CPU.")
return
def Sample(self,
prompt: str,
batch_size: int = 1,
temperature: float = 0.6,
sample_workload_size: int = 1,
sample_indices_limit: int = None,
print_samples: bool = True,
seed: int = None,
) -> typing.Tuple[str, samples_database.Sample]:
"""
Get a string input, tokenize and sample the backend online for a full code.
Args:
prompt:
String input to the language model.
batch_size:
Batch size for model inference.
temperature:
Sampling temperature
sample_workload_size:
How many batches to generate.
sample_indices_limit:
Add a limit to how many tokens BenchPress will generate for a hole.
By default BenchPress generates tokens until it thinks a sequence is complete
        ([ENDHOLE] is generated). By setting this value, the generation loop is stopped
        once this threshold is surpassed.
"""
FLAGS.sample_workload_size = sample_workload_size
if sample_indices_limit is not None:
FLAGS.sample_indices_limit = sample_indices_limit
self.language_model.Create()
if "[START]" in prompt or "[END]" in prompt:
l.logger().error("Do not add [START] and [END] manually. They will be added automatically by the tokenizer.")
return ""
prompt = "[START]" + prompt + "[END]"
test_sampler = self.getTestSampler(prompt, batch_size, temperature, self.language_model.config.architecture.max_position_embeddings)
obs = [sample_observers.InMemorySampleSaver()]
if print_samples:
obs.append(sample_observers.PrintSampleObserver())
self.language_model.Sample(test_sampler, obs, num_batches = 1, seed = seed)
return [opencl.ClangFormat(x.text) for x in obs[0].samples], obs[0].samples
def getTestSampler(self,
prompt : str,
batch_size : int,
temperature : float,
sequence_length : int
) -> samplers.Sampler:
sampler_str = [
"start_text: \"{}\"".format(prompt),
"batch_size: {}".format(batch_size),
"sequence_length: {}".format(sequence_length),
"temperature_micros: {}".format(int(temperature * 10e6)),
]
mock_config = pbutil.FromString('\n'.join(sampler_str), sampler_pb2.Sampler())
sampler = samplers.Sampler(mock_config, sample_db_name = None)
if sampler.isFixedStr:
sampler.Specialize(self.tokenizer)
return sampler
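# Minimal usage sketch, kept as a comment so importing this module stays
# side-effect free. The "base_opencl" key exists in PRETRAINED_MODELS above;
# the prompt body and its [HOLE] placeholder are purely illustrative:
#
#   model = PreTrainedModel.FromID("base_opencl")
#   texts, samples = model.Sample(
#     "kernel void A(global int* a) { [HOLE] }",
#     batch_size  = 2,
#     temperature = 0.6,
#   )
#   for src in texts:
#     print(src)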
def main(*args, **kwargs) -> None:
return
def boot() -> None:
app.run(main)
return
th = threading.Thread(target = boot)
th.daemon = True
th.start()
| 7,766 | 38.426396 | 146 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/language_models.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The BenchPress language model."""
import os
import time
import shutil
import pathlib
import typing
import datetime
import humanize
import numpy as np
from deeplearning.benchpress.samplers import sample_observers as sample_observers_lib
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import commit
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import hidden_state
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.models import builders
from deeplearning.benchpress.models.keras_sequential import keras_sequential
from deeplearning.benchpress.models.tf_sequential import tf_sequential
from deeplearning.benchpress.models.tf_bert import tf_bert
from deeplearning.benchpress.models.torch_bert import torch_bert
from deeplearning.benchpress.models.incoder import incoder
from deeplearning.benchpress.proto import internal_pb2
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.preprocessors import opencl
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"num_train_steps",
None,
"Bypass num_train_steps provided by protobuf file."
)
flags.DEFINE_integer(
"num_pretrain_steps",
None,
"Bypass num_pretrain_steps provided by protobuf file."
)
flags.DEFINE_integer(
"num_epochs",
None,
"Bypass num_epochs provided by protobuf file."
)
flags.DEFINE_integer(
"sample_workload_size",
2048,
"Select size of workload samples for single sample step, per node."
)
class Model(object):
"""A BenchPress language model.
Please note model instances should be treated as immutable. Upon
instantiation, a model's properties are used to determine its hash. If you
modify a property after instantiation, the hash will be out of date, which
can lead to bad things happening.
"""
@property
def tokenizer(self) -> tokenizers.TokenizerBase:
return self.corpus.tokenizer
@property
def is_trained(self) -> bool:
return self.backend.is_trained
@property
def hidden_state_size(self) -> int:
return self.backend.hidden_state_size
def __init__(self, config: model_pb2.Model):
"""Instantiate a model.
Args:
config: A Model message.
Raises:
TypeError: If the config argument is not a Model proto.
      UserError: In case of an invalid config.
"""
# Error early, so that a cache isn't created.
if not isinstance(config, model_pb2.Model):
t = type(config).__name__
raise TypeError(f"Config must be a Model proto. Received: '{t}'")
self.config = model_pb2.Model()
# Validate config options.
self.config.CopyFrom(builders.AssertIsBuildable(config))
if FLAGS.num_train_steps:
self.config.training.num_train_steps = FLAGS.num_train_steps
if FLAGS.num_pretrain_steps:
self.config.training.num_pretrain_steps = FLAGS.num_pretrain_steps
if FLAGS.num_epochs:
self.config.training.num_epochs = FLAGS.num_epochs
# Initialize corpuses
self.corpus = corpuses.Corpus(config.corpus)
self.pre_train_corpus = None
if config.HasField("pre_train_corpus"):
self.pre_train_corpus = corpuses.Corpus(config.pre_train_corpus)
self.hash = self._ComputeHash(self.pre_train_corpus, self.corpus, self.config)
self._created = False
if environment.WORLD_RANK == 0:
self.cache = cache.mkcache("model", self.hash)
self.cache.path.mkdir(exist_ok = True, parents = True)
else:
while not cache.cachepath("model", self.hash).exists():
time.sleep(0.5)
self.cache = cache.mkcache("model", self.hash)
if environment.WORLD_RANK == 0:
# Create the necessary cache directories.
(self.cache.path / "checkpoints").mkdir(exist_ok=True)
(self.cache.path / "samples").mkdir(exist_ok=True)
# Create symlink to encoded corpus.
symlink = self.cache.path / "corpus"
if not symlink.is_symlink():
os.symlink(
os.path.relpath(
pathlib.Path(self.corpus.encoded.url[len("sqlite:///") :]).parent,
self.cache.path,
),
symlink,
)
if self.pre_train_corpus:
symlink = self.cache.path / "pre_train_corpus"
if not symlink.is_symlink():
os.symlink(
os.path.relpath(
pathlib.Path(self.pre_train_corpus.encoded.url[len("sqlite:///") :]).parent,
self.cache.path,
),
symlink,
)
# Create symlink to the tokenizer and create a backup inside checkpoints.
symlink = self.cache.path / "tokenizer"
if not symlink.is_symlink():
os.symlink(
os.path.relpath(self.corpus.tokenizer_path, self.cache.path), symlink
)
if (self.cache.path / "checkpoints" / "backup_tokenizer.pkl").exists():
shutil.copyfile(self.cache.path / "checkpoints" / "backup_tokenizer.pkl", self.corpus.tokenizer_path)
# Validate metadata against cache.
if self.cache.get("META.pbtxt"):
cached_meta = pbutil.FromFile(
pathlib.Path(self.cache["META.pbtxt"]), internal_pb2.ModelMeta()
)
# Exclude num_epochs and corpus location from metadata comparison.
config_to_compare = model_pb2.Model()
config_to_compare.CopyFrom(self.config)
config_to_compare.corpus.ClearField("contentfiles")
if config_to_compare.HasField("pre_train_corpus"):
config_to_compare.pre_train_corpus.ClearField("contentfiles")
config_to_compare.training.ClearField("num_epochs")
config_to_compare.training.ClearField("num_train_steps")
if config_to_compare.HasField("pre_train_corpus"):
config_to_compare.training.ClearField("num_pretrain_steps")
config_to_compare.training.ClearField("batch_size")
if config_to_compare.training.HasField("data_generator"):
config_to_compare.training.data_generator.ClearField("steps_per_epoch")
config_to_compare.training.data_generator.ClearField("validation_set")
# These fields should have already been cleared, but we'll do it again
# so that metadata comparisons don't fail when the cached meta schema
# is updated.
cached_to_compare = model_pb2.Model()
cached_to_compare.CopyFrom(cached_meta.config)
cached_to_compare.corpus.ClearField("contentfiles")
if cached_to_compare.HasField("pre_train_corpus"):
cached_to_compare.pre_train_corpus.ClearField("contentfiles")
cached_to_compare.training.ClearField("num_epochs")
cached_to_compare.training.ClearField("num_train_steps")
if cached_to_compare.HasField("pre_train_corpus"):
cached_to_compare.training.ClearField("num_pretrain_steps")
cached_to_compare.training.ClearField("batch_size")
if cached_to_compare.training.HasField("data_generator"):
cached_to_compare.training.data_generator.ClearField("steps_per_epoch")
cached_to_compare.training.data_generator.ClearField("validation_set")
if cached_to_compare.training.sequence_length != config_to_compare.training.sequence_length:
l.logger().warning("Mismatch between pre-trained and current config sequence_length!\
This can only be intended in BERT model!")
cached_to_compare.training.ClearField("sequence_length")
config_to_compare.training.ClearField("sequence_length")
if config_to_compare != cached_to_compare:
raise SystemError("Metadata mismatch: {} \n\n {}".format(config_to_compare, cached_to_compare))
self.meta = cached_meta
else:
self.meta = internal_pb2.ModelMeta()
self.meta.config.CopyFrom(self.config)
self._WriteMetafile()
## Store current commit
commit.saveCommit(self.cache.path)
self.backend = {
model_pb2.NetworkArchitecture.TENSORFLOW_SEQ: tf_sequential.tfSequential,
model_pb2.NetworkArchitecture.KERAS_SEQ: keras_sequential.kerasSequential,
model_pb2.NetworkArchitecture.TENSORFLOW_BERT: tf_bert.tfBert,
model_pb2.NetworkArchitecture.TORCH_BERT: torch_bert.torchBert,
model_pb2.NetworkArchitecture.INCODER_1B: incoder.Incoder1B,
model_pb2.NetworkArchitecture.INCODER_6B: incoder.Incoder6B,
}[config.architecture.backend](self.config, self.cache, self.hash)
hidden_state.setup_lm(self.backend)
l.logger().info("Initialized {} in {}".format(self.backend, self.cache.path))
return
def GetShortSummary(self) -> str:
return self.backend.GetShortSummary()
@staticmethod
def _ComputeHash(pre_train_corpus_ : corpuses.Corpus,
corpus_ : corpuses.Corpus,
config : model_pb2.Model,
) -> str:
"""Compute model hash.
The hash is computed from the ID of the corpus and the serialized
representation of the config proto. The number of epochs that the model is
trained for does not affect the hash, since we can share checkpoints
between different models if the only variable is the epoch count. E.g.
    if we have a model trained for 10 epochs, we can use the checkpoint as the
    starting point for training a model for 20 epochs.
Args:
corpus: A corpus instance.
config: A Model config proto.
Returns:
The unique model ID.
"""
config_to_hash = model_pb2.Model()
config_to_hash.CopyFrom(config)
config_to_hash.ClearField("pre_train_corpus")
config_to_hash.ClearField("corpus")
config_to_hash.training.ClearField("num_epochs")
config_to_hash.training.ClearField("num_train_steps")
config_to_hash.training.ClearField("batch_size")
if config_to_hash.training.HasField("data_generator"):
config_to_hash.training.data_generator.ClearField("steps_per_epoch")
config_to_hash.training.data_generator.ClearField("validation_set")
if pre_train_corpus_:
hash_list = [pre_train_corpus_.hash, corpus_.hash, config_to_hash.SerializeToString()]
else:
hash_list = [corpus_.hash, config_to_hash.SerializeToString()]
if FLAGS.custom_incoder_ckpt is not None:
hash_list.append(FLAGS.custom_incoder_ckpt)
return crypto.sha1_list(hash_list)
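  # Intent sketch: two configs that differ only in the cleared fields hash to
  # the same model directory, e.g. (hypothetical protos)
  #   cfg_a.training.num_epochs = 10
  #   cfg_b.training.num_epochs = 20  # same corpus and architecture otherwise
  #   Model._ComputeHash(None, corpus, cfg_a) == Model._ComputeHash(None, corpus, cfg_b)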
def Create(self) -> bool:
if self._created:
return False
self._created = True
self.corpus.Create()
if self.pre_train_corpus:
self.pre_train_corpus.Create(self.corpus.tokenizer)
if not (self.cache.path / "checkpoints" / "backup_tokenizer.pkl").exists():
shutil.copyfile(self.corpus.tokenizer_path, self.cache.path / "checkpoints" / "backup_tokenizer.pkl")
self.backend.Create(tokenizer = self.corpus.tokenizer)
return
def PreTrain(self, **kwargs) -> "Model":
"""
Pre-Train the model. Only supported for PyTorch BERT.
Returns:
The model instance.
Raises:
UnableToAcquireLockError: If the model is locked (i.e. there is another
process currently modifying the model).
"""
self.Create()
self.backend.PreTrain(self.pre_train_corpus, **kwargs)
pre_telemetry_logs = self.backend.pre_telemetry.EpochTelemetry()
l.logger().info(
"Pre-trained model for {} {} in {} ms. " "Training loss: {}."
.format(
pre_telemetry_logs[-1].epoch_num,
"steps" if isinstance(self.backend, tf_bert.tfBert) or isinstance(self.backend, torch_bert.torchBert) else "epochs",
humanize.intcomma(sum(t.epoch_wall_time_ms for t in pre_telemetry_logs)),
pre_telemetry_logs[-1].loss,
)
)
return self
def Train(self, **kwargs) -> "Model":
"""Train the model.
Returns:
The model instance.
Raises:
UnableToAcquireLockError: If the model is locked (i.e. there is another
process currently modifying the model).
"""
self.Create()
self.backend.Train(self.corpus, **kwargs)
telemetry_logs = self.backend.telemetry.EpochTelemetry()
l.logger().info(
"Trained model for {} {} in {} ms. " "Training loss: {}."
.format(
telemetry_logs[-1].epoch_num if FLAGS.select_checkpoint_step == -1 else telemetry_logs[FLAGS.select_checkpoint_step-1].epoch_num,
"steps" if isinstance(self.backend, tf_bert.tfBert) or isinstance(self.backend, torch_bert.torchBert) else "epochs",
humanize.intcomma(sum(t.epoch_wall_time_ms for t in telemetry_logs)),
telemetry_logs[-1].loss if FLAGS.select_checkpoint_step == -1 else telemetry_logs[FLAGS.select_checkpoint_step-1].loss,
)
)
return self
def Sample(
self,
sampler: 'samplers.Sampler',
sample_observers: typing.List[sample_observers_lib.SampleObserver],
seed: int = None,
num_batches: int = None,
) -> None:
"""Sample a model.
This method uses the observer model, returning nothing. To access the
samples produced, implement a SampleObserver and pass it in as an argument.
Sampling continues indefinitely until one of the sample observers returns
False when notified of a new sample.
If the model is not already trained, calling Sample() first trains the
model. Thus a call to Sample() is equivalent to calling Train() then
Sample().
Args:
sampler: The sampler to sample using.
sample_observers: A list of SampleObserver objects that are notified of
new generated samples.
seed: A numeric value to seed the RNG with. If not present, the RNG is
seeded randomly.
Raises:
UserError: If called with no sample observers.
UnableToAcquireLockError: If the model is locked (i.e. there is another
process currently modifying the model).
InvalidStartText: If the sampler start text cannot be encoded.
InvalidSymtokTokens: If the sampler symmetrical depth tokens cannot be
encoded.
"""
if not sample_observers:
raise ValueError("Cannot sample without any observers")
self.Create()
sampler.Create()
epoch = self.backend.telemetry.EpochTelemetry()[-1].epoch_num
sample_start_time = datetime.datetime.utcnow()
if environment.WORLD_RANK == 0:
(self.cache.path / "samples" / sampler.hash).mkdir(exist_ok = True)
tokenizer = self.corpus.tokenizer
if sampler.isFixedStr and not sampler.is_active:
sampler.Specialize(tokenizer)
elif sampler.is_live:
start_text = [str(input("Live Feed: "))]
while True:
try:
start_text.append(str(input()))
except EOFError:
break
sampler.start_text = '\n'.join(start_text)
sampler.Specialize(tokenizer)
self.backend.InitSampling(sampler, seed, self.corpus)
[obs.Specialize(self, sampler) for obs in sample_observers]
if isinstance(self.backend, tf_bert.tfBert) or isinstance(self.backend, torch_bert.torchBert) or isinstance(self.backend, incoder.Incoder):
sample_batch = lambda : self._SampleLMBatch(sampler, tokenizer, sample_observers, epoch)
elif isinstance(self.backend, tf_sequential.tfSequential) or isinstance(self.backend, keras_sequential.kerasSequential):
sample_batch = lambda : self._SampleSeqBatch(sampler, tokenizer, sample_observers, epoch)
else:
raise ValueError("Unrecognized backend.")
try:
seq_count, cont, compiled = 0, True, 0
nb = 0
while cont:
if num_batches and nb >= num_batches:
break
nb+=1
cont, s, c = sample_batch()
seq_count += s
compiled += c
if sampler.is_live:
start_text = [str(input("Live Feed: "))]
while True:
try:
start_text.append(str(input()))
except EOFError:
break
sampler.start_text = '\n'.join(start_text)
sampler.Specialize(tokenizer)
except KeyboardInterrupt:
l.logger().info("Wrapping up sampling...")
except Exception as e:
raise e
if environment.WORLD_RANK == 0:
for obs in sample_observers:
obs.endSample()
if isinstance(self.backend, torch_bert.torchBert) and sampler.is_active:
self.backend.sample.data_generator.samples_cache_obs.endSample()
time_now = datetime.datetime.utcnow()
l.logger().info( "Produced {} samples at a rate of {} ms / sample. Session's compilation rate was {}%"
.format(
humanize.intcomma(seq_count),
humanize.intcomma(int(1000 * ((time_now - sample_start_time) / max(seq_count, 1)).total_seconds())),
round(100 * ((compiled / seq_count if seq_count > 0 else 0)), 3),
)
)
return
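  # Usage sketch (hypothetical objects; assumes a trained model and a sampler
  # proto). Sampling stops early if any observer's OnSample() returns False:
  #
  #   observers = [sample_observers_lib.InMemorySampleSaver(),
  #                sample_observers_lib.PrintSampleObserver()]
  #   model.Sample(my_sampler, observers, seed = 123, num_batches = 1)
  #   kernels = [s.text for s in observers[0].samples]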
def _SampleLMBatch(self,
sampler: 'samplers.Sampler',
tokenizer: tokenizers.TokenizerBase,
sample_observers: typing.List[sample_observers_lib.SampleObserver],
epoch: int,
) -> bool:
"""
Run a sampling iteration over BERT models.
"""
start_time = datetime.datetime.utcnow()
seq_count = 0
compiled = 0
self.backend.InitSampleBatch(sampler, workload_size = FLAGS.sample_workload_size // environment.WORLD_SIZE)
try:
org_inputs, input_ids, samples, indices = self.backend.SampleNextIndices(sampler)
except StopIteration:
return False, seq_count, compiled
if not samples:
      # An empty return means the model has not produced anything that can be stored.
      # This 'if' accommodates active sampling, which is very selective.
return True, seq_count, compiled
continue_sampling = True
if environment.WORLD_RANK == 0:
assert len(org_inputs) == len(input_ids) == len(samples) == len(indices), "Length mismatch, {}-{}-{}-{}".format(len(org_inputs), len(input_ids), len(samples), len(indices))
for org, inp, sample, idxs in zip(org_inputs, input_ids, samples, indices):
src = self.tokenizer.ArrayToCode(sample, with_formatting = True)
try:
stdout = opencl.Compile(src)
compile_flag = True
compiled += 1
features = extractor.ExtractRawFeatures(src)
except ValueError:
compile_flag = False
features = ""
end_time = datetime.datetime.utcnow()
sample = model_pb2.Sample(
train_step = epoch,
text = src,
sample_indices = ','.join([self.tokenizer.decoder[idx].replace('\n', '\\n') for idx in idxs]).replace('\n', '\\n'),
encoded_sample_indices = ','.join([str(idx) for idx in idxs]),
original_input = self.tokenizer.tokensToString(org, with_formatting = False, ignore_token = self.tokenizer.padToken),
sample_feed = self.tokenizer.tokensToString(inp, with_formatting = False, ignore_token = self.tokenizer.padToken),
encoded_text = ",".join([str(x) for x in sample]),
sample_start_epoch_ms_utc = int(start_time.strftime("%s%f")),
sample_time_ms = int(round(1000 * ((end_time - start_time) / len(samples)).total_seconds())),
wall_time_ms = int(round(1000 * ((end_time - start_time) / len(samples)).total_seconds())),
feature_vector = features,
num_tokens = np.where(sample == self.tokenizer.padToken)[0][0] if self.tokenizer.padToken in sample else len(sample),
compile_status = compile_flag,
categorical_sampling = self.backend.samplesWithCategorical(),
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
# Notify sample observers.
continue_sampling &= all(
[obs.OnSample(sample) for obs in sample_observers]
)
seq_count += 1
if environment.WORLD_SIZE > 1:
_ = distrib.broadcast(str(continue_sampling))
else:
status = distrib.broadcast()
if status == "True":
continue_sampling = True
elif status == "False":
continue_sampling = False
else:
raise OSError("Broken distributed message: '{}'".format(status))
return continue_sampling, seq_count, compiled
def _SampleSeqBatch(
self,
sampler: 'samplers.Sampler',
tokenizer: tokenizers.TokenizerBase,
sample_observers: typing.List[sample_observers_lib.SampleObserver],
epoch: int,
) -> bool:
"""
Run a single iteration of the batched sample inner-loop for sequential models.
"""
start_time = datetime.datetime.utcnow()
self.backend.InitSampleBatch(sampler)
samples_in_progress = [
sampler.tokenized_start_text.copy() for _ in range(sampler.batch_size)
]
    done = np.zeros(sampler.batch_size, dtype=bool)
wall_time_start = start_time
seq_count = 0
compiled = 0
# The return value of this method. If any of the sample_observers return
# False, this value is set to False.
continue_sampling = True
# Sampling loop. Continues until all samples in the batch are done.
while not done.all():
indices, _ = self.backend.SampleNextIndices(sampler, done)
# Iterate over all samples in batch to determine whether they're
# done.
for i in range(len(indices)):
if done[i]:
continue
for index in indices[i]:
samples_in_progress[i].append(tokenizer.decoder[index])
if sampler.SampleIsComplete(samples_in_progress[i]):
end_time = datetime.datetime.utcnow()
sample_kernel = [x for x in samples_in_progress[i]]
features = extractor.ExtractRawFeatures(''.join(samples_in_progress[i]))
done[i] = 1
try:
stdout = opencl.Compile(''.join(samples_in_progress[i]))
compile_flag = True
compiled += 1
except ValueError:
compile_flag = False
sample = model_pb2.Sample(
train_step = epoch,
text = ''.join(samples_in_progress[i]),
sample_indices = "",
encoded_sample_indices = "",
sample_feed = sampler.start_text,
encoded_text = ",".join([str(tokenizer.vocab[x]) for x in sample_kernel]),
sample_start_epoch_ms_utc = int(start_time.strftime("%s%f")),
sample_time_ms = int(round(1000 * ((end_time - start_time) / sampler.batch_size).total_seconds())),
wall_time_ms = int(round(1000 * ((end_time - start_time) / sampler.batch_size).total_seconds())),
feature_vector = features,
num_tokens = len(samples_in_progress[i]),
compile_status = compile_flag,
categorical_sampling = self.backend.samplesWithCategorical(),
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
# Notify sample observers.
continue_sampling &= all(
[obs.OnSample(sample) for obs in sample_observers]
)
if sampler.is_live and self.backend.feature_encoder:
print(sample.feature_vector)
seq_count += 1
# Wall sample time is the difference between the end of the previous
# sample and the end of the current sample.
wall_time_start = datetime.datetime.utcnow()
break
return continue_sampling, seq_count, compiled
def EncodeInputs(self, src: typing.List[str]) -> np.array:
"""
According to each LM's rules, encode a list of source codes to encoded arrays
ready to be fed into the model.
Args:
src: List of source codes.
Returns:
A list of encoded numpy arrays.
"""
return self.backend.EncodeInputs(src)
def ExtractHidden(self, encoded: typing.List[np.array]) -> np.array:
"""
Extract hidden state from backend language model.
Args:
      encoded: A list of encoded sequences that will be provided to the LM.
Returns:
The hidden state of the provided inputs.
"""
return self.backend.ExtractHidden(encoded)
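  # Pipeline sketch (hypothetical kernel string): hidden states for raw source
  # are obtained by chaining the two helpers above, e.g.
  #   enc    = model.EncodeInputs(["kernel void A(global int* a) { a[0] = 1; }"])
  #   states = model.ExtractHidden(enc)  # layout depends on the active backend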
def SamplerCache(self, sampler: 'samplers.Sampler') -> pathlib.Path:
"""Get the path to a sampler cache.
Args:
sampler: A Sampler instance.
Returns:
A path to a directory. Note that this directory may not exist - it is
created only after a call to Sample().
"""
return self.cache.path / "samples" / sampler.hash
def _WriteMetafile(self) -> None:
pbutil.ToFile(self.meta, pathlib.Path(self.cache.keypath("META.pbtxt")))
def InferenceManifest(self) -> typing.List[pathlib.Path]:
"""Return the list of files which are required for model inference.
Returns:
A list of absolute paths.
"""
return sorted(
[self.cache.path / "tokenizer", self.cache.path / "META.pbtxt",]
+ self.backend.InferenceManifest()
)
def __repr__(self) -> str:
"""String representation."""
return f"model[{self.hash}]"
def __eq__(self, rhs) -> bool:
if not isinstance(rhs, Model):
return False
return rhs.hash == self.hash
def __ne__(self, rhs) -> bool:
return not self.__eq__(rhs)
| 26,387 | 38.621622 | 178 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/backends.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural network backends for language models."""
import typing
import numpy as np
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
class BackendBase(object):
"""The base class for a language model backend.
A language model backend encapsulates all of the neural network logic.
"""
def __init__(
self,
config: model_pb2.Model,
fs_cache: cache.FSCache,
hash: str,
tokenizer: tokenizers.TokenizerBase = None,
**kwargs,
):
self.config = config
self.cache = fs_cache
self.hash = hash
self.tokenizer = tokenizer
## Legacy function to support lazy creation of corpus
def Create(self, tokenizer: tokenizers.TokenizerBase) -> None:
self.tokenizer = tokenizer
def PreTrain(self, corpus: 'corpuses.Corpus', **extra_kwargs) -> None:
"""Pre-train the backend"""
raise NotImplementedError("pre-training is only supported in PyTorch BERT.")
def Train(self, corpus: 'corpuses.Corpus', **extra_kwargs) -> None:
"""Train the backend."""
raise NotImplementedError("Abstract Class.")
def TrainBatch(self, batch) -> None:
"""Incrementally train language model on a batch of data."""
raise NotImplementedError("Abstract Class.")
def InitSampling(
self, sampler: 'samplers.Sampler', seed: typing.Optional[int] = None
) -> None:
"""Initialize backend for sampling."""
raise NotImplementedError("Abstract Class.")
def InitSampleBatch(self, sampler: 'samplers.Sampler') -> None:
"""Begin a new sampling batch. Only called after InitSampling()."""
raise NotImplementedError("Abstract Class.")
def SampleNextIndices(
self, sampler: 'samplers.Sampler', done: np.ndarray, tokenizer = None
) -> np.ndarray:
"""Sample the next indices for the current sample batch.
Returns:
A numpy array of int32 values with shape (batch_size,).
"""
raise NotImplementedError("Abstract Class.")
def SampleBatch(self, batch) -> np.ndarray:
"""Specifically sample a requested batch of data."""
raise NotImplementedError("Abstract Class.")
def EncodeInputs(self, src: typing.List[str]) -> np.array:
"""Encode text inputs to numpy arrays."""
raise NotImplementedError("Abstract Class.")
def ExtractHidden(self, encoded: typing.List[np.array]) -> np.array:
"""Extract Hidden State from Language Model"""
raise NotImplementedError("Abstract Class")
def GetEncoderModule(self, **kwargs) -> torch.nn.Module:
"""Return the internal torch module of an architecture."""
raise NotImplementedError("Abstract class")
def GetDecoderModule(self, **kwargs) -> torch.nn.Module:
"""Return a decoder version of LM's decoder."""
raise NotImplementedError("Abstract class")
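# Subclassing sketch (illustrative only; concrete backends such as
# torch_bert.torchBert override far more of this interface):
#
#   class MyBackend(BackendBase):
#     def Train(self, corpus, **extra_kwargs) -> None:
#       ...  # fit the network on the encoded corpus
#     def SampleNextIndices(self, sampler, done, tokenizer = None) -> np.ndarray:
#       ...  # return an int32 array of shape (batch_size,)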
| 3,511 | 34.12 | 80 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_sequential/data_generator.py | """This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This modules
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import sys
import time
import typing
import copy
import humanize
import numpy as np
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.util import logging as l
class DataBatch(typing.NamedTuple):
"""An <X,y> data tuple used for training one batch."""
X: np.array
y: np.array
@property
def sizeof_batch(self):
return sys.getsizeof(self) + self.X.nbytes + self.y.nbytes
def LogBatchTelemetry(self,
steps_per_epoch: int,
num_epochs: int,
) -> None:
"""Log analytics about the batch."""
l.logger().info("Step shape: X: {}, y" ": {}.".format(self.X.shape, self.y.shape))
l.logger().info(
"Memory: {} per batch, {} per epoch, {} total.".format(
humanize.naturalsize(self.sizeof_batch, binary = True),
humanize.naturalsize(self.sizeof_batch * steps_per_epoch, binary = True),
humanize.naturalsize(self.sizeof_batch * steps_per_epoch * num_epochs, binary = True),
)
)
return
class TensorflowBatchGenerator(object):
def __init__(
self, corpus: "corpuses.Corpus", training_opts: model_pb2.TrainingOptions
):
self.corpus = corpus
self.training_opts = training_opts
# Lazily instantiated.
self.original_encoded_corpus = None
self.encoded_corpus = None
self.num_batches = 0
self.batches = None
self.CreateBatches()
self.batches[0].LogBatchTelemetry(self.num_batches, self.training_opts.num_epochs)
return
def CreateBatches(self) -> None:
start_time = time.time()
self.i = 0
if self.original_encoded_corpus is None:
self.original_encoded_corpus = self.corpus.GetTrainingData(
shuffle=self.training_opts.shuffle_corpus_contentfiles_between_epochs
)
self.encoded_corpus = np.concatenate(self.original_encoded_corpus)
batch_size = self.training_opts.batch_size
sequence_length = self.training_opts.sequence_length
# set corpus size and number of batches
self.num_batches = int(
len(self.encoded_corpus) / (batch_size * sequence_length)
)
if self.num_batches == 0:
raise ValueError(
"Not enough data. Use a smaller sequence_length and batch_size"
)
# split into batches
clipped_corpus_length = self.num_batches * batch_size * sequence_length
clipped_corpus = self.encoded_corpus[:clipped_corpus_length]
xdata = clipped_corpus
ydata = np.copy(clipped_corpus)
# Wrap-around.
ydata[:-1] = xdata[1:]
ydata[-1] = xdata[0]
self.batches = [
DataBatch(x, y)
for x, y in zip(
np.split(xdata.reshape(batch_size, -1), self.num_batches, 1),
np.split(ydata.reshape(batch_size, -1), self.num_batches, 1),
)
]
l.logger().info(
"Encoded corpus of {} tokens (clipped last {} tokens) in {} ms.".format(
humanize.intcomma(clipped_corpus_length),
humanize.intcomma(len(self.encoded_corpus) - clipped_corpus_length),
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
return
def NextBatch(self) -> DataBatch:
"""Fetch next batch.
Returns:
X, Y DataBatch.
"""
batch = self.batches[self.i]
self.i += 1
assert 0 <= self.i <= self.num_batches
return batch
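  # Shape sketch (hypothetical numbers): with batch_size = 2 and
  # sequence_length = 4, a 16-token corpus gives num_batches = 2 and each
  # DataBatch holds X and y of shape (2, 4), where y is X shifted left by one
  # token (next-token prediction targets, wrapping around at the corpus end).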
| 3,732 | 30.635593 | 100 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_sequential/helper.py |
import tensorflow_addons as tfa
import tensorflow_probability as tfp
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import tf as local_tf
tf = local_tf.tf
class CustomInferenceHelper(tfa.seq2seq.sampler.TrainingSampler):
"""An inference helper that takes a seed text"""
def __init__(self, seed_length, embedding, temperature):
super(CustomInferenceHelper, self).__init__(time_major=False)
self._seed_length = seed_length
self._xlate = embedding
self.softmax_temperature = temperature
def initialize(self, inputs, sequence_length, name=None):
return super(CustomInferenceHelper, self).initialize(inputs = inputs,
sequence_length = sequence_length,
mask = None
)
# def sample(self, time, outputs, state, name=None):
def sample(self, time, outputs, state):
if self.softmax_temperature is not None:
outputs = outputs / self.softmax_temperature
sampler = tfp.distributions.Categorical(logits=outputs)
sample_ids = sampler.sample()
return sample_ids
## Only this function requires refactoring
# def next_inputs(self, time, outputs, state, sample_ids, name = "CIHNextInputs"):
def next_inputs(self, time, outputs, state, sample_ids):
# with tf.name_scope(name, "CIHNextInputs", [time, outputs, state]):
next_time = time + 1
finished = next_time >= self.sequence_length
all_finished = tf.reduce_all(finished)
seed_done = next_time >= self._seed_length
# def read_from_ta(inp):
# return inp.read(next_time)
next_inputs = tf.case( ## tf.case maybe deprecated
[
(
all_finished,
lambda: self.zero_inputs
),
(
tf.math.logical_not(seed_done),
lambda: tf.nest.map_structure(lambda inp: inp.read(next_time), self.input_tas),
),
],
default=lambda: tf.stop_gradient(
tf.nn.embedding_lookup(self._xlate, sample_ids)
),
)
return (finished, next_inputs, state)
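  # Behaviour sketch: while next_time is still inside the seed text the helper
  # feeds the ground-truth seed embeddings; once the seed is exhausted it feeds
  # the embedding of the token it just sampled, so decoding continues
  # autoregressively from the seed with temperature-scaled categorical sampling.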
| 2,171 | 33.47619 | 91 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_sequential/tf_sequential.py | # Copyright (c) 2016-2020 Chris Cummins.
#
# clgen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# clgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with clgen. If not, see <https://www.gnu.org/licenses/>.
"""BenchPress models using a Keras backend."""
import copy
import os
import pathlib
import time
import typing
import humanize
from deeplearning.benchpress.util import logging as l
import numpy as np
import progressbar
import tensorflow_addons as tfa
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.models import telemetry
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.util import tf as local_tf
from deeplearning.benchpress.models.tf_sequential.data_generator import TensorflowBatchGenerator
from absl import flags
FLAGS = flags.FLAGS
tf = local_tf.tf
flags.DEFINE_boolean(
"clgen_tf_backend_reset_inference_state_between_batches",
False,
"If set, reset the network state between sample batches. Else, the model "
"state is unaffected.",
)
flags.DEFINE_integer(
"clgen_tf_backend_tensorboard_summary_step_count",
25,
"The number of steps between writing tensorboard summaries.",
)
flags.DEFINE_integer(
"clgen_per_epoch_test_samples",
16,
"The number of samples to make at the end of each training epoch.",
)
class tfSequential(backends.BackendBase):
"""A model with an embedding layer, using a keras backend."""
@property
def hidden_state_size(self) -> int:
return self.config.architecture.neurons_per_layer
def __init__(self, *args, **kwargs):
"""Instantiate a model.
Args:
args: Arguments to be passed to BackendBase.__init__().
kwargs: Arguments to be passed to BackendBase.__init__().
"""
super(tfSequential, self).__init__(*args, **kwargs)
local_tf.initTensorflow()
# Attributes that will be lazily set.
self.cell = None
self.input_data = None
self.targets = None
self.lengths = None
self.seed_length = None
self.temperature = None
self.initial_state = None
self.logits = None
self.generated = None
self.loss = None
self.final_state = None
self.learning_rate = None
self.epoch = None
self.train_op = None
self.data_generator = None
self.inference_tf = None
self.inference_sess = None
self.inference_indices = None
self.inference_state = None
# Create the summary writer, shared between Train() and
# _EndOfEpochTestSample().
tf.compat.v1.disable_eager_execution()
tensorboard_dir = f"{self.cache.path}/tensorboard"
l.logger().info(
"Using tensorboard to log training progress. View progress using:\n"
f" $ tensorboard --logdir='{tensorboard_dir}'",
)
self.summary_writer = tf.compat.v1.summary.FileWriter(tensorboard_dir)
def samplesWithCategorical(self):
return True
def InitTfGraph(
self, sampler: typing.Optional[samplers.Sampler] = None
) -> "tf":
"""Instantiate a TensorFlow graph for training or inference.
The tensorflow graph is different for training and inference, so must be
reset when switching between modes.
Args:
sampler: If set, initialize the model for inference using the given
sampler. If not set, initialize model for training.
Returns:
The imported TensorFlow module.
"""
start_time = time.time()
# Quiet tensorflow.
# See: https://github.com/tensorflow/tensorflow/issues/1258
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Deferred importing of TensorFlow.
tf.compat.v1.disable_eager_execution()
from deeplearning.benchpress.models.tf_sequential import helper
cell_type = {
model_pb2.NetworkArchitecture.LSTM: tf.compat.v1.nn.rnn_cell.LSTMCell,
model_pb2.NetworkArchitecture.GRU: tf.compat.v1.nn.rnn_cell.GRUCell,
model_pb2.NetworkArchitecture.RNN: tf.compat.v1.nn.rnn_cell.BasicRNNCell,
}.get(self.config.architecture.neuron_type, None)
if cell_type is None:
raise NotImplementedError
# Reset the graph when switching between training and inference.
tf.compat.v1.reset_default_graph()
if sampler:
sequence_length = sampler.sequence_length
batch_size = sampler.batch_size
else:
sequence_length = self.config.training.sequence_length
batch_size = self.config.training.batch_size
vocab_size = self.tokenizer.vocab_size
cells_lst = []
for _ in range(self.config.architecture.num_layers):
cells_lst.append(cell_type(self.config.architecture.neurons_per_layer))
self.cell = cell = tf.keras.layers.StackedRNNCells(cells_lst)
self.input_data = tf.compat.v1.placeholder(
tf.int32, [batch_size, sequence_length]
)
self.targets = tf.compat.v1.placeholder(
tf.int32, [batch_size, sequence_length]
)
self.initial_state = self.cell.get_initial_state(batch_size = batch_size, dtype = tf.float32)
self.temperature = tf.Variable(1.0, trainable=False)
self.seed_length = tf.compat.v1.placeholder(name = "seed_length", dtype = tf.int32, shape = ())
if sampler:
self.lengths = tf.compat.v1.placeholder(tf.int32, [batch_size])
else:
self.lengths = tf.fill([batch_size], sequence_length)
scope_name = "rnnlm"
with tf.compat.v1.variable_scope(scope_name):
with tf.device("/cpu:0"):
embedding = tf.compat.v1.get_variable(
"embedding", [vocab_size, self.config.architecture.neurons_per_layer]
)
inputs = tf.nn.embedding_lookup(embedding, self.input_data)
if sampler:
decode_helper = helper.CustomInferenceHelper(
self.seed_length, embedding, self.temperature
)
else:
decode_helper = tfa.seq2seq.sampler.TrainingSampler(time_major=False)
decoder = tfa.seq2seq.BasicDecoder(
cell,
decode_helper,
tf.compat.v1.layers.Dense(vocab_size),
dtype = tf.float32,
)
outputs, self.final_state, _ = tfa.seq2seq.dynamic_decode(
decoder,
decoder_init_input = inputs,
decoder_init_kwargs = {
'initial_state': self.initial_state,
'sequence_length': self.lengths,
},
output_time_major=False,
impute_finished=True,
swap_memory=True,
scope=scope_name,
)
self.generated = outputs.sample_id
self.logits = outputs.rnn_output
    sequence_weights = tf.ones([batch_size, sequence_length])
    self.loss = tfa.seq2seq.sequence_loss(
      self.logits, self.targets, sequence_weights
)
self.learning_rate = tf.Variable(0.0, trainable=False)
self.epoch = tf.Variable(0, trainable=False)
trainable_variables = tf.compat.v1.trainable_variables()
# TODO(cec): Support non-adam optimizers.
grads, _ = tf.clip_by_global_norm(
tf.gradients(self.loss, trainable_variables, aggregation_method=2),
self.config.training.adam_optimizer.normalized_gradient_clip_micros / 1e6,
)
optimizer = tf.compat.v1.train.AdamOptimizer(self.learning_rate)
self.train_op = optimizer.apply_gradients(zip(grads, trainable_variables))
if not sampler:
# Create tensorboard summary writers for training progress.
tf.compat.v1.summary.scalar("loss", self.loss)
tf.compat.v1.summary.scalar("learning_rate", self.learning_rate)
tf.compat.v1.summary.scalar("epoch_num", self.epoch)
num_trainable_params = int(
np.sum([np.prod(v.shape) for v in tf.compat.v1.trainable_variables()])
)
l.logger().info(
"Instantiated TensorFlow graph with {} trainable parameters " "in {} ms."
.format(
humanize.intcomma(num_trainable_params),
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
return tf
def GetShortSummary(self) -> str:
return (
f"{self.config.architecture.neurons_per_layer}×"
f"{self.config.architecture.num_layers} "
f"{model_pb2.NetworkArchitecture.NeuronType.Name(self.config.architecture.neuron_type)} "
"network"
)
@property
def epoch_checkpoints(self) -> typing.Set[int]:
"""Get the set of epoch numbers which we have trained models for.
Note that Tensorflow checkpoint paths don't translate to actual files, but
rather a pair of <.index,.meta> files.
Returns:
A mapping of epoch numbers to paths.
"""
    if not (self.cache.path / "checkpoints" / "checkpoint").exists():
# No saver file means no checkpoints.
return {}
# Count the number of checkpoint files which TensorFlow has created.
checkpoint_files = [
f.stem
for f in (self.cache.path / "checkpoints").iterdir()
if f.name.startswith("checkpoint-") and f.name.endswith(".meta")
]
# The checkpoint paths are appended with the epoch number.
epoch_nums = [int(x.split("-")[-1]) for x in checkpoint_files]
return set(epoch_nums)
def GetParamsPath(
self, checkpoint_state
) -> typing.Tuple[typing.Optional[str], typing.List[str]]:
"""Return path to checkpoint closest to target num of epochs."""
# Checkpoints are saved with relative path, so we must prepend cache paths.
paths = [
str(self.cache.path / "checkpoints" / p)
for p in checkpoint_state.all_model_checkpoint_paths
]
# The checkpoint paths are appended with the epoch number.
epoch_nums = [int(x.split("-")[-1]) for x in paths]
diffs = [self.config.training.num_epochs - e for e in epoch_nums]
pairs = zip(paths, diffs)
positive_only = [p for p in pairs if p[1] >= 0]
return min(positive_only, key=lambda x: x[1])[0], paths
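  # Selection sketch (hypothetical epoch numbers): with checkpoints for epochs
  # [10, 20, 30] and config.training.num_epochs = 25, the diffs are
  # [15, 5, -5]; negative diffs are dropped, so checkpoint-20, the closest
  # epoch that does not exceed the target, is returned.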
def InferenceManifest(self) -> typing.List[pathlib.Path]:
"""Return the list of files which are required for model inference.
Returns:
A list of absolute paths.
"""
# The TensorFlow save file.
paths = [
self.cache.path / "checkpoints" / "checkpoint",
]
# Export only the TensorFlow checkpoint files for the target number of
# epochs.
paths += [
path.absolute()
for path in (self.cache.path / "checkpoints").iterdir()
if path.name.startswith(f"checkpoint-{self.config.training.num_epochs}")
]
# Include the epoch telemetry. This is not strictly required, but the files
# are small and contain useful information for describing the model, such as
# the total training time and model loss.
paths += [
path.absolute()
for path in (self.cache.path / "logs").iterdir()
if (
path.name.startswith("epoch_")
and path.name.endswith("_telemetry.pbtxt")
)
]
return sorted(paths)
def Train(
self,
corpus,
test_sampler: typing.Optional[samplers.Sampler] = None,
**unused_kwargs,
) -> None:
"""Locked training.
If there are cached epoch checkpoints, the one closest to the target number
of epochs will be loaded, and the model will be trained for only the
remaining number of epochs, if any. This means that calling this function
twice will only actually train the model the first time, and all subsequent
calls will be no-ops.
This method must only be called when the model is locked.
"""
del unused_kwargs
self.num_epochs = self.config.training.num_epochs
self.telemetry = telemetry.TrainingLogger(self.cache.path / "logs")
if self.is_trained:
return
if self.data_generator is None:
self.data_generator = TensorflowBatchGenerator(
corpus, self.config.training
)
tf = self.InitTfGraph()
# Create and merge the tensorboard summary ops.
merged = tf.compat.v1.summary.merge_all()
# training options
# TODO(cec): Enable support for multiple optimizers:
initial_learning_rate = (
self.config.training.adam_optimizer.initial_learning_rate_micros / 1e6
)
decay_rate = (
self.config.training.adam_optimizer.learning_rate_decay_per_epoch_micros
/ 1e6
)
# # resume from prior checkpoint
ckpt_path, ckpt_paths = None, None
if (self.cache.path / "checkpoints" / "checkpoint").exists():
checkpoint_state = tf.train.get_checkpoint_state(
self.cache.path / "checkpoints",
)
assert checkpoint_state
assert checkpoint_state.model_checkpoint_path
ckpt_path, ckpt_paths = self.GetParamsPath(checkpoint_state)
with tf.compat.v1.Session() as sess:
tf.compat.v1.global_variables_initializer().run()
# Keep all checkpoints.
saver = tf.compat.v1.train.Saver(
tf.compat.v1.global_variables(), max_to_keep=100, save_relative_paths=True
)
# restore model from closest checkpoint.
if ckpt_path:
l.logger().info("Restoring checkpoint {}".format(ckpt_path))
saver.restore(sess, ckpt_path)
# make sure we don't lose track of other checkpoints
if ckpt_paths:
saver.recover_last_checkpoints(ckpt_paths)
# Offset epoch counts by 1 so that they are in the range [1..n]
current_epoch = sess.run(self.epoch) + 1
max_epoch = self.config.training.num_epochs + 1
# Per-epoch training loop.
for epoch_num in range(current_epoch, max_epoch):
self.telemetry.EpochBeginCallback()
# decay and set learning rate
new_learning_rate = initial_learning_rate * (
(float(100 - decay_rate) / 100.0) ** (epoch_num - 1)
)
sess.run(tf.compat.v1.assign(self.learning_rate, new_learning_rate))
sess.run(tf.compat.v1.assign(self.epoch, epoch_num))
# TODO(cec): refactor data generator to a Python generator.
self.data_generator.CreateBatches()
l.logger().info("Epoch {}/{}:".format(epoch_num, self.config.training.num_epochs))
state = sess.run(self.initial_state)
# Per-batch inner loop.
bar = progressbar.ProgressBar(max_value=self.data_generator.num_batches)
last_log_time = time.time()
for i in bar(range(self.data_generator.num_batches)):
x, y = self.data_generator.NextBatch()
feed = {self.input_data: x, self.targets: y}
for j, (c, h) in enumerate(self.initial_state):
feed[c], feed[h] = state[j].c, state[j].h
summary, loss, state, _ = sess.run(
[merged, self.loss, self.final_state, self.train_op], feed
)
# Periodically write progress to tensorboard.
if i % FLAGS.clgen_tf_backend_tensorboard_summary_step_count == 0:
step = (epoch_num - 1) * self.data_generator.num_batches + i
self.summary_writer.add_summary(summary, step)
# Log the loss and delta.
l.logger().info("Loss: {:.6f}.".format(loss))
# Save after every epoch.
start_time = time.time()
global_step = epoch_num
checkpoint_prefix = self.cache.path / "checkpoints" / "checkpoint"
checkpoint_path = saver.save(
sess, str(checkpoint_prefix), global_step=global_step
)
l.logger().info(
"Saved checkpoint {} in {} ms."
.format(
checkpoint_path,
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
assert pathlib.Path(
f"{checkpoint_prefix}-{global_step}.index"
).is_file()
assert pathlib.Path(f"{checkpoint_prefix}-{global_step}.meta").is_file()
self.telemetry.EpochEndCallback(epoch_num, loss)
# If we have a sampler that we can use at the end of epochs, then
# break now to run the test sampler.
# This is confusing logic! Consider a refactor to simplify things.
if test_sampler:
break
else:
return
if test_sampler and FLAGS.clgen_per_epoch_test_samples > 0:
self._EndOfEpochTestSample(corpus, test_sampler, step, epoch_num)
self.Train(corpus, test_sampler=test_sampler)
def _EndOfEpochTestSample(
self, corpus, sampler: samplers.Sampler, step: int, epoch_num: int
):
"""Run sampler"""
tf.compat.v1.disable_eager_execution()
tokenizer = corpus.tokenizer
sampler.Specialize(tokenizer)
sampler.batch_size = 1
seed = 0
self.InitSampling(sampler, seed)
self.InitSampleBatch(sampler)
samples, stats = [], []
for i in range(FLAGS.clgen_per_epoch_test_samples):
      done = np.zeros(1, dtype=bool)
start_time = time.time()
sample_in_progress = sampler.tokenized_start_text.copy()
while not done[0]:
indices, _ = self.SampleNextIndices(sampler, done)
# Iterate over all samples in batch to determine whether they're
# done.
for index in indices[0]:
sample_in_progress.append(tokenizer.decoder[index])
if sampler.SampleIsComplete(sample_in_progress):
stats.append(
(len(sample_in_progress), int((time.time() - start_time) * 1000))
)
sample = "".join(sample_in_progress)
print(f"=== CLGEN SAMPLE ===\n\n{sample}\n")
samples.append(sample)
done[0] = True
break
samples_as_markdown = [
self.FormatCodeAsMarkdown(sample) for sample in samples
]
samples_tensor = tf.convert_to_tensor(samples_as_markdown, dtype=tf.string)
summary_op = tf.compat.v1.summary.text("samples", samples_tensor)
summary = self.inference_sess.run(summary_op)
self.summary_writer.add_summary(summary, step)
@staticmethod
def FormatCodeAsMarkdown(text: str) -> str:
return f"<pre>{text.strip()}</pre>"
def InitSampling(self,
sampler: samplers.Sampler,
seed: typing.Optional[int] = None,
*unused_args,
**unused_kwargs,
) -> None:
"""Initialize model for sampling."""
del unused_args
del unused_kwargs
tf.compat.v1.disable_eager_execution()
# Delete any previous sampling session.
if self.inference_tf:
del self.inference_tf
if self.inference_sess:
del self.inference_sess
self.inference_tf = self.InitTfGraph(sampler=sampler)
self.inference_sess = self.inference_tf.compat.v1.Session()
# Seed the RNG.
if seed is not None:
np.random.seed(seed)
self.inference_tf.compat.v1.set_random_seed(seed)
# If --clgen_tf_backend_reset_inference_state_between_batches, the state
# is reset at the beginning of every sample batch. Else, this is the only
# place it is initialized.
self.inference_state = self.inference_sess.run(
self.cell.get_initial_state(batch_size = sampler.batch_size, dtype = self.inference_tf.float32)
)
self.inference_tf.compat.v1.global_variables_initializer().run(
session=self.inference_sess
)
# Restore trained model weights.
saver = self.inference_tf.compat.v1.train.Saver(
self.inference_tf.compat.v1.global_variables()
)
checkpoint_state = self.inference_tf.train.get_checkpoint_state(
self.cache.path / "checkpoints",
)
# These assertions will fail if the model has no checkpoints. Since this
# should only ever be called after Train(), there is no good reason for
# these assertions to fail.
assert checkpoint_state
assert checkpoint_state.model_checkpoint_path
if FLAGS.select_checkpoint_step == -1:
saver.restore(self.inference_sess, checkpoint_state.model_checkpoint_path)
else:
saver.restore(self.inference_sess, str(self.cache.path / "checkpoints" / "checkpoint-{}".format(FLAGS.select_checkpoint_step)))
self.inference_sess.run(
tf.compat.v1.assign(self.temperature, sampler.temperature)
)
def InitSampleBatch(self, sampler: samplers.Sampler) -> None:
if FLAGS.clgen_tf_backend_reset_inference_state_between_batches:
self.inference_state = self.inference_sess.run(
self.cell.get_initial_state(batch_size = sampler.batch_size, dtype = self.inference_tf.float32)
)
self.inference_indices = np.tile(
sampler.encoded_start_text, [sampler.batch_size, 1]
)
def SampleNextIndices(self, sampler: samplers.Sampler, done: np.ndarray):
length = self.inference_indices.shape[1]
assert length < sampler.sequence_length
expanded_indices = np.zeros((sampler.batch_size, sampler.sequence_length))
expanded_indices[:, :length] = self.inference_indices
synthesized_lengths = np.full([sampler.batch_size], sampler.sequence_length)
synthesized_lengths[done] = 0
feed = {
self.initial_state: self.inference_state,
self.input_data: expanded_indices,
self.lengths: synthesized_lengths,
self.seed_length: length,
}
generated, self.inference_state = self.inference_sess.run(
[self.generated, self.final_state], feed
)
self.inference_indices = generated[:, -1].reshape((sampler.batch_size, 1))
if length > 1:
generated = generated[:, length - 1 :]
return generated, generated
def RandomizeSampleState(self) -> None:
tf.compat.v1.disable_eager_execution()
self.inference_state = [
tf.compat.v1.nn.rnn_cell.LSTMStateTuple(
st1 + np.random.normal(scale=0.2, size=np.shape(st1)),
st2 + np.random.normal(scale=0.2, size=np.shape(st2)),
)
for st1, st2 in self.inference_state
]
def ResetSampleState(self, sampler: samplers.Sampler, state, seed) -> None:
self.inference_state = copy.deepcopy(state)
self.inference_indices = np.tile(seed, [sampler.batch_size, 1])
def EvaluateSampleState(self, sampler: samplers.Sampler):
length = self.inference_indices.shape[1] - 1
if length == 0:
return
last_indices = self.inference_indices[:, -1:]
self.inference_indices = self.inference_indices[:, :-1]
expanded_indices = np.zeros((sampler.batch_size, sampler.sequence_length))
expanded_indices[:, :length] = self.inference_indices
synthesized_lengths = np.full([sampler.batch_size], length)
feed = {
self.initial_state: self.inference_state,
self.input_data: expanded_indices,
self.lengths: synthesized_lengths,
self.seed_length: length,
}
self.inference_state = self.inference_sess.run([self.final_state], feed)
self.inference_indices = last_indices
state_copy = copy.deepcopy(self.inference_state)
input_carry_copy = self.inference_indices[0]
return state_copy, input_carry_copy
@property
def is_trained(self) -> bool:
"""Determine if model has been trained."""
# Count the number of checkpoint files which TensorFlow has created.
checkpoint_files = [
f.stem
for f in (self.cache.path / "checkpoints").iterdir()
if f.name.startswith("checkpoint-") and f.name.endswith(".meta")
]
epoch_nums = [int(x.split("-")[-1]) for x in checkpoint_files]
return self.config.training.num_epochs in epoch_nums
| 23,354 | 34.493921 | 133 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/keras_sequential/data_generator.py | """This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This modules
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import sys
import time
import typing
import humanize
import numpy as np
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.util import logging as l
class DataBatch(typing.NamedTuple):
"""An <X,y> data tuple used for training one batch."""
X: np.array
y: np.array
@property
def sizeof_batch(self):
return sys.getsizeof(self) + self.X.nbytes + self.y.nbytes
def LogBatchTelemetry(self,
steps_per_epoch: int,
num_epochs: int,
) -> None:
"""Log analytics about the batch."""
l.logger().info("Step shape: X: {}, y" ": {}.".format(self.X.shape, self.y.shape))
l.logger().info(
"Memory: {} per batch, {} per epoch, {} total.".format(
humanize.naturalsize(self.sizeof_batch, binary = True),
humanize.naturalsize(self.sizeof_batch * steps_per_epoch, binary = True),
humanize.naturalsize(self.sizeof_batch * steps_per_epoch * num_epochs, binary = True),
)
)
return
class KerasBatchGenerator():
def AutoGenerator(
self, corpus: "corpuses.Corpus", training_opts: model_pb2.TrainingOptions
) -> typing.Generator[DataBatch, typing.Any, None]:
"""Determine and construct what we believe to be the best data generator.
The optimum generator will depend on the corpus, the amount of memory
available, and the vocabulary encoding.
Args:
corpus: A Corpus instance.
training_opts: A TrainingOptions proto.
Returns:
A generator suitable for use by a model's fit_generator() method.
"""
return self.BatchGenerator(corpus, training_opts)
def BatchGenerator(
self, corpus: "corpuses.Corpus", training_opts: model_pb2.TrainingOptions
) -> typing.Generator[DataBatch, typing.Any, None]:
"""A batch generator which lazily one-hot encodes the y vectors.
This reduces the memory overhead by only one-hot encoding the y vectors on a
per-batch basis. This is of course slower than one-hot encoding the entire
y corpus, but that requires more memory than is available on many systems for
a reasonable corpus.
Args:
corpus: A Corpus instance.
training_opts: A TrainingOptions proto.
Returns:
A generator suitable for use by a model's fit_generator() method.
"""
x, y, steps_per_epoch = self.GetTrainingCorpus(corpus, training_opts)
# Per-epoch outer loop.
epoch_num = 0
while True:
# Re-shuffle corpus if needed.
if epoch_num and training_opts.shuffle_corpus_contentfiles_between_epochs:
x, y, steps_per_epoch = self.GetTrainingCorpus(corpus, training_opts)
# Roll so that we don't need to reset model states over epochs.
x_epoch = np.split(np.roll(x, -epoch_num, axis=0), steps_per_epoch, axis=1)
y_epoch = np.split(np.roll(y, -epoch_num, axis=0), steps_per_epoch, axis=1)
# Per-batch inner loop.
for batch_num in range(steps_per_epoch):
batch = DataBatch(
X=x_epoch[batch_num],
# Lazy one-hot encoding.
y=self.OneHotEncode(y_epoch[batch_num], corpus.vocab_size),
)
if not batch_num and not epoch_num:
batch.LogBatchTelemetry(steps_per_epoch, training_opts.num_epochs)
yield batch
epoch_num += 1
return
def GetTrainingCorpus(
self, corpus: "corpuses.Corpus", training_opts: model_pb2.TrainingOptions
) -> typing.Tuple[np.ndarray, np.ndarray, int]:
"""Get the corpus to train over.
Args:
corpus: A Corpus instance.
training_opts: A TrainingOptions proto.
Returns:
An X, y pair of data for an epoch, and the number of steps in the epoch.
Raises:
UserError: If batch_size and sequence_length are too large for the corpus,
yielding no batches.
"""
start_time = time.time()
encoded_corpus = np.concatenate(corpus.GetTrainingData(
shuffle=training_opts.shuffle_corpus_contentfiles_between_epochs
))
corpus_length = len(encoded_corpus)
steps_per_epoch = (corpus_length - 1) // (
training_opts.batch_size * training_opts.sequence_length
)
if not steps_per_epoch:
raise ValueError(
f"Requested batch size ({training_opts.batch_size}) and "
f"sequence length ({training_opts.sequence_length}) are too large for "
f"corpus of size {corpus_length}."
)
clipped_corpus_length = (
steps_per_epoch * training_opts.batch_size * training_opts.sequence_length
)
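    # x holds the clipped corpus and y the same sequence offset by one token,
    # so each position is trained to predict the token that follows it.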
x = np.reshape(
encoded_corpus[:clipped_corpus_length],
[training_opts.batch_size, steps_per_epoch * training_opts.sequence_length],
)
y = np.reshape(
encoded_corpus[1 : clipped_corpus_length + 1],
[training_opts.batch_size, steps_per_epoch * training_opts.sequence_length],
)
l.logger().info(
"Encoded corpus of {} tokens (clipped last {} tokens) in {} ms.".format(
humanize.intcomma(clipped_corpus_length),
humanize.intcomma(corpus_length - clipped_corpus_length),
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
return x, y, steps_per_epoch
def OneHotEncode(self, indices: np.ndarray, vocabulary_size: int):
"""One-hot encode an array of vocabulary indices.
Args:
indices: A 1D array of vocabulary indices.
vocabulary_size: The size of the vocabulary.
Returns:
A 2D array of one-hot encoded tokens.
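      For example, vocabulary_size = 4 and indices = [1, 3] yields
      [[0, 1, 0, 0], [0, 0, 0, 1]].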
"""
return np.eye(vocabulary_size)[indices]
| 5,942 | 34.16568 | 100 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/keras_sequential/keras_sequential.py | # Copyright (c) 2016-2020 Chris Cummins.
#
# clgen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# clgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with clgen. If not, see <https://www.gnu.org/licenses/>.
"""BenchPress models using a Keras backend."""
import io
import pathlib
import typing
import numpy as np
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.models import telemetry
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.models import builders
from deeplearning.benchpress.models.keras_sequential.data_generator import KerasBatchGenerator
from absl import flags
import humanize
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
class kerasSequential(backends.BackendBase):
"""A model with an embedding layer, using a keras backend."""
def __init__(self, *args, **kwargs):
"""Instantiate a model.
Args:
args: Arguments to be passed to BackendBase.__init__().
kwargs: Arguments to be passed to BackendBase.__init__().
"""
super(kerasSequential, self).__init__(*args, **kwargs)
# Create the necessary cache directories.
(self.cache.path / "embeddings").mkdir(exist_ok=True)
# Attributes that will be lazily set.
self._training_model: typing.Optional["keras.models.Sequential"] = None
self._inference_model: typing.Optional["keras.models.Sequential"] = None
self._inference_batch_size: typing.Optional[int] = None
self.inference_indices = None
self.inference_model = None
def GetTrainingModel(self) -> "keras.models.Sequential":
"""Get the Keras model."""
if self._training_model:
return self._training_model
self._training_model = self.Train()
return self._training_model
def samplesWithCategorical(self):
return True
def Train(self, corpus, **unused_kwargs) -> "keras.models.Sequential":
"""Locked training.
If there are cached epoch checkpoints, the one closest to the target number
of epochs will be loaded, and the model will be trained for only the
remaining number of epochs, if any. This means that calling this function
twice will only actually train the model the first time, and all subsequent
calls will be no-ops.
This method must only be called when the model is locked.
Returns:
The trained Keras model.
"""
del unused_kwargs
model = builders.BuildKerasModel(self.config, self.tokenizer.vocab_size)
with open(self.cache.keypath("model.yaml"), "w") as f:
f.write(model.to_yaml())
model.compile(
loss="categorical_crossentropy",
optimizer=builders.BuildOptimizer(self.config),
)
# Print a model summary.
buf = io.StringIO()
model.summary(print_fn=lambda x: buf.write(x + "\n"))
l.logger().info("Model summary:\n{}".format(buf.getvalue()))
    # TODO(cec): Add a tokenizer.CreateVocabularyFile() method, with frequency
# counts for a given corpus.
def Escape(token: str) -> str:
"""Make a token visible and printable."""
if token == "\t":
return "\\t"
elif token == "\n":
return "\\n"
elif not token.strip():
return f"'{token}'"
else:
return token
if not (self.cache.path / "embeddings" / "metadata.tsv").is_file():
with open(self.cache.path / "embeddings" / "metadata.tsv", "w") as f:
for _, token in sorted(
self.tokenizer.decoder.items(), key=lambda x: x[0]
):
f.write(Escape(token) + "\n")
self.num_epochs = self.config.training.num_epochs
starting_epoch = 0
epoch_checkpoints = self.epoch_checkpoints
if len(epoch_checkpoints) >= self.num_epochs:
      # We have already trained a model to at least this number of epochs, so
      # simply load the weights from that epoch and call it a day.
l.logger().info( "Loading weights from {}"
.format(
epoch_checkpoints[self.num_epochs - 1]
)
)
model.load_weights(epoch_checkpoints[self.num_epochs - 1])
return model
# Now entering the point at which training is inevitable.
# with logutil.TeeLogsToFile("train", self.cache.path / "logs"):
# Deferred importing of Keras so that we don't have to activate the
# TensorFlow backend every time we import this module.
import keras
if epoch_checkpoints:
# We have already trained a model at least part of the way to our target
# number of epochs, so load the most recent one.
starting_epoch = len(epoch_checkpoints)
l.logger().info("Resuming training from epoch {}.".format(starting_epoch))
model.load_weights(epoch_checkpoints[-1])
callbacks = [
keras.callbacks.ModelCheckpoint(
str(self.cache.path / "checkpoints" / "{epoch:03d}.hdf5"),
verbose=1,
mode="min",
save_best_only=False,
),
keras.callbacks.TensorBoard(
str(self.cache.path / "embeddings"),
write_graph=True,
embeddings_freq=1,
embeddings_metadata={
"embedding_1": str(self.cache.path / "embeddings" / "metadata.tsv"),
},
),
self.telemetry.TrainingLogger(self.cache.path / "logs").KerasCallback(keras),
]
generator = KerasBatchGenerator()
steps_per_epoch = (corpus.encoded.token_count - 1) // (
self.config.training.batch_size * self.config.training.sequence_length
)
l.logger().info(
"Step counts: {} per epoch, {} left to do, {} total"
.format(
humanize.intcomma(steps_per_epoch),
humanize.intcomma((self.num_epochs - starting_epoch) * steps_per_epoch),
humanize.intcomma(self.num_epochs * steps_per_epoch),
)
)
model.fit_generator(
generator.AutoGenerator(corpus, self.config.training),
steps_per_epoch=steps_per_epoch,
callbacks=callbacks,
initial_epoch=starting_epoch,
epochs=self.num_epochs,
)
return model
  def GetInferenceModel(self, sampler: samplers.Sampler) -> "keras.models.Sequential":
"""Like training model, but with different batch size."""
if self._inference_model:
return self._inference_model
# Deferred importing of Keras so that we don't have to activate the
# TensorFlow backend every time we import this module.
import keras
l.logger().info("Building inference model.")
model = self.GetTrainingModel()
config = model.get_config()
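    # Rebuild the network from the training configuration but with a batch
    # input shape of (batch_size, 1), so that sampling can feed one token at a
    # time between predict() calls.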
l.logger().info("Sampling with batch size {}".format(sampler.batch_size))
config[0]["config"]["batch_input_shape"] = (sampler.batch_size, 1)
inference_model = keras.models.Sequential.from_config(config)
inference_model.trainable = False
inference_model.set_weights(model.get_weights())
self._inference_model = inference_model
self._inference_batch_size = sampler.batch_size
return inference_model
def InitSampling(
self, sampler: samplers.Sampler, seed: typing.Optional[int] = None
) -> None:
    self.inference_model = self.GetInferenceModel(sampler)
if seed is not None:
np.random.seed(seed)
def InitSampleBatch(self, sampler: samplers.Sampler) -> None:
self.inference_model.reset_states()
# Set internal states from seed text.
for index in sampler.encoded_start_text[:-1]:
x = np.array([[index]] * sampler.batch_size)
# input shape: (batch_size, 1)
self.inference_model.predict(x)
self.inference_indices = [
sampler.encoded_start_text[-1]
] * sampler.batch_size
def SampleNextIndices(self, sampler: samplers.Sampler, done: np.ndarray):
del done
result = np.zeros((sampler.batch_size, 1024))
for idx in range(1024):
# Predict the next index for the entire batch.
x = np.reshape(self.inference_indices, [sampler.batch_size, 1])
# Input shape: (batch_size, 1).
probabilities = self.inference_model.predict(x)
# Output shape: (batch_size, 1, vocab_size).
self.inference_indices = [
WeightedPick(p.squeeze(), sampler.temperature) for p in probabilities
]
result[:, idx] = self.inference_indices
return result
def InferenceManifest(self) -> typing.List[pathlib.Path]:
"""Return the list of files which are required for model inference.
Returns:
A list of absolute paths.
"""
raise NotImplementedError
@property
def epoch_checkpoints(self) -> typing.List[pathlib.Path]:
"""Get the paths to all epoch checkpoint files in order.
Remember that the returned list is zero-indexed, so the epoch number is
the array index plus one. E.g. The checkpoint for epoch 5 is
epoch_checkpoints[4].
Returns:
A list of paths.
"""
checkpoint_dir = pathlib.Path(self.cache.path) / "checkpoints"
return [
checkpoint_dir / x
for x in sorted(pathlib.Path(self.cache["checkpoints"]).iterdir())
]
@property
def is_trained(self) -> bool:
"""Return whether the model has previously been trained."""
return len(self.epoch_checkpoints) >= self.config.training.num_epochs
def WeightedPick(predictions: np.ndarray, temperature: float) -> int:
"""Make a weighted choice from a predictions array."""
predictions = np.log(np.asarray(predictions).astype("float64")) / temperature
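  # Temperatures below one sharpen the re-scaled distribution, temperatures
  # above one flatten it; the exponential and normalization below form a
  # softmax over the scaled log-probabilities, from which one token is drawn.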
predictions_exp = np.exp(predictions)
# Normalize the probabilities.
predictions = predictions_exp / np.sum(predictions_exp)
predictions = np.random.multinomial(1, predictions, 1)
return np.argmax(predictions)
| 9,992 | 35.075812 | 94 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/hooks.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
import pathlib
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.samplers import validation_database
from deeplearning.benchpress.util import logging as l
class tensorMonitorHook(object):
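  """Aggregates training tensors per logging step and dumps them to a JSON log and per-tensor plots under the cache path."""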
def __init__(self,
cache_path: pathlib.Path,
current_step: int,
step_freq: int,
flush_freq: int = None,
average: bool = True,
):
self.cache_path = cache_path
self.current_step = current_step
self.step_freq = step_freq
self.flush_freq = flush_freq
self.average = average
self.jsonfile = cache_path / "training.json"
self.tensors = []
self.plot_tensors = {}
self.epoch_tensors = {}
self.epch_loss = []
self.delay_checkpoint = True if current_step != 0 else False
self._initTensors()
self.monitor_func = [
self._tensor2JSON,
self._tensor2plot,
]
return
@property
def epoch_loss(self):
return sum(self.epch_loss) / len(self.epch_loss)
def step(self, **tensors):
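    """Register tensor values for one training step and flush the aggregated values to disk whenever the step frequency is reached."""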
for key, value in tensors.items():
if value is None:
continue
      if key in self.epoch_tensors and "num_" not in key and "val_" not in key:
        # Regular tensors are accumulated so they can be averaged over the
        # step frequency. Keys containing "num_" or "val_" register absolute
        # counters, so only their last registered value is kept (else branch).
        self.epoch_tensors[key] += value
      else:
        self.epoch_tensors[key] = value
self.current_step += 1
if self._step_triggered():
self._logTensors()
self.epoch_tensors = {}
return
def end_epoch(self, **tensors):
for key, value in tensors.items():
if value is None:
continue
self.epoch_tensors[key] = value
# if self._step_triggered():
self._logTensors()
self.epoch_tensors = {}
self.epch_loss = []
return
def _initTensors(self):
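    """When resuming from a checkpoint, reload previously logged tensors from training.json and drop any entries written past the current step."""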
if self.current_step > 0:
if self.jsonfile.exists():
with open(self.jsonfile, 'r') as js:
loaded_tensors = json.load(js)
if loaded_tensors[-1]['step'] > self.current_step:
# If previous sessions have written beyond current step, overwrite them.
back_index = -2
while loaded_tensors[back_index]['step'] > self.current_step:
back_index -= 1
self.tensors = loaded_tensors[:back_index + 1]
else:
self.tensors = loaded_tensors
for ch in self.tensors:
for k, v in ch.items():
if k == 'step':
continue
if k not in self.plot_tensors:
self.plot_tensors[k] = {'value': [], 'step': []}
self.plot_tensors[k]['value'].append(v)
self.plot_tensors[k]['step'].append(ch['step'])
else:
l.logger().error("Training json log-file not found. Will keep track from this point on.")
return
def _step_triggered(self):
if self.delay_checkpoint:
self.delay_checkpoint = False
return False
if (self.current_step) % self.step_freq == 0 or self.current_step - 1 == 0:
return True
return False
def _logTensors(self):
effective_step = self.current_step if self.current_step - 1 != 0 else 0
if self.average is True:
epoch_tensors = (self.epoch_tensors if effective_step == 0
else {k: (v / self.step_freq if not "num_" in k and not "val_" in k else v) for k, v in self.epoch_tensors.items()})
else:
epoch_tensors = (self.epoch_tensors if effective_step == 0
else {k: v for k, v in self.epoch_tensors.items()})
self.tensors.append(epoch_tensors)
self.tensors[-1]['step'] = effective_step
if 'total_loss' in epoch_tensors:
self.epch_loss.append(epoch_tensors['total_loss'])
for key, value in epoch_tensors.items():
if key == 'step':
continue
if key not in self.plot_tensors:
self.plot_tensors[key] = {'value': [], 'step': []}
self.plot_tensors[key]['value'].append(value)
self.plot_tensors[key]['step'].append(effective_step)
for func in self.monitor_func:
func()
return
def _tensor2JSON(self):
with open(self.jsonfile, 'w') as js:
json.dump(self.tensors, js, indent = 2, sort_keys = True)
return
def _tensor2plot(self):
for (key, value) in self.plot_tensors.items():
if key != "step":
plotter.SingleScatterLine(
x = value['step'],
y = value['value'],
title = key,
x_name = "Training Step",
y_name = key,
plot_name = key,
path = self.cache_path,
)
return
class validationSampleHook(object):
"""Real time storage hook for validation results"""
def __init__(self,
url,
tokenizer,
model_step,
):
self.tokenizer = tokenizer
self.val_db = validation_database.ValidationDatabase(url)
self.val_files = {}
self.val_id = self.val_db.count
self.model_step = model_step
self.mask_accuracy = [0, 0]
self.nsp_accuracy = [0, 0]
return
def step(self,
inputs,
outputs,
) -> None:
"""
Requested tensors are evaluated and their values are available
"""
seen_in_training = inputs['seen_in_training'].numpy()
original_input = inputs['original_input'].numpy()
masked_lm_lengths = inputs['masked_lm_lengths'].numpy()
input_ids = inputs['input_ids'].cpu().numpy()
input_mask = inputs['input_mask'].cpu().numpy()
next_sentence_labels = inputs['next_sentence_labels'].cpu().numpy()
mask_labels = inputs['mask_labels'].cpu().numpy()
pred_logits = outputs['prediction_logits'].cpu().numpy()
seq_rel_logits = outputs['seq_relationship_logits'].cpu().numpy()
batch_size = len(pred_logits)
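    # Mask labels use -100 for positions that were not masked; gather the
    # target ids, their positions and the argmax predictions only for the
    # genuinely masked positions of each batch element.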
masked_lm_ids = [[x for x in batch if x != -100] for batch in mask_labels]
masked_lm_positions = [[idx for idx, x in enumerate(batch) if x != -100] for batch in mask_labels]
masked_lm_predictions = [
[np.argmax(pred_logits[batch][x]) for x in masked_lm_positions[batch]]
for batch in range(batch_size)
]
next_sentence_predictions = [[np.argmax(x) for x in batch][-1] for batch in seq_rel_logits]
for target, prediction in zip(masked_lm_ids, masked_lm_predictions):
if target == prediction:
self.mask_accuracy[0] += 1
self.mask_accuracy[1] += 1
for target, prediction in zip(next_sentence_labels, next_sentence_predictions):
if target == prediction:
self.nsp_accuracy[0] += 1
self.nsp_accuracy[1] += 1
for b in range(batch_size):
f = validation_database.BERTValFile(
**validation_database.BERTValFile.FromArgs(
tokenizer = self.tokenizer,
id = self.val_id,
train_step = self.model_step,
seen_in_training = seen_in_training[b],
original_input = original_input[b],
input_ids = input_ids[b],
input_mask = input_mask[b],
masked_lm_positions = masked_lm_positions[b],
masked_lm_ids = masked_lm_ids[b],
masked_lm_weights = [],
masked_lm_lengths = masked_lm_lengths[b],
next_sentence_labels = next_sentence_labels[b],
masked_lm_predictions = masked_lm_predictions[b],
next_sentence_predictions = next_sentence_predictions[b],
)
)
if f.sha256 not in self.val_files:
self.val_files[f.sha256] = f
self.val_id += 1
# with self.val_db.Session(commit = True) as session:
# for b in range(batch_size):
# val_trace = validation_database.BERTValFile(
# **validation_database.BERTValFile.FromArgs(
# tokenizer = self.tokenizer,
# id = self.val_id,
# train_step = self.model_step,
# seen_in_training = seen_in_training[b],
# original_input = original_input[b],
# input_ids = input_ids[b],
# input_mask = input_mask[b],
# masked_lm_positions = masked_lm_positions[b],
# masked_lm_ids = masked_lm_ids[b],
# masked_lm_weights = [],
# masked_lm_lengths = masked_lm_lengths[b],
# next_sentence_labels = next_sentence_labels[b],
# masked_lm_predictions = masked_lm_predictions[b],
# next_sentence_predictions = next_sentence_predictions[b],
# )
# )
# try:
# exists = session.query(validation_database.BERTValFile.sha256).filter_by(sha256 = val_trace.sha256).scalar() is not None
# except sqlalchemy.orm.exc.MultipleResultsFound as e:
# l.logger().error("Selected sha256 has been already found more than once.")
# raise e
# if not exists:
# session.add(val_trace)
# self.val_id += 1
return
def final(self,
val_set: str,
masked_lm_loss: float,
next_sentence_loss: float,
) -> None:
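    """Compute the aggregate masked-LM and next-sentence accuracies, persist the collected validation samples and store the textual summary for this validation set."""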
if self.mask_accuracy[1] == 0 or self.nsp_accuracy[1] == 0:
return
masked_lm_accuracy = self.mask_accuracy[0] / self.mask_accuracy[1]
next_sentence_accuracy = self.nsp_accuracy[0] / self.nsp_accuracy[1]
r = [
"masked_lm_accuracy: {}".format(masked_lm_accuracy),
"masked_lm_loss: {}".format(masked_lm_loss),
"next_sentence_accuracy: {}".format(next_sentence_accuracy),
"next_sentence_loss: {}".format(next_sentence_loss),
]
with self.val_db.Session(commit = True) as session:
for f in self.val_files.values():
session.add(f)
exists = session.query(validation_database.ValResults.key).filter_by(key = val_set).scalar() is not None
if exists:
entry = session.query(validation_database.ValResults).filter_by(key = val_set).first()
entry.results = "\n".join(r)
else:
session.add(validation_database.ValResults(key = val_set, results = "\n".join(r)))
l.logger().info("LM Accuracy: {}, LM Loss: {}, NSP Accuracy: {}, NSP Loss: {}".format(
masked_lm_accuracy,
masked_lm_loss,
next_sentence_accuracy,
next_sentence_loss
)
)
return
| 11,232 | 35.470779 | 137 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/modeling_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import typing
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.models.torch_bert import generation_utils
from deeplearning.benchpress.util import logging as l
# NOTE: the names below (weight-file constants, cached_path, hf_bucket_url,
# is_remote_url, is_torch_tpu_available) are referenced further down but never
# imported in this module; they are assumed to come from HuggingFace's
# file_utils, from which this file was adapted. Adjust the import if these
# helpers are vendored elsewhere in this repository.
from transformers.file_utils import (
  DUMMY_INPUTS,
  TF2_WEIGHTS_NAME,
  TF_WEIGHTS_NAME,
  WEIGHTS_NAME,
  cached_path,
  hf_bucket_url,
  is_remote_url,
  is_torch_tpu_available,
)
def find_pruneable_heads_and_indices(
heads: typing.List[int], n_heads: int, head_size: int, already_pruned_heads: typing.Set[int]
) -> typing.Tuple[typing.Set[int], torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
    heads (:obj:`typing.List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`typing.Set[int]`): A set of already pruned heads.
Returns:
:obj:`typing.Tuple[typing.Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: torch.LongTensor = torch.arange(len(mask))[mask].long()
return heads, index
class ModuleUtilsMixin:
"""
  A few utilities for :obj:`torch.nn.Module`, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get the number of (optionally, trainable) parameters in the model.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
Returns:
:obj:`int`: The number of parameters.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
return None
def add_memory_hooks(self):
"""
Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
zero with :obj:`model.reset_memory_hooks_state()`.
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
def reset_memory_hooks_state(self):
"""
Reset the :obj:`mem_rss_diff` attribute of each module (see
:func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
"""
for module in self.modules():
module.mem_rss_diff = 0
module.mem_rss_post_forward = 0
module.mem_rss_pre_forward = 0
@property
def device(self) -> pytorch.device:
"""
:obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
device).
"""
try:
      return next(self.parameters()).device
except StopIteration:
# For torch.nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: torch.nn.Module) -> typing.List[typing.Tuple[str, torch.Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
      return first_tuple[1].device
@property
def dtype(self) -> torch.dtype:
"""
    :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For torch.nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: torch.nn.Module) -> typing.List[typing.Tuple[str, torch.Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
def invert_attention_mask(self, encoder_attention_mask: torch.Tensor) -> torch.Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.
Returns:
:obj:`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
if self.dtype == torch.float16:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
elif self.dtype == torch.float32:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
raise ValueError(
"{} not recognized. `torch.dtype` should be set to either `torch.float32` or `torch.float16`".format(
self.dtype
)
)
return encoder_extended_attention_mask
  def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: typing.Tuple[int], device: pytorch.device) -> torch.Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`typing.Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
      :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype = self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def get_head_mask(
self, head_mask: typing.Optional[torch.Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
) -> torch.Tensor:
"""
Prepare the head mask if needed.
Args:
head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (:obj:`int`):
The number of hidden layers in the model.
is_attention_chunked: (:obj:`bool`, `optional, defaults to :obj:`False`):
Whether or not the attentions scores are computed by chunks or not.
Returns:
:obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]`
or list with :obj:`[None]` for each layer.
"""
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
    head_mask = head_mask.to(dtype = self.dtype) # switch to float if needed + fp16 compatibility
return head_mask
class PreTrainedModel(torch.nn.Module, ModuleUtilsMixin, generation_utils.GenerationMixin):
r"""
Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
    - **load_tf_weights** (:obj:`typing.Callable`) -- A python `method` for loading a TensorFlow checkpoint in a
      PyTorch model, taking as arguments:
      - **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
        TensorFlow checkpoint.
      - **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated
        to the model.
      - **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
- **authorized_missing_keys** (:obj:`typing.Optional[typing.List[str]]`) -- A list of re pattern of tensor names to ignore
when loading the model (and avoid unnecessary warnings).
"""
config_class = None
base_model_prefix = ""
authorized_missing_keys = None
@property
def dummy_inputs(self) -> typing.Dict[str, torch.Tensor]:
"""
:obj:`typing.Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__()
# Save config in model
self.config = config
@property
def base_model(self) -> torch.nn.Module:
"""
    :obj:`torch.nn.Module`: The main body of the model.
"""
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self) -> torch.nn.Module:
"""
Returns the model's input embeddings.
Returns:
:obj:`torch.nn.Module`: A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value: torch.nn.Module):
"""
    Set the model's input embeddings.
Args:
value (:obj:`torch.nn.Module`): A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self) -> torch.nn.Module:
"""
Returns the model's output embeddings.
Returns:
:obj:`torch.nn.Module`: A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
    If the :obj:`torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so
    the weights are cloned instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)
@staticmethod
def _tie_encoder_decoder_weights(encoder: torch.nn.Module, decoder: torch.nn.Module, base_model_prefix: str):
uninitialized_encoder_weights: typing.List[str] = []
assert decoder.__class__ == encoder.__class__, f"{decoder.__class__} and {encoder.__class__} have to be equal."
def tie_encoder_to_decoder_recursively(
decoder_pointer: torch.nn.Module,
encoder_pointer: torch.nn.Module,
module_name: str,
uninitialized_encoder_weights: typing.List[str],
depth=0,
):
assert isinstance(decoder_pointer, torch.nn.Module) and isinstance(
encoder_pointer, torch.nn.Module
      ), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
if hasattr(decoder_pointer, "weight"):
assert hasattr(encoder_pointer, "weight")
encoder_pointer.weight = decoder_pointer.weight
if hasattr(decoder_pointer, "bias"):
assert hasattr(encoder_pointer, "bias")
encoder_pointer.bias = decoder_pointer.bias
return
encoder_modules = encoder_pointer._modules
decoder_modules = decoder_pointer._modules
if len(decoder_modules) > 0:
assert (
len(encoder_modules) > 0
), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
encoder_layer_pos = 0
for name, module in decoder_modules.items():
if name.isdigit():
encoder_name = str(int(name) + encoder_layer_pos)
decoder_name = name
if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])):
# this can happen if the name corresponds to the position in a list module list of layers
# in this case the decoder has added a cross-attention that the encoder does not have
              # thus skip this step and subtract one layer pos from encoder
encoder_layer_pos -= 1
continue
elif name not in encoder_modules:
continue
elif depth > 500:
raise ValueError(
"Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `torch.nn.Modules` of your model."
)
else:
decoder_name = encoder_name = name
tie_encoder_to_decoder_recursively(
decoder_modules[decoder_name],
encoder_modules[encoder_name],
module_name + "/" + name,
uninitialized_encoder_weights,
depth=depth + 1,
)
all_encoder_weights.remove(module_name + "/" + encoder_name)
uninitialized_encoder_weights += list(all_encoder_weights)
# tie weights recursively
tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
if len(uninitialized_encoder_weights) > 0:
l.logger().warning(
f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
)
  def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
    """ Tie or clone module weights depending on whether we are using TorchScript or not
    """
if self.config.torchscript:
output_embeddings.weight = torch.nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
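      # When the tied weight matrix has grown (e.g. after resizing the token
      # embeddings) the output bias is zero-padded to the new vocabulary size.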
      output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
  def resize_token_embeddings(self, new_num_tokens: typing.Optional[int] = None) -> torch.nn.Embedding:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
        just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
anything.
Return:
      :obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(
    self, old_embeddings: torch.nn.Embedding, new_num_tokens: typing.Optional[int] = None
  ) -> torch.nn.Embedding:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
      old_embeddings (:obj:`torch.nn.Embedding`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
        :obj:`torch.nn.Embedding` module of the model without doing anything.
    Return:
      :obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = torch.nn.Embedding(new_num_tokens, old_embedding_dim)
    new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
"""
Initializes and prunes weights if needed.
"""
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune: typing.Dict[int, typing.List[int]]):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`typing.Dict[int, typing.List[int]]`):
        Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list
of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will
prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
`:func:`~transformers.PreTrainedModel.from_pretrained`` class method.
Arguments:
save_directory (:obj:`str`):
Directory to which to save. Will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
l.logger().error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
if getattr(self.config, "xla_device", False):
if pytorch.xla_model.is_master_ordinal():
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# pytorch.xla_model.save takes care of saving only from master
pytorch.xla_model.save(model_to_save.state_dict(), output_model_file)
else:
model_to_save.config.save_pretrained(save_directory)
torch.save(model_to_save.state_dict(), output_model_file)
l.logger().info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated).
To train the model, you should first set it back in training mode with ``model.train()``.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str`, `optional`):
Can be either:
- A string with the `shortcut name` of a pretrained model to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g.,
``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
          - A path or url to a `tensorflow index checkpoint file` (e.g., ``./tf_model/model.ckpt.index``). In
            this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
            as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint into
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
          All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`typing.Union[PretrainedConfig, str]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
          Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `shortcut name` string of a
pretrained model).
- The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
              by supplying the save directory.
            - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (:obj:`typing.Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
          Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`typing.Dict[str, str], `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each
request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
          Whether or not to also return a dictionary containing missing keys, unexpected keys and error
messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
          Whether or not to only look at local files (e.g., not try downloading the model).
use_cdn(:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use Cloudfront (a Content Delivery Network, or CDN) when searching for the model on
our S3 (faster). Should be set to :obj:`False` for checkpoints larger than 20GB.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attention=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
from transformers import BertConfig, BertModel
# Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('bert-base-uncased')
# Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
model = BertModel.from_pretrained('./test/saved_model/')
# Update configuration during loading.
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True)
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_cdn = kwargs.pop("use_cdn", True)
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path,
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
        assert (
          from_tf
        ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
use_cdn=use_cdn,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
if resolved_archive_file is None:
raise EnvironmentError
except EnvironmentError:
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
l.logger().info("loading weights file {}".format(archive_file))
else:
l.logger().info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
"Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
        # Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
        # Load from our TensorFlow 2.0 checkpoints
try:
from transformers import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
          l.logger().error(
            "Loading a TensorFlow model in PyTorch requires both PyTorch and TensorFlow to be installed. Please see "
            "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: torch.nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
# Some models may have keys that are not in the state by design, removing them before needlessly warning
# the user.
if cls.authorized_missing_keys is not None:
for pat in cls.authorized_missing_keys:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
l.logger().warning(
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
l.logger().info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
l.logger().warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
l.logger().info(
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
# make sure token embedding weights are still tied if needed
model.tie_weights()
    # Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
if hasattr(config, "xla_device") and config.xla_device and is_torch_tpu_available():
model = pytorch.xla_model.send_cpu_data_to_device(model, pytorch.xla_model.xla_device())
model.to(pytorch.xla_model.xla_device())
return model
def prune_linear_layer(layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0) -> torch.nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
    layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
    :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
  index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
  new_layer = torch.nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
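# A minimal usage sketch of prune_linear_layer (illustrative only; the 8->4 layer and
# the kept indices below are made-up placeholders). Defined as an uncalled helper so
# importing this module is unaffected.
def _demo_prune_linear_layer() -> None:
  layer = torch.nn.Linear(8, 4)
  keep = torch.LongTensor([0, 2, 5])  # keep three of the eight input features
  pruned = prune_linear_layer(layer, keep, dim = 1)
  # All four outputs survive, but only the three selected input columns remain.
  assert pruned.weight.shape == (4, 3)
  assert pruned.bias.shape == (4,)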
def apply_chunking_to_forward(
forward_fn: typing.Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
"""
This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.
If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
directly applying :obj:`forward_fn` to :obj:`input_tensors`.
Args:
forward_fn (:obj:`typing.Callable[..., torch.Tensor]`):
The forward function of the model.
chunk_size (:obj:`int`):
The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
chunk_dim (:obj:`int`):
The dimension over which the :obj:`input_tensors` should be chunked.
input_tensors (:obj:`typing.Tuple[torch.Tensor]`):
The input tensors of ``forward_fn`` which will be chunked.
Returns:
    :obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have given if applied directly.
Examples::
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
# implement a chunked forward function
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
"""
assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors)
tensor_shape = input_tensors[0].shape
assert all(
input_tensor.shape == tensor_shape for input_tensor in input_tensors
), "All input tenors have to be of the same shape"
  # inspect.signature exists since Python 3.5 and is a Python method -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
assert num_args_in_forward_chunk_fn == len(
input_tensors
), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format(
num_args_in_forward_chunk_fn, len(input_tensors)
)
if chunk_size > 0:
assert (
input_tensors[0].shape[chunk_dim] % chunk_size == 0
), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format(
input_tensors[0].shape[chunk_dim], chunk_size
)
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
# chunk input tensor into tuples
input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
# apply forward fn to every tuple
output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
# concatenate output at same dimension
return torch.cat(output_chunks, dim=chunk_dim)
return forward_fn(*input_tensors)
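# A minimal equivalence check for apply_chunking_to_forward (illustrative sketch with
# made-up sizes): when forward_fn acts independently along chunk_dim, chunked and
# direct execution must agree. Defined as an uncalled helper.
def _demo_apply_chunking_to_forward() -> None:
  def double(hidden_states):
    return hidden_states * 2.0
  x = torch.arange(12, dtype = torch.float32).view(1, 12)
  # Three chunks of size 4 along dimension 1.
  chunked = apply_chunking_to_forward(double, 4, 1, x)
  assert torch.equal(chunked, double(x))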
| 47,214 | 45.063415 | 180 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This module
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import os
import typing
import copy
import datetime
import glob
import humanize
import sklearn
import pickle
import functools
import numpy as np
import pathlib
import multiprocessing
import math
import tqdm
import threading
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import feature_sampler
from deeplearning.benchpress.features import active_feed_database
from deeplearning.benchpress.features import evaluate_cand_database
from deeplearning.benchpress.models import lm_data_generator
from deeplearning.benchpress.models import sequence_masking
from deeplearning.benchpress.models.torch_bert import datasets
from deeplearning.benchpress.samplers import sample_observers
from deeplearning.benchpress.preprocessors import opencl
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"skip_first_queue",
False,
"Hacky way to speedup active sampling experiments."
)
flags.DEFINE_boolean(
"evaluate_candidates",
False,
"Select to do exhaustive evaluation of sampling search candidates."
)
flags.DEFINE_boolean(
"evolutionary_search",
True,
"Select to perform independent per-generation candidate search instead of son-better-than-parent paradigm."
)
flags.DEFINE_boolean(
"features_standard_scaler",
False,
"Select to use sklearn StandardScaler for generation standardization."
)
flags.DEFINE_boolean(
"start_from_cached",
False,
"Select to start from cached active feeds instead of restarting from axis origins."
)
class ActiveSampleFeed(typing.NamedTuple):
"""
Representation of an active learning input to the model.
"""
# An array of original input
input_feed : np.array
# The feature space of the original input
input_features : typing.Dict[str, float]
# Distance from target features of input feed. Valid after 1st generation.
input_score : float
# Depth increases when a valid inference sample is fed back as an input.
gen_id : int
def ActiveSampleFeed_to_JSON(f: ActiveSampleFeed) -> typing.Dict[str, typing.Any]:
"""
Convert NamedTuple to JSON serializable dictionary.
"""
return {
'input_feed' : list([int(x) for x in f.input_feed]),
'input_features' : {k: float(v) for k, v in f.input_features.items()},
'input_score' : float(f.input_score),
'gen_id' : int(f.gen_id),
}
def JSON_to_ActiveSampleFeed(d: typing.Dict[str, typing.Any]) -> ActiveSampleFeed:
"""
JSON serializable dictionary to ActiveSampleFeed.
"""
return ActiveSampleFeed(**d)
class ActiveSample(typing.NamedTuple):
"""
Representation of an active learning sample.
"""
# ActiveSampleFeed instance of model input
sample_feed : typing.TypeVar("ActiveSamplingGenerator.ActiveSampleFeed")
# Input ids that led to this prediction
input_ids : np.array
# hole lengths and positions of input ids.
hole_lengths : typing.List[sequence_masking.MaskedLmInstance]
# Model prediction
sample : np.array
# Sample indices of given prediction.
sample_indices : np.array
# number of tokens the model filled holes with.
sample_indices_size : int
# Output features of sample
features : typing.Dict[str, float]
# Runtime features of ActiveSample (will be populated lazily.)
runtime_features : typing.Dict[str, float]
# Score of sample based on active learning search.
score : typing.Union[bool, float]
def ActiveSample_to_JSON(f: ActiveSample) -> typing.Dict[str, typing.Any]:
"""
Convert NamedTuple to JSON serializable dictionary.
"""
return {
'sample_feed' : ActiveSampleFeed_to_JSON(f.sample_feed),
'input_ids' : list([int(x) for x in f.input_ids]),
'hole_lengths' : list([int(x) for x in f.hole_lengths]),
'sample' : list([int(x) for x in f.sample]),
'sample_indices' : list([int(x) for x in f.sample_indices]),
    'sample_indices_size' : int(f.sample_indices_size),
'features' : {k: float(v) for k, v in f.features.items()},
'runtime_features' : {k: int(v) if k != "label" else str(v) for k, v in f.runtime_features.items() },
'score' : float(f.score),
}
def JSON_to_ActiveSample(d: typing.Dict[str, typing.Any]) -> ActiveSample:
"""
  JSON serializable dictionary to ActiveSample.
"""
return ActiveSample(
sample_feed = JSON_to_ActiveSampleFeed(d['sample_feed']),
input_ids = d['input_ids'],
hole_lengths = d['hole_lengths'],
sample = d['sample'],
sample_indices = d['sample_indices'],
sample_indices_size = d['sample_indices_size'],
features = d['features'],
runtime_features = d['runtime_features'],
score = d['score']
)
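# A small round-trip sketch for the JSON helpers above (illustrative only; every field
# value is a made-up placeholder). Defined as an uncalled helper.
def _demo_active_sample_json_roundtrip() -> None:
  feed = ActiveSampleFeed(
    input_feed = np.array([1, 2, 3]),
    input_features = {"f1": 1.0},
    input_score = 0.5,
    gen_id = 0,
  )
  sample = ActiveSample(
    sample_feed = feed,
    input_ids = np.array([1, 2, 3]),
    hole_lengths = [2],
    sample = np.array([1, 2, 3, 4]),
    sample_indices = np.array([4]),
    sample_indices_size = 1,
    features = {"f1": 2.0},
    runtime_features = {"label": "demo"},
    score = 1.0,
  )
  restored = JSON_to_ActiveSample(ActiveSample_to_JSON(sample))
  assert restored.score == sample.score
  assert restored.sample_feed.gen_id == feed.gen_id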
def IR_candidate_worker(sample : np.array,
feature_space : str,
target_benchmark : feature_sampler.Benchmark,
tokenizer : tokenizers.TokenizerBase,
) -> ActiveSample:
"""
ActiveSample worker for LLVM-IR feature spaces.
"""
sample, sample_indices, input_ids, mlm_lengths, feed = sample
assert sample[0] != tokenizer.padToken, sample
try:
code = tokenizer.ArrayToCode(sample, with_formatting = False)
features = extractor.ExtractFeatures(code, [feature_space])[feature_space]
if features:
return (True, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = features,
runtime_features = target_benchmark.runtime_features,
score = feature_sampler.calculate_distance(features, target_benchmark.features, feature_space),
))
except ValueError:
pass
except Exception as e:
raise e
return (False, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = {},
runtime_features = target_benchmark.runtime_features,
score = math.inf,
))
def text_candidate_worker(sample : np.array,
feature_space : str,
target_benchmark : feature_sampler.Benchmark,
tokenizer : tokenizers.TokenizerBase,
) -> ActiveSample:
"""
ActiveSample worker for text-based feature spaces.
"""
sample, sample_indices, input_ids, mlm_lengths, feed = sample
assert sample[0] != tokenizer.padToken, sample
try:
code = tokenizer.ArrayToCode(sample, with_formatting = False)
_ = opencl.Compile(code)
features = extractor.ExtractFeatures(code, [feature_space])[feature_space]
if features:
return (True, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = features,
runtime_features = target_benchmark.runtime_features,
score = feature_sampler.calculate_distance(features, target_benchmark.features, feature_space),
))
except ValueError:
pass
except Exception as e:
raise e
return (False, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = {},
runtime_features = target_benchmark.runtime_features,
score = math.inf,
))
def hidden_state_candidate_worker(sample : np.array,
feature_space : str,
target_benchmark : feature_sampler.Benchmark,
tokenizer : tokenizers.TokenizerBase,
) -> ActiveSample:
"""
Provided hidden states by the language model, choose those that compile and create ActiveSamples.
"""
sample, sample_indices, input_ids, mlm_lengths, hidden_state, feed = sample
assert sample[0] != tokenizer.padToken, sample
try:
code = tokenizer.ArrayToCode(sample, with_formatting = False)
_ = opencl.Compile(code)
features = extractor.RawToDictFeats(hidden_state, [feature_space])[feature_space]
return (True, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = features,
runtime_features = target_benchmark.runtime_features,
score = feature_sampler.calculate_distance(features, target_benchmark.features, feature_space),
))
except ValueError:
pass
except Exception as e:
raise e
return (False, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = {},
runtime_features = target_benchmark.runtime_features,
score = math.inf,
))
def dataload_worker(x : int,
feed : typing.List[np.array],
func : typing.Union[
sequence_masking.HoleSequence,
sequence_masking.HoleSequenceSeqMasks,
'sequence_masking.MaskSequence'
],
batch : int,
batch_per_feed : int,
) -> typing.Dict[str, np.array]:
"""
Masking input feed worker.
"""
try:
return [f for _ in range(batch // batch_per_feed) for f in [func(fd) for fd in feed * batch_per_feed]]
except Exception as e:
raise e
def write_samples_cache(db_sample_obs : sample_observers.SamplesDatabaseObserver,
tokenizer : tokenizers.TokenizerBase,
samples : typing.List[ActiveSample],
) -> None:
"""
Candidate logging/caching worker.
"""
for sample in samples:
try:
s = model_pb2.Sample(
train_step = -1,
text = tokenizer.ArrayToCode(sample.sample, with_formatting = True),
sample_indices = "",
encoded_sample_indices = "",
original_input = "",
sample_feed = tokenizer.ArrayToCode(sample.sample_feed.input_feed, with_formatting = True),
encoded_text = "",
sample_start_epoch_ms_utc = 0,
sample_time_ms = 0,
wall_time_ms = 0,
feature_vector = '\n'.join(["{}:{}".format(k, v) for k, v in sample.features.items()]) if sample.features else "None",
num_tokens = np.where(sample.sample == tokenizer.padToken)[0][0] if tokenizer.padToken in sample.sample else len(sample),
compile_status = True,
categorical_sampling = FLAGS.categorical_sampling,
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
db_sample_obs.OnSample(s)
except Exception:
pass
return
def write_eval_db(eval_db : evaluate_cand_database.SearchCandidateDatabase,
tokenizer : tokenizers.TokenizerBase,
compiling_samples : typing.List[ActiveSample],
rejected_samples : typing.List[ActiveSample],
target_benchmark : typing.Tuple[str, str],
target_features : typing.Dict[str, float],
gen_id : int,
) -> None:
"""
Evaluated step and rejected candidates monitoring/caching.
"""
with eval_db.Session(commit = True) as session:
cached = {
d.sha256: d for d in
session.query(evaluate_cand_database.SearchCandidate).filter_by(target_benchmark = "// {}\n{}".format(target_benchmark[0], target_benchmark[1])).all()
}
objs = {}
for idx, samples in enumerate([compiling_samples, rejected_samples]):
for sample in samples:
if idx == 0:
compile_status = True
else:
compile_status = False
sobj = evaluate_cand_database.SearchCandidate.FromArgs(
tokenizer = tokenizer,
id = eval_db.count,
input_feed = sample.sample_feed.input_feed,
input_ids = sample.input_ids,
input_features = sample.sample_feed.input_features,
input_score = sample.sample_feed.input_score,
hole_lengths = sample.hole_lengths,
sample = sample.sample,
sample_indices = sample.sample_indices,
output_features = sample.features,
runtime_features = sample.runtime_features,
sample_score = sample.score,
target_benchmark = target_benchmark,
target_features = target_features,
compile_status = compile_status,
generation_id = gen_id,
)
if sobj.sha256 in objs:
objs[sobj.sha256][1] += 1
else:
objs[sobj.sha256] = [sobj, 1]
    offset_idx = 0
    entry = None
try:
for sha, obj in objs.items():
if sha in cached:
entry = cached[sha]
entry.frequency += obj[1]
else:
obj[0].frequency = obj[1]
obj[0].id += offset_idx
offset_idx += 1
session.add(obj[0])
session.commit()
except Exception as e:
l.logger().error(entry)
if entry is not None:
l.logger().error(entry.id)
l.logger().error(entry.sha256)
l.logger().error(sha)
l.logger().error("count: {}".format(eval_db.count))
l.logger().error("offset_idx: {}".format(offset_idx))
print(e)
return
class torchLMDataGenerator(lm_data_generator.MaskLMDataGenerator):
"""Data generator subclass designed for PyTorch BERT model."""
@classmethod
def TrainMaskLMBatchGenerator(cls,
corpus : corpuses.Corpus,
training_opts : model_pb2.TrainingOptions,
cache_path : pathlib.Path,
num_train_steps : int = None,
pre_train : bool = False,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> lm_data_generator.MaskLMDataGenerator:
"""Initializes data generator for training."""
d = super(torchLMDataGenerator, cls()).TrainMaskLMBatchGenerator(
corpus, training_opts, cache_path, num_train_steps, pre_train,
feature_encoder, feature_tokenizer, feature_sequence_length,
)
d.dataloader = d.train_dataloader()
return d
@classmethod
def SampleMaskLMBatchGenerator(cls,
model_opts : model_pb2.TrainingOptions,
sampler : 'samplers.Sampler',
tokenizer : tokenizers.TokenizerBase,
seed : int,
sample_batch_size : int,
max_position_embeddings : int,
cache_path : pathlib.Path,
corpus : corpuses.Corpus = None,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> lm_data_generator.MaskLMDataGenerator:
"""Initializes data generator for inference."""
d = super(torchLMDataGenerator, cls()).SampleMaskLMBatchGenerator(
model_opts, sampler, tokenizer, seed,
sample_batch_size, max_position_embeddings, cache_path,
feature_encoder, feature_tokenizer, feature_sequence_length
)
if sampler.is_active:
corpus_config = d.sampler.config.sample_corpus.corpus_config
if corpus_config.HasField("hole"):
distribution = distributions.Distribution.FromHoleConfig(
corpus_config.hole, d.sampler.corpus_directory, "sample_corpus"
)
d.func = functools.partial(sequence_masking.HoleSequence,
train_set = False,
max_predictions = corpus_config.max_predictions_per_seq,
masked_lm_prob = corpus_config.masked_lm_prob,
distribution = distribution,
tokenizer = d.tokenizer,
)
elif corpus_config.HasField("mask_seq"):
distribution = distributions.Distribution.FromHoleConfig(
corpus_config.mask_seq, d.sampler.corpus_directory, "sample_corpus"
)
d.func = functools.partial(sequence_masking.HoleSequenceSeqMasks,
train_set = False,
max_predictions = corpus_config.max_predictions_per_seq,
masked_lm_prob = corpus_config.masked_lm_prob,
distribution = distribution,
tokenizer = d.tokenizer,
)
elif corpus_config.HasField("mask"):
        d.func = functools.partial(sequence_masking.MaskSequence,
train_set = False,
max_predictions = corpus_config.max_predictions_per_seq,
masked_lm_prob = corpus_config.masked_lm_prob,
config = corpus_config,
pickled_tokenizer = d.tokenizer,
is_torch = True,
)
d.loadCheckpoint()
# Active sampling attributes.
d.active_db = active_feed_database.ActiveFeedDatabase(
url = "sqlite:///{}".format(d.sampler.corpus_directory / "active_feeds.db"),
)
d.samples_cache_obs = sample_observers.SamplesDatabaseObserver(
path = d.sampler.corpus_directory / "samples_cache.db",
must_exist = False,
)
if FLAGS.evaluate_candidates:
if environment.WORLD_RANK == 0:
d.eval_db = evaluate_cand_database.SearchCandidateDatabase(
url = "sqlite:///{}".format(d.sampler.corpus_directory / "evaluated_candidates.db"),
must_exist = False,
)
if corpus_config.active.HasField("target"):
d.feat_sampler = feature_sampler.BenchmarkSampler(
workspace = d.sampler.corpus_directory,
feature_space = corpus_config.active.feature_space,
target = corpus_config.active.target,
git_corpus = corpus,
seed = d.seed,
)
else:
d.feat_sampler = feature_sampler.ActiveSampler(
workspace = d.sampler.corpus_directory,
feature_space = corpus_config.active.feature_space,
active_learner = d.sampler.active_learner,
tokenizer = d.tokenizer,
seed = d.seed,
)
d.candidate_monitor = monitors.CategoricalDistribMonitor.loadCheckpoint(
d.sampler.corpus_directory, "feature_distance"
)
d.tsne_monitor = monitors.TSNEMonitor.loadCheckpoint(
d.sampler.corpus_directory, "tsne_feature_map"
)
d.comp_rate_mon = monitors.CategoricalHistoryMonitor.loadCheckpoint(
d.sampler.corpus_directory, "comp_rate_per_gen"
)
d.exec_time_mon = monitors.CategoricalHistoryMonitor.loadCheckpoint(
d.sampler.corpus_directory, "exec_time_per_gen"
)
      # Check if benchmark set has been registered to monitor.
if not d.feat_sampler.is_active:
if d.feat_sampler.target not in d.tsne_monitor.groups_set:
for b in d.feat_sampler.benchmarks:
d.tsne_monitor.register((b.features, d.feat_sampler.target, b.name))
d.tsne_monitor.plot()
# Store unique specs to database once.
d.addToDB(
active_feed_database.ActiveSamplingSpecs.FromArgs(
act_s_dep = corpus_config.active.active_search_depth,
act_s_wid = corpus_config.active.active_search_width,
feat_space = corpus_config.active.feature_space
)
)
d.raised_keyboard_int = False
d.raised_exception = None
d.skip_first_queue = FLAGS.skip_first_queue
d.dataloader = d.predict_dataloader()
d.loader = iter(d.dataloader)
return d
def __init__(self):
super(torchLMDataGenerator, self).__init__("pt_record")
self.dataloader = None
self.loader = None
self.comp_rate = {}
self.exec_time = {}
self.feed_queue = []
self.active_db = None
self.samples_cache_obs = None
self.eval_db = None
self.feat_sampler = None
self.candidate_monitor = None
self.tsne_monitor = None
self.comp_rate_mon = None
self.exec_time_mon = None
self.raised_keyboard_int = None
self.raised_exception = None
self.skip_first_queue = None
self.bench_idx = None
return
def train_dataloader(self, set_name = 'train_dataset', is_train = True) -> torch.utils.data.dataloader:
"""
Pytorch dataloader used for training.
    set_name defaults to train_dataset, so by default this dataloader
    is used for training.
eval_dataloaders sets set_name to reuse the function for all different sets.
"""
if self.config.datapoint_time == "pre":
# Pre-computed dataset with system of files. [DEPRECATED].
dataset = datasets.LazyConcatDataset([x for x in self.dataset[set_name]['file']])
sampler = datasets.LazyRandomSampler(dataset, replacement = False)
elif self.config.datapoint_time == "online":
# Online masking of training instances.
if self.pre_train:
dataset = datasets.LazyOnlineDataset(self, is_train)
sampler = datasets.LazyRandomSampler(dataset, replacement = False)
else:
dataset = datasets.OnlineDataset(self, is_train)
if environment.WORLD_SIZE == 1:
sampler = torch.utils.data.RandomSampler(dataset, replacement = False)
else:
sampler = torch.utils.data.DistributedSampler(dataset)
else:
raise ValueError(self.config.datapoint_time)
dataloader = torch.utils.data.dataloader.DataLoader(
dataset = dataset,
batch_size = self.training_opts.batch_size,
sampler = (sampler
if pytorch.num_nodes <= 1 or not pytorch.torch_tpu_available or pytorch.torch_xla.xrt_world_size() <= 1
else torch.utils.data.distributed.DistributedSampler(
dataset = dataset,
num_replicas = pytorch.num_nodes if not pytorch.torch_tpu_available else pytorch.torch_xla.xrt_world_size(),
rank = pytorch.torch.distributed.get_rank() if not pytorch.torch_tpu_available else pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = True if environment.WORLD_SIZE > 1 else False,
)
return dataloader
def eval_dataloaders(self) -> torch.utils.data.dataloader:
"""Pytorch dataloader used for validation."""
if self.config.datapoint_time == "online":
yield "Online Corpus", self.train_dataloader(is_train = False)
else:
for set_name in self.dataset:
yield set_name, self.train_dataloader(set_name)
def predict_dataloader(self) -> torch.utils.data.dataloader:
"""
Pytorch dataloader used for inference.
isFixedStr == True means there is a fixed sample feed, e.g. 'kernel void [HOLE]'
Otherwise, a set has been given to provide random samples from it.
"""
batch_size = self.sample_batch_size
if not self.sampler.is_active and (self.sampler.isFixedStr or self.sampler.is_live):
sample_element = sequence_masking.MaskedSeqToBlob(
self.sampler.encoded_start_text, self.tokenizer, self.sampler.sequence_length, self.max_position_embeddings
)
dataset = [{k: torch.from_numpy(v) for (k, v) in sample_element.items()}] * self.sample_batch_size
sampler = torch.utils.data.SequentialSampler(dataset)
else:
if self.sampler.is_online:
"""
TODO maybe add configSampleSets here as well.
"""
if self.pre_train:
dataset = datasets.LazyOnlineDataset(self, False)
sampler = datasets.LazyRandomSampler(dataset, replacement = False)
else:
dataset = datasets.OnlineDataset(self, False)
if environment.WORLD_SIZE == 1:
sampler = torch.utils.data.RandomSampler(dataset, replacement = False)
else:
sampler = torch.utils.data.DistributedSampler(dataset)
elif self.sampler.is_active:
if self.sampler.isFixedStr:
dataset = [np.asarray(self.tokenizer.TokenizeString(self.sampler.start_text))]
else:
dataset = self.createCorpus(self.sampler.corpus_directory)
batch_size = 1
sampler = torch.utils.data.SequentialSampler(dataset)
else:
path_list = self.configSampleSets()
dataset = datasets.LazyConcatDataset(
[x for x in path_list]
)
sampler = datasets.LazyRandomSampler(dataset, replacement = False)
dataloader = torch.utils.data.dataloader.DataLoader(
dataset = dataset,
# Model's batch size is divided by sampler's batch size, in order to get
# multiple generation candidates from a given sample feed, but still
# efficiently feed big batches to make sampling faster.
# Example: model batch size 32 and sampler batch size 4.
# This dataloader will return 8 feeds. Each will be repeated 4 times.
# 32 sequences will be given to the model.
batch_size = batch_size,
sampler = (sampler
if pytorch.num_nodes <= 1 or not pytorch.torch_tpu_available or pytorch.torch_xla.xrt_world_size() <= 1
else torch.utils.data.distributed.DistributedSampler(
dataset = dataset,
num_replicas = pytorch.num_nodes if not pytorch.torch_tpu_available else pytorch.torch_xla.xrt_world_size(),
rank = pytorch.torch.distributed.get_rank() if not pytorch.torch_tpu_available else pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False,
)
return dataloader
def ActiveGeneration(self,
mwrapper : typing.TypeVar('torch_bert.torchBert'),
estimator : typing.TypeVar('torch_bert.SampleBertEstimator')
) -> typing.Tuple[np.array, np.array, np.array, np.array]:
"""
Active Learning generation core routine.
This function starts with a feed from a dataset
and returns all active samples that have reached the requested feature space.
Args:
mwrapper: BERT model wrapper.
estimator: BERT model pipeline.
Returns:
A tuple of 4 arrays:
a) Original inputs
b) Original input ids
c) Generated samples
d) Sample indices
The arrays are ordered by index.
"""
if self.feat_sampler.is_terminated():
raise StopIteration
if self.raised_keyboard_int:
self.raised_keyboard_int = False
raise KeyboardInterrupt
if self.raised_exception:
raise self.raised_exception
# Active sampling specs initialization
active_search_depth = self.sampler.config.sample_corpus.corpus_config.active.active_search_depth
active_search_width = self.sampler.config.sample_corpus.corpus_config.active.active_search_width
active_dropout_prob = self.sampler.config.sample_corpus.corpus_config.active.active_dropout_prob
sample_batch_per_feed = self.sampler.config.sample_corpus.corpus_config.active.batch_size_per_feed
if sample_batch_per_feed > self.sample_batch_size:
l.logger().warn("Throttling sample batch per feed to ({}), equal to batch size".format(self.sample_batch_size))
sample_batch_per_feed = min(sample_batch_per_feed, self.sample_batch_size)
# Initialize feed queue
org_inp = self.initOrGetQueue(self.feat_sampler.target_benchmark.features)
org_ids = copy.copy(org_inp)
total_cand, total_cand_hash = [], set()
# Sample cache thread, eval cand DB thread.
write_cache_proc = None
if FLAGS.evaluate_candidates:
write_eval_proc = None
# If the sampler is active, monitor on the go each target benchmark separately.
if self.feat_sampler.is_active:
self.tsne_monitor.register((self.feat_sampler.target_benchmark.features,
self.feat_sampler.target,
self.feat_sampler.target_benchmark.name
)
)
if len(self.feat_sampler.target_benchmark.features) > 100:
pretty_features = {k: round(v, 2) for k, v in list(self.feat_sampler.target_benchmark.features.items())[:50]}
pretty_features.update({k: round(v, 2) for k, v in list(self.feat_sampler.target_benchmark.features.items())[-50:]})
else:
pretty_features = {k: round(v, 2) for k, v in self.feat_sampler.target_benchmark.features.items()}
l.logger().info(
"{}Target features: {}{}".format(
"Target benchmark: {}\n".format(self.feat_sampler.target_benchmark.name) if self.feat_sampler.target_benchmark.name != "" else "",
pretty_features,
"\nRuntime features: {}".format(self.feat_sampler.target_benchmark.runtime_features) if self.feat_sampler.target_benchmark.runtime_features else ""
)
)
try:
## BFS style. While you have jobs, keep going.
while self.feed_queue:
## Pop the feed that will provide a sample workload.
if FLAGS.evolutionary_search:
try:
# Evolutionary search will create a workload out of all current generation
init_feed = self.feed_queue.pop(0)
feeds = [init_feed]
cur_gen = init_feed.gen_id
while self.feed_queue[0].gen_id == cur_gen:
feeds.append(self.feed_queue.pop(0))
except Exception:
pass
else:
# Non-evolutionary search will do a small workload per feed and will not give up if it doesn't further reduce distance.
# p.s.: It doesn't work.
feeds = [self.feed_queue.pop(0)]
if self.skip_first_queue:
self.skip_first_queue = False
try:
feeds = [self.feed_queue.pop(0)]
except Exception:
pass
l.logger().info("Benchmark {}, generation {}".format(self.bench_idx, feeds[0].gen_id))
# Compilation rate, execution time, per generation.
cmp_rate = [0, 0]
exec_time = 0.0
if feeds[0].gen_id not in self.comp_rate:
self.comp_rate[feeds[0].gen_id] = [0, 0]
if feeds[0].gen_id not in self.exec_time:
self.exec_time[feeds[0].gen_id] = 0.0
# Specialize sampler to current sampling input.
for feed in feeds[:1]:
self.sampler.setStartText(self.tokenizer.tokensToString(feed.input_feed, ignore_token = self.tokenizer.padToken))
self.sampler.Specialize(self.tokenizer)
# Iterate until you get a better sample or surpass the limit.
better_found, it, threshold = None, 0, 160000
while not better_found and cmp_rate[1] < threshold:
## Pre-process inputs
# workload size: how many batches of sequences you need.
wsize = (FLAGS.sample_workload_size) // (self.sample_batch_size * environment.WORLD_SIZE)
if FLAGS.evolutionary_search and feeds[0].gen_id == 0 and len(feeds) == 1:
wsize = wsize * active_search_width
# Give the input feed and some specs, get the tensor ready to feed.
inputs = self.collateInputData([feed.input_feed for feed in feeds], wsize, sample_batch_per_feed)
## Workload inference.
outputs, time = mwrapper.sample_model_step(
estimator.model,
inputs,
iteration = it,
extract_hidden_state = True if self.feat_sampler.feature_space == "HiddenState" else False,
)
## Post-process outputs.
# Keep step_candidates and evaluate them. Keep rejected candidates only for eval_cand database.
step_candidates, rejected_candidates = [], []
tcs, ts = 0, 0
(cs, s), better_found = self.registerOutputData(
outputs,
[feeds[idx] for fidx, _ in enumerate(feeds) for idx in [fidx]*wsize*self.sample_batch_size],
step_candidates,
rejected_candidates,
)
tcs += cs
ts = s
# l.logger().info("Length before: {}".format(len(step_candidates)), ddp_nodes = True)
step_candidates = distrib.get_consistent(step_candidates)
rejected_candidates = distrib.get_consistent(rejected_candidates)
## Register good offsprings, along with step candidates in tsne monitor.
if not FLAGS.evolutionary_search and better_found and environment.WORLD_RANK == 0:
self.tsne_monitor.register((better_found.features, "gen_{}_accepted".format(str(feeds[0].gen_id)), str(better_found.score)))
for c in step_candidates:
self.tsne_monitor.register((c.features, "gen_{}".format(str(feeds[0].gen_id))))
## Recalculate compilation rate of generation.
cmp_rate[0] += tcs
cmp_rate[1] += ts
exec_time += time
if FLAGS.evaluate_candidates and environment.WORLD_RANK == 0:
## Write all candidates to eval_cand DB.
if write_eval_proc:
write_eval_proc.join()
write_eval_proc = multiprocessing.Process(
target = write_eval_db,
kwargs = {
'eval_db' : self.eval_db,
'tokenizer' : self.tokenizer,
'compiling_samples' : step_candidates,
'rejected_samples' : rejected_candidates,
'target_benchmark' : (self.feat_sampler.target_benchmark.name, self.feat_sampler.target_benchmark.contents),
'target_features' : self.feat_sampler.target_benchmark.features,
'gen_id' : feeds[0].gen_id,
}
)
write_eval_proc.start()
## Write to samples cache DB.
if write_cache_proc:
write_cache_proc.join()
self.samples_cache_obs.sample_id = self.samples_cache_obs.db.count
write_cache_proc = multiprocessing.Process(
target = write_samples_cache,
kwargs = {
'db_sample_obs' : self.samples_cache_obs,
'tokenizer' : self.tokenizer,
'samples' : step_candidates,
}
)
write_cache_proc.start()
if not FLAGS.evolutionary_search and better_found and feeds[0].gen_id > 0:
l.logger().info("Improved score {} -> {} in {} iterations".format(round(feed.input_score, 3), round(better_found.score, 3), it))
# Step counter.
it += 1
if FLAGS.evolutionary_search:
# No need to keep looking for better samples than parents.
# In this mode, you get a workload and keep the best independently.
break
######## End of while.
## Update all monitors.
if environment.WORLD_RANK == 0:
self.comp_rate[feeds[0].gen_id] = [sum(x) for x in zip(self.comp_rate[feeds[0].gen_id], cmp_rate)]
self.exec_time[feeds[0].gen_id] += exec_time
self.comp_rate_mon.register((feeds[0].gen_id, self.comp_rate[feeds[0].gen_id][0] / self.comp_rate[feeds[0].gen_id][1]))
self.exec_time_mon.register((feeds[0].gen_id, self.exec_time[feeds[0].gen_id] / self.comp_rate[feeds[0].gen_id][1]))
self.comp_rate_mon.plot()
self.exec_time_mon.plot()
# self.tsne_monitor.plot()
## Collect surviving candidates of generation.
# If we just started, get top-K.
if FLAGS.evolutionary_search:
best_cands = self.feat_sampler.sample_from_set(step_candidates, active_search_width, active_dropout_prob)
l.logger().info("Top-{} ({} unique) samples of generation {}: {}".format(active_search_width, len(best_cands), feeds[0].gen_id, ', '.join([str(round(c.score, 3)) for c in best_cands])))
for x in best_cands[:3]:
l.logger().info(self.tokenizer.ArrayToCode(x.sample, with_formatting = True))
elif feeds[0].gen_id == 0:
best_cands = self.feat_sampler.sample_from_set(step_candidates, active_search_width, active_dropout_prob)
l.logger().info("Starting scores: {}".format(', '.join([str(round(c.score, 3)) for c in best_cands])))
else:
# If nothing was found, there are no best cands, and we will keep searching.
if not better_found:
best_cands = []
l.logger().warn("No better candidate found...")
else:
# Otherwise, this single input feed, provides a new single better sample.
best_cands = [better_found]
# Monitor the new better candidate(s), if any.
if best_cands and environment.WORLD_RANK == 0:
self.candidate_monitor.register(
{str(best_cands[0].sample_feed.gen_id): [c.score for c in best_cands]}
)
self.candidate_monitor.plot()
# Add them back to queue and to active feed database.
found_match = False
if len(best_cands) == 0:
for feed in feeds:
self.feed_queue.append(
ActiveSampleFeed(
input_feed = feed.input_feed,
input_features = feed.input_features,
input_score = feed.input_score,
gen_id = 1 + feed.gen_id,
)
)
for nc in best_cands:
if FLAGS.evolutionary_search and environment.WORLD_RANK == 0:
self.tsne_monitor.register((nc.features, "gen_{}_accepted".format(str(feeds[0].gen_id))))
sample_hash = ''.join([str(x) for x in nc.sample])
if FLAGS.evolutionary_search or (sample_hash not in total_cand_hash):
if sample_hash not in total_cand_hash:
total_cand.append(nc)
total_cand_hash.add(sample_hash)
if nc.score == 0.0 and FLAGS.evolutionary_search:
found_match = True
if not found_match and 1+nc.sample_feed.gen_id <= active_search_depth and (FLAGS.evolutionary_search or 0 < nc.score < feed.input_score):
assert nc.sample[0] != self.tokenizer.padToken, nc.sample
self.feed_queue.append(
ActiveSampleFeed(
input_feed = nc.sample,
input_features = nc.features,
input_score = nc.score,
gen_id = 1 + nc.sample_feed.gen_id,
)
)
self.addToDB(
active_feed_database.ActiveFeed.FromArgs(
tokenizer = self.tokenizer,
id = self.active_db.active_count,
input_feed = nc.sample_feed.input_feed,
input_features = nc.sample_feed.input_features,
sample = nc.sample,
output_features = nc.features,
sample_quality = nc.score,
target_benchmark = (self.feat_sampler.target_benchmark.name, self.feat_sampler.target_benchmark.contents),
target_features = self.feat_sampler.target_benchmark.features,
compile_status = True,
generation_id = nc.sample_feed.gen_id,
)
)
if environment.WORLD_RANK == 0:
self.tsne_monitor.plot()
self.feat_sampler.step_generation(best_cands)
# save state for this generation and re-loop for the next.
self.saveCheckpoint()
# Catch threads on last iteration.
if write_cache_proc and environment.WORLD_RANK == 0:
write_cache_proc.join()
if FLAGS.evaluate_candidates and write_eval_proc and environment.WORLD_RANK == 0:
write_eval_proc.join()
## Finished, save state, switch benchmark, return samples.
self.bench_idx += 1
if environment.WORLD_RANK == 0:
self.saveCheckpoint()
distrib.barrier()
self.feat_sampler.iter_benchmark(target_samples = total_cand)
return (np.repeat([org_inp], len(total_cand), axis = 0),
np.repeat([org_ids], len(total_cand), axis = 0),
[x.sample for x in total_cand],
[[]] * len(total_cand))
except KeyboardInterrupt:
self.raised_keyboard_int = True
if write_cache_proc and environment.WORLD_RANK == 0:
write_cache_proc.terminate()
if FLAGS.evaluate_candidates and write_eval_proc and environment.WORLD_RANK == 0:
write_eval_proc.terminate()
return (np.repeat([org_inp], len(total_cand), axis = 0),
np.repeat([org_ids], len(total_cand), axis = 0),
[x.sample for x in total_cand],
[[]] * len(total_cand))
except Exception as e:
l.logger().error(e)
self.raised_exception = e
return (np.repeat([org_inp], len(total_cand), axis = 0),
np.repeat([org_ids], len(total_cand), axis = 0),
[x.sample for x in total_cand],
[[]] * len(total_cand))
def initOrGetQueue(self, target_features: typing.Dict[str, float] = None) -> np.array:
"""
If feed queue is not initialized, initialize it by getting new datapoint.
Otherwise, don't do anything as feed_queue is already loaded from checkpoint.
Adds datapoint to InputFeed table of database.
Returns:
Starting input feed of sampling.
"""
if not self.feed_queue:
# Initialize feed_queue if empty.
if FLAGS.start_from_cached and target_features is not None:
# Get cached samples to start with an advantage for new benchmark.
cached_samples = [[x.sample, {':'.join(f.split(':')[:-1]): float(f.split(':')[-1]) for f in x.output_features.split('\n')}, -1] for x in self.active_db.get_data]
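        # Each entry: [sample text, parsed output-feature dict, distance placeholder filled below].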
if len(cached_samples) == 0:
# If no cache, re-try without caching from target.
return self.initOrGetQueue()
else:
for idx, cs in enumerate(cached_samples):
cached_samples[idx][-1] = self.feat_sampler.calculate_distance(cs[1])
sorted_cache_samples = sorted(cached_samples, key = lambda x: x[-1])
# The queue will be no longer than the beam search width specified.
for scs in sorted_cache_samples[:self.sampler.config.sample_corpus.corpus_config.active.active_search_width]:
# Tokenize, pad, add start/end tokens to be ready for inference.
tokenized = self.tokenizer.TokenizeString(scs[0])
w_start_end = self._addStartEndToken(tokenized)
padded = self._padToMaxPosition(w_start_end)[:self.sampler.sequence_length]
if padded[0] == self.tokenizer.padToken:
l.logger().error("Pad token was found again at the beginning of the sequence.")
l.logger().error(scs[0])
l.logger().error(tokenized)
l.logger().error(w_start_end)
l.logger().error(padded)
encoded = self._padToMaxPosition(self._addStartEndToken([int(x) for x in tokenized]))[:self.sampler.sequence_length]
assert encoded[0] != self.tokenizer.padToken, encoded
self.feed_queue.append(
ActiveSampleFeed(
input_feed = encoded,
input_features = scs[1],
input_score = scs[-1],
gen_id = 0,
)
)
self.addToDB(
active_feed_database.ActiveInput.FromArgs(
tokenizer = self.tokenizer, id = self.active_db.input_count,
input_feed = encoded, input_features = scs[1],
)
)
else:
# If no caching is wanted, bring whatever the dataloader
# specified in the sampler's pbtxt wants. Usually this is a start
# text, but could also be a sampled datapoint from a dataset, DB etc.
try:
cf = next(self.loader).squeeze(0)
except StopIteration:
self.loader = iter(self.dataloader)
cf = next(self.loader).squeeze(0)
cf = [int(x) for x in cf]
assert cf[0] != self.tokenizer.padToken, cf
self.feed_queue.append(
ActiveSampleFeed(
input_feed = cf,
input_features = extractor.ExtractFeatures(self.tokenizer.ArrayToCode(cf), [self.feat_sampler.feature_space])[self.feat_sampler.feature_space],
input_score = math.inf,
gen_id = 0,
)
)
self.addToDB(
active_feed_database.ActiveInput.FromArgs(
tokenizer = self.tokenizer, id = self.active_db.input_count,
input_feed = cf, input_features = self.feed_queue[-1].input_features,
)
)
l.logger().info("Feed queue input scores: {}".format(', '.join([str(round(c.input_score, 3)) for c in self.feed_queue])))
return self.feed_queue[0].input_feed
def collateInputData(self,
feed : typing.List[np.array],
wload_size : int,
sample_batch_per_feed : int,
) -> typing.Dict[str, torch.Tensor]:
"""
Create a full generation workload out of a sample feed.
If feed is already masked, then just repeat it across the whole workload.
If it is not masked, then feed is masked wload_size times.
Args:
feed: numpy array of input feed (expressed as list of a single np element),
or a list of numpys in case multiple workloads are merged.
wload_size: Number of inputs that will be fed to the model in a single workload.
Returns:
The tensor inputs dictionary filled for BERT.
"""
if self.feature_encoder:
target_features = self.feature_tokenizer.TokenizeFeatureVector(self.feat_sampler.target_benchmark.features, self.feat_sampler.feature_space, self.feature_sequence_length)
if self.tokenizer.maskToken in feed[0] or self.tokenizer.holeToken in feed[0]:
inputs = sequence_masking.MaskedSeqToBlob(
feed[0], self.tokenizer,
self.sampler.sequence_length,
self.max_position_embeddings
)
if self.feature_encoder:
inputs["input_features"] = target_features
inputs = {
k: torch.from_numpy(v).unsqueeze(0).repeat_interleave(self.sample_batch_size, dim = 0).unsqueeze(0).repeat_interleave(wload_size, dim = 0)
for k, v in inputs.items()
}
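      # Resulting shape for every key: [wload_size, sample_batch_size, sequence_length].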
else:
inputs = {
'input_ids': [], 'input_mask': [], 'position_ids': [],
'mask_labels': [], 'masked_lm_lengths': [], 'next_sentence_labels': []
}
if self.feature_encoder:
inputs["input_features"] = []
try:
pool = multiprocessing.Pool()
for batch in pool.imap_unordered(
functools.partial(
dataload_worker, feed = feed,
func = self.func, batch = self.sample_batch_size,
batch_per_feed = sample_batch_per_feed
),range(wload_size)
):
if batch:
# convert dict values from np -> torch.Tensor.
out = {
k: torch.from_numpy(v).unsqueeze(0)
for (k, v) in batch[0].items()
}
for f in batch[1:]:
for k, v in f.items():
nt = torch.from_numpy(v).unsqueeze(0)
out[k] = torch.cat((out[k], nt), 0)
if self.feature_encoder:
out["input_features"] = torch.from_numpy(target_features).unsqueeze(0).repeat_interleave(out['input_ids'].shape[0], dim = 0)
for k in inputs.keys():
inputs[k].append(out[k])
for k, v in inputs.items():
s = torch.stack(v)
inputs[k] = s.view(-1, self.sample_batch_size, s.shape[-1])
pool.close()
pool.terminate()
except KeyboardInterrupt as e:
pool.close()
pool.terminate()
raise e
return inputs
def registerOutputData(self,
outputs : typing.Dict[str, typing.List[np.array]],
feeds : ActiveSampleFeed,
candidates : typing.List[ActiveSample],
rejected_candidates : typing.List[ActiveSample],
) -> typing.List[int]:
"""
Gets workload output from model.
In parallel, every sample is checked for compilability and features are extracted.
If sample compiles, it is stored as an active learning candidate.
Args:
outputs: Dictionary output of workload
      feeds: Input feeds that produced each sample of the workload
      candidates: Passed by reference and filled within this function
      rejected_candidates: Passed by reference; collects samples that fail to compile
Returns:
cm_rate: List of two elements that express compilation rate of workload.
0th el: Total compiling.
1st el: Total samples.
"""
cm_rate = [0, 0]
pool = multiprocessing.Pool()
cm_rate[1] += len(outputs['generated_samples'])
better_found = None
try:
if self.feat_sampler.feature_space == "HiddenState":
it = zip(
outputs['generated_samples'], outputs['sample_indices'],
outputs['input_ids'], outputs['masked_lm_lengths'],
outputs['hidden_state'], feeds
)
else:
it = zip(
outputs['generated_samples'], outputs['sample_indices'],
outputs['input_ids'], outputs['masked_lm_lengths'],
feeds
)
if self.feat_sampler.feature_space == "GreweFeatures":
candidate_worker = functools.partial(
text_candidate_worker,
tokenizer = self.tokenizer,
feature_space = self.feat_sampler.feature_space,
target_benchmark = self.feat_sampler.target_benchmark,
)
elif self.feat_sampler.feature_space == "HiddenState":
candidate_worker = functools.partial(
hidden_state_candidate_worker,
tokenizer = self.tokenizer,
feature_space = self.feat_sampler.feature_space,
target_benchmark = self.feat_sampler.target_benchmark,
)
else:
candidate_worker = functools.partial(
IR_candidate_worker,
tokenizer = self.tokenizer,
feature_space = self.feat_sampler.feature_space,
target_benchmark = self.feat_sampler.target_benchmark,
)
t = 0
for idx, batch in tqdm.tqdm((enumerate(pool.map(candidate_worker, it))), total = len(outputs['generated_samples']), desc = "Register Output Data", leave = False):
t = idx
if batch[0]:
cm_rate[0] += 1
candidates.append(batch[1])
if 0 < batch[1].score < batch[1].sample_feed.input_score:
if better_found is None or batch[1].score < better_found.score:
better_found = batch[1]
else:
if FLAGS.evaluate_candidates:
rejected_candidates.append(batch[1])
if FLAGS.features_standard_scaler:
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit([[float(y) for y in x.features.values()] for x in candidates + [self.feat_sampler.target_benchmark]])
target_feats = {k: v for k, v in zip(self.feat_sampler.target_benchmark.features.keys(), scaler.transform([[float(x) for x in self.feat_sampler.target_benchmark.features.values()]])[0])}
for idx, cd in enumerate(candidates):
outfeats = {k: v for k, v in zip(cd.features.keys(), scaler.transform([[float(x) for x in cd.features.values()]])[0])}
          candidates[idx] = candidates[idx]._replace(score = feature_sampler.calculate_distance(outfeats, target_feats, self.feat_sampler.feature_space))
pool.close()
pool.terminate()
except KeyboardInterrupt as e:
pool.close()
pool.terminate()
raise e
return cm_rate, better_found
def saveCheckpoint(self):
"""
Save feed queue checkpoint for easy restart.
"""
with open(self.sampler.corpus_directory / "gen_state.pkl", 'wb') as outf:
pickle.dump({'feed_queue': self.feed_queue, 'bench_idx': self.bench_idx}, outf)
self.candidate_monitor.saveCheckpoint()
self.tsne_monitor.saveCheckpoint()
self.comp_rate_mon.saveCheckpoint()
self.exec_time_mon.saveCheckpoint()
return
def loadCheckpoint(self):
"""
Load checkpointed feed queue, if exists.
"""
if (self.sampler.corpus_directory / "gen_state.pkl").exists():
distrib.lock()
with open(self.sampler.corpus_directory / "gen_state.pkl", 'rb') as infile:
checkpoint = pickle.load(infile)
self.feed_queue = checkpoint['feed_queue']
self.bench_idx = checkpoint['bench_idx']
distrib.unlock()
else:
self.feed_queue = []
self.bench_idx = 1
return
def addToDB(self,
db_input: typing.Union[
active_feed_database.ActiveSamplingSpecs,
active_feed_database.ActiveInput,
active_feed_database.ActiveFeed
]
) -> None:
"""
If not exists, add current sample state to database
"""
with self.active_db.get_session(commit = True) as session:
exists = session.query(
type(db_input)
).filter(type(db_input).sha256 == db_input.sha256).scalar() is not None
if not exists:
session.add(db_input)
return
def _saveCorpusRecord(self, masked_corpus: typing.Dict[str, np.array]) -> None:
"""Converts corpus nparrays to torch tensors and stores corpus to pt_record"""
torch.save(
[{k: torch.from_numpy(v) for (k, v) in inst.items()} for inst in masked_corpus['corpus']],
masked_corpus['file']
)
if FLAGS.write_text_dataset:
with open(masked_corpus['txt'], 'w') as file_writer:
for instance in masked_corpus['corpus']:
file_writer.write("'seen_in_training': {}\n'original_input': {}\n'input_ids': {}\n'input_mask': {}\n'position_ids': {}\n'mask_labels': {}\n'masked_lm_lengths': {}\n'next_sentence_labels': {}\n\n"
.format((True if instance['seen_in_training'] == 1 else False),
self.tokenizer.tokensToString(instance['original_input'], ignore_token = self.tokenizer.padToken),
self.tokenizer.tokensToString(instance['input_ids'], ignore_token = self.tokenizer.padToken),
instance['input_mask'],
instance['position_ids'],
instance['mask_labels'],
instance['masked_lm_lengths'],
instance['next_sentence_labels']
)
)
l.logger().info("Wrote {} instances ({} batches of {} datapoints) to {}"
.format(len(masked_corpus['corpus']), self.steps_per_epoch, self.training_opts.batch_size, masked_corpus['file']))
return
| 59,181 | 43.800908 | 205 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/model.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import typing
import numpy as np
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.models.torch_bert import activations
from deeplearning.benchpress.models.torch_bert import config
from deeplearning.benchpress.models.torch_bert import modeling_utils
from deeplearning.benchpress.models.torch_bert import compiler
from deeplearning.benchpress.util import logging as l
# import tensorrt as trt
# import pycuda.autoinit
# import pycuda.driver as cuda
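# Mish activation (Misra, 2019): mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x))).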
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
ACT2FN = {
"gelu" : activations.gelu,
"relu" : torch.nn.functional.relu,
"swish" : activations.swish,
"gelu_new" : activations.gelu_new,
"mish" : mish
}
class BertEmbeddings(torch.nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = torch.nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = torch.nn.Embedding(config.max_position_embeddings, config.hidden_size)
# self.token_type_embeddings = torch.nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
# token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings # + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
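  # Shape sketch (hypothetical sizes, e.g. a batch of 2 sequences of length 8):
  # input_ids [2, 8] -> word_embeddings -> [2, 8, hidden_size]; the position embeddings
  # are broadcast-added on top, and LayerNorm plus dropout keep the output at
  # [2, 8, hidden_size].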
class BertSelfAttention(torch.nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = torch.nn.Linear(config.hidden_size, self.all_head_size)
self.key = torch.nn.Linear(config.hidden_size, self.all_head_size)
self.value = torch.nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = torch.nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
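  # transpose_for_scores reshapes [batch, seq_len, all_head_size] into
  # [batch, num_heads, seq_len, head_size]; e.g. with a hypothetical hidden size of 768
  # and 12 heads, a [2, 8, 768] tensor becomes [2, 12, 8, 64].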
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
      # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = torch.nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
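  # The forward pass above is standard scaled dot-product attention, computed per head:
  #   Attention(Q, K, V) = softmax(Q @ K^T / sqrt(head_size) + mask) @ V
  # where the optional additive mask carries large negative values at padded positions
  # so that they receive near-zero attention weight.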
class BertSelfOutput(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = modeling_utils.find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = modeling_utils.prune_linear_layer(self.self.query, index)
self.self.key = modeling_utils.prune_linear_layer(self.self.key, index)
self.self.value = modeling_utils.prune_linear_layer(self.self.value, index)
self.output.dense = modeling_utils.prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.dense = torch.nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.dense = torch.nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
layer_output = modeling_utils.apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
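  # apply_chunking_to_forward splits attention_output along the sequence dimension
  # (self.seq_len_dim == 1) into chunks of config.chunk_size_feed_forward tokens and runs
  # feed_forward_chunk on each chunk, trading extra compute for lower peak memory.
  # A chunk size of 0 disables chunking and processes the whole sequence at once.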
class BertEncoder(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = torch.nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
output_hidden_states=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
class BertPooler(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)
self.activation = torch.nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
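  # The pooled output is a dense + tanh projection of the first token's hidden state,
  # so a [batch, seq_len, hidden_size] input yields a [batch, hidden_size] result.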
class FeaturePositionalEncoding(torch.nn.Module):
def __init__(self, config):
super().__init__()
position = torch.arange(config.feature_sequence_length).unsqueeze(1)
div_term = torch.exp(torch.arange(0, config.feature_embedding_size, 2) * (-math.log(10000.0) / config.feature_embedding_size))
pe = torch.zeros(config.feature_sequence_length, 1, config.feature_embedding_size)
pe[:, 0, 0::2] = torch.sin(position * div_term)
pe[:, 0, 1::2] = torch.cos(position * div_term)
self.register_buffer('pe', pe)
self.dropout = torch.nn.Dropout(config.feature_dropout_prob)
return
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x + self.pe[:x.size(0)]
return self.dropout(x)
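  # The `pe` buffer above holds the standard sinusoidal encoding:
  #   pe[pos, 2i]   = sin(pos / 10000^(2i / feature_embedding_size))
  #   pe[pos, 2i+1] = cos(pos / 10000^(2i / feature_embedding_size))
  # and forward() adds a slice of this table to its input before applying dropout.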
class FeatureTransformer(torch.nn.Module):
def __init__(self, config):
super().__init__()
## Encoding space
self.encoder_embedding = torch.nn.Embedding(
num_embeddings = config.feature_vocab_size,
embedding_dim = config.feature_embedding_size,
padding_idx = config.feature_pad_idx
)
self.encoder_pos_encoder = FeaturePositionalEncoding(config)
encoder_layers = torch.nn.TransformerEncoderLayer(
d_model = config.feature_embedding_size,
nhead = config.feature_num_attention_heads,
dim_feedforward = config.feature_transformer_feedforward,
dropout = config.feature_dropout_prob,
batch_first = True
)
encoder_norm = torch.nn.LayerNorm(
config.feature_embedding_size,
eps = config.feature_layer_norm_eps
)
self.encoder_transformer = torch.nn.TransformerEncoder(
encoder_layer = encoder_layers,
num_layers = config.feature_num_hidden_layers,
norm = encoder_norm,
)
## Decoder space
# self.decoder_embedding = torch.nn.Embedding(
# num_embeddings = config.feature_vocab_size,
# embedding_dim = config.feature_embedding_size,
# padding_idx = feature_pad_idx
# )
# self.encoder_pos_encoder = FeaturePositionalEncoding(config)
# decoder_layers = torch.nn.TransformerDecoderLayer(
# d_model = config.feature_embedding_size,
# nhead = config.feature_num_attention_heads,
# dim_feedforward = config.feature_transformer_feedforward,
# dropout = config.feature_dropout_prob,
# batch_first = True
# )
# decoder_norm = torch.nn.LayerNorm(
# config.feature_embedding_size,
# eps = config.feature_layer_norm_eps
# )
# self.decoder_transformer = torch.nn.TransformerDecoder(
# decoder_layer = decoder_layers,
# num_layers = config.feature_num_hidden_layers,
# norm = decoder_norm,
# )
self.mapper = torch.nn.Linear(config.feature_embedding_size, config.feature_vocab_size)
self.reducer = torch.nn.Linear(config.feature_vocab_size, 1)
self.transpose = lambda t: torch.reshape(t, (-1, 1, config.feature_sequence_length))
self.repeater = lambda t, y: t.repeat(1, y, 1)
self.embedding_size = config.feature_embedding_size
self.init_weights()
return
def init_weights(self) -> None:
initrange = 0.1
self.encoder_embedding.weight.data.uniform_(-initrange, initrange)
self.mapper.bias.data.zero_()
self.mapper.weight.data.uniform_(-initrange, initrange)
self.reducer.bias.data.zero_()
self.reducer.weight.data.uniform_(-initrange, initrange)
return
def forward(self,
features : torch.Tensor,
sequence_length : torch.Size,
features_mask : torch.Tensor = None,
features_key_padding_mask : torch.Tensor = None
) -> torch.Tensor:
embed = self.encoder_embedding(features) * math.sqrt(self.embedding_size)
pos_embed = self.encoder_pos_encoder(embed)
encoded = self.encoder_transformer(
pos_embed,
mask = features_mask,
src_key_padding_mask = features_key_padding_mask
)
mapped = self.mapper(encoded)
reduced = self.reducer(mapped)
reshaped = self.transpose(reduced)
output = self.repeater(reshaped, sequence_length)
return output
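  # Intended shape flow of forward(), with hypothetical sizes F = feature_sequence_length,
  # E = feature_embedding_size, V = feature_vocab_size and S = target sequence length:
  #   features [batch, F] -> embedding [batch, F, E] -> transformer encoder [batch, F, E]
  #   -> mapper [batch, F, V] -> reducer [batch, F, 1] -> reshape [batch, 1, F]
  #   -> repeat [batch, S, F], ready to be concatenated with the BERT hidden states.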
class BertPredictionHeadTransform(torch.nn.Module):
def __init__(self, config):
super().__init__()
if config.feature_encoder:
input_hidden_size = config.hidden_size + config.feature_sequence_length
else:
input_hidden_size = config.hidden_size
self.dense = torch.nn.Linear(input_hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = torch.nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
return
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertLMFeaturePredictionHead(torch.nn.Module):
def __init__(self, config):
super().__init__()
# Transformer for raw features encoding.
self.feature_encoder = FeatureTransformer(config)
# BERT predictions transformation.
self.transform = BertPredictionHeadTransform(config)
## Res transform acts as a reducer for encoded_feature residual/skip connection.
self.res_transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
## Decoder maps hidden size to vocabulary size.
self.decoder = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.dbias = torch.nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.dbias
return
def forward(self, hidden_states, features):
encoded_features = self.feature_encoder(features, hidden_states.size(1))
res1 = torch.cat((hidden_states, encoded_features), -1)
hidden_states = self.transform(res1)
# res2 = torch.cat((hidden_states, encoded_features), -1)
# hidden_states = self.res_transform(res2)
hidden_states = self.decoder(hidden_states)
return hidden_states, encoded_features
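  # The raw feature vector is encoded once, repeated across the sequence dimension and
  # concatenated to every token's hidden state, giving [batch, seq_len, hidden_size +
  # feature_sequence_length] before the transform and decoder map it to vocabulary logits.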
class BertOnlyMLMHead(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output, features):
prediction_scores = self.predictions(sequence_output)
return prediction_scores, None
class BertMLMFeatureHead(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMFeaturePredictionHead(config)
def forward(self, sequence_output, features):
prediction_scores, encoded_features = self.predictions(sequence_output, features)
return prediction_scores, encoded_features
class BertPreTrainedModel(modeling_utils.PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = config.BertConfig
base_model_prefix = "bert"
authorized_missing_keys = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (torch.nn.Linear, torch.nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, torch.nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, torch.nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def get_output(self,
input_ids,
attention_mask,
position_ids,
token_type_ids = None,
head_mask = None,
inputs_embeds = None,
output_attentions = None,
output_hidden_states = None,
) -> typing.Tuple[torch.FloatTensor, torch.FloatTensor]:
raise NotImplementedError("Abstract class")
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
  To behave as a decoder the model needs to be initialized with the
:obj:`is_decoder` argument of the configuration set to :obj:`True`.
  To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an
:obj:`encoder_hidden_states` is then expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
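  # For example (hypothetical indices), heads_to_prune = {1: [0, 2], 2: [2, 3]} prunes
  # heads 0 and 2 of layer 1 and heads 2 and 3 of layer 2.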
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
    # If a 2D or 3D attention mask is provided for the cross-attention
    # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
return (sequence_output, pooled_output) + encoder_outputs[1:]
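# A minimal usage sketch of BertModel (hypothetical tensors; `cfg` is assumed to be a
# config.BertConfig built elsewhere, e.g. via config.BertConfig.from_dict):
#   model = BertModel(cfg)
#   ids = torch.randint(0, cfg.vocab_size, (2, 16))
#   mask = torch.ones_like(ids)
#   sequence_output, pooled_output = model(input_ids = ids, attention_mask = mask)[:2]
#   # sequence_output: [2, 16, hidden_size], pooled_output: [2, hidden_size]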
class BertForPreTraining(BertPreTrainedModel):
def __init__(self,
config,
tokenizer = None,
use_categorical : bool = False,
temperature : int = None,
target_lm : str = "hole",
without_label_head : bool = False,
):
super().__init__(config)
self.bert = BertModel(config)
if without_label_head is False:
if self.config.feature_encoder:
self.cls = BertMLMFeatureHead(config)
else:
self.cls = BertOnlyMLMHead(config)
else:
self.cls = None
if self.config.reward_compilation >= 0 or self.config.is_sampling:
self.compile_sampler = compiler.CompilationSampler(
tokenizer, use_categorical, temperature, target_lm
)
else:
self.compile_sampler = None
self.init_weights()
def get_output_embeddings(self):
if self.cls is not None:
return self.cls.predictions.decoder
else:
return None
def get_output(self,
input_ids,
attention_mask,
position_ids,
input_features = None,
token_type_ids = None,
head_mask = None,
inputs_embeds = None,
output_attentions = None,
output_hidden_states = None,
extract_hidden_state: bool = False,
) -> typing.Tuple[torch.FloatTensor, torch.FloatTensor]:
outputs = self.bert(
input_ids = input_ids,
attention_mask = attention_mask,
position_ids = position_ids,
token_type_ids = token_type_ids,
head_mask = head_mask,
inputs_embeds = inputs_embeds,
output_attentions = output_attentions,
output_hidden_states = output_hidden_states,
)
sequence_output, pooled_output = outputs[:2]
if self.cls is None or extract_hidden_state:
prediction_scores, encoded_features = None, None
else:
prediction_scores, encoded_features = self.cls(sequence_output, input_features)
return prediction_scores, encoded_features, sequence_output, pooled_output
def forward(
self,
input_ids = None,
attention_mask = None,
input_features = None,
token_type_ids = None,
position_ids = None,
head_mask = None,
inputs_embeds = None,
masked_lm_labels = None,
next_sentence_labels = None,
workload = None,
output_attentions = None,
output_hidden_states = None,
is_validation = False,
step = -1,
**kwargs
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
next_sentence_labels (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Examples::
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
    >>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
if workload is not None:
input_ids, attention_mask, position_ids, input_features = workload
extract_hidden_state = kwargs.get('extract_hidden_state', False)
if extract_hidden_state:
## If using only for hidden state extraction.
prediction_scores, encoded_features, hidden_state, _ = self.get_output(
input_ids,
attention_mask,
position_ids,
        extract_hidden_state = False, ## Set this to True if the prediction scores are not needed.
)
return prediction_scores, hidden_state
device = input_ids.get_device()
device = device if device >= 0 else 'cpu'
## If there is a sampling workload, load it directly to the compiler.
if workload is not None:
if self.cls is None:
raise ValueError("This mode requires a classification head.")
prediction_scores, encoded_features, hidden_states, attentions = self.get_output(
input_ids[0], attention_mask[0], position_ids[0], input_features[0] if input_features is not None else None,
)
bar = kwargs.get('bar', None)
return self.compile_sampler.generateSampleWorkload(
self,
device,
input_ids,
attention_mask,
input_features,
prediction_scores,
position_ids[0],
bar = bar,
)
## Otherwise select one other mode.
prediction_scores, encoded_features, hidden_states, attentions = self.get_output(
input_ids, attention_mask, position_ids, input_features,
token_type_ids, head_mask, inputs_embeds,
output_attentions, output_hidden_states
)
## [DEPRECATED]: Training with a compile sampler is proven to not work.
if not is_validation and self.compile_sampler and step >= self.config.reward_compilation and not self.config.is_sampling:
if self.cls is None:
raise ValueError("This mode requires a classification head.")
samples, compile_flag, masked_lm_labels = self.compile_sampler.generateTrainingBatch(
self,
device,
input_ids.cpu(),
input_features.cpu(),
prediction_scores.cpu(),
torch.clone(position_ids),
masked_lm_labels.cpu().numpy(),
)
loss_fct = torch.nn.CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
total_loss = masked_lm_loss
return {
'masked_lm_loss' : masked_lm_loss,
'total_loss' : total_loss,
'prediction_logits' : prediction_scores,
'hidden_states' : hidden_states,
'attentions' : attentions,
'compile_status' : torch.LongTensor(compile_flag).to(device),
'generated_samples' : torch.LongTensor(samples).to(device),
'batch_compilation_rate' : torch.full((1,), float(sum(compile_flag)) / len(compile_flag), dtype = torch.float).to(device),
# 'sample_indices' : [0],
}
## Sampling without a workload. Not really useful anymore.
elif not is_validation and self.compile_sampler and self.config.is_sampling:
if self.cls is None:
raise ValueError("This mode requires a classification head.")
samples, sample_indices, scores_history = self.compile_sampler.generateSampleBatch(
self,
device,
input_ids,
input_features,
prediction_scores,
position_ids,
)
return {
'generated_samples': samples,
'sample_indices' : sample_indices,
}
## Training mode or Validation mode.
else:
if masked_lm_labels is not None:
loss_fct = torch.nn.CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
total_loss = masked_lm_loss
else:
masked_lm_loss, total_loss = None, None
return {
'masked_lm_loss' : masked_lm_loss,
'total_loss' : total_loss,
'prediction_logits' : prediction_scores,
'hidden_states' : hidden_states,
}
class BertForPreTrainingTRT(BertForPreTraining):
def __init__(self, config, tokenizer = None, use_categorical = False, temperature = None):
super().__init__(config, tokenizer=tokenizer, use_categorical=use_categorical, temperature=temperature)
self.forward = self._forward_pytorch
self.get_output = self._get_output_pytorch
self.TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def init_engine(self, cache, device_id, batch_size, sequence_length, vocab_size, max_position_embeddings):
self.engine_path = cache.path / f'active_bert.{device_id}.engine'
self.model_onnx_path = cache.path / f'active_bert.{device_id}.onnx'
if not self.engine_path.exists():
self._create_engine(batch_size, sequence_length, vocab_size, max_position_embeddings)
self.runtime = trt.Runtime(self.TRT_LOGGER)
with open(self.engine_path, 'rb') as f:
self.engine = self.runtime.deserialize_cuda_engine(f.read())
self.stream = cuda.Stream()
self.inputs = []
self.outputs = []
self.bindings = []
for binding in self.engine:
shape = self.engine.get_binding_shape(binding)
size = trt.volume(shape)# * batch_size
dtype = trt.nptype(self.engine.get_binding_dtype(binding))
host_mem = cuda.pagelocked_empty(size, dtype).reshape(shape)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
self.bindings.append(int(device_mem))
# Append to the appropriate list.
if self.engine.binding_is_input(binding):
self.inputs.append((host_mem, device_mem))
else:
self.outputs.append((host_mem, device_mem))
# Override the pytorch module () operator
self.__call__ = self._forward_trt
self.forward = self._forward_trt
self.get_output = self._get_output_trt
def _create_engine(self, batch_size, sequence_length, vocab_size, max_position_embeddings):
with torch.no_grad():
dims = (batch_size, sequence_length)
input_ids = torch.autograd.Variable(torch.randint(vocab_size, dims)).cuda()
attention_mask = torch.autograd.Variable(torch.ones(dims)).cuda()
position_ids = torch.autograd.Variable(torch.randint(max_position_embeddings, dims)).cuda()
args = (input_ids, attention_mask, position_ids)
inputs = ['input_ids', 'attention_mask', 'position_ids']
outputs = ['prediction_scores']
dynamic_axes = {
'input_ids': {0: 'batch'},
'attention_mask': {0: 'batch'},
'position_ids': {0: 'batch'},
'prediction_scores':{0: 'batch'}
}
#out = torch.onnx.export(self.sample.model, args=args, f=model_onnx_path, input_names=inputs, output_names=outputs, dynamic_axes=dynamic_axes)
out = torch.onnx.export(self, args=args, f=self.model_onnx_path, input_names=inputs, output_names=outputs)
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(self.TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, self.TRT_LOGGER) as parser:
with open(self.model_onnx_path, 'rb') as model_onnx:
if not parser.parse(model_onnx.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
with trt.Builder(self.TRT_LOGGER) as builder, builder.create_builder_config() as config:
config.max_workspace_size = 1 << 29 # This determines the amount of memory available to the builder when building an optimized engine and should generally be set as high as possible.
with builder.build_engine(network, config) as engine:
with open(self.engine_path, 'wb') as f:
f.write(engine.serialize())
def _get_output_pytorch(self,
input_ids,
attention_mask,
position_ids,
token_type_ids = None,
head_mask = None,
inputs_embeds = None,
output_attentions = None,
output_hidden_states = None,
) -> typing.Tuple[torch.FloatTensor, torch.FloatTensor]:
outputs = self.bert(
input_ids = input_ids,
attention_mask = attention_mask,
position_ids = position_ids,
token_type_ids = token_type_ids,
head_mask = head_mask,
inputs_embeds = inputs_embeds,
output_attentions = output_attentions,
output_hidden_states = output_hidden_states,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
return prediction_scores, seq_relationship_score, outputs[0], outputs[1]
def _forward_pytorch(
self,
input_ids,
attention_mask,
position_ids
):
prediction_scores, _, _, _ = self._get_output_pytorch(input_ids, attention_mask, position_ids)
return prediction_scores
def _get_output_trt(self,
input_ids,
attention_mask,
position_ids
) -> typing.Tuple[torch.FloatTensor, torch.FloatTensor]:
np.copyto(self.inputs[0][0], input_ids.cpu())
np.copyto(self.inputs[1][0], attention_mask.cpu())
np.copyto(self.inputs[2][0], position_ids.cpu())
for inp in self.inputs:
cuda.memcpy_htod_async(inp[1], inp[0], self.stream)
self.context.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
cuda.memcpy_dtoh_async(self.outputs[0][0], self.outputs[0][1], self.stream)
self.stream.synchronize()
return torch.tensor(self.outputs[0][0]).cpu(), None, None, None
def _forward_trt(
self,
input_ids = None,
attention_mask = None,
token_type_ids = None,
position_ids = None,
head_mask = None,
inputs_embeds = None,
masked_lm_labels = None,
next_sentence_labels = None,
output_attentions = None,
output_hidden_states = None,
is_validation = False,
is_live = False,
step = -1,
**kwargs
):
if is_validation or not self.compile_sampler or not self.config.is_sampling:
raise NotImplementedError
with self.engine.create_execution_context() as self.context:
prediction_scores, _, _, _ = self._get_output_trt(input_ids, attention_mask, position_ids)
device = input_ids.get_device()
samples, sample_indices, scores_history = self.compile_sampler.generateSampleBatch(
self,
input_ids.get_device(),
input_ids.cpu(),
prediction_scores.cpu(),
position_ids,
is_live,
)
return {
'prediction_scores' : scores_history, # This is mainly used for live sampling. Else, watch out!
'generated_samples' : samples,
'sample_indices' : sample_indices,
}
| 44,186 | 39.316606 | 188 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/config.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
class BertConfig(object):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.BertModel`.
It is used to instantiate an BERT model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, optional, defaults to 30522):
Vocabulary size of the BERT model. Defines the different tokens that
      can be represented by the `input_ids` passed to the forward method of :class:`~transformers.BertModel`.
hidden_size (:obj:`int`, optional, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, optional, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, optional, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, optional, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
The non-linear activation function (function or string) in the encoder and pooler.
If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
      The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, optional, defaults to 512):
The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, optional, defaults to 2):
The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`.
initializer_range (:obj:`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
The epsilon used by the layer normalization layers.
gradient_checkpointing (:obj:`bool`, optional, defaults to False):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
Example::
>>> from transformers import BertModel, BertConfig
>>> # Initializing a BERT bert-base-uncased style configuration
>>> configuration = BertConfig()
>>> # Initializing a model from the bert-base-uncased style configuration
>>> model = BertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights.
It only affects the model's configuration.
Class attributes (overridden by derived classes)
- **model_type** (:obj:`str`): An identifier for the model type, serialized into the JSON file, and used to
recreate the correct object in :class:`~transformers.AutoConfig`.
Args:
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all hidden-states.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should returns all attentions.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
add_cross_attention (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models that can be used as decoder models within the `:class:~transformers.EncoderDecoderModel` class, which consists of all models in ``AUTO_MODELS_FOR_CAUSAL_LM``.
    tie_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder and decoder model to have the exact same parameter names.
prune_heads (:obj:`Dict[int, List[int]]`, `optional`, defaults to :obj:`{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list
of heads to prune in said layer.
For instance ``{1: [0, 2], 2: [2, 3]}`` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer
2.
xla_device (:obj:`bool`, `optional`):
      A flag to indicate whether TPUs are available or not.
chunk_size_feed_forward (:obj:`int`, `optional`, defaults to :obj:`0`):
The chunk size of all feed forward layers in the residual attention blocks.
A chunk size of :obj:`0` means that the feed forward layer is not chunked.
A chunk size of n means that the feed forward layer processes :obj:`n` < sequence_length embeddings at a time.
For more information on feed forward chunking, see `How does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ .
Parameters for sequence generation
- **max_length** (:obj:`int`, `optional`, defaults to 20) -- Maximum length that will be used by
default in the :obj:`generate` method of the model.
- **min_length** (:obj:`int`, `optional`, defaults to 10) -- Minimum length that will be used by
default in the :obj:`generate` method of the model.
- **do_sample** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in
the :obj:`generate` method of the model. Whether or not to use sampling ; use greedy decoding otherwise.
- **early_stopping** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by
default in the :obj:`generate` method of the model. Whether to stop the beam search when at least
``num_beams`` sentences are finished per batch or not.
- **num_beams** (:obj:`int`, `optional`, defaults to 1) -- Number of beams for beam search that will be
used by default in the :obj:`generate` method of the model. 1 means no beam search.
    - **temperature** (:obj:`float`, `optional`, defaults to 1) -- The value used to modulate the next token
probabilities that will be used by default in the :obj:`generate` method of the model. Must be strictly
positive.
- **top_k** (:obj:`int`, `optional`, defaults to 50) -- Number of highest probability vocabulary tokens to
keep for top-k-filtering that will be used by default in the :obj:`generate` method of the model.
- **top_p** (:obj:`float`, `optional`, defaults to 1) -- Value that will be used by default in the
:obj:`generate` method of the model for ``top_p``. If set to float < 1, only the most probable tokens
with probabilities that add up to ``top_p`` or higher are kept for generation.
- **repetition_penalty** (:obj:`float`, `optional`, defaults to 1) -- Parameter for repetition penalty
that will be used by default in the :obj:`generate` method of the model. 1.0 means no penalty.
- **length_penalty** (:obj:`float`, `optional`, defaults to 1) -- Exponential penalty to the length that
will be used by default in the :obj:`generate` method of the model.
- **no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by default
in the :obj:`generate` method of the model for ``no_repeat_ngram_size``. If set to int > 0, all ngrams of
that size can only occur once.
- **bad_words_ids** (:obj:`List[int]`, `optional`) -- List of token ids that are not allowed to be
generated that will be used by default in the :obj:`generate` method of the model. In order to get the
tokens of the words that should not appear in the generated text, use
:obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
- **num_return_sequences** (:obj:`int`, `optional`, defaults to 1) -- Number of independently computed
returned sequences for each element in the batch that will be used by default in the :obj:`generate`
method of the model.
Parameters for fine-tuning tasks
- **architectures** (:obj:`List[str]`, `optional`) -- Model architectures that can be used with the
model pretrained weights.
- **finetuning_task** (:obj:`str`, `optional`) -- Name of the task used to fine-tune the model. This can be
used when converting from an original (TensorFlow or PyTorch) checkpoint.
- **id2label** (:obj:`List[str]`, `optional`) -- A map from index (for instance prediction index, or target
index) to label.
- **label2id** (:obj:`Dict[str, int]`, `optional`) -- A map from label to index for the model.
- **num_labels** (:obj:`int`, `optional`) -- Number of labels to use in the last layer added to the model,
typically for a classification task.
- **task_specific_params** (:obj:`Dict[str, Any]`, `optional`) -- Additional keyword arguments to store for
the current task.
Parameters linked to the tokenizer
- **prefix** (:obj:`str`, `optional`) -- A specific prompt that should be added at the beginning of each
text before calling the model.
    - **bos_token_id** (:obj:`int`, `optional`) -- The id of the `beginning-of-stream` token.
    - **pad_token_id** (:obj:`int`, `optional`) -- The id of the `padding` token.
    - **eos_token_id** (:obj:`int`, `optional`) -- The id of the `end-of-stream` token.
    - **decoder_start_token_id** (:obj:`int`, `optional`) -- If an encoder-decoder model starts decoding with
a different token than `bos`, the id of that token.
PyTorch specific parameters
- **torchscript** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should be
used with Torchscript.
TensorFlow specific parameters
- **use_bfloat16** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should
use BFloat16 scalars (only used by some TensorFlow models).
"""
model_type = "bert"
def __init__(
self,
vocab_size,
hidden_size,
num_hidden_layers,
num_attention_heads,
intermediate_size,
hidden_act,
hidden_dropout_prob,
attention_probs_dropout_prob,
max_position_embeddings,
pad_token_id,
type_vocab_size,
initializer_range,
layer_norm_eps,
**kwargs
):
## Bert-specific attributes
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.gradient_checkpointing = kwargs.pop("gradient_checkpointing", False)
# Attributes with defaults
self.reward_compilation = kwargs.pop("reward_compilation", -1)
self.is_sampling = kwargs.pop("is_sampling", False)
self.return_dict = kwargs.pop("return_dict", False)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.use_cache = kwargs.pop("use_cache", True) # Not used by all models
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
# Attributes for feature vector encoding
self.feature_encoder = kwargs.pop("feature_encoder", False)
self.feature_sequence_length = kwargs.pop("feature_sequence_length", 256)
self.feature_embedding_size = kwargs.pop("feature_embedding_size", 512)
self.feature_pad_idx = kwargs.pop("feature_pad_idx", -1)
self.feature_dropout_prob = kwargs.pop("feature_dropout_prob", 0.1)
self.feature_vocab_size = kwargs.pop("feature_vocab_size", 768)
self.feature_num_attention_heads = kwargs.pop("feature_num_attention_heads", 4)
self.feature_transformer_feedforward = kwargs.pop("feature_transformer_feedforward", 2048)
self.feature_layer_norm_eps = kwargs.pop("feature_layer_norm_eps", 1e-5)
self.feature_num_hidden_layers = kwargs.pop("feature_num_hidden_layers", 2)
# Is decoder is used in encoder-decoder models to differentiate encoder from decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# TPU arguments
self.xla_device = kwargs.pop("xla_device", None)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
l.logger().error("Can't set {} with value {} for {}".format(key, value, self))
raise err
@classmethod
def from_dict(cls, bert_dict, **extra_args):
config = BertConfig(
vocab_size = bert_dict['vocab_size'],
hidden_size = bert_dict['hidden_size'],
num_hidden_layers = bert_dict['num_hidden_layers'],
num_attention_heads = bert_dict['num_attention_heads'],
intermediate_size = bert_dict['intermediate_size'],
hidden_act = bert_dict['hidden_act'],
hidden_dropout_prob = bert_dict['hidden_dropout_prob'],
attention_probs_dropout_prob = bert_dict['attention_probs_dropout_prob'],
max_position_embeddings = bert_dict['max_position_embeddings'],
type_vocab_size = bert_dict['type_vocab_size'],
initializer_range = bert_dict['initializer_range'],
layer_norm_eps = bert_dict['layer_norm_eps'],
pad_token_id = bert_dict['pad_token_id'],
**extra_args,
)
return config
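  # A minimal usage sketch with hypothetical hyper-parameter values, mirroring the keys
  # that from_dict expects:
  #   cfg = BertConfig.from_dict({
  #     'vocab_size': 512, 'hidden_size': 768, 'num_hidden_layers': 12,
  #     'num_attention_heads': 12, 'intermediate_size': 3072, 'hidden_act': 'gelu',
  #     'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1,
  #     'max_position_embeddings': 512, 'type_vocab_size': 2, 'initializer_range': 0.02,
  #     'layer_norm_eps': 1e-12, 'pad_token_id': 0,
  #   }, is_sampling = False)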
@property
def use_return_dict(self) -> bool:
"""
:obj:`bool`: Whether or not return :class:`~transformers.file_utils.ModelOutput` instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
:obj:`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
| 19,055 | 56.225225 | 273 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/compiler.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tqdm
import typing
import pathlib
import concurrent.futures
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"sample_indices_limit",
None,
"Hard-stop model generating more indices per sample than this specified integer."
)
class CompilationSampler(object):
"""
Compilation driven generation handler.
Used during training to iteratively fill a sequence
and feed to Clang for compilation status.
Also used during sampling to fill sequence and get
compilation status.
"""
def __init__(self,
tokenizer : tokenizers.TokenizerBase,
use_categorical : bool,
temperature : float,
target_lm : str,
):
self.tokenizer = tokenizer
self.temperature = temperature
self.use_categorical = use_categorical
if target_lm == "hole":
self.step_batch = self.StepHoleSeq
elif target_lm == "mask":
self.step_batch = self.StepMaskSeq
else:
raise KeyError(target_lm)
return
def argmax(self, t):
"""Sample argmax from a tensor."""
if self.use_categorical:
try:
ct = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
temperature = self.temperature if self.temperature is not None else 1.0,
logits = t,
validate_args = False if "1.9." in torch.__version__ else None,
).sample()
except ValueError as e:
dump_cf = ""
dump_types = ""
p = pathlib.Path("./dump_argmax_error.log").absolute()
if not p.exists():
l.logger().error(t.shape)
l.logger().error(p)
for d0 in t:
for d1 in d0:
dump_cf += str(d1) + ", "
if isinstance(d1, torch.Tensor):
dump_types += str(d1.type()) + ", "
else:
dump_types += str(type(d1)) + ", "
with open(p, 'w') as outf:
outf.write(str(t.shape) + "\n\n\n" + dump_cf + "\n\n\n" + dump_types)
raise e
      return torch.argmax(ct, dim = -1)
    return torch.argmax(t, dim = -1)
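  # When use_categorical is set, a Gumbel-softmax style sample is drawn from
  # RelaxedOneHotCategorical(temperature, logits) and its argmax is taken, which
  # approximates sampling a token id from the softmax distribution; otherwise the
  # call reduces to a plain deterministic argmax over the logits.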
def checkIfBatchCompiles(self,
sample: np.array
) -> int:
"""Sends a filled sequence to the compiler"""
try:
stdout = opencl.Compile(self.tokenizer.ArrayToCode(sample))
return 1
except ValueError:
return 0
def generateTrainingBatch(self,
model : typing.TypeVar("model.BertPreTrainedModel"),
device : torch.device,
input_ids : torch.LongTensor,
input_features : torch.LongTensor,
prediction_scores : torch.FloatTensor,
position_ids : torch.LongTensor,
masked_lm_labels : torch.LongTensor,
) -> typing.Tuple[typing.List[np.array], typing.List[int]]:
batch_size, sequence_length = tuple(input_ids.shape)
with concurrent.futures.ThreadPoolExecutor() as executor:
jobs = [executor.submit(self.iterTrainingSeq,
model = model,
device = device,
input_ids = input_ids [i],
                              input_features     = input_features    [i] if input_features is not None else None,
prediction_scores = prediction_scores[i],
position_ids = position_ids [i],
masked_lm_labels = masked_lm_labels [i],
) for i in range(batch_size)]
results = [j.result() for j in jobs]
samples = [x.numpy() for (x, _, _) in results]
compile_flag = [y for (_, y, _) in results]
masked_lm_labels = torch.LongTensor([z for (_, _, z) in results]).to(device)
return samples, compile_flag, masked_lm_labels
def iterTrainingSeq(self,
model : typing.TypeVar("model.BertPreTrainedModel"),
device : torch.device,
input_ids : torch.LongTensor,
input_features : torch.LongTensor,
prediction_scores : torch.FloatTensor,
position_ids : torch.LongTensor,
masked_lm_labels : torch.LongTensor,
) -> typing.Tuple[torch.LongTensor, int]:
"""
Main training sequence filling loop.
Function takes model's initial input, prediction and states.
Fills input sequence with step predictions and keeps asking
iteratively for predictions until target [MASK] or [HOLE] tokens
are closed.
Compiler is invoked for final sequence to get binary compilation status.
    ##!! This function is designed to run under multithreading and operates
        on a single sequence at a time. It CANNOT be applied to the
        whole batch at once.
"""
new_holes, next_input_ids, attention_mask = self.StepTrainingSeq(input_ids, prediction_scores)
with torch.no_grad():
while new_holes:
next_prediction_scores, _, _, _ = model.get_output(
next_input_ids.to(device), attention_mask.to(device), position_ids, input_features
)
new_holes, next_input_ids, attention_mask = self.StepTrainingSeq(
next_input_ids[0], next_prediction_scores[0],
)
compile_flag = self.checkIfBatchCompiles(next_input_ids[0].numpy())
if compile_flag:
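      # Labels are set to -100 (the ignore index of the LM loss), so a sample that
      # already compiles contributes no further masked-LM loss.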
masked_lm_labels = np.full(masked_lm_labels.shape, -100, dtype = np.int64)
return next_input_ids[0], compile_flag, masked_lm_labels
def StepTrainingSeq(self,
seq : torch.LongTensor,
prediction_scores : torch.FloatTensor,
) -> typing.Tuple[bool, torch.LongTensor, np.array]:
"""
Applies step predictions to input sequence.
Specifically optimized for training; does not compute sample indices for speed-up.
"""
seq_length = tuple(seq.shape)[0]
allowed_incr = (seq_length - int(torch.where(seq==self.tokenizer.padToken)[0][0])
if self.tokenizer.padToken in seq
else 0)
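    # allowed_incr counts the trailing [PAD] tokens (padding is assumed to sit at the
    # end of the sequence), i.e. how many extra tokens the sequence may still grow by.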
endTokens = self.tokenizer.metaTokenValues
    closed_hole = np.zeros(seq_length, dtype=bool)
    new_hole = np.zeros(seq_length, dtype=bool)
temp_seq = seq.numpy().copy()
for target_idx in torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]:
idx = int(target_idx)
prediction = int(self.argmax(prediction_scores[target_idx]))
is_hole = temp_seq[idx] == self.tokenizer.holeToken
if prediction in endTokens:
        # Model predicted something that will close the hole.
closed_hole[idx] = True
continue
# We replace the hole with a prediction
temp_seq[idx] = prediction
rem_adds = allowed_incr + np.sum(closed_hole) - np.sum(new_hole)
if is_hole and rem_adds:
# if this was a hole and we have more empty space, reinsert the hole
new_hole[idx] = True
new_seq = np.full(seq_length, self.tokenizer.padToken, dtype=np.int64)
new_idx = 0
for idx, t in enumerate(temp_seq):
if closed_hole[idx]:
continue
try:
new_seq[new_idx] = t
except IndexError:
l.logger().info("seq: {}".format(self.tokenizer.tokensToString([x for x in seq.cpu().numpy()])))
l.logger().info("temp_seq {}".format(self.tokenizer.tokensToString([x for x in temp_seq])))
l.logger().info("pred idx: {}".format(torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]))
l.logger().info("pred_toks {}".format(self.tokenizer.tokensToString([int(self.argmax(prediction_scores[idx])) for idx in torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]])))
l.logger().info("allowed_incr: {}".format(allowed_incr))
l.logger().info("new_hole: {}".format(new_hole))
l.logger().info("closed_hole: {}".format(closed_hole))
new_idx += 1
if new_hole[idx]:
try:
new_seq[new_idx] = self.tokenizer.holeToken
except IndexError:
l.logger().warn("seq: {}".format(self.tokenizer.tokensToString([x for x in seq.cpu().numpy()])))
l.logger().warn("temp_seq {}".format(self.tokenizer.tokensToString([x for x in temp_seq])))
l.logger().warn("pred idx: {}".format(torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]))
l.logger().warn("pred_toks {}".format(self.tokenizer.tokensToString([int(self.argmax(prediction_scores[idx])) for idx in torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]])))
l.logger().warn("allowed_incr: {}".format(allowed_incr))
l.logger().warn("new_hole: {}".format(new_hole))
l.logger().warn("closed_hole: {}".format(closed_hole))
new_idx += 1
if new_idx >= seq_length:
break
new_seq = torch.LongTensor([new_seq])
attention_mask = (new_seq != self.tokenizer.padToken)
return np.any(new_hole), new_seq, attention_mask
def generateSampleBatch(self,
model : typing.TypeVar("model.BertPreTrainedModel"),
device : torch.device,
input_ids : torch.LongTensor,
input_features : torch.LongTensor,
prediction_scores : torch.FloatTensor,
position_ids : torch.LongTensor,
is_live : bool,
) -> typing.Tuple[typing.List[np.array], typing.List[typing.List[int]]]:
"""
Get a batch of input ids and iteratively fill the holes and return a batch of samples.
"""
batch_size, sequence_length = tuple(input_ids.shape)
input_idxs = torch.arange(batch_size).to(device)
sample_indices = torch.full((batch_size, sequence_length), self.tokenizer.padToken, dtype = torch.int64).to(device)
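    # sample_indices records, per sequence, the token generated at each filled hole
    # position; fully closed sequences are copied into `samples` as they finish.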
res_idx = 0
samples = torch.zeros_like(input_ids)
new_holes = self.step_batch(input_ids, input_idxs, sample_indices, None, prediction_scores, device)
open_holes = torch.where(new_holes == True)[0]
closed_holes = torch.where(new_holes == False)[0]
samples[res_idx: res_idx + len(closed_holes)] = input_ids[closed_holes]
res_idx += len(closed_holes)
input_ids = torch.index_select(input_ids, 0, open_holes.to(device))
attention_mask = (input_ids != self.tokenizer.padToken)
while torch.any(new_holes):
prediction_scores, _, _, _ = model.get_output(
input_ids, attention_mask, position_ids[:len(input_ids)], input_features,
)
new_holes = self.step_batch(input_ids, input_idxs, sample_indices, None, prediction_scores, device)
open_holes = torch.where(new_holes == True)[0]
closed_holes = torch.where(new_holes == False)[0]
samples[res_idx: res_idx + len(closed_holes)] = input_ids[closed_holes]
res_idx += len(closed_holes)
input_ids = torch.index_select(input_ids, 0, open_holes.to(device))
attention_mask = (input_ids != self.tokenizer.padToken)
return samples, sample_indices, None
def generateSampleWorkload(self,
model : typing.TypeVar("model.BertPreTrainedModel"),
device : torch.device,
workload_input_ids : torch.LongTensor,
workload_attention_mask : torch.LongTensor,
workload_input_features : torch.LongTensor,
prediction_scores : torch.FloatTensor,
position_ids : torch.LongTensor,
bar : tqdm.tqdm = None,
) -> typing.Tuple[typing.List[np.array], typing.List[typing.List[int]]]:
"""
This function receives a full workload of input ids to be sampled.
    Heavy optimisations are performed to keep the GPU busy at all times.
The workload is streamed online and when a sequence is finished it is replaced
with a new one from the workload queue.
    Returns a full workload of sampled instances.
"""
# [workload_size x batch_size x sequence_length]
wload_size, batch_size, sequence_length = tuple(workload_input_ids.shape)
# Also compute feature embeddings sequence length.
if workload_input_features is not None:
_, _, feature_sequence_length = tuple(workload_input_features.shape)
# Number of sequences
nseq = wload_size * batch_size
# Iteration idx of workload
w_idx = batch_size
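    # w_idx points to the next unprocessed sequence of the flattened workload; the
    # first `batch_size` sequences form the initial active batch.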
# Get current input_ids - attention mask.
input_ids = workload_input_ids[0]
input_idxs = torch.arange(batch_size).to(device)
attention_mask = workload_attention_mask[0]
if workload_input_features is not None:
input_features = workload_input_features[0]
else:
input_features = None
# sample indices array that will be returned.
sample_indices = torch.full((nseq, sequence_length), self.tokenizer.padToken, dtype = torch.int64).to(device)
if FLAGS.sample_indices_limit is not None:
sidx_length = torch.full((batch_size, 1), 0, dtype = torch.int64).to(device)
# Workload of input_ids and attention_mask pairs.
# queue input_idxs ensure direct ordering from inputs -> outputs.
queue_input_ids = torch.reshape(workload_input_ids, (1, nseq, sequence_length)).squeeze(0)
queue_input_idxs = torch.arange(nseq).to(device)
queue_attention_mask = torch.reshape(workload_attention_mask, (1, nseq, sequence_length)).squeeze(0)
if workload_input_features is not None:
queue_input_features = torch.reshape(workload_input_features, (1, nseq, feature_sequence_length)).squeeze(0)
#! This is the return queue [nseq x sequence_length].
queue = torch.zeros(tuple(queue_input_ids.shape), dtype = torch.int64).to(device)
new_holes = self.step_batch(
input_ids,
input_idxs,
sample_indices,
sidx_length if FLAGS.sample_indices_limit else None,
prediction_scores,
device
)
open_holes = torch.where(new_holes == True)[0].to(device)
closed_holes = torch.where(new_holes == False)[0]
for i in closed_holes:
queue[input_idxs[i]] = input_ids[i]
if bar:
bar.update(1)
input_ids = torch.index_select(input_ids, 0, open_holes)
input_idxs = torch.index_select(input_idxs, 0, open_holes)
attention_mask = (input_ids != self.tokenizer.padToken)
if input_features is not None:
input_features = torch.index_select(input_features, 0, open_holes)
if FLAGS.sample_indices_limit:
sidx_length = torch.index_select(sidx_length, 0, open_holes)
res = batch_size - len(input_ids)
if res > 0:
input_ids = torch.cat((input_ids, queue_input_ids[w_idx: w_idx + res]), 0)
input_idxs = torch.cat((input_idxs, queue_input_idxs[w_idx: w_idx + res]), 0)
attention_mask = torch.cat((attention_mask, queue_attention_mask[w_idx: w_idx + res]), 0)
if input_features is not None:
input_features = torch.cat((input_features, queue_input_features[w_idx: w_idx + res]), 0)
if FLAGS.sample_indices_limit:
sidx_length = torch.cat((sidx_length, torch.full((res, 1), 0, dtype = torch.int64).to(device)), 0)
w_idx += res
while w_idx < nseq or torch.any(new_holes):
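      # Keep the GPU batch full: finished sequences are written to `queue` and
      # replaced with fresh ones until both the workload and all open holes are
      # exhausted.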
prediction_scores, _, _, _ = model.get_output(
input_ids, attention_mask, position_ids[:len(input_ids)], input_features
)
# Array of new hole existence per seq idx
new_holes = self.step_batch(
input_ids,
input_idxs,
sample_indices,
sidx_length if FLAGS.sample_indices_limit else None,
prediction_scores,
device
)
# Fill these holes.
open_holes = torch.where(new_holes == True)[0].to(device)
# Those are done.
closed_holes = torch.where(new_holes == False)[0]
# Add to return queue those that have finished.
for i in closed_holes:
queue[input_idxs[i]] = input_ids[i]
if bar:
bar.update(1)
input_ids = torch.index_select(input_ids, 0, open_holes)
input_idxs = torch.index_select(input_idxs, 0, open_holes)
attention_mask = (input_ids != self.tokenizer.padToken)
if input_features is not None:
input_features = torch.index_select(input_features, 0, open_holes)
if FLAGS.sample_indices_limit:
sidx_length = torch.index_select(sidx_length, 0, open_holes)
res = batch_size - len(input_ids)
if res > 0:
input_ids = torch.cat((input_ids, queue_input_ids[w_idx: w_idx + res]), 0)
input_idxs = torch.cat((input_idxs, queue_input_idxs[w_idx: w_idx + res]), 0)
attention_mask = torch.cat((attention_mask, queue_attention_mask[w_idx: w_idx + res]), 0)
if input_features is not None:
input_features = torch.cat((input_features, queue_input_features[w_idx: w_idx + res]), 0)
if FLAGS.sample_indices_limit:
sidx_length = torch.cat((sidx_length, torch.full((res, 1), 0, dtype = torch.int64).to(device)), 0)
w_idx += res
return queue, sample_indices
def StepHoleSeq(self,
batch : torch.LongTensor,
batch_idxs : torch.LongTensor,
sample_indices : torch.LongTensor,
indices_lengths : torch.LongTensor,
prediction_scores : torch.LongTensor,
device,
) -> typing.Tuple[
bool,
torch.LongTensor,
np.array,
]:
"""
Applies sample step with hole predictions to input batch.
!!!!!!WARNING!!!!!
    This function works correctly ONLY for 1 [HOLE] per sequence.
    If more holes existed, further operations would be needed to
    re-calculate the indices of the remaining holes, which would add unnecessary
    overhead. Dropping that support keeps the single-hole case fast.
"""
endTokens = self.tokenizer.metaTokenValues
# Array of boolean values, shows where holes are still left.
    new_hole = torch.zeros(len(batch), dtype=torch.bool)
# [seq_idx, hole_idx] of batch.
idxs, targets = torch.where(batch == self.tokenizer.holeToken)
# Predictions for these indices.
predictions = self.argmax(prediction_scores[(idxs, targets)])
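    # Three cases per hole: (a) the prediction is a meta token -> close the hole,
    # (b) no pad space left (or the indices limit is reached) -> write the prediction
    # in place, (c) otherwise -> write the prediction and shift the [HOLE] right.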
for seq_idx, el_idx in zip(idxs, targets):
# seq_idx -> indices within the batch
# el_idx -> element index within a sequence
if int(predictions[seq_idx]) in endTokens:
# Close hole, shift left one position, add pad to the end.
batch[seq_idx] = torch.cat((batch[seq_idx][:el_idx], batch[seq_idx][el_idx+1:], torch.LongTensor([self.tokenizer.padToken]).to(device)), 0)
elif int(batch[seq_idx][-1]) != self.tokenizer.padToken or (indices_lengths is not None and indices_lengths[seq_idx] >= FLAGS.sample_indices_limit-1):
# No pads remaining to the right, replace hole with prediction but don't insert new hole.
# batch[seq_idx] = torch.cat((batch[seq_idx][:el_idx], predictions[seq_idx].unsqueeze(0), batch[seq_idx][el_idx+1:]), 0)
batch[seq_idx][el_idx] = predictions[seq_idx]
else:
# Replace with prediction and keep hole.
batch[seq_idx] = torch.cat((batch[seq_idx][:el_idx], predictions[seq_idx].unsqueeze(0), batch[seq_idx][el_idx:][:-1]), 0)
new_hole[seq_idx] = True
q_idx = batch_idxs[seq_idx]
sample_indices[q_idx][el_idx] = predictions[seq_idx]
if indices_lengths is not None:
indices_lengths[seq_idx] += 1
return new_hole
def StepMaskSeq(self,
batch : torch.LongTensor,
batch_idxs : torch.LongTensor,
sample_indices : torch.LongTensor,
indices_lengths : torch.LongTensor,
prediction_scores : torch.LongTensor,
device,
) -> typing.Tuple[
bool,
torch.LongTensor,
np.array,
]:
"""
Applies sample step with mask predictions to input batch.
"""
# [seq_idx, hole_idx] of batch.
idxs, targets = torch.where(batch == self.tokenizer.maskToken)
# Predictions for these indices.
predictions = self.argmax(prediction_scores[(idxs, targets)])
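    # Masks are visited in reverse sequence order (flip), so each prediction is
    # indexed back with `idxs.size(0) - 1 - p_idx` to match its [MASK] position.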
for p_idx, (seq_idx, el_idx) in enumerate(zip(idxs.flip(dims = (0,)), targets.flip(dims = (0,)))):
# seq_idx -> indices within the batch
# el_idx -> element index within a sequence
# Casually replace the [MASK] with the single predicted token.
batch[seq_idx][el_idx] = predictions[idxs.size(0) - 1 - p_idx]
q_idx = batch_idxs[seq_idx]
sample_indices[q_idx][el_idx] = predictions[idxs.size(0) - 1 - p_idx]
if indices_lengths is not None:
indices_lengths[seq_idx] += 1
    return torch.zeros(len(batch), dtype=torch.bool)
| 22,498 | 44.178715 | 220 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/generation_utils.py | # coding=utf-8
# Copyright 2022 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from deeplearning.benchpress.util.pytorch import torch
class GenerationMixin:
"""
  A class containing all of the functions supporting generation, to be used as a mixin in
  :class:`~transformers.PreTrainedModel`.
"""
def prepare_inputs_for_generation(self, input_ids, **kwargs):
"""
    Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to prepare inputs in the
generate method.
"""
return {"input_ids": input_ids}
def adjust_logits_during_generation(self, logits, **kwargs):
"""
    Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to adjust the logits in
the generate method.
"""
return logits
def _use_cache(self, outputs, use_cache):
"""During generation, decide whether to pass the `past` variable to the next forward pass."""
if len(outputs) <= 1 or use_cache is False:
return False
if hasattr(self.config, "mem_len") and self.config.mem_len == 0:
return False
return True
def enforce_repetition_penalty_(self, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty):
"""
Enforce the repetition penalty (from the `CTRL paper <https://arxiv.org/abs/1909.05858>`__).
"""
for i in range(batch_size * num_beams):
for previous_token in set(prev_output_tokens[i].tolist()):
        # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
if lprobs[i, previous_token] < 0:
lprobs[i, previous_token] *= repetition_penalty
else:
lprobs[i, previous_token] /= repetition_penalty
def postprocess_next_token_scores(
self,
scores,
input_ids,
no_repeat_ngram_size,
bad_words_ids,
cur_len,
min_length,
max_length,
eos_token_id,
repetition_penalty,
batch_size,
num_beams,
):
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(
scores, batch_size, num_beams, input_ids, repetition_penalty,
)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -float("inf")
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
if bad_words_ids is not None:
# Exclude EOS token (already processed)
bad_words_ids = list(filter(lambda bad_token_seq: bad_token_seq != [eos_token_id], bad_words_ids))
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids.tolist(), bad_words_ids)
# Modify the scores in place by setting the banned tokens logits to `-inf`
set_scores_to_inf_for_banned_tokens(scores, banned_tokens)
return scores
@torch.no_grad()
def generate(
self,
input_ids: typing.Optional[torch.LongTensor] = None,
max_length: typing.Optional[int] = None,
min_length: typing.Optional[int] = None,
do_sample: typing.Optional[bool] = None,
early_stopping: typing.Optional[bool] = None,
num_beams: typing.Optional[int] = None,
temperature: typing.Optional[float] = None,
top_k: typing.Optional[int] = None,
top_p: typing.Optional[float] = None,
repetition_penalty: typing.Optional[float] = None,
bad_words_ids: typing.Optional[typing.Iterable[int]] = None,
bos_token_id: typing.Optional[int] = None,
pad_token_id: typing.Optional[int] = None,
eos_token_id: typing.Optional[int] = None,
length_penalty: typing.Optional[float] = None,
no_repeat_ngram_size: typing.Optional[int] = None,
num_return_sequences: typing.Optional[int] = None,
attention_mask: typing.Optional[torch.LongTensor] = None,
decoder_start_token_id: typing.Optional[int] = None,
use_cache: typing.Optional[bool] = None,
**model_specific_kwargs
) -> torch.LongTensor:
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code
<https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__.
Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the
attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values
indicated are the default values of those config.
Most of these parameters are explained in more detail in `this blog post
<https://huggingface.co/blog/how-to-generate>`__.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes
it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
min_length (:obj:`int`, `optional`, defaults to 10):
The minimum length of the sequence to be generated.
do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use sampling ; use greedy decoding otherwise.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
            temperature (:obj:`float`, `optional`, defaults to 1.0):
The value used to module the next token probabilities.
top_k (:obj:`int`, `optional`, defaults to 50):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (:obj:`float`, `optional`, defaults to 1.0):
If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or
higher are kept for generation.
repetition_penalty (:obj:`float`, `optional`, defaults to 1.0):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
bos_token_id (:obj:`int`, `optional`):
The id of the `beginning-of-sequence` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty.
Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in
order to encourage the model to produce longer sequences.
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
bad_words_ids(:obj:`typing.List[int]`, `optional`):
typing.List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences(:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for
tokens that are not masked, and 0 for masked tokens.
If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id (:obj:`int`, `optional`):
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
model_specific_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model.
Return:
:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`:
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
input_context = 'My cute dog' # "Legal" is one of the control codes for ctrl
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
if input_ids is not None:
      batch_size = input_ids.shape[0]  # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
assert (
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = torch.full(
(batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
    # do not allow duplicate outputs when greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
attention_mask = input_ids.ne(pad_token_id).long()
elif attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
# set pad_token_id to eos_token_id if not set. Important that this is done after
# attention_mask is created
if pad_token_id is None and eos_token_id is not None:
pad_token_id = eos_token_id
# current position and vocab size
if hasattr(self.config, "vocab_size"):
vocab_size = self.config.vocab_size
elif (
self.config.is_encoder_decoder
and hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "vocab_size")
):
vocab_size = self.config.decoder.vocab_size
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
# see if BOS token can be used for decoder_start_token_id
if bos_token_id is not None:
decoder_start_token_id = bos_token_id
elif hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id"):
decoder_start_token_id = self.config.decoder.bos_token_id
else:
raise ValueError(
"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
)
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len
)
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
cur_len = 1
assert (
batch_size == encoder_outputs[0].shape[0]
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
else:
encoder_outputs = None
cur_len = input_ids.shape[-1]
assert (
cur_len < max_length
), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`"
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
return output
def _generate_no_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
pad_token_id,
eos_token_id,
batch_size,
encoder_outputs,
attention_mask,
use_cache,
model_specific_kwargs,
):
""" Generate sequences for each example without beam search (num_beams == 1).
        All returned sequences are generated independently.
"""
# length of generated sentences / unfinished sentences
unfinished_sents = input_ids.new(batch_size).fill_(1)
sent_lengths = input_ids.new(batch_size).fill_(max_length)
past = (encoder_outputs, None) if encoder_outputs is not None else None
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs)
next_token_logits = outputs[0][:, -1, :]
scores = self.postprocess_next_token_scores(
scores=next_token_logits,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=1,
)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
scores = scores / temperature
# Top-p/top-k filtering
next_token_logscores = top_k_top_p_filtering(scores, top_k=top_k, top_p=top_p)
# Sample
probs = torch.nn.functional.softmax(next_token_logscores, dim=-1)
next_token = torch.multinomial(probs, num_samples=1, replacement = True).squeeze(1)
else:
# Greedy decoding
next_token = torch.argmax(next_token_logits, dim=-1)
# update generations and finished sentences
if eos_token_id is not None:
# pad finished sentences if eos_token_id exist
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
else:
tokens_to_add = next_token
# add token and increase length by one
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
cur_len = cur_len + 1
if eos_token_id is not None:
eos_in_sents = tokens_to_add == eos_token_id
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len)
# unfinished_sents is set to zero if eos in sentence
unfinished_sents.mul_((~eos_in_sents).long())
      # stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
return input_ids
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
pad_token_id,
eos_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
use_cache,
model_specific_kwargs,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = (encoder_outputs, None) if encoder_outputs is not None else None
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
next_token_logits = self.adjust_logits_during_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
scores = torch.nn.functional.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
scores = self.postprocess_next_token_scores(
scores=scores,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=num_beams,
)
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Temperature
if temperature != 1.0:
_scores = _scores / temperature
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = torch.nn.functional.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
        # re-organize to group the beam together (we are keeping the top hypotheses across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence, add a pad token
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content, this will get added to next_batch_beam
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(), beam_token_score.item(),
)
else:
# add next predicted token since it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# once the beam for next step is full, don't add more tokens to it.
if len(next_sent_beam) == num_beams:
break
# Check if we are done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1), "We should have added num_beams each step"
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch and update current length
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
cur_len = cur_len + 1
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
# finalize all open beam hypotheses and add to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# shorter batches are padded
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`Pad_token_id` has to be defined"
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
# fill with hypothesis and eos_token_id if necessary
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
else:
# none of the hypotheses have an eos_token
      assert all(len(hypo) == max_length for hypo in best)
decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)
return decoded
@staticmethod
def _reorder_cache(past: typing.Tuple, beam_idx: torch.Tensor) -> typing.Tuple[torch.Tensor]:
return tuple(layer_past.index_select(1, beam_idx) for layer_past in past)
def calc_banned_ngram_tokens(prev_input_ids: torch.Tensor, num_hypos: int, no_repeat_ngram_size: int, cur_len: int) -> None:
"""Copied from fairseq for no_repeat_ngram in beam_search"""
if cur_len + 1 < no_repeat_ngram_size:
# return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
return [[] for _ in range(num_hypos)]
generated_ngrams = [{} for _ in range(num_hypos)]
for idx in range(num_hypos):
gen_tokens = prev_input_ids[idx].tolist()
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]):
prev_ngram_tuple = tuple(ngram[:-1])
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
def _get_generated_ngrams(hypo_idx):
# Before decoding the next token, prevent decoding of ngrams that have already appeared
start_idx = cur_len + 1 - no_repeat_ngram_size
ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].tolist())
return generated_ngrams[hypo_idx].get(ngram_idx, [])
banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
return banned_tokens
def calc_banned_bad_words_ids(prev_input_ids: typing.Iterable[int], bad_words_ids: typing.Iterable[int]) -> typing.Iterable[int]:
banned_tokens = []
def _tokens_match(prev_tokens, tokens):
if len(tokens) == 0:
# if bad word tokens is just one token always ban it
return True
if len(tokens) > len(prev_tokens):
# if bad word tokens are longer than prev tokens they can't be equal
return False
if prev_tokens[-len(tokens) :] == tokens:
# if tokens match
return True
else:
return False
for prev_input_ids_slice in prev_input_ids:
banned_tokens_slice = []
for banned_token_seq in bad_words_ids:
assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format(
bad_words_ids
)
if _tokens_match(prev_input_ids_slice, banned_token_seq[:-1]) is False:
# if tokens do not match continue
continue
banned_tokens_slice.append(banned_token_seq[-1])
banned_tokens.append(banned_tokens_slice)
return banned_tokens
def set_scores_to_inf_for_banned_tokens(scores: torch.Tensor, banned_tokens: typing.List[typing.List[int]]) -> None:
""" Modifies the scores in place by setting the banned token positions to `-inf`. Banned token is expected to be
a list of list of banned tokens to ban in the format [[batch index, vocabulary position],...]
Args:
scores: logits distribution of shape (batch size, vocabulary size)
banned_tokens: list of list of tokens to ban of length (batch_size)
"""
banned_mask_list = []
for idx, batch_banned_tokens in enumerate(banned_tokens):
for token in batch_banned_tokens:
banned_mask_list.append([idx, token])
if not banned_mask_list:
return
banned_mask = torch.LongTensor(banned_mask_list)
indices = torch.ones(len(banned_mask))
# A sparse tensor is generated from a list of coordinates: [[0, 1], [0, 2], [2, 0]]. A conversion to dense tensor generates:
# [ 0 1 1 ]
# [ 0 0 0 ]
# [ 1 0 0 ]
banned_mask = torch.sparse.LongTensor(banned_mask.t(), indices, scores.size()).to(scores.device).to_dense().bool()
scores.masked_fill_(banned_mask, -float("inf"))
def top_k_top_p_filtering(
logits: torch.Tensor,
top_k: int = 0,
top_p: float = 1.0,
filter_value: float = -float("Inf"),
min_tokens_to_keep: int = 1,
) -> torch.Tensor:
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
Make sure we keep at least min_tokens_to_keep per batch example in the output
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold (token with 0 are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits
class BeamHypotheses(object):
def __init__(self, num_beams, max_length, length_penalty, early_stopping):
"""
Initialize n-best list of hypotheses.
"""
self.max_length = max_length - 1 # ignoring bos_token
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.num_beams = num_beams
self.beams = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.beams)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp))
if len(self) > self.num_beams:
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)])
del self.beams[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs, cur_len):
"""
    If there are enough hypotheses and none of the hypotheses being generated
    can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
cur_score = best_sum_logprobs / cur_len ** self.length_penalty
ret = self.worst_score >= cur_score
return ret
| 45,882 | 44.160433 | 242 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/datasets.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This modules
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import typing
import pickle
import functools
import json
import numpy as np
import pathlib
import glob
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.models import sequence_masking
from deeplearning.benchpress.models import lm_data_generator
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
class OnlineDataset(torch.utils.data.Dataset):
r"""Online pre-processing dataset of raw corpus.
  This dataset holds the path to the raw corpus and yields
pre-processed instances on the fly.
Arguments:
dataset (path): Path for raw dataset
func (callable): Function called to pre-process sequence.
"""
def __init__(self, dg: lm_data_generator.MaskLMDataGenerator, is_train: bool):
super(OnlineDataset, self).__init__()
full_dataset = self.load_data(dg.cache.path / "{}corpus.pkl".format("pre_" if dg.pre_train else ""))
"""
    TODO: it would be better to change the is_train check to something more generic.
"""
if is_train:
self.dataset = full_dataset[:int(len(full_dataset) * (1 - (dg.config.validation_split / 100)))]
else:
self.dataset = full_dataset[int(len(full_dataset) * (1 - (dg.config.validation_split / 100))):]
self.feature_encoder = dg.feature_encoder
self.cache_path = dg.cache.path
self.size = len(self.dataset)
self.cur_step = 0
self.steps_per_epoch = dg.steps_per_epoch * dg.training_opts.batch_size
self.hlen_monitor = None
if is_train:
if (self.cache_path / "{}hole_length_mon{}.pkl".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else "")).exists():
with open(self.cache_path / "{}hole_length_mon{}.pkl".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""), 'rb') as infile:
self.hlen_monitor = pickle.load(infile)
else:
self.hlen_monitor = monitors.NormalizedFrequencyMonitor(self.cache_path, "{}online_hole_length{}".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""))
"""
    TODO: add a custom config, just like in lm_data_generator,
    for validation sets / sample sets, etc.
"""
if dg.config.HasField("mask"):
self.func = functools.partial(sequence_masking.MaskSequence,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
pickled_tokenizer = dg.tokenizer,
training_opts = dg.training_opts,
is_torch = True,
config = dg.config,
)
elif dg.config.HasField("hole"):
distribution = distributions.Distribution.FromHoleConfig(
dg.config.hole, dg.cache.path, "hole_length_online"
)
self.func = functools.partial(sequence_masking.HoleSequence,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
masked_lm_prob = dg.training_opts.masked_lm_prob,
distribution = distribution,
tokenizer = dg.tokenizer,
)
elif dg.config.HasField("mask_seq"):
distribution = distributions.Distribution.FromHoleConfig(
dg.config.mask_seq, dg.cache.path, "mask_seq_length_online"
)
self.func = functools.partial(sequence_masking.HoleSequenceSeqMasks,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
masked_lm_prob = dg.training_opts.masked_lm_prob,
distribution = distribution,
tokenizer = dg.tokenizer,
)
return
def __len__(self):
return self.size
def __getitem__(self, idx):
self.cur_step += 1
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
if not self.feature_encoder:
k = self.func(self.dataset[idx])
else:
k = self.func(self.dataset[idx][0])
k['input_features'] = self.dataset[idx][1]
if self.hlen_monitor:
self.hlen_monitor.register([x for x in k['masked_lm_lengths'] if x >= 0])
if self.cur_step % self.steps_per_epoch == 0:
self.hlen_monitor.plot()
with open(self.cache_path / "hole_length_mon{}.pkl".format("_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""), 'wb') as outf:
pickle.dump(self.hlen_monitor, outf)
    # raise NotImplementedError("Fix a) init state of rngen")
return k
def load_data(self, dataset: pathlib.Path) -> typing.List[np.array]:
if dataset.exists():
with open(dataset, 'rb') as infile:
return pickle.load(infile)
else:
raise FileNotFoundError(dataset)
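# A small illustrative sketch (added for clarity) of the train/validation split used in
# OnlineDataset.__init__ above: with validation_split given as a percentage, the first
# (100 - split)% of the corpus is used for training and the remaining tail for validation.
# The corpus size and split value below are hypothetical.
def _validation_split_demo(corpus_size: int = 1000, validation_split: int = 10):
  full_dataset = list(range(corpus_size))
  cut = int(len(full_dataset) * (1 - (validation_split / 100)))
  train_set = full_dataset[:cut] # First 900 items for a 10% split.
  val_set = full_dataset[cut:] # Last 100 items.
  return len(train_set), len(val_set)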
class LazyOnlineDataset(torch.utils.data.Dataset):
r"""Dataset as a concatenation of multiple datasets.
  This class is useful for assembling different existing datasets
  and instantiating them lazily, to avoid loading them all in
  memory at the same time.
Arguments:
datasets (sequence): List of paths for datasets to be concatenated
"""
@staticmethod
def cumsum(sequence: typing.List[pathlib.Path], length_cache: pathlib.Path):
    lts, r, s = None, [], 0 # Cached lengths dict, cumulative lengths, running total.
## If lengths cache exists, just load the dictionary.
if length_cache.exists():
with open(length_cache, 'r') as inf:
lts = json.load(inf)
## Iterate every dataset chunk, and fix the cumulative length distribution.
for e in sequence:
if lts:
lt = lts[pathlib.Path(e).name]
else:
with open(e, 'rb') as infile:
length = len(pickle.load(infile))
lt = length
assert lt > 0, "Dataset {} is empty".format(e)
r.append(lt + s)
s += lt
## If lengths cache had not been created, fix it now.
if not lts and environment.WORLD_RANK == 0:
lts = {}
s = 0
for e, rx in zip(sequence, r):
lts[pathlib.Path(e).name] = rx - s
s = rx
with open(length_cache, 'w') as outf:
json.dump(lts, outf)
return r
@property
def num_datasets(self):
return len(self.datasets)
def __init__(self, dg: lm_data_generator.MaskLMDataGenerator, is_train: bool):
super(LazyOnlineDataset, self).__init__()
self.datasets = glob.glob(str(dg.cache.path / "{}corpus_*.pkl".format("pre_" if dg.pre_train else "")))
self.cumulative_sizes = self.cumsum(self.datasets, dg.cache.path / "pre_lengths_cache.json")
self.feature_encoder = dg.feature_encoder
self.curr_dset_idx = None
self.dataset = None
self.is_train = is_train
"""
    TODO: it would be better to change the is_train check to something more generic.
"""
self.vfactor = lambda l: int(l * (1 - (dg.config.validation_split / 100)))
self.cache_path = dg.cache.path
self.cur_step = 0
self.steps_per_epoch = dg.steps_per_epoch * dg.training_opts.batch_size
self.hlen_monitor = None
if is_train:
if (self.cache_path / "{}hole_length_mon{}.pkl".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else "")).exists():
with open(self.cache_path / "{}hole_length_mon{}.pkl".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""), 'rb') as infile:
self.hlen_monitor = pickle.load(infile)
else:
self.hlen_monitor = monitors.NormalizedFrequencyMonitor(self.cache_path, "{}online_hole_length{}".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""))
"""
    TODO: add a custom config, just like in lm_data_generator,
    for validation sets / sample sets, etc.
"""
self.tokenizer = dg.tokenizer
if dg.config.HasField("mask"):
self.func = functools.partial(sequence_masking.MaskSequence,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
pickled_tokenizer = dg.tokenizer,
training_opts = dg.training_opts,
is_torch = True,
config = dg.config,
)
elif dg.config.HasField("hole"):
distribution = distributions.Distribution.FromHoleConfig(
dg.config.hole, dg.cache.path, "hole_length_online"
)
self.func = functools.partial(sequence_masking.HoleSequence,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
masked_lm_prob = dg.training_opts.masked_lm_prob,
distribution = distribution,
tokenizer = dg.tokenizer,
)
elif dg.config.HasField("mask_seq"):
distribution = distributions.Distribution.FromHoleConfig(
dg.config.mask_seq, dg.cache.path, "mask_seq_length_online"
)
self.func = functools.partial(sequence_masking.HoleSequenceSeqMasks,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
masked_lm_prob = dg.training_opts.masked_lm_prob,
distribution = distribution,
tokenizer = dg.tokenizer,
)
return
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
self.cur_step += 1
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
import bisect
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if self.curr_dset_idx != dataset_idx:
self.curr_dset_idx = dataset_idx
with open(self.datasets[dataset_idx], 'rb') as infile:
self.dataset = pickle.load(infile)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
k = self.func(self.dataset[sample_idx])
if self.hlen_monitor:
self.hlen_monitor.register([x for x in k['masked_lm_lengths'] if x >= 0])
if self.cur_step % self.steps_per_epoch == 0:
self.hlen_monitor.plot()
with open(self.cache_path / "hole_length_mon{}.pkl".format("_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""), 'wb') as outf:
pickle.dump(self.hlen_monitor, outf)
return k
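# An illustrative sketch (added for clarity) of how LazyOnlineDataset maps a global index
# to a (chunk, local index) pair: `cumsum` builds cumulative chunk sizes and `__getitem__`
# locates the chunk with bisect_right, then subtracts the previous cumulative size.
# The chunk lengths below are hypothetical.
def _cumulative_index_demo(idx: int = 120):
  import bisect
  chunk_lengths = [100, 250, 50]
  cumulative_sizes = [] # Becomes [100, 350, 400].
  s = 0
  for lt in chunk_lengths:
    cumulative_sizes.append(lt + s)
    s += lt
  dataset_idx = bisect.bisect_right(cumulative_sizes, idx) # 120 -> chunk 1
  sample_idx = idx if dataset_idx == 0 else idx - cumulative_sizes[dataset_idx - 1] # 120 - 100 = 20
  return dataset_idx, sample_idx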
class LazyConcatDataset(torch.utils.data.Dataset):
r"""Dataset as a concatenation of multiple datasets.
  This class is useful for assembling different existing datasets
  and instantiating them lazily, to avoid loading them all in
  memory at the same time.
Arguments:
datasets (sequence): List of paths for datasets to be concatenated
"""
@staticmethod
def cumsum(sequence: typing.List[pathlib.Path]):
r, s = [], 0
for e in sequence:
lt = len(torch.load(e))
assert lt > 0, "Dataset {} is empty".format(e)
r.append(lt + s)
s += lt
return r
@property
def num_datasets(self):
return len(self.datasets)
def __init__(self, datasets: typing.List[pathlib.Path]):
super(LazyConcatDataset, self).__init__()
assert len(datasets) > 0, 'Empty list of datasets provided.'
self.datasets = datasets
self.cumulative_sizes = self.cumsum(self.datasets)
self.curr_dset_idx = None
self.dataset = None
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
import bisect
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if self.curr_dset_idx != dataset_idx:
self.curr_dset_idx = dataset_idx
self.dataset = torch.load(self.datasets[dataset_idx])
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.dataset[sample_idx]
class LazyRandomSampler(torch.utils.data.Sampler):
r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
If with replacement, then user can specify :attr:`num_samples` to draw.
Arguments:
data_source (Dataset): dataset to sample from
replacement (bool): samples are drawn with replacement if ``True``, default=``False``
num_samples (int): number of samples to draw, default=`len(dataset)`. This argument
is supposed to be specified only when `replacement` is ``True``.
generator (Generator): Generator used in sampling.
"""
def __init__(self, data_source, replacement = False, num_samples = None, generator = None):
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
self.generator = generator
self.distributed = True if environment.WORLD_SIZE > 1 else False
self.dataset_idx = self.__datasetIdx_iter__
self.epoch = None
if not isinstance(self.replacement, bool):
raise TypeError("replacement should be a boolean value, but got "
"replacement={}".format(self.replacement))
if self._num_samples is not None and not replacement:
      raise ValueError("With replacement=False, num_samples should not be specified, "
                       "since a random permutation will be performed.")
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(self.num_samples))
@property
def num_samples(self):
# dataset size might change at runtime
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
@property
def num_datasets(self):
if isinstance(self.data_source, LazyConcatDataset) or isinstance(self.data_source, LazyOnlineDataset):
return self.data_source.num_datasets
else:
return 1
@property
def __datasetIdx_iter__(self):
dataset_idx = torch.randperm(self.num_datasets, generator = self.generator).tolist()
self.dataset_tensor = iter(dataset_idx)
return self.dataset_tensor
def __iter__(self):
try:
dataset_idx = next(self.dataset_tensor)
except StopIteration:
dataset_idx = next(self.__datasetIdx_iter__)
lb, ub = self.data_source.cumulative_sizes[dataset_idx - 1] if dataset_idx else 0, self.data_source.cumulative_sizes[dataset_idx]
if isinstance(self.data_source, LazyOnlineDataset):
clen = ub - lb
if self.data_source.is_train:
bounds = (lb, lb + self.data_source.vfactor(clen))
else:
bounds = (lb + self.data_source.vfactor(clen), ub)
else:
bounds = (lb, ub)
if self.distributed:
self.generator = torch.Generator()
self.generator.manual_seed(self.epoch)
if self.replacement:
if self._num_samples is None:
size = bounds[1] - bounds[0]
else:
size = self._num_samples // self.num_datasets
rand_tensor = torch.randint(low = bounds[0], high = bounds[1], size = (size,), generator = self.generator).tolist()
else:
rand_tensor = [x + bounds[0] for x in torch.randperm(bounds[1] - bounds[0], generator = self.generator).tolist()]
if self.distributed:
rounded_total = (len(rand_tensor) // environment.WORLD_SIZE) * environment.WORLD_SIZE
rand_tensor = rand_tensor[environment.WORLD_RANK:rounded_total:environment.WORLD_SIZE]
return iter(rand_tensor)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch: int) -> None:
"""
Sets epoch for deterministic runs across DDP.
"""
self.epoch = epoch
return
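# A minimal sketch (added for illustration) of the distributed slicing used in
# LazyRandomSampler.__iter__ above: the shuffled index list is truncated to a multiple of
# WORLD_SIZE and each rank takes every WORLD_SIZE-th element starting at its own rank, so
# ranks receive disjoint, equally sized shards. The values below are hypothetical.
def _distributed_shard_demo(world_size: int = 4):
  rand_tensor = list(range(10)) # Stand-in for shuffled dataset indices.
  rounded_total = (len(rand_tensor) // world_size) * world_size # 8
  shards = [rand_tensor[rank:rounded_total:world_size] for rank in range(world_size)]
  return shards # [[0, 4], [1, 5], [2, 6], [3, 7]]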
| 17,820 | 39.410431 | 221 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/torch_bert.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BenchPress language model training and sampling wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import shutil
import multiprocessing
import functools
import humanize
import typing
import pathlib
import datetime
import time
import numpy as np
from absl import flags
import tqdm
from collections import OrderedDict
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.samplers import sample_observers
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.proto import sampler_pb2
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.models import telemetry
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.models.torch_bert import model
from deeplearning.benchpress.models.torch_bert import config
from deeplearning.benchpress.models.torch_bert import optimizer
from deeplearning.benchpress.models.torch_bert import hooks
from deeplearning.benchpress.models.torch_bert.data_generator import torchLMDataGenerator
from deeplearning.benchpress.util import logging as l
from eupy.hermes import client
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"reward_compilation",
  -1,
  "Select to integrate the LLVM compiler into the training regime. "
  "During training, the target token will be asked to fill the first token of the hole. "
  "If this mode is enabled, the model will fill the entire hole, as in inference. "
  "The fully generated sample will be checked for syntactic correctness with LLVM. "
  "If the sample compiles, the loss is zeroed for that instance, i.e. the sample is rewarded. "
  "[Default: -1]: do not use comp-rewarded training. "
  "Any integer >= 0: kick in this mode after this training step. 0 uses this method from the start."
)
flags.DEFINE_boolean(
"validate_per_epoch",
  True,
  "Calculate and plot validation loss at the end of each epoch."
)
flags.DEFINE_integer(
"eval_steps_per_epoch",
1000,
"Set validation steps at the end of epoch for validation loss calculation."
)
flags.DEFINE_boolean(
"is_trt",
False,
"Use TensorRT for the sampling model."
)
def worker(src, sequence_length, tokenizer):
src = list(tokenizer.TokenizeString(src))
src = [tokenizer.startToken] + src + [tokenizer.endToken]
src = src + [tokenizer.padToken] * max(0, sequence_length - len(src))
return src[:sequence_length]
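# A minimal sketch (added for illustration) of what `worker` produces: the source is
# tokenized, wrapped in start/end tokens, padded with the pad token and truncated to
# `sequence_length`. The character-level mock tokenizer below is hypothetical; the real
# one comes from the tokenizers module.
def _worker_demo():
  import types
  mock_tokenizer = types.SimpleNamespace(
    TokenizeString = lambda src: list(src), # Char-level stand-in for the real tokenizer.
    startToken = "[START]",
    endToken = "[END]",
    padToken = "[PAD]",
  )
  return worker("int x;", sequence_length = 12, tokenizer = mock_tokenizer)
  # -> ['[START]', 'i', 'n', 't', ' ', 'x', ';', '[END]', '[PAD]', '[PAD]', '[PAD]', '[PAD]']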
class torchBert(backends.BackendBase):
class BertEstimator(typing.NamedTuple):
"""Named tuple to wrap BERT pipeline."""
model : typing.TypeVar('nn.Module')
data_generator : torchLMDataGenerator
optimizer : typing.Any
scheduler : typing.Any
class SampleBertEstimator(typing.NamedTuple):
"""Named tuple for sampling BERT."""
model : typing.List[typing.TypeVar('nn.Module')]
data_generator : torchLMDataGenerator
@property
def hidden_state_size(self):
# return self.config.architecture.max_position_embeddings * self.config.architecture.hidden_size ## Get hidden state as is.
# return self.config.architecture.hidden_size ## Get probs from prediction logits for existing token.
return ((self.config.architecture.max_position_embeddings // 16) - 1) * ((self.config.architecture.hidden_size // 16) - 1) ## Apply pooling to hidden state.
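  # Worked example (added for clarity), assuming hypothetical values of
  # max_position_embeddings = 768 and hidden_size = 768: the pooled size is
  # ((768 // 16) - 1) * ((768 // 16) - 1) = 47 * 47 = 2209, which matches the output of
  # the torch.nn.AvgPool2d(32, stride = 16) applied to the [768 x 768] per-sequence
  # hidden state in ExtractHidden below: floor((768 - 32) / 16) + 1 = 47 per dimension.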
def __repr__(self):
return "BenchPress"
def __init__(self, *args, **kwargs):
super(torchBert, self).__init__(*args, **kwargs)
from deeplearning.benchpress.util import pytorch
if not pytorch.initialized:
pytorch.initPytorch()
if self.config.architecture.HasField("feature_encoder") and self.config.architecture.feature_encoder:
self.feature_encoder = True
self.feature_tokenizer = tokenizers.FeatureTokenizer.FromArgs(
self.config.architecture.feature_singular_token_thr,
self.config.architecture.feature_max_value_token,
self.config.architecture.feature_token_range
)
self.feature_sequence_length = self.config.architecture.feature_sequence_length
else:
self.feature_encoder = False
self.feature_tokenizer = None
self.feature_sequence_length = None
self.pytorch = pytorch
self.torch = pytorch.torch
self.torch_tpu_available = pytorch.torch_tpu_available
self.torch.manual_seed(self.config.training.random_seed)
self.torch.cuda.manual_seed_all(self.config.training.random_seed)
self.bertAttrs = {}
self.featureAttrs = {}
self.bert_config = None
self.train = None
self.sample = None
self.predict_generator = None
self.sampler = None
self.train_batch_size = None
self.eval_batch_size = None
self.learning_rate = None
self.num_train_steps = None
self.ckpt_path = self.cache.path / "checkpoints"
self.sample_path = self.cache.path / "samples"
self.logfile_path = self.cache.path / "logs"
if self.config.HasField("pre_train_corpus"):
self.pre_logfile_path = self.logfile_path / "pre_train"
self.telemetry = telemetry.TrainingLogger(self.logfile_path)
if self.config.HasField("pre_train_corpus"):
self.pre_telemetry = telemetry.TrainingLogger(self.logfile_path / "pre_train")
self.is_validated = False
self.trained = False
l.logger().info("BERT Model config initialized in {}".format(self.cache.path))
return
def _ConfigModelParams(self, is_sampling):
"""General model hyperparameters initialization."""
self.bertAttrs = {
"vocab_size" : self.tokenizer.vocab_size,
"hidden_size" : self.config.architecture.hidden_size,
"num_hidden_layers" : self.config.architecture.num_hidden_layers,
"num_attention_heads" : self.config.architecture.num_attention_heads,
"intermediate_size" : self.config.architecture.intermediate_size,
"hidden_act" : self.config.architecture.hidden_act,
"hidden_dropout_prob" : self.config.architecture.hidden_dropout_prob,
"attention_probs_dropout_prob" : self.config.architecture.attention_probs_dropout_prob,
"max_position_embeddings" : self.config.architecture.max_position_embeddings,
"type_vocab_size" : self.config.architecture.type_vocab_size,
"initializer_range" : self.config.architecture.initializer_range,
"layer_norm_eps" : self.config.architecture.layer_norm_eps,
"pad_token_id" : self.tokenizer.padToken,
}
if self.feature_encoder:
self.featureAttrs = {
"feature_encoder" : self.feature_encoder,
"feature_sequence_length" : self.feature_sequence_length,
"feature_embedding_size" : self.config.architecture.feature_embedding_size,
"feature_pad_idx" : self.feature_tokenizer.padToken,
"feature_dropout_prob" : self.config.architecture.feature_dropout_prob,
"feature_vocab_size" : len(self.feature_tokenizer),
"feature_num_attention_heads" : self.config.architecture.feature_num_attention_heads,
"feature_transformer_feedforward" : self.config.architecture.feature_transformer_feedforward,
"feature_layer_norm_eps" : self.config.architecture.feature_layer_norm_eps,
"feature_num_hidden_layers" : self.config.architecture.feature_num_hidden_layers,
}
self.bert_config = config.BertConfig.from_dict(
self.bertAttrs,
**self.featureAttrs,
xla_device = self.torch_tpu_available,
reward_compilation = FLAGS.reward_compilation,
is_sampling = is_sampling,
)
return
def _ConfigTrainParams(self,
data_generator: torchLMDataGenerator,
pre_train: bool,
) -> None:
"""
Model parameter initialization for training and validation.
"""
self._ConfigModelParams(is_sampling = False)
self.train_batch_size = self.config.training.batch_size
self.eval_batch_size = self.config.training.batch_size
self.learning_rate = self.config.training.adam_optimizer.initial_learning_rate_micros / 1e6
self.num_warmup_steps = self.config.training.num_warmup_steps if not pre_train else self.config.training.num_prewarmup_steps
self.max_grad_norm = 1.0
self.steps_per_epoch = data_generator.steps_per_epoch
self.current_step = None
self.num_epochs = data_generator.num_epochs
self.num_train_steps = self.steps_per_epoch * self.num_epochs
self.max_eval_steps = FLAGS.max_eval_steps
self.validation_results_file = "val_results.txt"
self.validation_results_path = os.path.join(str(self.logfile_path if not pre_train else self.pre_logfile_path), self.validation_results_file)
m = model.BertForPreTraining(
self.bert_config,
tokenizer = self.tokenizer,
target_lm = "hole" if self.config.training.data_generator.HasField("hole") else "mask"
).to(self.pytorch.offset_device)
if self.pytorch.num_nodes > 1:
distrib.barrier()
m = self.torch.nn.parallel.DistributedDataParallel(
m,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
find_unused_parameters = True,
)
elif self.pytorch.num_gpus > 1:
m = self.torch.nn.DataParallel(m)
opt, lr_scheduler = optimizer.create_optimizer_and_scheduler(
model = m,
num_train_steps = self.num_train_steps,
warmup_steps = self.num_warmup_steps,
learning_rate = self.learning_rate,
)
self.train = torchBert.BertEstimator(
m, data_generator, opt, lr_scheduler
)
l.logger().info(self.GetShortSummary())
return
def _ConfigSampleParams(self,
data_generator: torchLMDataGenerator,
sampler: samplers.Sampler,
) -> None:
"""
Model parameter initialization for inference.
"""
self._ConfigModelParams(is_sampling = True)
self.sampler = sampler
self.temperature = sampler.temperature
if sampler.sequence_length > self.bertAttrs['max_position_embeddings']:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(sampler.sequence_length, self.bertAttrs['max_position_embeddings']))
if FLAGS.is_trt:
mdl = model.BertForPreTrainingTRT
else:
mdl = model.BertForPreTraining
m = mdl(
self.bert_config,
tokenizer = self.tokenizer,
use_categorical = FLAGS.categorical_sampling,
temperature = self.temperature,
target_lm = "hole" if self.config.training.data_generator.HasField("hole") else "mask"
).to(self.pytorch.offset_device)
if self.pytorch.num_nodes > 1:
m = self.torch.nn.parallel.DistributedDataParallel(
m,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
find_unused_parameters = True,
)
elif self.pytorch.num_gpus > 1:
m = self.torch.nn.DataParallel(m)
if FLAGS.is_trt:
for mdl_instance, dev in zip(m, d):
mdl_instance.init_engine(self.cache, dev.index, sampler.batch_size, sampler.sequence_length, self.tokenizer.vocab_size, self.config.architecture.max_position_embeddings)
self.sample = torchBert.SampleBertEstimator(m, data_generator)
l.logger().info("Initialized model sampler in {}".format(self.sampler.cache.path))
return
def samplesWithCategorical(self):
return FLAGS.categorical_sampling
def GetEncoderModule(self,
with_checkpoint : bool = False,
without_label_head : bool = True,
**kwargs,
                       ) -> 'torch.nn.Module':
    """Initialize BERT as encoder."""
attrs = copy.copy(self.bertAttrs)
if not with_checkpoint:
attrs = {
k: v for k, v in kwargs.items()
}
elif len(kwargs.keys()) > 0:
l.logger().warn("Encoder module with_checkpoint will not override max position embeddings, pad and vocab size!")
generic_config = config.BertConfig.from_dict(
attrs,
# **self.featureAttrs,
xla_device = self.torch_tpu_available,
reward_compilation = -1,
      # is_sampling is hard-coded here so that the compile sampler can be initialized. This does not prohibit proper re-training.
is_sampling = False,
)
m = model.BertForPreTraining(
generic_config,
tokenizer = self.tokenizer,
target_lm = "hole" if self.config.training.data_generator.HasField("hole") else "mask",
without_label_head = without_label_head,
)
if with_checkpoint:
temp_estimator = torchBert.SampleBertEstimator(m, None)
self.loadCheckpoint(temp_estimator)
return temp_estimator.model
else:
return m
def GetDecoderModule(self,
with_checkpoint : bool = False,
without_label_head : bool = False,
**kwargs,
                       ) -> 'torch.nn.Module':
    """Initialize internal BERT module as a decoder (with cross-attention)."""
attrs = copy.copy(self.bertAttrs)
if not with_checkpoint:
attrs = {
k: v for k, v in kwargs.items()
}
elif len(kwargs.keys()) > 0:
l.logger().warn("Decoder module with_checkpoint will not override max position embeddings, pad and vocab size!")
generic_config = config.BertConfig.from_dict(
attrs,
xla_device = self.torch_tpu_available,
reward_compilation = -1,
      # is_sampling is hard-coded here so that the compile sampler can be initialized. This does not prohibit proper re-training.
is_sampling = False,
is_decoder = True,
add_cross_attention = True,
)
m = copy.deepcopy(model.BertForPreTraining(
generic_config,
tokenizer = self.tokenizer,
target_lm = "hole" if self.config.training.data_generator.HasField("hole") else "mask",
without_label_head = without_label_head,
))
if with_checkpoint:
temp_estimator = torchBert.SampleBertEstimator(m, None)
self.loadCheckpoint(temp_estimator, without_label_head = without_label_head, is_decoder = True)
return temp_estimator.model
else:
return m
def to_device(self, inputs) -> 'torch.Tensor':
"""
Move input tensors to torch device and return them.
"""
inputs['input_ids'] = inputs['input_ids'].to(self.pytorch.device)
inputs['input_mask'] = inputs['input_mask'].to(self.pytorch.device)
inputs['position_ids'] = inputs['position_ids'].to(self.pytorch.device)
inputs['mask_labels'] = inputs['mask_labels'].to(self.pytorch.device)
if 'input_features' in inputs:
inputs['input_features'] = inputs['input_features'].to(self.pytorch.device)
else:
inputs['input_features'] = None
return inputs
def model_step(self,
model : 'torch.nn.Module',
inputs : typing.Dict[str, 'torch.Tensor'],
is_validation : bool = False,
step : int = -1,
extract_hidden_state: bool = False,
) -> typing.Dict[str, 'torch.Tensor']:
"""
Perform a training step on a batch of inputs.
"""
outputs = model(
input_ids = inputs['input_ids'],
attention_mask = inputs['input_mask'],
position_ids = inputs['position_ids'],
input_features = inputs['input_features'],
masked_lm_labels = inputs['mask_labels'],
is_validation = is_validation,
step = step,
extract_hidden_state = extract_hidden_state,
)
return outputs
def sample_model_step(self,
model : typing.List['torch.nn.Module'],
inputs : typing.Dict[str, 'torch.Tensor'],
iteration : int = None,
extract_hidden_state : bool = False,
) -> typing.Dict[str, typing.List[typing.List[int]]]:
"""
Specialized forward function.
Dispatches model replicas across all GPUs, one process each.
Inputs must be three-dimensional:
workload_size x batch_size x sequence_length
"""
start = time.time()
outputs = {
'generated_samples': [], 'sample_indices': [],
'input_ids': [], 'masked_lm_lengths': []
}
if extract_hidden_state:
outputs['hidden_state'] = []
if iteration is not None:
desc = "Sampling iteration: {}".format(iteration)
else:
desc = "Sampling"
wload_size = len(inputs['input_ids']) * len(inputs['input_ids'][0])
inputs = self.to_device(inputs)
if environment.WORLD_RANK == 0:
bar = tqdm.auto.trange(wload_size, desc=desc, leave = False, position = 0)
samples, sample_indices = model(
workload = (
inputs['input_ids'],
inputs['input_mask'],
inputs['position_ids'],
inputs['input_features'],
),
bar = bar if environment.WORLD_RANK == 0 else None,
)
outputs['generated_samples'] = samples.detach()
outputs['sample_indices'] = sample_indices.detach()
outputs['input_ids'] = self.torch.reshape(inputs['input_ids'], tuple(samples.shape))
outputs['masked_lm_lengths'] = self.torch.reshape(inputs['masked_lm_lengths'].to(self.pytorch.device), (samples.shape[0], -1))
if extract_hidden_state:
outputs['hidden_state'] = self.ExtractHidden(samples)
outputs['generated_samples'] = list(outputs['generated_samples'].cpu().numpy())
outputs['sample_indices'] = list(outputs['sample_indices'].cpu().numpy())
outputs['input_ids'] = list(outputs['input_ids'].cpu().numpy())
outputs['masked_lm_lengths'] = list(outputs['masked_lm_lengths'].cpu().numpy())
if extract_hidden_state:
outputs['hidden_state'] = list(outputs['hidden_state'].cpu().numpy())
end = time.time()
return outputs, end-start
def PreTrain(self,
corpus,
test_sampler: typing.Optional[samplers.Sampler] = None,
**unused_kwargs
) -> None:
"""
Pre-training entry point.
"""
self.Train(corpus, test_sampler, pre_train = True)
return
def Train(self,
corpus,
test_sampler : typing.Optional[samplers.Sampler] = None,
pre_train : bool = False,
**unused_kwargs
) -> None:
"""
Main training entry point.
"""
if FLAGS.only_sample:
del self.train
self.train = None
return
self._ConfigTrainParams(
torchLMDataGenerator.TrainMaskLMBatchGenerator(
corpus, self.config.training,
self.cache.path,
self.config.training.num_pretrain_steps if pre_train else None,
pre_train,
self.feature_encoder,
self.feature_tokenizer,
self.feature_sequence_length,
), pre_train
)
self.current_step = self.loadCheckpoint(self.train, pre_train = pre_train)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if self.current_step >= 0:
l.logger().info("Loaded checkpoint step {}".format(self.current_step))
self.current_step = max(0, self.current_step)
if self.current_step < self.num_train_steps:
self.train.model.zero_grad()
## Set batch size in case of TPU training or distributed training.
if self.torch_tpu_available:
total_train_batch_size = self.train_batch_size * self.pytorch.torch_xla.xrt_world_size()
else:
total_train_batch_size = (
self.train_batch_size
* (self.torch.distributed.get_world_size() if self.pytorch.num_nodes > 1 else 1)
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
self.train.data_generator.dataloader, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
else:
loader = self.train.data_generator.dataloader
# Get dataloader iterator and setup hooks.
batch_iterator = iter(loader)
if self.is_world_process_zero():
train_hook = hooks.tensorMonitorHook(
self.logfile_path if not pre_train else self.pre_logfile_path, self.current_step, min(self.steps_per_epoch, FLAGS.monitor_frequency)
)
if FLAGS.reward_compilation >= 0 and not pre_train:
correct_sample_obs = sample_observers.SamplesDatabaseObserver(
self.logfile_path / "correct_samples.db"
)
else:
correct_sample_obs = None
total_steps = self.config.training.num_pretrain_steps if pre_train else self.config.training.num_train_steps
l.logger().info(
"Splitting {} steps into {} equivalent epochs, {} steps each. Rejected {} redundant step(s)".format(
self.num_train_steps, self.num_epochs,
self.steps_per_epoch, total_steps - self.num_train_steps
)
)
try:
self.train.model.train()
epoch_iter = tqdm.auto.trange(self.num_epochs, desc="Epoch", leave = False) if self.is_world_process_zero() else range(self.num_epochs)
for epoch in epoch_iter:
# In distributed mode, calling the set_epoch() method at
# the beginning of each epoch before creating the DataLoader iterator
# is necessary to make shuffling work properly across multiple epochs.
# Otherwise, the same ordering will be always used.
if self.pytorch.num_nodes > 1:
loader.sampler.set_epoch(epoch)
if epoch < self.current_step // self.steps_per_epoch:
continue # Stupid bar won't resume.
batch_iter = tqdm.auto.trange(self.steps_per_epoch, desc="Batch", leave = False) if self.is_world_process_zero() else range(self.steps_per_epoch)
for step in batch_iter:
if self.is_world_process_zero():
start = datetime.datetime.utcnow()
try:
inputs = next(batch_iterator)
except StopIteration:
# dataloader has different len() than steps_per_epoch.
# This is the easiest way to infinite-loop dataloaders in pytorch.
batch_iterator = iter(loader)
inputs = next(batch_iterator)
self.current_step += 1
# Move inputs to torch device.
inputs = self.to_device(inputs)
# Run model step on batch
step_out = self.model_step(self.train.model, inputs, step = epoch * self.steps_per_epoch + step)
# Collect losses and backpropagate
total_loss = step_out['total_loss'].mean()
total_loss.backward()
self.torch.nn.utils.clip_grad_norm_(self.train.model.parameters(), self.max_grad_norm)
if self.torch_tpu_available:
self.pytorch.torch_xla.optimizer_step(self.train.optimizer)
else:
self.train.optimizer.step()
self.train.scheduler.step()
## Collect tensors for logging.
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
total_loss = [self.torch.zeros(tuple(step_out['total_loss' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
masked_lm_loss = [self.torch.zeros(tuple(step_out['masked_lm_loss' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
# next_sentence_loss = [self.torch.zeros(tuple(step_out['next_sentence_loss'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
masked_lm_lengths = [self.torch.zeros(tuple(inputs ['masked_lm_lengths' ].shape), dtype = self.torch.int64 ).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(masked_lm_loss, step_out["masked_lm_loss"])
# self.torch.distributed.all_gather(next_sentence_loss, step_out["next_sentence_loss"])
self.torch.distributed.all_gather(masked_lm_lengths, inputs['masked_lm_lengths'].to(self.pytorch.device))
self.torch.distributed.all_gather(total_loss, step_out['total_loss'])
else:
total_loss = step_out['total_loss' ].unsqueeze(0).cpu()
masked_lm_loss = step_out['masked_lm_loss' ].unsqueeze(0).cpu()
# next_sentence_loss = step_out['next_sentence_loss'].unsqueeze(0).cpu()
masked_lm_lengths = inputs['masked_lm_lengths' ].cpu()
if self.is_world_process_zero():
exec_time_ms = int(round((datetime.datetime.utcnow() - start).total_seconds() * 1000))
if FLAGS.reward_compilation >= 0 and FLAGS.reward_compilation <= epoch * self.steps_per_epoch + step and not pre_train:
## Logging when compiler reward is enabled in training.
## This is not compatible with using DDP, and basically compiler-rewarded training is deprecated and proven to be wrong and inefficient.
correct_samples = [(x, y) for en, (x, y) in enumerate(zip(inputs['input_ids'].cpu().numpy(), step_out['generated_samples'].cpu().numpy())) if step_out['compile_status'][en] == 1]
for s in correct_samples:
feature_vector = extractor.ExtractFeatures(self.tokenizer.ArrayToCode(s[1]))
correct_sample_obs.OnSample(model_pb2.Sample(
train_step = self.current_step,
sample_feed = self.tokenizer.tokensToString(s[0], ignore_token = self.tokenizer.padToken).replace("\\n", "\n"),
text = self.tokenizer.tokensToString(s[1], ignore_token = self.tokenizer.padToken).replace("\\n", "\n"),
encoded_text = ",".join([str(t) for t in s[1]]),
sample_indices = '',
encoded_sample_indices = '',
sample_time_ms = int(round(exec_time_ms / self.train_batch_size)),
feature_vector = "\n".join(["{}:{}".format(k, v) for (k, v) in feature_vector.items()]),
num_tokens = len([x for x in s[1] if x != self.tokenizer.padToken]),
categorical_sampling = False,
compile_status = True,
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
)
if not pre_train:
## Fine-tuning logging.
train_hook.step(
masked_lm_loss = sum([ml.mean().item() for ml in masked_lm_loss]) / len(masked_lm_loss),
# next_sentence_loss = sum([nsl.mean().item() for nsl in next_sentence_loss]) / len(next_sentence_loss),
total_loss = sum([tl.mean().item() for tl in total_loss]) / len(total_loss),
learning_rate = self.train.scheduler.get_last_lr()[0],
num_correct_samples = (correct_sample_obs.sample_id if correct_sample_obs is not None else None),
batch_avg_hole_len = sum([sum([int(l) for l in b if l != -1]) / len([int(l) for l in b if l != -1])
for b in masked_lm_lengths]) / len(masked_lm_lengths),
batch_execution_time_ms = exec_time_ms,
time_per_sample_ms = exec_time_ms / self.train_batch_size,
)
else:
## Pre-training logging.
train_hook.step(
masked_lm_loss = sum([ml.mean().item() for ml in masked_lm_loss]) / len(masked_lm_loss),
# next_sentence_loss = sum([nsl.mean().item() for nsl in next_sentence_loss]) / len(next_sentence_loss),
total_loss = sum([tl.mean().item() for tl in total_loss]) / len(total_loss),
learning_rate = self.train.scheduler.get_last_lr()[0],
batch_avg_hole_len = sum([sum([int(l) for l in b if l != -1]) / len([int(l) for l in b if l != -1])
for b in masked_lm_lengths]) / len(masked_lm_lengths),
batch_execution_time_ms = exec_time_ms,
time_per_sample_ms = exec_time_ms / self.train_batch_size,
)
self.train.model.zero_grad()
if self.current_step == 0:
l.logger().info("Starting Loss: {}".format(sum([tl.mean().item() for tl in total_loss]) / len(total_loss)))
# End of Epoch
self.saveCheckpoint(self.train, pre_train)
if self.is_world_process_zero():
set_mail = "Epoch {} Loss: {}\n".format(self.current_step // self.steps_per_epoch, train_hook.epoch_loss)
l.logger().info("Epoch {} Loss: {}".format(self.current_step // self.steps_per_epoch, train_hook.epoch_loss))
if FLAGS.validate_per_epoch > 0 and self.train.data_generator.config.validation_split > 0:
val_ml_loss = self.Validate(per_epoch = True, pre_train = pre_train)
if self.is_world_process_zero():
train_hook.end_epoch(
val_masked_lm_loss = val_ml_loss,
# val_next_sentence_loss = val_nsp_loss,
val_total_loss = val_ml_loss # + val_nsp_loss,
)
set_mail += "Validation Loss: {}\n".format(val_ml_loss)
elif self.is_world_process_zero():
train_hook.end_epoch()
if FLAGS.notify_me:
client.getClient().send_message("clgen:torch_bert", set_mail)
if self.torch_tpu_available:
self.pytorch.torch_xla.master_print(self.pytorch.torch_xla_met.metrics_report())
if FLAGS.sample_per_epoch > 0:
if self.is_world_process_zero():
sampler, observers = self._getTestSampler(test_sampler, self.config.training.sequence_length)
self.InitSampling(sampler, self.config.training.random_seed)
for _ in range(FLAGS.sample_per_epoch):
start_time = datetime.datetime.utcnow()
self.InitSampleBatch(sampler)
org_inputs, input_ids, samples, indices = self.SampleNextIndices()
end_time = datetime.datetime.utcnow()
for org, inp, sample, idxs in zip(org_inputs, input_ids, samples, indices):
try:
stdout = opencl.Compile(self.tokenizer.ArrayToCode(sample))
compile_flag = 1
except ValueError:
compile_flag = 0
feature_vector = extractor.ExtractFeatures(self.tokenizer.ArrayToCode(sample))
sample_proto = model_pb2.Sample(
train_step = self.current_step,
sample_feed = sampler.start_text,
original_input = self.tokenizer.tokensToString(org, with_formatting = True, ignore_token = self.tokenizer.padToken),
text = self.tokenizer.tokensToString(sample, with_formatting = True, ignore_token = self.tokenizer.padToken).replace("\\n", "\n"),
encoded_text = ",".join([str(t) for t in sample]),
sample_indices = ','.join([self.tokenizer.decoder[idx].replace('\n', '\\n') for idx in idxs]).replace('\n', '\\n'),
encoded_sample_indices = ','.join([str(idx) for idx in idxs]),
sample_time_ms = int(round(1000 * ((end_time - start_time) / sampler.batch_size).total_seconds())),
feature_vector = "\n".join(["{}:{}".format(k, v) for (k, v) in feature_vector.items()]),
num_tokens = len(sample),
compile_status = compile_flag,
categorical_sampling = self.samplesWithCategorical(),
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
for obs in observers:
obs.OnSample(sample_proto)
distrib.barrier()
except KeyboardInterrupt:
pass
if not FLAGS.force_eval:
_ = self.Validate(pre_train = pre_train)
if FLAGS.force_eval and not self.is_validated:
_ = self.Validate(pre_train = pre_train)
del self.train
self.train = None
return
def TrainBatch(self, inputs) -> None:
raise NotImplementedError
return
def Validate(self, per_epoch = False, pre_train = False) -> float:
"""
Validation function for torch BERT.
Arguments:
per_epoch: Set True if is called at the end of (pre)training epoch.
If true, no analytical results are appended to database.
Instead, only loss is monitored and plotted.
"""
if ( (per_epoch and FLAGS.eval_steps_per_epoch <= 0)
or (not per_epoch and FLAGS.max_eval_steps <= 0)
or self.config.training.data_generator.validation_split == 0):
l.logger().info("Skipping BERT Validation.")
      return None
avg_mask_loss = []
avg_nsp_loss = []
preds = None
label_ids = None
self.train.model.eval()
for set_idx, (set_name, dataloader) in enumerate(self.train.data_generator.eval_dataloaders()):
l.logger().info("BERT Validation on {}".format(set_name))
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
dataloader, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
else:
loader = dataloader
if self.pytorch.num_nodes > 1:
loader.sampler.set_epoch(set_idx)
if not per_epoch and self.is_world_process_zero():
val_hook = hooks.validationSampleHook(
url = "sqlite:///{}".format(str((self.logfile_path if not pre_train else self.pre_logfile_path) / "validation_samples.db")),
tokenizer = self.tokenizer,
model_step = self.current_step
)
eval_iterator = iter(loader)
eval_steps = FLAGS.max_eval_steps if not per_epoch else FLAGS.eval_steps_per_epoch
try:
        eval_iter = tqdm.auto.trange(eval_steps, desc="Eval step", leave = False) if self.is_world_process_zero() else range(eval_steps)
for step in eval_iter:
try:
inputs = next(eval_iterator)
except StopIteration:
eval_iterator = iter(loader)
inputs = next(eval_iterator)
inputs = self.to_device(inputs)
with self.torch.no_grad():
step_out = self.model_step(self.train.model, inputs, is_validation = True)
if not per_epoch and self.is_world_process_zero():
val_hook.step(inputs, step_out)
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
masked_lm_loss = [self.torch.zeros(tuple(step_out['masked_lm_loss' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
# next_sentence_loss = [self.torch.zeros(tuple(step_out['next_sentence_loss'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(masked_lm_loss, step_out["masked_lm_loss"])
# self.torch.distributed.all_gather(next_sentence_loss, step_out["next_sentence_loss"])
else:
masked_lm_loss = step_out['masked_lm_loss' ].cpu()
# next_sentence_loss = step_out['next_sentence_loss'].cpu()
avg_mlm_loss = [x.mean().item() for x in masked_lm_loss]
avg_mask_loss.append(sum(avg_mlm_loss) / len(avg_mlm_loss))
# avg_nsp_loss.append(next_sentence_loss.mean().item())
except KeyboardInterrupt:
pass
if self.is_world_process_zero() and avg_mask_loss and not per_epoch:
val_hook.final(set_name, sum(avg_mask_loss) / len(avg_mask_loss))
if self.pytorch.torch_tpu_available:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
self.pytorch.torch_xla_model.master_print(self.pytorch.torch_xla_met.metrics_report())
if not per_epoch:
self.is_validated = True
try:
return sum(avg_mask_loss) / len(avg_mask_loss)
except ZeroDivisionError:
      return float('inf')
def InitSampling(self,
sampler : samplers.Sampler,
seed : typing.Optional[int] = None,
corpus = None,
) -> None:
"""This is called only once. Performs basic initialization of sampling"""
sample_batch_size = sampler.batch_size
if self.pytorch.num_nodes == 1 and self.pytorch.num_gpus > 1 and sample_batch_size < self.pytorch.num_gpus:
l.logger().warn("Sampler's batch size {}, too small for {} GPUs. Increasing to {}".format(
sample_batch_size,
self.pytorch.num_gpus,
self.pytorch.num_gpus
)
)
sample_batch_size = self.pytorch.num_gpus
data_generator = torchLMDataGenerator.SampleMaskLMBatchGenerator(
self.config.training, sampler, self.tokenizer, self.config.training.random_seed, sample_batch_size,
self.config.architecture.max_position_embeddings, self.cache.path, corpus,
self.feature_encoder,
self.feature_tokenizer,
self.feature_sequence_length,
)
self._ConfigSampleParams(data_generator, sampler)
ckpt_step = self.loadCheckpoint(self.sample)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if ckpt_step >= 0:
l.logger().info("Loaded checkpoint step {}".format(ckpt_step))
self.step_inputs = None
self.loader = None
self.pred_iterator = None
l.logger().info("Initialized model samples in {}".format(self.sample_path / self.sampler.hash))
return
def InitSampleBatch(self, sampler: samplers.Sampler, **kwargs) -> None:
"""Batch-specific initialization. Called once when a new batch is going to be generated"""
workload_size = kwargs.get('workload_size', None)
if sampler.is_live:
# For live sampling, start text must be re-instated at each iteration.
self.sample = self.sample._replace(
data_generator = torchLMDataGenerator.SampleMaskLMBatchGenerator(
self.config.training, sampler, self.tokenizer, 0, sampler.batch_size,
self.config.architecture.max_position_embeddings, self.cache.path,
self.feature_encoder,
self.feature_tokenizer,
self.feature_sequence_length,
)
)
self.step_inputs, self.loader, self.pred_iterator = None, None, None
if self.loader is None:
if self.torch_tpu_available:
self.loader = self.pytorch.torch_ploader.ParallelLoader(
self.sample.data_generator.dataloader, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
else:
self.loader = self.sample.data_generator.dataloader
if not sampler.is_active:
if self.pred_iterator is None:
self.pred_iterator = iter(self.loader)
try:
inputs = next(self.pred_iterator)
except StopIteration:
self.pred_iterator = iter(self.loader)
inputs = next(self.pred_iterator)
if workload_size is None:
        ## This dictionary holds tensors of the following size:
        ## [num_gpus x batch_size x seq_len] when only a single node is used.
        ## Otherwise, [1 x batch_size x seq_len], since each process manages its own GPU.
padded_wsize = self.pytorch.num_gpus if environment.WORLD_SIZE == 1 and self.pytorch.num_gpus > 1 else 1
else:
## If a workload is specified, then after you pad to the dimension of GPU or num processes
## Divide the size by GPU size or num processes size.
padded_wsize = (
(max(1, workload_size // (self.pytorch.num_gpus * sampler.batch_size))) * self.pytorch.num_gpus
if environment.WORLD_SIZE == 1 and self.pytorch.num_gpus > 1
else max(1, (workload_size // (self.pytorch.num_nodes * sampler.batch_size)) * self.pytorch.num_nodes))
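      # Worked example (added for clarity) with hypothetical values: workload_size = 32,
      # num_gpus = 4 and batch_size = 8 on a single node gives
      # padded_wsize = max(1, 32 // (4 * 8)) * 4 = 4, so the repeated tensors below have
      # shape [4 x 8 x seq_len], i.e. 32 sequences in total.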
self.step_inputs = {
x: inputs[x].unsqueeze(0).repeat(padded_wsize, 1, 1)
for x in inputs
}
# This loop below is purely for proper printing reasons:
sample_text = set(
[self.tokenizer.tokensToString(
seq.cpu().numpy(), ignore_token = self.tokenizer.padToken
) for seq in inputs['input_ids']]
)
for seq in sample_text:
self.sampler.setStartText(seq)
self.sampler.Specialize(self.tokenizer)
return
def SampleNextIndices(
self, *unused_args, **unused_kwargs
) -> typing.Tuple[np.array, np.array, np.array, np.array]:
"""Called iteratively to build a single batch of samples, until termination criteria stops calling"""
del unused_kwargs
del unused_args
if self.sample is None:
raise ValueError("Bert sampler has not been initialized.")
with self.torch.no_grad():
if self.sampler.is_active:
try:
return self.sample.data_generator.ActiveGeneration(self, self.sample)
except StopIteration:
raise StopIteration
else:
if self.sampler.is_live and self.feature_encoder:
batch_features = []
for _ in range(self.sampler.batch_size):
feat_space = ""
while feat_space not in {"GreweFeatures", "AutophaseFeatures", "InstCountFeatures"}:
feat_space = input("Select feature space: [g/a/i]/[GreweFeatures/AutophaseFeatures/InstCountFeatures]: ")
if feat_space == "a":
feat_space = "AutophaseFeatures"
elif feat_space == "g":
feat_space = "GreweFeatures"
elif feat_space == "i":
feat_space = "InstCountFeatures"
input_features = {
k: -1 for k in extractor.extractors[feat_space].KEYS()
}
for k in input_features.keys():
if k not in {"F2:coalesced/mem", "F4:comp/mem"}:
prompt = input("{}: ".format(k))
              if prompt == "":
val = 0
else:
val = int(prompt)
input_features[k] = val
batch_features.append(
self.feature_tokenizer.TokenizeFeatureVector(input_features, feat_space, self.feature_sequence_length)
)
self.step_inputs['input_features'] = self.torch.LongTensor(batch_features).unsqueeze(0)
elif self.feature_encoder and 'input_features' not in self.step_inputs:
feat_space = "GreweFeatures"
batch_features = []
for _ in range(self.sampler.batch_size):
input_features = {
k: -1 for k in extractor.extractors[feat_space].KEYS()
}
for k in input_features.keys():
if k not in {"F2:coalesced/mem", "F4:comp/mem"}:
input_features[k] = int(np.random.poisson(8))
print(input_features)
try:
input_features["F2:coalesced/mem"] = input_features["coalesced"] / input_features["mem"]
except ZeroDivisionError:
input_features["F2:coalesced/mem"] = 0
try:
input_features["F4:comp/mem"] = input_features["comp"] / input_features["mem"]
except ZeroDivisionError:
input_features["F4:comp/mem"] = 0
batch_features.append(
self.feature_tokenizer.TokenizeFeatureVector(input_features, feat_space, self.feature_sequence_length)
)
self.step_inputs['input_features'] = self.torch.LongTensor(batch_features).unsqueeze(0)
step_out, time = self.sample_model_step(
self.sample.model,
self.step_inputs,
)
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
generated_samples = [self.torch.zeros(tuple(step_out['generated_samples'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
sample_indices = [self.torch.zeros(tuple(step_out['sample_indices' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(generated_samples, step_out["generated_samples"])
self.torch.distributed.all_gather(sample_indices, step_out["sample_indices"])
raise NotImplementedError("This will not work because generated_samples and sample indices are lists and not tensors")
else:
generated_samples = step_out['generated_samples']
sample_indices = step_out['sample_indices']
return (
self.step_inputs['original_input'].cpu().view(-1, self.step_inputs['original_input'].shape[2]).numpy(),
self.step_inputs['input_ids'].cpu().view(-1, self.sampler.sequence_length).numpy(),
generated_samples,
sample_indices
)
def EncodeInputs(self, srcs: typing.List[np.array]) -> typing.List[np.array]:
"""
According to each LM's rules, encode a list of source codes to encoded arrays
ready to be fed into the model.
Args:
src: List of source codes.
Returns:
A list of encoded numpy arrays.
"""
sequence_length = self.config.architecture.max_position_embeddings
pool = multiprocessing.Pool(min(os.cpu_count(), len(srcs)))
encoded = []
it = pool.imap(functools.partial(worker, sequence_length = sequence_length, tokenizer = self.tokenizer), srcs, chunksize = 256)
for enc in tqdm.tqdm(it, total = len(srcs), desc = "Encode Inputs", leave = False):
encoded.append(enc)
pool.close()
return encoded
def ExtractHidden(self, encoded: typing.List[np.array]) -> np.array:
"""
Extract hidden state from backend language model.
Args:
encoded: A list of input ids that will be provided to the LM.
Has to be two-dimensional: [num_sequences X sequence_length]
Returns:
The hidden state of the provided inputs.
"""
if not isinstance(encoded, self.torch.Tensor):
workload_input_ids = self.torch.LongTensor(encoded).to(self.pytorch.device)
else:
workload_input_ids = encoded
hidden_states = self.torch.zeros(
[workload_input_ids.shape[0], self.hidden_state_size],
dtype = self.torch.float32,
)
bar = tqdm.tqdm(total = workload_input_ids.shape[0], desc = "Extract Hidden State", leave = False)
with self.torch.no_grad():
for idx in range(0, workload_input_ids.shape[0], self.sampler.batch_size):
input_ids = workload_input_ids[idx : idx + self.sampler.batch_size].to(self.pytorch.device)
input_mask = (input_ids != self.tokenizer.padToken)
position_ids = self.torch.arange(input_ids.shape[-1], dtype = self.torch.int64).unsqueeze(0).repeat(input_ids.shape[0], 1).to(self.pytorch.device)
prediction_scores, hidden_state = self.sample.model(
input_ids = input_ids,
attention_mask = input_mask,
position_ids = position_ids,
extract_hidden_state = True,
)
real_batch_size = input_ids.shape[0]
###########################################
"""
TODO Research: Hidden states are collected from prediction_scores and have a shape of [seq_length x 1].
At each index lies the prob of the respective token in the input sequence.
"""
# sequence_length = workload_input_ids.shape[-1]
# hidden_states[idx: idx + real_batch_size] = prediction_scores[:, range(sequence_length), input_ids][range(real_batch_size), range(real_batch_size)].detach().cpu()
"""
TODO Research: Hidden states are collected from the encoder's outputs [seq_len x hidden_size]. Flatten everything out.
"""
# hidden_states[idx: idx + real_batch_size] = hidden_state.reshape((real_batch_size, -1)).detach().cpu()
"""
TODO Research: Hidden states are collected from the encoder's input (seq_len x hidden_size) and then they are avg pooled to easily reduce dimensions.
"""
hidden_states[idx: idx + real_batch_size] = self.torch.nn.AvgPool2d(32, stride = 16, count_include_pad = False)(hidden_state.detach()).reshape(real_batch_size, -1).cpu() # hidden_state.reshape((real_batch_size, -1)).detach().cpu()
###########################################
bar.update(real_batch_size)
return hidden_states
def _getTestSampler(self, test_sampler, sequence_length):
if test_sampler is None or test_sampler.is_live or test_sampler.is_active:
if self.config.training.data_generator.HasField("hole"):
sampler_str = [
"start_text: \"[START]kernel void A([HOLE]}[END]\"",
"batch_size: 2",
"sequence_length: {}".format(sequence_length),
"temperature_micros: 700000",
]
elif self.config.training.data_generator.HasField("mask_seq"):
sampler_str = [
"start_text: \"[START]kernel void A(" + ''.join(["[MASK]"] * (sequence_length - 7)) + "}[END]\"",
"batch_size: 2",
"sequence_length: {}".format(sequence_length),
"temperature_micros: 700000",
]
mock_config = pbutil.FromString('\n'.join(sampler_str), sampler_pb2.Sampler())
sampler = samplers.Sampler(mock_config, sample_db_name = "epoch_samples.db")
else:
sampler = test_sampler
if sampler.isFixedStr:
sampler.Specialize(self.tokenizer)
observers = [sample_observers.PrintSampleObserver()]
if FLAGS.store_samples_db:
observers.append(sample_observers.SamplesDatabaseObserver(
self.sample_path / sampler.hash / sampler.sample_db_name
)
)
sampler.symlinkModelDB(
self.sample_path / sampler.hash,
self.hash
)
return sampler, observers
def saveCheckpoint(self, estimator, pre_train):
"""
Saves model, scheduler, optimizer checkpoints per epoch.
"""
if self.is_world_process_zero():
ckpt_comp = lambda x: self.ckpt_path / "{}{}-{}.pt".format("pre_" if pre_train else "", x, self.current_step)
if self.torch_tpu_available:
if self.pytorch.torch_xla_model.rendezvous("saving_checkpoint"):
self.pytorch.torch_xla_model.save(estimator.model, ckpt_comp("model"))
self.pytorch.torch_xla.rendezvous("saving_optimizer_states")
self.pytorch.torch_xla.save(estimator.optimizer.state_dict(), ckpt_comp("optimizer"))
self.pytorch.torch_xla.save(estimator.scheduler.state_dict(), ckpt_comp("scheduler"))
else:
if isinstance(estimator.model, self.torch.nn.DataParallel):
self.torch.save(estimator.model.module.state_dict(), ckpt_comp("model"))
else:
self.torch.save(estimator.model.state_dict(), ckpt_comp("model"))
self.torch.save(estimator.optimizer.state_dict(), ckpt_comp("optimizer"))
self.torch.save(estimator.scheduler.state_dict(), ckpt_comp("scheduler"))
with open(self.ckpt_path / "checkpoint.meta", 'a') as mf:
mf.write("{}train_step: {}\n".format("pre_" if pre_train else "", self.current_step))
if pre_train:
mf = open(self.ckpt_path / "checkpoint.meta", 'r')
cf = mf.read()
mf.close()
if "train_step: 0" not in cf:
with open(self.ckpt_path / "checkpoint.meta", 'w') as mf:
mf.write(cf + "train_step: 0\n")
for x in {"model"}:
shutil.copyfile(str(ckpt_comp(x)), str(self.ckpt_path / "{}-0.pt".format(x)))
return
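  # Illustrative note (hedged, not part of the original code): with the naming
  # scheme above, a fine-tuning checkpoint directory is expected to contain
  # roughly the following entries for a save at step 5000:
  #   checkpoint.meta    # one "train_step: N" (or "pre_train_step: N") line per save
  #   model-5000.pt      # model weights
  #   optimizer-5000.pt  # optimizer state
  #   scheduler-5000.pt  # LR scheduler state
  # Pre-training checkpoints carry a "pre_" prefix, e.g. pre_model-5000.pt.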
def loadCheckpoint(self,
estimator: typing.Union[
typing.TypeVar('torchBert.BertEstimator'),
typing.TypeVar('torchBert.SampleBertEstimator')
],
pre_train : bool = False,
without_label_head : bool = False,
is_decoder : bool = False,
) -> int:
"""
Load model checkpoint. Loads either most recent epoch, or selected checkpoint through FLAGS.
"""
if not (self.ckpt_path / "checkpoint.meta").exists():
return -1
with open(self.ckpt_path / "checkpoint.meta", 'r') as mf:
if pre_train:
key = "pre_train_step"
exclude = "None"
else:
key = "train_step"
exclude = "pre_train_step"
get_step = lambda x: int(x.replace("\n", "").replace("{}: ".format(key), ""))
lines = mf.readlines()
entries = set({get_step(x) for x in lines if key in x and exclude not in x})
if FLAGS.select_checkpoint_step == -1 or pre_train:
ckpt_step = max(entries)
else:
if FLAGS.select_checkpoint_step in entries:
ckpt_step = FLAGS.select_checkpoint_step
else:
raise ValueError("{} not found in checkpoint folder.".format(FLAGS.select_checkpoint_step))
ckpt_comp = lambda x: self.ckpt_path / "{}{}-{}.pt".format("pre_" if pre_train else "", x, ckpt_step)
if isinstance(estimator.model, self.torch.nn.DataParallel):
try:
if without_label_head:
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if "cls.predictions." not in k:
new_state_dict[k] = v
estimator.model.module.load_state_dict(new_state_dict, strict = False)
else:
estimator.model.module.load_state_dict(
self.torch.load(ckpt_comp("model")),
strict = False if is_decoder else True,
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
if not without_label_head or (without_label_head and "cls.predictions." not in name):
new_state_dict[name] = v
estimator.model.module.load_state_dict(new_state_dict, strict = False if is_decoder or without_label_head else True)
else:
try:
if without_label_head:
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if "cls.predictions." not in k:
new_state_dict[k] = v
estimator.model.load_state_dict(new_state_dict, strict = False)
else:
estimator.model.load_state_dict(
self.torch.load(ckpt_comp("model")),
strict = False if is_decoder else True,
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model"), map_location = lambda storage, loc: storage).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
if not without_label_head or (without_label_head and "cls.predictions." not in name):
new_state_dict[name] = v
estimator.model.load_state_dict(new_state_dict, strict = False if without_label_head or is_decoder else True)
if isinstance(estimator, torchBert.BertEstimator):
if estimator.optimizer is not None and estimator.scheduler is not None and ckpt_step > 0:
estimator.optimizer.load_state_dict(
self.torch.load(ckpt_comp("optimizer"), map_location=self.pytorch.device)
)
estimator.scheduler.load_state_dict(
self.torch.load(ckpt_comp("scheduler"), map_location=self.pytorch.device)
)
estimator.model.eval()
return ckpt_step
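  # Illustrative note (hedged, not part of the original code): given a
  # checkpoint.meta such as
  #   pre_train_step: 10000
  #   train_step: 0
  #   train_step: 5000
  # a non-pre-train load collects entries == {0, 5000} and restores the latest
  # step (5000), unless --select_checkpoint_step names another recorded step.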
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on
several machines, this is only going to be :obj:`True` for one process).
"""
if self.torch_tpu_available:
return self.pytorch.torch_xla_model.is_master_ordinal(local=False)
elif self.pytorch.num_nodes > 1:
return self.torch.distributed.get_rank() == 0
else:
return True
def count_parameters(self, model) -> int:
"""
Count and print the number of trainable parameters for the model.
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def find_unused_parameters(self, model: 'torch.nn.Module') -> None:
"""
Find parameters that are unused for loss computation.
"""
param_names = []
for name, param in model.named_parameters():
if param.grad is None:
param_names.append(name)
if param_names:
l.logger().warn("Unused parameters:\n{}".format('\n'.join(param_names)))
else:
l.logger().info("No unused parameters found for grad computation.")
return
def GetShortSummary(self) -> str:
return (
"\n"
f"{model_pb2.NetworkArchitecture.Backend.Name(self.config.architecture.backend)} "
"network: "
"\n"
f" Total trainable parameters: {humanize.intcomma(self.count_parameters(self.train.model))}"
"\n"
f" hidden_size: {self.config.architecture.hidden_size}"
"\n"
f" #hidden_layers: {self.config.architecture.num_hidden_layers}"
"\n"
f" #attention_heads: {self.config.architecture.num_attention_heads}"
"\n"
f" intermediate_size: {self.config.architecture.intermediate_size}"
"\n"
f" hidden_act: {self.config.architecture.hidden_act}"
"\n"
) + (self.train.data_generator.GetShortSummary() if self.train else "")
def InferenceManifest(self) -> typing.List[pathlib.Path]:
"""Return the list of files which are required for model inference.
Returns:
A list of absolute paths.
"""
    # The model checkpoint, log and sample files.
paths = [ path.absolute() for path in (self.cache.path / "checkpoints").iterdir() ]
paths += [ path.absolute() for path in (self.cache.path / "logs").iterdir() ]
paths += [ path.absolute() for path in (self.cache.path / "samples").iterdir() ]
# paths += self.data_generator.InferenceManifest # TODO
return sorted(paths)
| 61,629 | 44.889799 | 238 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/activations.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from deeplearning.benchpress.util.pytorch import torch
def swish(x):
return x * torch.sigmoid(x)
def _gelu_python(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
This is now written in C in torch.nn.functional
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
""" Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).
Also see https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
if torch.__version__ < "1.4.0":
gelu = _gelu_python
else:
gelu = torch.nn.functional.gelu
def gelu_fast(x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
ACT2FN = {
"relu": torch.nn.functional.relu,
"swish": swish,
"gelu": gelu,
"tanh": torch.tanh,
"gelu_new": gelu_new,
"gelu_fast": gelu_fast,
}
def get_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
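# --- Hedged usage sketch (not part of the original module) -------------------
# Minimal example of resolving an activation by its configuration string and
# applying it; the tensor shape below is purely illustrative.
if __name__ == "__main__":
  act = get_activation("gelu_new")
  x = torch.randn(2, 4)
  print(act(x).shape) # torch.Size([2, 4])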
| 2,059 | 31.1875 | 111 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/optimizer.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import typing
from deeplearning.benchpress.util.pytorch import torch
def create_optimizer_and_scheduler(model,
num_train_steps: int,
warmup_steps: int,
learning_rate: float,
adam_beta1 = 0.9,
adam_beta2 = 0.999,
adam_epsilon = 1e-6,
weight_decay = 0.01,
):
"""
Setup the optimizer and the learning rate scheduler.
  We provide a reasonable default (AdamW with a linear warmup/decay schedule) that works well. If you
  need a different setup, construct your own optimizer and scheduler with the helpers defined below.
"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
opt = AdamW(
optimizer_grouped_parameters,
lr = learning_rate,
betas = (adam_beta1, adam_beta2),
eps = adam_epsilon,
)
lr_scheduler = get_linear_schedule_with_warmup(
opt, num_warmup_steps = warmup_steps, num_training_steps = num_train_steps
)
return opt, lr_scheduler
def get_constant_schedule(optimizer: torch.optim.Optimizer, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate, using the learning rate set in optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
return torch.optim.lr_scheduler.LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: torch.optim.Optimizer, num_warmup_steps: int, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0,
after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
      The total number of training steps.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
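# Hedged, illustrative example (not part of the original module): traces how the
# warmup/decay multiplier defined above behaves for a dummy parameter.
def _example_linear_warmup_schedule():
  """
  With num_warmup_steps = 100 and num_training_steps = 1000 the learning rate
  rises linearly to its base value at step 100 and then decays linearly to 0
  at step 1000 (e.g. the multiplier is 0.5 at step 50 and again at step 550).
  """
  param = torch.nn.Parameter(torch.zeros(1))
  opt = torch.optim.SGD([param], lr = 1.0)
  scheduler = get_linear_schedule_with_warmup(opt, num_warmup_steps = 100, num_training_steps = 1000)
  lrs = []
  for _ in range(1000):
    opt.step()
    scheduler.step()
    lrs.append(opt.param_groups[0]["lr"])
  return lrs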
def get_cosine_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
      The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`int`, `optional`, defaults to 1):
The number of hard restarts to use.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
class AdamW(torch.optim.Optimizer):
"""
Implements Adam algorithm with weight decay fix as introduced in
`Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__.
Parameters:
params (:obj:`typing.Iterable[torch.nn.parameter.Parameter]`):
typing.Iterable of parameters to optimize or dictionaries defining parameter groups.
lr (:obj:`float`, `optional`, defaults to 1e-3):
The learning rate to use.
betas (:obj:`typing.Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
Adam's betas parameters (b1, b2).
eps (:obj:`float`, `optional`, defaults to 1e-6):
Adam's epsilon for numerical stability.
weight_decay (:obj:`float`, `optional`, defaults to 0):
Decoupled weight decay to apply.
correct_bias (:obj:`bool`, `optional`, defaults to `True`):
      Whether or not to correct bias in Adam (for instance, in Bert TF repository they use :obj:`False`).
"""
def __init__(
self,
params: typing.Iterable[torch.nn.parameter.Parameter],
lr: float = 1e-3,
betas: typing.Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0.0,
correct_bias: bool = True,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
super().__init__(params, defaults)
def step(self, closure: typing.Callable = None):
"""
Performs a single optimization step.
Arguments:
closure (:obj:`typing.Callable`, `optional`): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
step_size = group["lr"]
if group["correct_bias"]: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state["step"]
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
return loss
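# --- Hedged usage sketch (not part of the original module) -------------------
# Shows how AdamW and the linear warmup schedule are meant to be wired together
# through create_optimizer_and_scheduler; the toy model, step counts and
# learning rate are illustrative assumptions only.
if __name__ == "__main__":
  model = torch.nn.Linear(16, 16)
  opt, scheduler = create_optimizer_and_scheduler(
    model,
    num_train_steps = 1000,
    warmup_steps = 100,
    learning_rate = 2e-5,
  )
  loss = model(torch.randn(8, 16)).pow(2).mean()
  loss.backward()
  opt.step()
  scheduler.step()
  opt.zero_grad()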
| 12,217 | 40 | 129 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/incoder/example_api.py | import typing
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
# signals the start of a document
BOS = "<|endoftext|>"
# signals the end of a generated infill
EOM = "<|endofmask|>"
def make_sentinel(i):
# signals (1) a location to insert an infill and (2) the start of the infill generation
return f"<|mask:{i}|>"
def generate(model: torch.nn.Module, inp: str, tokenizer, max_to_generate: int=128, temperature: float=0.2):
"""
  Do standard left-to-right completion of the prefix `inp` by sampling from the model
"""
input_ids = tokenizer(inp, return_tensors="pt").input_ids.to(pytorch.device)
max_length = max_to_generate + input_ids.flatten().size(0)
if max_length > 2048:
print("warning: max_length {} is greater than the context window {}".format(max_length, 2048))
with torch.no_grad():
output = model.generate(input_ids=input_ids, do_sample=True, top_p = 0.95, temperature=temperature, max_length=max_length)
detok_hypo_str = tokenizer.decode(output.flatten())
if detok_hypo_str.startswith(BOS):
detok_hypo_str = detok_hypo_str[len(BOS):]
return detok_hypo_str
def infill(model, inp: str, tokenizer, max_to_generate: int=128, temperature: float=0.7, extra_sentinel: bool=True, max_retries: int=1):
"""
Generate infills to complete a partial document, e.g.
[A C E] -> [A B C D E], where B and D are infills that have been generated.
  inp: str. One string instance to input for sampling; '<insert>' marks each infill location.
max_to_generate: int. maximum number of tokens to generate. Keep in mind
that the model context size is 2048.
temperature: float. temperature parameter for sampling.
extra_sentinel: bool. we recommend setting this to True, as it makes it
easier for the model to end generated infills. See the footnote in
section 2.2 of our paper for details.
max_retries: int. if > 1, use rejection sampling to keep sampling infills until
all infills sample a completion token.
returns a dictionary containing the following:
text: str, the completed document (with infills inserted)
    parts: List[str], length N. The input split on '<insert>'
infills: List[str], length N-1. The list of infills generated
retries_attempted: number of retries used (if max_retries > 1)
"""
parts = inp.split('<insert>')
assert isinstance(parts, list)
retries_attempted = 0
done = False
while (not done) and (retries_attempted < max_retries):
retries_attempted += 1
infills = []
complete = []
## (1) build the prompt
if len(parts) == 1:
raise OSError
prompt = parts[0]
completion = generate(model, prompt, tokenizer, max_to_generate, temperature)
# completion = completion[len(prompt):]
if EOM not in completion:
completion += EOM
completion = completion[:completion.index(EOM) + len(EOM)]
infilled = completion[:-len(EOM)]
infills.append(infilled)
return {
'text': completion, # str, the completed document (with infills inserted)
'parts': parts, # List[str], length N. Same as passed to the method
'infills': infills, # List[str], length N-1. The list of infills generated
'retries_attempted': retries_attempted, # number of retries used (if max_retries > 1)
}
else:
prompt = ""
# encode parts separated by sentinel
for sentinel_ix, part in enumerate(parts):
prompt += part
if extra_sentinel or (sentinel_ix < len(parts) - 1):
prompt += make_sentinel(sentinel_ix)
done = True
## (2) generate infills
for sentinel_ix, part in enumerate(parts[:-1]):
complete.append(part)
prompt += make_sentinel(sentinel_ix)
# TODO: this is inefficient as it requires re-encoding prefixes repeatedly
completion = generate(model, prompt, tokenizer, max_to_generate, temperature)
completion = completion[len(prompt):]
if EOM not in completion:
completion += EOM
done = False
completion = completion[:completion.index(EOM) + len(EOM)]
infilled = completion[:-len(EOM)]
infills.append(infilled)
complete.append(infilled)
prompt += completion
complete.append(parts[-1])
text = ''.join(complete)
return {
'text': text, # str, the completed document (with infills inserted)
'parts': parts, # List[str], length N. Same as passed to the method
'infills': infills, # List[str], length N-1. The list of infills generated
'retries_attempted': retries_attempted, # number of retries used (if max_retries > 1)
}
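# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrates the '<insert>' convention documented above. The checkpoint name
# and device handling are assumptions for demonstration only: running this
# requires the facebook/incoder-1B weights and an initialised device.
if __name__ == "__main__":
  import transformers
  tok = transformers.AutoTokenizer.from_pretrained("facebook/incoder-1B")
  mdl = transformers.AutoModelForCausalLM.from_pretrained("facebook/incoder-1B").to(pytorch.device)
  out = infill(mdl, "kernel void A(global int* a){\n  <insert>\n}", tok, max_to_generate = 64)
  print(out['text'])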
| 4,614 | 40.954545 | 136 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/incoder/incoder.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API Calls to FAIR-Incoder."""
import typing
import time
import tqdm
import transformers
import numpy as np
from absl import flags
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.models import telemetry
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.models.incoder import example_api
from deeplearning.benchpress.models.incoder.data_generator import IncoderDataGenerator
from deeplearning.benchpress.util import logging as l
transformers.set_seed(np.random.RandomState().randint(0, 2**32-1) % (1 + environment.WORLD_RANK))
FLAGS = flags.FLAGS
flags.DEFINE_string(
"custom_incoder_ckpt",
None,
"Select your own path to Incoder version instead of using the standard HF ones."
)
class Incoder(backends.BackendBase):
"""
API Class for incoder collected from huggingface.
"""
class TrainEstimator(typing.NamedTuple):
"""Named tuple to wrap Incoder pipeline."""
model : typing.TypeVar('nn.Module')
data_generator : IncoderDataGenerator
optimizer : typing.Any
scheduler : typing.Any
class SampleEstimator(typing.NamedTuple):
"""Named tuple for sampling Incoder."""
model : typing.List[typing.TypeVar('nn.Module')]
data_generator : IncoderDataGenerator
@property
def hidden_state_size(self):
return -1
def __init__(self, *args, **kwargs):
super(Incoder, self).__init__(*args, **kwargs)
from deeplearning.benchpress.util import pytorch
if not pytorch.initialized:
pytorch.initPytorch()
self.pytorch = pytorch
self.torch = pytorch.torch
self.torch_tpu_available = pytorch.torch_tpu_available
self.torch.manual_seed(np.random.RandomState().randint(0, 2**32-1) % (1 + environment.WORLD_RANK))
self.torch.cuda.manual_seed_all(np.random.RandomState().randint(0, 2**32-1) % (1 + environment.WORLD_RANK))
self.incoder_version = kwargs.pop("incoder_version")
self.train = None
self.sample = None
self.predict_generator = None
self.sampler = None
self.train_batch_size = None
self.eval_batch_size = None
self.learning_rate = None
self.num_train_steps = None
self.ckpt_path = self.cache.path / "checkpoints"
self.sample_path = self.cache.path / "samples"
self.logfile_path = self.cache.path / "logs"
if self.config.HasField("pre_train_corpus"):
self.pre_logfile_path = self.logfile_path / "pre_train"
self.telemetry = telemetry.TrainingLogger(self.logfile_path)
if self.config.HasField("pre_train_corpus"):
self.pre_telemetry = telemetry.TrainingLogger(self.logfile_path / "pre_train")
self.is_validated = False
self.trained = False
l.logger().info("{} initialized".format(self.incoder_version))
return
def _ConfigModelParams(self, is_sampling):
"""General model hyperparameters initialization."""
##! Placeholder for now. If need be, will be populated.
return
def _ConfigSampleParams(self,
data_generator: IncoderDataGenerator,
sampler: samplers.Sampler,
) -> None:
"""
Model parameter initialization for inference.
"""
self._ConfigModelParams(is_sampling = True)
self.sampler = sampler
self.temperature = sampler.temperature
kwargs = {}
if self.incoder_version == "facebook/incoder-6B":
# the arguments added below will load a half precision version of the model,
# which requires less RAM than loading the full float32 version. this
# should fit in ~16GB of RAM
# NOTE: half precision should *not* be used if you plan to fine-tune the
# model. You'll need full precision and a lot of GPU memory. We have not
# tested fine-tuning in `transformers` (the model was trained in fairseq)
kwargs = dict(
revision = "float16",
torch_dtype = self.torch.float16,
low_cpu_mem_usage = True,
)
if FLAGS.custom_incoder_ckpt is None:
m = transformers.AutoModelForCausalLM.from_pretrained(
self.incoder_version, **kwargs
).to(self.pytorch.offset_device)
else:
l.logger().warn("Using custom Incoder checkpoint at {}".format(FLAGS.custom_incoder_ckpt))
m = transformers.AutoModelForCausalLM.from_pretrained(
FLAGS.custom_incoder_ckpt, **kwargs
).to(self.pytorch.offset_device)
if self.pytorch.num_nodes == 1 and self.pytorch.num_gpus > 1:
l.logger().warn("HuggingFace 'generate' function does not support DataParallel. If you want multi-GPU sampling, go to DDP.")
self.sample = Incoder.SampleEstimator(m, data_generator)
l.logger().info("Initialized model sampler in {}".format(self.sampler.cache.path))
return
def samplesWithCategorical(self) -> bool:
return True
def model_step(self) -> 'torch.Tensor':
raise NotImplementedError
return
def sample_model_step(self,
model : typing.List[typing.TypeVar('torch.nn.Module')],
inputs : typing.Dict[str, typing.TypeVar('torch.Tensor')],
is_live : bool = False,
iteration : int = None,
) -> typing.Dict[str, typing.List[typing.List[int]]]:
"""
Specialized forward function.
Dispatches model replicas across all GPUs, one process each.
Inputs must be three-dimensional:
workload_size x batch_size x sequence_length
"""
start = time.time()
total_seqs = inputs['input_ids'].shape[0] * inputs['input_ids'].shape[1]
max_to_generate = self.sampler.sequence_length - 3
outputs = {
'generated_samples': self.torch.zeros((total_seqs, self.sampler.sequence_length), dtype = self.torch.int64).to(self.pytorch.device),
'sample_indices': self.torch.zeros((total_seqs, max_to_generate), dtype = self.torch.int64).to(self.pytorch.device),
'input_ids': [], 'masked_lm_lengths': []
}
if iteration is not None:
desc = "Sampling iteration: {}".format(iteration)
else:
desc = "Sampling"
s_idx = 0
if environment.WORLD_RANK == 0:
bar = tqdm.tqdm(total = total_seqs, desc = desc)
else:
bar = None
for batch in inputs['input_ids']:
for seq in batch:
seq = [x for x in seq if x != self.tokenizer.padToken]
incode = self.tokenizer.ArrayToCode(seq).replace("<|mask:0|>", "<insert>") # This is a text where pad has been stripped off.
incode = "<| file ext=.cl |>\n{}\n<|/ file |>".format(incode)
incoded = example_api.infill(
model,
incode,
self.tokenizer.get_hf_tokenizer(),
max_to_generate = max_to_generate - len(seq) - 13,
temperature = self.temperature,
extra_sentinel = True,
max_retries = 1,
)
try:
          # This is a hack to restore the 'kernel' qualifier on the generated function.
opening = lambda x: "<| file ext=.cl |>\n{}void".format(x)
if opening("") in incoded['text']:
incoded['text'] = opening("kernel ") + incoded['text'][len(opening("")):]
incoded['text'] = incoded['text'].replace("kernel A(", "kernel void A(")
text = opencl.ExtractSingleKernels(incoded['text'])[0] # Collect only the first kernel generated, ignore the rest.
except IndexError:
l.logger().warn(incoded['text'], ddp_nodes = True)
text = incoded['text']
text = text.replace("<| file ext=.cl |>\n", "").replace("\n<|/ file |>", "")
while "\n\n" in text:
text = text.replace("\n\n", "\n")
while text[-1] == "\n":
text = text[:-1]
sample = self.tokenizer.TokenizeString(text)[:self.sampler.sequence_length]
sample += [self.tokenizer.padToken] * (self.sampler.sequence_length - len(sample))
sample = self.torch.LongTensor(sample).to(self.pytorch.device)
indices = self.tokenizer.TokenizeString(incoded['infills'][0])[:max_to_generate]
indices += [self.tokenizer.padToken] * (max_to_generate - len(indices))
indices = self.torch.LongTensor(indices).to(self.pytorch.device)
outputs['generated_samples'][s_idx] = sample
outputs['sample_indices'][s_idx] = indices
s_idx += 1
if environment.WORLD_RANK == 0:
bar.update(1)
outputs['input_ids'] = inputs['input_ids'].reshape(-1, self.sampler.sequence_length).to(self.pytorch.device)
outputs['masked_lm_lengths'] = inputs['masked_lm_lengths'].reshape(-1, 1).to(self.pytorch.device)
outputs['generated_samples'] = list(outputs['generated_samples'].cpu().numpy())
outputs['sample_indices'] = list(outputs['sample_indices'].cpu().numpy())
outputs['input_ids'] = list(outputs['input_ids'].cpu().numpy())
outputs['masked_lm_lengths'] = list(outputs['masked_lm_lengths'].cpu().numpy())
end = time.time()
return outputs, end-start
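  # Illustrative shape note (hedged, not part of the original code): for a
  # workload of e.g. 4 batches with batch_size 2 and sequence_length 768,
  # inputs['input_ids'] has shape [4 x 2 x 768]; total_seqs is then 8 and the
  # returned 'generated_samples' / 'sample_indices' hold one entry per sequence.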
def PreTrain(self, *args, **kwargs) -> None:
l.logger().warn("Pre-training is not supported yet for Incoder. Moving on.")
return
def Train(self, *args, **kwargs) -> None:
l.logger().warn("Pre-training is not supported yet for Incoder. Moving on.")
return
def Validate(self, *args, **kwargs) -> None:
l.logger().warn("Pre-training is not supported yet for Incoder. Moving on.")
return
def InitSampling(self,
sampler : samplers.Sampler,
seed : typing.Optional[int] = None,
corpus = None,
) -> None:
"""This is called only once. Performs basic initialization of sampling"""
sample_batch_size = sampler.batch_size
##! TODO: Replace with incoder data generator
data_generator = IncoderDataGenerator.SampleMaskLMBatchGenerator(
self.config.training, sampler, self.tokenizer, seed, sample_batch_size,
sampler.sequence_length, self.cache.path, corpus)
##! TODO: Maybe initialize inline here instead of elaborating in separate function.
self._ConfigSampleParams(data_generator, sampler)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
self.step_inputs = None
self.loader = None
self.pred_iterator = None
l.logger().info("Initialized model samples in {}".format(self.sample_path / self.sampler.hash))
return
def InitSampleBatch(self, sampler: samplers.Sampler, **kwargs) -> None:
"""Batch-specific initialization. Called once when a new batch is going to be generated"""
workload_size = kwargs.get('workload_size', None)
if self.loader is None:
if self.torch_tpu_available:
self.loader = self.pytorch.torch_ploader.ParallelLoader(
self.sample.data_generator.dataloader, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
else:
self.loader = self.sample.data_generator.dataloader
if not sampler.is_active:
if self.pred_iterator is None:
self.pred_iterator = iter(self.loader)
try:
inputs = next(self.pred_iterator)
except StopIteration:
self.pred_iterator = iter(self.loader)
inputs = next(self.pred_iterator)
if workload_size is None:
## I think this dictionary holds tensors of the following size:
## [num_gpus x batch_size x seq_len] if only one node works.
## Otherwise, [1 x batch_size x seq_len] since each process manages its own GPU.
padded_wsize = self.pytorch.num_gpus if environment.WORLD_SIZE == 1 else 1
else:
## If a workload is specified, then after you pad to the dimension of GPU or num processes
## Divide the size by GPU size or num processes size.
padded_wsize = (
(max(1, workload_size // (self.pytorch.num_gpus * sampler.batch_size))) * self.pytorch.num_gpus
if environment.WORLD_SIZE == 1
else (workload_size // (self.pytorch.num_nodes * sampler.batch_size)) * self.pytorch.num_nodes)
self.step_inputs = {
x: inputs[x].unsqueeze(0).repeat(padded_wsize, 1, 1)
for x in inputs
}
# This loop below is purely for proper printing reasons:
sample_text = set(
[self.tokenizer.tokensToString(
seq.cpu().numpy(), ignore_token = self.tokenizer.padToken
) for seq in inputs['input_ids']]
)
for seq in sample_text:
self.sampler.setStartText(seq)
self.sampler.Specialize(self.tokenizer)
return
def SampleNextIndices(
self, *unused_args, **unused_kwargs
) -> typing.Tuple[np.array, np.array, np.array, np.array]:
"""Called iteratively to build a single batch of samples, until termination criteria stops calling"""
del unused_kwargs
del unused_args
if self.sample is None:
raise ValueError("Incoder sampler has not been initialized.")
with self.torch.no_grad():
if self.sampler.is_active:
try:
return self.sample.data_generator.ActiveGeneration(self, self.sample)
except StopIteration:
raise StopIteration
else:
##!TODO: just call model's forward function. No need to do more.
step_out, time = self.sample_model_step(
self.sample.model,
self.step_inputs,
is_live = self.sampler.is_live
)
if self.pytorch.num_nodes > 1:
distrib.barrier()
generated_samples = [self.torch.zeros(tuple(step_out['generated_samples'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
sample_indices = [self.torch.zeros(tuple(step_out['sample_indices' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(generated_samples, step_out["generated_samples"])
self.torch.distributed.all_gather(sample_indices, step_out["sample_indices"])
raise NotImplementedError("This will not work because generated_samples and sample indices are lists and not tensors")
else:
generated_samples = step_out['generated_samples']
sample_indices = step_out['sample_indices']
if self.sampler.is_live and input("Show logits figure ? [y/!y]") == "y":
if self.pytorch.num_nodes > 1:
prediction_scores = [self.torch.zeros(tuple(step_out['prediction_scores'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
distrib.barrier()
self.torch.distributed.all_gather(prediction_scores, step_out["prediction_scores"])
else:
prediction_scores = step_out['prediction_scores'].cpu()
for hole, indcs in zip(prediction_scores, sample_indices):
plotter.LogitsStepsDistrib(
x = self.torch.nn.Softmax(dim = 1)(self.torch.FloatTensor(hole[:10])).numpy(),
atoms = [self.tokenizer.decoder[i] for i in range(self.tokenizer.vocab_size)],
sample_indices = [self.tokenizer.decoder[i] for i in indcs[0]][:10],
plot_name = "sampling_distrib",
title = "Sampling distribution dim 1",
x_name = "Probs / sample step",
)
return (
self.step_inputs['original_input'].cpu().view(-1, self.step_inputs['original_input'].shape[2]).numpy(),
self.step_inputs['input_ids'].cpu().view(-1, self.sampler.sequence_length).numpy(),
generated_samples,
sample_indices
)
class Incoder1B(Incoder):
"""
Specified class for 'small' 1B parameter Incoder.
"""
def __init__(self, *args, **kwargs):
kwargs["incoder_version"] = "facebook/incoder-1B"
super(Incoder1B, self).__init__(*args, **kwargs)
return
def __repr__(self) -> str:
return "Incoder1B"
class Incoder6B(Incoder):
"""
Specified class for regular 6B parameter Incoder.
"""
def __init__(self, *args, **kwargs):
kwargs["incoder_version"] = "facebook/incoder-6B"
super(Incoder6B, self).__init__(*args, **kwargs)
return
def __repr__(self) -> str:
return "Incoder6B"
| 17,189 | 41.339901 | 202 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/incoder/data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This modules
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import typing
import numpy as np
import math
import pathlib
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import active_feed_database
from deeplearning.benchpress.models.torch_bert import data_generator as torch_data_generator
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
class IncoderDataGenerator(torch_data_generator.torchLMDataGenerator):
"""Data generator subclass designed for Incoder model."""
@classmethod
def TrainMaskLMBatchGenerator(cls,
corpus : corpuses.Corpus,
training_opts : model_pb2.TrainingOptions,
cache_path : pathlib.Path,
num_train_steps : int = None,
pre_train : bool = False,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> 'IncoderDataGenerator':
"""Initializes data generator for training."""
d = super(IncoderDataGenerator, IncoderDataGenerator()).TrainMaskLMBatchGenerator(
corpus, training_opts, cache_path, num_train_steps, pre_train,
feature_encoder, feature_tokenizer, feature_sequence_length,
)
return d
@classmethod
def SampleMaskLMBatchGenerator(cls,
model_opts : model_pb2.TrainingOptions,
sampler : samplers.Sampler,
tokenizer : tokenizers.TokenizerBase,
seed : int,
sample_batch_size : int,
max_position_embeddings : int,
cache_path : pathlib.Path,
corpus : corpuses.Corpus = None,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> 'IncoderDataGenerator':
"""Initializes data generator for inference."""
d = super(IncoderDataGenerator, IncoderDataGenerator()).SampleMaskLMBatchGenerator(
model_opts, sampler, tokenizer, seed,
sample_batch_size, max_position_embeddings, cache_path, corpus,
feature_encoder, feature_tokenizer, feature_sequence_length
)
return d
def __init__(self):
super(IncoderDataGenerator, self).__init__()
return
def initOrGetQueue(self, target_features: typing.Dict[str, float] = None) -> np.array:
"""
If feed queue is not initialized, initialize it by getting new datapoint.
Otherwise, don't do anything as feed_queue is already loaded from checkpoint.
Adds datapoint to InputFeed table of database.
Returns:
Starting input feed of sampling.
"""
if not self.feed_queue:
if FLAGS.start_from_cached and target_features is not None:
cached_samples = [[x.sample, {':'.join(f.split(':')[:-1]): float(f.split(':')[-1]) for f in x.output_features.split('\n')}, -1] for x in self.active_db.get_data]
if len(cached_samples) == 0:
return self.initOrGetQueue()
else:
for idx, cs in enumerate(cached_samples):
cached_samples[idx][-1] = self.feat_sampler.calculate_distance(cs[1])
sorted_cache_samples = sorted(cached_samples, key = lambda x: x[-1])
for scs in sorted_cache_samples[:self.sampler.config.sample_corpus.corpus_config.active.active_search_width]:
tokenized = self.tokenizer.TokenizeString(scs[0])
padded = self._padToMaxPosition(tokenized)[:self.sampler.sequence_length]
if padded[0] == self.tokenizer.padToken:
l.logger().error("Pad token was found again at the beginning of the sequence.")
l.logger().error(scs[0])
l.logger().error(tokenized)
l.logger().error(padded)
encoded = self._padToMaxPosition([int(x) for x in tokenized])[:self.sampler.sequence_length]
assert encoded[0] != self.tokenizer.padToken, encoded
self.feed_queue.append(
torch_data_generator.ActiveSampleFeed(
input_feed = encoded,
input_features = scs[1],
input_score = scs[-1],
gen_id = 0,
)
)
self.addToDB(
active_feed_database.ActiveInput.FromArgs(
tokenizer = self.tokenizer, id = self.active_db.input_count,
input_feed = encoded, input_features = scs[1],
)
)
else:
try:
cf = next(self.loader).squeeze(0)
except StopIteration:
self.loader = iter(self.dataloader)
cf = next(self.loader).squeeze(0)
cf = [int(x) for x in cf]
assert cf[0] != self.tokenizer.padToken, cf
self.feed_queue.append(
torch_data_generator.ActiveSampleFeed(
input_feed = cf,
input_features = extractor.ExtractFeatures(self.tokenizer.ArrayToCode(cf), [self.feat_sampler.feature_space])[self.feat_sampler.feature_space],
input_score = math.inf,
gen_id = 0,
)
)
self.addToDB(
active_feed_database.ActiveInput.FromArgs(
tokenizer = self.tokenizer, id = self.active_db.input_count,
input_feed = cf, input_features = self.feed_queue[-1].input_features,
)
)
l.logger().info("Feed queue input scores: {}".format(', '.join([str(round(c.input_score, 3)) for c in self.feed_queue])))
    return self.feed_queue[0].input_feed
| 7,533 | 48.565789 | 169 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_bert/hooks.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import progressbar
import six
import humanize
import numpy as np
import glob
import pathlib
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from deeplearning.benchpress.util.tf import tf
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.samplers import validation_database
from deeplearning.benchpress.util import logging as l
"""
All hooks deployed for this implementation of BERT.
These hooks must be strictly called within model_fn function
and be passed to EstimatorSpec.
"""
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = tf.python.framework.ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
class _tfEstimatorHooks(tf.compat.v1.train.SessionRunHook):
"""Base class for Estimator Hooks, used for this BERT model"""
def __init__(self,
mode: tf.compat.v1.estimator.ModeKeys,
):
"""
Base class hook initialization
Args:
mode: If hooks is used for training or evaluation
"""
self.session_dict = {}
if mode == tf.compat.v1.estimator.ModeKeys.TRAIN:
## Training
self.is_training = True
elif mode == tf.compat.v1.estimator.ModeKeys.EVAL:
## Validation
self.is_training = False
elif mode == tf.compat.v1.estimator.ModeKeys.PREDICT:
## Sampling
self.is_training = False
else:
raise ValueError("mode for hook has not been provided")
self.current_step = None
self.global_step = _as_graph_element(tf.compat.v1.train.get_or_create_global_step())
return
def begin(self):
"""
Initialize the session dictionary for the base class
session_dict will be incremented by derived classes that
need extra tensors to be evaluated
"""
self.session_dict = {
self.global_step: self.global_step
}
return
def before_run(self, run_context):
return tf.estimator.SessionRunArgs(self.session_dict)
def after_run(self, run_context, run_values):
if self.is_training:
self.current_step = run_values.results[self.global_step]
else:
if self.current_step is None:
self.current_step = 0
else:
self.current_step += 1
return
def end(self, session):
return
class AverageSummarySaverHook(_tfEstimatorHooks):
"""
Similar functionality to SummarySaverHook that
stores averaged tensors instead of step-instant values.
"""
def __init__(self,
tensors: dict,
save_steps: int,
output_dir: str,
show_average: bool = True,
mode: tf.compat.v1.estimator.ModeKeys = tf.compat.v1.estimator.ModeKeys.TRAIN,
):
"""
Args:
      tensors: Tuple of (summary tensors, value tensors); the matching values are averaged and written to the event file.
      save_steps: Saves the (averaged) tensor values once every defined number of estimator steps
output_dir: Location of tf.event summary files.
mode: If hooks is used for training or evaluation
"""
super(AverageSummarySaverHook, self).__init__(mode)
self.tensors = {
summary_tensor.name.replace(":0", ""): tensor
for (summary_tensor, tensor) in zip(tensors[0], tensors[1])
}
self.result = {k: [] for k in self.tensors}
self.save_steps = save_steps
self.step_triggered = False
self.show_average = show_average
self.output_dir = output_dir
self.timer = tf.compat.v1.train.SecondOrStepTimer(every_steps = save_steps)
return
def begin(self):
"""
Called once at initialization stage
"""
super(AverageSummarySaverHook, self).begin()
self.summary_writer = tf.python.training.summary_io.SummaryWriterCache.get(self.output_dir)
self.trigger_step = 0
self.session_dict['tensors'] = self.tensors
self.timer.reset()
return
def before_run(self, run_context):
"""
Called before session.run()
Any tensor/op should be declared here in order to be evaluated
returns None or SessionRunArgs()
"""
self.step_triggered = True if self.trigger_step == 0 else self.timer.should_trigger_for_step(1 + self.trigger_step)
return tf.estimator.SessionRunArgs(self.session_dict)
def after_run(self, run_context, run_values):
"""
Requested tensors are evaluated and their values are available
"""
super(AverageSummarySaverHook, self).after_run(run_context, run_values)
for tag in self.tensors:
if self.show_average:
self.result[tag].append(run_values.results['tensors'][tag])
else:
self.result[tag] = [run_values.results['tensors'][tag]]
if self.current_step == 0:
self.summary_writer.add_session_log(
tf.core.util.event_pb2.SessionLog(status=tf.core.util.event_pb2.SessionLog.START),
self.current_step
)
if self.step_triggered and not (self.trigger_step == 0 and self.current_step > 0):
self.result = { k: (sum(v) / len(v)) for (k, v) in self.result.items() }
self._save_summary(self.result)
self.result = {k: [] for k in self.result}
self.trigger_step += 1
def _save_summary(self, tensor_values):
if self.is_training:
elapsed_secs, _ = self.timer.update_last_triggered_step(1 + self.trigger_step if self.trigger_step else 0)
else:
elapsed_secs = None
tensor_summary = []
for (key, value) in tensor_values.items():
tensor_summary.append(
tf.core.framework.summary_pb2.Summary.Value(
tag = key, simple_value = value
)
)
summary = tf.core.framework.summary_pb2.Summary(value = tensor_summary)
self.summary_writer.add_summary(summary, self.current_step)
self.summary_writer.flush()
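# Hedged usage sketch (illustrative, not part of the original hooks). The hook
# expects a (summary tensors, value tensors) pair, e.g. built from a
# tf.compat.v1 scalar summary inside model_fn; the names below are assumptions
# for demonstration only:
#
#   total_loss = ... # scalar loss tensor built inside model_fn
#   loss_summary = tf.compat.v1.summary.scalar("total_loss", total_loss)
#   training_hooks.append(AverageSummarySaverHook(
#     tensors    = ([loss_summary], [total_loss]),
#     save_steps = 100,
#     output_dir = str(log_dir),
#   ))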
class tfProgressBar(_tfEstimatorHooks):
"""Real time progressbar to capture tf Estimator training or validation"""
def __init__(self,
max_length: int,
mode: tf.compat.v1.estimator.ModeKeys = tf.compat.v1.estimator.ModeKeys.TRAIN,
):
"""
Initialize Progress Bar Hook
This hook shows a progress bar in output and prints after N steps tensor values provided.
Args:
max_length: This is the maximum threshold of the progress bar
mode: If hooks is used for training or evaluation
"""
super(tfProgressBar, self).__init__(mode)
self.max_length = max_length
def begin(self):
"""
Called once at initialization stage
"""
super(tfProgressBar, self).begin()
self.bar = progressbar.ProgressBar(max_value = self.max_length)
return
def after_run(self, run_context, run_values):
"""
Requested tensors are evaluated and their values are available
"""
super(tfProgressBar, self).after_run(run_context, run_values)
self.bar.update(1 + self.current_step)
return
class tfLogTensorHook(_tfEstimatorHooks):
def __init__(self,
tensors: dict,
log_steps: int,
show_average: bool = True,
at_end: bool = False,
mode: tf.compat.v1.estimator.ModeKeys = tf.compat.v1.estimator.ModeKeys.TRAIN,
):
"""
Args:
      tensors: String to tf.Tensor dictionary for the tensor values desired to be monitored.
log_steps: If set, logs tensor values once every defined number of estimator steps
at_end: If set, prints tensor values at end of session
mode: If hooks is used for training or evaluation
"""
super(tfLogTensorHook, self).__init__(mode)
self.tensor_tags = sorted(tensors.keys())
self.tensors = {
tag: _as_graph_element(tensor)
for (tag, tensor) in tensors.items()
}
self.result = {k: [] for k in self.tensor_tags}
self.log_steps = log_steps
self.at_end = at_end
self.step_triggered = False
self.show_average = show_average
self.timer = tf.compat.v1.train.SecondOrStepTimer(every_steps = log_steps)
return
def begin(self):
"""
Called once at initialization stage
"""
super(tfLogTensorHook, self).begin()
self.trigger_step = 0
self.current_epoch = 0
self.session_dict['tensors'] = self.tensors
self.timer.reset()
return
def before_run(self, run_context):
"""
Called before session.run()
Any tensor/op should be declared here in order to be evaluated
returns None or SessionRunArgs()
"""
self.step_triggered = True if self.trigger_step == 0 else self.timer.should_trigger_for_step(1 + self.trigger_step)
return tf.estimator.SessionRunArgs(self.session_dict)
def after_run(self, run_context, run_values):
"""
Requested tensors are evaluated and their values are available
"""
super(tfLogTensorHook, self).after_run(run_context, run_values)
self.current_epoch = int((1 + self.current_step) / self.log_steps)
for tag in self.tensor_tags:
if self.show_average:
self.result[tag].append(run_values.results['tensors'][tag])
else:
self.result[tag] = [run_values.results['tensors'][tag]]
if self.step_triggered:
self.result = { k: (sum(v) / len(v)) for (k, v) in self.result.items() }
self._log_tensors(self.result)
self.result = {k: [] for k in self.result}
self.trigger_step += 1
def end(self, session):
"""
Called at the end of session
"""
super(tfLogTensorHook, self).end(session)
if self.at_end:
end_values = session.run(self.tensors)
for tag in self.tensor_tags:
if self.show_average:
self.result[tag].append(end_values[tag])
else:
          self.result[tag] = [end_values[tag]]
self.result = { k: (sum(v) / len(v)) for (k, v) in self.result.items() }
def _log_tensors(self, tensor_values):
if self.is_training:
elapsed_secs, _ = self.timer.update_last_triggered_step(1 + self.trigger_step if self.trigger_step else 0)
else:
elapsed_secs = None
stats = []
for tag in self.tensor_tags:
stats.append("{}: {:.5f}".format(tag, tensor_values[tag]))
if elapsed_secs is not None:
l.logger().info("Epoch {} {} - {}".format(self.current_epoch, ", ".join(stats), humanize.naturaldelta(elapsed_secs)))
elif self.current_epoch > 0:
l.logger().info("Epoch {} {}".format(self.current_epoch, ", ".join(stats)))
else:
if self.is_training:
l.logger().info("Initialization: {}".format(", ".join(stats)))
else:
l.logger().info("Tensor Values: {}".format(", ".join(stats)))
class tfPlotTensorHook(_tfEstimatorHooks):
"""Real time training hook that plots tensors against training step."""
def __init__(self,
tensors: dict,
log_steps: int,
output_dir: pathlib.Path,
mode: tf.compat.v1.estimator.ModeKeys = tf.compat.v1.estimator.ModeKeys.TRAIN,
):
"""
Args:
      tensors: Tuple of (summary tensors, value tensors) whose values are plotted against the training step.
log_steps: If set, logs tensor values once every defined number of estimator steps
mode: If hooks is used for training or evaluation
"""
if mode != tf.compat.v1.estimator.ModeKeys.TRAIN:
raise ValueError("tfPlotTensorHook can only be used for training mode.")
super(tfPlotTensorHook, self).__init__(mode)
self.tensors = {
summary_tensor.name.replace(":0", ""): tensor
for (summary_tensor, tensor) in zip(tensors[0], tensors[1])
}
self.epoch_values = {
tag: {'value': [], 'step': []}
for tag in self.tensors
}
self.results = {
tag: {'value': [], 'step': []}
for tag in self.tensors
}
if len(glob.glob(str(output_dir / "events.out.tfevents*"))) != 0:
try:
event_acc = EventAccumulator(str(output_dir))
event_acc.Reload()
for k in self.tensors:
wt, step, value = zip(*event_acc.Scalars(k))
self.results[k] = {
'value': list(value),
'step' : list(step),
}
except KeyError:
pass
self.log_steps = log_steps
self.output_dir = output_dir
self.step_triggered = False
self.timer = tf.compat.v1.train.SecondOrStepTimer(every_steps = log_steps)
return
def begin(self):
"""
Called once at initialization stage
"""
super(tfPlotTensorHook, self).begin()
self.trigger_step = 0
self.session_dict['tensors'] = self.tensors
self.timer.reset()
return
def before_run(self, run_context):
"""
Called before session.run()
Any tensor/op should be declared here in order to be evaluated
returns None or SessionRunArgs()
"""
self.step_triggered = True if self.trigger_step == 0 else self.timer.should_trigger_for_step(1 + self.trigger_step)
return tf.estimator.SessionRunArgs(self.session_dict)
def after_run(self, run_context, run_values):
"""
Requested tensors are evaluated and their values are available
"""
super(tfPlotTensorHook, self).after_run(run_context, run_values)
for tag in self.tensors:
self.epoch_values[tag]['value'].append(run_values.results['tensors'][tag])
self.epoch_values[tag]['step'].append(run_values.results[self.global_step])
if self.step_triggered:
self.results[tag]['value'].append(sum(self.epoch_values[tag]['value']) /
len(self.epoch_values[tag]['value']))
self.results[tag]['step'].append(1 + run_values.results[self.global_step])
self.epoch_values[tag] = {'value': [], 'step': []}
if self.step_triggered and not (self.trigger_step == 0 and self.current_step > 0):
self._plot_tensors(self.results)
self.trigger_step += 1
def _plot_tensors(self, tensor_values):
_, _ = self.timer.update_last_triggered_step(1 + self.trigger_step if self.trigger_step else 0)
for (key, value) in tensor_values.items():
key_str = str(pathlib.Path(key).stem)
plotter.SingleScatterLine(
x = value['step'],
y = value['value'],
plot_name = key_str,
path = self.output_dir,
title = key_str,
x_name = "Training Step",
y_name = key_str,
)
return
class writeValidationDB(_tfEstimatorHooks):
"""Real time storage hook for validation results"""
def __init__(self,
mode,
url,
tokenizer,
seen_in_training,
original_input,
input_ids,
input_mask,
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
masked_lm_lengths,
next_sentence_labels,
masked_lm_predictions,
next_sentence_predictions,
):
"""
Initialize writeValidationDB
Stores input, target predictions, actual predictions, positions, step
during validation to database.
Args:
All input and output tensors for each single validation step.
"""
super(writeValidationDB, self).__init__(mode)
self.tokenizer = tokenizer
self.val_db = validation_database.ValidationDatabase("sqlite:///{}".format(url))
self.val_id = self.val_db.count
self.seen_in_training = seen_in_training
self.original_input = original_input
self.input_ids = input_ids
self.input_mask = input_mask
self.masked_lm_positions = masked_lm_positions
self.masked_lm_ids = masked_lm_ids
self.masked_lm_weights = masked_lm_weights
self.masked_lm_lengths = masked_lm_lengths
self.next_sentence_labels = next_sentence_labels
self.masked_lm_predictions = masked_lm_predictions
self.next_sentence_predictions = next_sentence_predictions
return
def begin(self):
"""
Called once at initialization stage
"""
super(writeValidationDB, self).begin()
self.session_dict[self.seen_in_training] = self.seen_in_training
self.session_dict[self.original_input] = self.original_input
self.session_dict[self.input_ids] = self.input_ids
self.session_dict[self.input_mask] = self.input_mask
self.session_dict[self.masked_lm_positions] = self.masked_lm_positions
self.session_dict[self.masked_lm_ids] = self.masked_lm_ids
self.session_dict[self.masked_lm_weights] = self.masked_lm_weights
self.session_dict[self.masked_lm_lengths] = self.masked_lm_lengths
self.session_dict[self.next_sentence_labels] = self.next_sentence_labels
self.session_dict[self.masked_lm_predictions] = self.masked_lm_predictions
self.session_dict[self.next_sentence_predictions] = self.next_sentence_predictions
return
def before_run(self, run_context):
"""
Called before session.run()
Any tensor/op should be declared here in order to be evaluated
returns None or SessionRunArgs()
"""
return tf.estimator.SessionRunArgs(self.session_dict)
def after_run(self, run_context, run_values):
"""
Requested tensors are evaluated and their values are available
"""
super(writeValidationDB, self).after_run(run_context, run_values)
batch_size = run_values.results[self.input_ids].shape[0]
masked_lm_predictions = np.reshape(
run_values.results[self.masked_lm_predictions],
(batch_size, int(len(run_values.results[self.masked_lm_predictions]) / batch_size))
)
next_sentence_predictions = np.reshape(
run_values.results[self.next_sentence_predictions],
(batch_size, int(len(run_values.results[self.next_sentence_predictions]) / batch_size))
)
assert run_values.results[self.original_input].shape[0] == batch_size
assert run_values.results[self.input_ids].shape[0] == batch_size
assert run_values.results[self.input_mask].shape[0] == batch_size
assert run_values.results[self.masked_lm_positions].shape[0] == batch_size
assert run_values.results[self.masked_lm_ids].shape[0] == batch_size
assert run_values.results[self.masked_lm_weights].shape[0] == batch_size
assert run_values.results[self.masked_lm_lengths].shape[0] == batch_size
assert run_values.results[self.next_sentence_labels].shape[0] == batch_size
assert masked_lm_predictions.shape[0] == batch_size
assert next_sentence_predictions.shape[0] == batch_size
with self.val_db.Session(commit = True) as session:
for b in range(batch_size):
val_trace = validation_database.BERTValFile(
**validation_database.BERTValFile.FromArgs(
tokenizer = self.tokenizer,
id = self.val_id,
train_step = run_values.results[self.global_step],
seen_in_training = run_values.results[self.seen_in_training][b],
original_input = run_values.results[self.original_input][b],
input_ids = run_values.results[self.input_ids][b],
input_mask = run_values.results[self.input_mask][b],
masked_lm_positions = run_values.results[self.masked_lm_positions][b],
masked_lm_ids = run_values.results[self.masked_lm_ids][b],
masked_lm_weights = run_values.results[self.masked_lm_weights][b],
masked_lm_lengths = run_values.results[self.masked_lm_lengths][b],
next_sentence_labels = run_values.results[self.next_sentence_labels][b],
masked_lm_predictions = masked_lm_predictions[b],
next_sentence_predictions = next_sentence_predictions[b],
)
)
try:
exists = session.query(validation_database.BERTValFile.sha256).filter_by(sha256 = val_trace.sha256).scalar() is not None
except sqlalchemy.orm.exc.MultipleResultsFound as e:
l.logger().error("Selected sha256 has been already found more than once.")
raise e
if not exists:
session.add(val_trace)
self.val_id += 1
return | 21,690 | 36.205832 | 130 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_bert/data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This module
provides data generator classes that stream batches of masked-LM training data
to the TensorFlow BERT estimator through tf.data input functions.
"""
import typing
import random
import collections
import glob
import numpy as np
from deeplearning.benchpress.util.tf import tf
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.models import lm_data_generator
from deeplearning.benchpress.models import sequence_masking
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
class tfLMDataGenerator(lm_data_generator.MaskLMDataGenerator):
@classmethod
def TrainMaskLMBatchGenerator(cls,
corpus: "corpuses.Corpus",
training_opts: model_pb2.TrainingOptions,
cache_path,
) -> "data_generator.MaskLMBatchGenerator":
"""Initializes data generator for training."""
return super(tfLMDataGenerator, tfLMDataGenerator()).TrainMaskLMBatchGenerator(
corpus, training_opts, cache_path
)
@classmethod
def SampleMaskLMBatchGenerator(cls,
model_opts,
sampler,
tokenizer,
seed: int,
max_position_embeddings: int,
cache_path,
) -> "data_generator.MaskLMBatchGenerator":
"""Initializes data generator for inference."""
d = super(tfLMDataGenerator, tfLMDataGenerator()).SampleMaskLMBatchGenerator(
model_opts, sampler, tokenizer, seed, max_position_embeddings, cache_path
)
d.tfRecordSampler = d.tfRecordSampleGenerator()
return d
def __init__(self):
super(tfLMDataGenerator, self).__init__("tf_record")
self.sampleBatch = None
self.sampleIndices = None
self.tfRecordSampler = None
return
def generateTfDataset(self,
sequence_length: int,
is_training : bool,
num_cpu_threads: int,
eval_set : typing.List = None,
use_tpu : bool = False,
) -> "tf.Dataset":
"""Wrapper function that constructs a tf.Dataset used for training BERT."""
def input_fn(params):
"""
function used by tf.estimator to generate inputs for training.
"""
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
## This function assumes record is still a file (expressed as TF dataset)
## It decodes this record to tf scalars.
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, dtype = tf.int32)
example[name] = t
return example
batch_size = params["batch_size"]
name_to_features = {
"seen_in_training" : tf.io.FixedLenFeature([1], tf.int64),
"original_input" : tf.io.FixedLenFeature([sequence_length], tf.int64),
"input_ids" : tf.io.FixedLenFeature([sequence_length], tf.int64),
"input_mask" : tf.io.FixedLenFeature([sequence_length], tf.int64),
"masked_lm_positions" : tf.io.FixedLenFeature([self.training_opts.max_predictions_per_seq], tf.int64),
"masked_lm_ids" : tf.io.FixedLenFeature([self.training_opts.max_predictions_per_seq], tf.int64),
"masked_lm_weights" : tf.io.FixedLenFeature([self.training_opts.max_predictions_per_seq], tf.float32),
"masked_lm_lengths" : tf.io.FixedLenFeature([self.training_opts.max_predictions_per_seq], tf.int64),
"next_sentence_labels" : tf.io.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
dataset = tf.io.gfile.glob([str(p) for p in self.dataset['train_dataset']['file']])
d = tf.data.Dataset.from_tensor_slices(tf.constant(dataset))
if self.training_opts.shuffle_corpus_contentfiles_between_epochs:
d = d.shuffle(buffer_size = len(dataset), reshuffle_each_iteration=True)
d = d.repeat()
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(dataset))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
else:
if eval_set is None:
dataset = tf.io.gfile.glob(
[str(path) for tf_set in self.dataset for path in self.dataset[tf_set]['file']]
)
else:
dataset = tf.io.gfile.glob([str(tf_set) for tf_set in eval_set])
d = tf.data.TFRecordDataset(dataset)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
      # and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=use_tpu))
return d
return input_fn
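  # Illustrative sketch (not part of the original module): the closure returned by
  # generateTfDataset is handed to the estimator, which invokes it with a params
  # dict containing 'batch_size'. The estimator, step count and argument values
  # below are placeholders.
  #
  #   train_input_fn = data_generator.generateTfDataset(
  #     sequence_length = 512, is_training = True, num_cpu_threads = 8)
  #   estimator.train(input_fn = train_input_fn, max_steps = num_train_steps)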
def generateTfSamples(self):
"""
Contains input_fn closure function for estimator
Returns:
input_fn callable.
"""
def input_fn(params):
"""
function used by tf.estimator to generate inputs for inference.
"""
def sample_gen(batch_size: int):
"""
This generator yields iteratively the inference input blob for each step.
In the first iteration, it yields sampler.encoded_start_text and then for each step,
self.sampleBatch is updated with the model's current output through self.updateVocabulary.
The generator stops when the model has filled all mask or hole tokens with predictions.
Arguments:
batch_size: The batch size used during inference.
Yields:
Current step's inference input for model.
Returns:
None
"""
assert batch_size == len(self.sampleBatch), "{}, {}".format(batch_size, len(self.sampleBatch))
original_input = [sample for sample in self.sampleBatch]
while True:
(input_ids, input_mask, masked_lm_positions,
masked_lm_ids, masked_lm_weights, masked_lm_lengths) = [], [], [], [], [], []
max_mask_len = max(
[len(np.where(np.in1d(np.asarray(x), [self.tokenizer.maskToken, self.tokenizer.holeToken]))[0]) for x in self.sampleBatch]
)
if max_mask_len == 0:
return
for sample in self.sampleBatch:
sample_masks = np.where(np.in1d(sample, [self.tokenizer.maskToken, self.tokenizer.holeToken]))[0]
actual_mask_len = len(sample_masks)
len_offset = max_mask_len - actual_mask_len
pad_idx = np.where(sample == self.tokenizer.padToken)[0]
inp_mask = np.ones(len(sample), dtype = np.int32)
if len(pad_idx) > 0:
inp_mask[pad_idx[0]:] = 0
input_ids.append(list(sample))
input_mask.append(list(inp_mask))
masked_lm_positions.append(list(sample_masks) + [0] * len_offset)
masked_lm_ids.append([self.tokenizer.maskToken] * actual_mask_len + [self.tokenizer.padToken] * len_offset)
masked_lm_weights.append([0.0] * (actual_mask_len + len_offset))
masked_lm_lengths.append([-1] * (actual_mask_len + len_offset))
yield (np.full([batch_size, 1], -1), original_input,
input_ids, input_mask,
masked_lm_positions, masked_lm_ids,
masked_lm_weights, masked_lm_lengths,
np.zeros([batch_size, 1]))
batch_size = params['batch_size']
sample = tf.data.Dataset.from_generator(
lambda: sample_gen(batch_size),
output_types = sequence_masking.tfSequence.tfTypes(),
output_shapes = sequence_masking.tfSequence.tfShapes(batch_size, self.sampler.sequence_length)
)
it = tf.compat.v1.data.make_one_shot_iterator(sample)
(seen_in_training, original_input,
input_ids, input_mask,
masked_lm_positions, masked_lm_ids,
masked_lm_weights, masked_lm_lengths, next_sentence_labels) = it.get_next()
return {
'seen_in_training' : seen_in_training,
'original_input' : original_input,
'input_ids' : input_ids,
'input_mask' : input_mask,
'masked_lm_positions' : masked_lm_positions,
'masked_lm_ids' : masked_lm_ids,
'masked_lm_weights' : masked_lm_weights,
'masked_lm_lengths' : masked_lm_lengths,
'next_sentence_labels' : next_sentence_labels,
}
return input_fn
def tfRecordSampleGenerator(self):
if self.sampler.isFixedStr:
return None
assert not self.sampler.config.HasField("start_text")
path_list = self.configSampleSets()
if len(path_list) == 0:
raise FileNotFoundError(path_list)
for path in path_list:
for example in tf.compat.v1.io.tf_record_iterator(path):
input_ids = np.asarray(tf.train.Example.FromString(example).features.feature['input_ids'].int64_list.value)
if self.tokenizer.padToken in input_ids:
yield input_ids[:np.where(input_ids == self.tokenizer.padToken)[0][0]]
else:
yield input_ids
def InitSampleBatch(self) -> None:
"""
Initializes data_generator for inference.
self.sampleBatch is initialized with sampler.encoded_start_text
"""
if not self.sampler.isFixedStr:
try:
start_text = next(self.tfRecordSampler)[:self.sampler.sequence_length]
except StopIteration:
l.logger().warn("Repeating iterator on dataset...")
self.tfRecordSampler = self.tfRecordSampleGenerator()
try:
start_text = next(self.tfRecordSampler)[:self.sampler.sequence_length]
except Exception as e:
raise e
self.sampler.setStartText(self.tokenizer.tokensToString(start_text))
self.sampler.Specialize(self.tokenizer)
assert self.sampler.sequence_length <= self.max_position_embeddings, "Sampler sequence length exceeds max position embeddings."
input_sample = self.sampler.encoded_start_text
assert np.ndim(input_sample) == 1, "Input samples have to be one-dimensional. {} given.".format(input_sample.shape)
target_idx = np.where(np.in1d(input_sample, [self.tokenizer.maskToken, self.tokenizer.holeToken]))[0]
assert len(target_idx) != 0, "No target prediction in sample text"
num_masks = np.count_nonzero(input_sample == self.tokenizer.maskToken)
num_holes = np.count_nonzero(input_sample == self.tokenizer.holeToken)
num_targets = num_masks + num_holes
padded_sample = self._padToMaxPosition(input_sample)
padded_sample = padded_sample[:self.sampler.sequence_length]
self.sampleBatch = np.repeat(padded_sample[None, :], self.sampler.batch_size, axis = 0)
self.sampleIndices = [[[] for i in range(num_targets)] for j in range(self.sampler.batch_size)]
return
def updateSampleBatch(self,
input_ids : np.array,
masked_lm_ids : np.array,
) -> np.array:
"""
Updates self.sampleBatch with the model's output prediction.
The output, if still contains hole or mask tokens, is fed back
to the model's input through the input_fn's sample_gen generator.
"""
assert len(input_ids) == len(masked_lm_ids), "Inputs and predictions do not have the same batch size."
updated_sequence = []
done = True
for batch_idx, _ in enumerate(input_ids):
batch = []
mask_id_index = 0
closed_hole_index = 0
for idx, token in enumerate(input_ids[batch_idx]):
if token == self.tokenizer.maskToken:
mt = masked_lm_ids[batch_idx][mask_id_index]
if mt == self.tokenizer.maskToken or mt == self.tokenizer.holeToken:
continue
if len(self.sampleIndices[batch_idx][mask_id_index]) > 0:
while(self.sampleIndices[batch_idx][mask_id_index + closed_hole_index][-1]) == self.tokenizer.endholeToken:
closed_hole_index += 1
self.sampleIndices[batch_idx][mask_id_index + closed_hole_index].append(mt)
mask_id_index += 1
batch.append(mt)
elif token == self.tokenizer.holeToken:
mt = masked_lm_ids[batch_idx][mask_id_index]
if mt == self.tokenizer.maskToken or mt == self.tokenizer.holeToken:
continue
if len(self.sampleIndices[batch_idx][mask_id_index]) > 0:
while(self.sampleIndices[batch_idx][mask_id_index + closed_hole_index][-1]) == self.tokenizer.endholeToken:
closed_hole_index += 1
self.sampleIndices[batch_idx][mask_id_index + closed_hole_index].append(mt)
mask_id_index += 1
if mt != self.tokenizer.endholeToken:
batch.append(mt)
batch.append(self.tokenizer.holeToken)
done = False
else:
batch.append(token)
batch = np.asarray(batch)
batch = self._padToMaxPosition(batch)
      # TODO: For now, over-long sequences are simply chopped.
      # If a sequence grows bigger than it should, crop one or both edges,
      # save the cropped parts, feed max_position_embeddings tokens to the
      # next step, then concatenate the cropped parts back.
      if len(batch) > self.sampler.sequence_length:
        l.logger().warn("Cropped {} tokens from sample batch".format(len(batch) - self.sampler.sequence_length))
batch = batch[:self.sampler.sequence_length]
updated_sequence.append(batch)
self.sampleBatch = np.asarray(updated_sequence)
return self.sampleBatch, self.sampleIndices
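  # Illustrative sketch (not part of the original module) of the feedback loop
  # described above: predictions are written back into self.sampleBatch until no
  # mask or hole tokens remain. The estimator and the prediction dict keys are
  # placeholder assumptions.
  #
  #   data_generator.InitSampleBatch()
  #   for prediction in estimator.predict(input_fn = data_generator.generateTfSamples()):
  #     batch, indices = data_generator.updateSampleBatch(
  #       prediction['input_ids'], prediction['masked_lm_predictions'])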
def toTensorFormat(self,
datapoint: typing.TypeVar("#TODO")
) -> typing.TypeVar("#TODO"):
raise NotImplementedError("#TODO!")
def _saveCorpusRecord(self, masked_corpus: typing.Dict) -> None:
"""Converts corpus nparrays to tf Features and stores corpus to TfRecord"""
writer = tf.io.TFRecordWriter(str(masked_corpus['file']))
if FLAGS.write_text_dataset:
file_writer = open(masked_corpus['txt'], 'w')
for (inst_index, instance) in enumerate(masked_corpus['corpus']):
seen_in_training = instance.seen_in_training
original_input = instance.original_input
input_ids = instance.input_ids
input_mask = instance.input_mask
assert len(input_ids) == self.training_opts.sequence_length, "len(input_ids): {}, sequence_length: {}".format(len(input_ids), self.training_opts.sequence_length)
masked_lm_positions = instance.masked_lm_positions
masked_lm_ids = instance.masked_lm_ids
masked_lm_weights = instance.masked_lm_weights
masked_lm_lengths = instance.masked_lm_lengths
next_sentence_label = instance.next_sentence_label
features = collections.OrderedDict()
features["seen_in_training"] = tf.train.Feature(int64_list = tf.train.Int64List(
value = list([seen_in_training])))
features["original_input"] = tf.train.Feature(int64_list = tf.train.Int64List(
value = list(original_input)))
features["input_ids"] = tf.train.Feature(int64_list = tf.train.Int64List(
value = list(input_ids)))
features["input_mask"] = tf.train.Feature(int64_list = tf.train.Int64List(
value = list(input_mask)))
features["masked_lm_positions"] = tf.train.Feature(int64_list = tf.train.Int64List(
value = list(masked_lm_positions)))
features["masked_lm_ids"] = tf.train.Feature(int64_list = tf.train.Int64List(
value = list(masked_lm_ids)))
features["masked_lm_weights"] = tf.train.Feature(float_list = tf.train.FloatList(
value = list(masked_lm_weights)))
features["masked_lm_lengths"] = tf.train.Feature(int64_list = tf.train.Int64List(
value = list(masked_lm_lengths)))
features["next_sentence_labels"] = tf.train.Feature(int64_list = tf.train.Int64List(
value = list([next_sentence_label])))
tf_example = tf.train.Example(features = tf.train.Features(feature = features))
writer.write(tf_example.SerializeToString())
if FLAGS.write_text_dataset:
file_writer.write("'seen_in_training': {}\n'original_input': {}\n'input_ids': {}\n'input_mask': {}\n'masked_lm_positions': {}\n'masked_lm_ids': {}\n'masked_lm_weights': {}\n'masked_lm_lengths': {}\n'next_sentence_labels': {}\n\n"
.format((True if seen_in_training == 1 else False),
self.tokenizer.tokensToString(original_input, ignore_token = self.tokenizer.padToken),
self.tokenizer.tokensToString(input_ids, ignore_token = self.tokenizer.padToken),
input_mask,
masked_lm_positions,
self.tokenizer.tokensToString(masked_lm_ids),
masked_lm_weights,
masked_lm_lengths,
next_sentence_label)
)
writer.close()
if FLAGS.write_text_dataset:
file_writer.close()
l.logger().info("Wrote {} instances ({} batches of {} datapoints) to {}"
.format(inst_index + 1, self.steps_per_epoch, self.training_opts.batch_size, masked_corpus['file']))
return
| 20,023 | 45.675991 | 237 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_bert/model.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
from deeplearning.benchpress.util.tf import tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by the layer normalization layers.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
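# Illustrative sketch (not part of the original module): a BertConfig can be built
# directly, rebuilt from a plain dict, or serialized to JSON. The values below are
# placeholders.
#
#   config = BertConfig(vocab_size = 32000, hidden_size = 512, num_hidden_layers = 8)
#   config = BertConfig.from_dict(config.to_dict())   # round-trips through a dict
#   json_str = config.to_json_string()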
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.compat.v1.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.compat.v1.variable_scope(scope, default_name="bert"):
with tf.compat.v1.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob,
layer_norm_eps=config.layer_norm_eps,
)
with tf.compat.v1.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
layer_norm_eps=config.layer_norm_eps,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.compat.v1.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.compat.v1.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def _get_masked_lm_output(bert_config,
input_tensor,
output_weights,
positions,
label_ids,
label_weights
):
"""Get loss and log probs for the masked LM."""
input_tensor = _gather_indexes(input_tensor, positions)
with tf.compat.v1.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.compat.v1.variable_scope("transform"):
input_tensor = tf.compat.v1.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=get_activation(bert_config.hidden_act),
kernel_initializer=create_initializer(
bert_config.initializer_range))
input_tensor = layer_norm(input_tensor, eps = bert_config.layer_norm_eps)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.compat.v1.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
def _get_next_sentence_output(bert_config,
input_tensor,
labels
):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.compat.v1.variable_scope("cls/seq_relationship"):
output_weights = tf.compat.v1.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=create_initializer(bert_config.initializer_range))
output_bias = tf.compat.v1.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def _gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that"s not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name_to_variable[name]
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
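# Illustrative sketch (not part of the original module): the assignment map is
# typically fed to tf.compat.v1.train.init_from_checkpoint inside the model_fn so
# that variables matching a pre-trained checkpoint are restored from it.
# `init_checkpoint` is a placeholder path.
#
#   tvars = tf.compat.v1.trainable_variables()
#   assignment_map, initialized_names = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
#   tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)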
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, eps = 1e-12, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.keras.layers.LayerNormalization(
axis = -1, epsilon=eps, dtype=tf.float32, name = name
)(input_tensor)
def layer_norm_and_dropout(input_tensor, dropout_prob, eps = 1e-12, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, eps = eps, name = name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.gather()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.compat.v1.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
flat_input_ids = tf.reshape(input_ids, [-1])
if use_one_hot_embeddings:
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.gather(embedding_table, flat_input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1,
layer_norm_eps=1e-12,
):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
    dropout_prob: float. Dropout probability applied to the final output tensor.
    layer_norm_eps: float. The epsilon used by the layer normalization layer.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.compat.v1.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.debugging.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.compat.v1.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob, layer_norm_eps)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
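# Illustrative sketch (not part of the original module): for a padded batch, the 2D
# input mask broadcasts into one identical row per query position.
#
#   input_ids = tf.constant([[31, 51, 0]])    # [batch=1, seq=3], last token is padding
#   input_mask = tf.constant([[1, 1, 0]])
#   mask = create_attention_mask_from_input_mask(input_ids, input_mask)
#   # mask has shape [1, 3, 3]; every row equals [1., 1., 0.], so no position
#   # attends *to* the padding token.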
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention are done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.compat.v1.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.compat.v1.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.compat.v1.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
layer_norm_eps=1e-12,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    layer_norm_eps: float. The epsilon used by the layer normalization layers.
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.compat.v1.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.compat.v1.variable_scope("attention"):
attention_heads = []
with tf.compat.v1.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.compat.v1.variable_scope("output"):
attention_output = tf.compat.v1.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input, eps = layer_norm_eps)
# The activation is only applied to the "intermediate" hidden layer.
with tf.compat.v1.variable_scope("intermediate"):
intermediate_output = tf.compat.v1.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.compat.v1.variable_scope("output"):
layer_output = tf.compat.v1.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output, eps = layer_norm_eps)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int or list of ints. The expected rank of
      `tensor`. If this is specified and the `tensor` has a different rank,
      an exception will be thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
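# Example (sketch): for a tensor with static shape [None, 128, 768] (e.g. a
# tf.compat.v1.placeholder), get_shape_list returns [<scalar int32 Tensor>, 128, 768]:
# the dynamic batch dimension as a Tensor and the static dimensions as Python ints.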
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.compat.v1.get_variable_scope().name
raise ValueError("For the tensor `{}` in scope `{}`, the actual rank `{}` (shape = {}) is not equal to the expected rank `{}`"
.format(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| 42,818 | 38.464516 | 130 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_bert/tf_bert.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import glob
import typing
import pathlib
import datetime
import tensorflow_probability as tfp
import numpy as np
from absl import flags
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.samplers import sample_observers
from deeplearning.benchpress.samplers import validation_database
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import process
from deeplearning.benchpress.util import gpu
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.proto import sampler_pb2
from deeplearning.benchpress.proto import internal_pb2
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.models import telemetry
from deeplearning.benchpress.models import bert_flags
from deeplearning.benchpress.models.tf_bert import model
from deeplearning.benchpress.models.tf_bert import optimizer
from deeplearning.benchpress.models.tf_bert import hooks
from deeplearning.benchpress.models.tf_bert.data_generator import tfLMDataGenerator
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean("mirror_gpus", False, "Set True to distribute training across all system's GPUs. (Only usable when use_tpu is False).")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class tfBert(backends.BackendBase):
class BertEstimator(typing.NamedTuple):
"""Named tuple to wrap BERT estimator pipeline."""
estimator : typing.Any # tf.compat.v1.estimator.tpu.TPUEstimator
data_generator : tfLMDataGenerator
def __init__(self, *args, **kwargs):
super(tfBert, self).__init__(*args, **kwargs)
from deeplearning.benchpress.util import tf
tf.initTensorflow()
self.tf = tf.tf
self.bertAttrs = None
self.bert_config = None
self.train = None
self.sample = None
self.predict_generator = None
self.sampler = None
self.train_batch_size = None
self.eval_batch_size = None
self.learning_rate = None
self.num_train_steps = None
self.num_warmup_steps = None
self.ckpt_path = self._ConfigCheckpointParams()
self.logfile_path = self.cache.path / "logs"
self.sample_path = self.cache.path / "samples"
self.telemetry = telemetry.TrainingLogger(self.logfile_path)
self.is_validated = False
l.logger().info("BERT Model config initialized in {}".format(self.cache.path))
return
def _ConfigCheckpointParams(self):
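    """
    Resolve which checkpoint directory this model should use.

    If --select_checkpoint_step points to a specific step, the matching
    checkpoint files are copied into a temporary workspace (such checkpoints
    are only used for validation or sampling); otherwise the model's cached
    checkpoint directory is returned.
    """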
if FLAGS.select_checkpoint_step >= 0:
ckpt_current = self.cache.path / "checkpoints"
if not (ckpt_current / "model.ckpt-{}.index".format(FLAGS.select_checkpoint_step)).exists():
raise FileNotFoundError(ckpt_current / "model.ckpt-{}.index".format(FLAGS.select_checkpoint_step))
workspace_rel_path = self.cache.path.relative_to(pathlib.Path(os.environ.get("BENCHPRESS_CACHE")).parent)
ckpt_path = pathlib.Path("/tmp" / workspace_rel_path / "checkpoints")
ckpt_path.mkdir(exist_ok = True, parents = True)
shutil.copy2(ckpt_current / "checkpoint" , ckpt_path)
shutil.copy2(ckpt_current / "graph.pbtxt", ckpt_path)
for ckpt_file in glob.glob(str(ckpt_current / "model.ckpt-{}.*".format(FLAGS.select_checkpoint_step))):
shutil.copy2(ckpt_file, ckpt_path)
l.logger().warn("Explicit checkpoint selected. Explicit checkpoints can only be used for validation or sampling.")
elif FLAGS.select_checkpoint_step == -1:
ckpt_path = self.cache.path / "checkpoints"
else:
raise ValueError("Invalid value {} for --select_checkpoint_step".format(FLAGS.select_checkpoint_step))
l.logger().info("Configured model checkpoints in {}".format(ckpt_path))
return ckpt_path
def _ConfigModelParams(self):
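    """Collect the architecture settings of the model protobuf and build the BertConfig from them."""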
self.bertAttrs = {
"vocab_size" : self.tokenizer.vocab_size,
"hidden_size" : self.config.architecture.hidden_size,
"num_hidden_layers" : self.config.architecture.num_hidden_layers,
"num_attention_heads" : self.config.architecture.num_attention_heads,
"intermediate_size" : self.config.architecture.intermediate_size,
"hidden_act" : self.config.architecture.hidden_act,
"hidden_dropout_prob" : 1.0 - self.config.architecture.hidden_dropout_prob,
"attention_probs_dropout_prob" : 1.0 - self.config.architecture.attention_probs_dropout_prob,
"max_position_embeddings" : self.config.architecture.max_position_embeddings,
"type_vocab_size" : self.config.architecture.type_vocab_size,
"initializer_range" : self.config.architecture.initializer_range,
"layer_norm_eps" : self.config.architecture.layer_norm_eps,
}
self.bert_config = model.BertConfig.from_dict(self.bertAttrs)
return
def _ConfigTrainParams(self,
data_generator: tfLMDataGenerator
) -> None:
"""
Model parameter initialization for training and validation.
"""
if self.bert_config is None:
self._ConfigModelParams()
self.train_batch_size = self.config.training.batch_size
self.eval_batch_size = self.config.training.batch_size
self.learning_rate = self.config.training.adam_optimizer.initial_learning_rate_micros / 1e6
self.num_warmup_steps = self.config.training.num_warmup_steps
self.steps_per_epoch = data_generator.steps_per_epoch
self.num_epochs = data_generator.num_epochs
self.num_train_steps = self.steps_per_epoch * self.num_epochs
self.max_eval_steps = FLAGS.max_eval_steps
self.validation_results_file = "val_results.txt"
self.validation_results_path = os.path.join(str(self.logfile_path), self.validation_results_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = self.tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone = FLAGS.tpu_zone, project = FLAGS.gcp_project)
    train_distribute = self.tf.distribute.MirroredStrategy(num_gpus = gpu.numGPUs()) if not FLAGS.use_tpu and FLAGS.mirror_gpus else None
is_per_host = self.tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V2
run_config = self.tf.compat.v1.estimator.tpu.RunConfig(
cluster = tpu_cluster_resolver,
master = FLAGS.master,
model_dir = str(self.ckpt_path),
save_checkpoints_steps = self.steps_per_epoch,
save_summary_steps = self.steps_per_epoch,
keep_checkpoint_max = 0,
log_step_count_steps = self.steps_per_epoch,
train_distribute = train_distribute,
tpu_config = self.tf.compat.v1.estimator.tpu.TPUConfig(
iterations_per_loop = self.steps_per_epoch,
num_shards = FLAGS.num_tpu_cores,
per_host_input_for_training = is_per_host)
)
model_fn = self._model_fn_builder(
bert_config = self.bert_config
)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
self.train = tfBert.BertEstimator(self.tf.compat.v1.estimator.tpu.TPUEstimator(
use_tpu = FLAGS.use_tpu,
model_fn = model_fn,
config = run_config,
params = None,
train_batch_size = self.train_batch_size,
eval_batch_size = self.eval_batch_size,
),
data_generator
)
l.logger().info(self.GetShortSummary())
return
def _ConfigSampleParams(self,
data_generator: tfLMDataGenerator,
sampler: samplers.Sampler,
) -> None:
"""
Model parameter initialization for inference.
"""
if self.bert_config is None:
self._ConfigModelParams()
self.sampler = sampler
if sampler.sequence_length > self.bertAttrs['max_position_embeddings']:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(sampler.sequence_length, self.bertAttrs['max_position_embeddings']))
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
      tpu_cluster_resolver = self.tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone = FLAGS.tpu_zone, project = FLAGS.gcp_project)
is_per_host = self.tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V2
run_config = self.tf.compat.v1.estimator.tpu.RunConfig(
cluster = tpu_cluster_resolver,
master = FLAGS.master,
model_dir = str(self.ckpt_path),
tpu_config = self.tf.compat.v1.estimator.tpu.TPUConfig(
num_shards = FLAGS.num_tpu_cores,
per_host_input_for_training = is_per_host))
model_fn = self._model_fn_builder(bert_config = self.bert_config)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
self.sample = tfBert.BertEstimator(self.tf.compat.v1.estimator.tpu.TPUEstimator(
use_tpu = FLAGS.use_tpu,
model_fn = model_fn,
config = run_config,
params = {'sampling_temperature': sampler.temperature},
predict_batch_size = sampler.batch_size
),
data_generator
)
l.logger().info("Initialized model sampler in {}".format(self.sampler.cache.path))
return
@property
def is_trained(self):
if FLAGS.select_checkpoint_step >= 0:
return True
else:
for file_path in self.ckpt_path.iterdir():
filename = file_path.stem
if "model.ckpt-" in filename:
step_ckpt = int(filename.replace("model.ckpt-", ""))
if step_ckpt >= self.num_train_steps:
return True
return False
def samplesWithCategorical(self):
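    """Return True when sampling draws tokens categorically (temperature sampling) instead of taking the argmax."""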
return FLAGS.categorical_sampling
def Train(self,
corpus,
test_sampler: typing.Optional[samplers.Sampler] = None,
**unused_kwargs
) -> None:
"""Training bootstrap function that isolates Train process space"""
del unused_kwargs
if self.train is None:
self._ConfigTrainParams(
tfLMDataGenerator.TrainMaskLMBatchGenerator(corpus, self.config.training, self.cache.path)
)
if not FLAGS.only_sample:
process.isolate(lambda: self._Train(corpus, test_sampler))
return
def _Train(self,
corpus,
test_sampler: typing.Optional[samplers.Sampler],
) -> None:
"""Core training function"""
if not self.is_trained:
train_input_fn = self.train.data_generator.generateTfDataset(
sequence_length = self.config.training.sequence_length,
num_cpu_threads = os.cpu_count(),
use_tpu = FLAGS.use_tpu,
is_training = True
)
l.logger().info("Splitting {} steps into {} equivalent epochs, {} steps each. Rejected {} redundant step(s)".format(
self.num_train_steps, self.num_epochs,
self.steps_per_epoch, self.config.training.num_train_steps - self.num_train_steps
)
)
try:
if FLAGS.sample_per_epoch == 0:
self.train.estimator.train(input_fn = train_input_fn, max_steps = self.num_train_steps)
else:
sampler, observers = self._getTestSampler(test_sampler, self.config.training.sequence_length)
self.InitSampling(sampler, self.config.training.random_seed)
for ep in range(self.num_epochs):
self.train.estimator.train(input_fn = train_input_fn, steps = self.steps_per_epoch)
for _ in range(FLAGS.sample_per_epoch):
start_time = datetime.datetime.utcnow()
self.InitSampleBatch()
sample_batch, sample_indices = self.SampleNextIndices()
end_time = datetime.datetime.utcnow()
for sample, sind in zip(sample_batch, sample_indices):
try:
stdout = opencl.Compile(self.tokenizer.ArrayToCode(sample))
compile_flag = 1
except ValueError:
compile_flag = 0
feature_vector = extractor.ExtractFeatures(self.tokenizer.ArrayToCode(sample))
sample_proto = model_pb2.Sample(
train_step = (ep + 1) * self.steps_per_epoch,
sample_feed = sampler.start_text,
text = self.tokenizer.tokensToString(sample, ignore_token = self.tokenizer.padToken).replace("\\n", "\n"),
encoded_text = ",".join([str(t) for t in sample]),
sample_indices = '\n'.join([self.tokenizer.tokensToString(mind).replace('\n', '\\n') for mind in sind]),
encoded_sample_indices = '\n'.join([','.join([str(x) for x in mind]) for mind in sind ]),
sample_time_ms = int(round(1000 * ((end_time - start_time) / sampler.batch_size).total_seconds())),
feature_vector = "\n".join(["{}:{}".format(k, v) for (k, v) in feature_vector.items()]),
num_tokens = len(sample),
compile_status = compile_flag,
categorical_sampling = self.samplesWithCategorical(),
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
for obs in observers:
obs.OnSample(sample_proto)
except KeyboardInterrupt:
pass
if not FLAGS.force_eval:
self.Validate()
if FLAGS.force_eval and not self.is_validated:
self.Validate()
# self.telemetry.TfRecordEpochs()
return
def Validate(self) -> None:
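    """Evaluate the model on every recorded dataset split and store the results in the validation database."""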
l.logger().info("BERT Validation")
if self.max_eval_steps <= 0:
return
for tf_set in self.train.data_generator.dataset:
tf_set_paths = self.train.data_generator.dataset[tf_set]['file']
l.logger().info("BERT Validation on {}".format(', '.join([pathlib.Path(x).stem for x in tf_set_paths])))
eval_input_fn = self.train.data_generator.generateTfDataset(
sequence_length = self.config.training.sequence_length,
num_cpu_threads = os.cpu_count(),
is_training = False,
eval_set = tf_set_paths
)
result = self.train.estimator.evaluate(input_fn=eval_input_fn, steps=self.max_eval_steps)
self._writeValidation(result, tf_set)
self.is_validated = True
return
def InitSampling(self,
sampler : samplers.Sampler,
seed : typing.Optional[int] = None
) -> None:
"""This is called only once. Performs basic initialization of sampling"""
data_generator = tfLMDataGenerator.SampleMaskLMBatchGenerator(
self.config.training, sampler, self.tokenizer, seed,
self.config.architecture.max_position_embeddings, self.cache.path
)
self._ConfigSampleParams(data_generator, sampler)
l.logger().info("Initialized model samples in {}".format(self.sample_path))
return
def InitSampleBatch(self, *unused_args, **unused_kwargs) -> None:
"""Batch-specific initialization. Called once when a new batch is going to be generated"""
del unused_args
del unused_kwargs
self.sample.data_generator.InitSampleBatch()
return
def SampleNextIndices(self, *unused_args, **unused_kwargs):
"""Called iteratively to build a single batch of samples, until termination criteria stops calling"""
del unused_kwargs
del unused_args
if self.sample is None:
raise ValueError("Bert sampler has not been initialized.")
predict_input_fn = self.sample.data_generator.generateTfSamples()
predict_generator = self.sample.estimator.predict(input_fn = predict_input_fn)
output_seq, done = None, False
for step in predict_generator:
output_seq, sampleIndices = self.sample.data_generator.updateSampleBatch(
step['input_ids'], step['masked_lm_predictions']
)
return output_seq, sampleIndices
def _getTestSampler(self, test_sampler, sequence_length):
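    """Return the sampler used for per-epoch sampling (a mock sampler if none is provided) together with its sample observers."""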
if test_sampler is None:
sampler_str = [
"start_text: \"kernel void A(const double g, const double i){\\n [HOLE] = [HOLE]\\n int a = g + [HOLE]\"",
"batch_size: 2",
"sequence_length: {}".format(sequence_length),
"temperature_micros: 800000",
]
mock_config = pbutil.FromString('\n'.join(sampler_str), sampler_pb2.Sampler())
sampler = samplers.Sampler(mock_config, sample_db_name = "epoch_samples.db")
else:
sampler = test_sampler
if sampler.isFixedStr:
sampler.Specialize(self.tokenizer)
observers = [sample_observers.PrintSampleObserver()]
if FLAGS.store_samples_db:
observers.append(sample_observers.SamplesDatabaseObserver(
self.sample_path / sampler.hash / sampler.sample_db_name
)
)
sampler.symlinkModelDB(
self.sample_path / sampler.hash,
self.hash
)
return sampler, observers
def GetShortSummary(self) -> str:
l.logger().debug("deeplearning.clgen.models.tf_bert.tfBert.GetShortSummary()")
return (
f"h_s: {self.config.architecture.hidden_size}, "
f"#h_l: {self.config.architecture.num_hidden_layers}, "
f"#att_h: {self.config.architecture.num_attention_heads}, "
f"imd_s: {self.config.architecture.intermediate_size}, "
f"h_act: {self.config.architecture.hidden_act}, "
f"{model_pb2.NetworkArchitecture.Backend.Name(self.config.architecture.backend)} "
"network"
"\n"
# self.data_generator.GetShortSummary() # TODO
)
def InferenceManifest(self) -> typing.List[pathlib.Path]:
"""Return the list of files which are required for model inference.
Returns:
A list of absolute paths.
"""
# The TensorFlow save file.
l.logger().debug("deeplearning.clgen.models.tf_bert.tfBert.InferenceManifest()")
paths = [ path.absolute() for path in (self.cache.path / "checkpoints").iterdir() ]
paths += [ path.absolute() for path in (self.cache.path / "logs").iterdir() ]
paths += [ path.absolute() for path in (self.cache.path / "samples").iterdir() ]
# paths += self.data_generator.InferenceManifest # TODO
return sorted(paths)
def _writeValidation(self, result, tf_set) -> None:
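    """Insert or update the evaluation results of a dataset split in the validation results database."""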
db = validation_database.ValidationDatabase("sqlite:///{}".format(str(self.logfile_path / "validation_samples.db")))
r = [ "{}: {}".format(key, str(result[key])) for key in result.keys() ]
with db.Session(commit = True) as session:
exists = session.query(validation_database.ValResults.key).filter_by(key = str(tf_set)).scalar() is not None
if exists:
entry = session.query(validation_database.ValResults).filter_by(key = str(tf_set)).first()
entry.results = "\n".join(r)
else:
session.add(validation_database.ValResults(key = str(tf_set), results = "\n".join(r)))
return
def GetTrainingHooks(self,
tensors: typing.Dict[str, typing.Any],
log_steps: int = None,
max_steps: int = None,
output_dir: pathlib.Path = None,
**kwargs
):# -> typing.List[typing.Any("tfBert.tf.estimator.SessionRunHook")]:
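    """Assemble the training session hooks: summary averaging, tensor logging, plotting and a progress bar."""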
if log_steps is None:
log_steps = self.steps_per_epoch
if max_steps is None:
max_steps = self.num_train_steps
if output_dir is None:
output_dir = self.logfile_path
summary_tensors = ([ self.tf.compat.v1.summary.scalar(name, value)
for name, value in kwargs.items()
],
[ value for (name, value) in kwargs.items()
])
return [
hooks.AverageSummarySaverHook(tensors = summary_tensors,
save_steps = min(FLAGS.monitor_frequency, log_steps),
output_dir = str(output_dir),
),
hooks.tfLogTensorHook(tensors = tensors,
log_steps = log_steps,
at_end = True,
),
hooks.tfPlotTensorHook(tensors = summary_tensors,
log_steps = min(FLAGS.monitor_frequency, log_steps),
output_dir = output_dir,
),
hooks.tfProgressBar(max_length = max_steps),
]
def GetValidationHooks(self,
max_steps = None,
**kwargs
):# -> typing.List[self.tf.estimator.SessionRunHook]:
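    """Assemble the evaluation session hooks: a progress bar and a hook that writes results to the validation database."""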
if max_steps is None:
max_steps = self.max_eval_steps
return [
hooks.tfProgressBar(max_length = max_steps, mode = self.tf.compat.v1.estimator.ModeKeys.EVAL),
hooks.writeValidationDB(**kwargs)
]
def _model_fn_builder(self,
bert_config,
):
"""Returns `model_fn` closure for TPUEstimator."""
def _model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
seen_in_training = features["seen_in_training"]
original_input = features["original_input"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
# segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
masked_lm_lengths = features["masked_lm_lengths"]
next_sentence_labels = features["next_sentence_labels"]
is_training = (mode == self.tf.compat.v1.estimator.ModeKeys.TRAIN)
bert_model = model.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=None, # You can ignore. Used for double sentences (sA -> 0, sB ->1). Now all will be zero
use_one_hot_embeddings=FLAGS.use_tpu)
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = model._get_masked_lm_output(
bert_config, bert_model.get_sequence_output(), bert_model.get_embedding_table(),
masked_lm_positions, masked_lm_ids, masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = model._get_next_sentence_output(
bert_config, bert_model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = self.tf.compat.v1.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if (self.ckpt_path / "checkpoint").exists():
(assignment_map, initialized_variable_names
) = model.get_assignment_map_from_checkpoint(tvars, str(self.ckpt_path))
if FLAGS.use_tpu:
def _tpu_scaffold():
self.tf.compat.v1.train.init_from_checkpoint(str(self.ckpt_path), assignment_map)
return self.tf.train.Scaffold()
scaffold_fn = _tpu_scaffold
else:
if mode != self.tf.compat.v1.estimator.ModeKeys.PREDICT:
l.logger().info("Loading model checkpoint from: {}".format(str(self.ckpt_path)))
self.tf.compat.v1.train.init_from_checkpoint(str(self.ckpt_path), assignment_map)
output_spec = None
if mode == self.tf.compat.v1.estimator.ModeKeys.TRAIN:
with self.tf.compat.v1.variable_scope("training"):
train_op, learning_rate = optimizer.create_optimizer(
total_loss, self.learning_rate, self.num_train_steps, self.num_warmup_steps, FLAGS.use_tpu)
training_hooks = self.GetTrainingHooks(tensors = {'Loss': total_loss},
masked_lm_loss = masked_lm_loss,
next_sentence_loss = next_sentence_loss,
total_loss = total_loss,
learning_rate = learning_rate,
)
output_spec = self.tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode = mode,
loss = total_loss,
train_op = train_op,
training_hooks = training_hooks,
scaffold_fn = scaffold_fn)
elif mode == self.tf.compat.v1.estimator.ModeKeys.EVAL:
with self.tf.compat.v1.variable_scope("evaluation"):
def _metric_fn(masked_lm_example_loss, masked_lm_predictions, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_predictions, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_example_loss = self.tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = self.tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = self.tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = self.tf.compat.v1.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights,
name = "masked_lm_mean_loss")
masked_lm_mean_loss = self.tf.compat.v1.metrics.mean(
values=masked_lm_example_loss,
weights=masked_lm_weights,
name = "masked_lm_mean_loss")
next_sentence_labels = self.tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = self.tf.compat.v1.metrics.accuracy(
labels=next_sentence_labels,
predictions=next_sentence_predictions,
name = "next_sentence_accuracy")
next_sentence_mean_loss = self.tf.compat.v1.metrics.mean(
values=next_sentence_example_loss,
name = "next_sentence_mean_loss")
return {
'masked_lm_accuracy' : masked_lm_accuracy,
'masked_lm_loss' : masked_lm_mean_loss,
'next_sentence_accuracy' : next_sentence_accuracy,
'next_sentence_loss' : next_sentence_mean_loss,
}
masked_lm_log_probs = self.tf.reshape(
masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]
)
masked_lm_predictions = self.tf.argmax(
masked_lm_log_probs, axis=-1, output_type=self.tf.int32,# name = "masked_lm_predictions"
)
next_sentence_log_probs = self.tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]
)
next_sentence_predictions = self.tf.argmax(
next_sentence_log_probs, axis=-1, output_type=self.tf.int32,# name = "next_sentence_predictions"
)
eval_metrics = (_metric_fn, [
masked_lm_example_loss, masked_lm_predictions, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_predictions, next_sentence_labels
])
evaluation_hooks = self.GetValidationHooks(
mode = mode,
url = self.logfile_path / "validation_samples.db",
tokenizer = self.tokenizer,
seen_in_training = seen_in_training,
original_input = original_input,
input_ids = input_ids,
input_mask = input_mask,
masked_lm_positions = masked_lm_positions,
masked_lm_ids = masked_lm_ids,
masked_lm_weights = masked_lm_weights,
masked_lm_lengths = masked_lm_lengths,
next_sentence_labels = next_sentence_labels,
masked_lm_predictions = masked_lm_predictions,
next_sentence_predictions = next_sentence_predictions,
)
output_spec = self.tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode = mode,
loss = total_loss,
evaluation_hooks = evaluation_hooks,
eval_metrics = eval_metrics,
scaffold_fn = scaffold_fn)
elif mode == self.tf.compat.v1.estimator.ModeKeys.PREDICT:
with self.tf.compat.v1.variable_scope("predict"):
mask_batch_size, mask_seq_length = model.get_shape_list(masked_lm_positions, expected_rank = 2)
next_batch_size, next_seq_length = model.get_shape_list(next_sentence_labels, expected_rank = 2)
masked_lm_log_probs = self.tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]])
next_sentence_log_probs = self.tf.reshape(next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
if FLAGS.categorical_sampling:
mlm_sampler = tfp.distributions.Categorical(logits = masked_lm_log_probs / params['sampling_temperature'])
nsp_sampler = tfp.distributions.Categorical(logits = next_sentence_log_probs / params['sampling_temperature'])
masked_lm_predictions = mlm_sampler.sample()
next_sentence_predictions = nsp_sampler.sample()
else:
masked_lm_predictions = self.tf.argmax(masked_lm_log_probs, axis = -1, output_type = self.tf.int32)
next_sentence_predictions = self.tf.argmax(next_sentence_log_probs, axis = -1, output_type = self.tf.int32)
masked_lm_predictions = self.tf.reshape(masked_lm_predictions, shape = [mask_batch_size, mask_seq_length])
next_sentence_predictions = self.tf.reshape(next_sentence_predictions, shape = [next_batch_size, next_seq_length])
input_ids = self.tf.expand_dims(input_ids, 0, name = "input_ids")
masked_lm_predictions = self.tf.expand_dims(masked_lm_predictions, 0, name = "masked_lm_predictions")
next_sentence_predictions = self.tf.expand_dims(next_sentence_predictions, 0, name = "next_sentence_predictions")
prediction_metrics = {
'input_ids' : input_ids,
'masked_lm_predictions' : masked_lm_predictions,
'next_sentence_predictions' : next_sentence_predictions,
}
output_spec = self.tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
mode = mode,
predictions = prediction_metrics,
scaffold_fn = scaffold_fn)
else:
raise ValueError("{} is not a valid mode".format(mode))
return output_spec
return _model_fn
| 33,831 | 45.281806 | 142 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_bert/optimizer.py | """Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from deeplearning.benchpress.util.tf import tf
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
"""Creates an optimizer training op."""
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.compat.v1.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
tvars = tf.compat.v1.trainable_variables()
grads = tf.gradients(loss, tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op, global_step = optimizer.apply_gradients(
zip(grads, tvars), global_step=global_step)
train_op = tf.group(train_op, [global_step])
return train_op, learning_rate
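# Example usage (sketch): inside an Estimator model_fn, assuming `total_loss`
# is a scalar Tensor:
#   train_op, lr = create_optimizer(total_loss, init_lr=2e-5, num_train_steps=100000,
#                                   num_warmup_steps=10000, use_tpu=False)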
class AdamWeightDecayOptimizer(tf.compat.v1.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
if global_step is None:
global_step = tf.compat.v1.get_global_step()
global_step = tf.compat.v1.assign(global_step, global_step + 1)
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.compat.v1.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.compat.v1.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
      # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name), global_step
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
| 5,642 | 34.26875 | 80 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/github/bigQuery_database.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for databases of BenchPress samples."""
import sys
import contextlib
import pathlib
import datetime
import typing
import progressbar
import tqdm
import sqlite3
from google.cloud import bigquery
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from deeplearning.benchpress.util import sqlutil
from absl import app, flags
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_string(
"bq_database",
None,
"Insert path of BigQuery's database."
)
flags.DEFINE_integer(
"chunkify",
None,
"Select chunkifying factor to split BQ database into sub-databases to perform pseudo-distributed preprocessing."
)
Base = declarative.declarative_base()
class bqData(Base):
__tablename__ = "data"
"""
  DB table holding key/value metadata about the BigQuery corpus query.
"""
key : str = sql.Column(sql.String(1024), primary_key=True)
value : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
@staticmethod
def bqSchema() -> typing.List[bigquery.SchemaField]:
return [
bigquery.SchemaField("key", "STRING", mode = "REQUIRED"),
bigquery.SchemaField("value", "STRING", mode = "REQUIRED"),
]
class bqFile():
"""
  A database entry representing a single source file returned by a BigQuery query.
"""
id : int = sql.Column(sql.String(64), primary_key = True)
repo_name : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
ref : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
path : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
size : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
content : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
row: bigquery.Row
) -> typing.Dict[str, typing.Any]:
return {
"id" : row['id'],
"repo_name" : row['repo_name'],
"ref" : row['ref'],
"path" : row['path'],
"size" : row['size'] if row['size'] else "None",
"content" : row['content'] if row['content'] else "None",
"date_added" : datetime.datetime.utcnow(),
}
@staticmethod
def bqSchema() -> typing.List[bigquery.SchemaField]:
return [
bigquery.SchemaField("id", "STRING", mode = "REQUIRED"),
bigquery.SchemaField("repo_name", "STRING", mode = "REQUIRED"),
bigquery.SchemaField("ref", "STRING", mode = "REQUIRED"),
bigquery.SchemaField("path", "STRING", mode = "REQUIRED"),
bigquery.SchemaField("size", "INTEGER", mode = "REQUIRED"),
bigquery.SchemaField("content", "STRING", mode = "REQUIRED"),
]
def ToJSONDict(self) -> typing.Dict[str, typing.Any]:
return {
"id" : self.id,
"repo_name" : self.repo_name,
"ref" : self.ref,
"path" : self.path,
"size" : self.size,
"content" : self.content,
"date_added" : str(self.date_added.strftime("%m/%d/%Y, %H:%M:%S")),
}
class bqMainFile(Base, bqFile):
"""Abstract representation of main queried files."""
__tablename__ = "main_files"
class bqOtherFile(Base, bqFile):
"""Abstract representation of other-to-main-language queried files."""
__tablename__ = "other_files"
class bqRepo(Base):
"""
  A database entry representing a GitHub repository that contributed contentfiles.
"""
__tablename__ = "repositories"
id : int = sql.Column(sql.Integer, primary_key = True)
repo_name : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
ref : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
id: int,
row: bigquery.Row
) -> typing.Dict[str, typing.Any]:
return {
"id" : id,
"repo_name" : row['repo_name'],
"ref" : row['ref'],
"date_added" : datetime.datetime.utcnow(),
}
@staticmethod
def bqSchema() -> typing.List[bigquery.SchemaField]:
return [
bigquery.SchemaField("repo_name", "STRING", mode = "REQUIRED"),
bigquery.SchemaField("ref", "STRING", mode = "REQUIRED"),
]
def ToJSONDict(self) -> typing.Dict[str, typing.Any]:
return {
"id" : self.id,
"repo_name" : self.repo_name,
"ref" : self.ref,
"date_added" : str(self.date_added.strftime("%m/%d/%Y, %H:%M:%S")),
}
class bqDatabase(sqlutil.Database):
"""A database of BigQuery contentfiles."""
def __init__(self, url: str, must_exist: bool = False):
super(bqDatabase, self).__init__(url, Base, must_exist = must_exist)
def main_files_batch(self, limit: int, offset: int, exclude_id: typing.Set[str] = set()) -> typing.List[bqMainFile]:
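    """Return up to `limit` main files starting at `offset`, skipping files whose ids appear in `exclude_id` (matched ids are consumed from the set)."""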
with self.Session() as s:
total_batch = s.query(bqMainFile).limit(limit).offset(offset).all()
batch = [f for f in total_batch if f.id not in exclude_id]
if exclude_id:
for vid in [f.id for f in total_batch if f.id in exclude_id]:
exclude_id.remove(vid)
return batch
##### Main file properties
@property
def main_files(self) -> typing.List[bqMainFile]:
with self.Session() as s:
return s.query(bqMainFile).yield_per(100000)
@property
def mainfile_entries(self) -> typing.Set[typing.Tuple[str, str]]:
with self.Session() as s:
return set(s.query(bqMainFile.repo_name, bqMainFile.path).all())
@property
def main_ids(self) -> typing.Set[str]:
with self.Session() as s:
return set(x[0] for x in s.query(bqMainFile.id).all())
@property
def mainfile_count(self) -> int:
with self.Session() as s:
return s.query(bqMainFile).count()
@property
def main_repo_count(self) -> int:
with self.Session() as s:
return s.query(bqMainFile.repo_name, bqMainFile.ref).distinct().count()
##### Other file properties
@property
def other_files(self) -> typing.List[bqOtherFile]:
with self.Session() as s:
return s.query(bqOtherFile).all()
@property
def otherfile_entries(self) -> typing.Set[typing.Tuple[str, str]]:
with self.Session() as s:
return set(s.query(bqOtherFile.repo_name, bqOtherFile.path).all())
@property
def other_ids(self) -> typing.Set[str]:
with self.Session() as s:
return set(x[0] for x in s.query(bqOtherFile.id).all())
@property
def otherfile_count(self) -> int:
with self.Session() as s:
return s.query(bqOtherFile).count()
@property
def other_repo_count(self) -> int:
with self.Session() as s:
return s.query(bqOtherFile.repo_name, bqOtherFile.ref).distinct().count()
##### Repository table properties
@property
def loadRepos(self) -> typing.Set[typing.Tuple[str, str]]:
with self.Session() as s:
return set((e.repo_name, e.ref) for e in s.query(bqRepo))
@property
def repo_count(self) -> int:
"""
Get number of repos in bqRepo table.
"""
with self.Session() as s:
return s.query(bqRepo).count()
##### Data
@property
def data(self) -> bqData:
"""
Get bqData entry from table.
"""
with self.Session() as s:
return s.query(bqData).first()
def chunkify_db(bq_db: bqDatabase, chunks: int, prefix: str) -> None:
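  """Split the main-file table of a BigQuery contentfile database into `chunks` roughly equal sub-databases named <prefix>_<idx>.db."""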
out_dbs = [bqDatabase(url = "sqlite:///{}_{}.db".format(prefix, idx)) for idx in range(chunks)]
total_files = bq_db.mainfile_count
chunk_size = total_files // chunks
idx = 0
for db_idx, db in enumerate(out_dbs):
l.logger().info("Writing db_{}...".format(db_idx))
batch = bq_db.main_files_batch(limit = chunk_size + (chunks if db_idx == chunks - 1 else 0), offset = idx)
with db.Session() as s:
bar = progressbar.ProgressBar(max_value = len(batch))
l.logger().info(len(batch))
for f in bar(batch):
s.add(bqMainFile(**bqMainFile.FromArgs(f.ToJSONDict())))
idx += 1
l.logger().info("commit")
s.commit()
return
def file_size_distribution(db: bqDatabase) -> None:
"""
Plot the distribution of size of each entry.
"""
m = monitors.FrequencyMonitor(".", "bq_size_distrib")
with db.Session() as s:
for x in tqdm.tqdm(s.query(bqMainFile.size).all(), total = db.mainfile_count):
try:
y = int(str(x[0]))
m.register(y)
except Exception:
pass
m.plot()
print(m.getData())
return
def reduce_database_by_size(db: bqDatabase, out_db: bqDatabase) -> None:
"""
Reduce BQ database by files that are too massive anyway.
"""
with db.Session() as s:
data = []
for x in s.query(bqMainFile).all():
try:
if int(str(x.size)) < 10**6:
data.append(x)
except ValueError:
pass
l.logger().info("BQ Database reduced from {} to {} files".format(db.mainfile_count, len(data)))
with out_db.Session(commit = True) as s:
for dp in data:
s.add(bqMainFile(
id = dp.id,
repo_name = "",
ref = "",
path = "",
size = dp.size,
content = dp.content,
date_added = datetime.datetime.utcnow(),
)
)
s.commit()
return
def initMain(*args, **kwargs):
"""
Setup module's operations.
"""
l.initLogger(name = "bigQuery_database")
# file_size_distribution(bqDatabase(url = "sqlite:///{}".format("/private/home/foivos/clgen_c_github.db", must_exist = True)))
reduce_database_by_size(
    bqDatabase(url = "sqlite:///{}".format("/private/home/foivos/clgen_c_github.db"), must_exist = True),
    bqDatabase(url = "sqlite:///{}".format("/private/home/foivos/reduced1M_clgen_c_github.db"), must_exist = False),
)
return
  if FLAGS.chunkify and FLAGS.chunkify >= 2:
if not FLAGS.bq_database:
raise ValueError("You must set a path for bq_database")
bq_db_path = pathlib.Path(FLAGS.bq_database).resolve()
if not bq_db_path.exists():
raise FileNotFoundError(bq_db_path)
bq_db = bqDatabase(url = "sqlite:///{}".format(str(bq_db_path)), must_exist = True)
chunkify_db(bq_db, FLAGS.chunkify, prefix = "{}/{}".format(bq_db_path.parent, bq_db_path.stem))
else:
l.logger().warn("Chunkify has not been set or has been set to less than 2. Nothing to do, exiting...")
return
if __name__ == "__main__":
app.run(initMain)
sys.exit(0)
| 11,405 | 32.745562 | 128 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/github/storage.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigQuery Dataset structures"""
import os
import sys
import subprocess
import typing
import json
import shutil
import pathlib
import progressbar
import humanize
import functools
from google.cloud import bigquery
from deeplearning.benchpress.proto import github_pb2
from deeplearning.benchpress.github import bigQuery_database as bqdb
from deeplearning.benchpress.util import logging as l
class Storage(object):
@classmethod
def FromArgs(cls,
path: pathlib.Path,
name: str,
extension: str,
data_format: int
):
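    """Dispatch construction to the concrete Storage subclass that matches the requested data format."""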
return {
github_pb2.GithubMiner.DataFormat.zip : zipStorage,
github_pb2.GithubMiner.DataFormat.folder : fileStorage,
github_pb2.GithubMiner.DataFormat.json : functools.partial(JSONStorage, with_zip = False),
github_pb2.GithubMiner.DataFormat.jsonzip: functools.partial(JSONStorage, with_zip = True),
github_pb2.GithubMiner.DataFormat.sql : dbStorage,
github_pb2.GithubMiner.DataFormat.bq : bqStorage,
}[data_format](path, name, extension)
def __init__(self,
path: pathlib.Path,
name: str,
extension: str):
self.cache_path = path
self.cache_path.mkdir(exist_ok = True)
self.name = name
self.extension = extension
return
def __enter__(self):
return self
def __exit__(self, path, name, extension):
return
def save(self):
raise NotImplementedError("Abstract Class")
def flush(self):
pass
class zipStorage(Storage):
@property
def repocount(self):
return len(self.repos)
@property
def filecount(self):
return self.file_count
@property
def loadRepos(self):
raise NotImplementedError("Open ZIP files and read repos_list.json")
def __init__(self,
path: pathlib.Path,
name: str,
extension: str
):
super(zipStorage, self).__init__(path, name, extension)
self.cached_content = []
self.flush_counter = 20000
self.file_count = 0
self.repos = self.loadRepos
self.data_file = ""
l.logger().info("Set up ZIP storage in {}".format(self.cache_path))
def __exit__(self, path, name, extension):
self.zipFiles()
return
def save(self,
contentfile: typing.Union[
bqdb.bqData,
bqdb.bqFile,
bqdb.bqRepo
]
) -> None:
if isinstance(contentfile, bqdb.bqFile):
      self.cached_content.append(contentfile)
self.file_count += 1
if self.file_count % self.flush_counter == 0:
self.zipFiles()
self.repos.add((contentfile.repo_name, contentfile.ref))
elif isinstance(contentfile, bqdb.bqData):
self.data_file = "{}\n\n{}".format(contentfile.key, contentfile.value)
elif isinstance(contentfile, bqdb.bqRepo):
self.repos.add((contentfile.repo_name, contentfile.ref))
return
def zipFiles(self) -> None:
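    """Write the cached contentfiles, the query data summary and the repository list to a temporary folder and zip it into the cache path."""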
tmp_root = pathlib.Path("/tmp/bqZipStorageTMP/corpus")
tmp_root.mkdir(exist_ok = True, parents = True)
for cf in self.cached_content:
with open(tmp_root / pathlib.Path(cf.path).name, 'w') as f:
        f.write(cf.content)
with open(tmp_root / "data.txt", 'w') as f:
f.write(self.data_file)
with open(tmp_root / "repos_list.json", 'w') as f:
json.dump(
[
{
'repo_name': rn,
'ref': rf
} for rn, rf in self.repos
],
f,
sort_keys = True,
indent = 2
)
p = os.getcwd()
os.chdir(tmp_root.parent)
cmd = subprocess.Popen(
"zip -qr -9 {} {}".format(self.cache_path / (self.name + ".zip"), tmp_root.name).split(),
stdout = sys.stdout,
stderr = sys.stderr
)
try:
out, err = cmd.communicate()
if err:
raise OSError(err)
shutil.rmtree(tmp_root)
except Exception as e:
raise e
finally:
os.chdir(p)
return
class fileStorage(Storage):
@property
def repocount(self):
return len(self.repos)
@property
def filecount(self):
return self.file_count
@property
def loadRepos(self):
if (self.cache_path / "repos_list.json").exists():
with open(self.cache_path / "repos_list.json", 'r') as f:
repos = json.load(f)
return [(repo['repo_name'], repo['ref']) for repo in repos]
else:
return set()
def __init__(self,
path: pathlib.Path,
name: str,
extension: str
):
super(fileStorage, self).__init__(path, name, extension)
self.cache_path = self.cache_path / self.name
(self.cache_path).mkdir(exist_ok = True)
self.repos = self.loadRepos
l.logger().info("Set up folder storage in {}".format(self.cache_path))
def __exit__(self, path, name, extension) -> None:
with open(self.cache_path / "repos_list.json", 'w') as f:
json.dump(
[
{
'repo_name': rn,
'ref': rf
        } for rn, rf in self.repos
],
f,
sort_keys = True,
indent = 2
)
return
def save(self,
contentfile: typing.Union[
bqdb.bqData,
bqdb.bqFile,
bqdb.bqRepo
]
) -> None:
if isinstance(contentfile, bqdb.bqFile):
with open(self.cache_path / pathlib.Path(contentfile.path).name, 'w') as f:
f.write(contentfile.content)
self.repos.add((contentfile.repo_name, contentfile.ref))
elif isinstance(contentfile, bqdb.bqData):
with open(self.cache_path / "data.txt", 'w') as f:
f.write("{}\n\n{}".format(contentfile.key, contentfile.value))
elif isinstance(contentfile, bqdb.bqRepo):
self.repos.add((contentfile.repo_name, contentfile.ref))
return
class JSONStorage(Storage):
@property
def repocount(self):
return len(self.repos)
@property
def filecount(self):
return self.file_count
@property
def loadRepos(self):
if (self.cache_path / "repos_list.json").exists():
with open(self.cache_path / "repos_list.json", 'r') as f:
repos = json.load(f)
return [(repo['repo_name'], repo['ref']) for repo in repos]
else:
return set()
def __init__(self,
path: pathlib.Path,
name: str,
extension: str,
with_zip: bool,
):
super(JSONStorage, self).__init__(path, name, extension)
self.cache_path = self.cache_path / self.name
(self.cache_path).mkdir(exist_ok = True)
self.with_zip = with_zip
self.jsonfile_count = 0
self.file_count = 0
self.files = []
self.repos = self.loadRepos
self.data = ""
l.logger().info("Set up JSON storage in {}".format(self.cache_path))
return
def __exit__(self, path, name, extension):
if len(self.files) > 0:
self._flush_json()
with open(self.cache_path / "repos_list.json", 'w') as outf:
json.dump(
[
{
'repo_name': rn,
'ref': rf
} for rn, rf in self.repos
],
outf,
sort_keys = True,
indent = 2
)
self.repos = set()
with open(self.cache_path / "data.txt", 'w') as outf:
outf.write(self.data)
self.data = ""
return
def save(self,
contentfile: typing.Union[
bqdb.bqData,
bqdb.bqFile,
bqdb.bqRepo
]
) -> None:
if isinstance(contentfile, bqdb.bqData):
self.data = "{}\n\n{}".format(contentfile.key, contentfile.value)
elif isinstance(contentfile, bqdb.bqRepo):
self.repos.add((contentfile.repo_name, contentfile.ref))
else:
self.files.append(contentfile.ToJSONDict())
self.file_count += 1
self.repos.add((contentfile.repo_name, contentfile.ref))
      if self.file_count % 500000 == 0:
self._flush_json()
return
def _flush_json(self) -> None:
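    """Dump the cached file entries into a numbered JSON file (zipping it if requested) and reset the in-memory cache."""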
filename = lambda ext: "{}.{}".format(self.jsonfile_count, ext)
with open(self.cache_path / filename("json"), 'w') as outf:
json.dump(self.files, outf, indent = 2)
if self.with_zip:
p = os.getcwd()
os.chdir(self.cache_path)
cmd = subprocess.Popen(
"zip -qr -9 {} {}".format(filename("zip"), filename("json")).split(),
stdout = sys.stdout,
stderr = sys.stderr
)
try:
out, err = cmd.communicate()
os.remove(filename("json"))
if err:
raise OSError(err)
except Exception as e:
raise e
finally:
os.chdir(p)
self.jsonfile_count += 1
self.files = []
return
class dbStorage(Storage):
@property
def repocount(self):
return len(self.repos)
@property
def main_repocount(self):
return self.db.main_repo_count
@property
def other_repocount(self):
return self.db.other_repo_count
@property
def filecount(self):
return self.maincount + self.othercount
@property
def maincount(self):
return self.db.mainfile_count + len(self.main_files)
@property
def othercount(self):
return self.db.otherfile_count + len(self.other_files)
@property
def mainfiles(self):
return self.db.main_files
@property
def otherfiles(self):
return self.db.other_files
@property
def loadRepos(self):
return self.repos
@property
def content_data(self):
return self.db.data
def __init__(self,
path: pathlib.Path,
name: str,
extension: str
):
super(dbStorage, self).__init__(path, name, extension)
self.db = bqdb.bqDatabase("sqlite:///{}".format(self.cache_path / (self.name + ".db")))
self.main_ids = self.db.main_ids
self.other_ids = self.db.other_ids
self.repos = self.db.loadRepos
self.main_files = set()
self.other_files = set()
self.data = None
self.flush_freq = 20000
l.logger().info("Set up SQL storage in {}".format(self.cache_path))
def __exit__(self, path, name, extension):
self.flush()
return
def save(self,
contentfile: typing.Union[
bqdb.bqData,
bqdb.bqFile,
bqdb.bqRepo
]
) -> None:
if isinstance(contentfile, bqdb.bqData):
self.data = contentfile
elif isinstance(contentfile, bqdb.bqRepo):
self.repos.add((contentfile.repo_name, contentfile.ref))
else: # bqFile.
if isinstance(contentfile, bqdb.bqMainFile):
if contentfile.id not in self.main_ids:
self.repos.add((contentfile.repo_name, contentfile.ref))
self.main_ids.add(contentfile.id)
self.main_files.add(contentfile)
if len(self.main_files) > self.flush_freq:
self.flushToDB(self.main_files)
self.main_files = set()
elif isinstance(contentfile, bqdb.bqOtherFile):
if contentfile.id not in self.other_ids:
self.repos.add((contentfile.repo_name, contentfile.ref))
self.other_ids.add(contentfile.id)
self.other_files.add(contentfile)
if len(self.other_files) > self.flush_freq:
self.flushToDB(self.other_files)
self.other_files = set()
return
def flush(self):
"""Flushes all cached data to DB."""
## Write data
if self.data is not None:
with self.db.Session(commit = True) as session:
entry = session.query(
bqdb.bqData
).filter_by(key = self.data.key).first()
if entry is not None:
entry.value = self.data.value
else:
session.add(self.data)
## Write repos
if self.repocount > self.db.repo_count:
for en, (repo_name, ref) in enumerate(self.repos):
content = bqdb.bqRepo(**bqdb.bqRepo.FromArgs(
self.db.repo_count + en, {'repo_name': repo_name, 'ref': ref})
)
with self.db.Session(commit = True) as session:
exists = session.query(
bqdb.bqRepo
).filter_by(repo_name = content.repo_name, ref = content.ref).scalar() is not None
if not exists:
session.add(content)
if len(self.main_files) > 0:
self.flushToDB(self.main_files)
self.main_files = set()
if len(self.other_files) > 0:
self.flushToDB(self.other_files)
self.other_files = set()
return
def flushToDB(self, files: typing.Set[bqdb.bqFile]) -> None:
with self.db.Session(commit = True) as session:
for file in files:
session.add(file)
return
class bqStorage(Storage):
@property
def repocount(self):
return 0 # TODO
@property
def filecount(self):
return 0 # TODO
  def __init__(self,
               path: pathlib.Path,
               name: str,
               extension: str
               ):
    super(bqStorage, self).__init__(path, name, extension)
def save(self,
contentfile: bqdb.bqFile
) -> None:
raise NotImplementedError
| 13,868 | 26.905433 | 98 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/github/miner.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Github mining configuration"""
import json
import os
import io
import re
import time
import requests
import functools
import sys
import typing
import pathlib
import github
import progressbar
import copy
import numpy as np
from absl import flags
from base64 import b64decode
from google.cloud import bigquery
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.proto import github_pb2
from deeplearning.benchpress.github import datasets
from deeplearning.benchpress.github import storage
from deeplearning.benchpress.github import bigQuery_database
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"bq_force_update",
False,
"Select to force querying data in a seemingly updated satorage."
)
flags.DEFINE_string(
"exclude_repos_from_db",
None,
"Specify repo-db to bypass repositories in recursive fetcher."
)
flags.DEFINE_string(
"enhance_from_db",
None,
"Specify bq DB to enhance corpus with contentfiles"
)
flags.DEFINE_boolean(
"remove_identical_files",
False,
"Select to load all files, calculate hashes and remove duplicates."
)
flags.DEFINE_boolean(
"export_db",
False,
"Dumps bigquery database to folder of files."
)
class GithubMiner(object):
"""Base abstract class of a github miner"""
@classmethod
def FromConfig(cls, config: github_pb2.GithubMiner):
"""Constructs github miner from protobuf configuration."""
try:
pbutil.AssertFieldIsSet(config, "path")
pbutil.AssertFieldIsSet(config, "data_format")
pbutil.AssertFieldIsSet(config, "miner")
if config.HasField("big_query"):
pbutil.AssertFieldIsSet(config.big_query, "credentials")
pbutil.AssertFieldConstraint(
config.big_query,
"language",
lambda x: x in {'generic', 'opencl', 'c', 'cpp', 'java', 'python'},
"language must be one of opencl, c, cpp, java, python. 'generic' for language agnostic queries.",
)
if config.big_query.HasField("export_corpus"):
pbutil.AssertFieldIsSet(config.big_query.export_corpus, "data_format")
pbutil.AssertFieldIsSet(config.big_query.export_corpus, "access_token")
return BigQuery(config)
elif config.HasField("recursive"):
pbutil.AssertFieldIsSet(config.recursive, "access_token")
pbutil.AssertFieldConstraint(
config.recursive,
"flush_limit_K",
lambda x: x>0,
"flush limit cannot be non-positive."
)
pbutil.AssertFieldConstraint(
config.recursive,
"corpus_size_K",
lambda x: x >= -1,
"corpus size must either be -1 or non-negative."
)
if config.data_format != github_pb2.GithubMiner.DataFormat.folder:
raise NotImplementedError("RecursiveFetcher only stores files in local folder.")
return RecursiveFetcher(config)
else:
raise SystemError("{} miner not recognized".format(config))
except Exception as e:
raise e
def __init__(self):
return
def fetch(self) -> None:
raise NotImplementedError("Abstract class")
class BigQuery(GithubMiner):
def __init__(self,
config: github_pb2.GithubMiner
):
super(BigQuery, self).__init__()
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = str(pathlib.Path(config.big_query.credentials, must_exist = True))
self.cache_path = pathlib.Path(config.path, must_exist = False).expanduser().resolve()
self.cache_path.mkdir(exist_ok = True, parents = True)
self.config = config
l.logger().info("Initializing BigQuery miner in {}".format(self.cache_path))
job_config = bigquery.QueryJobConfig(allowLargeResults = True)
job_config.allow_large_results = True
self.client = bigquery.Client(default_query_job_config = job_config)
self.dataset = datasets.Dataset.FromArgs(self.client, self.config.big_query.language)
self.storage = storage.Storage.FromArgs(self.cache_path, self.dataset.name, self.dataset.extension, self.config.data_format)
return
def fetch(self):
if FLAGS.export_db:
folder = self.cache_path / "export_files"
folder.mkdir(exist_ok = True, parents = True)
with progressbar.ProgressBar(max_value = self.storage.maincount, prefix = "Export") as bar:
for mf in bar(self.storage.db.main_files):
with open(folder / mf.id, 'w') as outf:
outf.write(mf.content)
return
self._query_github()
if self.config.big_query.export_corpus.inline_headers:
self._export_corpus()
return
def _query_github(self) -> None:
"""Apply bigQuery requests to get all contentfiles"""
with self.storage as st:
if st.content_data is not None and not FLAGS.bq_force_update:
l.logger().info("Query storage has been updated. Skipping...")
return
mainf_it, otherf_it = self.dataset.contentfile_query()
if mainf_it:
with progressbar.ProgressBar(max_value = mainf_it.total_rows, prefix = "Main Files") as bar:
try:
for mf in bar(mainf_it):
st.save(
bigQuery_database.bqMainFile(**bigQuery_database.bqMainFile.FromArgs(mf))
)
except KeyboardInterrupt:
pass
st.flush()
if otherf_it:
with progressbar.ProgressBar(max_value = otherf_it.total_rows, prefix = "Other Files") as bar:
try:
for of in bar(otherf_it):
st.save(
bigQuery_database.bqOtherFile(**bigQuery_database.bqOtherFile.FromArgs(of))
)
except KeyboardInterrupt:
pass
st.flush()
# Get repository list of requested file specifications.
# If contentfile_query has taken place, use cached results instead of re-querying.
if mainf_it or otherf_it:
mainrep_it, otherrep_it = None, None
else:
mainrep_it, otherrep_it = self.dataset.repository_query()
main_repo_count = None
if mainrep_it:
with progressbar.ProgressBar(max_value = mainrep_it.total_rows, prefix = "Main Repos") as bar:
for mr in bar(mainrep_it):
st.save(
bigQuery_database.bqRepo(**bigQuery_database.bqRepo.FromArgs(st.repocount, mr))
)
main_repo_count = st.repocount
st.flush()
other_repo_count = None
if otherrep_it:
with progressbar.ProgressBar(max_value = otherrep_it.total_rows, prefix = "Other Repos") as bar:
for orep in bar(otherrep_it):
st.save(
bigQuery_database.bqRepo(**bigQuery_database.bqRepo.FromArgs(st.repocount, orep))
)
other_repo_count = st.repocount - main_repo_count
st.flush()
# Filecount of requested file specifications.
# Use cached results if contentfile has taken place.
if mainf_it or otherf_it:
self.dataset.filecount = (mainf_it.total_rows if mainf_it else 0, otherf_it.total_rows if otherf_it else 0)
mainfile_count, otherfile_count = self.dataset.filecount
if main_repo_count is None:
main_repo_count = st.main_repocount
if other_repo_count is None:
other_repo_count = st.other_repocount
query_data = [
"main_contentfiles : {}".format(mainfile_count),
"other_contentfiles: {}".format(otherfile_count),
"total_contentfiles: {}".format(mainfile_count + otherfile_count),
"",
"main_repositories : {}".format(main_repo_count),
"other_repositories: {}".format(other_repo_count),
"total_repositories: {}".format(st.repocount),
]
st.save(bigQuery_database.bqData(key = self.dataset.name, value = '\n'.join(query_data)))
return
def _export_corpus(self) -> None:
"""
Get all raw files requested from BQ and export them to CLGEN corpus.
The most important aspect is inlining includes into the source files.
In case the selected storage type is SQL DB, all needed header files
will be found in bq_header_contentfiles table and will be drawn from there.
The original storage DB can be diminished in size, by deleting the header
files that were not eventually used.
"""
export_storage = storage.Storage.FromArgs(
self.cache_path,
"export_{}".format(self.dataset.name),
self.dataset.extension,
self.config.data_format
)
g = github.Github(self.config.big_query.export_corpus.access_token)
iterated_history = export_storage.db.mainfile_entries
iterated_history.update(export_storage.db.otherfile_entries)
with export_storage as st:
with progressbar.ProgressBar(max_value = self.storage.maincount) as bar:
for cf in bar(self.storage.mainfiles):
if (cf.repo_name, cf.path) in iterated_history:
continue
try:
rem = g.get_rate_limit().rate.remaining
while rem < 100:
time.sleep(1)
print('\r\033[KWaiting on rate limit: {}'.format(rem), sep='', end='')
sys.stdout.flush()
rem = g.get_rate_limit().rate.remaining
repo = g.get_repo(cf.repo_name)
cf = self._inline_headers(repo, cf.ref, cf)
st.save(
bigQuery_database.bqMainFile(**bigQuery_database.bqMainFile.FromArgs(cf.ToJSONDict()))
)
except Exception as e:
st.flush()
if "404" in str(e):
l.logger().error("Not found: {}-{}".format(cf.repo_name, cf.path))
st.save(
bigQuery_database.bqMainFile(**bigQuery_database.bqMainFile.FromArgs(cf.ToJSONDict()))
)
else:
raise e
with progressbar.ProgressBar(max_value = self.storage.othercount) as bar:
try:
for cf in bar(self.storage.otherfiles):
if (cf.repo_name, cf.path) in iterated_history:
continue
### Rate limit
rem = g.get_rate_limit().rate.remaining
while rem < 100:
time.sleep(1)
print("Waiting on rate limit: {}".format(rem), sep='', end='')
sys.stdout.flush()
rem = g.get_rate_limit().rate.remaining
### Save file if repo not found
try:
repo = g.get_repo(cf.repo_name)
except github.GithubException as e:
if "Not Found" in str(e):
st.save(
                  bigQuery_database.bqOtherFile(**bigQuery_database.bqOtherFile.FromArgs(cf.ToJSONDict()))
)
continue
else:
raise e
cf = self._inline_headers(repo, cf.ref, cf)
st.save(
              bigQuery_database.bqOtherFile(**bigQuery_database.bqOtherFile.FromArgs(cf.ToJSONDict()))
)
except Exception as e:
st.flush()
raise e
return
def _inline_headers(self,
repo : github.Repository.Repository,
ref : str,
content: bigQuery_database.bqFile,
) -> str:
## Do the same as inlineHeaders
# 1. Parse file for #include
# 2. Resolve include path
# 3. Ping DB to get it
# 4. Do BFS on included
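    # Include resolution strategy (as implemented below): starting from the
    # including file's directory, scan sibling entries (descending into
    # sub-directories) for a path that ends with the include name; if nothing
    # matches, retry one directory level further up.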
def get_included_file(file_path : pathlib.Path,
incl_path : pathlib.Path,
) -> github.ContentFile.ContentFile:
parent_folder = file_path.parent
if parent_folder == file_path:
return None
folder_files = repo.get_contents(str(parent_folder), ref = ref)
while folder_files:
file = folder_files.pop(0)
        if file.path == str(file_path):
continue
elif file.type == "dir":
folder_files.extend(repo.get_contents(file.path, ref = ref))
elif file.path.endswith(str(incl_path)):
return file
return get_included_file(parent_folder, incl_path)
inlined_cf = []
inlined_paths = set()
inlined_paths.add(content.path)
include_exist = True
while include_exist:
include_exist = False
for line in content.content.split('\n'):
        match = re.match(re.compile(r'\w*#include ["<](.*)[">]'), line)
if match:
include_exist = True
include_path = match.group(1)
# Try and resolve relative paths
include_path = pathlib.Path(include_path.replace('../', ''))
incl_file = get_included_file(pathlib.Path(content.path), include_path)
if incl_file and incl_file.path not in inlined_paths:
inlined_paths.add(incl_file.path)
inlined_cf.append("// [FETCH] included: {}\n".format(line))
if incl_file.size < 1*1000*1000:
inlined_cf.append(incl_file.content)
else:
response = json.loads(requests.get(
incl_file.git_url, headers={'Authorization': 'token {}'.format(self.config.big_query.export_corpus.access_token)}
).content.decode('utf-8'))
incl_cf = b64decode(response['content']).decode('utf-8')
inlined_cf.append(incl_cf)
inlined_cf.append('// [FETCH] eof({})'.format(line))
else:
if not incl_file:
inlined_cf.append('// [FETCH] didnt find: {}'.format(line))
else:
inlined_cf.append('// [FETCH] skipped: {}'.format(line))
else:
inlined_cf.append(line)
content.content = '\n'.join(inlined_cf)
inlined_cf = []
return content
class RecursiveFetcher(GithubMiner):
"""GitHub API wrapper to pull from github a fresh corpus of OpenCL kernels"""
class GithubRepoHandler():
"""Repo manager for recursive fetcher"""
class GithubRepo():
"""Class representation of a single github Repo."""
def __init__(self, **kwargs):
# url of a repo is immutable.
self.url = kwargs.get('url')
if kwargs:
self.update(**kwargs)
return
def update(self,
url : str,
owner : str,
name : str,
fork : int,
stars : str,
contributors : int,
forks : str,
created_at : str,
updated_at : str):
if url != self.url:
raise ValueError("Updated url of already existent repo does not match.")
self.owner = owner
self.name = name
self.fork = fork
self.stars = stars
self.contributors = contributors
self.forks = forks
self.created_at = created_at
self.updated_at = updated_at
return
class GithubFile():
"""Class representation of a single github file."""
def __init__(self, **kwargs):
# url of a file is immutable
self.url = kwargs.get('url')
self.size = 0
if kwargs:
self.update(**kwargs)
def update(self,
url : str,
contents : str,
path : str,
repo_url : str,
sha : str,
size : int):
if url != self.url:
raise ValueError("Updated url of already existent file does not match.")
self.contents = contents
self.path = path
self.repo_url = repo_url
self.sha = sha
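        # Return only the size delta (new size minus previously cached size) so
        # that GithubRepoHandler.update_file can track how many bytes are
        # pending before the next flush to disk.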
if self.size != 0:
current_size = size - self.size
else:
current_size = size
self.size = size
return current_size
def __init__(self,
corpus_path: str,
corpus_size: int,
flush_limit: int,
):
## Use this to read a json file with all current sha files
## And of course to append the json file every time you flush
## ..and to flush
self.cache_path = corpus_path
self.stored_file_idx = "record.json"
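      # record.json layout (see appendHistory): a two-element list of
      # [ {repo_url: updated_at, ...}, {"total_files": <int>} ].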
self.updated_length = 0
self._scraped_repos = {}
self._stored_repos = {}
self._scraped_files = {}
self.repos_new_counter = 0
self.repos_modified_counter = 0
self.repos_unchanged_counter = 0
self.repos_stored_counter = 0
self.files_new_counter = 0
self.files_modified_counter = 0
self.files_unchanged_counter = 0
self.file_size_counter = 0
self.file_size_limit = flush_limit
self.collectHistory()
self.is_finished = False if (corpus_size // 1000) == -1 else (self.updated_length >= corpus_size)
return
def collectHistory(self) -> None:
storage_file = os.path.join(self.cache_path, self.stored_file_idx)
if os.path.isfile(storage_file):
with open(storage_file, 'r') as f:
try:
data = json.load(f)
assert len(data) == 2, "Wrong format of kernel history provided"
self._stored_repos = data[0]
self.updated_length = data[1]['total_files']
except json.JSONDecodeError:
l.logger().warn("Problem encountered with reading kernel file record.")
return
def appendHistory(self) -> None:
storage_file = os.path.join(self.cache_path, self.stored_file_idx)
with open(storage_file, 'w') as f:
json.dump(
[self._stored_repos,
{'total_files': self.updated_length + copy.deepcopy(len(self._scraped_files))}],
f,
indent = 2)
return
def is_repo_updated(self, url, updated_at) -> bool:
if url in self._scraped_repos and self._scraped_repos[url].updated_at == updated_at:
self.repos_unchanged_counter += 1
return True
elif url in self._stored_repos:# and self._stored_repos[url] == updated_at:
self.repos_stored_counter += 1
return True
return False
def is_file_updated(self, url, sha) -> bool:
if url in self._scraped_files and self._scraped_files[url].sha == sha:
self.files_unchanged_counter += 1
return True
return False
def update_file(self, **kwargs) -> bool:
url = kwargs.get('url')
if url in self._scraped_files:
self.file_size_counter += self._scraped_files[url].update(**kwargs)
self.files_modified_counter += 1
else:
self._scraped_files[url] = RecursiveFetcher.GithubRepoHandler.GithubFile(**kwargs)
self.files_new_counter += 1
self.file_size_counter += kwargs.get('size')
if self.file_size_counter >= self.file_size_limit:
l.logger().warn("time to flush!")
self.Flush()
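          # Re-read record.json so the repos/files just flushed become part of
          # the stored history and the updated file count.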
self.collectHistory()
self.file_size_counter = 0
return True
def update_repo(self, **kwargs) -> bool:
url = kwargs.get('url')
l.logger().info("Add: {}".format(url))
if url in self._scraped_repos:
self._scraped_repos[url].update(**kwargs)
self.repos_modified_counter += 1
else:
self._scraped_repos[url] = RecursiveFetcher.GithubRepoHandler.GithubRepo(**kwargs)
self.repos_new_counter += 1
return True
def Flush(self) -> None:
for idx, file in enumerate(self._scraped_files):
if self._scraped_files[file].repo_url in self._scraped_repos:
with open(os.path.join(self.cache_path, "{}.cl".format(idx + self.updated_length)), 'w') as f:
f.write(self._scraped_files[file].contents)
for repo in self._scraped_repos:
self._stored_repos[repo] = self._scraped_repos[repo].updated_at
self.appendHistory()
self._scraped_repos.clear()
self._scraped_files.clear()
self.file_size_counter = 0
return
def print_counters(self) -> None:
"""
Print analytics counters.
"""
print('\r\033[Kfiles: new: ', self.files_new_counter,
', modified: ', self.files_modified_counter,
', mem_size: ', self.file_size_counter, 'B',
sep='', end='')
def __init__(self,
config: github_pb2.GithubMiner
):
self.cache_path = pathlib.Path(config.path, must_exist = False).expanduser().resolve()
self.cache_path.mkdir(exist_ok = True, parents = True)
l.logger().info("Github fetcher initialized: {}".format(self.cache_path))
self.token = config.recursive.access_token
self.repo_handler = RecursiveFetcher.GithubRepoHandler(
self.cache_path,
config.recursive.corpus_size_K * 1000,
config.recursive.flush_limit_K * 1000,
)
self.current_status = ""
self.errors_counter = 0
return
def print_counters(self) -> None:
self.repo_handler.print_counters()
print('. errors: ', self.errors_counter,
'. ', self.current_status[0:80],
sep='', end='')
sys.stdout.flush()
def fetch(self) -> None:
"""
Download all of the OpenCL on GitHub (!)
    Shortcomings of this approach:
* Only includes exclusively OpenCL files, no inline strings.
* Occasionally (< 1%) can't find headers to include.
"""
# ### Dummy code to compare similarities of recursive corpus and bq CL corpus.
# db = bigQuery_database.bqDatabase("sqlite:////home/fivosts/PhD/Code/clgen/bq_corpus/exported_clgen_opencl_github.db")
# self.bq_repos = set()
# with db.Session() as s:
# for r in s.query(bigQuery_database.bqMainFile.repo_name):
# self.bq_repos.add(r[0])
# for r in s.query(bigQuery_database.bqOtherFile.repo_name):
# self.bq_repos.add(r[0])
# with open("/home/fivosts/Downloads/record.json", 'r') as f:
# chris = json.load(f)
# chris_repos = set(x.replace('https://api.github.com/repos/', '') for x, v in chris[0].items())
# common_repos = set()
# for r in chris_repos:
# if r in self.bq_repos:
# common_repos.add(r)
# l.logger().warn(len(common_repos))
# file_count = 0
# with db.Session() as s:
# for r in s.query(bigQuery_database.bqMainFile).all():
# if r.repo_name in common_repos:
# file_count += 1
# for r in s.query(bigQuery_database.bqOtherFile).all():
# if r.repo_name in common_repos:
# file_count += 1
# l.logger().info(file_count)
# exit()
if FLAGS.remove_identical_files:
if FLAGS.enhance_from_db:
self.enhance_from_db(pathlib.Path(FLAGS.enhance_from_db).resolve())
self.remove_identical_files()
return
if FLAGS.exclude_repos_from_db:
db = bigQuery_database.bqDatabase("sqlite:///{}".format(pathlib.Path(FLAGS.exclude_repos_from_db).resolve()))
self.db_excluded_repos = set()
with db.Session() as s:
for r in s.query(bigQuery_database.bqRepo.repo_name):
self.db_excluded_repos.add(r[0])
g = github.Github(self.token)
handle_repo = functools.partial(self.process_repo, g)
# fetch the repositories to iterate over. Since opencl isn't
# treated as a first-class language by GitHub, we can't use the
    # 'language=' keyword for queries, so instead we cast a much
# wider net and filter the results afterwards.
query_terms = [
'opencl',
'cl',
'khronos',
'gpu',
'gpgpu',
'cuda',
'amd',
'nvidia',
'heterogeneous',
'language:C',
'language:C++',
'language:LLVM',
]
try:
for query in query_terms:
# forks are okay - we use checksums to ensure uniqueness in
# final dataset
repos = g.search_repositories(query + ' fork:true sort:stars')
for repo in repos:
self.cached_includes = {}
if self.repo_handler.is_finished:
self.print_counters()
self.repo_handler.Flush()
l.logger().info("Finished gathering Github kernels.")
return
repo_modified = handle_repo(repo)
# do nothing unless the repo is new or modified
if not repo_modified:
continue
handle_file = functools.partial(self.process_file, g, repo)
# iterate over the entire git tree of the repo's default
# branch (usually 'master'). If a file ends with the .cl
# extension, check to see if we already have it, else download
# it
try:
branch = repo.default_branch
tree_iterator = repo.get_git_tree(branch, recursive=True).tree
for f in tree_iterator:
try:
handle_file(f)
except UnicodeError:
self.errors_counter += 1
pass
except Exception as e:
raise e
try:
contributors = len([x for x in repo.get_contributors()])
except github.GithubException:
contributors = -1
self.repo_handler.update_repo(
url = repo.url,
owner = repo.owner.email,
name = repo.name,
fork = 1 if repo.fork else 0,
stars = repo.stargazers_count,
contributors = contributors,
forks = repo.forks,
created_at = repo.created_at,
updated_at = str(repo.updated_at)
)
except github.GithubException:
# do nothing in case of error (such as an empty repo)
pass
except KeyboardInterrupt:
# Don't gather any more files
pass
except Exception as e:
self.errors_counter += 1
self.repo_handler.Flush()
raise e
self.print_counters()
self.repo_handler.Flush()
l.logger().info("Finished gathering Github kernels.")
return
def process_repo(self, g, repo) -> bool:
"""
GitHub repository handler.
Determines if a repository needs to be scraped. There are two cases for
this:
* The repository has not already been visited.
* The repository has been modified since it was last visited.
Parameters
----------
g
GitHub connection.
repo
Repository.
Returns
-------
bool
True if repository should be scraped, else False.
"""
self.rate_limit(g)
self.current_status = repo.name
self.print_counters()
if FLAGS.exclude_repos_from_db and repo.full_name in self.db_excluded_repos:
return False
if self.repo_handler.is_repo_updated(repo.url, str(repo.updated_at)):
# Timestamp of already scraped repo matches, so nothing to do.
return False
return True
def process_file(self, g, repo, file) -> bool:
"""
GitHub file handler.
Parameters
----------
g
GitHub connection.
repo
Repository.
file
File.
Returns
-------
bool
True on success, else False.
"""
# We're only interested in OpenCL files.
if not (file.path.endswith('.cl') or file.path.endswith('.ocl')):
return
url = file.url
sha = file.sha
path = file.path
self.current_status = repo.name + '/' + path
self.print_counters()
if self.repo_handler.is_file_updated(url, sha):
# Do nothing unless checksums don't match
return False
repo_url = repo.url
contents, _ = self.download_file(g, repo, url, [])
size = file.size or 0
self.repo_handler.update_file(
url = url, contents = contents, path = path,
sha = sha, repo_url = repo_url, size = size,
)
return True
def download_file(self, g, repo, url: str, stack: typing.List[str]) -> typing.Tuple[str, typing.List[str]]:
"""
Fetch file from GitHub.
Recursively downloads and inlines headers.
Parameters
----------
repo
Repository.
url : str
Path.
stack : List[str]
URL stack.
Returns
-------
str
File contents.
"""
# Recursion stack
stack.append(url)
exc_idx = 0
while True:
self.rate_limit(g)
try:
response = json.loads(requests.get(
url,
headers={
'Authorization': 'token ' + str(self.token)
}
).content.decode('utf-8'))
if 'content' in response:
src = b64decode(response['content']).decode('utf-8')
else:
src = ""
break
except requests.exceptions.RequestException as e:
if exc_idx == 0:
l.logger().error(e)
exc_idx += 1
time.sleep(10)
outlines = []
for line in src.split('\n'):
      match = re.match(re.compile(r'\w*#include ["<](.*)[">]'), line)
if match:
include_name = match.group(1)
# Try and resolve relative paths
include_name = include_name.replace('../', '')
branch = repo.default_branch
tree_iterator = repo.get_git_tree(branch, recursive=True).tree
include_url = ''
for f in tree_iterator:
if f.path.endswith(include_name):
include_url = f.url
break
if include_url and include_url not in stack:
if include_url not in self.cached_includes:
self.cached_includes[include_url], stack = self.download_file(g, repo, include_url, stack)
outlines.append("// [FETCH] included: {}\n".format(line))
outlines.append(self.cached_includes[include_url])
outlines.append('// [FETCH] eof({})'.format(line))
else:
if not include_url:
outlines.append('// [FETCH] didnt find: {}'.format(line))
else:
outlines.append('// [FETCH] skipped: {}'.format(line))
else:
outlines.append(line)
return '\n'.join(outlines), stack
def rate_limit(self, g) -> None:
"""
Block on GitHub rate limit.
Parameters
----------
g
GitHub connection.
"""
remaining = g.get_rate_limit().rate.remaining
while remaining < 100:
time.sleep(1)
self.current_status = 'WAITING ON RATE LIMIT: {}'.format(remaining)
self.print_counters()
remaining = g.get_rate_limit().rate.remaining
def remove_identical_files(self) -> None:
l.logger().info("Removing duplicate files from mined corpus...")
    if os.path.isfile(str(self.cache_path / "record.json")):
      with open(self.cache_path / "record.json", 'r') as f:
        data = json.load(f)
        repos = data[0]
        length = data[1]['total_files']
    else:
      l.logger().warn("record.json not found. Returning...")
      return
cache_map = {}
for i in range(length):
with open(self.cache_path / "{}.cl".format(i), 'r') as f:
cf = f.read()
cf_hash = crypto.sha256_str(cf)
if cf_hash not in cache_map:
cache_map[cf_hash] = cf
new_path = self.cache_path / "distinct_corpus"
new_path.mkdir(exist_ok = True, parents = True)
for k, v in cache_map.items():
with open(new_path / "{}.cl".format(k), 'w') as f:
f.write(v)
with open(new_path / "record.json", 'w') as f:
data[1]['total_files'] = len(cache_map)
json.dump(data, f, indent = 2)
return
def enhance_from_db(self, db_path: pathlib.Path) -> None:
l.logger().info("Enhancing dataset with {}".format(db_path.name))
if not db_path.exists():
l.logger().warn("{} db not found. Returning...".format(db_path))
db = bigQuery_database.bqDatabase("sqlite:///{}".format(db_path))
contentfiles = [cf.content for cf in db.main_files ]
contentfiles += [cf.content for cf in db.other_files]
if os.path.isfile(str(self.cache_path / "record.json")):
with open(self.cache_path / "record.json", 'r') as f:
data = json.load(f)
length = data[1]['total_files']
else:
l.logger().warn("record.json not found. Returning...")
return
for cf in contentfiles:
with open(self.cache_path / "{}.cl".format(length), 'w') as f:
f.write(cf)
length += 1
with open(self.cache_path / "record.json", 'w') as f:
data[1]['total_files'] = length
json.dump(data, f, indent = 2)
return
| 32,978 | 32.686415 | 129 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/github/datasets.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigQuery Dataset structures"""
import os
import typing
import pathlib
import progressbar
import humanize
import google
from google.cloud import bigquery
from absl import flags
from deeplearning.benchpress.github import bigQuery_database
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"bq_wait_permission",
True,
"Ask for permission every time a query is about to happen."
)
class Dataset(object):
"""Representation of dataset instance in Big Query"""
@classmethod
def FromArgs(cls,
client: bigquery.Client,
lang: int,
):
"""Use this classmethod to initialize a Dataset."""
languages = {
'generic': Dataset,
'opencl' : openclDataset,
'c' : cDataset,
'cpp' : cppDataset,
'java' : javaDataset,
'python' : pythonDataset,
}
if lang not in languages:
raise NotImplementedError(lang)
return languages[lang](client)
@property
def filecount(self) -> typing.Tuple[int, int]:
"""Return file count of represented query."""
if self.file_count is None:
return self.filecount_query()
else:
return self.file_count
@filecount.setter
def filecount(self, value: typing.Tuple[int, int]) -> None:
self.file_count = value
return
@property
def name(self):
return self.dataset.dataset_id
@property
def language(self):
return "generic"
@property
def extension(self):
if self.extensions:
return self.extensions[0]
else:
return None
def __init__(self,
client: bigquery.Client,
dataset_id: str = None,
extensions: typing.List[str] = None,
):
"""Generic Dataset class constructor. Not to be used directly."""
self.client = client
self.dataset, self.tables = self._setupDataset(
"{}.clgen_{}_github".format(self.client.project, dataset_id or "generic")
)
self.queryConfig = lambda qt, qr = [], dr = False : bigquery.QueryJobConfig(
destination = self.tables[qt],
write_disposition = 'WRITE_TRUNCATE',
query_parameters = qr,
dry_run = dr,
)
self.extensions = extensions
self.query_file_id = ""
if self.extensions is not None:
self.query_file_id = " OR ".join(["substr(file.path, {}, {}) = '{}'".format(-len(ext), 1+len(ext), ext)
for ext in self.extensions
])
self.file_count = None
l.logger().info("{} dataset initialized.".format(self.language))
return
def _setupDataset(self,
dataset_id: str
) -> typing.Tuple[bigquery.Dataset, typing.Dict[str, bigquery.Table]]:
"""API request to get or set bigquery.Dataset instance and bigquery.Table."""
dataset = bigquery.Dataset(dataset_id)
dataset.location = "US"
try:
dataset = self.client.get_dataset(dataset_id)
except google.api_core.exceptions.NotFound:
dataset = self.client.create_dataset(dataset, timeout = 30)
except Exception as e:
raise e
return dataset, self._setupTables(dataset_id)
def _setupTables(self, dataset_id: str) -> typing.Dict[str, bigquery.Table]:
"""API request that gets or sets bigquery.Table instances."""
table_reg = {
'main_files' : bigQuery_database.bqFile.bqSchema,
'other_files' : bigQuery_database.bqFile.bqSchema,
'repositories' : bigQuery_database.bqRepo.bqSchema,
'data' : bigQuery_database.bqData.bqSchema,
}
for reg, get_sc in table_reg.items():
table_id = "{}.{}".format(dataset_id, reg)
table = bigquery.Table(table_id, schema = get_sc())
try:
table_reg[reg] = self.client.get_table(table_id)
except google.api_core.exceptions.NotFound:
table_reg[reg] = self.client.create_table(table)
except Exception as e:
raise e
return table_reg
def filecount_query(self) -> typing.Tuple[int, int]:
"""
Queries the file count of files intended to query.
Returns file count in int.
"""
query = """
SELECT COUNT(*)
FROM `bigquery-public-data.github_repos.files` as file
{}
""".format("" if not self.query_file_id else "WHERE " + self.query_file_id)
dry_run_job = self.client.query(query, job_config = self.queryConfig('main_files', dr = True))
l.logger().warn("This query is going to consume {}".format(
humanize.naturalsize(dry_run_job.total_bytes_processed)
)
)
l.logger().info(query)
if FLAGS.bq_wait_permission:
l.logger().warn("Hit any button to continue...")
try:
input()
except KeyboardInterrupt:
return (0, 0)
l.logger().info("Running file count query...")
try:
job = self.client.query(query)
for f in job:
self.file_count = (f[0], 0)
return self.file_count
except google.api_core.exceptions.Forbidden as e:
l.logger().error(e)
exit()
def repository_query(self) -> typing.Tuple[bigquery.table.RowIterator]:
"""
Queries the repositories' name/branch that contain files with requested
specifications (e.g. OpenCL files).
Returns iterable of query.
"""
query = """
SELECT DISTINCT file.repo_name, file.ref
FROM `bigquery-public-data.github_repos.files` as file
{}
""".format("" if not self.query_file_id else "WHERE " + self.query_file_id)
dry_run_job = self.client.query(query, job_config = self.queryConfig('repositories', dr = True))
l.logger().warn("This query is going to consume {}".format(
humanize.naturalsize(dry_run_job.total_bytes_processed)
)
)
l.logger().info(query)
if FLAGS.bq_wait_permission:
l.logger().warn("Hit any button to continue...")
try:
input()
except KeyboardInterrupt:
return (None, None)
l.logger().info("Retrieving repository list of specs...")
try:
rows = self.client.query(query, job_config = self.queryConfig('repositories')).result()
except google.api_core.exceptions.Forbidden as e:
l.logger().error(e)
exit()
return (rows, None)
def contentfile_query(self) -> typing.Tuple[bigquery.table.RowIterator]:
"""
Queries all contentfiles with requested specifications (e.g. specific file extensions).
Returns iterable of query files
"""
query = """
SELECT MIN(file.repo_name) as repo_name,
MIN(file.path) as path,
MIN(file.ref) as ref,
file.id,
MIN(contentfile.size) as size,
MIN(contentfile.content) as content
FROM (`bigquery-public-data.github_repos.contents` as contentfile
INNER JOIN `bigquery-public-data.github_repos.files` as file ON file.id = contentfile.id {})
GROUP BY file.id
""".format("" if not self.query_file_id else "AND (" + self.query_file_id + ")")
# query = """
# SELECT file.id
# FROM `bigquery-public-data.github_repos.files` as file
# WHERE {}
# GROUP BY file.id
# """.format("" if not self.query_file_id else "(" + self.query_file_id + ")")
dry_run_job = self.client.query(query, job_config = self.queryConfig('main_files', dr = True))
l.logger().warn("This query is going to consume {}".format(
humanize.naturalsize(dry_run_job.total_bytes_processed)
)
)
l.logger().info(query)
if FLAGS.bq_wait_permission:
l.logger().warn("Hit any button to continue...")
try:
input()
except KeyboardInterrupt:
return (None, None)
l.logger().info("Retrieving {} contentfiles...".format(self.dataset.dataset_id))
try:
rows = self.client.query(query, job_config = self.queryConfig('main_files')).result()
except google.api_core.exceptions.Forbidden as e:
l.logger().error(e)
exit()
return (rows, None)
def header_file_query(self) -> None:
"""
From the repositories that contentfiles were scraped from, also get their header files
for header inlining reasons.
Override this method IF you want header files fetched with the language's contentfiles.
"""
return None
class openclDataset(Dataset):
"""Opencl Dataset"""
@property
def language(self):
return "openCL"
def __init__(self,
client: bigquery.Client,
):
extensions = ['.cl']
super(openclDataset, self).__init__(client, "opencl", extensions)
self.other_extensions = ['.c', '.cc', '.cpp', '.cxx', '.c++', '.h', '.hpp']
self.query_exception = ' AND (' + ' OR '.join([
"(substr(file.path, {}, {}) = '{}' AND contentfile.content LIKE '%kernel void%')"
.format(-len(ext), 1+len(ext), ext)
for ext in self.other_extensions
]) + ')'
return
def filecount_query(self) -> typing.Tuple[int, int]:
"""
Queries the file count of files intended to query.
Returns file count in int.
"""
super(openclDataset, self).filecount_query()
query = """
SELECT COUNT(*)
FROM `bigquery-public-data.github_repos.files` as file
INNER JOIN `bigquery-public-data.github_repos.contents` as contentfile
ON file.id = contentfile.id
{}
""".format(self.query_exception or "")
dry_run_job = self.client.query(query, job_config = self.queryConfig('repositories', dr = True))
l.logger().warn("This query is going to consume {}".format(
humanize.naturalsize(dry_run_job.total_bytes_processed)
)
)
l.logger().info(query)
if FLAGS.bq_wait_permission:
l.logger().warn("Hit any button to continue...")
try:
input()
except KeyboardInterrupt:
return (0, 0)
try:
job = self.client.query(query)
for f in job:
        self.file_count = (self.file_count[0], f[0])
return self.file_count
except google.api_core.exceptions.Forbidden as e:
l.logger().error(e)
exit()
def repository_query(self) -> typing.Tuple[bigquery.table.RowIterator, bigquery.table.RowIterator]:
"""
Query repositories that tested positive for having CL.
CL has its own function, because two types of files are checked:
'.cl' files and any C/C++ file that contains the keyword 'kernel void'
"""
cl_repo_it, _ = super(openclDataset, self).repository_query()
query = """
SELECT DISTINCT file.repo_name, file.ref
FROM `bigquery-public-data.github_repos.files` as file
INNER JOIN `bigquery-public-data.github_repos.contents` as contentfile
ON file.id = contentfile.id
{}
""".format(self.query_exception or "")
dry_run_job = self.client.query(query, job_config = self.queryConfig('repositories', dr = True))
l.logger().warn("This query is going to consume {}".format(
humanize.naturalsize(dry_run_job.total_bytes_processed)
)
)
l.logger().info(query)
if FLAGS.bq_wait_permission:
l.logger().warn("Hit any button to continue...")
try:
input()
except KeyboardInterrupt:
return (cl_repo_it, None)
l.logger().info("Retrieving etc. repo list...")
try:
rows = self.client.query(query, job_config = self.queryConfig('repositories')).result()
except google.api_core.exceptions.Forbidden as e:
l.logger().error(e)
exit()
return (cl_repo_it, rows)
def contentfile_query(self) -> typing.Tuple[bigquery.table.RowIterator, bigquery.table.RowIterator]:
"""
Query contentfiles that tested positive for being CL.
CL has its own function, because two types of files are checked:
'.cl' files and any C/C++ file that contains the keyword 'kernel void'
"""
cl_file_it, _ = super(openclDataset, self).contentfile_query()
query = """
SELECT file.repo_name, file.path, file.ref, file.id,
contentfile.size, contentfile.content
FROM `bigquery-public-data.github_repos.files` as file
INNER JOIN `bigquery-public-data.github_repos.contents` as contentfile
ON file.id = contentfile.id
{}
""".format(self.query_exception or "")
dry_run_job = self.client.query(query, job_config = self.queryConfig('other_files', dr = True))
l.logger().warn("This query is going to consume {}".format(
humanize.naturalsize(dry_run_job.total_bytes_processed)
)
)
l.logger().info(query)
if FLAGS.bq_wait_permission:
l.logger().warn("Hit any button to continue...")
try:
input()
except KeyboardInterrupt:
return (cl_file_it, None)
l.logger().info("Retrieving etc. contentfiles...")
try:
rows = self.client.query(query, job_config = self.queryConfig('other_files')).result()
except google.api_core.exceptions.Forbidden as e:
l.logger().error(e)
exit()
return (cl_file_it, rows)
class cDataset(Dataset):
"""C Dataset"""
@property
def language(self):
return "C"
def __init__(self,
client: bigquery.Client,
):
extensions = ['.c']
super(cDataset, self).__init__(client, "c", extensions)
return
class cppDataset(Dataset):
"""C++ Dataset"""
@property
def language(self):
return "C++"
def __init__(self,
client: bigquery.Client,
):
    extensions = ['.cpp', '.cc', '.cxx', '.c++', '.hpp']
super(cppDataset, self).__init__(client, "cpp", extensions)
return
class javaDataset(Dataset):
"""java Dataset"""
@property
def language(self):
return "Java"
def __init__(self,
client: bigquery.Client,
):
extensions = ['.java']
super(javaDataset, self).__init__(client, "java", extensions)
return
class pythonDataset(Dataset):
"""python Dataset"""
@property
def language(self):
return "Python"
def __init__(self,
client: bigquery.Client,
):
extensions = ['.py']
super(pythonDataset, self).__init__(client, "python", extensions)
return
| 14,558 | 30.997802 | 109 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/distributions.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Statistical distributions used for sampling"""
import pathlib
import sys
import copy
import typing
import math
import numpy as np
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.util import plotter
class Distribution():
def __init__(self,
sample_length : int,
relative_length: float,
log_path : typing.Union[pathlib.Path, str],
set_name : str
):
self.sample_length = sample_length
self.relative_length = relative_length
self.log_path = log_path if isinstance(log_path, pathlib.Path) else pathlib.Path(log_path)
self.set_name = set_name
self.sample_counter = {}
return
@classmethod
def FromHoleConfig(cls,
config: model_pb2.Hole,
log_path: typing.Union[pathlib.Path, str],
set_name: str,
) -> typing.TypeVar("Distribution"):
if config.HasField("absolute_length"):
abs_len = config.absolute_length
rel_len = 1.0
elif config.HasField("relative_length"):
abs_len = None
rel_len = float(config.relative_length)
if config.HasField("uniform_distribution"):
return UniformDistribution(abs_len,
rel_len,
log_path,
set_name,
)
elif config.HasField("normal_distribution"):
return NormalDistribution(abs_len,
rel_len,
config.normal_distribution.mean,
config.normal_distribution.variance,
log_path,
set_name,
)
else:
raise NotImplementedError(config)
def sample(self, length = None):
raise NotImplementedError
def register(self, actual_sample):
if isinstance(actual_sample, list):
for s in actual_sample:
self.register(s)
else:
if actual_sample not in self.sample_counter:
self.sample_counter[actual_sample] = 1
else:
self.sample_counter[actual_sample] += 1
return
def plot(self):
sorted_dict = sorted(self.sample_counter.items(), key = lambda x: x[0])
plotter.FrequencyBars(
x = [x for (x, _) in sorted_dict],
y = [y for (_, y) in sorted_dict],
plot_name = self.set_name,
path = self.log_path,
title = self.set_name,
x_name = self.set_name,
)
return
class UniformDistribution(Distribution):
"""
  A uniform distribution sampler. Get a random number from the distribution by calling sample().
Upper range of sampling is defined as [0, sample_length].
"""
def __init__(self,
sample_length : int,
relative_length: float,
log_path : typing.Union[pathlib.Path, str],
set_name : str,
seed : int = None,
):
super(UniformDistribution, self).__init__(sample_length, relative_length, log_path, set_name)
    if seed:
      self.seed = seed
      self.sample_gen = np.random
      self.sample_gen.seed(seed)
      self.sampler = self.sample_gen.randint
    else:
      self.sampler = np.random.RandomState().randint
return
def sample(self, length = None):
if not self.sample_length and not length:
      raise ValueError("One of sample length and upper length must be specified.")
if self.sample_length:
return self.sampler(0, self.sample_length + 1)
else:
return self.sampler(0, int(length * self.relative_length))
class NormalDistribution(Distribution):
"""
Normal distribution sampler. Initialized with mean, variance.
Upper range of sampling is defined as [0, sample_length].
"""
def __init__(self,
sample_length : int,
relative_length: float,
mean : float,
variance : float,
log_path : typing.Union[pathlib.Path, str],
set_name : str,
):
super(NormalDistribution, self).__init__(sample_length, relative_length, log_path, set_name)
self.mean = mean
self.variance = variance
def sample(self, length = None):
upper_length = self.sample_length or length * self.relative_length
sample = int(round(np.random.RandomState().normal(loc = self.mean, scale = self.variance)))
    while sample < 0 or sample > upper_length:
sample = int(round(np.random.RandomState().normal(loc = self.mean, scale = self.variance)))
return sample
class ProgLinearDistribution(Distribution):
"""
A sampling distribution used in training per stage mode.
Distribution starts with empty or tiny holes and
gradually progresses into sampling bigger holes while still
feeding small holes as well, until max hole length is met.
Gradual increase is an interval based on number of stages
and number of train steps.
Cumulative stage distribution appears as negative linear.
At any given moment, probability of selecting a hole
length should be uniform.
Parameters:
number of stages
number of training steps
max hole length
"""
def __init__(self,
num_train_steps : int,
max_hole_length : int,
log_path : typing.Union[pathlib.Path, str],
set_name : str,
):
    # Distribution.__init__ expects (sample_length, relative_length, log_path,
    # set_name); relative_length is unused by this sampler, so pass 1.0.
    super(ProgLinearDistribution, self).__init__(
      max_hole_length, 1.0, log_path, set_name
    )
self.num_train_steps = num_train_steps
def sample(self):
return
class GenericDistribution(Distribution):
"""
  A distribution built from a small sample of datapoints whose underlying
  distribution is unknown. Used to perform statistics on small samples.
"""
@property
def population_size(self) -> int:
"""
Size of distribution's population.
"""
return self.sample_length
@property
def population(self) -> typing.List[int]:
"""
Get population.
"""
return self.samples
@property
def min(self) -> int:
return self.min_idx
@property
def max(self) -> int:
return self.max_idx
@property
def average(self) -> float:
if self.avg is not None:
return self.avg
else:
self.avg = sum(self.population) / self.population_size
return self.avg
"""
avg = 0.0
for idx, p in enumerate(self.distribution):
avg += p * (idx + self.min_idx)
self.avg = avg
return self.avg
"""
@property
def median(self) -> int:
if self.med is not None:
return self.med
else:
s = sorted(self.population)
if self.population_size % 2 == 1:
self.med = s[self.population_size // 2]
else:
self.med = 0.5 * (s[(self.population_size // 2) - 1] + s[self.population_size // 2])
return self.med
"""
l_idx, r_idx = 0, len(self.distribution)
l,r = self.distribution[l_idx], None
queue = copy.copy(self.distribution)
cur = queue.pop(0)
offset = -cur
if cur != 0:
l = cur
while queue:
if offset < 0:
cur = queue.pop()
r_idx -= 1
if cur != 0:
r = r_idx
offset += cur
else:
cur = queue.pop(0)
l_idx += 1
if cur != 0:
l = l_idx
offset -= cur
if offset > sys.float_info.epsilon:
self.med = r + self.min_idx
elif offset < -sys.float_info.epsilon:
self.med = l + self.min_idx
else:
self.med = (l+r+2*self.min_idx) / 2
return self.med
"""
@property
def variance(self) -> float:
"""
Calculate variance of population.
"""
if self.var is not None:
return self.var
else:
self.var = sum([(x - self.average)**2 for x in self.population]) / self.population_size
return self.var
@property
def standard_deviation(self) -> float:
return math.sqrt(self.variance)
def __init__(self, samples: typing.List[int], log_path: pathlib.Path, set_name: str):
super(GenericDistribution, self).__init__(
sample_length = len(samples),
relative_length = float('NaN'),
log_path = log_path,
set_name = set_name,
)
self.min_idx, self.max_idx = math.inf, -math.inf
self.avg, self.med, self.var = None, None, None
self.samples = samples
total = len(samples)
if len(samples) > 0:
for s in samples:
if s > self.max_idx:
self.max_idx = s
if s < self.min_idx:
self.min_idx = s
# If there is a large discrepancy in the min/max range, this array will be massive.
# You could turn this to a dict for only non-zero keys.
self.distribution = [0] * abs(1 + self.max_idx - self.min_idx)
for s in samples:
self.distribution[s - self.min_idx] += 1
for idx, v in enumerate(self.distribution):
self.distribution[idx] = v / total
self.pmf_to_pdf()
else:
self.distribution = []
self.pdf = []
return
def __add__(self, d: "GenericDistribution") -> "GenericDistribution":
"""
    The addition of two distributions happens with convolution.
    For two discrete distributions d1, d2:
    P[X1 + X2 = n] = (Pd1 * Pd2)[n] = Σk Pd1[k] * Pd2[n-k]
"""
if self.min_idx > d.min_idx:
d1 = self.realign(self.min_idx - d.min_idx)
d2 = d.distribution
else:
d1 = self.distribution
d2 = d.realign(d.min_idx - self.min_idx)
if len(d1) > len(d2):
d2 = d2 + [0] * (len(d1) - len(d2))
else:
d1 = d1 + [0] * (len(d2) - len(d1))
ret = GenericDistribution([], self.log_path, "{}+{}".format(self.set_name, d.set_name))
summed = list(np.convolve(d1, d2, mode = 'full'))
while summed[0] == 0:
summed.pop(0)
while summed[-1] == 0:
summed.pop()
min_idx = self.min_idx + d.min_idx
max_idx = len(summed) - 1 + min_idx
ret.distribution = summed
ret.min_idx = min_idx
ret.max_idx = max_idx
ret.pmf_to_pdf()
return ret
def __sub__(self, d: "GenericDistribution") -> "GenericDistribution":
"""
Subtraction of distributions is equal to addition of inverted distribution.
P[X - Y] = P[X + (-Y)]
"""
neg = d.negate()
sub = self + neg
sub.set_name = "{}-{}".format(self.set_name, d.set_name)
return sub
def __mul__(self, d: "GenericDistribution") -> "GenericDistribution":
"""
Multiplication of two independent random variables.
    P[X*Y = c] = Σ_{x,y : x*y = c} P[X=x]*P[Y=y]
"""
    # Extreme products occur at the corners of the two ranges (this also
    # handles negative-valued supports).
    corners = [a * b for a in (self.min_idx, self.max_idx) for b in (d.min_idx, d.max_idx)]
    l_idx, r_idx = min(corners), max(corners)
    out_distr = [0] * (1 + r_idx - l_idx)
    for x in range(self.min_idx, self.max_idx + 1):
      for y in range(d.min_idx, d.max_idx + 1):
        # Accumulate: several (x, y) pairs may map to the same product; the pmf
        # arrays are indexed relative to their min_idx offsets.
        out_distr[x*y - l_idx] += self.distribution[x - self.min_idx] * d.distribution[y - d.min_idx]
while out_distr[0] == 0:
out_distr.pop(0)
l_idx += 1
while out_distr[-1] == 0:
out_distr.pop()
r_idx -= 1
mul = GenericDistribution([], self.log_path, "{}*{}".format(self.set_name, d.set_name))
mul.distribution = out_distr
mul.min_idx = l_idx
mul.max_idx = r_idx
return mul
def __ge__(self, v: int) -> float:
"""
Probability of P[X >= v]
"""
voffset = v - self.min_idx
probs = 0.0
for idx, s in enumerate(self.distribution):
if idx >= voffset:
probs += s
return probs
def __gt__(self, v: int) -> float:
"""
Probability of P[X > v]
"""
voffset = v - self.min_idx
probs = 0.0
for idx, s in enumerate(self.distribution):
if idx > voffset:
probs += s
return probs
def __le__(self, v: int) -> float:
"""
Probability of P[X <= v]
"""
voffset = v - self.min_idx
probs = 0.0
for idx, s in enumerate(self.distribution):
if idx <= voffset:
probs += s
return probs
def __lt__(self, v: int) -> float:
"""
Probability of P[X < v]
"""
voffset = v - self.min_idx
probs = 0.0
for idx, s in enumerate(self.distribution):
if idx < voffset:
probs += s
return probs
def __eq__(self, v: int) -> float:
"""
Probability of P[X = v]
"""
voffset = v - self.min_idx
probs = 0.0
for idx, s in enumerate(self.distribution):
if idx == voffset:
probs += s
return probs
def negate(self) -> "GenericDistribution":
"""
Inverts distribution: P[Y] -> P[-Y]
"""
neg = GenericDistribution([], self.log_path, "neg-{}".format(self.set_name))
neg.distribution = self.distribution[::-1]
neg.min_idx = -self.max_idx
neg.max_idx = -self.min_idx
neg.pmf_to_pdf()
return neg
def realign(self, offset: int) -> typing.List[int]:
"""
When performing operations with distributions,
    both distributions must agree on which value the 0th index
    of the pmf array refers to.
    This function pads the pmf array with `offset` leading zeros,
    shifting it so that it lines up with the other distribution's base index.
"""
return [0] * offset + self.distribution
def cov(self, d: "GenericDistribution") -> float:
"""
Compute covariance of two distributions.
"""
if self.population_size != d.population_size:
raise ValueError("Covariance and correlation can only be computed to 1-1 equal-sized distributions. Or you could take two equal-sized samples.")
return sum([(x - self.average)*(y - d.average) for (x, y) in zip(self.population, d.population)]) / self.population_size
def corr(self, d: "GenericDistribution") -> float:
"""
Compute correlation factor between two distributions.
"""
try:
return self.cov(d) / (self.standard_deviation * d.standard_deviation)
except ZeroDivisionError:
return math.inf
def get_sorted_index(self, idx: int) -> int:
"""
Get the smallest 'idx' sample in the population.
"""
return sorted(self.samples)[min(len(self.samples) - 1, idx)]
def pmf_to_pdf(self) -> None:
"""
    Accumulate the pmf into a cumulative distribution (stored in self.pdf).
"""
self.pdf = [0] * len(self.distribution)
cur = 0.0
for idx, prob in enumerate(self.distribution):
cur += prob
self.pdf[idx] = cur
return
def plot(self, with_avg: bool = False) -> None:
"""
Plot distribution.
"""
vlines = None
if with_avg:
vlines = [
(self.average, "Average"),
(self.median, "Median"),
]
plotter.FrequencyBars(
x = [idx + self.min_idx for idx, _ in enumerate(self.distribution)],
y = [v for v in self.distribution],
plot_name = "pmf_{}".format(self.set_name),
path = self.log_path,
title = "pmf_{}".format(self.set_name),
x_name = self.set_name,
vlines = vlines,
)
plotter.FrequencyBars(
x = [idx + self.min_idx for idx, _ in enumerate(self.pdf)],
y = [v for v in self.pdf],
plot_name = "pdf_{}".format(self.set_name),
path = self.log_path,
title = "pdf_{}".format(self.set_name),
x_name = self.set_name,
vlines = vlines,
)
return
| 16,053 | 29.347826 | 150 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/environment.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module handles application's environment variables"""
import os
import ifcfg
import subprocess
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def check_path_exists(path, must_exist = True):
if not os.path.exists(path):
if must_exist:
raise ValueError("{} does not exist.".format(path))
else:
return None
return path
try:
LLVM_VERSION = os.environ['LLVM_VERSION']
LLVM = check_path_exists(os.environ['LLVM'])
LLVM_LIB = check_path_exists(os.environ['LLVM_LIB'])
LIBCXX_HEADERS = check_path_exists(os.environ['LIBCXX_HEADERS'])
OPENCL_HEADERS = check_path_exists(os.environ['OPENCL_HEADERS'])
CLANG = check_path_exists(os.environ['CLANG'])
OPT = check_path_exists(os.environ['OPT'])
LLVM_EXTRACT = check_path_exists(os.environ['LLVM_EXTRACT'])
LLVM_DIS = check_path_exists(os.environ['LLVM_DIS'])
CLANG_FORMAT = check_path_exists(os.environ['CLANG_FORMAT'])
CLANG_HEADERS = check_path_exists(os.environ['CLANG_HEADERS'])
CLANG_REWRITER = check_path_exists(os.environ['CLANG_REWRITER'])
SEQ_CLANG_REWRITER = check_path_exists(os.environ['SEQ_CLANG_REWRITER'])
LIBCLC = check_path_exists(os.environ['LIBCLC'])
DASHBOARD_TEMPLATES = check_path_exists(os.environ['DASHBOARD_TEMPLATES'])
DASHBOARD_STATIC = check_path_exists(os.environ['DASHBOARD_STATIC'])
DATA_CL_INCLUDE = check_path_exists(os.environ['DATA_CL_INCLUDE'])
AUX_INCLUDE = check_path_exists(os.environ['AUX_INCLUDE'])
GREWE = check_path_exists(os.environ['GREWE'])
CLDRIVE = check_path_exists(os.environ['CLDRIVE'], must_exist = False)
MUTEC = check_path_exists(os.environ['MUTEC'], must_exist = False)
SRCIROR_SRC = check_path_exists(os.environ['SRCIROR_SRC'], must_exist = False)
SRCIROR_IR = check_path_exists(os.environ['SRCIROR_IR'], must_exist = False)
CSMITH = check_path_exists(os.environ['CSMITH'], must_exist = False)
CLSMITH = check_path_exists(os.environ['CLSMITH'], must_exist = False)
CLSMITH_INCLUDE = check_path_exists(os.environ['CLSMITH_INCLUDE'], must_exist = False)
INSTCOUNT = check_path_exists(os.environ['INSTCOUNT'])
AUTOPHASE = check_path_exists(os.environ['AUTOPHASE'])
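  # Distributed-execution settings: default to single-process values and fall
  # back to the SLURM-provided variables (SLURM_LOCALID, SLURM_PROCID,
  # SLURM_NTASKS) when launched under SLURM; the GLOO/NCCL socket interface
  # defaults to the machine's primary network device.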
MASTER_PORT = int(os.environ.get("MASTER_PORT", 8738))
MASTER_ADDR = os.environ.get("MASTER_ADDR", "127.0.0.1")
LOCAL_RANK = int(os.environ.get("LOCAL_RANK", os.environ.get("SLURM_LOCALID", 0)))
WORLD_RANK = int(os.environ.get("RANK", os.environ.get("SLURM_PROCID", 0)))
WORLD_SIZE = int(os.environ.get("WORLD_SIZE", os.environ.get("SLURM_NTASKS", 1)))
HOSTNAME = subprocess.check_output("hostname -A".split(), stderr = subprocess.STDOUT).decode().split()[0]
if "GLOO_SOCKET_IFNAME" not in os.environ:
os.environ["GLOO_SOCKET_IFNAME"] = ifcfg.default_interface()['device']
if "NCCL_SOCKET_IFNAME" not in os.environ:
os.environ["NCCL_SOCKET_IFNAME"] = ifcfg.default_interface()['device']
except Exception as e:
lite = os.environ.get("LITE_BUILD", 0)
if lite == 0:
raise e
| 3,842 | 51.643836 | 118 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/fs.py | # coding=utf-8
# Copyright 2022 Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""High level filesystem interface.
"""
import contextlib
import os.path
import pathlib
import re
import humanize
import shutil
import tempfile
import typing
from glob import iglob
from absl import flags
from send2trash import send2trash
FLAGS = flags.FLAGS
flags.DEFINE_string(
"local_filesystem",
None,
"Select target directory of TemporaryDirectory."
)
class Error(Exception):
pass
class File404(Error):
pass
# A list of file names that frequently appear in file systems that are not
# "useful".
COMMONLY_IGNORED_FILE_NAMES = set(
[
"._.DS_Store",
".com.apple.timemachine.donotpresent",
".com.apple.timemachine.supported",
".DS_Store",
".sync.ffs_db",
".sync_timestamp.txt",
".VolumeIcon.icns",
".VolumeIcon.ico",
"autorun.inf",
]
)
def path(*components):
"""
Get a file path.
Concatenate all components into a path.
"""
_path = os.path.join(*components)
_path = os.path.expanduser(_path)
return _path
def must_exist(*components):
"""
Ensure path exists.
Arguments:
*components (str[]): Path components.
Returns:
str: File path.
Raises:
File404: If path does not exist.
"""
_path = path(*components)
if not exists(_path):
raise File404(_path)
return _path
def abspath(*components):
"""
Get an absolute file path.
Concatenate all components into an absolute path.
"""
return os.path.abspath(path(*components))
def basename(*components):
"""
Return the basename of a given file path.
"""
return os.path.basename(path(*components))
def dirname(*components):
"""
Return the directory name of a given file path.
"""
return os.path.dirname(path(*components))
def is_subdir(child, parent):
"""
Determine if "child" is a subdirectory of "parent".
If child == parent, returns True.
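
  Examples:
    >>> is_subdir("/home/user/foo", "/home/user")
    True
    >>> is_subdir("/home/user", "/home/user")
    True
    >>> is_subdir("/home", "/home/user")
    False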
"""
child_path = os.path.realpath(child)
parent_path = os.path.realpath(parent)
if len(child_path) < len(parent_path):
return False
for i in range(len(parent_path)):
if parent_path[i] != child_path[i]:
return False
return True
# Directory history.
_cdhist = []
def cd(path):
"""
Change working directory.
Returns absolute path to new working directory.
"""
_cdhist.append(pwd()) # Push to history.
path = abspath(path)
os.chdir(path)
return path
def cdpop():
"""
Return the last directory.
Returns absolute path to new working directory.
"""
if len(_cdhist) >= 1:
old = _cdhist.pop() # Pop from history.
os.chdir(old)
return old
else:
return pwd()
def pwd():
"""
Return the path to the current working directory.
"""
return os.getcwd()
def exists(*components):
"""
Return whether a file exists.
"""
return os.path.exists(path(*components))
def isfile(*components):
"""
Return whether a path exists, and is a file.
"""
return os.path.isfile(path(*components))
def isexe(*components):
"""
Return whether a path is an executable file.
Arguments:
path (str): Path of the file to check.
Examples:
>>> isexe("/bin/ls")
True
>>> isexe("/home")
False
>>> isexe("/not/a/real/path")
False
Returns:
bool: True if file is executable, else false.
"""
_path = path(*components)
return isfile(_path) and os.access(_path, os.X_OK)
def isdir(*components):
"""
Return whether a path exists, and is a directory.
"""
if components:
return os.path.isdir(path(*components))
else:
return False
def ls(
root: typing.Union[str, pathlib.Path] = ".", abspaths=False, recursive=False,
):
"""
Return a list of files in directory.
Directory listings are sorted alphabetically. If the named
  directory is a file, return its path.
Examples:
>>> ls("foo")
["a", "b", "c"]
>>> ls("foo/a")
["foo/a"]
>>> ls("foo", abspaths=True)
["/home/test/foo/a", "/home/test/foo/b", "/home/test/foo/c"]
>>> ls("foo", recursive=True)
["a", "b", "b/d", "b/d/e", "c"]
Arguments:
root (str): Path to directory. Can be relative or absolute.
abspaths (bool, optional): Return absolute paths if true.
recursive (bool, optional): Recursively list subdirectories if
true.
Returns:
list of str: A list of paths.
Raises:
OSError: If root directory does not exist.
"""
def _expand_subdirs(file):
if isdir(path(root, file)):
return [file,] + [
path(file, x) for x in ls(path(root, file), recursive=True)
]
else:
return [file]
if isfile(root):
# If argument is a file, return path.
return [abspath(root)] if abspaths else [basename(root)]
elif abspaths:
# Get relative names.
relpaths = ls(root, recursive=recursive, abspaths=False)
# Prepend the absolute path to each relative name.
base = abspath(root)
return [path(base, relpath) for relpath in relpaths]
elif recursive:
# Recursively expand subdirectories.
paths = ls(root, abspaths=abspaths, recursive=False)
return [item for sublist in [_expand_subdirs(file) for file in paths] for item in sublist]
else:
# List directory contents.
return list(sorted(os.listdir(root)))
def lsdirs(root=".", **kwargs):
"""
Return only subdirectories from a directory listing.
Arguments:
root (str): Path to directory. Can be relative or absolute.
**kwargs: Any additional arguments to be passed to ls().
Returns:
list of str: A list of directory paths.
Raises:
OSError: If root directory does not exist.
"""
paths = ls(root=root, **kwargs)
if isfile(root):
return []
return [_path for _path in paths if isdir(path(root, _path))]
def lsfiles(root: typing.Union[str, pathlib.Path] = ".", **kwargs):
"""
Return only files from a directory listing.
Arguments:
root (str): Path to directory. Can be relative or absolute.
**kwargs: Any additional arguments to be passed to ls().
Returns:
list of str: A list of file paths.
Raises:
OSError: If root directory does not exist.
"""
paths = ls(root=root, **kwargs)
if isfile(root):
return paths
return [_path for _path in paths if isfile(path(root, _path))]
def rm(*components, **kwargs):
"""
Remove a file or directory.
If path is a directory, this recursively removes the directory and
any contents. Non-existent paths are silently ignored.
Supports Unix style globbing by default (disable using
glob=False). For details on globbing pattern expansion, see:
https://docs.python.org/2/library/glob.html
Arguments:
*components (string[]): path to the file or directory to remove. May be
                absolute or relative. May contain unix glob patterns.
**kwargs: if "glob" is True, perform Unix style pattern expansion of
paths (default: True).
"""
_path = path(*components)
glob = kwargs.get("glob", True)
paths = iglob(_path) if glob else [_path]
for file in paths:
if isfile(file):
os.remove(file)
elif exists(file):
shutil.rmtree(file, ignore_errors=False)
def rmtrash(*components):
"""
Move a file or directory to trash.
If file does not exist, nothing happens.
Examples:
>>> rmtrash("foo", "bar")
>>> rmtrash("/home/file.txt")
Arguments:
*components (string[]): path to the file or directory.
"""
_path = path(*components)
if exists(_path):
send2trash(_path)
def cp(src, dst):
"""
Copy a file or directory.
If source is a directory, this recursively copies the directory
and its contents. If the destination is a directory, then this
creates a copy of the source in the destination directory with the
same basename.
If the destination already exists, this will attempt to overwrite
it.
Arguments:
src (string): path to the source file or directory.
dst (string): path to the destination file or directory.
Raises:
IOError: if source does not exist.
"""
if isdir(src):
# Overwrite an existing directory.
if isdir(dst):
rm(dst)
shutil.copytree(src, dst)
elif isfile(src):
shutil.copy(src, dst)
else:
raise IOError("Source '{0}' not found".format(src))
def mv(src, dst):
"""
Move a file or directory.
If the destination already exists, this will attempt to overwrite
it.
Arguments:
src (string): path to the source file or directory.
dst (string): path to the destination file or directory.
Raises:
File404: if source does not exist.
IOError: in case of error.
"""
if not exists(src):
raise File404(src)
try:
shutil.move(src, dst)
except Exception as e:
raise IOError(str(e))
def mkdir(*components, **kwargs):
"""
Make directory "path", including any required parents. If
directory already exists, do nothing.
"""
_path = path(*components)
if not isdir(_path):
try:
os.makedirs(_path, **kwargs)
except FileExistsError:
## This happens in asynchronous distributed filesystems.
pass
return _path
def mkopen(p, *args, **kwargs):
"""
A wrapper for the open() builtin which makes parent directories if needed.
"""
dir = os.path.dirname(p)
mkdir(dir)
return open(p, *args, **kwargs)
def read(*components, **kwargs):
"""
Read file and return a list of lines. If comment_char is set, ignore the
contents of lines following the comment_char.
Raises:
IOError: if reading path fails
"""
rstrip = kwargs.get("rstrip", True)
comment_char = kwargs.get("comment_char", None)
ignore_comments = comment_char is not None
  with open(path(*components)) as file:
    lines = file.readlines()
# Multiple definitions to handle all cases.
if ignore_comments:
comment_line_re = re.compile(r"^\s*{char}".format(char=comment_char))
not_comment_re = re.compile(r"[^{char}]+".format(char=comment_char))
if rstrip:
# Ignore comments, and right strip results.
return [
re.match(not_comment_re, line).group(0).rstrip()
for line in lines
if not re.match(comment_line_re, line)
]
else:
# Ignore comments, and don't strip results.
return [
re.match(not_comment_re, line).group(0)
for line in lines
if not re.match(comment_line_re, line)
]
elif rstrip:
# No comments, and right strip results.
return [line.rstrip() for line in lines]
else:
# Just a good old-fashioned read!
return lines
def du(*components, **kwargs):
"""
Get the size of a file in bytes or as a human-readable string.
Arguments:
*components (str[]): Path to file.
**kwargs: If "human_readable" is True, return a formatted string,
e.g. "976.6 KiB" (default True)
Returns:
    int or str: If "human_readable" kwarg is True, return str, else int.
"""
human_readable = kwargs.get("human_readable", True)
_path = path(*components)
if not exists(_path):
raise Error("file '{}' not found".format(_path))
size = os.stat(_path).st_size
if human_readable:
    return humanize.naturalsize(size, binary = True)
else:
return size
def files_from_list(*paths):
"""
Return a list of all file paths from a list of files or directories.
For each path in the input: if it is a file, return it; if it is a
directory, return a list of files in the directory.
Arguments:
paths (list of str): List of file and directory paths.
Returns:
list of str: Absolute file paths.
Raises:
File404: If any of the paths do not exist.
"""
ret = []
for path in paths:
if isfile(path):
ret.append(abspath(path))
elif isdir(path):
ret += [f for f in ls(path, abspaths=True, recursive=True) if isfile(f)]
else:
raise File404(path)
return ret
def directory_is_empty(directory: pathlib.Path) -> bool:
"""Return if a directory is empty.
A directory which does not exist is considered empty (returns True). A
directory containing only subdirectories but no files is considered not empty
(returns False).
Args:
directory: The path of a directory.
Returns:
True if directory is empty, else False.
"""
for _, subdirs, files in os.walk(path(directory)):
if subdirs or files:
return False
return True
@contextlib.contextmanager
def chdir(directory: typing.Union[str, pathlib.Path]) -> pathlib.Path:
"""A context manager which allows you to temporarily change working directory.
Args:
directory: The directory to change to.
Returns:
The directory which has been changed to.
Raises:
OSError: If the given directory does not exist.
NotADirectoryError: If the given path is a file.
"""
previous_directory = pathlib.Path.cwd()
os.chdir(str(directory))
try:
yield pathlib.Path(directory)
finally:
os.chdir(str(previous_directory))
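# A minimal usage sketch for chdir(); the path below is only an illustrative
# assumption, not part of the module:
#
#   with chdir("/tmp"):
#     ...                        # the working directory is /tmp inside the block
#   # the previous working directory is restored on exit, even after exceptions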
@contextlib.contextmanager
def TemporaryWorkingDir(prefix: str = "phd_") -> pathlib.Path:
"""A context manager which provides a temporary working directory.
This creates an empty temporary directory, and changes the current working
  directory to it. Once out of scope, the directory and all its contents are
removed.
Args:
prefix: A prefix for the temporary directory name.
Returns:
The directory which has been changed to.
"""
  # getcwd() will raise FileNotFoundError if the current working directory
# does not exist.
old_directory = None
try:
old_directory = os.getcwd()
except FileNotFoundError:
pass
# Create a temporary directory, change to it, and return the path to the user.
with tempfile.TemporaryDirectory(prefix=prefix) as d:
os.chdir(d)
yield pathlib.Path(d)
# Return to previous working directory, if there was one.
if old_directory:
os.chdir(old_directory)
def Read(filename: typing.Union[str, pathlib.Path]) -> str:
"""Read entire contents of file with name 'filename'."""
with open(filename) as fp:
return fp.read()
def Write(
filename: typing.Union[str, pathlib.Path],
contents: bytes,
overwrite_existing: bool = True,
mode: int = 0o0666,
gid: int = None,
) -> pathlib.Path:
"""Create a file 'filename' with 'contents', with the mode given in 'mode'.
The 'mode' is modified by the umask, as in open(2). If
'overwrite_existing' is False, the file will be opened in O_EXCL mode.
An optional gid can be specified.
Args:
filename: the name of the file
contents: the data to write to the file
overwrite_existing: whether or not to allow the write if the file
already exists
mode: permissions with which to create the file (default is 0666 octal)
gid: group id with which to create the file
"""
# Adapted from <https://github.com/google/google-apputils>.
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
flags = os.O_WRONLY | os.O_TRUNC | os.O_CREAT
if not overwrite_existing:
flags |= os.O_EXCL
fd = os.open(filename, flags, mode)
try:
os.write(fd, contents)
finally:
os.close(fd)
if gid is not None:
os.chown(filename, -1, gid)
return pathlib.Path(filename)
def AtomicWrite(
filename: typing.Union[str, pathlib.Path],
contents: bytes,
mode: int = 0o0666,
gid: int = None,
) -> None:
"""Create a file 'filename' with 'contents' atomically.
As in Write, 'mode' is modified by the umask. This creates and moves
a temporary file, and errors doing the above will be propagated normally,
though it will try to clean up the temporary file in that case.
This is very similar to the prodlib function with the same name.
An optional gid can be specified.
Args:
filename: the name of the file
contents: the data to write to the file
mode: permissions with which to create the file (default is 0666 octal)
gid: group id with which to create the file
"""
# Adapted from <https://github.com/google/google-apputils>.
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename))
try:
os.write(fd, contents)
finally:
os.close(fd)
try:
os.chmod(tmp_filename, mode)
if gid is not None:
os.chown(tmp_filename, -1, gid)
os.rename(tmp_filename, filename)
except OSError as exc:
try:
os.remove(tmp_filename)
except OSError as e:
exc = OSError("%s. Additional errors cleaning up: %s" % (exc, e))
raise exc
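# A minimal usage sketch for AtomicWrite(); the file name and payload are only
# illustrative assumptions:
#
#   AtomicWrite("results/latest.json", b'{"status": "ok"}')
#
# The bytes are written to a temporary file in the destination directory and then
# renamed over the target, so readers never observe a partially written file.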
@contextlib.contextmanager
def TemporaryFileWithContents(contents: bytes, **kwargs):
"""A contextmanager that writes out a string to a file on disk.
This is useful whenever you need to call a function or command that expects a
file on disk with some contents that you have in memory. The context manager
abstracts the writing, flushing, and deletion of the temporary file. This is a
common idiom that boils down to a single with statement.
Note: if you need a temporary file-like object for calling an internal
function, you should use a StringIO as a file-like object and not this.
Temporary files should be avoided unless you need a file name or contents in a
file on disk to be read by some other function or program.
Args:
contents: a string with the contents to write to the file.
**kwargs: Optional arguments passed on to tempfile.NamedTemporaryFile.
Yields:
The temporary file object, opened in 'w' mode.
"""
# Adapted from <https://github.com/google/google-apputils>.
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if not kwargs.get("prefix"):
kwargs["prefix"] = "phd_tempfile_with_contents_"
temporary_file = tempfile.NamedTemporaryFile(**kwargs)
temporary_file.write(contents)
temporary_file.flush()
yield temporary_file
temporary_file.close()
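# A minimal, self-contained demo of a few helpers above. It is a sketch under the
# assumption that running this module directly is acceptable; file names are
# arbitrary examples.
if __name__ == "__main__":
  with TemporaryWorkingDir(prefix = "fs_demo_") as workdir:
    demo_file = Write(workdir / "hello.txt", b"hello world\n")
    print(ls(workdir))       # ['hello.txt']
    print(du(demo_file))     # humanized size, e.g. '12 Bytes'
    with TemporaryFileWithContents(b"scratch data\n") as tmp:
      print(Read(tmp.name))  # 'scratch data\n'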
| 19,969 | 24.834411 | 94 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/memory.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CPU and GPU memory usage monitor"""
import os
import humanize
import pathlib
import psutil
import threading
import time
import typing
from deeplearning.benchpress.util import gpu
from deeplearning.benchpress.util import monitors
def getRamUsage() -> typing.Dict[str, str]:
"""
Return memory usage of current PID without its child processes.
"""
  mem = psutil.Process(os.getpid()).memory_info()
  return {
    'rss'    : humanize.naturalsize(mem.rss),
    'vms'    : humanize.naturalsize(mem.vms),
    'shared' : humanize.naturalsize(mem.shared),
    'text'   : humanize.naturalsize(mem.text),
    'lib'    : humanize.naturalsize(mem.lib),
    'data'   : humanize.naturalsize(mem.data),
    'dirty'  : humanize.naturalsize(mem.dirty),
  }
def monRamUsage(path: pathlib.Path) -> None:
ram_monitor = monitors.HistoryMonitor(
path, "ram_usage"
)
main_process = psutil.Process(os.getpid())
while True:
try:
total_mem = (main_process.memory_info().rss +
sum([p.memory_info().rss
for p in main_process.children(recursive = True)]
)
)
    except psutil.NoSuchProcess:
total_mem = (main_process.memory_info().rss +
sum([p.memory_info().rss
for p in main_process.children(recursive = True)]
)
)
ram_monitor.register(total_mem / (1024**2)) # MB
ram_monitor.plot()
time.sleep(5)
return
def monGPUsage(path: pathlib.Path) -> None:
gpu_monitor = monitors.HistoryMonitor(
path, "gpu_usage"
)
main_process = psutil.Process(os.getpid())
while True:
process_pids = [main_process.pid] + [p.pid for p in main_process.children(recursive = True)]
total_mem = gpu.memUsageByPID(process_pids)
gpu_monitor.register(total_mem) # MB
gpu_monitor.plot()
time.sleep(5)
return
def init_mem_monitors(path: pathlib.Path) -> typing.Tuple[threading.Thread, threading.Thread]:
cpu_thread = threading.Thread(target = monRamUsage, args = (path,))
gpu_thread = threading.Thread(target = monGPUsage, args = (path,))
cpu_thread.setDaemon(True)
gpu_thread.setDaemon(True)
cpu_thread.start()
gpu_thread.start()
return cpu_thread, gpu_thread
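# A minimal usage sketch; the log directory below is an illustrative assumption:
#
#   from deeplearning.benchpress.util import memory
#   cpu_thread, gpu_thread = memory.init_mem_monitors(pathlib.Path("./mem_logs"))
#
# Both threads run as daemons, sampling RAM and GPU usage every 5 seconds and
# plotting the history under the given path until the parent process exits.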
| 2,892 | 31.875 | 96 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/tf.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper module to include tensorflow with some options"""
from absl import flags
import os
import re
from deeplearning.benchpress.util import gpu
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"tf_print_deprecation",
False,
"Print tensorflow deprecation warnings"
)
flags.DEFINE_boolean(
"tf_gpu_allow_growth",
True,
"Force tensorflow to allocate only needed space and not full GPU memory"
)
flags.DEFINE_integer(
"tf_logging_level",
3,
"Logging level of tensorflow logger"
)
flags.DEFINE_boolean(
"tf_disable_eager",
True,
"Select to enable or disable eager execution. As of now, all modules use graph mode, \
therefore eager execution must be disabled."
)
flags.DEFINE_string(
"tf_device",
"gpu",
"Select device to deploy application. Valid options are 'cpu', 'gpu' and 'tpu'. [Default]: 'gpu'"
"If GPU unavailable, it rolls back to CPU."
)
import tensorflow
tf = tensorflow
def initTensorflow():
from deeplearning.benchpress.util import logging as l
tensorflow.python.util.deprecation._PRINT_DEPRECATION_WARNINGS = FLAGS.tf_print_deprecation
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(FLAGS.tf_logging_level).lower()
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = str(FLAGS.tf_gpu_allow_growth).lower()
available_gpus = gpu.getGPUID()
try:
if FLAGS.tf_device == "tpu":
raise NotImplementedError
elif FLAGS.tf_device == "gpu" and available_gpus is not None:
l.logger().info("Selected GPU:{} {}".format(available_gpus[0]['id'], available_gpus[0]['gpu_name']))
os.environ['CUDA_VISIBLE_DEVICES'] = str(available_gpus[0]['id'])
elif re.search("gpu:[0-9]", FLAGS.tf_device) and available_gpus is not None:
gpuid = int(FLAGS.tf_device.split(':')[-1])
selected_gpu = None
for gp in available_gpus:
if int(gp['id']) == gpuid:
selected_gpu = gp
if selected_gpu is None:
raise ValueError("Invalid GPU ID: {}".format(gpuid))
l.logger().info("Selected GPU:{} {}".format(selected_gpu['id'], selected_gpu['gpu_name']))
os.environ['CUDA_VISIBLE_DEVICES'] = str(selected_gpu['id'])
else:
l.logger().info("Selected CPU device.")
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
if FLAGS.tf_logging_level == 0:
lvl = l.logger().level
l.logger().level = 'DEBUG'
l.logger().debug("TF 'CUDA_VISIBLE_DEVICES': {}".format(os.environ['CUDA_VISIBLE_DEVICES']))
l.logger().level = lvl
except RuntimeError as e:
raise e
if FLAGS.tf_disable_eager:
tensorflow.compat.v1.disable_eager_execution()
tf = tensorflow
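# A minimal usage sketch, assuming absl flags have already been parsed by the
# application entry point:
#
#   from deeplearning.benchpress.util import tf as tf_wrapper
#   tf_wrapper.initTensorflow()
#   graph_session = tf_wrapper.tf.compat.v1.Session()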
| 3,176 | 31.752577 | 106 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/http_server.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import portpicker
import queue
import multiprocessing
import waitress
import subprocess
import json
import typing
import requests
import time
import flask
import heapq
from absl import flags
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import environment
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"use_http_server",
False,
"Select to use http server in the app. If you set to True, the app will know how to use it with respect to the requested task."
)
flags.DEFINE_integer(
"http_port",
40822,
"Define port this current server listens to."
)
flags.DEFINE_string(
"http_server_ip_address",
"cc1.inf.ed.ac.uk",
"Set the target IP address of the host http server."
)
flags.DEFINE_list(
"http_server_peers",
[],
"Set comma-separated http address <dns_name:port> to load balance on secondary nodes."
)
flags.DEFINE_string(
"host_address",
"localhost",
"Specify address where http server will be set."
)
app = flask.Flask(__name__)
class FlaskHandler(object):
def __init__(self):
self.read_queue = None
self.write_queues = None
    self.reject_queues = None
self.peers = None
self.backlog = None
return
def set_params(self, read_queue, write_queues, reject_queues, manager, work_flag):
self.read_queue = read_queue
self.write_queues = write_queues
self.work_flag = work_flag
self.reject_queues = reject_queues
self.my_address = "http://{}:{}".format(FLAGS.http_server_ip_address, FLAGS.http_port)
self.peers = ["http://{}".format(s) for s in FLAGS.http_server_peers]
self.master_node = True if self.peers else False
self.manager = manager
self.backlog = []
return
handler = FlaskHandler()
@app.route('/write_message', methods=['PUT'])
def write_message(): # Expects a serialized JSON payload: one list of dictionaries.
  """
  This function receives new kernels that need to be computed.
  It collects a json payload of data and forwards it for computation.
Example command:
curl -X PUT http://localhost:PORT/write_message \
--header "Content-Type: application/json" \
-d @/path/to/json/file.json
"""
source = flask.request.headers.get("Server-Name")
if source is None:
return "Source address not provided.", 404
if source not in handler.write_queues:
handler.write_queues[source] = handler.manager.list()
if source not in handler.reject_queues:
handler.reject_queues[source] = handler.manager.list()
data = flask.request.json
if not isinstance(data, list):
return "ERROR: JSON Input has to be a list of dictionaries. One for each entry.\n", 400
if handler.master_node:
# 1. Read the pending queue from all peer nodes.
# A min heap is created that stores server nodes with their queue size.
heap = []
for add in handler.peers:
status, sc = client_read_queue_size(add)
size = status['read_queue_size']
if sc < 200:
l.logger().error("{}, {}".format(size, sc))
else:
heap.append([size, add])
heap.append([handler.read_queue.qsize(), handler.my_address])
heapq.heapify(heap)
# 2. Create the schedule: dict[node_address -> list of workload]
schedule = {}
for entry in data:
# For every kernel to be computed.
# Pop the server with the least load.
min_load = heapq.heappop(heap)
size, address = min_load
if address not in schedule:
schedule[address] = []
schedule[address].append(entry)
heapq.heappush(heap, [size+1, address])
# 3. For each compute node other than myself, do a write_message request.
for node, workload in schedule.items():
# If I need to add to my workload, just add to queue.
if node == handler.my_address:
for entry in workload:
handler.read_queue.put([source, entry])
# Otherwise run a request
else:
client_put_request(workload, address = node, servername = source)
else:
for entry in data:
handler.read_queue.put([source, entry])
return 'OK\n', 200
@app.route('/read_message', methods = ['GET'])
def read_message() -> bytes:
"""
Publish all the predicted results of the write_queue.
Before flushing the write_queue, save them into the backlog.
Example command:
curl -X GET http://localhost:PORT/read_message
"""
source = flask.request.headers.get("Server-Name")
if source not in handler.write_queues:
l.logger().warn("Source {} not in write_queues: {}".format(source, ', '.join(handler.write_queues.keys())))
ret = []
else:
ret = [r for r in handler.write_queues[source]]
handler.write_queues[source] = handler.manager.list()
handler.backlog += [[source, r] for r in ret]
if handler.master_node:
    # Copy the peer list so that polling does not mutate handler.peers.
    queue = list(handler.peers)
    while queue:
      peer = queue.pop(0)
      sc = client_status_request(address = peer, servername = source)[1]
if sc < 300:
ret += client_get_request(address = peer, servername = source)
else:
queue.append(peer)
time.sleep(2)
return bytes(json.dumps(ret), encoding="utf-8"), 200
@app.route('/read_rejects', methods = ['GET'])
def read_rejects() -> bytes:
"""
  Publish all the rejected kernels held in the reject_queue for the requesting
  client. Unlike read_message, the reject queue is not flushed.
Example command:
curl -X GET http://localhost:PORT/read_rejects
"""
source = flask.request.headers.get("Server-Name")
if source not in handler.reject_queues:
l.logger().warn("Source {} not in reject_queues: {}".format(source, ', '.join(handler.reject_queues.keys())))
ret = []
else:
ret = [r for r in handler.reject_queues[source]]
if handler.master_node:
for peer in handler.peers:
ret += client_get_rejects(address = peer, servername = source)
return bytes(json.dumps(ret), encoding="utf-8"), 200
@app.route('/read_reject_labels', methods = ['GET'])
def read_reject_labels() -> bytes:
"""
Get labels of rejected OpenCL kernels.
Example command:
curl -X GET http://localhost:PORT/read_reject_labels
"""
labels = {}
source = flask.request.headers.get("Server-Name")
if source is None:
return "Server-Name is undefined", 404
if source not in handler.reject_queues:
l.logger().warn("Source {} not in reject_queues: {}".format(source, ', '.join(handler.reject_queues.keys())))
ret = []
else:
ret = [r for r in handler.reject_queues[source]]
for c in ret:
if c['runtime_features']['label'] not in labels:
labels[c['runtime_features']['label']] = 1
else:
labels[c['runtime_features']['label']] += 1
if handler.master_node:
for peer in handler.peers:
peer_labels = client_read_reject_labels(address = peer, servername = source)
for lab, frq in peer_labels.items():
if lab not in labels:
labels[lab] = frq
else:
labels[lab] += frq
return bytes(json.dumps(labels), encoding="utf-8"), 200
@app.route('/read_queue_size', methods = ['GET'])
def read_queue_size() -> bytes:
"""
Read size of pending workload in read_queue for current compute node.
"""
  return str(handler.read_queue.qsize()), 200
@app.route('/get_backlog', methods = ['GET'])
def get_backlog() -> bytes:
"""
  In case a client-side error has occurred, the whole backlog is proactively
  kept in memory. To retrieve it, call this method.
Example command:
curl -X GET http://localhost:PORT/get_backlog
"""
backlog = handler.backlog
if handler.master_node:
for peer in handler.peers:
backlog += client_get_backlog(address = peer)
return bytes(json.dumps(backlog), encoding = "utf-8"), 200
@app.route('/status', methods = ['GET'])
def status():
"""
Read the workload status of the http server.
"""
source = flask.request.headers.get("Server-Name")
if source is None:
return "Server-Name is undefined", 404
status = {
'read_queue' : 'EMPTY' if handler.read_queue.empty() else 'NOT_EMPTY',
'write_queue' : 'EMPTY' if source not in handler.write_queues or len(handler.write_queues[source]) == 0 else 'NOT_EMPTY',
'reject_queue' : 'EMPTY' if source not in handler.reject_queues or len(handler.reject_queues[source]) == 0 else 'NOT_EMPTY',
'work_flag' : 'WORKING' if handler.work_flag.value else 'IDLE',
'read_queue_size' : handler.read_queue.qsize(),
'write_queue_size' : -1 if source not in handler.write_queues else len(handler.write_queues[source]),
'reject_queue_size' : -1 if source not in handler.reject_queues else len(handler.reject_queues[source]),
}
if handler.master_node:
for peer in handler.peers:
peer_status, sc = client_status_request(address = peer, servername = source)
if sc < 200:
l.logger().error("Error at {} /status".format(peer))
status['read_queue'] = 'EMPTY' if peer_status['read_queue'] == 'EMPTY' and status['read_queue'] == 'EMPTY' else 'NOT_EMPTY'
status['write_queue'] = 'EMPTY' if peer_status['write_queue'] == 'EMPTY' and status['write_queue'] == 'EMPTY' else 'NOT_EMPTY'
status['reject_queue'] = 'EMPTY' if peer_status['reject_queue'] == 'EMPTY' and status['reject_queue'] == 'EMPTY' else 'NOT_EMPTY'
status['work_flag'] = 'IDLE' if peer_status['work_flag'] == 'IDLE' and status['work_flag'] == 'IDLE' else 'WORKING'
status['read_queue_size'] += peer_status['read_queue_size']
status['write_queue_size'] += peer_status['write_queue_size']
status['reject_queue_size'] += peer_status['reject_queue_size']
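  # The HTTP status code encodes the aggregate queue state: 200 = both queues
  # empty, 201 = only the write queue has pending items, 202 = only the read
  # queue has pending items, 203 = both have pending items; 100 is added
  # (300..303) while the work flag reports active processing.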
if status['read_queue'] == 'EMPTY' and status['write_queue'] == 'EMPTY':
return bytes(json.dumps(status), encoding = 'utf-8'), 200 + (100 if handler.work_flag.value else 0)
elif status['read_queue'] == 'EMPTY' and status['write_queue'] == 'NOT_EMPTY':
return bytes(json.dumps(status), encoding = 'utf-8'), 201 + (100 if handler.work_flag.value else 0)
elif status['read_queue'] == 'NOT_EMPTY' and status['write_queue'] == 'EMPTY':
return bytes(json.dumps(status), encoding = 'utf-8'), 202 + (100 if handler.work_flag.value else 0)
elif status['read_queue'] == 'NOT_EMPTY' and status['write_queue'] == 'NOT_EMPTY':
return bytes(json.dumps(status), encoding = 'utf-8'), 203 + (100 if handler.work_flag.value else 0)
@app.route('/ping', methods = ['PUT'])
def ping():
"""
A peer compute node receives a ping from master node before initializing the compute network.
"""
source = flask.request.headers.get("Server-Name")
if source is None:
return "Server-Name is undefined", 404
data = flask.request.json
handler.peers = [x for x in data['peers'] if x != handler.my_address] + [data['master']]
return ",".join(handler.peers), 200
@app.route('/', methods = ['GET', 'POST', 'PUT'])
def index():
"""
  Render a status page for this compute node: pending read-queue size, work
  flag and per-client write/reject queue sizes.
  Example command:
    curl -X GET http://localhost:PORT/
"""
multi_status = {
'read_queue' : 'EMPTY' if handler.read_queue.empty() else 'NOT_EMPTY',
'read_queue_size' : handler.read_queue.qsize(),
'work_flag' : 'WORKING' if handler.work_flag.value else 'IDLE',
}
it = set(handler.write_queues.keys())
it.update(set(handler.reject_queues.keys()))
multi_status['out_servers'] = {}
for hn in it:
status = {
'write_queue' : 'EMPTY' if hn in handler.write_queues and len(handler.write_queues[hn]) == 0 else 'NOT_EMPTY',
'reject_queue' : 'EMPTY' if hn in handler.reject_queues and len(handler.reject_queues[hn]) == 0 else 'NOT_EMPTY',
'write_queue_size' : len(handler.write_queues[hn]) if hn in handler.write_queues else 0,
'reject_queue_size' : len(handler.reject_queues[hn]) if hn in handler.reject_queues else 0,
}
multi_status['out_servers'][hn] = status
return flask.render_template("index.html", data = multi_status)
def http_serve(read_queue : multiprocessing.Queue,
write_queues : 'multiprocessing.Dict',
reject_queues : 'multiprocessing.Dict',
work_flag : multiprocessing.Value,
manager : multiprocessing.Manager,
) -> None:
"""
Run http server for read and write workload queues.
"""
try:
port = FLAGS.http_port
if port is None:
port = portpicker.pick_unused_port()
handler.set_params(read_queue, write_queues, reject_queues, manager, work_flag)
hostname = subprocess.check_output(
["hostname", "-i"],
stderr = subprocess.STDOUT,
).decode("utf-8").replace("\n", "").split(' ')
if len(hostname) == 2:
ips = "ipv4: {}, ipv6: {}".format(hostname[1], hostname[0])
else:
ips = "ipv4: {}".format(hostname[0])
l.logger().warn("Server Public IP: {}:{}".format(ips, port))
if handler.master_node:
l.logger().info("This is master compute server at {}.".format(handler.my_address))
l.logger().info("Idling until I ensure all peer compute servers are responding:\n{}".format('\n'.join(handler.peers)))
queue = [[p, 0] for p in handler.peers]
while queue:
cur = queue.pop(0)
_, sc = ping_peer_request(cur[0], handler.peers, handler.my_address)
if sc != 200:
queue.append([cur[0], cur[1] + 1])
else:
l.logger().info("Successfully connected to {}, {} attempts".format(cur[0], cur[1]))
time.sleep(5)
l.logger().info("Successfully connected to all peers")
waitress.serve(app, host = FLAGS.host_address, port = port)
except KeyboardInterrupt:
return
except Exception as e:
raise e
return
##########################
# Client request methods #
##########################
def ping_peer_request(peer: str, peers: typing.List[str], master_node: str) -> typing.Tuple[typing.Any, int]:
  """
  Master compute node pings a peer compute node to check if it's alive.
If so, also pass the information of all peers that must be alive
inside the compute network.
"""
try:
r = requests.put(
"{}/ping".format(peer),
data = json.dumps({'peers': peers, 'master': master_node}),
headers = {
"Content-Type": "application/json",
"Server-Name": environment.HOSTNAME}
)
except Exception as e:
l.logger().warn("PUT status Request at {}/ping has failed.".format(peer))
print(e)
return None, 404
return r.content, r.status_code
def client_status_request(address: str = None, servername: str = None) -> typing.Tuple[typing.Dict, int]:
"""
Get status of http server.
"""
try:
if FLAGS.http_port == -1 or address:
r = requests.get(
"{}/status".format(FLAGS.http_server_ip_address if not address else address),
headers = {"Server-Name": (environment.HOSTNAME if not servername else servername)}
)
else:
r = requests.get(
"http://{}:{}/status".format(FLAGS.http_server_ip_address, FLAGS.http_port),
headers = {"Server-Name": (environment.HOSTNAME if not servername else servername)}
)
except Exception as e:
l.logger().error("GET status Request at {}:{} has failed.".format(FLAGS.http_server_ip_address, FLAGS.http_port))
raise e
return r.json(), r.status_code
def client_get_request(address: str = None, servername: str = None) -> typing.List[typing.Dict]:
"""
Helper function to perform get request at /read_message of http target host.
"""
try:
if FLAGS.http_port == -1 or address:
r = requests.get(
"{}/read_message".format(FLAGS.http_server_ip_address if not address else address),
headers = {"Server-Name": (environment.HOSTNAME if servername is None else servername)}
)
else:
r = requests.get(
"http://{}:{}/read_message".format(FLAGS.http_server_ip_address, FLAGS.http_port),
headers = {"Server-Name": (environment.HOSTNAME if servername is None else servername)}
)
except Exception as e:
l.logger().error("GET Request at {}:{} has failed.".format(FLAGS.http_server_ip_address, FLAGS.http_port))
raise e
if r.status_code == 200:
return r.json()
else:
l.logger().error("Error code {} in read_message request.".format(r.status_code))
return None
def client_get_rejects(address: str = None, servername: str = None) -> typing.List[typing.Dict]:
"""
Helper function to perform get request at /read_rejects of http target host.
"""
try:
if FLAGS.http_port == -1 or address:
r = requests.get(
"{}/read_rejects".format(FLAGS.http_server_ip_address if not address else address),
headers = {"Server-Name": (environment.HOSTNAME if servername is None else servername)}
)
else:
r = requests.get(
"http://{}:{}/read_rejects".format(FLAGS.http_server_ip_address, FLAGS.http_port),
headers = {"Server-Name": (environment.HOSTNAME if servername is None else servername)}
)
except Exception as e:
l.logger().error("GET Request at {}:{} has failed.".format(FLAGS.http_server_ip_address, FLAGS.http_port))
raise e
if r.status_code == 200:
return r.json()
else:
l.logger().error("Error code {} in read_rejects request.".format(r.status_code))
return None
def client_read_reject_labels(address: str = None, servername: str = None) -> typing.List[typing.Dict]:
"""
Read the frequency table of labels for rejected benchmarks.
"""
try:
if FLAGS.http_port == -1 or address:
r = requests.get(
"{}/read_reject_labels".format(FLAGS.http_server_ip_address if not address else address),
headers = {"Server-Name": (environment.HOSTNAME if servername is None else servername)}
)
else:
r = requests.get(
"http://{}:{}/read_reject_labels".format(FLAGS.http_server_ip_address, FLAGS.http_port),
headers = {"Server-Name": (environment.HOSTNAME if servername is None else servername)}
)
except Exception as e:
l.logger().error("GET Request at {}:{} has failed.".format(FLAGS.http_server_ip_address, FLAGS.http_port))
raise e
if r.status_code == 200:
return r.json()
else:
l.logger().error("Error code {} in read_reject_labels request.".format(r.status_code))
return None
def client_get_backlog(address: str = None) -> typing.List[typing.Dict]:
"""
Read backlog from compute node.
"""
try:
if FLAGS.http_port == -1 or address:
r = requests.get(
"{}/get_backlog".format(FLAGS.http_server_ip_address if not address else address),
)
else:
r = requests.get(
"http://{}:{}/get_backlog".format(FLAGS.http_server_ip_address, FLAGS.http_port),
)
except Exception as e:
l.logger().error("GET Request at {}:{} has failed.".format(FLAGS.http_server_ip_address, FLAGS.http_port))
raise e
if r.status_code == 200:
return r.json()
else:
l.logger().error("Error code {} in get_backlog request.".format(r.status_code))
return None
def client_put_request(msg: typing.List[typing.Dict], address: str = None, servername: str = None) -> None:
"""
Helper function to perform put at /write_message of http target host.
"""
try:
if FLAGS.http_port == -1 or address:
r = requests.put(
"{}/write_message".format(FLAGS.http_server_ip_address if not address else address),
data = json.dumps(msg),
headers = {
"Content-Type": "application/json",
"Server-Name": (environment.HOSTNAME if servername is None else servername)
}
)
else:
r = requests.put(
"http://{}:{}/write_message".format(FLAGS.http_server_ip_address, FLAGS.http_port),
data = json.dumps(msg),
headers = {
"Content-Type": "application/json",
"Server-Name": (environment.HOSTNAME if servername is None else servername)
}
)
except Exception as e:
l.logger().error("PUT Request at {}:{} has failed.".format(FLAGS.http_server_ip_address, FLAGS.http_port))
raise e
if r.status_code != 200:
l.logger().error("Error code {} in write_message request.".format(r.status_code))
return
def client_read_queue_size(address: str) -> int:
"""
Read the pending queue size of a compute node.
"""
try:
r = requests.get("{}/status".format(address), headers = {"Server-Name": environment.HOSTNAME})
except Exception as e:
l.logger().error("GET status Request at {} has failed.".format(address))
raise e
return r.json(), r.status_code
########################
def start_server_process() -> typing.Tuple[multiprocessing.Process, multiprocessing.Value, multiprocessing.Queue, typing.Dict, typing.Dict]:
"""
This is an easy wrapper to start server from parent routine.
Starts a new process or thread and returns all the multiprocessing
elements needed to control the server.
"""
m = multiprocessing.Manager()
rq, wqs, rjqs = multiprocessing.Queue(), m.dict(), m.dict()
wf = multiprocessing.Value('i', False)
p = multiprocessing.Process(
target = http_serve,
kwargs = {
'read_queue' : rq,
'write_queues' : wqs,
'reject_queues' : rjqs,
'work_flag' : wf,
'manager' : m,
}
)
p.daemon = True
p.start()
return p, wf, rq, wqs, rjqs
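# A minimal usage sketch for the parent routine; names on the left are local
# variables of the caller, not part of this module:
#
#   proc, work_flag, read_queue, write_queues, reject_queues = start_server_process()
#   workload = read_queue.get()       # kernels submitted via /write_message
#   ...                               # compute, then append results per client
#   proc.terminate()                  # stop serving when done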
| 21,868 | 36.066102 | 140 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/logging.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tailor made logging module."""
import logging
import typing
from eupy.hermes import client
_logger = None
NOTSET = logging.NOTSET
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def purple(string, callback = None):
if callback:
string = callback[0](string, callback = [x for x in callback if x != callback[0]])
return "{}{}{}".format(PURPLE, string, END)
def cyan(string, callback = None):
if callback:
string = callback[0](string, callback = [x for x in callback if x != callback[0]])
return "{}{}{}".format(CYAN, string, END)
def darkcyan(string, callback = None):
if callback:
string = callback[0](string, callback = [x for x in callback if x != callback[0]])
return "{}{}{}".format(DARKCYAN, string, END)
def blue(string, callback = None):
if callback:
string = callback[0](string, callback = [x for x in callback if x != callback[0]])
return "{}{}{}".format(BLUE, string, END)
def green(string, callback = None):
if callback:
string = callback[0](string, callback = [x for x in callback if x != callback[0]])
return "{}{}{}".format(GREEN, string, END)
def yellow(string, callback = None):
if callback:
string = callback[0](string, callback = [x for x in callback if x != callback[0]])
return "{}{}{}".format(YELLOW, string, END)
def red(string, callback = None):
if callback:
string = callback[0](string, callback = [x for x in callback if x != callback[0]])
return "{}{}{}".format(RED, string, END)
def bold(string, callback = None):
if callback:
string = callback[0](string, callback = [x for x in callback if x != callback[0]])
return "{}{}{}".format(BOLD, string, END)
def underline(string, callback = None):
if callback:
string = callback[0](string, callback = [x for x in callback if x != callback[0]])
return "{}{}{}".format(UNDERLINE, string, END)
def output(string, *args):
if args:
string = args[0](string, callback = [x for x in args if x != args[0]])
return string
class Logger:
"""
Logger class API.
"""
def __init__(self,
name : str,
level : int,
mail_client : client.gmail,
rank : int,
):
self.mail_client = mail_client
self.rank = rank
self.configLogger(name, level)
return
def configLogger(self, name, level):
# create logger
logging.root.handlers = []
self.log = logging.getLogger(name)
self.log.setLevel(level)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(level)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
if not self.log.handlers:
self.log.addHandler(ch)
self.log.propagate = False
self.info("Logger has been initialized")
return
@property
def handlers(self):
return self.log.handlers
@property
def logger(self):
return self.log
@property
def level(self):
return logging.getLevelName(self.log.level)
@level.setter
def level(self, lvl):
self.log.setLevel(lvl)
self.handlers[0].setLevel(lvl)
return
"""
Main logging functions
"""
def debug(self,
message : str,
color : bool = True,
ddp_nodes : bool = False
) -> None:
if self.rank == 0 or ddp_nodes:
if ddp_nodes:
message = "N{}: {}".format(self.rank, message)
if color:
message = output(message, bold, green)
if self.mail_client:
self.mail_client.send_message("Logger", message)
self.log.debug(message)
return
def info(self,
message : str,
color : bool = True,
ddp_nodes : bool = False
) -> None:
if self.rank == 0 or ddp_nodes:
if ddp_nodes:
message = "N{}: {}".format(self.rank, message)
if color:
message = output(message, bold, cyan)
if self.mail_client:
self.mail_client.send_message("Logger", message)
self.log.info(message)
return
def warning(self,
message : str,
color : bool = True,
ddp_nodes : bool = False
) -> None:
if self.rank == 0 or ddp_nodes:
if ddp_nodes:
message = "N{}: {}".format(self.rank, message)
if color:
message = output(message, bold, yellow)
if self.mail_client:
self.mail_client.send_message("Logger", message)
self.log.warning(message)
return
def warn(self,
message : str,
color : bool = True,
ddp_nodes : bool = False
) -> None:
if self.rank == 0 or ddp_nodes:
if ddp_nodes:
message = "N{}: {}".format(self.rank, message)
if color:
message = output(message, bold, yellow)
if self.mail_client:
self.mail_client.send_message("Logger", message)
self.log.warn(message)
return
def error(self,
message : str,
color : bool = True,
ddp_nodes : bool = False
) -> None:
if self.rank == 0 or ddp_nodes:
if ddp_nodes:
message = "N{}: {}".format(self.rank, message)
if color:
message = output(message, bold, red)
if self.mail_client:
self.mail_client.send_message("Logger", message)
self.log.error(message)
return
def critical(self,
message : str,
color : bool = True,
ddp_nodes : bool = False
) -> None:
if self.rank == 0 or ddp_nodes:
if ddp_nodes:
message = "N{}: {}".format(self.rank, message)
if color:
message = output(message, bold, underline, red)
if self.mail_client:
self.mail_client.send_message("Logger", message)
self.log.critical(message)
return
def shutdown(self):
logging.shutdown()
return
def logger() -> Logger:
global _logger
if not _logger:
initLogger()
return _logger
def initLogger(name = "", lvl = logging.INFO, mail = None, rank = 0):
global _logger
_logger = Logger(name, lvl, mail, rank)
return _logger
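# A minimal, runnable sketch of the module-level singleton, assuming this module
# is executed directly:
if __name__ == "__main__":
  log = initLogger(name = "demo", lvl = INFO)
  log.info("logger ready")
  log.warning("something to look at", color = False)
  log.shutdown()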
| 7,158 | 27.185039 | 86 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/pytorch.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper module to include pytorch with some options"""
from absl import flags
import os
import datetime
from deeplearning.benchpress.util import gpu
from deeplearning.benchpress.util import environment
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"pt_cpu_only",
False,
"Do not use GPU/TPU in pytorch session."
)
import torch
try:
  import torch_xla.core.xla_model
  import torch_xla.debug.metrics
  import torch_xla.distributed.parallel_loader
  # Bind the submodules before rebinding the package name `torch_xla`, otherwise
  # the lookups below would be attempted on xla_model and raise AttributeError.
  torch_xla_met = torch_xla.debug.metrics
  torch_ploader = torch_xla.distributed.parallel_loader
  torch_xla     = torch_xla.core.xla_model
torch_tpu_available = True
except ImportError:
torch_tpu_available = False
offset_device = None
devices = None
device = None
num_gpus = None
num_nodes = None
initialized = False
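# A minimal usage sketch, assuming the distributed environment variables read by
# the `environment` module are already set; `MyModel` is only a placeholder name:
#
#   from deeplearning.benchpress.util import pytorch
#   pytorch.initPytorch()
#   model = MyModel().to(pytorch.device)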
def initPytorch() -> None:
global torch_tpu_available
global offset_device
global devices
global device
global num_gpus
global num_nodes
global initialized
if FLAGS.pt_cpu_only:
device = torch.device("cpu")
num_gpus = 0
num_nodes = 1
elif torch_tpu_available:
device = torch_xla.xla_device()
num_gpus = 0
elif environment.WORLD_SIZE == 1 and torch.cuda.is_available():
# if num_gpus is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
offset_device = torch.device("cuda:0")
device = torch.device("cuda:0")
available_gpus = gpu.getGPUID()
devices = ["cuda:{}".format(str(x['id'])) for x in available_gpus]
num_nodes = 1
num_gpus = torch.cuda.device_count()
if device.type == "cuda":
torch.cuda.set_device(device)
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs.
# This branch will trigger DistributedDataParalel instead of simple DP.
# Distributed training prohibits manual selection of GPUs and takes for granted that cuda is available.
ddp_backend = "nccl" if torch.cuda.is_available() else "gloo"
tcp_store = torch.distributed.TCPStore(
environment.MASTER_ADDR,
environment.MASTER_PORT,
environment.WORLD_SIZE,
environment.WORLD_RANK == 0
)
torch.distributed.init_process_group(
backend = ddp_backend,
store = tcp_store,
rank = environment.WORLD_RANK,
world_size = environment.WORLD_SIZE,
timeout = datetime.timedelta(days = 3)
)
num_nodes = torch.distributed.get_world_size()
num_gpus = torch.cuda.device_count()
if num_gpus == 0:
device = torch.device('cpu', environment.LOCAL_RANK)
offset_device = torch.device('cpu', environment.LOCAL_RANK)
else:
device = torch.device("cuda", environment.LOCAL_RANK)
offset_device = torch.device("cuda", environment.LOCAL_RANK)
torch.cuda.set_device(device)
initialized = True
return | 3,833 | 32.33913 | 107 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/proxy_bash.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A tool that helps running bash commands using BenchPress as proxy.
Especially useful when deploying BenchPress on clusters but still
need to keep an eye on resources (e.g. nvidia-smi) or files.
"""
import subprocess
import threading
from deeplearning.benchpress.util import environment
def listen() -> None:
"""
Listen for bash commands from standard input
and execute using a subprocess PIPE.
"""
while True:
cmd = input()
if cmd[:3] == ">> ":
cmd = cmd[3:]
try:
pr = subprocess.Popen(cmd.split(), stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = pr.communicate()
print(stdout.decode('utf-8'))
if stderr:
print(stderr.decode('utf-8'))
except FileNotFoundError:
print("{}: command not found".format(cmd))
return
def start() -> None:
"""
Initialize daemon thread to run your proxy bash commands.
"""
if environment.WORLD_RANK == 0:
th = threading.Thread(
target = listen,
daemon = True
)
th.start()
return
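# A minimal usage sketch: call start() once from the main process, then type
# shell commands on standard input (an optional ">> " prefix is stripped), e.g.
#
#   >> nvidia-smi
#   >> ls -lh checkpoints/
#
# The daemon thread executes each command and prints its stdout and stderr.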
| 1,651 | 28.5 | 94 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/socket_server.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import multiprocessing
import threading
import time
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"use_socket_server",
False,
"Select to use socket server in the app. If you set to True, the app will know how to use it with respect to the requested task."
)
flags.DEFINE_string(
"target_host",
None,
"Define IP Address of target socket server."
)
flags.DEFINE_integer(
"listen_port",
None,
"Define port this current server listens to."
)
flags.DEFINE_integer(
"send_port",
None,
"Define port this current server listens to."
)
MAX_PAYLOAD_SIZE = 65535
def listen_read_queue(read_queue : multiprocessing.Queue,
port : int,
status : multiprocessing.Value,
listen_status : multiprocessing.Value,
) -> None:
"""
Keep a socket connection open, listen to incoming traffic
  and populate the read_queue.
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind to socket.
s.bind(('0.0.0.0', port))
# Set listen settings
s.listen(2**16)
# Block until connection is established.
try:
conn, addr = s.accept()
while status.value:
data = conn.recv(MAX_PAYLOAD_SIZE)
if len(data) > 0:
read_queue.put(data)
else:
break
conn.close()
except KeyboardInterrupt:
try:
conn.close()
except Exception:
pass
raise KeyboardInterrupt
except Exception as e:
try:
conn.close()
except Exception:
pass
raise e
s.close()
except KeyboardInterrupt:
s.close()
except Exception as e:
s.close()
raise e
listen_status.value = False
return
def send_write_queue(write_queue : multiprocessing.Queue,
host : str,
port : int,
status : multiprocessing.Value,
send_status : multiprocessing.Value,
) -> None:
"""
Keep scanning for new unpublished data in write_queue.
Fetch them and send them over to the out socket connection.
"""
try:
# Create a socket connection.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while status.value:
try:
s.connect((host, port))
break
except Exception:
time.sleep(1)
while status.value:
cur = write_queue.get()
try:
s.send(cur)
except BrokenPipeError:
break
s.close()
except KeyboardInterrupt:
s.close()
except Exception as e:
s.close()
raise e
send_status.value = False
return
def socket_serve(read_queue : multiprocessing.Queue,
write_queue : multiprocessing.Queue,
status_bit : multiprocessing.Value,
listen_status : multiprocessing.Value,
send_status : multiprocessing.Value,
) -> None:
"""
A standalone daemon process executes this function and serves.
  Its purpose is to populate the input queue and publish the output queue.
"""
target_host = FLAGS.target_host
listen_port = FLAGS.listen_port
send_port = FLAGS.send_port
if listen_port is None:
status_bit.value = False
listen_status.value = False
send_status.value = False
raise ValueError("You have to define listen_port to use the socket server.")
if send_port is None:
status_bit.value = False
listen_status.value = False
send_status.value = False
raise ValueError("You have to define send_port to use the socket server.")
if target_host is None:
status_bit.value = False
listen_status.value = False
send_status.value = False
raise ValueError("You have to define the IP of the target server to use the socket server.")
try:
lp = multiprocessing.Process(
target = listen_read_queue,
kwargs = {
'read_queue' : read_queue,
'port' : listen_port,
'status' : status_bit,
'listen_status' : listen_status,
}
)
sp = multiprocessing.Process(
target = send_write_queue,
kwargs = {
'write_queue' : write_queue,
'host' : target_host,
'port' : send_port,
'status' : status_bit,
'send_status' : send_status,
}
)
lp.start()
sp.start()
while status_bit.value:
time.sleep(1)
lp.join(timeout = 20)
sp.join(timeout = 20)
lp.terminate()
sp.terminate()
except KeyboardInterrupt:
status_bit.value = False
lp.join(timeout = 20)
sp.join(timeout = 20)
lp.terminate()
sp.terminate()
except Exception as e:
status_bit.value = False
lp.join(timeout = 20)
sp.join(timeout = 20)
lp.terminate()
sp.terminate()
raise e
return
def start_server_process():
"""
  This is an easy wrapper to start the server from a parent routine.
Starts a new process or thread and returns all the multiprocessing
elements needed to control the server.
"""
rq, wq = multiprocessing.Queue(), multiprocessing.Queue()
sb, rb, wb = multiprocessing.Value('i', True), multiprocessing.Value('i', True), multiprocessing.Value('i', True)
p = multiprocessing.Process(
target = socket_serve,
kwargs = {
'read_queue' : rq,
'write_queue' : wq,
'status_bit' : sb,
'listen_status' : rb,
'send_status' : wb,
}
)
# p.daemon = True
p.start()
return p, sb, (rq, rb), (wq, wb)
def start_thread_process():
"""
  This is an easy wrapper to start the server from a parent routine.
Starts a new process or thread and returns all the multiprocessing
elements needed to control the server.
"""
rq, wq = multiprocessing.Queue(), multiprocessing.Queue()
sb, rb, wb = multiprocessing.Value('i', True), multiprocessing.Value('i', True), multiprocessing.Value('i', True)
th = threading.Thread(
target = socket_serve,
kwargs = {
'read_queue' : rq,
'write_queue' : wq,
'status_bit' : sb,
'listen_status' : rb,
'send_status' : wb,
},
)
th.start()
return None, sb, (rq, rb), (wq, wb)
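# A minimal usage sketch (illustrative only, not referenced elsewhere in this
# module): how a parent routine might drive the server started by
# start_server_process(). It assumes the absl flags target_host, listen_port
# and send_port have already been parsed.
def _example_parent_usage() -> None:
  """Start the server, exchange one message with the peer, then shut down."""
  proc, status_bit, (read_queue, _), (write_queue, _) = start_server_process()
  # Queue a payload; send_write_queue will pick it up and push it to the peer.
  write_queue.put(b"hello peer")
  # Block until the peer sends something back over the listen port.
  incoming = read_queue.get()
  l.logger().info("Received {} bytes".format(len(incoming)))
  # Signal both worker processes to stop and wait for the daemon to exit.
  status_bit.value = False
  proc.join(timeout = 30)
  return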
| 6,885 | 26.110236 | 131 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/cldrive_server.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import portpicker
import queue
import multiprocessing
import waitress
import subprocess
import json
import typing
import requests
import flask
from absl import flags
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import environment
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"use_http_server",
False,
"Select to use http server in the app. If you set to True, the app will know how to use it with respect to the requested task."
)
flags.DEFINE_integer(
"http_port",
40822,
"Define port this current server listens to."
)
flags.DEFINE_string(
"http_server_ip_address",
"cc1.inf.ed.ac.uk",
"Set the target IP address of the host http server."
)
app = flask.Flask(__name__)
class FlaskHandler(object):
def __init__(self):
self.read_queue = None
self.write_queues = None
self.reject_queue = None
self.backlog = None
return
def set_params(self, read_queue, write_queues, reject_queues, manager, work_flag):
self.read_queue = read_queue
self.write_queues = write_queues
self.work_flag = work_flag
self.reject_queues = reject_queues
self.manager = manager
self.backlog = []
return
handler = FlaskHandler()
@app.route('/write_message', methods=['PUT'])
def write_message(): # Expects a serialized json payload: one list of dictionaries.
"""
  Collect a json file with data and send it for computation.
Example command:
curl -X PUT http://localhost:PORT/write_message \
--header "Content-Type: application/json" \
-d @/path/to/json/file.json
"""
source = flask.request.headers.get("Server-Name")
if source is None:
return "Source address not provided.", 404
if source not in handler.write_queues:
handler.write_queues[source] = handler.manager.list()
if source not in handler.reject_queues:
handler.reject_queues[source] = handler.manager.list()
data = flask.request.json
if not isinstance(data, list):
return "ERROR: JSON Input has to be a list of dictionaries. One for each entry.\n", 400
for entry in data:
handler.read_queue.put([source, entry])
return 'OK\n', 200
@app.route('/read_message', methods = ['GET'])
def read_message() -> bytes:
"""
Publish all the predicted results of the write_queue.
Before flushing the write_queue, save them into the backlog.
Example command:
curl -X GET http://localhost:PORT/read_message
"""
source = flask.request.headers.get("Server-Name")
ret = [r for r in handler.write_queues[source]]
handler.write_queues[source] = handler.manager.list()
handler.backlog += [[source, r] for r in ret]
return bytes(json.dumps(ret), encoding="utf-8"), 200
@app.route('/read_rejects', methods = ['GET'])
def read_rejects() -> bytes:
"""
  Publish all rejected entries for the requesting host.
  Unlike /read_message, the reject queue is not flushed into the backlog.
Example command:
curl -X GET http://localhost:PORT/read_rejects
"""
source = flask.request.headers.get("Server-Name")
ret = [r for r in handler.reject_queues[source]]
return bytes(json.dumps(ret), encoding="utf-8"), 200
@app.route('/read_reject_labels', methods = ['GET'])
def read_reject_labels() -> bytes:
"""
Get labels of rejected OpenCL kernels.
Example command:
curl -X GET http://localhost:PORT/read_reject_labels
"""
labels = {}
source = flask.request.headers.get("Server-Name")
if source is None:
return "Server-Name is undefined", 404
ret = [r for r in handler.reject_queues[source]]
for c in ret:
if c['runtime_features']['label'] not in labels:
labels[c['runtime_features']['label']] = 1
else:
labels[c['runtime_features']['label']] += 1
return bytes(json.dumps(labels), encoding="utf-8"), 200
@app.route('/read_queue_size', methods = ['GET'])
def read_queue_size() -> bytes:
"""
Read size of pending workload in read_queue.
"""
  return str(handler.read_queue.qsize()), 200
@app.route('/get_backlog', methods = ['GET'])
def get_backlog() -> bytes:
"""
  In case a client-side error has occurred, the whole backlog is proactively
  stored in memory. To retrieve it, call this method.
Example command:
curl -X GET http://localhost:PORT/get_backlog
"""
return bytes(json.dumps(handler.backlog), encoding = "utf-8"), 200
@app.route('/status', methods = ['GET'])
def status():
"""
Read the workload status of the http server.
"""
source = flask.request.headers.get("Server-Name")
if source is None:
return "Server-Name is undefined", 404
status = {
'read_queue' : 'EMPTY' if handler.read_queue.empty() else 'NOT_EMPTY',
'write_queue' : 'EMPTY' if len(handler.write_queues[source]) == 0 else 'NOT_EMPTY',
'reject_queue' : 'EMPTY' if len(handler.reject_queues[source]) == 0 else 'NOT_EMPTY',
'work_flag' : 'WORKING' if handler.work_flag.value else 'IDLE',
'read_queue_size' : handler.read_queue.qsize(),
'write_queue_size' : len(handler.write_queues[source]),
'reject_queue_size' : len(handler.reject_queues[source]),
}
if status['read_queue'] == 'EMPTY' and status['write_queue'] == 'EMPTY':
return bytes(json.dumps(status), encoding = 'utf-8'), 200 + (100 if handler.work_flag.value else 0)
elif status['read_queue'] == 'EMPTY' and status['write_queue'] == 'NOT_EMPTY':
return bytes(json.dumps(status), encoding = 'utf-8'), 201 + (100 if handler.work_flag.value else 0)
elif status['read_queue'] == 'NOT_EMPTY' and status['write_queue'] == 'EMPTY':
return bytes(json.dumps(status), encoding = 'utf-8'), 202 + (100 if handler.work_flag.value else 0)
elif status['read_queue'] == 'NOT_EMPTY' and status['write_queue'] == 'NOT_EMPTY':
return bytes(json.dumps(status), encoding = 'utf-8'), 203 + (100 if handler.work_flag.value else 0)
@app.route('/', methods = ['GET', 'POST', 'PUT'])
def index():
"""
  Report the read queue size, the work flag and the per-host write/reject
  queue status.
  Example command:
    curl -X GET http://localhost:PORT/
"""
multi_status = {
'read_queue' : 'EMPTY' if handler.read_queue.empty() else 'NOT_EMPTY',
'read_queue_size' : handler.read_queue.qsize(),
'work_flag' : 'WORKING' if handler.work_flag.value else 'IDLE',
}
it = set(handler.write_queues.keys())
it.update(set(handler.reject_queues.keys()))
for hn in it:
status = {
'write_queue' : 'EMPTY' if hn in handler.write_queues and len(handler.write_queues[hn]) == 0 else 'NOT_EMPTY',
'reject_queue' : 'EMPTY' if hn in handler.reject_queues and len(handler.reject_queues[hn]) == 0 else 'NOT_EMPTY',
'write_queue_size' : len(handler.write_queues[hn]) if hn in handler.write_queues else 0,
'reject_queue_size' : len(handler.reject_queues[hn]) if hn in handler.reject_queues else 0,
}
multi_status[hn] = status
return json.dumps(multi_status), 200
def client_status_request() -> typing.Tuple:
"""
Get status of http server.
"""
try:
r = requests.get("http://{}:{}/status".format(FLAGS.http_server_ip_address, FLAGS.http_port), headers = {"Server-Name": environment.HOSTNAME})
except Exception as e:
l.logger().error("GET status Request at {}:{} has failed.".format(FLAGS.http_server_ip_address, FLAGS.http_port))
raise e
return r.json(), r.status_code
def client_get_request() -> typing.List[typing.Dict]:
"""
Helper function to perform get request at /read_message of http target host.
"""
try:
r = requests.get("http://{}:{}/read_message".format(FLAGS.http_server_ip_address, FLAGS.http_port), headers = {"Server-Name": environment.HOSTNAME})
except Exception as e:
l.logger().error("GET Request at {}:{} has failed.".format(FLAGS.http_server_ip_address, FLAGS.http_port))
raise e
if r.status_code == 200:
return r.json()
else:
l.logger().error("Error code {} in read_message request.".format(r.status_code))
return None
def client_put_request(msg: typing.List[typing.Dict]) -> None:
"""
Helper function to perform put at /write_message of http target host.
"""
try:
r = requests.put("http://{}:{}/write_message".format(FLAGS.http_server_ip_address, FLAGS.http_port), data = json.dumps(msg), headers = {"Content-Type": "application/json", "Server-Name": environment.HOSTNAME})
except Exception as e:
l.logger().error("PUT Request at {}:{} has failed.".format(FLAGS.http_server_ip_address, FLAGS.http_port))
raise e
if r.status_code != 200:
l.logger().error("Error code {} in write_message request.".format(r.status_code))
return
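# A minimal client-side round trip using the helpers above (illustrative only,
# not referenced elsewhere in this module). It assumes
# FLAGS.http_server_ip_address / FLAGS.http_port point at a running server;
# the payload fields shown are placeholders.
def _example_client_round_trip() -> typing.Optional[typing.List[typing.Dict]]:
  # Push one workload entry to the server's read queue.
  client_put_request([{"src": "kernel void A(global int* a) { a[0] = 1; }"}])
  # Inspect the server's queue status.
  status, code = client_status_request()
  l.logger().info("server status: {} ({})".format(status, code))
  # Fetch whatever results have been published for this host.
  return client_get_request()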
class CLDriveServer(object):
"""
  Abstract class for cldrive server handling.
"""
def __init__(self) -> None:
return
def http_serve(self,
read_queue : multiprocessing.Queue,
write_queues : 'multiprocessing.Dict',
reject_queues : 'multiprocessing.Dict',
work_flag : multiprocessing.Value,
manager : multiprocessing.Manager,
):
"""
Run http server for read and write workload queues.
"""
try:
port = FLAGS.http_port
if port is None:
port = portpicker.pick_unused_port()
handler.set_params(read_queue, write_queues, reject_queues, manager, work_flag)
hostname = subprocess.check_output(
["hostname", "-i"],
stderr = subprocess.STDOUT,
).decode("utf-8").replace("\n", "").split(' ')
if len(hostname) == 2:
ips = "ipv4: {}, ipv6: {}".format(hostname[1], hostname[0])
else:
ips = "ipv4: {}".format(hostname[0])
l.logger().warn("Server Public IP: {}".format(ips))
waitress.serve(app, host = '0.0.0.0', port = port)
except KeyboardInterrupt:
return
except Exception as e:
raise e
return
def start_server_process(self):
"""
    This is an easy wrapper to start the server from a parent routine.
Starts a new process or thread and returns all the multiprocessing
elements needed to control the server.
"""
m = multiprocessing.Manager()
rq, wqs, rjqs = multiprocessing.Queue(), m.dict(), m.dict()
wf = multiprocessing.Value('i', False)
p = multiprocessing.Process(
      target = self.http_serve,
kwargs = {
'read_queue' : rq,
'write_queues' : wqs,
'reject_queues' : rjqs,
'work_flag' : wf,
'manager' : m,
}
)
p.daemon = True
p.start()
return p, wf, rq, wqs, rjqs
class WorkloadServer(object):
"""
Server that
a) computes a given source load and
b) Caches it locally (or maybe not) in the cldrive cache.
"""
def __init__(self, is_client: bool = False) -> None:
self.is_client = is_client
if not is_client:
self.start_server_process()
return
def http_serve(self) -> None:
raise NotImplementedError("TODO")
| 11,555 | 33.912387 | 213 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/commit.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import git
import pathlib
import typing
def saveCommit(path: pathlib.Path) -> None:
curr_commits = loadCommit(path)
repo = git.Repo(search_parent_directories = True)
cm = repo.head.object.hexsha
if cm not in curr_commits:
with open(path / "commit", 'a') as cf:
cf.write(repo.head.object.hexsha + "\n")
return
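# A minimal usage sketch (illustrative only): record the commit of the running
# code inside an experiment directory and read back every commit that has
# touched it. `workdir` is a caller-supplied path.
def _example_commit_tracking(workdir: pathlib.Path) -> typing.List[str]:
  saveCommit(workdir)
  return loadCommit(workdir)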
def loadCommit(path: pathlib.Path) -> typing.List[str]:
if (path / "commit").exists():
with open(path / "commit", 'r') as cf:
return [hx.replace('\n', '') for hx in cf.readlines()]
else:
return [] | 1,141 | 33.606061 | 74 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/pbutil.py | # coding=utf-8
# Copyright 2022 Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility code for working with Protocol Buffers."""
import collections
import collections.abc
import gzip
import json
import pathlib
import subprocess
import typing
import google.protobuf.json_format
import google.protobuf.message
import google.protobuf.text_format
# A type alias for annotating methods which take or return protocol buffers.
ProtocolBuffer = typing.Any
# A type alias for protocol enum fields.
Enum = int
class ProtoValueError(ValueError):
"""Raised in case of a value error from a proto."""
pass
class EncodeError(ProtoValueError):
"""Raised in case of error encoding a proto."""
pass
class DecodeError(ProtoValueError):
"""Raised in case of error decoding a proto."""
pass
class ProtoWorkerTimeoutError(subprocess.CalledProcessError):
"""Raised is a protobuf worker binary times out."""
def __init__(
self, cmd: typing.List[str], timeout_seconds: int, returncode: int,
):
self.cmd = cmd
self.timeout_seconds = timeout_seconds
# subprocess.CalledProcessError.str() requires a returncode attribute.
self.returncode = returncode
def __repr__(self) -> str:
return (
f"Proto worker timeout after {self.timeout_seconds} "
f"seconds: {' '.join(self.cmd)}"
)
def FromString(
string: str, message: ProtocolBuffer, uninitialized_okay: bool = False,
) -> ProtocolBuffer:
"""Read a text format protocol buffer from a string.
Args:
string: A text format protocol buffer.
message: A message instance to read into.
uninitialized_okay: If True, do not require that decoded messages be
initialized. If False, DecodeError is raised.
Returns:
The parsed message (same as the message argument).
Raises:
DecodeError: If the file cannot be decoded to the given message type, or if
after decoding, the message is not initialized and uninitialized_okay is
False.
"""
try:
google.protobuf.text_format.Merge(string, message)
except google.protobuf.text_format.ParseError as e:
raise DecodeError(e)
if not uninitialized_okay and not message.IsInitialized():
raise DecodeError(f"Required fields not set")
return message
def FromFile(
path: pathlib.Path,
message: ProtocolBuffer,
assume_filename: typing.Optional[typing.Union[str, pathlib.Path]] = None,
uninitialized_okay: bool = False,
) -> ProtocolBuffer:
"""Read a protocol buffer from a file.
  This method attempts to guess the encoding from the path suffix,
supporting binary, text, and json formatted messages. The mapping of suffixes
to formatting is, in order:
*.txt.gz: Gzipped text.
*.txt: Text.
*.pbtxt.gz: Gzipped text.
*.pbtxt: Text.
*.json.gz: Gzipped JSON.
*.json: JSON.
*.gz: Gzipped encoded string.
*: Encoded string.
Args:
path: Path to the proto file.
message: A message instance to read into.
assume_filename: For the purpose of determining the encoding from the file
extension, use this name rather than the true path.
uninitialized_okay: If True, do not require that decoded messages be
initialized. If False, DecodeError is raised.
Returns:
The parsed message (same as the message argument).
Raises:
FileNotFoundError: If the path does not exist.
IsADirectoryError: If the path is a directory.
DecodeError: If the file cannot be decoded to the given message type, or if
after decoding, the message is not initialized and uninitialized_okay is
False.
"""
if not path.is_file():
if path.is_dir():
raise IsADirectoryError(f"Path is a directory: '{path}'")
else:
raise FileNotFoundError(f"File not found: '{path}'")
suffixes = (
pathlib.Path(assume_filename,).suffixes
if assume_filename
else path.suffixes
)
if suffixes and suffixes[-1] == ".gz":
suffixes.pop()
open_function = gzip.open
else:
open_function = open
suffix = suffixes[-1] if suffixes else ""
try:
with open_function(path, "rb") as f:
if suffix == ".txt" or suffix == ".pbtxt":
# Allow uninitialized fields here because we will catch the error later,
# allowing us to report the path of the proto.
FromString(f.read().decode("utf-8"), message, uninitialized_okay=True)
elif suffix == ".json":
google.protobuf.json_format.Parse(f.read(), message)
else:
message.ParseFromString(f.read())
except (
google.protobuf.text_format.ParseError,
google.protobuf.json_format.ParseError,
) as e:
# The exception raised during parsing depends on the message format. Catch
# them all under a single DecodeError exception type.
raise DecodeError(e)
if not uninitialized_okay and not message.IsInitialized():
raise DecodeError(f"Required fields not set: '{path}'")
return message
def ToFile(
message: ProtocolBuffer,
path: pathlib.Path,
exist_ok: bool = True,
assume_filename: typing.Optional[typing.Union[str, pathlib.Path]] = None,
) -> ProtocolBuffer:
"""Write a protocol buffer to a file.
  This method attempts to guess the encoding from the path suffix,
supporting binary, text, and json formatted messages. The mapping of suffixes
to formatting is, in order:
*.txt.gz: Gzipped text format.
*.txt: Text format.
*.pbtxt.gz: Gzipped text format.
*.pbtxt: Text format.
*.json.gz: Gzipped JSON format.
*.json: JSON format.
*.gz: Gzipped binary format.
*: Binary format.
Args:
message: A message instance to write to file. The message must be
initialized, i.e. have all required fields set.
path: Path to the proto file.
exist_ok: If True, overwrite existing file.
assume_filename: For the purpose of determining the encoding from the file
extension, use this name rather than the true path.
Returns:
The parsed message (same as the message argument).
Raises:
EncodeError: If the message is not initialized, i.e. it is missing required
fields.
FileNotFoundError: If the parent directory of the requested path does not
exist.
IsADirectoryError: If the requested path is a directory.
FileExistsError: If the requested path already exists and exist_ok is False.
"""
if not exist_ok and path.exists():
raise FileExistsError(f"Refusing to overwrite {path}")
# The SerializeToString() method refuses to encode a message which is not
# initialized, whereas the MessageToString() and MessageToJson() methods do
# not. This API should be consistent, so we enforce that all formats require
# the message to be initialized.
if not message.IsInitialized():
class_name = type(message).__name__
raise EncodeError(f"Required fields not set: '{class_name}'")
suffixes = (
pathlib.Path(assume_filename,).suffixes
if assume_filename
else path.suffixes
)
if suffixes and suffixes[-1] == ".gz":
suffixes.pop()
open_function = gzip.open
else:
open_function = open
suffix = suffixes[-1] if suffixes else ""
mode = "wt" if suffix in {".txt", ".pbtxt", ".json"} else "wb"
with open_function(path, mode) as f:
if suffix == ".txt" or suffix == ".pbtxt":
f.write(google.protobuf.text_format.MessageToString(message))
elif suffix == ".json":
f.write(
google.protobuf.json_format.MessageToJson(
message, preserving_proto_field_name=True,
),
)
else:
f.write(message.SerializeToString())
return message
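# A minimal usage sketch (illustrative only): round trip an initialized message
# through ToFile()/FromFile(). `message` and `tmpdir` are caller-supplied,
# hypothetical parameters, not artifacts of this module.
def _example_proto_round_trip(message: ProtocolBuffer, tmpdir: pathlib.Path) -> ProtocolBuffer:
  # The suffix selects the encoding: .pbtxt/.txt -> text, .json -> JSON, else binary.
  path = tmpdir / "message.pbtxt"
  ToFile(message, path)
  # Parse the file back into a fresh instance of the same message type.
  return FromFile(path, type(message)())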
def ToJson(message: ProtocolBuffer) -> "typing.Union[typing.List[typing.Any], typing.Dict[str, typing.Any]]":
"""Return a JSON encoded representation of a protocol buffer.
Args:
message: The message to convert to JSON.
Returns:
JSON encoded message.
"""
return google.protobuf.json_format.MessageToDict(
message, preserving_proto_field_name=True,
)
def _TruncatedString(string: str, n: int = 80) -> str:
"""Return the truncated first 'n' characters of a string.
Args:
string: The string to truncate.
n: The maximum length of the string to return.
Returns:
The truncated string.
"""
if len(string) > n:
return string[: n - 3] + "..."
else:
return string
def _TruncateDictionaryStringValues(
data: "typing.Union[typing.List[typing.Any], typing.Dict[str, typing.Any]]", n: int = 62,
) -> "typing.Union[typing.List[typing.Any], typing.Dict[str, typing.Any]]":
"""Truncate all string values in a nested dictionary.
Args:
data: A dictionary.
Returns:
The dictionary.
"""
for key, value in data.items():
    if isinstance(value, collections.abc.Mapping):
data[key] = _TruncateDictionaryStringValues(data[key])
elif isinstance(value, str):
data[key] = _TruncatedString(value, n)
else:
data[key] = value
return data
def PrettyPrintJson(message: ProtocolBuffer, truncate: int = 52) -> str:
"""Return a pretty printed JSON string representation of the message.
Args:
message: The message to pretty print.
truncate: The length to truncate string values. Truncation is disabled if
this argument is None.
Returns:
JSON string.
"""
data = ToJson(message)
return json.dumps(
_TruncateDictionaryStringValues(data) if truncate else data,
indent=2,
sort_keys=True,
)
def RaiseIfNotSet(
proto: ProtocolBuffer, field: str, err: ValueError,
) -> typing.Any:
"""Check that a proto field is set before returning it.
Args:
proto: A message instance.
field: The name of the field.
err: The exception class to raise.
Returns:
The value of the field.
Raises:
ValueError: If the field is not set.
"""
if not proto.HasField(field):
raise err(f"datastore field {field} not set")
elif not getattr(proto, field):
raise err(f"datastore field {field} not set")
return getattr(proto, field)
def ProtoIsReadable(
path: typing.Union[str, pathlib.Path], message: ProtocolBuffer,
) -> bool:
"""Return whether a file is a readable protocol buffer.
Arguments:
path: The path of the file to read.
message: An instance of the message type.
Returns:
True if contents of path can be parsed as an instance of message, else
False.
"""
try:
FromFile(pathlib.Path(path), message)
return True
except:
return False
def AssertFieldIsSet(
proto: ProtocolBuffer, field_name: str, fail_message: str = None,
) -> typing.Optional[typing.Any]:
"""Assert that protocol buffer field is set.
Args:
proto: A proto message instance.
field_name: The name of the field to assert the constraint on.
fail_message: An optional message to raise the ProtoValueError
with if the assertion fails. If not provided, a default message is used.
Returns:
The value of the field, if the field has a value. Even though a field may
be set, it may not have a value. For example, if any of a 'oneof' fields
is set, then this function will return True for the name of the oneof,
but the return value will be None.
Raises:
ValueError: If the requested field does not exist in the proto schema.
ProtoValueError: If the field is not set.
"""
if not proto.HasField(field_name):
proto_class_name = type(proto).__name__
raise ProtoValueError(
fail_message or f"Field not set: '{proto_class_name}.{field_name}'",
)
return getattr(proto, field_name) if hasattr(proto, field_name) else None
def AssertFieldConstraint(
proto: ProtocolBuffer,
field_name: str,
constraint: typing.Callable[[typing.Any], bool] = lambda x: True,
fail_message: str = None,
) -> typing.Any:
"""Assert a constraint on the value of a protocol buffer field.
Args:
proto: A proto message instance.
field_name: The name of the field to assert the constraint on.
constraint: A constraint checking function to call with the value of the
field. The function must return True if the constraint check passes, else
False. If no constraint is specified, this callback always returns True.
This still allows you to use this function to check if a field is set.
fail_message: An optional message to raise the ProtoValueError
with if the assertion fails. If not provided, default messages are used.
Returns:
The value of the field.
Raises:
ValueError: If the requested field does not exist in the proto schema.
ProtoValueError: If the field is not set, or if the constraint callback
returns False for the field's value.
"""
value = AssertFieldIsSet(proto, field_name, fail_message)
if not constraint(value):
proto_class_name = type(proto).__name__
raise ProtoValueError(
fail_message
or f"Field fails constraint check: '{proto_class_name}.{field_name}'",
)
else:
return value
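# A minimal usage sketch (illustrative only): validating a proto field with
# AssertFieldConstraint(). `config` stands for any message with a numeric
# `batch_size` field; the names are hypothetical.
def _example_field_constraint(config: ProtocolBuffer) -> int:
  return AssertFieldConstraint(
    config,
    "batch_size",
    lambda x: x > 0,
    "batch_size must be a positive integer",
  )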
def RunProcessMessage(
cmd: typing.List[str],
input_proto: ProtocolBuffer,
timeout_seconds: int = 360,
env: typing.Dict[str, str] = None,
) -> str:
"""Run the given command, feeding a serialized input proto to stdin.
Args:
cmd: The command to execute.
input_proto: The input message for the command.
timeout_seconds: The maximum number of seconds to allow the command to run
for.
env: A map of environment variables to set, overriding the default
environment.
Returns:
The raw stdout of the command as a byte array.
  Raises:
ProtoWorkerTimeoutError: If timeout_seconds elapses without the command
terminating.
CalledProcessError: If the command terminates with a non-zero returncode.
"""
  # Run the C++ worker process, capturing its output.
process = subprocess.Popen(
["timeout", "-s9", str(timeout_seconds)] + cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env,
)
# Send the input proto to the C++ worker process.
stdout, _ = process.communicate(input_proto.SerializeToString())
  # TODO: Check the signal value rather than hardcoding the kill signal.
if process.returncode == -9 or process.returncode == 9:
raise ProtoWorkerTimeoutError(
cmd=cmd, timeout_seconds=timeout_seconds, returncode=process.returncode,
)
elif process.returncode:
raise subprocess.CalledProcessError(process.returncode, cmd)
return stdout
def RunProcessMessageToProto(
cmd: typing.List[str],
input_proto: ProtocolBuffer,
output_proto: ProtocolBuffer,
timeout_seconds: int = 360,
env: typing.Dict[str, str] = None,
) -> ProtocolBuffer:
"""Run a command that accepts a protocol buffer as input and produces a
protocol buffer output.
Args:
cmd: The command to execute.
input_proto: The input message for the command. This is fed to the command's
stdin as a serialized string.
output_proto: The output message for the command. The values of this proto
are set by the stdout of the command.
timeout_seconds: The maximum number of seconds to allow the command to run
for.
env: A map of environment variables to set, overriding the default
environment.
Returns:
The same protocol buffer as output_proto, with the values produced by the
stdout of the command.
  Raises:
ProtoWorkerTimeoutError: If timeout_seconds elapses without the command
terminating.
CalledProcessError: If the command terminates with a non-zero returncode.
"""
stdout = RunProcessMessage(
cmd, input_proto, timeout_seconds=timeout_seconds, env=env,
)
output_proto.ParseFromString(stdout)
return output_proto
def RunProcessMessageInPlace(
cmd: typing.List[str],
input_proto: ProtocolBuffer,
timeout_seconds: int = 360,
env: typing.Dict[str, str] = None,
) -> ProtocolBuffer:
"""Run the given command, modifying a protocol buffer inplace.
Args:
cmd: The command to execute.
input_proto: The input message for the command. This is fed to the command's
stdin as a serialized string.
timeout_seconds: The maximum number of seconds to allow the command to run
for.
env: A map of environment variables to set, overriding the default
environment.
Returns:
The same protocol buffer as input_proto, with the values produced by the
stdout of the command.
  Raises:
ProtoWorkerTimeoutError: If timeout_seconds elapses without the command
terminating.
CalledProcessError: If the command terminates with a non-zero returncode.
"""
input_proto.ParseFromString(
RunProcessMessage(
cmd, input_proto, timeout_seconds=timeout_seconds, env=env,
),
)
return input_proto
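# A minimal usage sketch (illustrative only): invoking a worker binary that
# reads a serialized proto on stdin and writes the mutated proto back on
# stdout. "/path/to/worker" is a placeholder, not a real artifact of this
# repository.
def _example_run_worker(request: ProtocolBuffer) -> ProtocolBuffer:
  return RunProcessMessageInPlace(
    ["/path/to/worker"],
    request,
    timeout_seconds = 60,
  )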
class ProtoBackedMixin(object):
"""A class backed by protocol buffers.
This mixin provides the abstract interface for classes which support
serialization of instances to and from protocol buffers.
Inheriting classes must set the proto_t class attribute, and implement the
SetProto() and FromProto() methods.
Attributes:
proto_t: The protocol buffer class that backs instances of this class.
"""
  # Inheriting classes must set this attribute to the Protocol Buffer class.
proto_t = None
def SetProto(self, proto: ProtocolBuffer) -> None:
"""Set the fields of a protocol buffer with the values of the instance.
It is the responsibility of the inheriting class to ensure that all required
instance variables are recorded as fields in this proto.
Args:
proto: A protocol buffer.
"""
# ABSTRACT METHOD. Inheriting classes must implement!
raise NotImplementedError(
f"{type(self).__name__}.SetProto() not implemented",
)
@classmethod
def FromProto(cls, proto: ProtocolBuffer) -> "ProtoBackedMixin":
"""Return an instance of the class from proto.
It is the responsibility of the inheriting class to ensure that all required
instance variables are set according to the fields in the proto.
Args:
proto: A protocol buffer.
Returns:
An instance of the class.
"""
# ABSTRACT METHOD. Inheriting classes must implement!
raise NotImplementedError(
f"{type(self).__name__}.FromProto() not implemented",
)
def ToProto(self) -> ProtocolBuffer:
"""Serialize the instance to protocol buffer.
It is the responsibility of the inheriting class to set the proto_t class
    attribute to the protocol buffer class that backs this instance.
Returns:
A protocol buffer.
"""
proto = self.proto_t()
self.SetProto(proto)
return proto
@classmethod
def FromProtoFile(cls, path: pathlib.Path) -> "ProtoBackedMixin":
"""Return an instance of the class from serialized proto file.
Args:
path: Path to a proto file.
Returns:
An instance.
"""
return cls.FromProto(FromFile(path, cls.proto_t()))
| 19,217 | 29.699681 | 109 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/monitors.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Statistical distributions used for sampling"""
import pathlib
import pickle
import typing
import numpy as np
import sklearn.manifold
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import logging as l
class Monitor():
def __init__(self,
cache_path : typing.Union[pathlib.Path, str],
set_name : str
):
self.cache_path = cache_path if isinstance(cache_path, pathlib.Path) else pathlib.Path(cache_path)
self.set_name = set_name
return
def saveCheckpoint(self):
with open(self.cache_path / "{}_state.pkl".format(self.set_name), 'wb') as outf:
pickle.dump(self, outf)
return
@classmethod
def loadCheckpoint(cls, cache_path, set_name):
if (cache_path / "{}_state.pkl".format(set_name)).exists():
with open(cache_path / "{}_state.pkl".format(set_name), 'rb') as infile:
obj = pickle.load(infile)
else:
obj = cls(cache_path, set_name)
return obj
def getData(self):
raise NotImplementedError("Abstract Class")
def getStrData(self):
raise NotImplementedError("Abstract Class")
def register(self, actual_sample):
raise NotImplementedError("Abstract Class")
def sample(self):
raise NotImplementedError("Abstract Class")
def plot(self):
raise NotImplementedError("Abstract Class")
class FrequencyMonitor(Monitor):
"""
  Keeps track of the occurrence frequency of a specific key.
  The key is provided through `actual_sample` in the register method and
  its frequency is incremented by one.
  Bar-plots number of occurrences vs. keys.
"""
def __init__(self,
cache_path: typing.Union[pathlib.Path, str],
set_name : str,
):
super(FrequencyMonitor, self).__init__(cache_path, set_name)
self.sample_counter = {}
return
def getData(self) -> typing.List[typing.Tuple[typing.Union[int, str, float], int]]:
return sorted(self.sample_counter.items(), key = lambda x: x[0])
def getStrData(self) -> str:
return "\n".join(
["{}:{}".format(k, v) for (k, v) in self.getData()]
)
def register(self, actual_sample: typing.Union[int, str, list]) -> None:
if isinstance(actual_sample, list):
for s in actual_sample:
self.register(s)
else:
if actual_sample not in self.sample_counter:
self.sample_counter[actual_sample] = 1
else:
self.sample_counter[actual_sample] += 1
return
def plot(self) -> None:
"""Plot bars of number of occurences."""
sorted_dict = sorted(self.sample_counter.items(), key = lambda x: x[0])
plotter.FrequencyBars(
x = [x for (x, _) in sorted_dict],
y = [y for (_, y) in sorted_dict],
plot_name = self.set_name,
path = self.cache_path,
title = self.set_name,
x_name = self.set_name,
)
return
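# A minimal usage sketch (illustrative only): typical lifecycle of a monitor.
# Resume from a checkpoint, register samples, plot and persist. "/tmp/monitors"
# and "token_length" are placeholder values.
def _example_frequency_monitor() -> None:
  cache = pathlib.Path("/tmp/monitors")
  cache.mkdir(parents = True, exist_ok = True)
  mon = FrequencyMonitor.loadCheckpoint(cache, "token_length")
  for sample in [4, 8, 8, 16]:
    mon.register(sample)
  mon.plot() # Writes a bar chart under the cache directory via plotter.
  mon.saveCheckpoint() # Persist counts so a later run can resume them.
  return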
class NormalizedFrequencyMonitor(FrequencyMonitor):
"""
Identical to FrequencyMonitor but normalizes absolute values
of bars with respect to total occurrences.
"""
def __init__(self,
cache_path: typing.Union[pathlib.Path, str],
set_name : str,
):
super(NormalizedFrequencyMonitor, self).__init__(cache_path, set_name)
return
def plot(self) -> None:
"""Plot bars of number of occurences."""
total = sum(self.sample_counter.values())
sorted_dict = sorted(self.sample_counter.items(), key = lambda x: x[0])
plotter.FrequencyBars(
x = [x for (x, _) in sorted_dict],
y = [y / total for (_, y) in sorted_dict],
plot_name = self.set_name,
path = self.cache_path,
title = self.set_name,
x_name = self.set_name,
)
return
class CumulativeHistMonitor(Monitor):
"""
  Keeps track of the occurrence frequency of a specific key.
  The key is provided through `actual_sample` in the register method and
  its frequency is incremented by one.
  Plots a cumulative histogram of occurrences vs. keys.
"""
def __init__(self,
cache_path: typing.Union[pathlib.Path, str],
set_name : str,
):
super(CumulativeHistMonitor, self).__init__(cache_path, set_name)
self.sample_counter = {}
return
def getData(self) -> typing.List[typing.Tuple[typing.Union[int, str, float], int]]:
return sorted(self.sample_counter.items(), key = lambda x: x[0])
def getStrData(self) -> str:
return "\n".join(
["{}:{}".format(k, v) for (k, v) in self.getData()]
)
def register(self, actual_sample: typing.Union[list, int, float]) -> None:
if isinstance(actual_sample, list):
for s in actual_sample:
self.register(s)
else:
if actual_sample not in self.sample_counter:
self.sample_counter[actual_sample] = 1
else:
self.sample_counter[actual_sample] += 1
return
def plot(self) -> None:
"""Plot bars of number of occurences."""
sorted_dict = self.getData()
plotter.CumulativeHistogram(
x = [x for (x, _) in sorted_dict],
y = [y for (_, y) in sorted_dict],
plot_name = self.set_name,
path = self.cache_path,
title = self.set_name,
x_name = self.set_name,
)
return
class HistoryMonitor(Monitor):
"""
Monitors values in an ordered timeline
Plots a line of values against timesteps.
"""
def __init__(self,
cache_path: typing.Union[pathlib.Path, str],
set_name: str,
):
super(HistoryMonitor, self).__init__(cache_path, set_name)
self.sample_list = []
return
def getData(self) -> typing.List[typing.Union[int, float]]:
return self.sample_list
def getStrData(self) -> str:
return ",".join(
[str(v) for v in self.getData()]
)
def register(self, actual_sample: typing.Union[int, float]) -> None:
self.sample_list.append(float(actual_sample))
return
def plot(self) -> None:
"""Plot line over timescale"""
plotter.SingleScatterLine(
x = np.arange(len(self.sample_list)),
y = self.sample_list,
plot_name = self.set_name,
path = self.cache_path,
title = self.set_name,
y_name = self.set_name,
)
return
class CategoricalHistoryMonitor(Monitor):
"""
Scatter line of one datapoint per category.
Useful to track average value per category.
"""
def __init__(self,
cache_path: typing.Union[pathlib.Path, str],
set_name: str,
):
super(CategoricalHistoryMonitor, self).__init__(cache_path, set_name)
self.sample_dict = {}
return
def getData(self) -> typing.List[typing.Tuple[typing.Union[int, str, float], int]]:
return sorted(self.sample_dict.items(), key = lambda x: x[0])
def getStrData(self) -> str:
return "\n".join(
["{}:{}".format(k, v) for (k, v) in self.getData()]
)
def register(self, actual_sample: typing.Tuple[typing.Any, typing.Any]) -> None:
key, value = actual_sample
self.sample_dict[key] = value
return
def plot(self) -> None:
"""Plot line over timescale"""
sorted_dict = self.getData()
plotter.SingleScatterLine(
x = [x for (x, _) in sorted_dict],
y = [y for (_, y) in sorted_dict],
plot_name = self.set_name,
path = self.cache_path,
title = self.set_name,
y_name = self.set_name,
)
return
class CategoricalDistribMonitor(Monitor):
"""
  Monitors a distribution of values per category.
  Values registered under the same key are accumulated into a list and
  the per-category distributions are plotted as violins.
"""
def __init__(self,
cache_path: typing.Union[pathlib.Path, str],
set_name: str,
):
super(CategoricalDistribMonitor, self).__init__(cache_path, set_name)
self.sample_dict = {}
return
def getData(self) -> typing.List[typing.Tuple[typing.Union[int, str, float], int]]:
return sorted(self.sample_dict.items(), key = lambda x: x[0])
def getStrData(self) -> str:
return "\n".join(
["{}:{}".format(k, sum(v) / len(v)) for (k, v) in self.getData()]
)
def register(self, actual_sample: typing.Dict[str, float]) -> None:
for k, v in actual_sample.items():
if isinstance(v, list):
val = v
else:
val = [v]
if k in self.sample_dict:
self.sample_dict[k] += val
else:
self.sample_dict[k] = val
return
def plot(self) -> None:
"""Plot line over timescale"""
sorted_dict = self.getData()
plotter.CategoricalViolin(
x = [k for (k, _) in sorted_dict],
y = [v for (_, v) in sorted_dict],
plot_name = self.set_name,
path = self.cache_path,
title = self.set_name,
)
return
class FeatureMonitor(Monitor):
"""
Produces a bar chart of averaged features.
Yes, features are averaged. It is not a cumulative representation.
"""
def __init__(self,
cache_path: typing.Union[pathlib.Path, str],
set_name: str,
):
super(FeatureMonitor, self).__init__(cache_path, set_name)
self.features = {}
self.instance_counter = 0
return
def getData(self) -> typing.Dict[str, float]:
return {k: v / self.instance_counter for k, v in self.features.items()}
def getStrData(self) -> str:
return "\n".join(
["{}:{}".format(k, v) for k, v in self.getData().items()]
)
def register(self, actual_sample: typing.Dict[str, float]) -> None:
"""actual sample is a dict of features to their values."""
if not isinstance(actual_sample, dict):
raise TypeError("Feature sample must be dictionary of string features to float values. Received: {}".format(actual_sample))
self.instance_counter += 1
for k, v in actual_sample.items():
if k not in self.features:
self.features[k] = v
else:
self.features[k] += v
return
def plot(self) -> None:
"""Plot averaged Bar chart"""
plotter.FrequencyBars(
x = [k for k in self.features.keys()],
y = [v / self.instance_counter for v in self.features.values()],
plot_name = self.set_name,
path = self.cache_path,
title = self.set_name,
x_name = self.set_name,
)
class TSNEMonitor(Monitor):
"""
Keeps track of feature vectors of various groups in a given feature space.
Performs t-SNE algorithm to reduce dimensionality to 2 and plots groupped scatterplot.
"""
def __init__(self,
cache_path: typing.Union[pathlib.Path, str],
set_name: str,
):
super(TSNEMonitor, self).__init__(cache_path, set_name)
self.features = []
self.features_set = set()
self.groups = []
self.groups_set = set()
self.names = []
return
def getData(self) -> None:
raise NotImplementedError
def getStrData(self) -> None:
raise NotImplementedError
def register(self, actual_sample: typing.Tuple[typing.Union[typing.List, typing.Dict[str, int]], str, typing.Optional[str]]) -> None:
"""
A registered sample must contain:
1. A feature vector.
2. The group it belongs to.
3. (Optional) The name of the datapoint.
Feature vectors stored are unique.
"""
feats, group = actual_sample[0], actual_sample[1]
name = actual_sample[2] if len(actual_sample) == 3 else ""
if isinstance(feats, dict):
feats_list = list(feats.values())
else:
feats_list = feats
if str(feats_list) not in self.features_set:
self.features.append(feats_list)
self.features_set.add(str(feats_list))
self.groups.append(group)
self.groups_set.add(group)
self.names.append(name)
elif name != "":
for idx, f in enumerate(self.features):
if feats_list == f:
self.names[idx] += ",{}".format(name)
return
def plot(self) -> None:
"""
Plot groupped scatter graph.
"""
if len(self.features) <= 0:
# Nothing to plot.
return
tsne = sklearn.manifold.TSNE(
perplexity = min(30, len(self.features) - 1),
)
embeddings = tsne.fit_transform(np.array(self.features))
groupped_data = {}
for points, group, name in zip(embeddings, self.groups, self.names):
if group in groupped_data:
groupped_data[group]['data'].append(points)
groupped_data[group]['names'].append(name)
else:
groupped_data[group] = {
'data': [points],
'names': [name],
}
plotter.GroupScatterPlot(
groups = groupped_data,
plot_name = self.set_name,
path = self.cache_path,
title = self.set_name,
x_name = self.set_name,
y_name = self.set_name,
) | 13,386 | 29.916859 | 135 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/cache.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the logic for managing BenchPress filesystem caches."""
import os
import pathlib
import six
import atexit
import json
import re
import typing
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import fs
class Cache(object):
"""
Cache for storing (key,value) relational data.
  A cache is a dictionary with a limited subset of dictionary
  functionality.
"""
def get(self, key, default=None) -> typing.Optional[pathlib.Path]:
"""
Retrieve an item from cache.
Arguments:
key: Item key.
default (optional): Default value if item not found.
"""
raise NotImplementedError
def clear(self) -> None:
"""
Remove all items from cache.
"""
raise NotImplementedError
def items(self) -> typing.Iterable[pathlib.Path]:
"""
Returns a generator for iterating over (key, value) pairs.
"""
raise NotImplementedError
def __getitem__(self, key) -> pathlib.Path:
"""
Retrieve an item from cache.
Arguments:
key: Item key.
Raises:
KeyError: If key is not in cache.
"""
raise NotImplementedError
def __setitem__(self, key, value) -> None:
"""
Set (key, value) pair.
"""
raise NotImplementedError
def __contains__(self, key) -> bool:
"""
Returns whether key is in cache.
"""
raise NotImplementedError
def __delitem__(self, key) -> None:
"""
Remove (key, value) pair.
"""
raise NotImplementedError
def __iter__(self) -> typing.Iterator[pathlib.Path]:
"""
Iterate over all cache entries.
"""
raise NotImplementedError
def __len__(self) -> int:
"""
Get the number of entries in the cache.
"""
raise NotImplementedError
class TransientCache(Cache):
"""
An in-memory only cache.
"""
def __init__(self, basecache=None):
"""
Create a new transient cache.
Optionally supports populating the cache with values of an
existing cache.
Arguments:
basecache (TransientCache, optional): Cache to populate this new
cache with.
"""
self._data = {}
if basecache is not None:
for key, val in basecache.items():
self._data[key] = val
def get(self, key, default=None):
if key in self._data:
return self._data[key]
else:
return default
def clear(self):
self._data.clear()
def items(self):
return six.iteritems(self._data)
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
self._data[key] = value
return value
def __contains__(self, key):
return key in self._data
def __delitem__(self, key):
del self._data[key]
def __iter__(self):
"""
Iterate over all cache entries.
Returns:
iterable: Entries in cache.
"""
for value in self._data.values():
yield value
def __len__(self):
"""
Get the number of cache entries.
Returns:
int: Number of entries in the cache.
"""
return len(list(self._data.keys()))
class JsonCache(TransientCache):
"""
A persistent, JSON-backed cache.
Requires that (key, value) pairs are JSON serialisable.
"""
def __init__(self, path, basecache=None):
"""
Create a new JSON cache.
Optionally supports populating the cache with values of an
existing cache.
Arguments:
basecache (TransientCache, optional): Cache to populate this new
cache with.
"""
super(JsonCache, self).__init__()
self.path = fs.abspath(path)
if fs.exists(self.path) and fs.Read(self.path):
with open(self.path) as file:
self._data = json.load(file)
if basecache is not None:
for key, val in basecache.items():
self._data[key] = val
# Register exit handler
atexit.register(self.write)
def write(self):
"""
Write contents of cache to disk.
"""
with open(self.path, "w") as file:
json.dump(
self._data, file, sort_keys=True, indent=2, separators=(",", ": "),
)
def hash_key(key):
"""
Convert a key to a filename by hashing its value.
"""
return crypto.sha1_str(json.dumps(key, sort_keys=True))
def escape_path(key):
"""
Convert a key to a filename by escaping invalid characters.
"""
return re.sub(r"[ \\/]+", "_", key)
class FSCache(Cache):
"""
Persistent filesystem cache.
Each key uniquely identifies a file.
Each value is a file path.
  Adding a file to the cache moves it into the cache directory.
Members:
path (str): Root cache.
escape_key (fn): Function to convert keys to file names.
"""
def __init__(self, root, escape_key=hash_key):
"""
Create filesystem cache.
Arguments:
      root (str): Path of the cache root directory.
escape_key (fn, optional): Function to convert keys to file names.
"""
self.path = pathlib.Path(root)
self.escape_key = escape_key
fs.mkdir(self.path)
def clear(self):
"""
Empty the filesystem cache.
This deletes the entire cache directory.
"""
fs.rm(self.path)
def keypath(self, key):
"""
Get the filesystem path for a key.
Arguments:
key: Key.
Returns:
str: Absolute path.
"""
return fs.path(self.path, self.escape_key(key))
def __getitem__(self, key):
"""
Get path to file in cache.
Arguments:
key: Key.
Returns:
str: Path to cache value.
Raises:
      KeyError: If key not in cache.
"""
path = self.keypath(key)
if fs.exists(path):
return path
else:
raise KeyError(key)
def __setitem__(self, key, value):
"""
Emplace file in cache.
Arguments:
key: Key.
value (str): Path of file to insert in cache.
Raises:
      ValueError: If "value" does not exist.
"""
if not fs.exists(value):
raise ValueError(value)
path = self.keypath(key)
fs.mkdir(self.path)
fs.mv(value, path)
def __contains__(self, key):
"""
Check cache contents.
Arguments:
key: Key.
Returns:
bool: True if key in cache, else false.
"""
path = self.keypath(key)
return fs.exists(path)
def __delitem__(self, key):
"""
Delete cached file.
Arguments:
key: Key.
Raises:
KeyError: If file not in cache.
"""
path = self.keypath(key)
if fs.exists(path):
fs.rm(path)
else:
raise KeyError(key)
def __iter__(self):
"""
Iterate over all cached files.
Returns:
iterable: Paths in cache.
"""
for path in fs.ls(self.path, abspaths=True):
yield path
def __len__(self):
"""
Get the number of entries in the cache.
Returns:
int: Number of entries in the cache.
"""
return len(list(fs.ls(self.path)))
def get(self, key, default=None):
"""
Fetch from cache.
Arguments:
key: Key.
default (optional): Value returned if key not found.
Returns:
str: Path to cached file.
"""
if key in self:
return self[key]
else:
return default
def ls(self, **kwargs):
"""
List files in cache.
Arguments:
**kwargs: Keyword options to pass to fs.ls().
Returns:
iterable: List of files.
"""
return fs.ls(self.path, **kwargs)
def cachepath(*relative_path_components: str) -> pathlib.Path:
"""Return path to file system cache.
Args:
*relative_path_components: Relative path of cache.
Returns:
Absolute path of file system cache.
"""
cache_root = pathlib.Path(os.environ.get("BENCHPRESS_CACHE", "~/.cache/clgen/"))
cache_root.expanduser().mkdir(parents=True, exist_ok=True)
return pathlib.Path(fs.path(cache_root, *relative_path_components))
def mkcache(*relative_path_components: str) -> FSCache:
"""Instantiate a file system cache.
If the cache does not exist, one is created.
Args:
*relative_path_components: Relative path of cache.
Returns:
A filesystem cache instance.
"""
return FSCache(
cachepath(*relative_path_components), escape_key=escape_path
)
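# A minimal usage sketch (illustrative only): caching a generated file under
# the BenchPress filesystem cache. The cache components and file name below
# are placeholder values.
def _example_fs_cache():
  cache = mkcache("model", "example")
  scratch = cachepath("scratch.txt")
  with open(scratch, "w") as f:
    f.write("hello")
  cache["greeting"] = str(scratch) # Moves the file into the cache directory.
  return cache["greeting"] # Absolute path of the cached file.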
| 8,784 | 20.271186 | 82 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/distrib.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cluster node handling for Distributed model training and sampling"""
import glob
import os
import sys
import pickle
import time
import pathlib
import typing
import functools
import tqdm
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
MASTER_PORT = environment.MASTER_PORT
MASTER_ADDR = environment.MASTER_ADDR
LOCAL_RANK = environment.LOCAL_RANK
WORLD_RANK = environment.WORLD_RANK
WORLD_SIZE = environment.WORLD_SIZE
PATH = None
LOCK_TYPES = [
'barrier-lock-',
'barrier-escape-',
'critical-lock-',
'index-',
'msg-'
]
def barrier(fn: typing.Callable = None) -> None:
"""
Node processes are blocked until all nodes have reached this checkpoint.
!!Warning!!: This function must not be called under a child process or thread.
"""
if environment.WORLD_SIZE > 1:
if pytorch.num_gpus > 0:
torch.distributed.barrier(device_ids = [environment.LOCAL_RANK])
else:
torch.distributed.barrier()
return
else:
return
# if WORLD_SIZE > 1:
# if PATH is None:
# raise FileNotFoundError("Distributed env path has not been set!")
# with open(PATH / "barrier-lock-{}".format(WORLD_RANK), 'w') as outf:
# outf.write("{}\n".format(WORLD_RANK))
# outf.flush()
# barriers = glob.glob(str(PATH / "barrier-lock-*"))
# while len(barriers) < WORLD_SIZE:
# if fn:
# fn()
# time.sleep(0.5)
# barriers = glob.glob(str(PATH / "barrier-lock-*"))
# with open(PATH / "barrier-escape-{}".format(WORLD_RANK), 'w') as outf:
# outf.write("{}\n".format(WORLD_RANK))
# outf.flush()
# while len(barriers) > 0:
# barriers = glob.glob(str(PATH / "barrier-lock-*"))
# escapes = glob.glob(str(PATH / "barrier-escape-*"))
# if WORLD_RANK == 0 and len(escapes) == WORLD_SIZE:
# for be in escapes:
# os.remove(str(be))
# for b in barriers:
# os.remove(str(b))
# else:
# time.sleep(0.2)
# time.sleep(0.5)
return
def lock() -> None:
"""
#####!!!! DEPRECATED. WILL BE REMOVED SOON.
Acquire lockfile to proceed to critical section.
"""
## Corner-case where no DDP is used.
if WORLD_SIZE == 1:
return
## Busy waiting to acquire lock.
while len(glob.glob(str(PATH / "critical-lock-*"))) > 0:
time.sleep(0.5)
## Register lockfile.
if (PATH / "critical-lock-{}".format(WORLD_RANK)).exists():
raise ValueError("Node {} lock already exists.".format(WORLD_RANK))
with open(PATH / "critical-lock-{}".format(WORLD_RANK), 'w') as outf:
outf.write("{}\n".format(WORLD_RANK))
outf.flush()
## Maybe more than one processes are here already. Prioritize by id.
## Unlock and Re-lock if you are not the minimum privileged id.
locks = glob.glob(str(PATH / "critical-lock-*"))
if len(locks) > 1:
min_id = min([int(x.split('critical-lock-')[-1]) for x in locks])
if WORLD_RANK != min_id:
unlock()
lock()
return
def unlock() -> None:
"""
#####!!!! DEPRECATED. WILL BE REMOVED SOON.
Release node lock.
"""
if WORLD_SIZE == 1:
return
if not (PATH / "critical-lock-{}".format(WORLD_RANK)).exists():
raise FileNotFoundError("Node {} lock missing.".format(WORLD_RANK))
exc_counter = 0
while (PATH / "critical-lock-{}".format(WORLD_RANK)).exists():
try:
os.remove(PATH / "critical-lock-{}".format(WORLD_RANK))
except FileNotFoundError as e:
exc_counter += 1
if exc_counter > 500:
raise e
time.sleep(0.5)
return
def broadcast(msg: str = None) -> None:
"""
Node broadcasts a message to all other nodes.
This function is not process-safe. User must ensure one node calls it
  and all reads have completed before re-writing.
"""
if environment.WORLD_SIZE == 1:
return msg
msg = [msg] * environment.WORLD_SIZE
torch.distributed.broadcast_object_list(msg, src = 0)
return msg[0]
def get_consistent(msg: typing.Any) -> typing.Any:
"""
All nodes become consistent on a set of discrete chunks of data.
All nodes must get updated with the same merged blob.
"""
if environment.WORLD_SIZE == 1:
return msg
consistent_array = [None for _ in range(environment.WORLD_SIZE)]
torch.distributed.all_gather_object(consistent_array, [msg])
return [i for rank in consistent_array for i in rank[0]]
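# A minimal usage sketch (illustrative only): a typical coordination pattern
# where every rank contributes a partial workload, all ranks end up with the
# same merged list, and rank 0 broadcasts a tag to the rest.
def _example_coordination(partial_results: typing.List[typing.Any]) -> typing.List[typing.Any]:
  merged = get_consistent(partial_results)
  tag = broadcast("epoch-0" if WORLD_RANK == 0 else None)
  l.logger().info("Rank {} works on tag {}".format(WORLD_RANK, tag))
  # Make sure no rank races ahead before the next phase starts.
  barrier()
  return merged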
def init(path: pathlib.Path) -> None:
"""
Initialize parent directory for distrib coordination.
"""
global PATH
if isinstance(path, str):
PATH = pathlib.Path(path).resolve()
else:
PATH = path
cleanup()
return
def cleanup() -> None:
"""
Cleanup any distributed lock files used.
"""
for tp in LOCK_TYPES:
for f in glob.glob(str(PATH / "{}{}".format(tp, WORLD_RANK))):
os.remove(f)
barrier()
return
class ProgressBar(object):
"""
Creates a distributed progressbar.
All nodes write their current index to a distinct file.
Only master node reads the indices and updates the progressbar.
"""
def __init__(self, total: int, offset: int, desc: str = ""):
self.total = total
self.offset = offset
self.path = PATH
self.n = 0 # tqdm compatibility getter.
if self.path is None:
raise FileNotFoundError("Distributed env path has not been set!")
if WORLD_RANK == 0:
self.bar = tqdm.tqdm(total = total, desc = desc, leave = True)
return
def _fetch_indices(self, idx: int) -> int:
"""
Master node reads current workload indices of all nodes.
"""
total = idx - self.offset
for n in range(1, WORLD_SIZE):
if (self.path / "index-{}".format(n)).exists():
try:
with open(self.path / "index-{}".format(n), 'r') as inf:
total += int(inf.read())
except Exception:
pass
return total
def _write_index(self, idx: int) -> None:
"""
    Update this node's index file with the current index.
"""
with open(self.path / "index-{}".format(WORLD_RANK), 'w') as outf:
outf.write(str(idx - self.offset))
outf.flush()
return
def update(self, idx: int, flush: bool = False) -> None:
"""
Master node updates the bar,
slave nodes update their indices.
"""
if (idx - self.offset) % 100 == 0 or flush:
if WORLD_RANK == 0:
total_idx = self._fetch_indices(idx)
self.bar.update(total_idx - self.bar.n)
self.bar.refresh()
else:
self._write_index(idx)
return
def finalize(self, idx: int) -> None:
"""
Do a final bar update and cleanup progressbar object.
"""
fn = functools.partial(self.update, idx = idx, flush = True)
barrier(fn)
if WORLD_RANK == 0:
indices = glob.glob(str(PATH / "index-*"))
for ip in indices:
os.remove(str(ip))
self.bar.close()
return
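# Illustrative usage of ProgressBar above (editor's note: `global_total`, `workload`,
# `my_start` and `my_end` are hypothetical per-node values; init(path) must have set PATH).
#   bar = ProgressBar(total = global_total, offset = my_start, desc = "encode")
#   for idx, item in enumerate(workload, start = my_start):
#     ... # process item
#     bar.update(idx)
#   bar.finalize(my_end)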
| 7,491 | 28.380392 | 80 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/plotter.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In-house plotter module that plots data.
Based on plotly module
"""
import typing
import pathlib
import numpy as np
import itertools
from plotly import graph_objs as go
import plotly.figure_factory as ff
import plotly.express as px
from deeplearning.benchpress.util import logging as l
example_formats = """
margin = {'l': 0, 'r': 0, 't': 0, 'b': 0} # Eliminates excess background around the plot (Can hide the title)
plot_bgcolor = 'rgba(0,0,0,0)' or "#000fff" # Sets the background color of the plot
"""
def _get_generic_figure(**kwargs) -> go.Layout:
"""
Constructor of a basic plotly layout.
  All keyword arguments are compatible with the plotly documentation.
  Exceptions:
    axisfont is used instead of titlefont; titlefont is reserved for the title's font size property.
"""
# Title and axis names
title = kwargs.get('title', "")
x_name = kwargs.get('x_name', "")
y_name = kwargs.get('y_name', "")
# Font sizes
titlefont = kwargs.get('titlefont', None)
axisfont = kwargs.get('axisfont', None)
tickfont = kwargs.get('tickfont', None)
# Plot line and axis options
showline_x = kwargs.get('showline_x', None)
showline_y = kwargs.get('showline_y', None)
linecolor = kwargs.get('linecolor', None)
gridcolor_x = kwargs.get('gridcolor_x', None)
gridcolor_y = kwargs.get('gridcolor_y', None)
mirror = kwargs.get('mirror', None)
showgrid_x = kwargs.get('showgrid_x', None)
showgrid_y = kwargs.get('showgrid_y', None)
linewidth = kwargs.get('linewidth', None)
gridwidth = kwargs.get('gridwidth', None)
margin = kwargs.get('margin', {'l': 40, 'r': 45, 't': 95, 'b': 0})
x_tickangle = kwargs.get('x_tickangle', None)
showticklabels_x = kwargs.get('showticklabels_x', True)
showticklabels_y = kwargs.get('showticklabels_y', True)
# Legend
legend_x = kwargs.get('legend_x', 1.0)
legend_y = kwargs.get('legend_y', 1.0)
traceorder = kwargs.get('traceorder', None)
legendfont = kwargs.get('legendfont', None)
# Background
bg_color = kwargs.get('bg_color', None)
# Violin options
violingap = kwargs.get('violingap', None)
violinmode = kwargs.get('violinmode', None)
title = dict(text = title, font = dict(size = titlefont))
yaxis = dict(
title = y_name, showgrid = showgrid_y,
showline = showline_y, linecolor = linecolor,
mirror = mirror, linewidth = linewidth,
gridwidth = gridwidth,
gridcolor = gridcolor_y,
tickfont = dict(size = tickfont),
titlefont = dict(size = axisfont),
showticklabels = showticklabels_y,
)
xaxis = dict(
title = x_name, showgrid = showgrid_x,
showline = showline_x, linecolor = linecolor,
mirror = mirror, linewidth = linewidth,
gridcolor = gridcolor_x,
tickfont = dict(size = tickfont),
titlefont = dict(size = axisfont),
showticklabels = showticklabels_x,
rangemode="tozero"
)
layout = go.Layout(
plot_bgcolor = bg_color,
margin = margin,
legend = dict(x = legend_x, y = legend_y, bgcolor = 'rgba(0,0,0,0)', traceorder = traceorder, font = dict(size = legendfont)),
title = title,
xaxis = xaxis,
yaxis = yaxis,
violingap = violingap,
violinmode = violinmode,
)
fig = go.Figure(layout = layout)
if x_tickangle:
fig.update_xaxes(tickangle = 45)
fig.update_yaxes(automargin = True)
return fig
def _write_figure(fig : go.Figure,
plot_name : str,
path : pathlib.Path = None,
**kwargs
) -> None:
"""
  Write plotly image and HTML files if a path is given.
  Otherwise only show the interactive figure.
"""
if path:
path.mkdir(parents = True, exist_ok = True)
outf = lambda ext: str(path / "{}.{}".format(plot_name, ext))
try:
fig.write_html (outf("html"))
except ValueError:
l.logger().warn("HTML plot failed", ddp_nodes = True)
try:
fig.write_image(outf("png"), width = kwargs.get('width', 1024), height = kwargs.get('height', 768))
except ValueError:
l.logger().warn("PNG plot failed", ddp_nodes = True)
try:
fig.write_image(outf("pdf"))
except ValueError:
l.logger().warn("PDF plot failed", ddp_nodes = True)
else:
fig.show()
return
def SingleScatterLine(x : np.array,
y : np.array,
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""Plot a single line, with scatter points at datapoints."""
fig = _get_generic_figure(**kwargs)
fig.add_trace(
go.Scatter(
x = x, y = y,
mode = kwargs.get('mode', 'lines+markers'),
name = plot_name,
showlegend = kwargs.get('showlegend', False),
marker_color = kwargs.get('marker_color', "#00b3b3"),
opacity = kwargs.get('opacity', 0.75),
)
)
_write_figure(fig, plot_name, path, **kwargs)
return
def MultiScatterLine(x : typing.List[np.array],
y : typing.List[np.array],
names : typing.List[str],
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""
  Implementation of a simple, ungrouped 2D plot of multiple scatter lines.
"""
fig = _get_generic_figure(**kwargs)
for xx, yy, n in zip(x, y, names):
fig.add_trace(
go.Scatter(
x = xx,
y = yy,
name = n,
mode = kwargs.get('mode', 'lines+markers'),
showlegend = kwargs.get('showlegend', True),
opacity = kwargs.get('opacity', 0.75),
)
)
_write_figure(fig, plot_name, path, **kwargs)
return
def GroupScatterPlot(groups : typing.Dict[str, typing.Dict[str, list]],
plot_name : str,
marker_style : typing.List[str] = None,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""
  Plots a grouped scatter plot of points in two-dimensional space.
  Groups must comply with the following format:
groups = {
'group_name': {
'data': [[xi, yi], ...],
'names': [ni, ...],
'frequency': [1, 1, 2, ...] if you want scatter bubbles based on frequency.
}
}
"""
fig = _get_generic_figure(**kwargs)
if marker_style:
if len(marker_style) != len(groups.keys()):
raise ValueError("Mismatch between markers styles and number of groups")
miter = iter(marker_style)
else:
miter = None
for group, values in groups.items():
if len(values['data']) == 0:
continue
feats = np.array(values['data'])
names = values['names']
fig.add_trace(
go.Scatter(
x = feats[:,0], y = feats[:,1],
name = group,
mode = kwargs.get('mode', 'lines+markers+text' if len(names) > 0 else "lines+markers"),
showlegend = kwargs.get('showlegend', True),
opacity = kwargs.get('opacity', 1.0),
marker = next(miter) if miter else ({'size': 8} if 'frequency' not in values else None),
marker_size = [14*x for x in values['frequency']] if 'frequency' in values else 8,
text = names,
line_shape='spline',
textposition = "top center",
textfont = dict(size = 16),
)
)
_write_figure(fig, plot_name, path, **kwargs)
return
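# Illustrative example of the `groups` layout expected by GroupScatterPlot above
# (editor's note: names and values below are made up).
#   groups = {
#     "benchmarks": {"data": [[1.0, 2.0], [3.0, 4.5]], "names": ["bm1", "bm2"]},
#     "samples"   : {"data": [[0.5, 1.1]], "names": ["s1"], "frequency": [3]},
#   }
#   GroupScatterPlot(groups, "feature_space", path = pathlib.Path("./plots"))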
def SliderGroupScatterPlot(steps : typing.Dict[int, typing.Dict[str, typing.Dict[str, list]]],
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
fig = _get_generic_figure(**kwargs)
print(len(steps.keys()))
for step, step_data in steps.items():
for group, values in step_data.items():
if len(values['data']) == 0:
fig.add_trace(
go.Scatter(
visible = False,
x = [], y = [],
name = group,
)
)
else:
feats = np.array(values['data'])
names = values['names']
fig.add_trace(
go.Scatter(
visible = False,
x = feats[:,0], y = feats[:,1],
name = group,
mode = kwargs.get('mode', 'markers'),
showlegend = kwargs.get('showlegend', True),
opacity = kwargs.get('opacity', 1.0),
marker_size = [14*x for x in values['frequency']] if 'frequency' in values else None,
text = names,
)
)
print(len(fig.data))
fig.data[0].visible = True
fig.data[1].visible = True
fig.data[2].visible = True
fig.data[3].visible = True
fig.data[4].visible = True
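  # NOTE (editor's addition): the visibility flags above and the i*5 strides below
  # assume exactly five traces (groups) are added per slider step.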
slider_steps = []
for i in range(len(steps.keys())):
step = dict(
method = 'update',
args = [
{'visible': [False] * len(fig.data)},
{"title": "Users with total predictions >= {}".format(i+2)}
]
)
step["args"][0]["visible"][i*5] = True
step["args"][0]["visible"][i*5 + 1] = True
step["args"][0]["visible"][i*5 + 2] = True
step["args"][0]["visible"][i*5 + 3] = True
step["args"][0]["visible"][i*5 + 4] = True
slider_steps.append(step)
sliders = [dict(steps = slider_steps)]
fig.update_layout(sliders = sliders)
_write_figure(fig, plot_name, path, **kwargs)
return
def FrequencyBars(x : np.array,
y : np.array,
plot_name : str,
path : pathlib.Path = None,
vlines : typing.List[typing.Tuple[int, str]] = None,
**kwargs,
) -> None:
"""Plot frequency bars based on key."""
fig = _get_generic_figure(**kwargs)
fig.add_trace(
go.Bar(
x = x,
y = y,
showlegend = False,
marker_color = '#ac3939',
opacity = 0.75,
)
)
if vlines:
for (vline, annotation) in vlines:
fig.add_vline(
x = vline,
line_width = 1,
line_dash = "solid",
line_color = "black",
annotation_position = "bottom",
annotation_text = annotation,
)
_write_figure(fig, plot_name, path, **kwargs)
return
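# Illustrative call of FrequencyBars above (editor's note: values are made up).
#   FrequencyBars(x = [1, 2, 3], y = [10, 4, 1], plot_name = "token_freq",
#                 vlines = [(2, "cutoff")])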
def LogitsStepsDistrib(x : typing.List[np.array],
atoms : typing.List[str],
sample_indices : typing.List[str],
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""
  Categorical grouped-bar plotting over the vocabulary.
  One bar trace is added per prediction step, with vocab_size bars in each trace.
Used to plot the probability distribution of BERT's token selection.
"""
fig = _get_generic_figure(**kwargs)
for pred, name in zip(x, sample_indices):
fig.add_trace(
go.Bar(
name = name,
x = atoms,
y = pred,
)
)
_write_figure(fig, plot_name, path, **kwargs)
return
def GrouppedBars(groups : typing.Dict[str, typing.Tuple[typing.List, typing.List]],
plot_name : str,
text : typing.List[str] = None,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""
Similar to LogitsStepsDistrib but more generic.
Plots groups of bars.
  Groups must comply with the following format:
groups = {
'group_name': ([], [])
}
"""
# colors
fig = _get_generic_figure(**kwargs)
palette = itertools.cycle(px.colors.qualitative.T10)
for group, (x, y) in groups.items():
fig.add_trace(
go.Bar(
name = str(group),
x = x,
y = [(0.2+i if i == 0 else i) for i in y],
marker_color = next(palette),
textposition = kwargs.get('textposition', 'inside'),
# text = text,
text = ["" if i < 100 else "*" for i in y],
textfont = dict(color = "white", size = 140),
)
)
_write_figure(fig, plot_name, path, **kwargs)
return
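# Illustrative example of the `groups` layout expected by GrouppedBars above
# (editor's note: names and values are made up).
#   groups = {
#     "model_a": (["feat_A", "feat_B"], [120, 80]),
#     "model_b": (["feat_A", "feat_B"], [60, 30]),
#   }
#   GrouppedBars(groups, "hits_per_feature")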
def SliderGrouppedBars(steps : typing.Dict[int, typing.Dict[str, typing.Tuple[typing.List, typing.List]]],
plot_name : str,
text : typing.List[str] = None,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""
  Slider variant of GrouppedBars: plots one set of grouped bars per slider step.
  Groups must comply with the following format:
groups = {
'group_name': ([], [])
}
"""
# colors
fig = _get_generic_figure(**kwargs)
for step_id, step_data in steps.items():
palette = itertools.cycle(px.colors.qualitative.T10)
print(step_id)
print(step_data.keys())
for group, (x, y) in step_data.items():
fig.add_trace(
go.Bar(
visible = False,
name = str(group),
x = x,
y = [(0.0+i if i == 0 else i) for i in y],
marker_color = next(palette),
textposition = kwargs.get('textposition', 'inside'),
text = text,
# text = ["" if i < 100 else "*" for i in y],
textfont = dict(color = "white", size = 140),
)
)
print(len(fig.data))
fig.data[0].visible = True
slider_steps = []
for i in range(len(steps.keys())):
step = dict(
method = 'update',
args = [
{'visible': [False] * len(fig.data)},
{"title": "Users with total predictions >= {}".format(i+2)}
]
)
step["args"][0]["visible"][i] = True
slider_steps.append(step)
sliders = [dict(steps = slider_steps)]
fig.update_layout(sliders = sliders)
_write_figure(fig, plot_name, path, **kwargs)
return
def CumulativeHistogram(x : np.array,
y : np.array,
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""Plot percent cumulative histogram."""
fig = _get_generic_figure(**kwargs)
fig.add_trace(
go.Histogram(
x = x,
y = y,
xbins = dict(size = 8),
cumulative_enabled = True,
histfunc = 'sum',
histnorm = 'percent',
showlegend = False,
marker_color = '#1d99a3',
opacity = 0.65,
)
)
_write_figure(fig, plot_name, path, **kwargs)
return
def NormalizedRadar(r : np.array,
theta : typing.List[str],
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""Radar chart for feature plotting"""
fig = _get_generic_figure(**kwargs)
fig.add_trace(
go.Scatterpolar(
r = r,
theta = theta,
fill = 'toself',
marker_color = "#cbef0e",
)
)
_write_figure(fig, plot_name, path, **kwargs)
return
def GrouppedRadar(groups : typing.Dict[str, typing.Tuple[typing.List[float], typing.List[str]]],
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""
Plot multiple groups within the same radar.
Input format of groups:
groups = {
'group1': [
[v1, v2, v3], # float values
[th1, th2, th3] # axis names
]
}
"""
# fig = _get_generic_figure(**kwargs)
fig = go.Figure(
layout = go.Layout(title = kwargs.get('title'))
)
for group, (vals, thetas) in groups.items():
fig.add_trace(
go.Scatterpolar(
r = vals + [vals[0]],
theta = thetas + [thetas[0]],
name = group,
)
)
# fig = px.line_polar(df, r='r', theta='theta', line_close=True)
fig.update_layout(
polar=dict(
radialaxis=dict(
visible=True,
# range=[0, 1]
)),
showlegend=True
)
_write_figure(fig, plot_name, path, width = 1800, height = 1800, **kwargs)
return
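# Illustrative example of the `groups` layout expected by GrouppedRadar above
# (editor's note: names and values are made up).
#   groups = {
#     "kernel_1": ([0.2, 0.5, 0.8], ["comp", "mem", "branches"]),
#     "kernel_2": ([0.6, 0.1, 0.3], ["comp", "mem", "branches"]),
#   }
#   GrouppedRadar(groups, "feature_radar", title = "Normalized features")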
def CategoricalViolin(x : np.array,
y : typing.List[np.array],
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""Plot percent cumulative histogram."""
fig = _get_generic_figure(**kwargs)
for xel, yel in zip(x, y):
fig.add_trace(
go.Violin(
x = [xel]*len(yel),
y = yel,
name = xel,
# side = 'positive',
meanline_visible = True,
box_visible = True,
showlegend = False,
opacity = 0.65,
)
)
_write_figure(fig, plot_name, path, **kwargs)
return
def GrouppedViolins(data : typing.Dict[str, typing.Dict[str, typing.List[int]]],
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""
  Plot grouped violins. Inputs must be:
data = {
'group_name': {
'feat_1': [v1, v2, v3...],
'feat_2': [v1, v2, v3...],
}
} etc.
"""
fig = _get_generic_figure(**kwargs)
# fig = go.Figure(layout = go.Layout(violinmode = 'group'))
for group in data.keys():
fig.add_trace(
go.Violin(
x = [x for y in [[x]*len(data[group][x]) for x in data[group].keys()] for x in y],
y = [x for y in data[group].values() for x in y],
legendgroup = group,
scalegroup = group,
name = group,
)
)
_write_figure(fig, plot_name, path, **kwargs)
return
def RelativeDistribution(x : np.array,
y : typing.List[np.array],
plot_name : str,
path : pathlib.Path = None,
**kwargs,
) -> None:
"""Plot smoothened relative distribution of data"""
# layout = _get_generic_layout(**kwargs)
fig = ff.create_distplot(
y,
x,
curve_type = 'normal',
histnorm = "probability",
show_rug = kwargs.get('show_rug', False),
bin_size = kwargs.get('bin_size', 1),
show_hist = True
)
# fig.update_layout(layout)
_write_figure(fig, plot_name, path, **kwargs)
return
| 19,047 | 30.746667 | 136 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/process.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
def isolate(process: callable, **kwargs) -> None:
"""
Executes a callable in isolated process space by spawning a child process.
After executing, memory, cpu and gpu resources will be freed.
  Handy for executing TF-graph functions that would otherwise not release memory after execution.
Args:
process: callable. Function to be executed.
kwargs: See multiprocessing.Process docs for kwargs.
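  Example (illustrative sketch, editor's note; `run_tf_graph` is a hypothetical heavy function):
    >>> def train():
    ...   run_tf_graph()
    >>> isolate(train)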
"""
pr = multiprocessing.Process(target = process, **kwargs)
pr.start()
pr.join()
return | 1,112 | 36.1 | 82 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/crypto.py | # coding=utf-8
# Copyright 2022 Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hashing and cryptography utils.
"""
import hashlib
import pathlib
import typing
def _checksum(hash_fn, data):
return hash_fn(data).hexdigest()
def _checksum_str(hash_fn, string, encoding="utf-8"):
return _checksum(hash_fn, string.encode(encoding))
def _checksum_list(hash_fn, *elems):
string = "".join(sorted(str(x) for x in elems))
return _checksum_str(hash_fn, string)
def _checksum_file(hash_fn, path: typing.Union[str, pathlib.Path]):
with open(path, "rb") as infile:
ret = _checksum(hash_fn, infile.read())
return ret
def sha1(data):
"""
Return the sha1 of "data".
Arguments:
data (bytes): Data.
Returns:
str: Hex encoded.
"""
return _checksum(hashlib.sha1, data)
def sha1_str(string, encoding="utf-8"):
"""
Return the sha1 of string "data".
Arguments:
string: String.
Returns:
str: Hex encoded.
"""
return _checksum_str(hashlib.sha1, string, encoding=encoding)
def sha1_list(*elems):
"""
Return the sha1 of all elements of a list.
Arguments:
*elems: List of stringifiable data.
Returns:
str: Hex encoded.
"""
return _checksum_list(hashlib.sha1, *elems)
def sha1_file(path: typing.Union[str, pathlib.Path]):
"""
Return the sha1 of file at "path".
Arguments:
path (str): Path to file
Returns:
str: Hex encoded.
"""
return _checksum_file(hashlib.sha1, path)
def md5(data):
"""
Return the md5 of "data".
Arguments:
data (bytes): Data.
Returns:
str: Hex encoded.
"""
return _checksum(hashlib.md5, data)
def md5_str(string, encoding="utf-8"):
"""
Return the md5 of string "data".
Arguments:
string: String.
Returns:
str: Hex encoded.
"""
return _checksum_str(hashlib.md5, string, encoding=encoding)
def md5_list(*elems):
"""
Return the md5 of all elements of a list.
Arguments:
*elems: List of stringifiable data.
Returns:
str: Hex encoded.
"""
return _checksum_list(hashlib.md5, *elems)
def md5_file(path: typing.Union[str, pathlib.Path]):
"""
Return the md5 of file at "path".
Arguments:
path (str): Path to file
Returns:
str: Hex encoded.
"""
return _checksum_file(hashlib.md5, path)
def sha256(data):
"""
Return the sha256 of "data".
Arguments:
data (bytes): Data.
Returns:
str: Hex encoded.
"""
return _checksum(hashlib.sha256, data)
def sha256_str(string, encoding="utf-8"):
"""
Return the sha256 of string "data".
Arguments:
string: String.
Returns:
str: Hex encoded.
"""
return _checksum_str(hashlib.sha256, string, encoding=encoding)
def sha256_list(*elems):
"""
Return the sha256 of all elements of a list.
Arguments:
*elems: List of stringifiable data.
Returns:
str: Hex encoded.
"""
return _checksum_list(hashlib.sha256, *elems)
def sha256_file(path: typing.Union[str, pathlib.Path]):
"""
Return the sha256 of file at "path".
Arguments:
path (str): Path to file
Returns:
str: Hex encoded.
"""
return _checksum_file(hashlib.sha256, path)
| 3,703 | 17.994872 | 74 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/sqlutil.py | # coding=utf-8
# Copyright 2022 Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility code for working with sqlalchemy."""
import contextlib
import os
import pathlib
import queue
import sqlite3
import humanize
import sys
import threading
import time
import typing
from typing import Callable
from typing import List
from typing import Optional
import sqlalchemy as sql
from absl import flags as absl_flags
from sqlalchemy import func
from sqlalchemy import orm
from sqlalchemy.dialects import mysql
from sqlalchemy.ext import declarative
from deeplearning.benchpress.util import pbutil
FLAGS = absl_flags.FLAGS
absl_flags.DEFINE_boolean(
"sqlutil_echo",
False,
"If True, the Engine will log all statements as well as a repr() of their "
"parameter lists to the engines logger, which defaults to sys.stdout.",
)
absl_flags.DEFINE_boolean(
"sqlutil_pool_pre_ping",
True,
"Enable pessimistic pre-ping to check that database connections are "
"alive. This adds some overhead, but reduces the risk of "
'"server has gone away" errors. See:'
"<https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic>",
)
absl_flags.DEFINE_integer(
"mysql_engine_pool_size",
5,
"The number of connections to keep open inside the connection pool. A "
"--mysql_engine_pool_size of 0 indicates no limit",
)
absl_flags.DEFINE_integer(
"mysql_engine_max_overflow",
10,
"The number of connections to allow in connection pool “overflow”, that "
"is connections that can be opened above and beyond the "
"--mysql_engine_pool_size setting",
)
absl_flags.DEFINE_boolean(
"mysql_assume_utf8_charset",
True,
"Default to adding the '?charset=utf8' suffix to MySQL database URLs.",
)
absl_flags.DEFINE_boolean(
"sqlite_enable_foreign_keys",
True,
"Enable foreign key support for SQLite. This enforces foreign key "
"constraints, and enables cascaded update/delete statements. See: "
"https://docs.sqlalchemy.org/en/13/dialects/sqlite.html#foreign-key-support",
)
# The Query type is returned by Session.query(). This is a convenience for type
# annotations.
Query = orm.query.Query
class DatabaseNotFound(FileNotFoundError):
"""An error that is raised if the requested database cannot be found."""
def __init__(self, url: str):
self._url = url
@property
def url(self):
return self._url
def __repr__(self) -> str:
return f"Database not found: '{self.url}'"
def __str__(self) -> str:
return repr(self)
def Base(*args, **kwargs) -> sql.ext.declarative.DeclarativeMeta:
"""Construct a base class for declarative class definitions."""
return sql.ext.declarative.declarative_base(*args, **kwargs)
def GetOrAdd(
session: sql.orm.session.Session,
model,
defaults: typing.Dict[str, object] = None,
**kwargs,
):
"""Instantiate a mapped database object.
If the object is not in the database,
add it. Note that no change is written to disk until commit() is called on the
session.
Args:
session: The database session.
model: The database table class.
defaults: Default values for mapped objects.
kwargs: The values for the table row.
Returns:
An instance of the model class, with the values specified.
"""
instance = session.query(model).filter_by(**kwargs).first()
if not instance:
params = {
k: v
for k, v in kwargs.items()
if not isinstance(v, sql.sql.expression.ClauseElement)
}
params.update(defaults or {})
instance = model(**params)
session.add(instance)
return instance
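# Illustrative usage of GetOrAdd above (editor's note: `Person` is a hypothetical
# mapped class and `session` an open session).
#   person = GetOrAdd(session, Person, defaults = {"name": "Foo"}, email = "[email protected]")
#   session.commit() # nothing is written to disk until commit()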
def Get(
session: sql.orm.session.Session,
model,
defaults: typing.Dict[str, object] = None,
**kwargs,
):
"""Determine if a database object exists.
Args:
session: The database session.
model: The database table class.
defaults: Default values for mapped objects.
kwargs: The values for the table row.
Returns:
An instance of the model class with the values specified, or None if the
object is not in the database.
"""
del defaults
return session.query(model).filter_by(**kwargs).first()
def CreateEngine(url: str, must_exist: bool = False) -> sql.engine.Engine:
"""Create an sqlalchemy database engine.
This is a convenience wrapper for creating an sqlalchemy engine, that also
creates the database if required, and checks that the database exists. This
means that it is less flexible than SqlAlchemy's create_engine() - only three
combination of dialects and drivers are supported: sqlite, mysql, and
postgresql.
See https://docs.sqlalchemy.org/en/latest/core/engines.html for details.
Additionally, this implements a custom 'file://' handler, which reads a URL
from a local file, and returns a connection to the database addressed by the
URL. Use this if you would like to keep sensitive information such as a MySQL
database password out of your .bash_history.
Examples:
Create in-memory SQLite database:
>>> engine = CreateEngine('sqlite://')
Connect to an SQLite database at relative.db:
>>> engine = CreateEngine('sqlite:///relative.db')
Connect to an SQLite database at /absolute/path/to/db:
>>> engine = CreateEngine('sqlite:////absolute/path/to/db')
Connect to MySQL database:
>>> engine = CreateEngine(
'mysql://bob:password@localhost:1234/database?charset=utf8')
Connect to PostgreSQL database:
>>> engine.CreateEngine(
'postgresql://bob:password@localhost:1234/database')
Connect to a URL specified in the file /tmp/url.txt:
>>> engine.CreateEngine('file:///tmp/url.txt')
Connect to a URL specified in the file /tmp/url.txt, with the suffix
'/database?charset=utf8':
>>> engine.CreateEngine('file:///tmp/url.txt?/database?charset=utf8')
Args:
url: The URL of the database to connect to.
must_exist: If True, raise DatabaseNotFound if it doesn't exist. Else,
database is created if it doesn't exist.
Returns:
An SQLalchemy Engine instance.
Raises:
DatabaseNotFound: If the database does not exist and must_exist is set.
ValueError: If the datastore backend is not supported.
"""
engine_args = {}
# Read and expand a `file://` prefixed URL.
url = ResolveUrl(url)
if url.startswith("mysql://"):
# Support for MySQL dialect.
# We create a throwaway engine that we use to check if the requested
# database exists.
engine = sql.create_engine("/".join(url.split("/")[:-1]))
database = url.split("/")[-1].split("?")[0]
query = engine.execute(
sql.text(
"SELECT SCHEMA_NAME FROM "
"INFORMATION_SCHEMA.SCHEMATA WHERE "
"SCHEMA_NAME = :database",
),
database=database,
)
# Engine-specific options.
engine_args["pool_size"] = FLAGS.mysql_engine_pool_size
engine_args["max_overflow"] = FLAGS.mysql_engine_max_overflow
if not query.first():
if must_exist:
raise DatabaseNotFound(url)
else:
# We can't use sql.text() escaping here because it uses single quotes
# for escaping. MySQL only accepts backticks for quoting database
# names.
engine.execute(f"CREATE DATABASE `{database}`")
engine.dispose()
elif url.startswith("sqlite://"):
# Support for SQLite dialect.
# This project (phd) deliberately disallows relative paths due to Bazel
# sandboxing.
if url != "sqlite://" and not url.startswith("sqlite:////"):
raise ValueError("Relative path to SQLite database is not allowed")
if url == "sqlite://":
if must_exist:
raise ValueError(
"must_exist=True not valid for in-memory SQLite database",
)
else:
path = pathlib.Path(url[len("sqlite:///") :])
if must_exist:
if not path.is_file():
raise DatabaseNotFound(url)
else:
# Make the parent directory for SQLite database if creating a new
# database.
path.parent.mkdir(parents=True, exist_ok=True)
elif url.startswith("postgresql://"):
# Support for PostgreSQL dialect.
engine = sql.create_engine("/".join(url.split("/")[:-1] + ["postgres"]))
conn = engine.connect()
database = url.split("/")[-1]
query = conn.execute(
sql.text("SELECT 1 FROM pg_database WHERE datname = :database"),
database=database,
)
if not query.first():
if must_exist:
raise DatabaseNotFound(url)
else:
# PostgreSQL does not let you create databases within a transaction, so
# manually complete the transaction before creating the database.
conn.execute(sql.text("COMMIT"))
# PostgreSQL does not allow single quoting of database names.
conn.execute(f"CREATE DATABASE {database}")
conn.close()
engine.dispose()
else:
raise ValueError(f"Unsupported database URL='{url}'")
# Create the engine.
engine = sql.create_engine(
url,
encoding="utf-8",
echo=FLAGS.sqlutil_echo,
pool_pre_ping=FLAGS.sqlutil_pool_pre_ping,
**engine_args,
)
# Create and immediately close a connection. This is because SQLAlchemy engine
# is lazily instantiated, so for connections such as SQLite, this line
# actually creates the file.
engine.connect().close()
return engine
@sql.event.listens_for(sql.engine.Engine, "connect")
def EnableSqliteForeignKeysCallback(dbapi_connection, connection_record):
"""Enable foreign key constraints for SQLite databases.
See --sqlite_enable_foreign_keys for details.
"""
del connection_record
# This callback listens for *all* database connections, not just SQLite. Check
# the type before trying to run an SQLite-specific pragma.
if FLAGS.sqlite_enable_foreign_keys and isinstance(
dbapi_connection, sqlite3.Connection
):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
def ResolveUrl(url: str, use_flags: bool = True):
"""Resolve the URL of a database.
The following modifications are supported:
* If the url begins with 'file://', the URL is substituted with the
contents of the file.
* If --mysql_assume_utf8_charset is set, then '?charset=utf8' suffix is
appended to URLs which begin with mysql://.
* Shell variables are expanded.
Args:
url: The URL to expand, e.g. `file://path/to/file.txt?arg'
use_flags: Determine whether behaviour is dictated by the FLAGS variables.
Set this to False only when resolving database URLs before flags parsing,
e.g. in enumerating test fixtures.
Returns:
The URL as interpreted by reading any URL file.
Raises:
ValueError: If the file path is invalid.
FileNotFoundError: IF the file path does not exist.
"""
# Substitute shell variables.
url = os.path.expandvars(url)
if url.startswith("file://"):
# Split the URL into the file path, and the optional suffix.
components = url.split("?")
path, suffix = components[0], "?".join(components[1:])
# Strip the file:// prefix from the path.
path = pathlib.Path(path[len("file://") :])
if not path.is_absolute():
raise ValueError("Relative path to file:// is not allowed")
if not path.is_file():
raise FileNotFoundError(f"File '{path}' not found")
# Read the contents of the file, ignoring lines starting with '#'.
with open(path) as f:
url = "\n".join(
x for x in f.read().split("\n") if not x.lstrip().startswith("#")
).strip()
# Append the suffix.
url += suffix
if (
use_flags and url.startswith("mysql://") and FLAGS.mysql_assume_utf8_charset
):
url += "?charset=utf8"
return url
def ColumnNames(model) -> typing.List[str]:
"""Return the names of all columns in a mapped object.
Args:
model: A mapped class.
Returns:
A list of string column names in the order that they are declared.
"""
try:
inst = sql.inspect(model)
return [c_attr.key for c_attr in inst.mapper.column_attrs]
except sql.exc.NoInspectionAvailable as e:
raise TypeError(str(e))
class Session(orm.session.Session):
"""A subclass of the default SQLAlchemy Session with added functionality.
An instance of this class is returned by Database.Session().
"""
def GetOrAdd(
self, model, defaults: typing.Dict[str, object] = None, **kwargs
):
"""Instantiate a mapped database object.
If the object is not in the database, add it. Note that no change is written
to disk until commit() is called on the session.
Args:
model: The database table class.
defaults: Default values for mapped objects.
kwargs: The values for the table row.
Returns:
An instance of the model class, with the values specified.
"""
return GetOrAdd(self, model, defaults, **kwargs)
class Database(object):
"""A base class for implementing databases."""
SessionType = Session
def __init__(self, url: str, declarative_base, must_exist: bool = False):
"""Instantiate a database object.
Example:
>>> db = Database('sqlite:////tmp/foo.db',
sqlalchemy.ext.declarative.declarative_base())
Args:
url: The URL of the database to connect to.
declarative_base: The SQLAlchemy declarative base instance.
must_exist: If True, raise DatabaseNotFound if it doesn't exist. Else,
database is created if it doesn't exist.
Raises:
DatabaseNotFound: If the database does not exist and must_exist is set.
ValueError: If the datastore backend is not supported.
"""
self._url = url
self.engine = CreateEngine(url, must_exist=must_exist)
declarative_base.metadata.create_all(self.engine)
declarative_base.metadata.bind = self.engine
# Bind the Engine to a session maker, which instantiates our own Session
# class, which is a subclass of the default SQLAlchemy Session with added
# functionality.
self.MakeSession = orm.sessionmaker(bind=self.engine, class_=Session)
def Close(self) -> None:
"""Close the connection to the database.
Use this to free up the connection to a database, while keeping the database
instance around. After calling this method, attempting to run operations on
this database will raise an error (like a sqlalchemy.exc.OperationalError).
Usage of this method is generally discouraged - connections are
automatically closed up when a database instance is garbage collected, so
there are rarely cases for leaving a database instance around with the
connection closed. Use at your peril!
"""
self.engine.dispose()
def Drop(self, are_you_sure_about_this_flag: bool = False):
"""Drop the database, irreverisbly destroying it.
Be careful with this! After calling this method an a Database instance, no
further operations can be made on it, and any Sessions should be discarded.
Args:
are_you_sure_about_this_flag: You should be sure.
Raises:
ValueError: In case you're not 100% sure.
"""
if not are_you_sure_about_this_flag:
raise ValueError("Let's take a minute to think things over")
if self.url.startswith("mysql://"):
engine = sql.create_engine("/".join(self.url.split("/")[:-1]))
database = self.url.split("/")[-1].split("?")[0]
engine.execute(f"DROP DATABASE IF EXISTS `{database}`")
elif self.url == "sqlite://":
      # In-memory databases do not need dropping.
pass
elif self.url.startswith("sqlite:///"):
path = pathlib.Path(self.url[len("sqlite:///") :])
assert path.is_file()
path.unlink()
else:
raise NotImplementedError(
f"Unsupported operation DROP for database: '{self.url}'",
)
@property
def url(self) -> str:
"""Return the URL of the database."""
return self._url
@contextlib.contextmanager
def Session(
self, commit: bool = False, session: Optional[Session] = None
) -> Session:
"""Provide a transactional scope around a session.
The optional session argument may be used for cases where you want to
optionally re-use an existing session, rather than always creating a new
session, e.g.:
class MyDatabase(sqlutil.Database):
def DoAThing(self, session=None):
with self.Session(session=session, commit=True):
# go nuts ...
Args:
commit: If true, commit session at the end of scope.
session: An existing session object to re-use.
Returns:
A database session.
"""
session = session or self.MakeSession()
try:
yield session
if commit:
session.commit()
except:
session.rollback()
raise
finally:
session.close()
@property
def Random(self):
"""Get the backend-specific random function.
This can be used to select a random row from a table, e.g.
session.query(Table).order_by(db.Random()).first()
"""
if self.url.startswith("mysql"):
return func.rand
else:
return func.random # for PostgreSQL, SQLite
def __repr__(self) -> str:
return self.url
class TablenameFromClassNameMixin(object):
"""A class mixin which derives __tablename__ from the class name.
Add this mixin to a mapped table class to automatically set the set the
__tablename__ property of a class to the lowercase name of the Python class.
"""
@declarative.declared_attr
def __tablename__(self):
return self.__name__.lower()
class ProtoBackedMixin(object):
"""A database table backed by protocol buffers.
This class provides the abstract interface for sqlalchemy table classes which
support serialization to and from protocol buffers.
This is only an interface - inheriting classes must still inherit from
sqlalchemy.ext.declarative.declarative_base().
"""
proto_t = None
def SetProto(self, proto: pbutil.ProtocolBuffer) -> None:
"""Set the fields of a protocol buffer with the values from the instance.
Args:
proto: A protocol buffer.
"""
raise NotImplementedError(
f"{type(self).__name__}.SetProto() not implemented",
)
def ToProto(self) -> pbutil.ProtocolBuffer:
"""Serialize the instance to protocol buffer.
Returns:
A protocol buffer.
"""
proto = self.proto_t()
self.SetProto(proto)
return proto
@classmethod
def FromProto(
cls, proto: pbutil.ProtocolBuffer,
) -> typing.Dict[str, typing.Any]:
"""Return a dictionary of instance constructor args from proto.
Examples:
Construct a table instance from proto:
>>> table = Table(**Table.FromProto(proto))
Construct a table instance and add to session:
>>> session.GetOrAdd(Table, **Table.FromProto(proto))
Args:
proto: A protocol buffer.
Returns:
A dictionary of constructor arguments.
"""
raise NotImplementedError(
f"{type(self).__name__}.FromProto() not implemented",
)
@classmethod
def FromFile(cls, path: pathlib.Path) -> typing.Dict[str, typing.Any]:
"""Return a dictionary of instance constructor args from proto file.
Examples:
Construct a table instance from proto file:
>>> table = Table(**Table.FromFile(path))
Construct a table instance and add to session:
>>> session.GetOrAdd(Table, **Table.FromFile(path))
Args:
path: Path to a proto file.
Returns:
An instance.
"""
proto = pbutil.FromFile(path, cls.proto_t())
return cls.FromProto(proto)
class OffsetLimitQueryResultsBatch(typing.NamedTuple):
"""The results of an offset-limit batched query."""
# The current batch number.
batch_num: int
# Offset into the results set.
offset: int
# Limit is the last row in the results set.
limit: int
# The total number of rows in the query if compute_max_rows=True, else None.
max_rows: int
# The results of the query.
rows: typing.List[typing.Any]
def OffsetLimitBatchedQuery(
query: Query,
batch_size: int = 1000,
start_at: int = 0,
compute_max_rows: bool = False,
) -> typing.Iterator[OffsetLimitQueryResultsBatch]:
"""Split and return the rows resulting from a query in to batches.
This iteratively runs the query `SELECT * FROM * OFFSET i LIMIT batch_size;`
with `i` initialized to `start_at` and increasing by `batch_size` per
iteration. Iteration terminates when the query returns no rows.
This function is useful for returning row sets from enormous tables, where
loading the full query results in to memory would take prohibitive time or
resources.
Args:
query: The query to run.
batch_size: The number of rows to return per batch.
start_at: The initial offset into the table.
    compute_max_rows: If true, count the total number of rows in the query and include it in each batch.
Returns:
A generator of OffsetLimitQueryResultsBatch tuples, where each tuple
contains between 1 <= x <= `batch_size` rows.
"""
max_rows = None
if compute_max_rows:
max_rows = query.count()
batch_num = 0
i = start_at
while True:
batch_num += 1
batch = query.offset(i).limit(batch_size).all()
if batch:
yield OffsetLimitQueryResultsBatch(
batch_num=batch_num,
offset=i,
limit=i + batch_size,
max_rows=max_rows,
rows=batch,
)
i += len(batch)
else:
break
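# Illustrative usage of OffsetLimitBatchedQuery above (editor's note: `session`,
# `Table` and `Process` are hypothetical).
#   query = session.query(Table)
#   for batch in OffsetLimitBatchedQuery(query, batch_size = 512, compute_max_rows = True):
#     Process(batch.rows) # batch.offset and batch.max_rows give progress information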
class ColumnTypes(object):
"""Abstract class containing methods for generating column types."""
def __init__(self):
raise TypeError("abstract class")
@staticmethod
def BinaryArray(length: int):
"""Return a fixed size binary array column type.
Args:
length: The length of the column.
Returns:
A column type.
"""
return sql.Binary(length).with_variant(mysql.BINARY(length), "mysql")
@staticmethod
def LargeBinary():
"""Return a fixed size binary array column type.
Returns:
A column type.
"""
return sql.LargeBinary().with_variant(sql.LargeBinary(2 ** 31), "mysql")
@staticmethod
def UnboundedUnicodeText():
"""Return an unbounded unicode text column type.
    This isn't truly unbounded, but 2^31 chars should be enough!
Returns:
A column type.
"""
return sql.UnicodeText().with_variant(sql.UnicodeText(2 ** 31), "mysql")
@staticmethod
def IndexableString(length: int = None):
"""Return a string that is short enough that it can be used as an index.
Returns:
A column type.
"""
# MySQL InnoDB tables use a default index key prefix length limit of 767.
# https://dev.mysql.com/doc/refman/5.6/en/innodb-restrictions.html
MAX_LENGTH = 767
if length and length > MAX_LENGTH:
raise ValueError(
f"IndexableString requested length {length} is greater "
f"than maximum allowed {MAX_LENGTH}",
)
return sql.String(MAX_LENGTH)
@staticmethod
def MillisecondDatetime():
"""Return a datetime type with millisecond precision.
Returns:
A column type.
"""
return sql.DateTime().with_variant(mysql.DATETIME(fsp=3), "mysql")
class ColumnFactory(object):
"""Abstract class containing methods for generating columns."""
@staticmethod
def MillisecondDatetime(
nullable: bool = False, default = None,
):
"""Return a datetime column with millisecond precision.
Returns:
A column which defaults to UTC now.
"""
return sql.Column(
sql.DateTime().with_variant(mysql.DATETIME(fsp=3), "mysql",),
nullable=nullable,
default=default,
)
def ResilientAddManyAndCommit(db: Database, mapped: typing.Iterable[Base]):
"""Attempt to commit all mapped objects and return those that fail.
This method creates a session and commits the given mapped objects.
In case of error, this method will recurse up to O(log(n)) times, committing
as many objects that can be as possible.
Args:
db: The database to add the objects to.
mapped: A sequence of objects to commit.
Returns:
Any items in `mapped` which could not be committed, if any. Relative order
of items is preserved.
"""
failures = []
if not mapped:
return failures
mapped = list(mapped)
try:
with db.Session(commit=True) as session:
session.add_all(mapped)
except sql.exc.SQLAlchemyError as e:
# Divide and conquer. If we're committing only a single object, then a
# failure to commit it means that we can do nothing other than return it.
# Else, divide the mapped objects in half and attempt to commit as many of
# them as possible.
if len(mapped) == 1:
return mapped
else:
mid = int(len(mapped) / 2)
left = mapped[:mid]
right = mapped[mid:]
failures += ResilientAddManyAndCommit(db, left)
failures += ResilientAddManyAndCommit(db, right)
return failures
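# Illustrative usage of ResilientAddManyAndCommit above (editor's note: `db` is a
# Database instance and `rows` a list of mapped objects).
#   failed = ResilientAddManyAndCommit(db, rows)
#   if failed:
#     pass # handle or log the rows that could not be committed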
def QueryToString(query) -> str:
"""Compile the query to inline literals in place of '?' placeholders.
See: https://stackoverflow.com/a/23835766
"""
return str(query.statement.compile(compile_kwargs={"literal_binds": True}))
class BufferedDatabaseWriter(threading.Thread):
"""A buffered writer for adding objects to a database.
Use this class for cases when you are producing lots of mapped objects that
you would like to commit to a database, but don't require them to be committed
immediately. By buffering objects and committing them in batches, this class
minimises the number of SQL statements that are executed, and is faster than
creating and committing a session for every object.
This object spawns a separate thread for asynchronously performing database
writes. Use AddOne() and AddMany() methods to add objects to the write buffer.
Note that because this is a multithreaded implementation, in-memory SQLite
databases are not supported.
The user is responsible for calling Close() to flush the contents of the
buffer and terminate the thread. Alternatively, use this class as a context
manager to automatically flush the buffer and terminate the thread:
with BufferedDatabaseWriter(db, max_buffer_length=128) as writer:
for chunk in chunks_to_process:
objs = ProcessChunk(chunk)
writer.AddMany(objs)
"""
def __init__(
self,
db: Database,
max_buffer_size: Optional[int] = None,
max_buffer_length: Optional[int] = None,
max_seconds_since_flush: Optional[float] = None,
log_level: int = 2,
ctx = None,
):
"""Constructor.
Args:
db: The database to write to.
max_buffer_size: The maximum size of the buffer before flushing, in bytes.
The buffer size is the sum of the elements in the write buffer. The size
of elements is determined using sys.getsizeof(), and has all the caveats
of this method.
max_buffer_length: The maximum number of items in the write buffer before
flushing.
max_seconds_since_flush: The maximum number of elapsed seconds between
flushes.
      ctx: The progress context used for logging and profiling output.
log_level: The logging level for logging output.
"""
super(BufferedDatabaseWriter, self).__init__()
self.db = db
self.ctx = ctx
self.log_level = log_level
self.max_seconds_since_flush = max_seconds_since_flush
self.max_buffer_size = max_buffer_size
self.max_buffer_length = max_buffer_length
# Counters.
self.flush_count = 0
self.error_count = 0
self._buffer = []
self.buffer_size = 0
self._last_flush = time.time()
# Limit the size of the queue so that calls to AddOne() or AddMany() will
# block if the calling code is too far ahead of the writer.
queue_size = self.max_buffer_length * 2 if self.max_buffer_length else 1000
self._queue = queue.Queue(maxsize=queue_size)
self.start()
  def __enter__(self) -> "BufferedDatabaseWriter":
    """Enter a scoped writer context that closes at the end."""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Exit a scoped writer context closes at the end."""
del exc_type
del exc_val
del exc_tb
self.Close()
def AddOne(self, mapped, size: Optional[int] = None) -> None:
"""Add a mapped object.
Args:
mapped: The mapped object to write to the database.
size: The object sizes to use to update the total buffer size. If not
provided, sys.getsizeof() is used to determine the size.
"""
size = size or sys.getsizeof(mapped)
self._queue.put((mapped, size))
def AddMany(self, mappeds, sizes: Optional[List[int]] = None) -> None:
"""Add many mapped objects.
Args:
mappeds: The mapped objects to write to the database.
sizes: A list of mapped object sizes to use to calculate the buffer size.
If not provided, sys.getsizeof() is used to determine the size.
"""
sizes = sizes or [sys.getsizeof(item) for item in mappeds]
for mapped, size in zip(mappeds, sizes):
self._queue.put((mapped, size))
def AddLambdaOp(self, callback: Callable[[Database.SessionType], None]):
self._queue.put(BufferedDatabaseWriter.LambdaOp(callback))
def Flush(self) -> None:
"""Flush the buffer.
This method blocks until the flush has completed.
In normal use, you can rely on the automated flushing mechanisms to flush
the write buffer, rather than calling this by hand.
"""
self._queue.put(BufferedDatabaseWriter.FlushMarker())
self._queue.join()
def Close(self):
"""Close the writer thread.
This method blocks until the buffer has been flushed and the thread
terminates.
"""
if not self.is_alive():
raise TypeError("Close() called on dead BufferedDatabaseWriter")
self._queue.put(BufferedDatabaseWriter.CloseMarker())
self._queue.join()
self.join()
@property
def buffer_length(self) -> int:
"""Get the current length of the buffer, in range [0, max_buffer_length]."""
return len(self._buffer)
@property
def seconds_since_last_flush(self) -> float:
"""Get the number of seconds since the buffer was last flushed."""
return time.time() - self._last_flush
##############################################################################
# Private methods.
##############################################################################
class CloseMarker(object):
"""An object to append to _queue to close the thread."""
pass
class FlushMarker(object):
"""An object to append to _queue to flush the buffer."""
pass
class LambdaOp(object):
def __init__(self, callback):
self.callback = callback
def __call__(self, session: Database.SessionType):
self.callback(session)
def run(self):
"""The thread loop."""
while True:
# Block until there is something on the queue. Use max_seconds_since_flush
# as a timeout to ensure that flushes still occur when the writer is not
# being used.
try:
item = self._queue.get(timeout=self.max_seconds_since_flush)
except queue.Empty:
self._Flush()
continue
if isinstance(item, BufferedDatabaseWriter.CloseMarker):
# End of queue. Break out of the loop.
break
elif isinstance(item, BufferedDatabaseWriter.FlushMarker):
# Force a flush.
self._Flush()
elif isinstance(item, BufferedDatabaseWriter.LambdaOp):
        # Handle lambda op.
self._buffer.append(item)
self._MaybeFlush()
else:
# Add the object to the buffer.
mapped, size = item
self._buffer.append(mapped)
self.buffer_size += size
self._MaybeFlush()
# Register that the item has been processed. This is used by join() to
# signal to stop blocking.
self._queue.task_done()
# Register that the end-of-queue marker has been processed.
self._Flush()
self._queue.task_done()
def _MaybeFlush(self) -> None:
if (
(self.max_buffer_size and self.buffer_size >= self.max_buffer_size)
or (
self.max_buffer_length and self.buffer_length >= self.max_buffer_length
)
or (
self.max_seconds_since_flush
and self.seconds_since_last_flush >= self.max_seconds_since_flush
)
):
self._Flush()
def _AddMapped(self, mapped) -> None:
"""Add and commit a list of mapped objects."""
if not mapped:
return
failures = ResilientAddManyAndCommit(self.db, mapped)
if failures:
self.ctx.Error("Logger failed to commit %d objects", len(failures))
self.error_count += len(failures)
def _Flush(self):
"""Flush the buffer."""
if not self._buffer:
return
with self.ctx.Profile(
self.log_level,
f"Committed {self.buffer_length} rows "
f"({humanize.BinaryPrefix(self.buffer_size, 'B')}) to {self.db.url}",
), self.db.Session() as session:
# Iterate through the buffer and handle any lambda ops.
start_i, end_i = 0, 0
for end_i, item in enumerate(self._buffer):
if isinstance(item, BufferedDatabaseWriter.LambdaOp):
# If we have a lambda op, we flush the contents of the current buffer,
# then execute the op and continue.
self._AddMapped(self._buffer[start_i:end_i])
self._buffer[end_i](session)
session.commit()
start_i = end_i + 1
# Add any remaining mapped objects from the buffer.
self._AddMapped(self._buffer[start_i:])
self._buffer = []
self._last_flush = time.time()
self.buffer_size = 0
self.flush_count += 1
| 33,932 | 30.017367 | 90 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/gpu.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper module for GPU system handling"""
import os
import subprocess
import typing
from deeplearning.benchpress.util import logging as l
NVIDIA_SMI_GET_GPUS = "nvidia-smi --query-gpu=index,uuid,utilization.gpu,memory.total,memory.used,memory.free,driver_version,name,gpu_serial,display_active,display_mode,temperature.gpu --format=csv,noheader,nounits"
def _to_float_or_inf(value: str):
"""Util conversion string to float"""
try:
number = float(value)
except ValueError:
number = float("nan")
return number
def getGPUs(smi_output):
"""
Get all available GPU entries with information.
"""
gpus = []
for line in smi_output:
if line.strip():
values = line.split(", ")
gpus.append({
'id' : values[0],
'uuid' : values[1],
'gpu_util' : _to_float_or_inf(values[2]),
'mem_total' : _to_float_or_inf(values[3]),
'mem_used' : _to_float_or_inf(values[4]),
'mem_free' : _to_float_or_inf(values[5]),
'driver' : values[6],
'gpu_name' : values[7],
'serial' : values[8],
'display_active': values[9],
'display_mode' : values[10],
'temp_gpu' : _to_float_or_inf(values[11]),
})
return gpus
def getGPUID():
"""
  Get GPU entries sorted by used memory (ascending), so the first entry has the most memory available.
"""
try:
output = subprocess.check_output(NVIDIA_SMI_GET_GPUS.split())
except FileNotFoundError:
return None
gpus = getGPUs(output.decode("utf-8").split(os.linesep))
if len(gpus) > 0:
selected_gpus = sorted(gpus, key=lambda x: x['mem_used'])
return selected_gpus
else:
return None
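# Illustrative usage of getGPUID above (editor's note): pick the least-used GPU, if any.
#   gpus = getGPUID()
#   if gpus:
#     os.environ["CUDA_VISIBLE_DEVICES"] = str(gpus[0]['id'])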
def memUsageByPID(pids: typing.Iterable[int]) -> int:
"""
  Takes a python iterable (list, set, dict, tuple) of PIDs.
  Returns their total GPU memory allocation in MB.
"""
try:
output = subprocess.check_output("nvidia-smi pmon -c 1 -s m".split())
except FileNotFoundError:
return 0
pid_list = [i.split() for i in output.decode('utf-8').split(os.linesep)[2:]]
return sum([int(x[3]) for x in pid_list if x and x[1] != '-' and x[3] != '-' and int(x[1]) in pids])
| 2,747 | 32.108434 | 215 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/scratchpad/feature_transformer.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Experimental transformer for feature space mapping.
"""
import math
import typing
import copy
import time
import pathlib
import typing
import tqdm
import multiprocessing
import pickle
from absl import app
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.models.torch_bert import optimizer
from deeplearning.benchpress.models.torch_bert import hooks
from deeplearning.benchpress.experiments import workers
torch = pytorch.torch
ENCODED_DB_PATH = "/home/foivos/unique_encoded.db"
TOKENIZER_PATH = "/home/foivos/backup_tokenizer.pkl"
class TransformerModel(torch.nn.Module):
def __init__(self, ntoken: int, d_model: int, nhead: int, d_hid: int,
nlayers: int, pad_idx, dropout: float = 0.5):
super().__init__()
self.model_type = 'Transformer'
self.embed = torch.nn.Embedding(ntoken, d_model, padding_idx = pad_idx)
self.pos_encoder = PositionalEncoding(d_model, dropout)
self.target_embed = torch.nn.Embedding(ntoken, d_model)
self.target_pos_encoder = PositionalEncoding(d_model, dropout)
self.d_model = d_model
encoder_layers = torch.nn.TransformerEncoderLayer(d_model, nhead, d_hid, dropout, batch_first = True)
encoder_norm = torch.nn.LayerNorm(d_model, eps=1e-5)
self.transformer_encoder = torch.nn.TransformerEncoder(encoder_layers, nlayers, encoder_norm)
decoder_layer = torch.nn.TransformerDecoderLayer(d_model, nhead, d_hid, dropout, batch_first = True)
decoder_norm = torch.nn.LayerNorm(d_model, eps=1e-5)
self.transformer_decoder = torch.nn.TransformerDecoder(decoder_layer, nlayers, decoder_norm)
self.linear = torch.nn.Linear(d_model, ntoken)
self.init_weights()
def init_weights(self) -> None:
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
# self.decoder.bias.data.zero_()
# self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src: torch.Tensor, target: torch.Tensor, src_mask: torch.Tensor = None, src_key_padding_mask = None) -> torch.Tensor:
"""
Args:
src: Tensor, shape [seq_len, batch_size]
src_mask: Tensor, shape [seq_len, seq_len]
Returns:
output Tensor of shape [seq_len, batch_size, ntoken]
"""
src1 = self.embed(src) * math.sqrt(self.d_model)
src2 = self.pos_encoder(src1)
output1 = self.transformer_encoder(src2, mask = src_mask, src_key_padding_mask = src_key_padding_mask)
tgt1 = self.embed(target) * math.sqrt(self.d_model)
tgt2 = self.pos_encoder(tgt1)
output2 = self.transformer_decoder(tgt2, output1)
output3 = self.linear(output2)
# print(src.shape)
# print(src1.shape)
# print(src2.shape)
# print(output1.shape)
# print(output2.shape)
# print(output3.shape)
# input()
return output3
class PositionalEncoding(torch.nn.Module):
def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
super().__init__()
self.dropout = torch.nn.Dropout(p=dropout)
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
pe = torch.zeros(max_len, 1, d_model)
pe[:, 0, 0::2] = torch.sin(position * div_term)
pe[:, 0, 1::2] = torch.cos(position * div_term)
self.register_buffer('pe', pe)
  def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Args:
      x: Tensor, shape [batch_size, seq_len, embedding_dim] (batch_first = True)
    """
    # self.pe is stored sequence-first ([max_len, 1, d_model]); transpose the
    # sliced encoding so it broadcasts over the batch dimension of the
    # batch-first input instead of being indexed by batch size.
    x = x + self.pe[:x.size(1)].transpose(0, 1)
    return self.dropout(x)
class FeatureDataset(torch.utils.data.Dataset):
def __init__(self, corpus: typing.List[typing.Dict[str, typing.Dict[str, float]]]) -> None:
self.corpus = corpus
self.feat_tokenizer = tokenizers.FeatureTokenizer.FromFeatures(768, 65536, 2048)
self.dataset = self.compute_dataset()
return
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx: int):
if idx < 0:
if -idx > len(self):
raise ValueError
idx = len(self) + idx
return self.dataset[idx]
def compute_dataset(self):
seq_len = 256
f_len = {
"GreweFeatures": 6,
"AutophaseFeatures": 56,
"InstCountFeatures": 70,
}
pad_len = seq_len - sum(list(f_len.values()))
dataset = []
for dp in self.corpus:
for fspace in {"GreweFeatures", "AutophaseFeatures", "InstCountFeatures"}:
inp = []
for n, x in dp[fspace].items():
if n not in {"F2:coalesced/mem", "F4:comp/mem"}:
try:
x = int(x)
except Exception:
continue
inp.append(self.feat_tokenizer.TokenizeFeature(int(x)))
assert len(inp) == f_len[fspace], len(inp)
target_feats = dp["AutophaseFeatures"]
target = []
for x in target_feats.values():
try:
x = int(x)
except Exception:
continue
target.append(self.feat_tokenizer.TokenizeFeature(int(x)))
assert len(target) == f_len["AutophaseFeatures"], len(target)
if fspace == "GreweFeatures":
d = {
'inputs' : torch.LongTensor(inp + [self.feat_tokenizer.padToken] * (f_len["AutophaseFeatures"] + f_len["InstCountFeatures"] + pad_len)),
'target' : torch.LongTensor(target)
}
elif fspace == "AutophaseFeatures":
d = {
'inputs' : torch.LongTensor([self.feat_tokenizer.padToken] * f_len["GreweFeatures"] + inp + [self.feat_tokenizer.padToken] * (f_len["InstCountFeatures"] + pad_len)),
'target' : torch.LongTensor(target)
}
else:
d = {
'inputs' : torch.LongTensor([self.feat_tokenizer.padToken] * (f_len["GreweFeatures"] + f_len["AutophaseFeatures"]) + inp + [self.feat_tokenizer.padToken] * pad_len),
'target' : torch.LongTensor(target)
}
d['padding_mask'] = d['inputs'] == self.feat_tokenizer.padToken
dataset.append(d)
return dataset
def generate_square_subsequent_mask(sz: int) -> torch.Tensor:
"""Generates an upper-triangular matrix of -inf, with zeros on diag."""
return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)
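# For example (sketch), generate_square_subsequent_mask(3) yields
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# so position i may only attend to positions <= i.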
def data_process(raw_text_iter: torch.utils.data.dataset.IterableDataset) -> torch.Tensor:
  """Converts raw text into a flat Tensor."""
  # NOTE: apparently carried over from the PyTorch language-modelling tutorial;
  # it expects module-level `vocab` and `tokenizer` objects that are not defined
  # in this scratchpad and is unused by Train() below.
  data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]
  return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))
def batchify(data: torch.Tensor, bsz: int) -> torch.Tensor:
"""Divides the data into bsz separate sequences, removing extra elements
that wouldn't cleanly fit.
Args:
data: Tensor, shape [N]
bsz: int, batch size
Returns:
Tensor of shape [N // bsz, bsz]
"""
seq_len = data.size(0) // bsz
data = data[:seq_len * bsz]
data = data.view(bsz, seq_len).t().contiguous()
return data.to(device)
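# Example (sketch): batchify(torch.arange(10), 3) drops the trailing element and
# returns a [3, 3] tensor whose columns are the contiguous chunks [0, 1, 2],
# [3, 4, 5] and [6, 7, 8]. Like data_process above, this tutorial helper relies
# on a module-level `device` that is not defined in this scratchpad.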
def get_data_features(db, tokenizer, size_limit = None) -> typing.List[typing.Dict[str, typing.Dict[str, float]]]:
"""
Get or set feature with data list of tuples.
"""
datapoints = []
db_feats = db.get_data_features(tokenizer, size_limit)
for inp in tqdm.tqdm(db_feats, total = len(db_feats), desc = "Fetch data"):
feats = workers.ContentFeat(inp)
if len(inp) == 2:
src, _ = inp
include = ""
else:
src, include, _ = inp
try:
datapoints.append({
"GreweFeatures" : feats["GreweFeatures"],
"AutophaseFeatures" : feats["AutophaseFeatures"],
"InstCountFeatures" : feats["InstCountFeatures"],
})
except KeyError as e:
l.logger().warn(e)
return datapoints
def Train(feat_vecs):
size = len(feat_vecs)
train_data, val_data = feat_vecs[:(9 * size) // 10], feat_vecs[(9 * size) // 10:]
device = 'cuda'
num_epochs = 30
batch_size = 32
num_warmup_steps = 5000
learning_rate = 45 / 1e6
train_dataset = FeatureDataset(train_data)
val_dataset = FeatureDataset(val_data)
vocab_size = len(train_dataset.feat_tokenizer)
emsize = 64 # embedding dimension
d_hid = 128 # dimension of the feedforward network model in nn.TransformerEncoder
nlayers = 2 # number of nn.TransformerEncoderLayer in nn.TransformerEncoder
nhead = 2 # number of heads in nn.MultiheadAttention
dropout = 0.1 # dropout probability
model = TransformerModel(
vocab_size,
emsize,
nhead,
d_hid,
nlayers,
train_dataset.feat_tokenizer.padToken,
dropout
).to(device)
## Define dataloaders.
train_loader = torch.utils.data.dataloader.DataLoader(
dataset = train_dataset,
batch_size = batch_size,
sampler = torch.utils.data.RandomSampler(train_dataset, replacement = False),
num_workers = 0,
drop_last = False,
)
val_loader = torch.utils.data.dataloader.DataLoader(
dataset = val_dataset,
batch_size = batch_size,
sampler = torch.utils.data.RandomSampler(val_dataset, replacement = False),
num_workers = 0,
drop_last = False,
)
  ## Also create scheduler and optimizer.
opt, scheduler = optimizer.create_optimizer_and_scheduler(
model = model,
num_train_steps = (num_epochs * len(train_dataset)) // batch_size,
warmup_steps = num_warmup_steps,
learning_rate = learning_rate,
)
loss_fn = torch.nn.CrossEntropyLoss()
model.zero_grad()
hook_path = pathlib.Path("./feat_reconstruction").resolve()
hook_path.mkdir(exist_ok = True, parents = True)
train_hook = hooks.tensorMonitorHook(hook_path, 0, 50)
  val_hook   = hooks.tensorMonitorHook(hook_path, 0, 10)
for ep in tqdm.tqdm(range(num_epochs), desc = "Epoch", leave = False):
model.train()
for batch in tqdm.tqdm(train_loader, total = len(train_loader), desc = "Batch", leave = False):
inp, att, target = batch['inputs'], batch['padding_mask'], batch['target']
output = model(inp.to(device), target.to(device), src_key_padding_mask = att.to(device))
loss = loss_fn(output.view(-1, len(train_dataset.feat_tokenizer)), target.to(device).view(-1))
opt.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
opt.step()
scheduler.step()
train_hook.step(total_loss = loss.item())
l.logger().info("Epoch {} loss {}".format(ep, train_hook.epoch_loss))
train_hook.end_epoch()
model.eval()
for batch in tqdm.tqdm(train_loader, total = len(train_loader), desc = "Val Train Batch", leave = False):
inp, att, target = batch['inputs'], batch['padding_mask'], batch['target']
output = model(inp.to(device), target.to(device), src_key_padding_mask = att.to(device))
loss = loss_fn(output.view(-1, len(train_dataset.feat_tokenizer)), target.to(device).view(-1))
euclids = []
accuracy = []
for bi in range(output.size(0)):
raw_out = torch.argmax(output[bi], dim = 1).cpu()
targ = target[bi].cpu()
assert len(raw_out) == len(targ), "{}/{}".format(len(raw_out), len(targ))
dist = 0.0
for vi in range(len(targ)):
dist += (targ[vi] - raw_out[vi])**2
euclids.append(math.sqrt(dist))
accuracy.append(len(torch.where(targ == raw_out)[0]) / len(targ))
mean_dist = sum(euclids) / len(euclids)
mean_accuracy = sum(accuracy) / len(accuracy)
val_hook.step(val_train_loss = loss.item(), val_train_dist = mean_dist, val_train_accuracy = mean_accuracy)
for batch in tqdm.tqdm(val_loader, total = len(val_loader), desc = "Val Batch", leave = False):
inp, att, target = batch['inputs'], batch['padding_mask'], batch['target']
output = model(inp.to(device), target.to(device) ) #, src_key_padding_mask = att.to(device))
loss = loss_fn(output.view(-1, len(train_dataset.feat_tokenizer)), target.to(device).view(-1))
euclids = []
accuracy = []
for bi in range(output.size(0)):
raw_out = torch.argmax(output[bi], dim = 1).cpu()
targ = target[bi].cpu()
assert len(raw_out) == len(targ), "{}/{}".format(len(raw_out), len(targ))
dist = 0.0
for vi in range(len(targ)):
dist += (targ[vi] - raw_out[vi])**2
euclids.append(math.sqrt(dist))
accuracy.append(len(torch.where(targ == raw_out)[0]) / len(targ))
mean_dist = sum(euclids) / len(euclids)
mean_accuracy = sum(accuracy) / len(accuracy)
val_hook.step(val_loss = loss.item(), val_dist = mean_dist, val_accuracy = mean_accuracy)
return
def Validate(model, tokenizer, train_loader, val_loader):
return
def main(*args):
db = encoded.EncodedContentFiles(url = "sqlite:///{}".format(ENCODED_DB_PATH), must_exist = True)
tokenizer = tokenizers.TokenizerBase.FromFile(pathlib.Path(TOKENIZER_PATH).resolve())
feat_vecs = get_data_features(db, tokenizer)
Train(feat_vecs)
return
if __name__ == "__main__":
app.run(main)
| 13,876 | 35.518421 | 185 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/scratchpad/parse_structs.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Scratchpad experimental analysis for anyhing related to CLDrive.
"""
import pathlib
import typing
import clang.cindex
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.preprocessors import structs
from deeplearning.benchpress.util import plotter as plt
from deeplearning.benchpress.util import logging as l
clang.cindex.Config.set_library_path(environment.LLVM_LIB)
if environment.LLVM_VERSION != 6:
# LLVM 9 needs libclang explicitly defined.
clang.cindex.Config.set_library_file(environment.LLVM_LIB + "/libclang.so.{}".format(environment.LLVM_VERSION))
src1 ="""
struct my_struct{
int a, b, c;
};
"""
src2 ="""
typedef struct my_struct {
int x, y, z;
} structy;
"""
l.initLogger(name = "experiments")
# The original scratchpad referenced undefined names (f, builtin_cflags, cflags,
# extra_args); parse the in-memory snippet directly instead.
cflags = opencl.GetClangArgs(use_shim = False, use_aux_headers = False)
try:
  unit = clang.cindex.TranslationUnit.from_source(
    "src1.cl", args = cflags, unsaved_files = [("src1.cl", src1)])
except clang.cindex.TranslationUnitLoadError as e:
  raise ValueError(e)
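# Minimal follow-up sketch: walk the parsed unit and list each struct's fields,
# which is the information this scratchpad is after.
for cursor in unit.cursor.walk_preorder():
  if cursor.kind == clang.cindex.CursorKind.STRUCT_DECL:
    fields = [c.spelling for c in cursor.get_children()
              if c.kind == clang.cindex.CursorKind.FIELD_DECL]
    l.logger().info("struct {}: {}".format(cursor.spelling, fields))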
| 1,588 | 30.156863 | 113 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/scratchpad/pipe_clang.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import subprocess
import time
import typing
import os
from deeplearning.benchpress.preprocessors import opencl, clang
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.features import autophase
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import distributions
from absl import app
def main(*args):
db = encoded.EncodedContentFiles("sqlite:///{}".format(pathlib.Path("./unique_encoded.db").resolve()), must_exist = True)
tokenizer = tokenizers.TokenizerBase.FromFile(pathlib.Path("./backup_tokenizer.pkl").resolve())
data = [tokenizer.ArrayToCode(x) for x in db.get_data()][:30]
args = builtin_cflags = opencl.GetClangArgs(use_shim = False, use_aux_headers = False) + ["-S", "-emit-llvm", "-o", "-"]
stdin_times, opencl_times, bytecode_times = [], [], []
opt_stdin, opt_file = [], []
for idx, src in enumerate(data):
print(idx)
# for x in range(50):
# t1 = time.time()
# opencl.CompileStdin(src)
# t2 = time.time()
# stdin_times.append(int(1000 * (t2-t1)))
# for x in range(50):
# t1 = time.time()
# opencl.Compile(src)
# t2 = time.time()
# opencl_times.append(int(1000 * (t2 - t1)))
# for x in range(50):
# t1 = time.time()
# opencl.CompileLlvmBytecode(src)
# t2 = time.time()
# bytecode_times.append(int(1000 * (t2 - t1)))
for x in range(100):
t1 = time.time()
opencl.CompileOptimizer(src, autophase.AUTOPHASE)
t2 = time.time()
opt_file.append(int(1000 * (t2 - t1)))
for x in range(100):
t1 = time.time()
opencl.CompileOptimizerStdin(src, autophase.AUTOPHASE)
t2 = time.time()
opt_stdin.append(int(1000 * (t2 - t1)))
# stdin_distr = distributions.GenericDistribution(stdin_times, "process_benchmarks", "stdin")
# opencl_distr = distributions.GenericDistribution(opencl_times, "process_benchmarks", "opencl")
# bytecode_distr = distributions.GenericDistribution(bytecode_times, "process_benchmarks", "bytecode")
opt_stdin_distr = distributions.GenericDistribution(opt_stdin, "process_benchmarks", "opt_stdin")
opt_file_distr = distributions.GenericDistribution(opt_file, "process_benchmarks", "opt_file")
# stdin_distr.plot()
# opencl_distr.plot()
# bytecode_distr.plot()
opt_stdin_distr.plot()
opt_file_distr.plot()
# cum = stdin_distr - opencl_distr
# cum2 = stdin_distr - bytecode_distr
cum3 = opt_stdin_distr - opt_file_distr
# cum.plot()
# cum2.plot()
cum3.plot()
# print(cum < 0)
# print(cum2 < 0)
print(cum3 < 0)
print(opt_stdin_distr.average, opt_stdin_distr.median)
print(opt_file_distr.average, opt_file_distr.median)
print(cum3.average, cum3.median)
print()
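    # cum3 is the difference distribution (stdin minus file); a mostly negative
    # result would suggest that piping the source through stdin beats writing it
    # to a temporary file before invoking the optimizer passes.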
if __name__ == "__main__":
app.run(main)
| 3,489 | 30.441441 | 123 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/scratchpad/test_torch_sampler.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This modules
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import torch
WORLD_SIZE = 16
dataset_tensor = [0, 1, 2, 3]
cumulative_sizes = [100000, 200000, 300000, 400000]
dset_iter = iter(dataset_tensor)
def get_rand_tensor(epoch, dset_idx, world_rank):
# global dataset_tensor
# global dset_iter
# try:
# dataset_idx = next(dset_iter)
# except StopIteration:
# dset_iter = iter(dataset_tensor)
# dataset_idx = next(dset_iter)
dataset_idx = dset_idx
lb, ub = cumulative_sizes[dataset_idx - 1] if dataset_idx else 0, cumulative_sizes[dataset_idx]
bounds = (lb, ub)
generator = torch.Generator()
generator.manual_seed(epoch)
size = bounds[1] - bounds[0]
rand_tensor = [x + bounds[0] for x in torch.randperm(bounds[1] - bounds[0], generator = generator).tolist()]
rounded_total = (len(rand_tensor) // WORLD_SIZE) * WORLD_SIZE
# print(rounded_total, rand_tensor, world_rank, rounded_total, WORLD_SIZE)
rand_tensor = rand_tensor[world_rank:rounded_total:WORLD_SIZE]
return rand_tensor
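# Illustrative example (with a hypothetical WORLD_SIZE of 4): for the
# permutation [7, 2, 5, 0, 3, 6, 1, 4], rounded_total is 8 and the per-rank
# slices are rank0 -> [7, 3], rank1 -> [2, 6], rank2 -> [5, 1], rank3 -> [0, 4].
# The stride-WORLD_SIZE slicing is what keeps the ranks disjoint.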
for y in range(20):
idx = y
l1, l2, l3, l4 = get_rand_tensor(0, idx%4, 0), get_rand_tensor(0, idx%4, 1), get_rand_tensor(0, idx%4, 2), get_rand_tensor(0, idx%4, 3)
visited = set()
for x in l1 + l2 + l3 + l4:
if x in visited:
print(visited)
print(x)
      raise ValueError("We're done for: the same index was handed to more than one rank!")
else:
visited.add(x)
print("Ok")
| 2,289 | 33.179104 | 137 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/scratchpad/analyze_feature_vals.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This modules
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import pathlib
import typing
import tqdm
import multiprocessing
import pickle
from absl import app
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.experiments import workers
ENCODED_DB_PATH = "/home/foivos/unique_encoded.db"
TOKENIZER_PATH = "/home/foivos/backup_tokenizer.pkl"
def get_data_features(feature_space: str, db, tokenizer, size_limit = None) -> typing.List[typing.Tuple[str, typing.Dict[str, float]]]:
"""
Get or set feature with data list of tuples.
"""
data_features = {}
data_features[feature_space] = []
db_feats = db.get_data_features(tokenizer, size_limit)
for inp in tqdm.tqdm(db_feats, total = len(db_feats), desc = "Fetch data"):
feats = workers.ContentFeat(inp)
if len(inp) == 2:
src, _ = inp
include = ""
else:
src, include, _ = inp
if feature_space in feats and feats[feature_space]:
data_features[feature_space].append((src, include, feats[feature_space]))
return data_features[feature_space]
def main(*args):
db = encoded.EncodedContentFiles(url = "sqlite:///{}".format(ENCODED_DB_PATH), must_exist = True)
tokenizer = tokenizers.TokenizerBase.FromFile(pathlib.Path(TOKENIZER_PATH).resolve())
distr = {
"GreweFeatures": None,
"AutophaseFeatures": None,
"InstCountFeatures": None,
}
distr_768 = {
"GreweFeatures": None,
"AutophaseFeatures": None,
"InstCountFeatures": None,
}
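  # First pass below: value distribution over the full corpus. Second pass
  # (keys suffixed "_768"): the same distribution restricted via
  # size_limit = 768, presumably to kernels that fit a 768-token sequence.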
for fspace in {"GreweFeatures", "AutophaseFeatures", "InstCountFeatures"}:
feat_vecs = [v for s, i, v in get_data_features(fspace, db, tokenizer)]
flat_vals = []
for vec in feat_vecs:
for v in vec.values():
try:
flat_vals.append(4 * int(v // 4))
except Exception:
pass
distr[fspace] = distributions.GenericDistribution(flat_vals, "feature_vals", fspace)
distr[fspace].plot()
for fspace in {"GreweFeatures", "AutophaseFeatures", "InstCountFeatures"}:
feat_vecs = [v for s, i, v in get_data_features(fspace, db, tokenizer, 768)]
flat_vals = []
for vec in feat_vecs:
for v in vec.values():
try:
flat_vals.append(4 * int(v // 4))
except Exception:
pass
distr_768[fspace] = distributions.GenericDistribution(flat_vals, "feature_vals", "{}_768".format(fspace))
distr_768[fspace].plot()
return
if __name__ == "__main__":
app.run(main)
| 3,469 | 33.7 | 135 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/scratchpad/plot_pareto.py | from deeplearning.benchpress.util import plotter as plt
import pathlib
groups = {
"BenchDirect": {},
"BenchPress": {},
}
## Grewe
groups["BenchDirect"]['data'] = [[267*2048, 73.56], [266*1024, 77.79], [512*290, 81.56], [256*289, 82.94], [128*272, 85.30], [64*282, 87.62], [32*151, 96.24]]
groups["BenchPress"]['data'] = [[2048*286, 76.79], [1024*306, 83.62], [512*325, 88.27], [256*326, 91.47], [128*333, 95.53], [64*338, 97.30], [32*236, 99.13]]
# relative proximity
groups["BenchDirect"]['data'] = [[267*2048, 80.99], [266*1024, 77.07], [512*290, 72.45], [256*289, 68.75], [128*272, 61.65], [64*282, 56.97], [32*151, 45.06]]
groups["BenchPress"]['data'] = [[2048*286, 75.83], [1024*306, 69.44], [512*325, 62.23], [256*326, 55.68], [128*333, 48.27], [64*338, 42.16], [32*236, 34.67]]
groups["BenchDirect"]['names'] = [2048, 1024, 512, 256, 128, 64, 32]
groups["BenchPress"]['names'] = [2048, 1024, 512, 256, 128, 64, 32]
time_speedup = [100*abs(round((x[0]-y[0])) / y[0]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
acc_speedup = [100*abs(round((x[1]-y[1])) / y[1]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
time_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], time_speedup)]
acc_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], acc_speedup)]
print(time_speedup)
print(acc_speedup)
plt.GroupScatterPlot(
groups,
plot_name="pareto_grewe",
path = pathlib.Path("./pareto").resolve(),
title = "Grewe Features",
x_name = "# Total Inferences",
y_name = "Avg Relative Proximity (%)",
showline_x = True,
showline_y = True,
linewidth = 2,
linecolor = "black",
showgrid_x = False,
showgrid_y = True,
bg_color = "white",
gridcolor_x = "gray",
gridcolor_y = "gray",
gridwidth = 1,
height = 900,
width = 1280,
tickfont = 24,
axisfont = 24,
legendfont = 18,
titlefont = 24,
legend_x = 0.75,
legend_y = 0.45,
)
plt.GroupScatterPlot(
{"% Speedup": {'data': time_speedup, 'names': []}, "% Proximity": {'data': acc_speedup, 'names': []}},
plot_name="speedup_grewe",
path = pathlib.Path("./pareto").resolve(),
title = "",
marker_style = [dict(color = "rgb(57, 105, 172)"), dict(color = "rgb(102, 166, 30)")],
x_name = "Workload Size",
y_name = "% Gain over BenchPress",
showline_x = True,
showline_y = True,
linewidth = 2,
linecolor = "black",
showgrid_x = False,
showgrid_y = True,
bg_color = "white",
gridcolor_x = "gray",
gridcolor_y = "gray",
gridwidth = 1,
height = 900,
width = 1280,
tickfont = 24,
axisfont = 24,
legendfont = 18,
titlefont = 24,
legend_x = 0.75,
legend_y = 0.87,
)
## Autophase
groups["BenchDirect"]['data'] = [[262*2048, 41.02], [262*1024, 44.7], [512*267, 52.36], [256*262, 54.60], [128*254, 58.02], [64*230, 61.09], [32*164, 57.74]]
groups["BenchPress"]['data'] = [[2048*292, 48.88], [1024*297, 50.84], [512*302, 57.38], [256*307, 57.63], [128*312, 71.32], [64*312, 74.27], [32*254, 83.59]]
# relative proximity
groups["BenchDirect"]['data'] = [[267*2048, 74.63], [266*1024, 72.03], [512*290, 66.77], [256*289, 64.39], [128*272, 61.38], [64*282, 59.22], [32*151, 57.81]]
groups["BenchPress"]['data'] = [[2048*286, 64.51], [1024*306, 65.78], [512*325, 60.08], [256*326, 58.19], [128*333, 57.81], [64*338, 43.82], [32*236, 33.32]]
time_speedup = [100*abs(round((x[0]-y[0])) / y[0]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
acc_speedup = [100*abs(round((x[1]-y[1])) / y[1]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
time_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], time_speedup)]
acc_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], acc_speedup)]
print(time_speedup)
print(acc_speedup)
plt.GroupScatterPlot(
groups,
plot_name="pareto_autophase",
path = pathlib.Path("./pareto").resolve(),
title = "Autophase Features",
x_name = "# Total Inferences",
y_name = "Avg Relative Proximity (%)",
showline_x = True,
showline_y = True,
linewidth = 2,
linecolor = "black",
showgrid_x = False,
showgrid_y = True,
bg_color = "white",
gridcolor_x = "gray",
gridcolor_y = "gray",
gridwidth = 1,
height = 900,
width = 1280,
tickfont = 24,
axisfont = 24,
legendfont = 18,
titlefont = 24,
legend_x = 0.75,
legend_y = 0.52,
)
plt.GroupScatterPlot(
{"% Speedup": {'data': time_speedup, 'names': []}, "% Proximity": {'data': acc_speedup, 'names': []}},
plot_name="speedup_autophase",
path = pathlib.Path("./pareto").resolve(),
title = "",
marker_style = [dict(color = "rgb(57, 105, 172)"), dict(color = "rgb(102, 166, 30)")],
x_name = "Workload Size",
y_name = "% Gain over BenchPress",
showline_x = True,
showline_y = True,
linewidth = 2,
linecolor = "black",
showgrid_x = False,
showgrid_y = True,
bg_color = "white",
gridcolor_x = "gray",
gridcolor_y = "gray",
gridwidth = 1,
height = 900,
width = 1280,
tickfont = 24,
axisfont = 24,
legendfont = 18,
titlefont = 24,
legend_x = 0.75,
legend_y = 0.87,
)
## Instcount
groups["BenchDirect"]['data'] = [[252*2048, 30.73], [257*1024, 34.36], [512*262, 36.32], [256*259, 39.89], [128*265, 41.96], [64*257, 46.21], [32*163, 48.33]]
groups["BenchPress"]['data'] = [[2048*301, 32.63], [1024*307, 40.09], [512*302, 40.49], [256*307, 52.89], [128*307, 56.41], [64*312, 57.77], [32*208, 69.11]]
# relative proximity
groups["BenchDirect"]['data'] = [[267*2048, 79.86], [266*1024, 77.90], [512*290, 76.27], [256*289, 73.92], [128*272, 71.10], [64*282, 66.54], [32*151, 63.33]]
groups["BenchPress"]['data'] = [[2048*286, 75.57], [1024*306, 70.61], [512*325, 71.79], [256*326, 59.43], [128*333, 59.24], [64*338, 54.92], [32*236, 41.41]]
time_speedup = [100*abs(round((x[0]-y[0])) / y[0]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
acc_speedup = [100*abs(round((x[1]-y[1])) / y[1]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
time_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], time_speedup)]
acc_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], acc_speedup)]
print(time_speedup)
print(acc_speedup)
plt.GroupScatterPlot(
groups,
plot_name="pareto_instcount",
path = pathlib.Path("./pareto").resolve(),
title = "InstCount Features",
x_name = "# Total Inferences",
y_name = "Avg Relative Proximity (%)",
showline_x = True,
showline_y = True,
linewidth = 2,
linecolor = "black",
showgrid_x = False,
showgrid_y = True,
bg_color = "white",
gridcolor_x = "gray",
gridcolor_y = "gray",
gridwidth = 1,
height = 900,
width = 1280,
tickfont = 24,
axisfont = 24,
legendfont = 18,
titlefont = 24,
legend_x = 0.75,
legend_y = 0.48,
)
plt.GroupScatterPlot(
{"% Speedup": {'data': time_speedup, 'names': []}, "% Proximity": {'data': acc_speedup, 'names': []}},
plot_name="speedup_instcount",
path = pathlib.Path("./pareto").resolve(),
marker_style = [dict(color = "rgb(57, 105, 172)"), dict(color = "rgb(102, 166, 30)")],
title = "",
x_name = "Workload Size",
y_name = "% Gain over BenchPress",
showline_x = True,
showline_y = True,
linewidth = 2,
linecolor = "black",
showgrid_x = False,
showgrid_y = True,
bg_color = "white",
gridcolor_x = "gray",
gridcolor_y = "gray",
gridwidth = 1,
height = 900,
width = 1280,
tickfont = 24,
axisfont = 24,
legendfont = 18,
titlefont = 24,
legend_x = 0.75,
legend_y = 0.87,
)
| 7,529 | 32.171806 | 158 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/scratchpad/cldrive.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Scratchpad experimental analysis for anyhing related to CLDrive.
"""
import pathlib
import math
import pandas as pd
import statistics
import numpy as np
import scipy.stats as st
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.util import plotter as plt
from deeplearning.benchpress.util import logging as l
src1 ="""
kernel void A(global float *a, global float* n, global float* j, global float* h, const unsigned int r) {
int l = get_global_id(0);
if (l >= r)
return;
h[l] = n[l] - j[l] * j[l-1];
}
"""
src2 ="""
kernel void A(global float *a, global float* n, global float* j, global float* h, const unsigned int r) {
int l = get_global_id(0);
for (unsigned int i = 0; i < 10; i++){
a[i] = n[i]*j[i]*h[i];
a[i+1] = n[i+1]*j[i+1]*h[i+1];
}
}
"""
l.initLogger(name = "experiments")
def confidenceInterval() -> None:
"""
For a simple and a more complicated OpenCL kernel,
this experiment uses a fixed local size, iterates over a wide range of global sizes
and a different number of runs to check the stability of execution times.
The 95% confidence interval is calculated for each run and the average distance of the mean
from the confidence interval boundaries is expressed as a percentage distance.
"""
global src1
global src2
MIN_GISZE_POW = 8
MAX_GSIZE_POW = 28
MAX_RUNS_POW = 5
for n, src in [("src1", src1), ("src2", src2)]:
for gsize_pow in range(MIN_GISZE_POW, MAX_GSIZE_POW+1):
print("Running {}, {} gsize".format(gsize_pow, 2**gsize_pow))
data_cpt = []
data_cpk = []
data_gpt = []
data_gpk = []
for n_runs in range(1, MAX_RUNS_POW+1):
print("##### num_runs: {} ####".format(10**n_runs))
hist = {}
cpt, cpk, gpt, gpk = opencl.CLDriveExecutionTimes(src, num_runs = 10**n_runs, gsize = 2**gsize_pow, lsize = 32)
print("## CPU transfer")
interval = st.t.interval(alpha = 0.95, df = len(list(cpt)) -1, loc = np.mean(list(cpt)), scale = st.sem(list(cpt)))
cpt_mean = cpt.mean()
ratio = 100*(0.5 * (interval[1] - interval[0])) / cpt_mean
data_cpt.append(ratio)
print("95 interval: {}".format(interval))
print("Ratio: {}%".format(100*(0.5 * (interval[1] - interval[0])) / cpt_mean))
print("## CPU kernel")
interval = st.t.interval(alpha = 0.95, df = len(list(cpk)) -1, loc = np.mean(list(cpk)), scale = st.sem(list(cpk)))
cpk_mean = cpk.mean()
ratio = 100*(0.5 * (interval[1] - interval[0])) / cpk_mean
data_cpk.append(ratio)
print("95 interval: {}".format(interval))
print("Ratio: {}%".format(100*(0.5 * (interval[1] - interval[0])) / cpk_mean))
print("## GPU transfer")
interval = st.t.interval(alpha = 0.95, df = len(list(gpt)) -1, loc = np.mean(list(gpt)), scale = st.sem(list(gpt)))
gpt_mean = gpt.mean()
ratio = 100*(0.5 * (interval[1] - interval[0])) / gpt_mean
data_gpt.append(ratio)
print("95 interval: {}".format(interval))
print("Ratio: {}%".format(100*(0.5 * (interval[1] - interval[0])) / gpt_mean))
print("## GPU kernel")
interval = st.t.interval(alpha = 0.95, df = len(list(gpk)) -1, loc = np.mean(list(gpk)), scale = st.sem(list(gpk)))
gpk_mean = gpk.mean()
ratio = 100*(0.5 * (interval[1] - interval[0])) / gpk_mean
data_gpk.append(ratio)
print("95 interval: {}".format(interval))
print("Ratio: {}%".format(100*(0.5 * (interval[1] - interval[0])) / gpk_mean))
print()
x_axis = [x for x in range(1, MAX_RUNS_POW+1)]
plt.MultiScatterLine(
x = [x_axis] * MAX_RUNS_POW,
y = [data_cpt, data_cpk, data_gpt, data_gpk],
names = ['cpu_transfer', 'cpu_kernel', 'gpu_transfer', 'gpu_kernel'],
x_name = "power of 10",
plot_name = "{}_perc_diff_mean_int".format(2**gsize_pow),
path = pathlib.Path("./plots/conf_interval/{}".format(n)).resolve()
)
return
def LabelGlobalSize() -> None:
"""
Iterate over multiple global sizes and collect the optimal device
to execute an OpenCL kernel. GPU or CPU.
"""
global src1
global src2
MIN_GISZE_POW = 8
MAX_GSIZE_POW = 28
N_RUNS = {
'src1': {
2**8 : 10**5,
2**9 : 10**5,
2**10: 10**5,
2**11: 10**5,
2**12: 10**5,
2**13: 10**5,
2**14: 10**4,
2**15: 10**4,
2**16: 10**4,
2**17: 10**4,
2**18: 10**4,
2**19: 10**3,
2**20: 10**3,
2**21: 10**3,
2**22: 10**3,
2**23: 10**2,
2**24: 10**2,
2**25: 10**1,
2**26: 10**1,
2**27: 10**1,
2**28: 10**1,
},
'src2': {
2**8 : 10**5,
2**9 : 10**5,
2**10: 10**5,
2**11: 10**5,
2**12: 10**5,
2**13: 10**4,
2**14: 10**4,
2**15: 10**4,
2**16: 10**4,
2**17: 10**3,
2**18: 10**3,
2**19: 10**3,
2**20: 10**3,
2**21: 10**2,
2**22: 10**2,
2**23: 10**2,
2**24: 10**1,
2**25: 10**1,
2**26: 10**1,
2**27: 10**1,
2**28: 10**1,
}
}
for n, src in [("src1", src1), ("src2", src2)]:
labels = {
'CPU': {'data': [], 'names': None},
'GPU': {'data': [], 'names': None},
}
for gsize_pow in range(MIN_GISZE_POW, MAX_GSIZE_POW+1):
print("##########", gsize_pow, 2**gsize_pow)
label = opencl.CLDriveLabel(src, num_runs = N_RUNS[n][2**gsize_pow], gsize = 2**gsize_pow, lsize = 256)
if label != 'ERR':
labels[label]['data'].append([gsize_pow, 1])
plt.GroupScatterPlot(
groups = labels,
plot_name = "label_per_gsize",
path = pathlib.Path("./plots/label_gsize/{}".format(n)),
x_name = "power of 2",
)
return
def ExecutionTimesGlobalSize() -> None:
"""
Iterate over multiple global sizes and collect the execution time
for transferring to CPU and GPU and executing kernel on CPU and GPU
and report groupped bar plot.
"""
global src1
global src2
MIN_GISZE_POW = 8
MAX_GSIZE_POW = 28
N_RUNS = {
'src1': {
2**8 : 10**5,
2**9 : 10**5,
2**10: 10**5,
2**11: 10**5,
2**12: 10**5,
2**13: 10**5,
2**14: 10**4,
2**15: 10**4,
2**16: 10**4,
2**17: 10**4,
2**18: 10**4,
2**19: 10**3,
2**20: 10**3,
2**21: 10**3,
2**22: 10**3,
2**23: 10**2,
2**24: 10**2,
2**25: 10**1,
2**26: 10**1,
2**27: 10**1,
2**28: 10**1,
},
'src2': {
2**8 : 10**5,
2**9 : 10**5,
2**10: 10**5,
2**11: 10**5,
2**12: 10**5,
2**13: 10**4,
2**14: 10**4,
2**15: 10**4,
2**16: 10**4,
2**17: 10**3,
2**18: 10**3,
2**19: 10**3,
2**20: 10**3,
2**21: 10**2,
2**22: 10**2,
2**23: 10**2,
2**24: 10**1,
2**25: 10**1,
2**26: 10**1,
2**27: 10**1,
2**28: 10**1,
}
}
for n, src in [("src1", src1), ("src2", src2)]:
labels = {
'CPU': {'data': [], 'names': None},
'GPU': {'data': [], 'names': None},
}
groups = {
'cpu_transfer' : [[], []],
'cpu_kernel' : [[], []],
'gpu_transfer' : [[], []],
'gpu_kernel' : [[], []],
}
for gsize_pow in range(MIN_GISZE_POW, MAX_GSIZE_POW+1):
print("##########", gsize_pow, 2**gsize_pow)
cpt, cpk, gpt, gpk = opencl.CLDriveExecutionTimes(src, num_runs = N_RUNS[n][2**gsize_pow], gsize = 2**gsize_pow, lsize = 256)
if cpt is None:
while cpt is None:
cpt, cpk, gpt, gpk = opencl.CLDriveExecutionTimes(src, num_runs = N_RUNS[n][2**gsize_pow], gsize = 2**gsize_pow, lsize = 256)
print(cpt.mean(), cpk.mean(), gpt.mean(), gpk.mean())
if not math.isnan(cpt.mean()):
groups['cpu_transfer'][0].append(lsize_pow)
groups['cpu_transfer'][1].append(cpt.mean() / (10**6))
if not math.isnan(cpk.mean()):
groups['cpu_kernel'][0].append(lsize_pow)
groups['cpu_kernel'][1].append(cpk.mean() / (10**6))
if not math.isnan(gpt.mean()):
groups['gpu_transfer'][0].append(lsize_pow)
groups['gpu_transfer'][1].append(gpt.mean() / (10**6))
if not math.isnan(gpk.mean()):
groups['gpu_kernel'][0].append(lsize_pow)
groups['gpu_kernel'][1].append(gpk.mean() / (10**6))
plt.GrouppedBars(
groups = groups,
plot_name = "exec_times_per_gsize",
path = pathlib.Path("./plots/exec_times_gsize/{}".format(n)),
x_name = "power of 2",
y_name = "ms",
)
return
def ExecutionTimesLocalSize() -> None:
"""
Iterate over multiple global sizes and collect the execution time
for transferring to CPU and GPU and executing kernel on CPU and GPU
and report groupped bar plot.
"""
global src1
global src2
MIN_LISZE_POW = 0
MAX_LSIZE_POW = 21
GSIZE_POW = 21
N_RUNS = 10**2
for n, src in [("src1", src1), ("src2", src2)]:
labels = {
'CPU': {'data': [], 'names': None},
'GPU': {'data': [], 'names': None},
}
groups = {
'cpu_transfer' : [[], []],
'cpu_kernel' : [[], []],
'gpu_transfer' : [[], []],
'gpu_kernel' : [[], []],
}
for lsize_pow in range(MIN_LISZE_POW, MAX_LSIZE_POW+1):
print("##########", lsize_pow, 2**lsize_pow)
cpt, cpk, gpt, gpk = opencl.CLDriveExecutionTimes(src, num_runs = N_RUNS, gsize = 2**GSIZE_POW, lsize = 2**lsize_pow)
if cpt is None:
while cpt is None:
cpt, cpk, gpt, gpk = opencl.CLDriveExecutionTimes(src, num_runs = N_RUNS, gsize = 2**GSIZE_POW, lsize = 2**lsize_pow)
print(cpt.mean(), cpk.mean(), gpt.mean(), gpk.mean())
if not math.isnan(cpt.mean()):
groups['cpu_transfer'][0].append(lsize_pow)
groups['cpu_transfer'][1].append(cpt.mean() / (10**6))
if not math.isnan(cpk.mean()):
groups['cpu_kernel'][0].append(lsize_pow)
groups['cpu_kernel'][1].append(cpk.mean() / (10**6))
if not math.isnan(gpt.mean()):
groups['gpu_transfer'][0].append(lsize_pow)
groups['gpu_transfer'][1].append(gpt.mean() / (10**6))
if not math.isnan(gpk.mean()):
groups['gpu_kernel'][0].append(lsize_pow)
groups['gpu_kernel'][1].append(gpk.mean() / (10**6))
plt.GrouppedBars(
groups = groups,
plot_name = "exec_times_per_lsize",
path = pathlib.Path("./plots/exec_times_lsize/{}".format(n)),
x_name = "power of 2",
y_name = "ms",
)
return
| 11,147 | 29.542466 | 135 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/downstream_data.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines the database schema used to store training data yielded
by the downstream tasks (e.g. the Grewe CPU/GPU heuristic model), along with
helpers for adding a full sampling epoch of instances at once.
"""
import datetime
import typing
import tqdm
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import crypto
from absl import flags
FLAGS = flags.FLAGS
Base = declarative.declarative_base()
def GreweDataScheme() -> typing.List[str]:
"""
Return schema of grewe predictive model data inputs.
"""
return [
"benchmark",
"dataset",
"comp",
"rational",
"mem",
"localmem",
"coalesced",
"atomic",
"transfer",
"wgsize",
"F1:transfer/(comp+mem)",
"F2:coalesced/mem",
"F3:(localmem/mem)*avgws",
"F4:comp/mem",
"oracle",
"runtime",
"speedup",
"penalty",
"runtime_cpu",
"ci_cpu",
"ci_mean_cpu",
"runtime_gpu",
"ci_gpu",
"ci_mean_gpu",
"kernel_nlines",
"kernel_size"
]
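# The column names above appear to mirror the CSV layout of the Grewe et al.
# CPU/GPU heuristic dataset; GreweInstance below stores one such row per
# sampled kernel, together with the target features it was steered towards.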
class GreweInstance(Base, sqlutil.ProtoBackedMixin):
"""
A database row representation for Grewe heuristic model training instance.
"""
__tablename__ = "grewe_training_instances"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# Indexable hash
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
  # source code of the first occurrence that created this row.
src : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Sampling epoch where training instance was first collected.
sampling_epoch : int = sql.Column(sql.Integer, nullable = False)
# Grewe features of kernel.
features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Target grewe features of kernel.
target_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Distance from target.
euclidean_distance : float = sql.Column(sql.Float, nullable = False)
# Name of benchmark.
benchmark : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# For some reason, this is the global size.
dataset : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# #comp instructions.
comp : int = sql.Column(sql.Integer, nullable = False)
# #rational instructions.
rational : int = sql.Column(sql.Integer, nullable = False)
# #mem instructions.
mem : int = sql.Column(sql.Integer, nullable = False)
# #localmem instructions.
localmem : int = sql.Column(sql.Integer, nullable = False)
# #coalesced instructions.
coalesced : int = sql.Column(sql.Integer, nullable = False)
# #atomic instructions.
atomic : int = sql.Column(sql.Integer, nullable = False)
# amount of transferred bytes.
transfer : int = sql.Column(sql.Integer, nullable = False)
# work-group size as in local size.
wgsize : int = sql.Column(sql.Integer, nullable = False)
# F1:transfer/(comp+mem) score
F1 : float = sql.Column(sql.Float, nullable = False)
# F2:coalesced/mem
F2 : float = sql.Column(sql.Float, nullable = False)
# F3:(localmem/mem)*avgws
F3 : float = sql.Column(sql.Float, nullable = False)
# F4:comp/mem
F4 : float = sql.Column(sql.Float, nullable = False)
# Is CPU or GPU the best place to run this instance?
oracle : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Total execution time for optimal device
runtime : float = sql.Column(sql.Float, nullable = False)
# How much faster is the faster device from the slower
speedup : float = sql.Column(sql.Float, nullable = False)
# The inverse of speedup
penalty : float = sql.Column(sql.Float, nullable = False)
# The runtime of CPU.
runtime_cpu : int = sql.Column(sql.Integer, nullable = False)
# transfer time of CPU.
ci_cpu : int = sql.Column(sql.Integer, nullable = False)
# kernel time of CPU.
ci_mean_cpu : int = sql.Column(sql.Integer, nullable = False)
# The runtime of GPU.
runtime_gpu : int = sql.Column(sql.Integer, nullable = False)
# transfer time of GPU.
ci_gpu : int = sql.Column(sql.Integer, nullable = False)
# kernel time of GPU.
ci_mean_gpu : int = sql.Column(sql.Integer, nullable = False)
# Number of source code lines of kernel.
kernel_nlines : int = sql.Column(sql.Integer, nullable = False)
# Size of kernel in number of tokens
kernel_size : int = sql.Column(sql.Integer, nullable = False)
# Date added
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
sampling_epoch : int,
src : str,
grewe_feats : typing.Dict[str, float],
target_features : typing.Dict[str, float],
euclidean_distance : float,
global_size : int,
local_size : int,
transferred_bytes : int,
oracle : str,
cpu_transfer_ns : int,
cpu_kernel_ns : int,
gpu_transfer_ns : int,
gpu_kernel_ns : int,
kernel_nlines : int,
kernel_size : int,
               ) -> 'GreweInstance':
sha = crypto.sha256_str(
src
+ str(grewe_feats)
+ str(target_features)
+ str(transferred_bytes)
+ str(local_size)
+ str(global_size)
+ str(oracle)
)
try:
F1 = transferred_bytes / (grewe_feats['comp'] + grewe_feats['mem'])
except ZeroDivisionError:
F1 = 0.0
try:
F3 = (grewe_feats['localmem'] / grewe_feats['mem']) * local_size
except ZeroDivisionError:
F3 = 0.0
return GreweInstance(**{
"src" : src,
"sampling_epoch" : sampling_epoch,
"sha256" : sha,
"features" : "\n".join(["{}:{}".format(k, v) for k, v in grewe_feats.items()]),
"target_features" : "\n".join(["{}:{}".format(k, v) for k, v in target_features.items()]),
"euclidean_distance" : euclidean_distance,
"benchmark" : "{}-cl.A".format(sha),
"dataset" : global_size,
"comp" : grewe_feats['comp'],
"rational" : grewe_feats['rational'],
"mem" : grewe_feats['mem'],
"localmem" : grewe_feats['localmem'],
"coalesced" : grewe_feats['coalesced'],
"atomic" : grewe_feats['atomic'],
"transfer" : transferred_bytes,
"wgsize" : local_size,
"F1" : F1,
"F2" : grewe_feats["F2:coalesced/mem"],
"F3" : F3,
"F4" : grewe_feats["F4:comp/mem"],
"oracle" : oracle,
"runtime" : min(cpu_transfer_ns + cpu_kernel_ns, gpu_transfer_ns + gpu_kernel_ns),
"speedup" : max(cpu_transfer_ns + cpu_kernel_ns / gpu_transfer_ns + gpu_kernel_ns, gpu_transfer_ns + gpu_kernel_ns / cpu_transfer_ns + cpu_kernel_ns),
"penalty" : min(cpu_transfer_ns + cpu_kernel_ns / gpu_transfer_ns + gpu_kernel_ns, gpu_transfer_ns + gpu_kernel_ns / cpu_transfer_ns + cpu_kernel_ns),
"runtime_cpu" : cpu_kernel_ns + cpu_transfer_ns,
"ci_cpu" : cpu_transfer_ns,
"ci_mean_cpu" : cpu_kernel_ns,
"runtime_gpu" : gpu_kernel_ns + gpu_transfer_ns,
"ci_gpu" : gpu_transfer_ns,
"ci_mean_gpu" : gpu_kernel_ns,
"kernel_nlines" : kernel_nlines,
"kernel_size" : kernel_size,
"date_added" : datetime.datetime.utcnow(),
})
@classmethod
def add_epoch(cls,
batch : typing.List[typing.Dict],
sampling_epoch : int,
target_features : typing.Dict[str, float],
tokenizer : 'tokenizers.TokenizerBase',
) -> typing.List['GreweInstance']:
instances = []
for sample in batch:
src = tokenizer.ArrayToCode(sample.sample)
instances.append(GreweInstance.FromArgs(
src = src,
sampling_epoch = sampling_epoch,
global_size = sample.runtime_features['global_size'],
grewe_feats = sample.features,
target_features = target_features,
euclidean_distance = sample.score,
transferred_bytes = int(sample.runtime_features['transferred_bytes']),
local_size = int(sample.runtime_features['local_size']),
oracle = sample.runtime_features['label'],
cpu_transfer_ns = int(sample.runtime_features['cpu_transfer_ns']),
cpu_kernel_ns = int(sample.runtime_features['cpu_kernel_ns']),
gpu_transfer_ns = int(sample.runtime_features['gpu_transfer_ns']),
gpu_kernel_ns = int(sample.runtime_features['gpu_kernel_ns']),
kernel_nlines = len(src.split('\n')),
kernel_size = len(src.split(' ')),
))
return instances
class FeatureLessGreweInstance(Base, sqlutil.ProtoBackedMixin):
"""
A database row representation for Grewe heuristic model training instance.
"""
__tablename__ = "featureless_grewe_training_instances"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# Indexable hash
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
  # source code of the first occurrence that created this row.
src : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Sampling epoch where training instance was first collected.
sampling_epoch : int = sql.Column(sql.Integer, nullable = False)
# Grewe features of kernel.
features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Target grewe features of kernel.
target_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Distance from target.
euclidean_distance : float = sql.Column(sql.Float, nullable = False)
# Name of benchmark.
benchmark : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# For some reason, this is the global size.
dataset : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# #comp instructions.
comp : int = sql.Column(sql.Integer, nullable = False)
# #rational instructions.
rational : int = sql.Column(sql.Integer, nullable = False)
# #mem instructions.
mem : int = sql.Column(sql.Integer, nullable = False)
# #localmem instructions.
localmem : int = sql.Column(sql.Integer, nullable = False)
# #coalesced instructions.
coalesced : int = sql.Column(sql.Integer, nullable = False)
# #atomic instructions.
atomic : int = sql.Column(sql.Integer, nullable = False)
# amount of transferred bytes.
transfer : int = sql.Column(sql.Integer, nullable = False)
# work-group size as in local size.
wgsize : int = sql.Column(sql.Integer, nullable = False)
# F1:transfer/(comp+mem) score
F1 : float = sql.Column(sql.Float, nullable = False)
# F2:coalesced/mem
F2 : float = sql.Column(sql.Float, nullable = False)
# F3:(localmem/mem)*avgws
F3 : float = sql.Column(sql.Float, nullable = False)
# F4:comp/mem
F4 : float = sql.Column(sql.Float, nullable = False)
# Is CPU or GPU the best place to run this instance?
oracle : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Total execution time for optimal device
runtime : float = sql.Column(sql.Float, nullable = False)
# How much faster is the faster device from the slower
speedup : float = sql.Column(sql.Float, nullable = False)
# The inverse of speedup
penalty : float = sql.Column(sql.Float, nullable = False)
# The runtime of CPU.
runtime_cpu : int = sql.Column(sql.Integer, nullable = False)
# transfer time of CPU.
ci_cpu : int = sql.Column(sql.Integer, nullable = False)
# kernel time of CPU.
ci_mean_cpu : int = sql.Column(sql.Integer, nullable = False)
# The runtime of GPU.
runtime_gpu : int = sql.Column(sql.Integer, nullable = False)
# transfer time of GPU.
ci_gpu : int = sql.Column(sql.Integer, nullable = False)
# kernel time of GPU.
ci_mean_gpu : int = sql.Column(sql.Integer, nullable = False)
# Number of source code lines of kernel.
kernel_nlines : int = sql.Column(sql.Integer, nullable = False)
# Size of kernel in number of tokens
kernel_size : int = sql.Column(sql.Integer, nullable = False)
# Date added
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
sampling_epoch : int,
src : str,
features : typing.Dict[str, float],
grewe_feats : typing.Dict[str, float],
target_features : typing.Dict[str, float],
euclidean_distance : float,
global_size : int,
local_size : int,
transferred_bytes : int,
oracle : str,
cpu_transfer_ns : int,
cpu_kernel_ns : int,
gpu_transfer_ns : int,
gpu_kernel_ns : int,
kernel_nlines : int,
kernel_size : int,
               ) -> 'FeatureLessGreweInstance':
sha = crypto.sha256_str(
src
+ str(features)
+ str(grewe_feats)
+ str(target_features)
+ str(transferred_bytes)
+ str(local_size)
+ str(global_size)
+ str(oracle)
)
try:
F1 = transferred_bytes / (grewe_feats['comp'] + grewe_feats['mem'])
except ZeroDivisionError:
F1 = 0.0
try:
F3 = (grewe_feats['localmem'] / grewe_feats['mem']) * local_size
except ZeroDivisionError:
F3 = 0.0
return FeatureLessGreweInstance(**{
"src" : src,
"sampling_epoch" : sampling_epoch,
"sha256" : sha,
"features" : "\n".join(["{}:{}".format(k, v) for k, v in features.items()]),
"target_features" : "\n".join(["{}:{}".format(k, v) for k, v in target_features.items()]),
"euclidean_distance" : euclidean_distance,
"benchmark" : "{}-cl.A".format(sha),
"dataset" : global_size,
"comp" : grewe_feats['comp'],
"rational" : grewe_feats['rational'],
"mem" : grewe_feats['mem'],
"localmem" : grewe_feats['localmem'],
"coalesced" : grewe_feats['coalesced'],
"atomic" : grewe_feats['atomic'],
"transfer" : transferred_bytes,
"wgsize" : local_size,
"F1" : F1,
"F2" : grewe_feats["F2:coalesced/mem"],
"F3" : F3,
"F4" : grewe_feats["F4:comp/mem"],
"oracle" : oracle,
"runtime" : min(cpu_transfer_ns + cpu_kernel_ns, gpu_transfer_ns + gpu_kernel_ns),
"speedup" : max(cpu_transfer_ns + cpu_kernel_ns / gpu_transfer_ns + gpu_kernel_ns, gpu_transfer_ns + gpu_kernel_ns / cpu_transfer_ns + cpu_kernel_ns),
"penalty" : min(cpu_transfer_ns + cpu_kernel_ns / gpu_transfer_ns + gpu_kernel_ns, gpu_transfer_ns + gpu_kernel_ns / cpu_transfer_ns + cpu_kernel_ns),
"runtime_cpu" : cpu_kernel_ns + cpu_transfer_ns,
"ci_cpu" : cpu_transfer_ns,
"ci_mean_cpu" : cpu_kernel_ns,
"runtime_gpu" : gpu_kernel_ns + gpu_transfer_ns,
"ci_gpu" : gpu_transfer_ns,
"ci_mean_gpu" : gpu_kernel_ns,
"kernel_nlines" : kernel_nlines,
"kernel_size" : kernel_size,
"date_added" : datetime.datetime.utcnow(),
})
@classmethod
def add_epoch(cls,
batch : typing.List[typing.Tuple[typing.Dict, typing.Dict]],
sampling_epoch : int,
target_features : typing.Dict[str, float],
tokenizer : 'tokenizers.TokenizerBase',
) -> typing.List['FeatureLessGreweInstance']:
instances = []
for sample, grewe_feats in batch:
src = tokenizer.ArrayToCode(sample.sample)
instances.append(FeatureLessGreweInstance.FromArgs(
src = src,
sampling_epoch = sampling_epoch,
global_size = sample.runtime_features['global_size'],
features = sample.features,
grewe_feats = grewe_feats,
target_features = target_features,
euclidean_distance = sample.score,
transferred_bytes = int(sample.runtime_features['transferred_bytes']),
local_size = int(sample.runtime_features['local_size']),
oracle = sample.runtime_features['label'],
cpu_transfer_ns = int(sample.runtime_features['cpu_transfer_ns']),
cpu_kernel_ns = int(sample.runtime_features['cpu_kernel_ns']),
gpu_transfer_ns = int(sample.runtime_features['gpu_transfer_ns']),
gpu_kernel_ns = int(sample.runtime_features['gpu_kernel_ns']),
kernel_nlines = len(src.split('\n')),
kernel_size = len(src.split(' ')),
))
return instances
class DownstreamData(sqlutil.Database):
"""Database for downstream task yielded data."""
@property
def count(self) -> int:
"""
Count the number of rows in given table.
"""
with self.Session() as s:
return s.query(self.task_type).count()
@property
def sampling_epoch(self) -> int:
"""
Return the current sample epoch.
If DB is empty then this is 0. Otherwise it is the max+1,
given a full sample epoch is populated at the same time.
"""
if self.count == 0:
return 0
else:
with self.Session() as s:
return 1 + max([int(x.sampling_epoch) for x in s.query(self.task_type).all()])
def __init__(self, url: str, task_type: typing.Callable, must_exist: bool = False):
super(DownstreamData, self).__init__(url, Base, must_exist = must_exist)
self.task_type = task_type
return
def add_epoch(self,
batch : typing.List[typing.Union[typing.Tuple, typing.Dict]],
sampling_epoch : int,
target_features : typing.Dict[str, float],
tokenizer : 'tokenizers.TokenizerBase',
) -> None:
"""
Add new row entry in downstream data DB.
"""
instances = self.task_type.add_epoch(batch, sampling_epoch, target_features, tokenizer)
with self.Session(commit = True) as ses:
for instance in instances:
entry = ses.query(self.task_type).filter_by(sha256 = instance.sha256).first()
if entry is None:
ses.add(instance)
ses.commit()
return
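# Example usage (sketch; the URL and objects are placeholders):
#   db = DownstreamData("sqlite:///downstream.db", task_type = GreweInstance)
#   db.add_epoch(batch, db.sampling_epoch, target_features, tokenizer)
# where `batch` holds the samples collected during one sampling epoch and
# `tokenizer` is the corpus tokenizer used to detokenize them.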
| 20,020 | 41.963519 | 160 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data generators for active learning committee.
"""
import typing
import copy
import pathlib
import numpy as np
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import logging as l
class ListTrainDataloader(torch.utils.data.Dataset):
"""
Modular dataloading class for downstream tasks.
"""
def __init__(self,
dataset : typing.List[typing.Tuple[typing.List, typing.List]],
lazy : bool = False,
):
super(ListTrainDataloader, self).__init__()
## The dataset here should be a list, and each entry
## must be a tuple containing the input and the target vector.
if len(dataset) <= 0 and not lazy:
l.logger().warn("Active learning committee dataset is empty. Make sure this is expected behavior.")
self.compute_dataset(dataset)
return
def compute_dataset(self, dataset) -> None:
"""
Convert list dataset to torch tensors.
"""
self.dataset = []
for dp in dataset:
if len(dp) == 2:
inp, targ = dp
self.dataset.append(
{
'input_ids' : torch.FloatTensor(inp),
'target_ids': torch.LongTensor(targ),
}
)
elif len(dp) == 3:
inp, targ, idx = dp
self.dataset.append(
{
'input_ids' : torch.FloatTensor(inp),
'target_ids': torch.LongTensor(targ),
'idx' : torch.LongTensor(idx),
}
)
return
def get_batched_dataset(self) -> typing.Dict[str, np.array]:
"""
Batch the whole dataset by keys and return it.
"""
return {
'input_ids' : np.asarray([x['input_ids'].numpy() for x in self.dataset]),
'target_ids' : np.asarray([x['target_ids'].numpy() for x in self.dataset]),
}
def get_random_subset(self, num: int, seed: int = None) -> 'ListTrainDataloader':
"""
Get a sample of num random samples from dataset.
"""
ret = ListTrainDataloader([], lazy = True)
num = min(num, len(self.dataset))
if seed:
generator = torch.Generator()
generator.manual_seed(seed)
else:
generator = None
    rand = set(torch.randperm(len(self.dataset), generator = generator).tolist()[:num])
ret.dataset = [x for idx, x in enumerate(self.dataset) if idx in rand]
return ret
def get_sliced_subset(self, l: int = None, r: int = None) -> 'ListTrainDataloader':
"""
Implement slice operation of current List Dataset.
"""
ret = ListTrainDataloader([], lazy = True)
if l is None:
l = 0
if r is None:
r = len(self.dataset)
ret.dataset = self.dataset[l:r]
return ret
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> typing.Dict[str, torch.Tensor]:
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
return self.dataset[idx]
def __add__(self, dl: 'ListTrainDataloader') -> 'ListTrainDataloader':
ret = ListTrainDataloader([], lazy = True)
ret.dataset = copy.copy(self.dataset)
if dl:
ret.dataset += dl.dataset
return ret
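# Example (sketch): each datapoint is an (input, target) pair of plain lists,
# e.g. ListTrainDataloader([([0.5, 1.0, 0.0], [1]), ([0.1, 0.9, 0.3], [0])]),
# and the instance can be wrapped directly in torch.utils.data.DataLoader;
# batches are then dicts with float 'input_ids' and long 'target_ids' tensors.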
class DictPredictionDataloader(torch.utils.data.Dataset):
"""
Dataloading class that takes datapoint dictionary.
"""
def __init__(self,
dataset: typing.List[typing.Dict[str, typing.List]],
lazy : bool = False,
):
super(DictPredictionDataloader, self).__init__()
if len(dataset) <= 0 and not lazy:
      raise ValueError("Sample dataset is empty.")
self.compute_dataset(dataset)
return
def compute_dataset(self,
dataset: typing.List[typing.Dict[str, typing.List]]
) -> None:
"""
Batch the whole dataset by keys and return it.
"""
self.dataset = []
for idx, dp in enumerate(dataset):
self.dataset.append(
{
'idx' : torch.LongTensor([idx]),
'static_features' : torch.FloatTensor(dp['static_features']),
'runtime_features' : torch.LongTensor(dp['runtime_features']),
'input_ids' : torch.FloatTensor(dp['input_ids']),
}
)
return
def get_batched_dataset(self) -> typing.Dict[str, np.array]:
"""
Batch the whole dataset by keys and return it.
"""
return {
'idx' : np.asarray([x['idx'].numpy() for x in self.dataset]),
'static_features' : np.asarray([x['static_features'].numpy() for x in self.dataset]),
'runtime_features' : np.asarray([x['runtime_features'].numpy() for x in self.dataset]),
'input_ids' : np.asarray([x['input_ids'].numpy() for x in self.dataset]),
}
def get_random_subset(self, num: int, seed: int = None) -> 'DictPredictionDataloader':
"""
Get a sample of num random samples from dataset.
"""
ret = DictPredictionDataloader([], lazy = True)
num = min(num, len(self.dataset))
    if seed is not None:
generator = torch.Generator()
generator.manual_seed(seed)
else:
generator = None
rand = set(torch.randperm(len(self.dataset), generator = generator).tolist()[:num])
ret.dataset = [x for idx, x in enumerate(self.dataset) if idx in rand]
return ret
def get_sliced_subset(self, l: int = None, r: int = None) -> 'DictPredictionDataloader':
"""
    Implement slice operation of current Dict Dataset.
"""
ret = DictPredictionDataloader([], lazy = True)
if l is None:
l = 0
if r is None:
r = len(self.dataset)
ret.dataset = self.dataset[l:r]
return ret
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> typing.Dict[str, torch.Tensor]:
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
return self.dataset[idx]
def __add__(self, dl: 'DictPredictionDataloader') -> 'DictPredictionDataloader':
ret = DictPredictionDataloader([], lazy = True)
ret.dataset = copy.copy(self.dataset)
if dl:
ret.dataset += dl.dataset
return ret
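## Illustrative usage sketch (not part of the original module): builds a tiny
## training dataloader and a prediction dataloader by hand. The feature values
## and vector sizes below are made up purely for demonstration.
if __name__ == "__main__":
  train = ListTrainDataloader([([0.5, 1.0, 0.0, 2.0], [1]), ([0.1, 0.2, 0.3, 0.4], [0])])
  subset = train.get_random_subset(num = 1, seed = 0)
  merged = train + subset
  preds = DictPredictionDataloader([
    {'static_features': [1.0, 2.0], 'runtime_features': [4096, 32], 'input_ids': [0.5, 1.0, 0.0, 2.0]}
  ])
  print(len(train), len(subset), len(merged), len(preds))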
| 6,852 | 31.478673 | 105 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/active_models.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Active Learning feature space models."""
import pathlib
import typing
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import commit
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.active_models import downstream_tasks
from deeplearning.benchpress.active_models.committee import active_committee
from deeplearning.benchpress.active_models.committee import config as com_config
from deeplearning.benchpress.active_models.expected_error_reduction import eer
from deeplearning.benchpress.active_models.expected_error_reduction import config as eer_config
from deeplearning.benchpress.proto import active_learning_pb2
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"disable_active_learning",
False,
"Set True to disable active learner from learning feature space."
"All candidate feature vectors have equal likelihood of being important"
)
flags.DEFINE_integer(
"num_active_samples",
256,
"Select number of points you want to sample with active learner."
)
def AssertConfigIsValid(config: active_learning_pb2.ActiveLearner) -> active_learning_pb2.ActiveLearner:
"""
Parse proto description and check for validity.
"""
pbutil.AssertFieldConstraint(
config,
"downstream_task",
lambda x: x in downstream_tasks.TASKS,
"Downstream task has to be one of {}".format(', '.join([str(x) for x in downstream_tasks.TASKS]))
)
pbutil.AssertFieldIsSet(config, "training_corpus")
pbutil.AssertFieldIsSet(config, "num_train_steps")
pbutil.AssertFieldIsSet(config, "random_seed")
if config.downstream_task in {"Grewe", "FeatureLessGrewe"}:
pbutil.AssertFieldIsSet(config, "top_k")
p = pathlib.Path(config.training_corpus).resolve()
if not p.exists() and config.num_train_steps > 0:
raise FileNotFoundError(p)
if config.HasField("query_by_committee"):
com_config.AssertConfigIsValid(config.query_by_committee)
elif config.HasField("expected_error_reduction"):
eer_config.AssertConfigIsValid(config.expected_error_reduction)
else:
raise NotImplementedError(config)
return config
class Model(object):
"""Predictive models for active learning.
Please note model instances should be treated as immutable. Upon
instantiation, a model's properties are used to determine its hash. If you
modify a property after instantiation, the hash will be out of date, which
can lead to bad things happening.
"""
def __init__(self,
config : active_learning_pb2.ActiveLearner,
cache_path : pathlib.Path,
):
"""Instantiate a model.
Args:
config: A Model message.
Raises:
TypeError: If the config argument is not a Model proto.
      UserError: In case of an invalid config.
"""
# Error early, so that a cache isn't created.
if not isinstance(config, active_learning_pb2.ActiveLearner):
t = type(config).__name__
raise TypeError(f"Config must be an ActiveLearner proto. Received: '{t}'")
self.config = active_learning_pb2.ActiveLearner()
# Validate config options.
self.config.CopyFrom(AssertConfigIsValid(config))
self.cache_path = cache_path / "active_model"
if environment.WORLD_RANK == 0:
self.cache_path.mkdir(exist_ok = True, parents = True)
(self.cache_path / "samples").mkdir(exist_ok = True)
distrib.barrier()
(self.cache_path / "downstream_task").mkdir(exist_ok = True, parents = True)
self.downstream_task = downstream_tasks.DownstreamTask.FromTask(
self.config.downstream_task,
pathlib.Path(self.config.training_corpus).resolve(),
self.cache_path / "downstream_task",
self.config.random_seed,
top_k = self.config.top_k if self.config.HasField("top_k") else None,
test_db = pathlib.Path(self.config.test_db).resolve() if self.config.HasField("test_db") else None
)
if environment.WORLD_RANK == 0:
## Store current commit
commit.saveCommit(self.cache_path)
if self.config.HasField("query_by_committee"):
self.backend = active_committee.QueryByCommittee(self.config, self.cache_path, self.downstream_task)
elif self.config.HasField("expected_error_reduction"):
self.backend = eer.ExpectedErrorReduction(self.config, self.cache_path, self.downstream_task)
l.logger().info("Initialized {} in {}".format(self.backend, self.cache_path))
return
def Train(self, **kwargs) -> "Model":
"""Train the model.
Returns:
The model instance.
Raises:
UnableToAcquireLockError: If the model is locked (i.e. there is another
process currently modifying the model).
"""
if FLAGS.disable_active_learning:
l.logger().warn("Active learning has been disabled. Skip training.")
else:
self.backend.Train(**kwargs)
return self
def UpdateLearn(self, update_dataloader: 'torch.utils.data.Dataset') -> None:
"""
Train-update active learner with new generated datapoints.
"""
if FLAGS.disable_active_learning:
l.logger().warn("Active learning has been disabled. Skip update training.")
else:
self.Train(update_dataloader = update_dataloader)
return
def Sample(self, num_samples: int = None) -> typing.List[typing.Dict[str, float]]:
"""
Sample the active learner.
Knowing a downstream task, the active learning model samples
and returns the datapoints that are deemed valuable.
"""
sample_set = self.downstream_task.sample_space(num_samples = FLAGS.num_active_samples if num_samples is None else num_samples)
if FLAGS.disable_active_learning:
l.logger().warn("Active learning has been disabled. Skip update training.")
l.logger().warn("This is passive learning mode to illustrate AL's significance.")
l.logger().warn("Instead of querying, a random datapoint is returned.")
return [
{
'idx' : int(x['idx']),
'static_features' : self.downstream_task.VecToStaticFeatDict(x['static_features'].numpy()),
'runtime_features': self.downstream_task.VecToRuntimeFeatDict(x['runtime_features'].numpy()),
'input_features' : self.downstream_task.VecToInputFeatDict(x['input_ids'].numpy()),
} for x in
sample_set.get_random_subset(num = len(sample_set), seed = self.config.random_seed).dataset
]
else:
return self.backend.Sample(sample_set = sample_set)
def SamplerCache(self, sampler: 'samplers.Sampler') -> pathlib.Path:
"""Get the path to a sampler cache.
Args:
sampler: A Sampler instance.
Returns:
A path to a directory. Note that this directory may not exist - it is
created only after a call to Sample().
"""
return self.cache_path / "samples" / sampler.hash
@property
def is_trained(self) -> bool:
return self.backend.is_trained
| 7,599 | 37.77551 | 130 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/downstream_tasks.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module specifies the range of available
downstream tasks that the committee can be trained on.
The input and output features per downstream task are defined.
"""
import pathlib
import pickle
import math
import functools
import typing
import tqdm
import copy
import multiprocessing
import time
import numpy as np
from deeplearning.benchpress.active_models import data_generator
from deeplearning.benchpress.active_models import downstream_data
from deeplearning.benchpress.experiments import cldrive
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import grewe
from deeplearning.benchpress.features import hidden_state
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import http_server
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.models.torch_bert.data_generator import JSON_to_ActiveSample
from deeplearning.benchpress.models.torch_bert.data_generator import ActiveSample_to_JSON
from absl import app, flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"server_tokenizer",
None,
"Set path for tokenizer to be used by downstream server."
)
flags.DEFINE_string(
"server_cldrive_cache",
None,
"Set path for cldrive_cache to be used by downstream server."
)
flags.DEFINE_boolean(
"only_optimal_gsize",
False,
"If True, only the best matching global size to transferred_bytes will be executed. Otherwise, everything."
)
def ExtractorWorker(cldrive_entry: cldrive.CLDriveSample, fspace: str):
"""
  Worker that extracts features for a cldrive entry and returns both together,
  so that the features stay aligned with their entries across multiprocessing.
"""
features = extractor.ExtractFeatures(cldrive_entry.source, [fspace])
if fspace in features and features[fspace]:
return features[fspace], cldrive_entry
return None
class DownstreamTask(object):
"""
Downstream Task generic class.
"""
@classmethod
def FromTask(cls,
task : str,
corpus_path : pathlib.Path,
cache_path : pathlib.Path,
random_seed : int,
**kwargs
) -> "DownstreamTask":
return TASKS[task](corpus_path, cache_path, random_seed, **kwargs)
def __init__(self,
name : str,
cache_path : pathlib.Path,
task_type : typing.Callable,
random_seed : int,
use_as_server : bool
) -> None:
self.name = name
self.random_seed = random_seed
self.cache_path = cache_path
if environment.WORLD_RANK == 0 and not use_as_server:
self.downstream_data = downstream_data.DownstreamData(
"sqlite:///{}/downstream_data.db".format(cache_path),
task_type = task_type,
must_exist = False,
)
return
def step_generation(self, candidates: typing.List['ActiveSample']) -> None:
raise NotImplementedError("Abstract Class")
def saveCheckpoint(self) -> None:
raise NotImplementedError("Abstract Class")
def loadCheckpoint(self) -> None:
raise NotImplementedError("Abstract Class")
class GreweAbstract(DownstreamTask):
"""
  An abstract class for Grewe CPU-vs-GPU related downstream tasks.
"""
@property
def runtime_features_size(self) -> int:
return 2
@property
def static_features_size(self) -> int:
return len(self.static_features_labels)
@property
def output_size(self) -> int:
return 2
@property
def output_labels(self) -> typing.Tuple[str, str]:
return ["CPU", "GPU"]
@property
def output_ids(self) -> typing.Tuple[str, str]:
return [0, 1]
@property
def test_set(self) -> 'torch.Dataset':
if self.test_db:
if not self.test_dataset:
data = [x for x in self.test_db.get_valid_data(dataset = "GPGPU_benchmarks")]
features_iter = extractor.ExtractFeaturesIter([x.source for x in data], [self.feature_space])[self.feature_space]
test_data = []
for dp, features in tqdm.tqdm(zip(data, features_iter), total = len(data), desc = "Test Set"):
test_data.append(
(
self.InputtoEncodedVector(features, dp.transferred_bytes, dp.local_size),
[self.TargetLabeltoID(dp.status)],
[int(dp.id)],
)
)
self.test_dataset = data_generator.ListTrainDataloader(test_data)
self.saveCheckpoint()
return self.test_dataset
else:
return None
def __init__(self,
name : str,
cache_path : pathlib.Path,
task_type : typing.Callable,
random_seed : int,
top_k : int,
use_as_server : bool,
test_db : pathlib.Path = None,
) -> None:
super(GreweAbstract, self).__init__(
name,
cache_path,
task_type,
random_seed,
use_as_server,
)
if not use_as_server:
self.top_k = top_k
if test_db:
self.test_db = cldrive.CLDriveExecutions(url = "sqlite:///{}".format(str(test_db)), must_exist = True)
else:
self.test_db = None
self.test_dataset = None
return
def setup_server(self) -> None:
"""
This is server mode.
In server mode, initialize the serving process.
"""
if environment.WORLD_RANK == 0:
self.cl_proc, self.work_flag, self.read_queue, self.write_queues, self.reject_queues = http_server.start_server_process()
return
def TargetIDtoLabels(self, id: int) -> str:
"""
Integer ID to label of predictive model.
"""
return {
0: "CPU",
1: "GPU",
}[id]
def TargetLabeltoID(self, label: str) -> int:
"""
Predictive label to ID.
"""
return {
"CPU": 0,
"GPU": 1,
}[label]
def TargetLabeltoEncodedVector(self, label: str) -> typing.List[int]:
"""
Label to target vector.
"""
return {
"CPU": [1, 0],
"GPU": [0, 1],
}[label]
def StaticFeatDictToVec(self, static_feats: typing.Dict[str, float]) -> typing.List[float]:
"""
Process grewe static features dictionary into list of floats to be passed as tensor.
"""
return [static_feats[key] for key in self.static_features_labels]
def VecToStaticFeatDict(self, feature_values: typing.List[float]) -> typing.Dict[str, float]:
"""
Process float vector of feature values to dictionary of features.
"""
return {key: val for key, val in zip(self.static_features_labels, feature_values)}
def VecToRuntimeFeatDict(self, runtime_values: typing.List[int]) -> typing.Dict[str, int]:
"""
Process runtime int values to runtime features dictionary.
"""
trb, ls = runtime_values
return {
'transferred_bytes' : int(trb),
'local_size' : int(ls),
}
def VecToInputFeatDict(self, input_ids: typing.List[float]) -> typing.Dict[str, float]:
"""
Convert to dictionary of predictive model input features.
"""
return {
k: v for k, v in zip(self.input_labels, input_ids)
}
def CollectSingleRuntimeFeature(self,
sample: 'ActiveSample',
tokenizer: 'tokenizers.TokenizerBase',
store_rejects: bool = False,
) -> typing.Tuple[typing.List['ActiveSample'], typing.List['ActiveSample']]:
"""
Overloaded function to compute runtime features for a single instance.
"""
def create_sample(s: 'ActiveSample', cached: cldrive.CLDriveSample, trb: int, gs: int) -> typing.List['ActiveSample']:
nrfeats = copy.deepcopy(s.runtime_features)
nrfeats['transferred_bytes'] = trb
nrfeats['global_size'] = int(2**gs)
nrfeats['label'] = cached.status
if nrfeats['label'] in {"CPU", "GPU"}:
nrfeats['cpu_transfer_ns'] = self.corpus_db.reduce_execution_times(cached.cpu_transfer_time_ns)
nrfeats['cpu_kernel_ns'] = self.corpus_db.reduce_execution_times(cached.cpu_kernel_time_ns)
nrfeats['gpu_transfer_ns'] = self.corpus_db.reduce_execution_times(cached.gpu_transfer_time_ns)
nrfeats['gpu_kernel_ns'] = self.corpus_db.reduce_execution_times(cached.gpu_kernel_time_ns)
return s._replace(runtime_features = nrfeats)
exp_tr_bytes = sample.runtime_features['transferred_bytes']
local_size = sample.runtime_features['local_size']
found = False
found_bytes = None
gsize = int(max(1, math.log2(local_size)))
opt_gsize = gsize
code = tokenizer.ArrayToCode(sample.sample)
new_samples = []
rejects = []
last_cached = None
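    ## Sweep global sizes upward (powers of two, capped at 2**20) until the measured
    ## transferred bytes reach or exceed the requested amount.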
while not found and gsize <= 20:
sha256 = crypto.sha256_str(code + "BenchPress" + str(2**gsize) + str(local_size))
if sha256 in self.corpus_db.status_cache:
cached = self.corpus_db.get_entry(code, "BenchPress", int(2**gsize), int(local_size))
else:
## If not cached, compute.
cached = self.corpus_db.update_and_get(
code,
sample.features,
"BenchPress",
global_size = int(2**gsize),
local_size = int(local_size),
num_runs = 10000,
timeout = 60,
)
if cached is not None and cached.status in {"CPU", "GPU"}:
        ## If element execution has succeeded.
tr_bytes = cached.transferred_bytes
if FLAGS.only_optimal_gsize:
## only_optimal_size means you compute only one gsize combination.
## The one that falls closest to the targeted transferred_bytes.
if tr_bytes < exp_tr_bytes or found_bytes is None or abs(exp_tr_bytes - tr_bytes) < abs(exp_tr_bytes - found_bytes):
            ## If the bytes still fall below the expected amount,
## OR bytes are more than expected but it's the first successful execution,
## OR if bytes just surpassed the expected tr bytes and the distance from target is closer than the previous tr_bytes,
## Then update the optimal global size and the found bytes.
opt_gsize = gsize
found_bytes = tr_bytes
last_cached = cached
if tr_bytes >= exp_tr_bytes:
## Set this to True only when you surpass the expected.
## Only then you can be sure that you got as close as possible to the optimal.
found = True
else:
s = create_sample(
s = sample,
cached = cached,
trb = tr_bytes,
gs = gsize
)
if s.runtime_features['label'] in {"CPU", "GPU"}:
new_samples.append(s)
elif store_rejects:
rejects.append(s)
elif cached is not None:
## If failed, store to rejects and set transferred bytes to None.
if store_rejects:
rejects.append(
create_sample(
s = sample,
cached = cached,
trb = exp_tr_bytes,
gs = gsize,
)
)
gsize += 1
if FLAGS.only_optimal_gsize:
## If only the optimal size is needed and the execution has succeeded,
## create a new copy of the sample
if found_bytes:
s = create_sample(sample, last_cached, found_bytes, opt_gsize)
if s.runtime_features['label'] in {"CPU", "GPU"}: ## This check is redundant, but better safe than sorry.
new_samples = [s]
elif store_rejects:
        rejects.append(
          create_sample(
            s = sample,
            cached = last_cached,
            trb = exp_tr_bytes,
            gs = gsize,
          )
        )
return new_samples, rejects
def CollectRuntimeFeatures(self,
samples : typing.List['ActiveSample'],
tokenizer : 'tokenizers.TokenizerBase',
) -> typing.List['ActiveSample']:
"""
Collect the top_k samples that can run on CLDrive and set their global size
to the appropriate value so it can match the transferred bytes.
Args:
samples:
List of Active Samples collected from LM inference.
tokenizer:
Tokenizer.
"""
if FLAGS.use_http_server:
## For server mode, master node, sleep while the backend is still working.
if environment.WORLD_RANK == 0:
new_samples = []
while int(http_server.client_status_request()[1]) >= 300: # While the backend is WORKING
## Backend is working.
time.sleep(2)
while int(http_server.client_status_request()[1]) != 200:
## While more samples.
new_samples += http_server.client_get_request()
time.sleep(1)
if environment.WORLD_SIZE > 1:
distrib.broadcast(new_samples)
else:
# Else synchronize with new data.
new_samples = distrib.broadcast()
distrib.barrier()
new_samples = [JSON_to_ActiveSample(x) for x in new_samples]
if self.top_k != -1:
## Return only the results that come from the top_k code samples.
top_k_codes = set()
return_samples = []
for s in sorted([x for x in new_samples if x.runtime_features['label'] in {"CPU", "GPU"}], key = lambda x: x.score):
key = ''.join([str(x) for x in s.sample])
if key not in top_k_codes and len(top_k_codes) < self.top_k:
top_k_codes.add(key)
return_samples.append(s)
elif key in top_k_codes:
return_samples.append(s)
l.logger().warn("Collected {} new samples from {} top_k code".format(len(return_samples), len(top_k_codes)))
return return_samples
else:
l.logger().warn("Collected {} new samples from http server".format(len(new_samples)))
return sorted([x for x in new_samples if x.runtime_features['label'] in {"CPU", "GPU"}], key = lambda x: x.score)
else:
## If not server mode, compute locally labels for each sample.
new_samples = []
total = 0
for sample in tqdm.tqdm(sorted(samples, key = lambda x: x.score), total = len(samples), desc = "CLDrive", leave = False):
ret, rej = self.CollectSingleRuntimeFeature(sample, tokenizer)
if len(ret) > 0:
total += 1
for s in ret:
if s.runtime_features['label'] in {"CPU", "GPU"}:
new_samples.append(s)
if self.top_k != -1 and total >= self.top_k:
return new_samples
return new_samples
def UpdateDataGenerator(self,
new_samples : typing.List['ActiveSample'],
target_features : typing.Dict[str, float],
tokenizer : 'tokenizers.TokenizerBase',
) -> data_generator.ListTrainDataloader:
"""
    Collect newly generated samples, find their runtime features and process them into a torch dataset.
"""
new_samples = self.CollectRuntimeFeatures(new_samples, tokenizer)
self.UpdateDownstreamDatabase(new_samples, target_features, tokenizer)
updated_dataset = [
(
self.InputtoEncodedVector(entry.features,
entry.runtime_features['transferred_bytes'],
entry.runtime_features['local_size']
),
[self.TargetLabeltoID(entry.runtime_features['label'])]
) for entry in new_samples
]
if len(updated_dataset) == 0:
l.logger().warn("Update dataset is empty.")
return updated_dataset, data_generator.ListTrainDataloader(updated_dataset, lazy = True)
def UpdateTrainDataset(self, updated_dataloader: data_generator.ListTrainDataloader) -> None:
"""
After active learner has been updated, store updated samples to original train dataset.
"""
self.data_generator = self.data_generator + updated_dataloader
self.saveCheckpoint()
def step_generation(self, candidates: typing.List['ActiveSample']) -> None:
"""
End of LM generation's epoch hook.
"""
if FLAGS.use_http_server:
serialized = []
for cand in candidates:
serialized.append(
ActiveSample_to_JSON(cand)
)
http_server.client_put_request(serialized)
return
def ServeRuntimeFeatures(self, tokenizer: 'tokenizers.TokenizerBase') -> None:
"""
In server mode, listen to the read queue, collect runtime features,
append to local cache and publish to write queue for the client to fetch.
    This is currently implemented only for the HTTP server, not the socket server.
"""
try:
while self.cl_proc.is_alive():
if not self.read_queue.empty():
self.work_flag.value = True
source, serialized = self.read_queue.get()
sample = JSON_to_ActiveSample(serialized)
ret, rej = self.CollectSingleRuntimeFeature(sample, tokenizer, store_rejects = True)
for x in ret:
self.write_queues[source].append(ActiveSample_to_JSON(x))
for x in rej:
self.reject_queues[source].append(ActiveSample_to_JSON(x))
else:
self.work_flag.value = False
time.sleep(1)
except KeyboardInterrupt:
pass
return
def saveCheckpoint(self) -> None:
"""
Store data generator.
"""
if environment.WORLD_RANK == 0:
with open(self.cache_path / "downstream_task_dg.pkl", 'wb') as outf:
pickle.dump(
{
'data_generator': self.data_generator,
'rand_generator': self.rand_generator.get_state(),
'test_dataset' : self.test_dataset,
},
outf
)
return
def loadCheckpoint(self) -> 'torch.Dataset':
"""
Load state of downstream task.
"""
if (self.cache_path / "downstream_task_dg.pkl").exists():
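      ## Take the distributed lock so only one process reads the checkpoint file at a time.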
distrib.lock()
with open(self.cache_path / "downstream_task_dg.pkl", 'rb') as infile:
data = pickle.load(infile)
infile.close()
while not infile.closed:
time.sleep(1)
if environment.WORLD_SIZE > 1:
time.sleep(30)
distrib.unlock()
return data
else:
return None
class Grewe(GreweAbstract):
"""
Specification class for Grewe et al. CGO 2013 predictive model.
  This class is responsible for fetching the raw data and acting as a tokenizer
  for the data, since the data generator should remain agnostic of the labels.
"""
@property
def input_size(self) -> int:
return 4
@property
def static_features_labels(self) -> typing.List[str]:
return grewe.KEYS
@property
def input_labels(self) -> typing.List[str]:
return [
"tr_bytes/(comp+mem)",
"coalesced/mem",
"localmem/(mem+wgsize)",
"comp/mem"
]
@property
def feature_space(self) -> str:
return "GreweFeatures"
def __init__(self,
corpus_path : pathlib.Path,
cache_path : pathlib.Path,
random_seed : int,
top_k : int,
use_as_server : bool = False,
test_db : pathlib.Path = None,
**unused_kwargs,
) -> None:
del unused_kwargs
super(Grewe, self).__init__(
"Grewe",
cache_path,
downstream_data.GreweInstance,
random_seed,
top_k,
use_as_server,
test_db,
)
self.corpus_path = corpus_path
self.corpus_db = cldrive.CLDriveExecutions(url = "sqlite:///{}".format(str(self.corpus_path)))
if use_as_server:
self.setup_server()
else:
## Setup random seed np random stuff
self.rand_generator = None
self.gen_bounds = {
'comp' : (1, 300),
'rational' : (0, 50),
'mem' : (1, 50),
'localmem' : (0, 50),
'coalesced' : (0, 10),
'atomic' : (0, 10),
'transferred_bytes': (1, 31), # 2**pow,
'local_size' : (1, 10), # 2**pow,
}
return
def __repr__(self) -> str:
return "Grewe"
def setup_dataset(self, num_train_steps: int = None) -> None:
"""
Fetch data and preprocess into corpus for Grewe's predictive model.
"""
checkpointed = self.loadCheckpoint()
if checkpointed:
self.data_generator = checkpointed['data_generator']
self.rand_generator = np.random.RandomState()
self.test_dataset = checkpointed['test_dataset']
self.rand_generator.set_state(checkpointed['rand_generator'])
self.dataset = self.data_generator.dataset
else:
self.rand_generator = np.random
self.rand_generator.seed(self.random_seed)
self.dataset = []
data = [x for x in self.corpus_db.get_valid_data(dataset = "GitHub")] ## TODO: Here you must get original training dataset instead of random github benchmarks.
pool = multiprocessing.Pool()
it = pool.imap_unordered(functools.partial(ExtractorWorker, fspace = self.feature_space), data)
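      ## Each worker returns a (features, cldrive entry) pair, or None when feature
      ## extraction fails for that kernel; failed entries are simply skipped below.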
idx = 0
try:
loop = tqdm.tqdm(it, total = len(data), desc = "Grewe corpus setup", leave = False) if environment.WORLD_RANK == 0 else it
for dp in loop:
if dp:
feats, entry = dp
self.dataset.append(
(
self.InputtoEncodedVector(feats, entry.transferred_bytes, entry.local_size),
[self.TargetLabeltoID(entry.status)]
)
)
idx += 1
# if idx >= 100:
# break
pool.close()
except Exception as e:
pool.terminate()
raise e
# pool.terminate()
if num_train_steps:
self.data_generator = data_generator.ListTrainDataloader(self.dataset[:num_train_steps])
else:
self.data_generator = data_generator.ListTrainDataloader(self.dataset)
self.saveCheckpoint()
return
def UpdateDownstreamDatabase(self,
new_samples : typing.List[typing.Dict[str, typing.Any]],
target_features : typing.Dict[str, float],
tokenizer : 'tokenizers.TokenizerBase',
) -> None:
"""
Update exported database of downstream task.
"""
if environment.WORLD_RANK == 0:
cur_sample_ep = self.downstream_data.sampling_epoch
self.downstream_data.add_epoch(
new_samples, cur_sample_ep, target_features, tokenizer
)
distrib.barrier()
return
def sample_space(self, num_samples: int = 512) -> data_generator.DictPredictionDataloader:
"""
Go fetch Grewe Predictive model's feature space and randomly return num_samples samples
to evaluate. The predictive model samples are mapped as a value to the static features
as a key.
"""
samples = []
samples_hash = set()
for x in range(num_samples):
fvec = {
k: self.rand_generator.randint(self.gen_bounds[k][0], self.gen_bounds[k][1])
for k in self.static_features_labels if k not in {"F2:coalesced/mem", "F4:comp/mem"}
}
try:
fvec['F2:coalesced/mem'] = fvec['coalesced'] / fvec['mem']
except ZeroDivisionError:
fvec['F2:coalesced/mem'] = 0.0
try:
fvec['F4:comp/mem'] = fvec['comp'] / fvec['mem']
except ZeroDivisionError:
fvec['F4:comp/mem'] = 0.0
transferred_bytes = 2**self.rand_generator.randint(self.gen_bounds['transferred_bytes'][0], self.gen_bounds['transferred_bytes'][1])
local_size = 2**self.rand_generator.randint(self.gen_bounds['local_size'][0], self.gen_bounds['local_size'][1])
inp_ids = self.InputtoEncodedVector(fvec, transferred_bytes, local_size)
if str(inp_ids) not in samples_hash:
samples.append(
{
'static_features' : self.StaticFeatDictToVec(fvec),
'runtime_features' : [transferred_bytes, local_size],
'input_ids' : inp_ids,
}
)
samples_hash.add(str(inp_ids))
return data_generator.DictPredictionDataloader(samples)
def InputtoEncodedVector(self,
static_feats : typing.Dict[str, float],
transferred_bytes : int,
local_size : int,
) -> typing.List[float]:
"""
Encode consistently raw features to Grewe's predictive model inputs.
"""
try:
i1 = transferred_bytes / (static_feats['comp'] + static_feats['mem'])
except ZeroDivisionError:
i1 = 0.0
try:
i2 = static_feats['coalesced'] / static_feats['mem']
except ZeroDivisionError:
i2 = 0.0
try:
i3 = (static_feats['localmem'] / static_feats['mem']) * local_size
except ZeroDivisionError:
i3 = 0.0
try:
i4 = static_feats['comp'] / static_feats['mem']
except ZeroDivisionError:
i4 = 0.0
return [i1, i2, i3, i4]
class FeatureLessGrewe(GreweAbstract):
"""
A feature-less implementation of Grewe's CPU vs GPU model.
  This task uses the language model's hidden outputs as features
instead of manually selecting the compiler features.
"""
@property
def input_size(self) -> int:
return self.static_features_size + self.runtime_features_size
@property
def static_features_labels(self) -> typing.List[str]:
return hidden_state.KEYS
@property
def input_labels(self) -> typing.List[str]:
return self.static_features_labels + ["transferred_bytes", "local_size"]
@property
def feature_space(self) -> str:
return "HiddenState"
def __init__(self,
corpus_path : pathlib.Path,
cache_path : pathlib.Path,
random_seed : int,
top_k : int,
use_as_server : bool = False,
test_db : pathlib.Path = None,
**unused_kwargs,
) -> None:
del unused_kwargs
super(FeatureLessGrewe, self).__init__(
"FeatureLessGrewe",
cache_path,
downstream_data.FeatureLessGreweInstance,
random_seed,
top_k,
use_as_server,
test_db,
)
self.corpus_path = corpus_path
self.corpus_db = cldrive.CLDriveExecutions(url = "sqlite:///{}".format(str(self.corpus_path)))
if use_as_server:
self.setup_server()
else:
## Setup random seed np random stuff
self.dataset = None
self.data_generator = None
self.rand_generator = None
self.gen_bounds = {
'transferred_bytes': (1, 31), # 2**pow,
'local_size' : (1, 10), # 2**pow,
}
return
def __repr__(self) -> str:
return "FeatureLessGrewe"
def setup_dataset(self, **kwargs) -> None:
"""
    Initializes all initial data and data types needed for the downstream task.
    The predictive model will not be trained on initial data, therefore the data
    generator is initialized here as empty.
    A test set is still needed for this task: the CSV file of labelled,
    human-written benchmarks, which serves as the evaluator.
"""
checkpointed = self.loadCheckpoint()
if checkpointed:
self.data_generator = checkpointed['data_generator']
self.rand_generator = np.random.RandomState()
self.rand_generator.set_state(checkpointed['rand_generator'])
self.test_dataset = checkpointed['test_dataset']
self.dataset = self.data_generator.dataset
else:
## For Expected Error Reduction, no human benchmarks are used for initial training.
self.data_generator = data_generator.ListTrainDataloader([])
self.dataset = []
self.rand_generator = np.random
self.rand_generator.seed(self.random_seed)
self.saveCheckpoint()
return
def sample_space(self, num_samples: int = 128) -> data_generator.DictPredictionDataloader:
"""
    Sample the hidden state's feature space [1 x hidden_state_size], with each component
    drawn uniformly from [-1, 1], and return num_samples random samples to evaluate.
    The predictive model samples are mapped as a value to the static features as a key.
"""
samples = []
samples_hash = set()
for _ in range(num_samples):
random_values = self.rand_generator.uniform(-1, 1, self.static_features_size)
fvec = {
k: v
for k, v in zip(self.static_features_labels, random_values)
}
transferred_bytes = 2**self.rand_generator.randint(self.gen_bounds['transferred_bytes'][0], self.gen_bounds['transferred_bytes'][1])
local_size = 2**self.rand_generator.randint(self.gen_bounds['local_size'][0], self.gen_bounds['local_size'][1])
inp_ids = self.InputtoEncodedVector(fvec, transferred_bytes, local_size)
if str(inp_ids) not in samples_hash:
samples.append(
{
'static_features' : self.StaticFeatDictToVec(fvec),
'runtime_features' : [transferred_bytes, local_size],
'input_ids' : inp_ids,
}
)
samples_hash.add(str(inp_ids))
return data_generator.DictPredictionDataloader(samples)
def UpdateDownstreamDatabase(self,
new_samples : typing.List[typing.Dict[str, typing.Any]],
target_features : typing.Dict[str, float],
tokenizer : 'tokenizers.TokenizerBase',
) -> None:
"""
Update exported database of downstream task.
"""
if environment.WORLD_RANK == 0:
cur_sample_ep = self.downstream_data.sampling_epoch
extended_samples = []
memo = {}
for sample in new_samples:
key = ','.join([str(x) for x in sample.sample])
if key not in memo:
src = tokenizer.ArrayToCode(sample.sample)
memo[key] = extractor.ExtractFeatures(src, ["GreweFeatures"])["GreweFeatures"]
extended_samples.append((sample, memo[key]))
self.downstream_data.add_epoch(
extended_samples, cur_sample_ep, target_features, tokenizer
)
distrib.barrier()
return
def InputtoEncodedVector(self,
static_feats : typing.Dict[str, float],
transferred_bytes : int,
local_size : int,
) -> typing.List[float]:
"""
Encode consistently LM's hidden output features to Grewe's predictive model inputs.
"""
return [
static_feats[l] for l in self.static_features_labels
] + [math.log2(transferred_bytes), math.log2(local_size)]
def VecToRuntimeFeatDict(self, runtime_values: typing.List[int]) -> typing.Dict[str, int]:
"""
Process runtime int values to runtime features dictionary.
"""
trb, ls = runtime_values
return {
'transferred_bytes' : int(trb),
'local_size' : int(ls),
}
TASKS = {
"Grewe" : Grewe,
"FeatureLessGrewe" : FeatureLessGrewe,
}
def main(*args, **kwargs) -> None:
if FLAGS.server_tokenizer is None:
raise ValueError("Please define --server_tokenizer")
if FLAGS.server_cldrive_cache is None:
raise ValueError("Please define --server_cldrive_cache")
tokenizer_path = pathlib.Path(FLAGS.server_tokenizer).resolve()
cldrive_cache = pathlib.Path(FLAGS.server_cldrive_cache).resolve()
if not tokenizer_path.exists():
raise FileNotFoundError(tokenizer_path)
# if not cldrive_cache.exists():
# raise FileNotFoundError(cldrive_cache)
if not FLAGS.use_http_server and not FLAGS.use_socket_server:
raise ValueError("This booting point is supposed to work as server. Set your flags appropriately.")
tokenizer = tokenizers.TokenizerBase.FromFile(tokenizer_path)
task = DownstreamTask.FromTask("FeatureLessGrewe", cldrive_cache, "/tmp/", 0, top_k = -1, use_as_server = True)
task.ServeRuntimeFeatures(tokenizer)
return
if __name__ == "__main__":
app.run(main)
exit()
| 32,769 | 35.451613 | 165 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/backends.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural network backends for active learning models."""
import typing
import pathlib
import numpy as np
from deeplearning.benchpress.active_models import downstream_tasks
from deeplearning.benchpress.proto import active_learning_pb2
from deeplearning.benchpress.util import cache
class BackendBase(object):
"""
The base class for an active learning model backend.
"""
def __init__(
self,
config : active_learning_pb2.ActiveLearner,
cache_path : pathlib.Path,
downstream_task : downstream_tasks.DownstreamTask
):
self.config = config
self.cache_path = cache_path
self.downstream_task = downstream_task
self.downstream_task.setup_dataset(num_train_steps = self.config.num_train_steps)
return
def Train(self, **extra_kwargs) -> None:
"""Train the backend."""
raise NotImplementedError
def Sample(self, sampler: 'samplers.Sampler', seed: typing.Optional[int] = None) -> None:
"""
Sampling regime for backend.
"""
raise NotImplementedError
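## Illustrative-only sketch (not part of the original module): a concrete backend
## subclasses BackendBase and fills in Train/Sample. The class below is a
## hypothetical placeholder, not an actual BenchPress backend.
class _NoOpBackendSketch(BackendBase):
  """Toy backend that trains nothing and samples nothing useful."""
  def Train(self, **extra_kwargs) -> None:
    # A real backend would fit its model(s) on self.downstream_task.data_generator here.
    return
  def Sample(self, sampler: 'samplers.Sampler', seed: typing.Optional[int] = None) -> None:
    # A real backend would rank candidate feature vectors by expected usefulness here.
    return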
| 1,639 | 32.469388 | 91 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/committee/active_committee.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Array of NN models used for Active Learning Query-By-Committee.
This module handles
a) the passive training of the committee,
b) the confidence level of the committee for a datapoint (using entropy)
"""
import typing
import datetime
import tqdm
import pathlib
import pickle
import copy
import math
import numpy as np
from deeplearning.benchpress.models.torch_bert import optimizer
from deeplearning.benchpress.models.torch_bert import hooks
from deeplearning.benchpress.active_models import backends
from deeplearning.benchpress.active_models import data_generator
from deeplearning.benchpress.active_models.committee import models
from deeplearning.benchpress.active_models.committee import config
from deeplearning.benchpress.active_models.committee import committee_database
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
class QueryByCommittee(backends.BackendBase):
class TrainingOpts(typing.NamedTuple):
"""Wrapper class for training options"""
train_batch_size : int
learning_rate : float
num_warmup_steps : int
max_grad_norm : float
steps_per_epoch : int
num_epochs : int
num_train_steps : int
n_clusters : int
init : str
n_init : int
max_iter : int
tol : float
algorithm : str
n_neighbors : int
weights : str
leaf_size : int
p : float
class CommitteeEstimator(typing.NamedTuple):
"""Named tuple to wrap BERT pipeline."""
model : typing.TypeVar('nn.Module')
data_generator : 'torch.utils.data.Dataset'
optimizer : typing.Any
scheduler : typing.Any
training_opts : 'TrainingOpts'
sha256 : str
config : config.ModelConfig
train_fn : typing.Callable
sample_fn : typing.Callable
def __repr__(self):
return "QueryByCommittee"
def __init__(self, *args, **kwargs):
super(QueryByCommittee, self).__init__(*args, **kwargs)
from deeplearning.benchpress.util import pytorch
if not pytorch.initialized:
pytorch.initPytorch()
self.pytorch = pytorch
self.torch = pytorch.torch
self.torch_tpu_available = pytorch.torch_tpu_available
self.torch.manual_seed(self.config.random_seed)
self.torch.cuda.manual_seed_all(self.config.random_seed)
self.ckpt_path = self.cache_path / "checkpoints"
self.sample_path = self.cache_path / "samples"
self.logfile_path = self.cache_path / "logs"
self.validation_results_file = "val_results.txt"
self.validation_results_path = self.logfile_path / self.validation_results_file
self.committee = None
self.is_validated = False
self.is_trained = False
self.committee_samples = committee_database.CommitteeSamples(
url = "sqlite:///{}".format(str(self.sample_path / "samples.db")),
must_exist = False,
)
self.sample_epoch = self.committee_samples.cur_sample_epoch
l.logger().info("Active Committee config initialized in {}".format(self.cache_path))
return
def _ConfigModelParams(self,
data_generator : 'torch.utils.data.Dataset' = None,
is_sampling : bool = False
) -> None:
"""
Model parameter initialization.
"""
if not self.committee:
self.committee = []
self.committee_configs = config.ModelConfig.FromConfig(
self.config.query_by_committee, self.downstream_task, self.config.num_train_steps
)
for idx, cconfig in enumerate(self.committee_configs):
training_opts = QueryByCommittee.TrainingOpts(
train_batch_size = cconfig.batch_size,
learning_rate = cconfig.learning_rate,
num_warmup_steps = cconfig.num_warmup_steps,
max_grad_norm = cconfig.max_grad_norm,
steps_per_epoch = cconfig.steps_per_epoch,
num_epochs = cconfig.num_epochs,
num_train_steps = cconfig.num_train_steps,
n_clusters = cconfig.n_clusters,
init = cconfig.init,
n_init = cconfig.n_init,
max_iter = cconfig.max_iter,
tol = cconfig.tol,
algorithm = cconfig.algorithm,
n_neighbors = cconfig.n_neighbors,
weights = cconfig.weights,
leaf_size = cconfig.leaf_size,
p = cconfig.p,
)
cm = models.CommitteeModels.FromConfig(idx, cconfig)
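        ## Only neural-network members need an optimizer and LR scheduler; unsupervised
        ## members (e.g. KMeans, KNN) are fitted directly and dispatch to the *UnsupervisedMember train/sample functions.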
if not is_sampling and isinstance(cm, self.torch.nn.Module):
opt, lr_scheduler = optimizer.create_optimizer_and_scheduler(
model = cm,
num_train_steps = 10**5,
warmup_steps = training_opts.num_warmup_steps,
learning_rate = training_opts.learning_rate,
)
else:
opt, lr_scheduler = None, None
self.committee.append(
QueryByCommittee.CommitteeEstimator(
model = cm,
data_generator = copy.deepcopy(data_generator),
optimizer = opt,
scheduler = lr_scheduler,
training_opts = training_opts,
sha256 = cconfig.sha256,
config = cconfig,
train_fn = self.TrainNNMember if isinstance(cm, self.torch.nn.Module) else self.TrainUnsupervisedMember,
sample_fn = self.SampleNNMember if isinstance(cm, self.torch.nn.Module) else self.SampleUnsupervisedMember,
)
)
        (self.ckpt_path / cconfig.sha256).mkdir(exist_ok = True, parents = True)
        (self.logfile_path / cconfig.sha256).mkdir(exist_ok = True, parents = True)
l.logger().info(self.GetShortSummary())
for member in self.committee:
self.committee_samples.add_member(
member_id = member.model.id,
member_name = member.config.name,
type = "supervised" if isinstance(member.model, self.torch.nn.Module) else "unsupervised",
configuration = member.config.config,
)
return
def model_step(self,
model: 'torch.nn.module',
inputs: typing.Dict[str, 'torch.Tensor'],
is_sampling: bool = False
) -> float:
"""
Run forward function for member model.
"""
outputs = model(
input_ids = inputs['input_ids'].to(self.pytorch.device),
target_ids = inputs['target_ids'].to(self.pytorch.device) if not is_sampling else None,
is_sampling = is_sampling,
)
return outputs
def TrainNNMember(self, member: 'QueryByCommittee.CommitteeEstimator', **kwargs) -> None:
"""
Member-dispatching function for loading checkpoint, training and saving back.
"""
update_dataloader = kwargs.get('update_dataloader', None)
model = member.model.to(self.pytorch.offset_device)
model_name = "{}-{}".format(member.config.name, member.model.id)
data_generator = (
member.data_generator
if update_dataloader is None
else update_dataloader
# + member.data_generator.get_random_subset(
# max(0, abs(len(update_dataloader) - member.training_opts.num_train_steps)))
)
if len(data_generator) == 0:
return
optimizer = member.optimizer
scheduler = member.scheduler
member_path = self.ckpt_path / member.sha256
member_log_path = self.logfile_path / member.sha256
# if self.pytorch.num_nodes > 1:
# distrib.barrier()
# model = self.torch.nn.parallel.DistributedDataParallel(
# model,
# device_ids = [self.pytorch.offset_device],
# output_device = self.pytorch.offset_device,
# )
if self.pytorch.num_gpus > 1:
model = self.torch.nn.DataParallel(model)
current_step = self.loadCheckpoint(model, member_path, optimizer, scheduler)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if current_step >= 0:
l.logger().info("{}: Loaded checkpoint step {}".format(model_name, current_step))
current_step = max(0, current_step)
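    ## For initial training, cover the whole training set but cap at num_train_steps;
    ## for update training, run one pass over the update dataloader on top of the current step.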
num_train_steps = min((len(data_generator) + member.training_opts.train_batch_size) // member.training_opts.train_batch_size, member.training_opts.num_train_steps) if update_dataloader is None else ((len(update_dataloader) + member.training_opts.train_batch_size) // member.training_opts.train_batch_size) + current_step
if current_step < num_train_steps:
model.zero_grad()
# if self.pytorch.num_nodes <= 1:
sampler = self.torch.utils.data.RandomSampler(data_generator, replacement = False)
# else:
# sampler = self.torch.utils.data.DistributedSampler(
# data_generator,
# num_replicas = self.pytorch.num_nodes,
# rank = self.pytorch.torch.distributed.get_rank()
# )
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = data_generator,
batch_size = member.training_opts.train_batch_size,
sampler = (sampler
if not self.pytorch.torch_tpu_available or self.pytorch.torch_xla.xrt_world_size() <= 1
else self.torch.utils.data.distributed.DistributedSampler(
dataset = data_generator,
num_replicas = self.pytorch.num_nodes if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.xrt_world_size(),
rank = self.pytorch.torch.distributed.get_rank() if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False # if environment.WORLD_SIZE == 1 else True,
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
data_generator, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
# Get dataloader iterator and setup hooks.
batch_iterator = iter(loader)
if self.is_world_process_zero():
train_hook = hooks.tensorMonitorHook(
member_log_path, current_step, min((len(data_generator) + member.training_opts.train_batch_size) // member.training_opts.train_batch_size, member.training_opts.steps_per_epoch, 50)
)
try:
with self.torch.enable_grad():
model.train()
# epoch_iter = tqdm.auto.trange(member.training_opts.num_epochs, desc="Epoch", leave = False) if self.is_world_process_zero() else range(member.training_opts.num_epochs)
epoch = num_train_steps // member.training_opts.steps_per_epoch
# In distributed mode, calling the set_epoch() method at
# the beginning of each epoch before creating the DataLoader iterator
# is necessary to make shuffling work properly across multiple epochs.
# Otherwise, the same ordering will be always used.
# if self.pytorch.num_nodes > 1:
# loader.sampler.set_epoch(epoch)
batch_iter = tqdm.tqdm(batch_iterator, desc="Batch", leave = False) if self.is_world_process_zero() else batch_iterator
for inputs in batch_iter:
if self.is_world_process_zero():
start = datetime.datetime.utcnow()
# Run model step on inputs
step_out = self.model_step(model, inputs)
# Backpropagate losses
total_loss = step_out['total_loss'].mean()
total_loss.backward()
self.torch.nn.utils.clip_grad_norm_(model.parameters(), member.training_opts.max_grad_norm)
if self.torch_tpu_available:
self.pytorch.torch_xla.optimizer_step(optimizer)
else:
optimizer.step()
scheduler.step()
## Collect tensors for logging.
# if self.pytorch.num_nodes > 1:
# total_loss = [self.torch.zeros(tuple(step_out['total_loss'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
# self.torch.distributed.all_gather(total_loss, step_out["total_loss"])
# else:
total_loss = step_out['total_loss'].unsqueeze(0).cpu()
if self.is_world_process_zero():
train_hook.step(
train_step = current_step,
total_loss = sum([tl.mean().item() for tl in total_loss]) / len(total_loss),
)
model.zero_grad()
if current_step == 0:
l.logger().info("{}: Starting Loss: {}".format(model_name, sum([tl.mean().item() for tl in total_loss]) / len(total_loss)))
current_step += 1
# End of epoch
self.saveCheckpoint(
model,
member_path,
optimizer = optimizer,
scheduler = scheduler,
step = current_step
)
# if self.pytorch.num_nodes > 1:
# loader.sampler.set_epoch(epoch)
if self.is_world_process_zero():
try:
l.logger().info("{}: Epoch {} Loss: {}".format(model_name, current_step // member.training_opts.steps_per_epoch, train_hook.epoch_loss))
except ZeroDivisionError:
l.logger().error(
"Hook has crashed again: current_step: {}, step_freq: {}, flush_freq: {}, train_step: {}".format(
train_hook.current_step, train_hook.step_freq, train_hook.flush_freq,
current_step
)
)
train_hook.end_epoch()
if self.torch_tpu_available:
self.pytorch.torch_xla.master_print(self.pytorch.torch_xla_met.metrics_report())
except KeyboardInterrupt:
pass
return
def TrainUnsupervisedMember(self, member: 'QueryByCommittee.CommitteeEstimator', **kwargs) -> None:
"""
Train non-NeuralNetwork based architectures, such as DecisionTrees or KMeans.
"""
update_dataloader = kwargs.get('update_dataloader', None)
model = member.model
model_name = "{}-{}".format(member.config.name, member.model.id)
data_generator = member.data_generator + update_dataloader
if len(data_generator) == 0:
return
train_dataset = data_generator.get_batched_dataset()
member_path = self.ckpt_path / member.sha256
member_log_path = self.logfile_path / member.sha256
current_step = self.loadCheckpoint(model, member_path)
if current_step >= 0:
l.logger().info("{}: Loaded checkpoint step {}".format(model_name, current_step))
if current_step < 0 or update_dataloader is not None:
current_step = max(0, current_step)
outputs = model(
input_ids = train_dataset['input_ids'],
target_ids = train_dataset['target_ids'],
is_sampling = False,
)
self.saveCheckpoint(
model,
member_path,
step = current_step + 1,
)
l.logger().info("{}: Trained with {} instances".format(model_name, len(train_dataset['input_ids'])))
return
def Train(self, **kwargs) -> None:
"""
Training point of active learning committee.
"""
# Configure committee members.
update_dataloader = kwargs.get('update_dataloader', None)
if update_dataloader is None:
l.logger().info("Initial committee training.")
self._ConfigModelParams(self.downstream_task.data_generator)
if not self.is_trained or update_dataloader is not None:
if self.is_world_process_zero():
for member in self.committee:
member.train_fn(member, update_dataloader = update_dataloader)
self.is_trained = True
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
return
def Validate(self) -> None:
"""
Perform validation for committee members.
"""
raise NotImplementedError
return
def SampleNNMember(self,
member : 'QueryByCommittee.CommitteeEstimator',
sample_set : 'torch.utils.data.Dataset',
) -> typing.Dict[str, typing.List]:
"""
Sample member of committee. Return predicted label.
"""
model = member.model.to(self.pytorch.offset_device)
model_name = "{}-{}".format(member.config.name, member.model.id)
member_path = self.ckpt_path / member.sha256
member_log_path = self.logfile_path / member.sha256
if self.pytorch.num_nodes > 1:
distrib.barrier()
model = self.torch.nn.parallel.DistributedDataParallel(
model,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
)
elif self.pytorch.num_gpus > 1:
model = self.torch.nn.DataParallel(model)
current_step = self.loadCheckpoint(model, member_path)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if current_step < 0:
l.logger().warn("{}: You are trying to sample an untrained model.".format(model_name))
current_step = max(0, current_step)
if self.pytorch.num_nodes <= 1:
sampler = self.torch.utils.data.SequentialSampler(sample_set)
else:
sampler = self.torch.utils.data.DistributedSampler(
sample_set,
num_replicas = self.pytorch.num_nodes,
rank = self.pytorch.torch.distributed.get_rank(),
shuffle = False,
drop_last = False,
)
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = sample_set,
batch_size = member.training_opts.train_batch_size,
sampler = (sampler
if self.pytorch.num_nodes <= 1 or not self.pytorch.torch_tpu_available or self.pytorch.torch_xla.xrt_world_size() <= 1
else self.torch.utils.data.distributed.DistributedSampler(
dataset = sample_set,
num_replicas = self.pytorch.num_nodes if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.xrt_world_size(),
rank = self.pytorch.torch.distributed.get_rank() if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False # True if environment.WORLD_SIZE > 1 else False,
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
sample_set, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
# Get dataloader iterator and setup hooks.
model.eval()
predictions = {
'train_step' : current_step,
'idx' : None,
'static_features' : None,
'runtime_features': None,
'input_ids' : None,
'predictions' : None,
}
it = tqdm.tqdm(loader, desc="Sample member", leave = False) if self.is_world_process_zero() else loader
for batch in it:
out = self.model_step(model, batch, is_sampling = True)
for key in set(predictions.keys()) - set({'train_step'}):
r = batch[key] if key != "predictions" else out['output_label']
if predictions[key] is None:
predictions[key] = r
else:
predictions[key] = self.torch.cat(
(predictions[key], r),
0
)
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
idx = [self.torch.zeros(tuple(predictions['idx' ].shape), dtype = self.torch.int64).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
static_features = [self.torch.zeros(tuple(predictions['static_features' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
runtime_features = [self.torch.zeros(tuple(predictions['runtime_features'].shape), dtype = self.torch.int64).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
input_ids = [self.torch.zeros(tuple(predictions['input_ids' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
output_label = [self.torch.zeros(tuple(predictions['predictions' ].shape), dtype = self.torch.int64).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(idx, predictions["idx" ].to(self.pytorch.device))
self.torch.distributed.all_gather(static_features, predictions["static_features" ].to(self.pytorch.device))
self.torch.distributed.all_gather(runtime_features, predictions["runtime_features"].to(self.pytorch.device))
self.torch.distributed.all_gather(input_ids, predictions["input_ids" ].to(self.pytorch.device))
self.torch.distributed.all_gather(output_label, predictions["predictions" ])
predictions['idx'] = self.torch.cat(idx)
predictions['static_features'] = self.torch.cat(static_features)
predictions['runtime_features'] = self.torch.cat(runtime_features)
predictions['input_ids'] = self.torch.cat(input_ids)
predictions['predictions'] = self.torch.cat(output_label)
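      ## Restore the original dataset ordering: distributed sampling interleaves batches
      ## across ranks, so scatter every gathered row back to the position given by its 'idx'.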
idx = self.torch.zeros(tuple(predictions['idx' ].shape), dtype = self.torch.int64).to(self.pytorch.device)
static_features = self.torch.zeros(tuple(predictions['static_features' ].shape), dtype = self.torch.float32).to(self.pytorch.device)
runtime_features = self.torch.zeros(tuple(predictions['runtime_features'].shape), dtype = self.torch.int64).to(self.pytorch.device)
input_ids = self.torch.zeros(tuple(predictions['input_ids' ].shape), dtype = self.torch.float32).to(self.pytorch.device)
output_label = self.torch.zeros(tuple(predictions['predictions' ].shape), dtype = self.torch.int64).to(self.pytorch.device)
for x, i in enumerate(predictions['idx']):
idx [int(i)] = predictions['idx'] [x]
static_features [int(i)] = predictions['static_features'] [x]
runtime_features[int(i)] = predictions['runtime_features'][x]
input_ids [int(i)] = predictions['input_ids'] [x]
output_label [int(i)] = predictions['predictions'] [x]
predictions['idx'] = idx
predictions['static_features'] = static_features
predictions['runtime_features'] = runtime_features
predictions['input_ids'] = input_ids
predictions['predictions'] = output_label
for key in set(predictions.keys()) - set({'train_step'}):
if key == 'predictions':
predictions[key] = [self.downstream_task.TargetIDtoLabels(int(x)) for x in predictions[key].cpu().numpy()]
elif key == "runtime_features":
predictions[key] = [[int(y) for y in x.cpu().numpy()] for x in predictions[key]]
elif key == "idx":
predictions[key] = [int(x.cpu().numpy()) for x in predictions[key]]
else:
predictions[key] = [[float(y) for y in x.cpu().numpy()] for x in predictions[key]]
return predictions
def SampleUnsupervisedMember(self,
member : 'QueryByCommittee.CommitteeEstimator',
sample_set : 'torch.utils.data.Dataset',
) -> typing.Dict[str, typing.List]:
"""
Sample non-NeuralNetwork based architectures, such as DecisionTrees or KMeans.
"""
model = member.model
model_name = "{}-{}".format(member.config.name, member.model.id)
sample_dataset = sample_set.get_batched_dataset()
member_path = self.ckpt_path / member.sha256
member_log_path = self.logfile_path / member.sha256
current_step = self.loadCheckpoint(model, member_path)
if current_step < 0:
l.logger().warn("{}: You are trying to sample an untrained model.".format(model_name))
current_step = max(0, current_step)
if self.is_world_process_zero():
outputs = model(
input_ids = sample_dataset['input_ids'],
is_sampling = True,
)
predictions = {
'train_step' : current_step,
'idx' : [int(x) for x in sample_dataset['idx']],
'static_features' : sample_dataset['static_features'],
'runtime_features': sample_dataset['runtime_features'],
'input_ids' : sample_dataset['input_ids'],
'predictions' : [self.downstream_task.TargetIDtoLabels(i) for i in outputs['predicted_labels']],
}
distrib.broadcast(predictions)
else:
predictions = distrib.broadcast()
distrib.barrier()
return predictions
def SampleCommittee(self,
sample_set: 'torch.utils.data.Dataset',
) -> typing.Dict[
'QueryByCommittee.CommitteeEstimator',
typing.Dict[str, 'torch.Tensor']
]:
"""
Sample committee with a set of inputs.
    Return a dictionary mapping each committee member to the
    predictions it computed over the whole sample set.
"""
self._ConfigModelParams()
committee_predictions = {}
for member in self.committee:
key = "{}_{}".format(member.config.name, member.model.id)
committee_predictions[key] = member.sample_fn(member, sample_set)
return committee_predictions
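  # Illustrative sketch of the returned structure (values hypothetical, not from
  # the source): one entry per committee member, keyed by "<config name>_<model id>":
  #   {
  #     "MLP_0"    : {'train_step': 2000, 'idx': [...], 'static_features': [...],
  #                   'runtime_features': [...], 'input_ids': [...], 'predictions': [...]},
  #     "KMeans_1" : {...},
  #   }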
def Sample(self, sample_set: 'torch.Dataset') -> typing.List[typing.Dict[str, float]]:
"""
Active learner sampling.
    This method queries all committee members and measures the entropy of their
    predictions to estimate how informative each region of the feature space is.
"""
# Ask the committee for their predictions.
committee_predictions = self.SampleCommittee(sample_set)
space_samples = []
for nsample in range(len(sample_set)):
# Get the feature vectors for each sample.
for model, samples in committee_predictions.items():
if nsample != samples['idx'][nsample]:
raise ValueError("{} Mismatch in sample output: Expected {} but had {}".format(model, nsample, samples['idx'][nsample]))
static_feats = self.downstream_task.VecToStaticFeatDict(samples['static_features'][nsample])
run_feats = self.downstream_task.VecToRuntimeFeatDict(samples['runtime_features'][nsample])
input_feats = self.downstream_task.VecToInputFeatDict(samples['input_ids'][nsample])
break
# Calculate entropy for that sample.
ent = self.entropy([x['predictions'][nsample] for x in committee_predictions.values()])
# Save the dictionary entry.
space_samples.append({
'train_step' : {k: v['train_step'] for k, v in committee_predictions.items()},
'static_features' : static_feats,
'runtime_features' : run_feats,
'input_features' : input_feats,
'member_predictions' : {k: v['predictions'][nsample] for k, v in committee_predictions.items()},
'entropy' : ent,
})
# Add everything to database.
self.committee_samples.add_samples(self.sample_epoch, space_samples)
self.sample_epoch += 1
return sorted(space_samples, key = lambda x: x['entropy'], reverse = True)
def entropy(self, labels, base=None):
""" Computes entropy of label distribution. """
if len(labels) <= 1:
return 0
value,counts = np.unique(labels, return_counts=True)
probs = counts / len(labels)
n_classes = np.count_nonzero(probs)
if n_classes <= 1:
return 0
entropy = 0.0
# Compute entropy
base = math.e if base is None else base
for p in probs:
entropy -= p * math.log(p, base)
return entropy
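  # Worked example (labels are illustrative, not from the source): for member
  # predictions ['GPU', 'GPU', 'CPU'] the unique counts are [2, 1], so
  # probs = [2/3, 1/3] and, with the default natural-log base,
  #   entropy = -(2/3)*ln(2/3) - (1/3)*ln(1/3) ~= 0.6365.
  # If every member agrees the entropy is 0, so disagreement ranks a sample
  # higher when Sample() sorts by entropy.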
def saveCheckpoint(self,
model : 'torch.nn.Module',
path : pathlib.Path,
optimizer = None,
scheduler = None,
step : int = None,
) -> None:
"""
Saves model, scheduler, optimizer checkpoints per epoch.
"""
if self.is_world_process_zero():
ckpt_comp = lambda x: path / "{}-{}.pt".format(x, step)
if isinstance(model, self.torch.nn.Module):
if self.torch_tpu_available:
if self.pytorch.torch_xla_model.rendezvous("saving_checkpoint"):
self.pytorch.torch_xla_model.save(model, ckpt_comp("model"))
self.pytorch.torch_xla.rendezvous("saving_optimizer_states")
self.pytorch.torch_xla.save(optimizer.state_dict(), ckpt_comp("optimizer"))
self.pytorch.torch_xla.save(scheduler.state_dict(), ckpt_comp("scheduler"))
else:
if isinstance(model, self.torch.nn.DataParallel):
self.torch.save(model.module.state_dict(), ckpt_comp("model"))
else:
self.torch.save(model.state_dict(), ckpt_comp("model"))
self.torch.save(optimizer.state_dict(), ckpt_comp("optimizer"))
self.torch.save(scheduler.state_dict(), ckpt_comp("scheduler"))
else:
checkpoint_dict = model.get_checkpoint_state()
with open(ckpt_comp("model"), 'wb') as outf:
pickle.dump(checkpoint_dict, outf)
with open(path / "checkpoint.meta", 'a') as mf:
mf.write("train_step: {}\n".format(step))
return
def loadCheckpoint(self,
model : 'torch.nn.Module',
path : pathlib.Path,
optimizer = None,
scheduler = None
) -> int:
"""
Load model checkpoint. Loads either most recent epoch, or selected checkpoint through FLAGS.
"""
if not (path / "checkpoint.meta").exists():
return -1
with open(path / "checkpoint.meta", 'r') as mf:
key = "train_step"
get_step = lambda x: int(x.replace("\n", "").replace("{}: ".format(key), ""))
lines = mf.readlines()
entries = set({get_step(x) for x in lines if key in x})
      if FLAGS.select_checkpoint_step == -1:
        ckpt_step = max(entries)
      elif FLAGS.select_checkpoint_step in entries:
        ckpt_step = FLAGS.select_checkpoint_step
      else:
        raise ValueError("{} not found in checkpoint folder.".format(FLAGS.select_checkpoint_step))
ckpt_comp = lambda x: path / "{}-{}.pt".format(x, ckpt_step)
if isinstance(model, self.torch.nn.DataParallel):
try:
model.module.load_state_dict(
self.torch.load(ckpt_comp("model"))
)
except RuntimeError:
"""
        PyTorch cannot load a DataParallel checkpoint directly into a plain
        module (or vice versa), so this fallback strips the 'module.' prefix
        from the state-dict keys, or adds it when moving from a single-GPU
        checkpoint to a multi-GPU model.
"""
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
model.module.load_state_dict(new_state_dict)
model.eval()
elif isinstance(model, self.torch.nn.Module):
try:
model.load_state_dict(
self.torch.load(ckpt_comp("model"), map_location=self.pytorch.device)
)
except RuntimeError:
"""
        PyTorch cannot load a DataParallel checkpoint directly into a plain
        module (or vice versa), so this fallback strips the 'module.' prefix
        from the state-dict keys, or adds it when moving from a single-GPU
        checkpoint to a multi-GPU model.
"""
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.eval()
else:
checkpoint_dict = pickle.load(open(ckpt_comp("model"), 'rb'))
model.load_checkpoint_state(checkpoint_dict)
return ckpt_step
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on
several machines, this is only going to be :obj:`True` for one process).
"""
if self.torch_tpu_available:
return self.pytorch.torch_xla_model.is_master_ordinal(local=False)
elif self.pytorch.num_nodes > 1:
return self.torch.distributed.get_rank() == 0
else:
return True
def GetShortSummary(self) -> str:
return "Short summary TODO"
| 33,727 | 42.407979 | 324 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/committee/committee_database.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for databases of active learning query by committee samples."""
import datetime
import typing
import progressbar
import pathlib
import sqlite3
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
Base = declarative.declarative_base()
class QBCResults(Base):
__tablename__ = "qbc_results"
"""
DB Table for concentrated validation results.
"""
key : str = sql.Column(sql.String(1024), primary_key=True)
results : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
class CommitteeConfig(Base, sqlutil.ProtoBackedMixin):
"""
A table where each row presents the configuration of a committee member.
"""
__tablename__ = "committee_members"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# Assigned member ID
member_id : int = sql.Column(sql.Integer, nullable = False, index = True)
# Name of member
member_name : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Type of AI architecture (supervised, unsupervised etc.)
type : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# configuration specs
configuration : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
id : int,
member_id : int,
member_name : str,
type : str,
configuration : str
) -> 'CommitteeConfig':
return CommitteeConfig(
id = id,
member_id = member_id,
member_name = member_name,
type = type,
configuration = str(configuration),
date_added = datetime.datetime.utcnow(),
)
class CommitteeSample(Base, sqlutil.ProtoBackedMixin):
"""A database row representing a AL committee sample.
"""
__tablename__ = "qbc_samples"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# unique hash of sample text
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
# Sample step iteration ID.
sample_epoch : int = sql.Column(sql.Integer, nullable = False)
# model's train step that generated the sample
train_step : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Original input where the feed came from
static_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Starting feed of model
runtime_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# String-format generated text
input_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Predictions of committee
member_predictions : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Amount of entropy
entropy : float = sql.Column(sql.Float, nullable=False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
id : int,
sample_epoch : int,
train_step : typing.Dict[str, int],
static_features : typing.Dict[str, float],
runtime_features : typing.Dict[str, float],
input_features : typing.Dict[str, float],
member_predictions : typing.Dict[str, str],
entropy : float,
) -> 'CommitteeSample':
str_train_step = '\n'.join(["{}:{}".format(k, v) for k, v in train_step.items()])
str_static_features = '\n'.join(["{}:{}".format(k, v) for k, v in static_features.items()])
str_runtime_features = '\n'.join(["{}:{}".format(k, v) for k, v in runtime_features.items()])
str_input_features = '\n'.join(["{}:{}".format(k, v) for k, v in input_features.items()])
str_member_predictions = '\n'.join(["{}:{}".format(k, v) for k, v in member_predictions.items()])
sha256 = crypto.sha256_str(
str_train_step
+ str_static_features
+ str_runtime_features
+ str_input_features
+ str_member_predictions
)
return CommitteeSample(
id = id,
sha256 = sha256,
sample_epoch = sample_epoch,
train_step = str_train_step,
static_features = str_static_features,
runtime_features = str_runtime_features,
input_features = str_input_features,
member_predictions = str_member_predictions,
entropy = entropy,
date_added = datetime.datetime.utcnow(),
)
class CommitteeSamples(sqlutil.Database):
"""A database of Query-by-Committee samples."""
def __init__(self, url: str, must_exist: bool = False, is_replica: bool = False):
if environment.WORLD_RANK == 0 or is_replica:
super(CommitteeSamples, self).__init__(url, Base, must_exist = must_exist)
if environment.WORLD_SIZE > 1 and not is_replica:
# Conduct engine connections to replicated preprocessed chunks.
self.base_path = pathlib.Path(url.replace("sqlite:///", "")).resolve().parent
hash_id = self.base_path.name
try:
tdir = pathlib.Path(FLAGS.local_filesystem).resolve() / hash_id / "node_committee_samples"
except Exception:
tdir = pathlib.Path("/tmp").resolve() / hash_id / "node_committee_samples"
try:
tdir.mkdir(parents = True, exist_ok = True)
except Exception:
pass
self.replicated_path = tdir / "samples_{}.db".format(environment.WORLD_RANK)
self.replicated = CommitteeSamples(
url = "sqlite:///{}".format(str(self.replicated_path)),
must_exist = must_exist,
is_replica = True
)
distrib.barrier()
return
@property
def member_count(self):
"""Number of committee members in DB."""
with self.get_session() as s:
count = s.query(CommitteeConfig).count()
return count
@property
def sample_count(self):
"""Number of samples in DB."""
with self.get_session() as s:
count = s.query(CommitteeSample).count()
return count
@property
def get_data(self):
"""Return all database in list format"""
with self.get_session() as s:
return s.query(CommitteeSample).all()
@property
def cur_sample_epoch(self):
"""Return the most recent checkpointed current sample step."""
if self.sample_count > 0:
with self.get_session() as s:
return max([int(x.sample_epoch) for x in s.query(CommitteeSample).all()])
else:
return 0
@property
def get_session(self):
"""
get proper DB session.
"""
if environment.WORLD_SIZE == 1 or environment.WORLD_RANK == 0:
return self.Session
else:
return self.replicated.Session
def add_member(self, member_id: int, member_name: str, type: str, configuration: str) -> None:
"""
Add committee member if not exists.
"""
with self.get_session(commit = True) as s:
exists = s.query(CommitteeConfig).filter_by(member_id = member_id).first()
if not exists:
s.add(CommitteeConfig.FromArgs(self.member_count, member_id, member_name, type, configuration))
s.commit()
return
def add_samples(self, sample_epoch: int, samples: typing.Dict[str, typing.Any]) -> None:
"""
If not exists, add sample to Samples table.
"""
hash_cache = set()
offset_idx = self.sample_count
with self.get_session(commit = True) as s:
for sample in samples:
sample_entry = CommitteeSample.FromArgs(
id = offset_idx,
sample_epoch = sample_epoch,
train_step = sample['train_step'],
static_features = sample['static_features'],
runtime_features = sample['runtime_features'],
input_features = sample['input_features'],
member_predictions = sample['member_predictions'],
entropy = sample['entropy'],
)
exists = s.query(CommitteeSample).filter_by(sha256 = sample_entry.sha256).first()
if not exists and sample_entry.sha256 not in hash_cache:
s.add(sample_entry)
hash_cache.add(sample_entry.sha256)
offset_idx += 1
s.commit()
return
| 9,469 | 37.811475 | 105 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/committee/config.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class for committee models."""
import typing
import pathlib
from deeplearning.benchpress.active_models import downstream_tasks
from deeplearning.benchpress.proto import active_learning_pb2
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import crypto
def AssertConfigIsValid(config: active_learning_pb2.QueryByCommittee) -> None:
"""
Parse proto description and check for validity.
"""
tm = 0
## Parse all MLPs.
for nn in config.mlp:
tl = 0
pbutil.AssertFieldIsSet(nn, "initial_learning_rate_micros")
pbutil.AssertFieldIsSet(nn, "batch_size")
pbutil.AssertFieldIsSet(nn, "num_warmup_steps")
for l in nn.layer:
if l.HasField("embedding"):
pbutil.AssertFieldIsSet(l.embedding, "num_embeddings")
pbutil.AssertFieldIsSet(l.embedding, "embedding_dim")
elif l.HasField("linear"):
pbutil.AssertFieldIsSet(l.linear, "in_features")
pbutil.AssertFieldIsSet(l.linear, "out_features")
elif l.HasField("dropout"):
pbutil.AssertFieldIsSet(l.dropout, "p")
elif l.HasField("layer_norm"):
pbutil.AssertFieldIsSet(l.layer_norm, "normalized_shape")
pbutil.AssertFieldIsSet(l.layer_norm, "eps")
elif l.HasField("act_fn"):
pbutil.AssertFieldIsSet(l.act_fn, "fn")
else:
raise AttributeError(l)
tl += 1
assert tl > 0, "Model is empty. No layers found."
tm += 1
## Parse all KMeans algos.
for km in config.k_means:
pbutil.AssertFieldIsSet(km, "n_clusters")
pbutil.AssertFieldConstraint(
km,
"init",
lambda x: x in {"k-means++", "random"},
"KMeans algorithm can only be 'k-means++' or 'random'."
)
pbutil.AssertFieldIsSet(km, "n_init")
pbutil.AssertFieldIsSet(km, "max_iter")
pbutil.AssertFieldIsSet(km, "tol")
pbutil.AssertFieldConstraint(
km,
"algorithm",
lambda x : x in {"auto", "full", "elkan"},
"KMeans algorithm can only be 'auto', 'full' or 'elkan'."
)
tm += 1
## Parse KNN algos.
for k in config.knn:
pbutil.AssertFieldIsSet(k, "n_neighbors")
pbutil.AssertFieldConstraint(
k,
"weights",
lambda x: x in {"uniform", "distance"},
"KNN weights can only be 'uniform' or 'distance'."
)
pbutil.AssertFieldConstraint(
k,
"algorithm",
lambda x: x in {"auto", "ball_tree", "kd_tree", "brute"},
"KNN algorithm can only be 'auto', 'ball_tree', 'kd_tree' or 'brute'."
)
pbutil.AssertFieldIsSet(k, "leaf_size")
pbutil.AssertFieldIsSet(k, "p")
## Add another for loop here if more committee model types are added.
assert tm > 0, "Committee is empty. No models found."
return
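# A hedged textproto-style sketch of a QueryByCommittee config that would pass
# the checks above (all field values are illustrative, not from the source):
#   mlp {
#     initial_learning_rate_micros: 20
#     batch_size: 32
#     num_warmup_steps: 100
#     layer { linear { in_features: 70 out_features: 128 } }
#     layer { act_fn { fn: "relu" } }
#     layer { linear { in_features: 128 out_features: 6 } }
#   }
#   k_means {
#     n_clusters: 6
#     init: "k-means++"
#     n_init: 10
#     max_iter: 300
#     tol: 0.0001
#     algorithm: "auto"
#   }
#   knn {
#     n_neighbors: 5
#     weights: "distance"
#     algorithm: "auto"
#     leaf_size: 30
#     p: 2
#   }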
class ModelConfig(object):
model_type = "committee"
@classmethod
def FromConfig(cls,
config: active_learning_pb2.QueryByCommittee,
downstream_task: downstream_tasks.DownstreamTask,
num_train_steps: int,
) -> typing.List["ModelConfig"]:
model_configs = []
nts = num_train_steps
for m in config.mlp:
model_configs.append(NNModelConfig(m, downstream_task, nts))
for m in config.k_means:
model_configs.append(KMeansModelConfig(m, downstream_task, nts))
for m in config.knn:
model_configs.append(KNNModelConfig(m, downstream_task, nts))
return model_configs
@property
def num_labels(self) -> int:
"""
The number of output labels for classification models.
"""
return self.downstream_task.output_size
@property
def num_features(self) -> int:
"""
    The number of input features to the committee models.
"""
return self.downstream_task.input_size
def __init__(self,
name: str,
config : typing.Union[active_learning_pb2.MLP, active_learning_pb2.KMeans],
downstream_task: downstream_tasks.DownstreamTask
) -> "ModelConfig":
self.name = name
self.config = config
self.downstream_task = downstream_task
self.sha256 = crypto.sha256_str(str(config))
## Placeholding initialization
self.num_train_steps = None
self.num_warmup_steps = None
self.num_epochs = None
self.steps_per_epoch = None
self.batch_size = None
self.learning_rate = None
self.max_grad_norm = None
self.layer_config = None
self.n_clusters = None
self.init = None
self.n_init = None
self.max_iter = None
self.tol = None
self.algorithm = None
self.n_neighbors = None
self.weights = None
self.algorithm = None
self.leaf_size = None
self.p = None
return
class NNModelConfig(ModelConfig):
"""
NeuralNetwork-based architectural config.
"""
def __init__(self,
config : active_learning_pb2.MLP,
downstream_task : downstream_tasks.DownstreamTask,
num_train_steps : int
) -> "ModelConfig":
super(NNModelConfig, self).__init__("MLP", config, downstream_task)
## NN-specific attributes
self.num_train_steps = (num_train_steps + config.batch_size) // config.batch_size
self.num_warmup_steps = config.num_warmup_steps
self.num_epochs = 1
self.steps_per_epoch = self.num_train_steps
self.batch_size = config.batch_size
self.learning_rate = config.initial_learning_rate_micros / 1e6
self.max_grad_norm = 1.0
if len(self.config.layer) == 0:
raise ValueError("Layer list is empty for committee model")
if self.config.layer[0].HasField("linear"):
if self.config.layer[0].linear.in_features != self.downstream_task.input_size:
raise ValueError("Mismatch between committee member's input size {} and downstream task's input size {}".format(
self.config.layer[0].linear.in_features,
self.downstream_task.input_size
)
)
if self.config.layer[-1].HasField("linear"):
if self.config.layer[-1].linear.out_features != self.downstream_task.output_size:
raise ValueError("Mismatch between committee member's output size {} and downstream task's output size {}".format(
self.config.layer[-1].linear.out_features,
self.downstream_task.output_size
)
)
self.layer_config = []
for l in self.config.layer:
if l.HasField("embedding"):
self.layer_config.append((
'Embedding', {
'num_embeddings': l.embedding.num_embeddings,
'embedding_dim' : l.embedding.embedding_dim,
'padding_idx' : l.embedding.padding_idx if l.embedding.HasField("padding_idx") else None
}
)
)
elif l.HasField("linear"):
self.layer_config.append((
'Linear', {
'in_features': l.linear.in_features,
'out_features': l.linear.out_features,
}
)
)
elif l.HasField("dropout"):
self.layer_config.append((
'Dropout', {
'p': l.dropout.p,
}
)
)
elif l.HasField("layer_norm"):
self.layer_config.append((
'LayerNorm', {
'normalized_shape': l.layer_norm.normalized_shape,
'eps': l.layer_norm.eps,
}
)
)
elif l.HasField("act_fn"):
self.layer_config.append((
l.act_fn.fn, {}
)
)
return
class KMeansModelConfig(ModelConfig):
"""
KMeans config subclass.
"""
def __init__(self,
config : active_learning_pb2.KMeans,
downstream_task : downstream_tasks.DownstreamTask,
num_train_steps : int
) -> "ModelConfig":
super(KMeansModelConfig, self).__init__("KMeans", config, downstream_task)
## KMeans-specific attributes.
self.n_clusters = self.config.n_clusters
self.init = self.config.init
self.n_init = self.config.n_init
self.max_iter = self.config.max_iter
self.tol = self.config.tol
self.algorithm = self.config.algorithm
self.num_train_steps = num_train_steps
return
class KNNModelConfig(ModelConfig):
"""
KNN config subclass.
"""
def __init__(self,
config : active_learning_pb2.KMeans,
downstream_task : downstream_tasks.DownstreamTask,
num_train_steps : int
) -> "ModelConfig":
super(KNNModelConfig, self).__init__("KNN", config, downstream_task)
    ## KNN-specific attributes.
self.n_neighbors = self.config.n_neighbors
self.weights = self.config.weights
self.algorithm = self.config.algorithm
self.leaf_size = self.config.leaf_size
self.p = self.config.p
self.num_train_steps = num_train_steps
return
| 9,649 | 33.71223 | 122 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/committee/models.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Here all the committee members are defined.
"""
import math
import sys
import typing
import numpy as np
from sklearn import cluster as sklearn_cluster
from sklearn import neighbors as sklearn_neighbors
from deeplearning.benchpress.active_models.committee import config
from deeplearning.benchpress.models.torch_bert import activations
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import logging as l
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
ACT2FN = {
"gelu" : activations.gelu,
"relu" : torch.nn.functional.relu,
"swish" : activations.swish,
"gelu_new" : activations.gelu_new,
"mish" : mish,
"softmax" : torch.nn.Softmax
}
class CommitteeModels(object):
"""
Abstract representation of model committee.
"""
@classmethod
def FromConfig(cls, id: int, config: config.ModelConfig) -> "CommitteeModels":
return {
'MLP' : MLP,
'KMeans' : KMeans,
'KNN' : KNN,
}[config.name](id, config)
def __init__(self, id: int):
super(CommitteeModels, self).__init__()
self.id = id
return
def forward(self, *args, **kwargs) -> None:
raise NotImplementedError("Abstract class.")
def get_checkpoint_state(self, *args, **kwargs) -> None:
raise NotImplementedError("Only for non-NN modules")
def load_checkpoint_state(self, *args, **kwargs) -> None:
raise NotImplementedError("Only for non-NN modules")
class MLP(CommitteeModels, torch.nn.Module):
"""
A modular MLP model that supports Linear, Dropout, LayerNorm and activations.
"""
def __init__(self, id: int, config: config.ModelConfig):
super(MLP, self).__init__(id)
self.config = config.layer_config
self.layers = []
layers = {
'Embedding' : torch.nn.Embedding,
'Linear' : torch.nn.Linear,
'Dropout' : torch.nn.Dropout,
'LayerNorm' : torch.nn.LayerNorm,
}
layers.update(ACT2FN)
self.layers = torch.nn.ModuleList([layers[layer[0]](**layer[1]) for layer in self.config])
return
def calculate_loss(self,
outputs: torch.Tensor,
target_ids: torch.Tensor,
) -> torch.Tensor:
"""
Categorical cross-entropy function.
"""
## Calculate categorical label loss.
loss_fn = torch.nn.CrossEntropyLoss()
label_loss = loss_fn(outputs.to(torch.float32), target_ids.squeeze(1))
## Calculate top-1 accuracy of predictions across batch.
hits, total = 0, int(outputs.size(0))
for out, target in zip(torch.argmax(outputs, dim = 1), target_ids):
if out == target:
hits += 1
return label_loss, torch.FloatTensor([hits / total])
def forward(self,
input_ids : torch.Tensor,
target_ids : torch.Tensor = None,
is_sampling : bool = False
) -> torch.Tensor:
"""
Args:
input_ids: Input features for training or prediction.
target_ids: Target tokens to predict during training.
static_features: List of static input features of respective sample to predict.
is_sampling: Select between training and sampling method.
"""
device = input_ids.get_device()
device = device if device >= 0 else 'cpu'
out = input_ids
for layer in self.layers:
out = layer(out)
if not is_sampling:
total_loss, batch_accuracy = self.calculate_loss(out, target_ids)
return {
'total_loss' : total_loss,
'accuracy' : batch_accuracy.to(device),
'output_probs' : out,
'output_label' : torch.argmax(out)
}
else:
return {
'output_probs' : out,
'output_label' : torch.argmax(out, dim = 1),
}
class KMeans(CommitteeModels):
"""
Wrapper class to manage, fit and predict KMeans clusters.
"""
def __init__(self, id: int, config: config.ModelConfig):
super(KMeans, self).__init__(id)
self.config = config
self.target_ids = self.config.downstream_task.output_ids
self.kmeans = sklearn_cluster.KMeans(
n_clusters = self.config.n_clusters,
init = self.config.init,
n_init = self.config.n_init,
max_iter = self.config.max_iter,
tol = self.config.tol,
algorithm = self.config.algorithm,
)
## The following two variables are the model's attributes.
self.classifier = None
self.cluster_map = {}
return
def __call__(self,
input_ids : np.array,
target_ids : np.array = None,
is_sampling : bool = False
) -> None:
if not is_sampling:
## Create a map for labels from target ids, and cluster IDS.
self.cluster_map = {
cluster_id: [0] * self.config.num_labels for cluster_id in range(self.config.n_clusters)
}
self.classifier = self.kmeans.fit(input_ids)
for cluster_id, target_id in zip(self.classifier.labels_, target_ids):
self.cluster_map[cluster_id][int(target_id)] += 1
return {
'cluster_map' : self.cluster_map,
'cluster_labels' : self.classifier.labels_,
}
else:
target_labels = []
if not self.classifier:
for idx, _ in enumerate(input_ids):
target_labels.append(
np.random.choice(a = np.arange(self.config.num_labels))
)
return {
'cluster_labels' : [],
'predicted_labels' : target_labels,
}
else:
cluster_labels = self.classifier.predict(input_ids)
for x in cluster_labels:
p = [(y / sum(self.cluster_map[x]) if sum(self.cluster_map[x]) else 0.5) for y in self.cluster_map[x]]
p = p / np.array(p).sum()
target_labels.append(
np.random.choice(a = np.arange(self.config.num_labels), p = p)
)
return {
'cluster_labels' : cluster_labels,
'predicted_labels' : target_labels,
}
def get_checkpoint_state(self) -> typing.Dict[str, typing.Any]:
"""
Return the blob that is to be checkpointed.
"""
return {
'kmeans' : self.classifier,
'cluster_map' : self.cluster_map,
}
def load_checkpoint_state(self, checkpoint_state: typing.Dict[str, typing.Any]) -> None:
"""
Load the checkpoints to the class states.
"""
self.classifier = checkpoint_state['kmeans']
self.cluster_map = checkpoint_state['cluster_map']
return
class KNN(CommitteeModels):
"""
Wrapper class to manage, fit and predict KNN algorithm.
"""
def __init__(self, id: int, config: config.ModelConfig):
super(KNN, self).__init__(id)
self.config = config
self.knn = sklearn_neighbors.KNeighborsRegressor(
n_neighbors = self.config.n_neighbors,
weights = self.config.weights,
algorithm = self.config.algorithm,
leaf_size = self.config.leaf_size,
p = self.config.p,
n_jobs = -1,
)
## The model's attributes
self.classifier = None
return
def __call__(self,
input_ids : np.array,
target_ids : np.array = None,
is_sampling : bool = False,
) -> typing.Dict[str, np.array]:
if not is_sampling:
self.classifier = self.knn.fit(input_ids, target_ids)
return {}
else:
if not self.classifier:
return {
'predicted_labels' : [np.random.choice(a = np.arange(self.config.num_labels)) for x in input_ids]
}
else:
labels = self.classifier.predict(input_ids)
return {
'predicted_labels' : [int(round(float(x) + sys.float_info.epsilon)) for x in labels]
}
def get_checkpoint_state(self) -> typing.Dict[str, typing.Any]:
"""
Return the blob that is to be checkpointed.
"""
return {'knn' : self.classifier,}
def load_checkpoint_state(self, checkpoint_state: typing.Dict[str, typing.Any]) -> None:
"""
Load the checkpoints to the class states.
"""
self.classifier = checkpoint_state['knn']
return | 8,757 | 31.557621 | 112 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/committee/optimizer.py | # coding=utf-8
# Copyright 2022 The Google AI Language Team Authors, The HuggingFace Inc. team and Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import typing
from deeplearning.benchpress.util.pytorch import torch
def create_optimizer_and_scheduler(model,
num_train_steps: int,
warmup_steps: int,
learning_rate: float,
adam_beta1 = 0.9,
adam_beta2 = 0.999,
adam_epsilon = 1e-6,
weight_decay = 0.01,
):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
  Trainer's init through :obj:`optimizers`, or subclass and override this method.
"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
opt = AdamW(
optimizer_grouped_parameters,
lr = learning_rate,
betas = (adam_beta1, adam_beta2),
eps = adam_epsilon,
)
lr_scheduler = get_linear_schedule_with_warmup(
opt, num_warmup_steps = warmup_steps, num_training_steps = num_train_steps
)
return opt, lr_scheduler
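# Minimal usage sketch (step counts and learning rate are illustrative):
#   opt, lr_scheduler = create_optimizer_and_scheduler(
#     model, num_train_steps = 10000, warmup_steps = 1000, learning_rate = 2e-5)
#   ...
#   loss.backward()
#   opt.step()
#   lr_scheduler.step()
#   opt.zero_grad()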
def get_constant_schedule(optimizer: torch.optim.Optimizer, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate, using the learning rate set in optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
return torch.optim.lr_scheduler.LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: torch.optim.Optimizer, num_warmup_steps: int, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0,
after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The totale number of training steps.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
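# Worked example (numbers are illustrative): with num_warmup_steps = 100 and
# num_training_steps = 1000, the LR multiplier is 0.5 at step 50, 1.0 at
# step 100, 0.5 again at step 550, and 0.0 at step 1000.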
def get_cosine_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
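# Worked example (numbers are illustrative): with num_warmup_steps = 100,
# num_training_steps = 1100 and the default num_cycles = 0.5, the multiplier
# follows half a cosine wave after warmup: 1.0 at step 100, 0.5 at step 600
# (progress 0.5, cos(pi/2) = 0) and 0.0 at step 1100.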
def get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`int`, `optional`, defaults to 1):
The number of hard restarts to use.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
class AdamW(torch.optim.Optimizer):
"""
Implements Adam algorithm with weight decay fix as introduced in
`Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__.
Parameters:
params (:obj:`typing.Iterable[torch.nn.parameter.Parameter]`):
typing.Iterable of parameters to optimize or dictionaries defining parameter groups.
lr (:obj:`float`, `optional`, defaults to 1e-3):
The learning rate to use.
betas (:obj:`typing.Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
Adam's betas parameters (b1, b2).
eps (:obj:`float`, `optional`, defaults to 1e-6):
Adam's epsilon for numerical stability.
weight_decay (:obj:`float`, `optional`, defaults to 0):
Decoupled weight decay to apply.
correct_bias (:obj:`bool`, `optional`, defaults to `True`):
            Whether or not to correct bias in Adam (for instance, in the Bert TF repository they use :obj:`False`).
"""
def __init__(
self,
params: typing.Iterable[torch.nn.parameter.Parameter],
lr: float = 1e-3,
betas: typing.Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0.0,
correct_bias: bool = True,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
super().__init__(params, defaults)
def step(self, closure: typing.Callable = None):
"""
Performs a single optimization step.
Arguments:
closure (:obj:`typing.Callable`, `optional`): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
step_size = group["lr"]
if group["correct_bias"]: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state["step"]
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
return loss
| 12,214 | 39.989933 | 129 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/expected_error_reduction/eer_database.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for databases of active learning query by committee samples."""
import datetime
import typing
import progressbar
import pathlib
import sqlite3
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
Base = declarative.declarative_base()
class EERResults(Base):
__tablename__ = "eer)results"
"""
DB Table for concentrated validation results.
"""
key : str = sql.Column(sql.String(1024), primary_key=True)
results : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
class EERSample(Base, sqlutil.ProtoBackedMixin):
"""A database row representing a AL model sample.
"""
__tablename__ = "eer_samples"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# unique hash of sample text
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
# Sample step iteration ID.
sample_epoch : int = sql.Column(sql.Integer, nullable = False)
# model's train step that generated the sample
train_step : int = sql.Column(sql.Integer, nullable = False)
# Original input where the feed came from
src : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Runtime features of input.
runtime_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Input to the model.
input_features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Predicted label
prediction : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
id : int,
sample_epoch : int,
train_step : int,
src : str,
runtime_features : typing.Dict[str, float],
input_features : typing.Dict[str, float],
prediction : str,
) -> 'EERSample':
str_input_features = '\n'.join(["{}:{}".format(k, v) for k, v in input_features.items()])
str_runtime_features = '\n'.join(["{}:{}".format(k, v) for k, v in runtime_features.items()])
sha256 = crypto.sha256_str(
str(train_step)
+ src
+ str_runtime_features
+ str_input_features
+ prediction
)
return EERSample(
id = id,
sha256 = sha256,
sample_epoch = sample_epoch,
train_step = train_step,
src = src,
runtime_features = str_runtime_features,
input_features = str_input_features,
prediction = prediction,
date_added = datetime.datetime.utcnow(),
)
class EERSamples(sqlutil.Database):
"""A database of Query-by-Committee samples."""
def __init__(self, url: str, must_exist: bool = False, is_replica: bool = False):
if environment.WORLD_RANK == 0 or is_replica:
super(EERSamples, self).__init__(url, Base, must_exist = must_exist)
if environment.WORLD_SIZE > 1 and not is_replica:
# Conduct engine connections to replicated preprocessed chunks.
self.base_path = pathlib.Path(url.replace("sqlite:///", "")).resolve().parent
hash_id = self.base_path.name
try:
tdir = pathlib.Path(FLAGS.local_filesystem).resolve() / hash_id / "node_committee_samples"
except Exception:
tdir = pathlib.Path("/tmp").resolve() / hash_id / "node_committee_samples"
try:
tdir.mkdir(parents = True, exist_ok = True)
except Exception:
pass
self.replicated_path = tdir / "samples_{}.db".format(environment.WORLD_RANK)
self.replicated = EERSamples(
url = "sqlite:///{}".format(str(self.replicated_path)),
must_exist = must_exist,
is_replica = True
)
distrib.barrier()
return
@property
def sample_count(self):
"""Number of samples in DB."""
with self.get_session() as s:
count = s.query(EERSample).count()
return count
@property
def get_data(self):
"""Return all database in list format"""
with self.get_session() as s:
return s.query(EERSample).all()
@property
def cur_sample_epoch(self):
"""Return the most recent checkpointed current sample step."""
if self.sample_count > 0:
with self.get_session() as s:
return max([int(x.sample_epoch) for x in s.query(EERSample).all()])
else:
return 0
@property
def get_session(self):
"""
get proper DB session.
"""
if environment.WORLD_SIZE == 1 or environment.WORLD_RANK == 0:
return self.Session
else:
return self.replicated.Session
def add_samples(self, sample_epoch: int, samples: typing.Dict[str, typing.Any]) -> None:
"""
If not exists, add sample to Samples table.
"""
hash_cache = set()
offset_idx = self.sample_count
with self.get_session(commit = True) as s:
for sample in samples:
sample_entry = EERSample.FromArgs(
id = offset_idx,
sample_epoch = sample_epoch,
train_step = sample['train_step'],
src = sample['src'],
runtime_features = sample['runtime_features'],
input_features = sample['input_features'],
prediction = sample['prediction'],
)
exists = s.query(EERSample).filter_by(sha256 = sample_entry.sha256).first()
if not exists and sample_entry.sha256 not in hash_cache:
s.add(sample_entry)
hash_cache.add(sample_entry.sha256)
offset_idx += 1
s.commit()
return
| 6,628 | 35.224044 | 99 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/expected_error_reduction/model.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Here all the committee members are defined.
"""
import typing
from deeplearning.benchpress.active_models.expected_error_reduction import config
from deeplearning.benchpress.models.torch_bert import activations
from deeplearning.benchpress.util.pytorch import torch
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
ACT2FN = {
"gelu" : activations.gelu,
"relu" : torch.nn.ReLU,
"swish" : activations.swish,
"gelu_new" : activations.gelu_new,
"mish" : mish,
"softmax" : torch.nn.Softmax
}
class MLP(torch.nn.Module):
"""
A modular MLP model that supports Linear, Dropout, LayerNorm and activations.
"""
def __init__(self, config: config.ModelConfig):
super(MLP, self).__init__()
self.config = config.layer_config
self.layers = []
layers = {
'Embedding' : torch.nn.Embedding,
'Linear' : torch.nn.Linear,
'Dropout' : torch.nn.Dropout,
'LayerNorm' : torch.nn.LayerNorm,
}
layers.update(ACT2FN)
self.layers = torch.nn.ModuleList([layers[layer[0]](**layer[1]) for layer in self.config])
self.softmax = torch.nn.Softmax(dim = 1)
return
def calculate_loss(self,
outputs: torch.Tensor,
target_ids: torch.Tensor,
) -> torch.Tensor:
"""
Categorical cross-entropy function.
"""
## Calculate categorical label loss.
loss_fn = torch.nn.CrossEntropyLoss()
label_loss = loss_fn(outputs.to(torch.float32), target_ids.squeeze(1))
## Calculate probs
probs = self.softmax(outputs.clone().detach())
## Calculate top-1 accuracy of predictions across batch.
hits, total = 0, int(outputs.size(0))
for out, target in zip(torch.argmax(outputs, dim = 1), target_ids):
if out == target:
hits += 1
return label_loss, probs, torch.FloatTensor([hits / total])
def forward(self,
input_ids : torch.Tensor,
target_ids : torch.Tensor = None,
is_sampling : bool = False
) -> torch.Tensor:
"""
Args:
input_ids: Input features for training or prediction.
target_ids: Target tokens to predict during training.
static_features: List of static input features of respective sample to predict.
is_sampling: Select between training and sampling method.
"""
device = input_ids.get_device()
device = device if device >= 0 else 'cpu'
out = input_ids
for layer in self.layers:
out = layer(out)
if not is_sampling:
total_loss, probs, batch_accuracy = self.calculate_loss(out, target_ids)
return {
'total_loss' : total_loss,
'accuracy' : batch_accuracy.to(device),
'output_probs' : probs,
'output_label' : torch.argmax(out, dim = -1).unsqueeze(-1),
}
else:
return {
'output_probs' : self.softmax(out.clone().detach()),
'output_label' : torch.argmax(out, dim = -1).unsqueeze(-1),
}
| 3,600 | 32.036697 | 94 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/expected_error_reduction/config.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Config setup for expected error reduction active learner.
"""
import typing
import pathlib
from deeplearning.benchpress.active_models import downstream_tasks
from deeplearning.benchpress.proto import active_learning_pb2
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import crypto
def AssertConfigIsValid(config: active_learning_pb2.ExpectedErrorReduction) -> None:
"""
Parse proto description and check for validity.
"""
if config.HasField("head"):
tl = 0
pbutil.AssertFieldIsSet(config.head, "initial_learning_rate_micros")
pbutil.AssertFieldIsSet(config.head, "batch_size")
pbutil.AssertFieldIsSet(config.head, "num_warmup_steps")
for l in config.head.layer:
if l.HasField("embedding"):
pbutil.AssertFieldIsSet(l.embedding, "num_embeddings")
pbutil.AssertFieldIsSet(l.embedding, "embedding_dim")
elif l.HasField("linear"):
pbutil.AssertFieldIsSet(l.linear, "in_features")
pbutil.AssertFieldIsSet(l.linear, "out_features")
elif l.HasField("dropout"):
pbutil.AssertFieldIsSet(l.dropout, "p")
elif l.HasField("layer_norm"):
pbutil.AssertFieldIsSet(l.layer_norm, "normalized_shape")
pbutil.AssertFieldIsSet(l.layer_norm, "eps")
elif l.HasField("act_fn"):
pbutil.AssertFieldIsSet(l.act_fn, "fn")
else:
raise AttributeError(l)
tl += 1
assert tl > 0, "Model is empty. No layers found."
return
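# A hedged textproto-style sketch of an ExpectedErrorReduction config that
# would pass the checks above (values are illustrative, not from the source):
#   head {
#     initial_learning_rate_micros: 20
#     batch_size: 32
#     num_warmup_steps: 100
#     layer { linear { in_features: 70 out_features: 128 } }
#     layer { act_fn { fn: "relu" } }
#     layer { linear { in_features: 128 out_features: 6 } }
#   }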
class ModelConfig(object):
model_type = "expected_error_reduction"
@classmethod
def FromConfig(cls,
config: active_learning_pb2.ExpectedErrorReduction,
downstream_task: downstream_tasks.DownstreamTask,
num_train_steps: int,
) -> typing.List["ModelConfig"]:
return NNModelConfig(config.head, downstream_task, num_train_steps)
@property
def num_labels(self) -> int:
"""
The number of output labels for classification models.
"""
return self.downstream_task.output_size
@property
def num_features(self) -> int:
"""
    The number of input features to the model.
"""
return self.downstream_task.input_size
def __init__(self,
name : str,
config : typing.Union[active_learning_pb2.MLP, active_learning_pb2.KMeans],
downstream_task : downstream_tasks.DownstreamTask
) -> "ModelConfig":
self.name = name
self.config = config
self.downstream_task = downstream_task
self.sha256 = crypto.sha256_str(str(config))
## Placeholding initialization
self.num_train_steps = None
self.num_warmup_steps = None
self.num_epochs = None
self.steps_per_epoch = None
self.batch_size = None
self.learning_rate = None
self.max_grad_norm = None
self.layer_config = None
return
class NNModelConfig(ModelConfig):
"""
NeuralNetwork-based architectural config.
"""
def __init__(self,
config : active_learning_pb2.MLP,
downstream_task : downstream_tasks.DownstreamTask,
num_train_steps : int
) -> "ModelConfig":
super(NNModelConfig, self).__init__("MLP", config, downstream_task)
## NN-specific attributes
self.num_train_steps = (num_train_steps + config.batch_size) // config.batch_size
self.num_warmup_steps = config.num_warmup_steps
self.num_epochs = 1
self.steps_per_epoch = self.num_train_steps
self.batch_size = config.batch_size
self.learning_rate = config.initial_learning_rate_micros / 1e6
self.max_grad_norm = 1.0
if len(self.config.layer) == 0:
raise ValueError("Layer list is empty for model")
if self.config.layer[0].HasField("linear"):
if self.config.layer[0].linear.in_features != self.downstream_task.input_size:
raise ValueError("Mismatch between model's input size {} and downstream task's input size {}".format(
self.config.layer[0].linear.in_features,
self.downstream_task.input_size
)
)
if self.config.layer[-1].HasField("linear"):
if self.config.layer[-1].linear.out_features != self.downstream_task.output_size:
raise ValueError("Mismatch between model's output size {} and downstream task's output size {}".format(
self.config.layer[-1].linear.out_features,
self.downstream_task.output_size
)
)
self.layer_config = []
for l in self.config.layer:
if l.HasField("embedding"):
self.layer_config.append((
'Embedding', {
'num_embeddings': l.embedding.num_embeddings,
'embedding_dim' : l.embedding.embedding_dim,
'padding_idx' : l.embedding.padding_idx if l.embedding.HasField("padding_idx") else None
}
)
)
elif l.HasField("linear"):
self.layer_config.append((
'Linear', {
'in_features': l.linear.in_features,
'out_features': l.linear.out_features,
}
)
)
elif l.HasField("dropout"):
self.layer_config.append((
'Dropout', {
'p': l.dropout.p,
}
)
)
elif l.HasField("layer_norm"):
self.layer_config.append((
'LayerNorm', {
'normalized_shape': l.layer_norm.normalized_shape,
'eps': l.layer_norm.eps,
}
)
)
elif l.HasField("act_fn"):
self.layer_config.append((
l.act_fn.fn, {}
)
)
return
| 6,306 | 34.234637 | 111 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/expected_error_reduction/eer.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A neural architecture for downstream task label prediction.
This head is used for feature-less learning to target benchmarks.
"""
import typing
import collections
import tqdm
from deeplearning.benchpress.models.torch_bert import hooks
from deeplearning.benchpress.active_models import backends
from deeplearning.benchpress.active_models import data_generator
from deeplearning.benchpress.active_models.expected_error_reduction import optimizer
from deeplearning.benchpress.active_models.expected_error_reduction import model
from deeplearning.benchpress.active_models.expected_error_reduction import config
from deeplearning.benchpress.active_models.expected_error_reduction import eer_database
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
class ExpectedErrorReduction(backends.BackendBase):
class TrainingOpts(typing.NamedTuple):
"""Wrapper class for training options"""
train_batch_size : int
learning_rate : float
num_warmup_steps : int
max_grad_norm : float
steps_per_epoch : int
num_epochs : int
num_train_steps : int
class Estimator(typing.NamedTuple):
"""Named tuple to wrap BERT pipeline."""
model : typing.TypeVar('nn.Module')
data_generator : 'torch.utils.data.Dataset'
optimizer : typing.Any
scheduler : typing.Any
def __repr__(self):
return "ExpectedErrorReduction"
def __init__(self, *args, **kwargs):
super(ExpectedErrorReduction, self).__init__(*args, **kwargs)
from deeplearning.benchpress.util import pytorch
if not pytorch.initialized:
pytorch.initPytorch()
self.pytorch = pytorch
self.torch = pytorch.torch
self.torch_tpu_available = pytorch.torch_tpu_available
self.torch.manual_seed(self.config.random_seed)
self.torch.cuda.manual_seed_all(self.config.random_seed)
self.ckpt_path = self.cache_path / "checkpoints"
self.sample_path = self.cache_path / "samples"
self.logfile_path = self.cache_path / "logs"
if environment.WORLD_RANK == 0:
self.ckpt_path.mkdir(exist_ok = True, parents = True)
self.sample_path.mkdir(exist_ok = True, parents = True)
self.logfile_path.mkdir(exist_ok = True, parents = True)
self.validation_results_file = "val_results.txt"
self.validation_results_path = self.logfile_path / self.validation_results_file
self.model_config = None
self.training_opts = None
self.train = None
self.sample = None
self.is_validated = False
self.is_trained = False
self.eer_samples = eer_database.EERSamples(
url = "sqlite:///{}".format(str(self.sample_path / "samples.db")),
must_exist = False,
)
self.sample_epoch = self.eer_samples.cur_sample_epoch
l.logger().info("Active ExpectedErrorReduction config initialized in {}".format(self.cache_path))
return
def _ConfigModelParams(self) -> None:
"""
Generic initialization.
"""
self.model_config = config.ModelConfig.FromConfig(
self.config.expected_error_reduction,
self.downstream_task,
self.config.num_train_steps
)
self.training_opts = ExpectedErrorReduction.TrainingOpts(
train_batch_size = self.model_config.batch_size,
learning_rate = self.model_config.learning_rate,
num_warmup_steps = self.model_config.num_warmup_steps,
max_grad_norm = self.model_config.max_grad_norm,
steps_per_epoch = self.model_config.steps_per_epoch,
num_epochs = self.model_config.num_epochs,
num_train_steps = self.model_config.num_train_steps,
)
return
def _ConfigTrainParams(self, data_generator: 'torch.utils.data.Dataset') -> None:
"""
Model parameter initialization.
"""
if not self.train:
self._ConfigModelParams()
cm = model.MLP(self.model_config).to(self.pytorch.device)
if self.pytorch.num_nodes > 1:
distrib.barrier()
cm = self.torch.nn.parallel.DistributedDataParallel(
cm,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
)
elif self.pytorch.num_gpus > 1:
cm = self.torch.nn.DataParallel(cm)
opt, lr_scheduler = optimizer.create_optimizer_and_scheduler(
model = cm,
num_train_steps = self.training_opts.num_train_steps,
warmup_steps = self.training_opts.num_warmup_steps,
learning_rate = self.training_opts.learning_rate,
weight_decay = 0.0,
)
self.train = ExpectedErrorReduction.Estimator(
model = cm,
data_generator = data_generator,
optimizer = opt,
scheduler = lr_scheduler,
)
l.logger().info(self.GetShortSummary())
return
def _ConfigSampleParams(self) -> None:
"""
Model parameter initialization.
"""
if not self.sample:
self._ConfigModelParams()
cm = model.MLP(self.model_config).to(self.pytorch.device)
if self.pytorch.num_nodes > 1:
distrib.barrier()
cm = self.torch.nn.parallel.DistributedDataParallel(
cm,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
)
elif self.pytorch.num_gpus > 1:
cm = self.torch.nn.DataParallel(cm)
self.sample = ExpectedErrorReduction.Estimator(
model = cm,
data_generator = None,
optimizer = None,
scheduler = None,
)
l.logger().info(self.GetShortSummary())
return
def model_step(self,
model : 'torch.nn.Module',
inputs : typing.Dict[str, 'torch.Tensor'],
is_sampling : bool = False
) -> float:
"""
Run forward function for member model.
"""
return model(
input_ids = inputs['input_ids'].to(self.pytorch.device),
target_ids = inputs['target_ids'].to(self.pytorch.device) if not is_sampling else None,
is_sampling = is_sampling,
)
def Train(self, **kwargs) -> None:
"""
Train the AL predictive model.
"""
    # The update dataloader, used when you want to step-train after collecting a target benchmark.
update_dataloader = kwargs.get('update_dataloader', None)
# Temp estimator, for when you are temp-training a model version during EER Sample.
update_estimator = kwargs.get('eer_estimator', None)
if not update_estimator:
# If not a temp estimator, then create the standard train estimator if not already created.
self._ConfigTrainParams(self.downstream_task.data_generator)
train_estimator = update_estimator if update_estimator else self.train
if update_dataloader is None and update_estimator is None:
l.logger().info("Initial EER model training.")
# self.Validate()
if not self.is_trained or update_dataloader is not None or update_estimator:
data_generator = (
train_estimator.data_generator
if update_dataloader is None
else update_dataloader
# + train_estimator.data_generator.get_random_subset(
# max(0, abs(len(update_dataloader) - self.training_opts.num_train_steps)))
)
if len(data_generator) == 0:
return
# ## TODO: Dummy code. If active learner can't learn on test set, then features suck.
# Toggle this to train on test set. Used for evaluation purposes.
# elif not update_estimator:
# data_generator = self.downstream_task.test_set
# Empty cache for GPU environments.
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
# Load most recent checkpoint to estimator, if not temp-model.
if not update_estimator:
current_step = self.loadCheckpoint(train_estimator)
if current_step >= 0:
l.logger().info("EER: Loaded checkpoint step {}".format(current_step))
current_step = max(0, current_step)
num_train_steps = min(
(len(data_generator) + self.training_opts.train_batch_size) // self.training_opts.train_batch_size,
self.training_opts.num_train_steps
) if update_dataloader is None else ((len(update_dataloader) + self.training_opts.train_batch_size) // self.training_opts.train_batch_size) + current_step
else:
current_step = 0
num_train_steps = len(data_generator)
if current_step < num_train_steps:
train_estimator.model.zero_grad()
# Setup sampler and data loader.
if self.pytorch.num_nodes <= 1:
sampler = self.torch.utils.data.RandomSampler(data_generator, replacement = False)
else:
sampler = self.torch.utils.data.DistributedSampler(
data_generator,
num_replicas = self.pytorch.num_nodes,
rank = self.pytorch.torch.distributed.get_rank()
)
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = data_generator,
batch_size = self.training_opts.train_batch_size,
sampler = (sampler
if not self.pytorch.torch_tpu_available or self.pytorch.torch_xla.xrt_world_size() <= 1
else self.torch.utils.data.distributed.DistributedSampler(
dataset = data_generator,
num_replicas = self.pytorch.num_nodes if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.xrt_world_size(),
rank = self.pytorch.torch.distributed.get_rank() if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False if environment.WORLD_SIZE == 1 else True,
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
data_generator, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
# Get dataloader iterator and setup hooks.
batch_iterator = iter(loader)
if self.is_world_process_zero() and not update_estimator:
# Monitoring hook.
train_hook = hooks.tensorMonitorHook(
self.logfile_path,
current_step,
min(
(len(data_generator) + self.training_opts.train_batch_size) // self.training_opts.train_batch_size,
self.training_opts.steps_per_epoch, 50
)
)
try:
with self.torch.enable_grad():
train_estimator.model.train()
# epoch_iter = tqdm.auto.trange(self.training_opts.num_epochs, desc="Epoch", leave = False) if self.is_world_process_zero() else range(self.training_opts.num_epochs)
# In distributed mode, calling the set_epoch() method at
# the beginning of each epoch before creating the DataLoader iterator
# is necessary to make shuffling work properly across multiple epochs.
# Otherwise, the same ordering will be always used.
if self.pytorch.num_nodes > 1:
loader.sampler.set_epoch(current_step)
batch_iter = tqdm.tqdm(batch_iterator, desc="Batch", leave = False) if self.is_world_process_zero() else batch_iterator
for inputs in batch_iter:
# Run model step on inputs
step_out = self.model_step(train_estimator.model, inputs)
# Backpropagate losses
total_loss = step_out['total_loss'].mean()
total_loss.backward()
self.torch.nn.utils.clip_grad_norm_(train_estimator.model.parameters(), self.training_opts.max_grad_norm)
if self.torch_tpu_available:
self.pytorch.torch_xla.optimizer_step(train_estimator.optimizer)
else:
train_estimator.optimizer.step()
train_estimator.scheduler.step()
## Collect tensors for logging.
if self.pytorch.num_nodes > 1:
total_loss = [
self.torch.zeros(tuple(step_out['total_loss'].shape), dtype = self.torch.float32).to(self.pytorch.device)
for _ in range(self.torch.distributed.get_world_size())
]
self.torch.distributed.all_gather(total_loss, step_out["total_loss"])
else:
total_loss = step_out['total_loss'].unsqueeze(0).cpu()
if self.is_world_process_zero() and not update_estimator:
train_hook.step(
train_step = current_step,
total_loss = sum([tl.mean().item() for tl in total_loss]) / len(total_loss),
)
train_estimator.model.zero_grad()
if current_step == 0 and update_estimator is None:
l.logger().info("EER: Starting Loss: {}".format(sum([tl.mean().item() for tl in total_loss]) / len(total_loss)))
current_step += 1
# End of epoch
if not update_estimator:
self.saveCheckpoint(train_estimator, current_step = current_step)
if self.is_world_process_zero() and not update_estimator:
try:
l.logger().info(
"EER: Step {} Loss: {}".format(
current_step, train_hook.epoch_loss
)
)
except ZeroDivisionError:
l.logger().error(
"Hook has crashed again: current_step: {}, step_freq: {}, flush_freq: {}, train_step: {}".format(
train_hook.current_step, train_hook.step_freq, train_hook.flush_freq,
current_step
)
)
val_accuracy = self.Validate()
train_hook.end_epoch(
**{"val_{}_accuracy".format(key): val for key, val in val_accuracy.items()}
)
if self.torch_tpu_available:
self.pytorch.torch_xla.master_print(self.pytorch.torch_xla_met.metrics_report())
except KeyboardInterrupt:
pass
self.is_trained = True
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
return
  def Validate(self, **kwargs) -> typing.Dict[str, float]:
    """
    Run validation to measure accuracy on the downstream task's selected test set, if one exists.
"""
# Load the test database from the downstream task.
test_set = self.downstream_task.test_set
# If non-empty.
if test_set:
_ = self.loadCheckpoint(self.train)
self.train.model.zero_grad()
# Setup sampler and dataloader.
if self.pytorch.num_nodes <= 1:
sampler = self.torch.utils.data.SequentialSampler(test_set)
else:
sampler = self.torch.utils.data.DistributedSampler(
test_set,
num_replicas = self.pytorch.num_nodes,
rank = self.pytorch.torch.distributed.get_rank()
)
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = test_set,
batch_size = self.training_opts.train_batch_size,
sampler = (sampler
if not self.pytorch.torch_tpu_available or self.pytorch.torch_xla.xrt_world_size() <= 1
else self.torch.utils.data.distributed.DistributedSampler(
dataset = test_set,
num_replicas = self.pytorch.num_nodes if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.xrt_world_size(),
rank = self.pytorch.torch.distributed.get_rank() if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False,
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
                            test_set, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
# Setup iterator and accuracy metrics.
batch_iter = tqdm.tqdm(iter(loader), desc = "Test Set", leave = False) if self.is_world_process_zero() else iter(loader)
accuracy = {}
missed_idxs = {}
with self.torch.no_grad():
self.train.model.eval()
if self.pytorch.num_nodes > 1:
loader.sampler.set_epoch(0)
# Run inference.
for inputs in batch_iter:
step_out = self.model_step(self.train.model, inputs)
## Collect tensors for logging.
if self.pytorch.num_nodes > 1:
output_label = [
self.torch.zeros(tuple(step_out['output_label'].shape), dtype = self.torch.int64).to(self.pytorch.device)
for _ in range(self.torch.distributed.get_world_size())
]
target_ids = [
self.torch.zeros(tuple(inputs['target_ids'].shape), dtype = self.torch.int64).to(self.pytorch.device)
for _ in range(self.torch.distributed.get_world_size())
]
self.torch.distributed.all_gather(output_label, step_out['output_label'])
self.torch.distributed.all_gather(target_ids, inputs['target_ids'].to(self.pytorch.device))
else:
output_label = step_out['output_label'].unsqueeze(0)
target_ids = inputs ['target_ids'].unsqueeze(0).to(self.pytorch.device)
# Group accuracy stats by label.
# Assign to the first index the count of correct predictions.
# Assign to the second index the total predictions.
for id, label in zip(self.downstream_task.output_ids, self.downstream_task.output_labels):
if label not in accuracy:
accuracy[label] = [0, 0]
accuracy[label][0] += int(self.torch.sum((output_label == id) & (target_ids == id)).cpu())
accuracy[label][1] += int(self.torch.sum(target_ids == id).cpu())
for out, tar, idx in zip(step_out['output_label'], inputs['target_ids'], inputs['idx']):
if int(tar) != int(out):
if int(tar) not in missed_idxs:
missed_idxs[int(tar)] = []
missed_idxs[int(tar)].append(int(idx))
# You may want to all gather that.
epoch_accuracy = {
k: v[0] / v[1] for k, v in accuracy.items()
}
distrib.barrier()
l.logger().error("Total data: {},\nValidation stats: {}\n{}".format(len(test_set), epoch_accuracy, accuracy))
l.logger().error("Missed indices: {}".format(missed_idxs))
return epoch_accuracy
def Sample(self, sample_set: 'torch.Dataset') -> typing.List[typing.Dict[str, float]]:
"""
Active learner sampling.
sample_set contains random datapoints provided by the downstream task.
Expected Error Reduction algorithm is going to be applied for each datapoint for each label class.
"""
l.logger().error("Problem #2: Check that for DDP, every one gets the chunk they must.")
self._ConfigSampleParams()
current_step = self.loadCheckpoint(self.sample)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if current_step < 0:
l.logger().warn("EER: You are trying to sample an untrained model.")
current_step = max(0, current_step)
## If DDP, each node will work separately on chunks of the unlabelled dataset.
node_size = len(sample_set) // environment.WORLD_SIZE
node_rem = len(sample_set) % environment.WORLD_SIZE
node_set = sample_set.get_sliced_subset(environment.WORLD_RANK * node_size, (1 + environment.WORLD_RANK) * node_size)
if environment.WORLD_RANK == environment.WORLD_SIZE - 1 and node_rem > 0:
node_set += sample_set.get_sliced_subset((1 + environment.WORLD_RANK) * node_size)
node_loader = self.torch.utils.data.dataloader.DataLoader(
dataset = node_set,
batch_size = 1,
sampler = self.torch.utils.data.SequentialSampler(node_set),
num_workers = 0,
drop_last = False,
)
node_losses = {
'input_ids' : self.torch.zeros([len(node_set), self.downstream_task.input_size], dtype = self.torch.float32),
'static_features' : self.torch.zeros([len(node_set), self.downstream_task.static_features_size], dtype = self.torch.float32),
'runtime_features' : self.torch.zeros([len(node_set), self.downstream_task.runtime_features_size], dtype = self.torch.int64),
'posterior_probs' : self.torch.zeros([len(node_set), self.downstream_task.output_size], dtype = self.torch.float32),
'aggregated_entropy' : self.torch.zeros([len(node_set), self.downstream_task.output_size], dtype = self.torch.float32),
'expected_error_rate' : self.torch.zeros([len(node_set), 1], dtype = self.torch.float32),
}
self.sample.model.eval()
for idx, unl_train_point in tqdm.tqdm(enumerate(iter(node_loader)), total = len(node_loader), desc = "D + (x, y)"):
node_losses['input_ids'][idx] = unl_train_point['input_ids']
node_losses['static_features'][idx] = unl_train_point['static_features']
node_losses['runtime_features'][idx] = unl_train_point['runtime_features']
for out_label in self.downstream_task.output_ids:
## For (x, y) run model inference to obtain p(x|y)
with self.torch.no_grad():
out = self.model_step(self.sample.model, unl_train_point, is_sampling = True)
node_losses['posterior_probs'][idx][out_label] = out['output_probs'].squeeze(0)[out_label]
## Extend Dataset D+: D + (x, y)
# extended_dataset = self.downstream_task.dataset + {'input_ids': unl_train_point, 'target_ids': out_label}
extended_datapoint = data_generator.ListTrainDataloader([], lazy = True)
extended_datapoint.dataset = [
{
'input_ids': unl_train_point['input_ids'].squeeze(0),
'target_ids': self.torch.LongTensor([out_label]),
}
]
extended_dataset = self.downstream_task.data_generator + extended_datapoint
## Copy the model to a temp one.
new_model = model.MLP(self.model_config).to(self.pytorch.device)
if self.pytorch.num_nodes > 1:
distrib.barrier()
new_model.load_state_dict(self.sample.model.module.state_dict())
new_model = self.torch.nn.parallel.DistributedDataParallel(
new_model,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
)
elif self.pytorch.num_gpus > 1:
new_model.load_state_dict(self.sample.model.module.state_dict())
new_model = self.torch.nn.DataParallel(new_model)
else:
new_model.load_state_dict(self.sample.model.state_dict())
## Define optimizer, scheduler for training regime.
opt, lr_scheduler = optimizer.create_optimizer_and_scheduler(
model = new_model,
num_train_steps = len(extended_dataset),
warmup_steps = 0,
learning_rate = self.training_opts.learning_rate,
weight_decay = 0.0,
)
dp_estimator = ExpectedErrorReduction.Estimator(
model = new_model,
data_generator = extended_dataset,
optimizer = opt,
scheduler = lr_scheduler,
)
## Train the new model here.
self.Train(eer_estimator = dp_estimator)
## Run the new model on the unlabelled dataset to estimate future errors.
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = node_set,
batch_size = self.training_opts.train_batch_size,
sampler = self.torch.utils.data.SequentialSampler(node_set),
num_workers = 0,
drop_last = False,
)
aggr_entropy = 0.0
target_ids = self.torch.zeros(
[self.downstream_task.output_size, self.training_opts.train_batch_size, 1], dtype = self.torch.int64
)
with self.torch.no_grad():
for tid in self.downstream_task.output_ids:
target_ids[tid,:] = tid
for unl_batch in iter(loader):
for target_id_batch in target_ids:
out = self.model_step(new_model, {'input_ids': unl_batch['input_ids'], 'target_ids': target_id_batch}, is_sampling = False)
aggr_entropy += out['total_loss'].mean()
node_losses['aggregated_entropy'][idx][out_label] = aggr_entropy
node_losses['expected_error_rate'][idx] = sum(
[node_losses['posterior_probs'][idx][L] * node_losses['aggregated_entropy'][idx][L]
for L in self.downstream_task.output_ids]
)
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
input_ids = [self.torch.zeros(tuple(node_losses['input_ids' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
static_features = [self.torch.zeros(tuple(node_losses['static_features' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
      runtime_features    = [self.torch.zeros(tuple(node_losses['runtime_features' ].shape), dtype = self.torch.int64).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())] # dtype must match the gathered tensor.
posterior_probs = [self.torch.zeros(tuple(node_losses['posterior_probs' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
aggregated_entropy = [self.torch.zeros(tuple(node_losses['aggregated_entropy' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
expected_error_rate = [self.torch.zeros(tuple(node_losses['expected_error_rate'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(input_ids, node_losses['input_ids' ])
self.torch.distributed.all_gather(static_features, node_losses['static_features' ])
self.torch.distributed.all_gather(runtime_features, node_losses['runtime_features' ])
self.torch.distributed.all_gather(posterior_probs, node_losses['posterior_probs' ])
self.torch.distributed.all_gather(aggregated_entropy, node_losses['aggregated_entropy' ])
self.torch.distributed.all_gather(expected_error_rate, node_losses['expected_error_rate'])
      # Concatenate the gathered per-node chunks along the batch dimension.
      input_ids           = self.torch.cat(input_ids)
      static_features     = self.torch.cat(static_features)
      runtime_features    = self.torch.cat(runtime_features)
      posterior_probs     = self.torch.cat(posterior_probs)
      aggregated_entropy  = self.torch.cat(aggregated_entropy)
      expected_error_rate = self.torch.cat(expected_error_rate)
expected_losses = {
'input_ids' : input_ids,
'static_features' : static_features,
'runtime_features' : runtime_features,
'posterior_probs' : posterior_probs,
'aggregated_entropy' : aggregated_entropy,
'expected_error_rate' : expected_error_rate,
}
else:
expected_losses = node_losses
expected_losses['input_ids'] = expected_losses['input_ids' ].cpu().numpy()
expected_losses['static_features'] = expected_losses['static_features' ].cpu().numpy()
expected_losses['runtime_features'] = expected_losses['runtime_features' ].cpu().numpy()
expected_losses['posterior_probs'] = expected_losses['posterior_probs' ].cpu().numpy()
expected_losses['aggregated_entropy'] = expected_losses['aggregated_entropy' ].cpu().numpy()
expected_losses['expected_error_rate'] = expected_losses['expected_error_rate'].cpu().numpy()
space_samples = []
for idx in range(len(expected_losses['input_ids'])):
space_samples.append({
'input_ids' : self.downstream_task.VecToInputFeatDict(expected_losses['input_ids' ][idx]),
'static_features' : self.downstream_task.VecToStaticFeatDict(expected_losses['static_features' ][idx]),
'runtime_features' : self.downstream_task.VecToRuntimeFeatDict(expected_losses['runtime_features'][idx]),
'posterior_probs' : expected_losses['posterior_probs' ][idx],
'aggregated_entropy' : expected_losses['aggregated_entropy' ][idx],
'expected_error_rate' : expected_losses['expected_error_rate'][idx],
})
return sorted(space_samples, key = lambda x: x['expected_error_rate'])
def saveCheckpoint(self,
estimator : 'ExpectedErrorReduction.Estimator',
current_step : int
) -> None:
"""
Saves model, scheduler, optimizer checkpoints per epoch.
"""
if self.is_world_process_zero():
ckpt_comp = lambda x: self.ckpt_path / "{}-{}.pt".format(x, current_step)
if self.torch_tpu_available:
if self.pytorch.torch_xla_model.rendezvous("saving_checkpoint"):
self.pytorch.torch_xla_model.save(estimator.model, ckpt_comp("model"))
self.pytorch.torch_xla.rendezvous("saving_optimizer_states")
self.pytorch.torch_xla.save(estimator.optimizer.state_dict(), ckpt_comp("optimizer"))
self.pytorch.torch_xla.save(estimator.scheduler.state_dict(), ckpt_comp("scheduler"))
else:
if isinstance(estimator.model, self.torch.nn.DataParallel):
self.torch.save(estimator.model.module.state_dict(), ckpt_comp("model"))
else:
self.torch.save(estimator.model.state_dict(), ckpt_comp("model"))
self.torch.save(estimator.optimizer.state_dict(), ckpt_comp("optimizer"))
self.torch.save(estimator.scheduler.state_dict(), ckpt_comp("scheduler"))
with open(self.ckpt_path / "checkpoint.meta", 'a') as mf:
mf.write("train_step: {}\n".format(current_step))
return
def loadCheckpoint(self, estimator: 'ExpectedErrorReduction.Estimator') -> int:
"""
Load model checkpoint. Loads either most recent epoch, or selected checkpoint through FLAGS.
"""
if not (self.ckpt_path / "checkpoint.meta").exists():
return -1
with open(self.ckpt_path / "checkpoint.meta", 'r') as mf:
key = "train_step"
get_step = lambda x: int(x.replace("\n", "").replace("{}: ".format(key), ""))
lines = mf.readlines()
entries = set({get_step(x) for x in lines if key in x})
if FLAGS.select_checkpoint_step == -1:
ckpt_step = max(entries)
else:
if FLAGS.select_checkpoint_step in entries:
ckpt_step = FLAGS.select_checkpoint_step
else:
raise ValueError("{} not found in checkpoint folder.".format(FLAGS.select_checkpoint_step))
ckpt_comp = lambda x: self.ckpt_path / "{}-{}.pt".format(x, ckpt_step)
if isinstance(estimator.model, self.torch.nn.DataParallel):
try:
estimator.model.module.load_state_dict(
self.torch.load(ckpt_comp("model")),
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
new_state_dict = collections.OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
estimator.model.module.load_state_dict(new_state_dict)
else:
try:
estimator.model.load_state_dict(
self.torch.load(ckpt_comp("model")),
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
new_state_dict = collections.OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
estimator.model.load_state_dict(new_state_dict)
if estimator.optimizer is not None and estimator.scheduler is not None and ckpt_step > 0:
estimator.optimizer.load_state_dict(
self.torch.load(ckpt_comp("optimizer"), map_location=self.pytorch.device)
)
estimator.scheduler.load_state_dict(
self.torch.load(ckpt_comp("scheduler"), map_location=self.pytorch.device)
)
estimator.model.eval()
return ckpt_step
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on
several machines, this is only going to be :obj:`True` for one process).
"""
if self.torch_tpu_available:
return self.pytorch.torch_xla_model.is_master_ordinal(local=False)
elif self.pytorch.num_nodes > 1:
return self.torch.distributed.get_rank() == 0
else:
return True
def GetShortSummary(self) -> None:
return "Short Summary TODO" | 34,761 | 45.164675 | 203 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/expected_error_reduction/optimizer.py | # coding=utf-8
# Copyright 2022 The Google AI Language Team Authors, The HuggingFace Inc. team and Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import typing
from deeplearning.benchpress.util.pytorch import torch
def create_optimizer_and_scheduler(model,
num_train_steps: int,
warmup_steps: int,
learning_rate: float,
adam_beta1 = 0.9,
adam_beta2 = 0.999,
adam_epsilon = 1e-6,
weight_decay = 0.01,
):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
  Trainer's init through :obj:`optimizers`, or subclass and override this method.
"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
opt = AdamW(
optimizer_grouped_parameters,
lr = learning_rate,
betas = (adam_beta1, adam_beta2),
eps = adam_epsilon,
)
lr_scheduler = get_linear_schedule_with_warmup(
opt, num_warmup_steps = warmup_steps, num_training_steps = num_train_steps
)
return opt, lr_scheduler
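# Usage sketch for create_optimizer_and_scheduler (illustrative only; the toy model and
# hyper-parameter values below are assumptions, not part of this module):
#
#   model = torch.nn.Linear(64, 2)
#   opt, sched = create_optimizer_and_scheduler(
#     model, num_train_steps = 1000, warmup_steps = 100, learning_rate = 1e-4)
#   for _ in range(1000):
#     loss = model(torch.randn(8, 64)).sum()
#     loss.backward()
#     opt.step()
#     sched.step()
#     opt.zero_grad()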
def get_constant_schedule(optimizer: torch.optim.Optimizer, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate, using the learning rate set in optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
return torch.optim.lr_scheduler.LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: torch.optim.Optimizer, num_warmup_steps: int, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0,
after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
      The total number of training steps.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
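# Illustrative multiplier values for the linear schedule (assuming num_warmup_steps = 10 and
# num_training_steps = 100; these numbers are examples, not defaults): the factor rises
# linearly from 0.0 at step 0 to 1.0 at step 10, then decays linearly back to 0.0 at step 100
# (e.g. 0.5 at step 5 and again at step 55).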
def get_cosine_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`int`, `optional`, defaults to 1):
The number of hard restarts to use.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
class AdamW(torch.optim.Optimizer):
"""
Implements Adam algorithm with weight decay fix as introduced in
`Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__.
Parameters:
params (:obj:`typing.Iterable[torch.nn.parameter.Parameter]`):
typing.Iterable of parameters to optimize or dictionaries defining parameter groups.
lr (:obj:`float`, `optional`, defaults to 1e-3):
The learning rate to use.
betas (:obj:`typing.Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
Adam's betas parameters (b1, b2).
eps (:obj:`float`, `optional`, defaults to 1e-6):
Adam's epsilon for numerical stability.
weight_decay (:obj:`float`, `optional`, defaults to 0):
Decoupled weight decay to apply.
correct_bias (:obj:`bool`, `optional`, defaults to `True`):
      Whether or not to correct bias in Adam (for instance, in the BERT TF repository they use :obj:`False`).
"""
def __init__(
self,
params: typing.Iterable[torch.nn.parameter.Parameter],
lr: float = 1e-3,
betas: typing.Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0.0,
correct_bias: bool = True,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
super().__init__(params, defaults)
def step(self, closure: typing.Callable = None):
"""
Performs a single optimization step.
Arguments:
closure (:obj:`typing.Callable`, `optional`): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
step_size = group["lr"]
if group["correct_bias"]: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state["step"]
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
return loss
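# Informal sketch of the update performed by AdamW.step() above, per parameter p with gradient g:
#   m <- beta1 * m + (1 - beta1) * g
#   v <- beta2 * v + (1 - beta2) * g**2
#   p <- p - lr * (sqrt(1 - beta2**t) / (1 - beta1**t)) * m / (sqrt(v) + eps)   # bias-corrected Adam step
#   p <- p - lr * weight_decay * p                                              # decoupled decay, applied after the step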
| 12,214 | 39.989933 | 129 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/samplers/validation_database.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for databases of BenchPress samples."""
import datetime
import typing
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from absl import flags
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import sqlutil
FLAGS = flags.FLAGS
Base = declarative.declarative_base()
class ValResults(Base):
__tablename__ = "validation_results"
"""
DB Table for concentrated validation results.
"""
key : str = sql.Column(sql.String(1024), primary_key=True)
results : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
class BERTValFile(Base, sqlutil.ProtoBackedMixin):
"""
A database entry representing a BenchPress validation trace.
"""
__tablename__ = "validation_traces"
id : int = sql.Column(sql.Integer, primary_key = True)
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
train_step : int = sql.Column(sql.Integer, nullable = False)
original_input : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
input_ids : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
masked_lm_ids : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
masked_lm_positions : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
masked_lm_lengths : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
masked_lm_predictions : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
num_targets : int = sql.Column(sql.Integer, nullable = False)
encoded_original_input : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
encoded_input_ids : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
input_mask : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
encoded_mask_lm_ids : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
masked_lm_weights : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
encoded_masked_lm_predictions : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
next_sentence_labels : int = sql.Column(sql.Integer, nullable = False)
next_sentence_predictions : int = sql.Column(sql.Integer, nullable = False)
seen_in_training : int = sql.Column(sql.Integer, nullable = False)
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
tokenizer,
id: int,
train_step: int,
seen_in_training,
original_input: typing.List[int],
input_ids: typing.List[int],
input_mask: typing.List[int],
masked_lm_ids: typing.List[int],
masked_lm_positions: typing.List[int],
masked_lm_weights: typing.List[float],
masked_lm_lengths: typing.List[int],
next_sentence_labels: typing.List[int],
masked_lm_predictions: typing.List[int],
next_sentence_predictions: typing.List[int],
) -> typing.Dict[str, typing.Any]:
str_original_input = tokenizer.tokensToString(original_input, ignore_token = tokenizer.padToken, with_formatting = True)
str_input_ids = tokenizer.tokensToString(input_ids, ignore_token = tokenizer.padToken, with_formatting = True)
str_masked_lm_ids = '\n'.join([tokenizer.decoder[x] if ('\n' not in tokenizer.vocab or ('\n' in tokenizer.vocab and x != tokenizer.vocab['\n'])) else '\\n' for x in masked_lm_ids])
str_masked_lm_predictions = '\n'.join([tokenizer.decoder[x] if ('\n' not in tokenizer.vocab or ('\n' in tokenizer.vocab and x != tokenizer.vocab['\n'])) else '\\n' for x in masked_lm_predictions])
return {
"id" : id,
"sha256" : crypto.sha256_str(
str(int(train_step)) +
str_original_input +
str_input_ids +
str_masked_lm_ids +
str_masked_lm_predictions
),
"train_step" : int(train_step),
"original_input" : str_original_input,
"encoded_original_input" : ','.join([str(x) for x in original_input]),
"input_ids" : str_input_ids,
"encoded_input_ids" : ','.join([str(x) for x in input_ids]),
"input_mask" : ','.join([str(x) for x in input_mask]),
"masked_lm_positions" : ','.join([str(x) for x in masked_lm_positions]),
"masked_lm_ids" : str_masked_lm_ids,
"encoded_mask_lm_ids" : ','.join([str(x) for x in masked_lm_ids]),
"masked_lm_weights" : ','.join([str(int(x)) for x in masked_lm_weights]),
"masked_lm_lengths" : ','.join([str(int(x)) for x in masked_lm_lengths if x >= 0]),
"next_sentence_labels" : int(next_sentence_labels),
"masked_lm_predictions" : str_masked_lm_predictions,
"encoded_masked_lm_predictions" : ','.join([str(x) for x in masked_lm_predictions]),
"next_sentence_predictions" : int(next_sentence_predictions),
"num_targets" : list(masked_lm_ids).index(tokenizer.padToken) if tokenizer.padToken in list(masked_lm_ids) else len(list(masked_lm_ids)),
"seen_in_training" : int(seen_in_training),
"date_added" : datetime.datetime.utcnow(),
}
class ValidationDatabase(sqlutil.Database):
"""A database of BenchPress samples."""
def __init__(self, url: str, must_exist: bool = False):
super(ValidationDatabase, self).__init__(url, Base, must_exist = must_exist)
@property
def count(self):
with self.Session() as s:
count = s.query(BERTValFile).count()
return count | 7,215 | 56.269841 | 207 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/samplers/samples_database.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for databases of BenchPress samples."""
import contextlib
import sys
import datetime
import typing
import multiprocessing
import progressbar
import sqlite3
import functools
import pathlib
import tqdm
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from absl import app, flags
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_string(
"sample_mergeable_databases",
None,
"Comma separated paths of SamplesDatabase to merge into one."
)
flags.DEFINE_string(
"sample_merged_path",
None,
"Specify output of merged database."
)
flags.DEFINE_string(
"tokenizer_path",
None,
"Specify path of tokenizer to update database."
)
Base = declarative.declarative_base()
class SampleResults(Base):
__tablename__ = "sampling_results"
"""
DB Table for concentrated validation results.
"""
key : str = sql.Column(sql.String(1024), primary_key=True)
results : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
class Sample(Base, sqlutil.ProtoBackedMixin):
"""A database row representing a BenchPress sample.
This is the clgen.Sample protocol buffer in SQL format.
"""
__tablename__ = "samples"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# unique hash of sample text
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
# model's train step that generated the sample
train_step : int = sql.Column(sql.Integer, nullable = False)
# Original input where the feed came from
original_input : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Starting feed of model
sample_feed : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# String-format generated text
text : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Array of the actual generated tokens
sample_indices : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# encoded sample text
encoded_text : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Encoded generated tokens
encoded_sample_indices : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Whether the generated sample compiles or not.
compile_status : bool = sql.Column(sql.Boolean, nullable = False)
# Sample's vector of features.
feature_vector : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Length of total sequence in number of tokens
num_tokens : int = sql.Column(sql.Integer, nullable = False)
  # If Bernoulli distribution was used during sampling
categorical_sampling : str = sql.Column(sql.String(8), nullable = False)
# Time
sample_time_ms : int = sql.Column(sql.Integer, nullable = False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromProto(cls, id: int, proto: model_pb2.Sample) -> typing.Dict[str, typing.Any]:
return {
"id" : id,
"sha256" : crypto.sha256_str(proto.text),
"train_step" : proto.train_step,
"encoded_text" : proto.encoded_text,
"original_input" : proto.original_input,
"sample_feed" : proto.sample_feed,
"text" : proto.text,
"sample_indices" : proto.sample_indices,
"encoded_sample_indices" : proto.encoded_sample_indices,
"feature_vector" : proto.feature_vector,
"num_tokens" : proto.num_tokens,
"compile_status" : proto.compile_status,
"categorical_sampling" : proto.categorical_sampling,
"sample_time_ms" : proto.sample_time_ms,
"date_added" : datetime.datetime.strptime(proto.date_added, "%m/%d/%Y, %H:%M:%S"),
}
@classmethod
def FromArgsLite(cls, id: int, text: str, feature_vector: str, compiles: bool) -> "Sample":
"""
Do you want to use SamplesDatabase as a means to store only code
without much fuss ? This function is for you!
"""
return Sample(**{
"sha256" : crypto.sha256_str(text),
"train_step" : -1,
"encoded_text" : "",
"original_input" : "",
"sample_feed" : "",
"text" : text,
"sample_indices" : "",
"encoded_sample_indices" : "",
"compile_status" : compiles,
"feature_vector" : feature_vector,
"num_tokens" : 0,
"categorical_sampling" : "False",
"sample_time_ms" : 0,
"date_added" : datetime.datetime.utcnow(),
})
class SamplesDatabase(sqlutil.Database):
"""A database of BenchPress samples."""
def __init__(self, url: str, must_exist: bool = False, is_replica: bool = False):
self.base_url = url
if environment.WORLD_RANK == 0 or is_replica:
super(SamplesDatabase, self).__init__(url, Base, must_exist = must_exist)
if environment.WORLD_SIZE > 1 and not is_replica:
      # Set up engine connections to per-node replicas of the samples database.
self.base_path = pathlib.Path(url.replace("sqlite:///", "")).resolve().parent
hash_id = self.base_path.name
try:
tdir = pathlib.Path(FLAGS.local_filesystem).resolve() / hash_id / "lm_samples"
except Exception:
tdir = pathlib.Path("/tmp").resolve() / hash_id / "lm_samples"
try:
tdir.mkdir(parents = True, exist_ok = True)
except Exception:
pass
self.replicated_path = tdir / "samples_{}.db".format(environment.WORLD_RANK)
self.replicated = SamplesDatabase(
url = "sqlite:///{}".format(str(self.replicated_path)),
must_exist = must_exist,
is_replica = True
)
distrib.barrier()
@property
def url(self):
"""
Return Database URL
"""
if environment.WORLD_RANK == 0:
return self.base_url
else:
return self.replicated.base_url
@property
def get_session(self):
"""
get proper DB session.
"""
if environment.WORLD_RANK == 0:
return self.Session
else:
return self.replicated.Session
@property
def count(self):
"""Number of samples in DB."""
with self.get_session() as s:
count = s.query(Sample).count()
return count
@property
def get_data(self):
"""Return all database in list format"""
with self.get_session() as s:
return s.query(Sample).all()
@property
def get_hash_entries(self):
"""Return all unique hash entries found in DB."""
with self.get_session() as s:
return s.query(Sample.sha256).all()
@property
def samples(self) -> typing.List[Sample]:
"""Get a list of all files in database."""
with self.get_session() as s:
return s.query(Sample).yield_per(1000)
@property
def correct_samples(self) -> typing.Set[str]:
"""Get samples that compile from SamplesDatabase."""
with self.get_session() as s:
return s.query(Sample).filter(Sample.compile_status == True).yield_per(1000).enable_eagerloads(False)
@property
def get_features(self) -> typing.List[typing.Dict[str, float]]:
"""Return all feature vectors of compiling samples."""
with self.get_session() as s:
return [x.feature_vector for x in s.query(Sample).filter(Sample.compile_status == True).yield_per(1000)]
@property
def get_data_features(self) -> typing.List[typing.Tuple[str, typing.Dict[str, float]]]:
"""Return tuple of code + feature vectors"""
with self.get_session() as s:
return [(x.text, x.feature_vector) for x in s.query(Sample).filter(Sample.compile_status == True).yield_per(1000)]
@property
def get_samples_features(self) -> typing.List[typing.Tuple[str, typing.Dict[str, float]]]:
"""Return compiling samples with feature vectors"""
with self.get_session() as s:
return [(x.text, extractor.RawToDictFeats(x.feature_vector)) for x in s.query(Sample).filter(Sample.compile_status == True).yield_per(1000)]
@property
def get_compilable_num_tokens(self) -> typing.List[int]:
"""Return num_tokens column."""
with self.get_session() as s:
return [int(x[0]) for x in s.query(Sample.num_tokens).filter(Sample.compile_status == True)]
def get_by_ids(self, ids):
"""Index and return sample by ID."""
with self.get_session() as s:
return [s.query(Sample).filter(Sample.id == i).first() for i in ids]
def merge_databases(dbs: typing.List[SamplesDatabase], out_db: SamplesDatabase) -> None:
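  """
  Merge several SamplesDatabase instances into out_db, de-duplicating entries by sha256.
  """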
sdir = {}
new_id = 0
for db in dbs:
data = [x for x in db.get_data]
for dp in data:
if dp.sha256 not in sdir:
dp.id = new_id
sdir[dp.sha256] = dp
new_id += 1
with out_db.Session() as s:
for dp in sdir.values():
s.add(Sample.FromArgsLite(0, dp.text, dp.feature_vector, dp.compile_status))
s.commit()
return
def run_extractors(sample: Sample) -> Sample:
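  """
  Return a fresh Sample row; for compiling samples the feature vector is re-computed with
  the static feature extractors, otherwise it is left empty.
  """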
if sample.compile_status:
return Sample(
**Sample.FromProto(0, model_pb2.Sample(
train_step = sample.train_step,
text = sample.text,
sample_indices = sample.sample_indices,
encoded_sample_indices = sample.encoded_sample_indices,
original_input = sample.original_input,
sample_feed = sample.sample_feed,
encoded_text = sample.encoded_text,
sample_time_ms = sample.sample_time_ms,
feature_vector = extractor.ExtractRawFeatures(sample.text),
num_tokens = sample.num_tokens,
compile_status = sample.compile_status,
categorical_sampling = int(sample.categorical_sampling) if sample.categorical_sampling in {"0", "1"} else (1 if bool(sample.categorical_sampling) else False),
date_added = sample.date_added.strftime("%m/%d/%Y, %H:%M:%S"),
)
)
)
else:
return Sample(
**Sample.FromProto(0, model_pb2.Sample(
train_step = sample.train_step,
text = sample.text,
sample_indices = sample.sample_indices,
encoded_sample_indices = sample.encoded_sample_indices,
original_input = sample.original_input,
sample_feed = sample.sample_feed,
encoded_text = sample.encoded_text,
sample_time_ms = sample.sample_time_ms,
feature_vector = "",
num_tokens = sample.num_tokens,
compile_status = sample.compile_status,
categorical_sampling = int(sample.categorical_sampling) if sample.categorical_sampling in {"0", "1"} else (1 if bool(sample.categorical_sampling) else False),
date_added = sample.date_added.strftime("%m/%d/%Y, %H:%M:%S"),
)
)
)
def get_sample(sample: Sample) -> Sample:
return Sample(
**Sample.FromProto(0, model_pb2.Sample(
train_step = sample.train_step,
text = sample.text,
sample_indices = sample.sample_indices,
encoded_sample_indices = sample.encoded_sample_indices,
original_input = sample.original_input,
sample_feed = sample.sample_feed,
encoded_text = sample.encoded_text,
sample_time_ms = sample.sample_time_ms,
feature_vector = sample.feature_vector,
num_tokens = sample.num_tokens,
compile_status = sample.compile_status,
categorical_sampling = int(sample.categorical_sampling) if sample.categorical_sampling in {"0", "1"} else (1 if bool(sample.categorical_sampling) else False),
date_added = sample.date_added.strftime("%m/%d/%Y, %H:%M:%S"),
)
)
)
def modernize_samples_db(db: SamplesDatabase, out_db: SamplesDatabase) -> None:
"""
Re-run feature extractors to update old db.
"""
pool = multiprocessing.Pool()
inp_data = db.get_data
bar = progressbar.ProgressBar(max_value = len(inp_data))
with out_db.Session(commit = True) as s:
for idx, dp in bar(enumerate(pool.imap_unordered(run_extractors, inp_data))):
dp.id = idx
s.add(dp)
      if (idx + 1) % 5000 == 0: # Commit periodically, in batches of 5000 rows.
s.commit()
s.commit()
pool.close()
return
def update_tokenizer(sample: Sample, tokenizer) -> Sample:
encoded = tokenizer.TokenizeString(sample.text)
return Sample(
**Sample.FromProto(0, model_pb2.Sample(
train_step = sample.train_step,
text = sample.text,
sample_indices = sample.sample_indices,
encoded_sample_indices = sample.sample_indices,
original_input = sample.original_input,
sample_feed = sample.sample_feed,
encoded_text = ','.join([str(x) for x in encoded]),
sample_time_ms = sample.sample_time_ms,
feature_vector = sample.feature_vector,
num_tokens = len(encoded),
compile_status = sample.compile_status,
categorical_sampling = int(sample.categorical_sampling),
date_added = sample.date_added.strftime("%m/%d/%Y, %H:%M:%S"),
)
)
)
def modernize_clgen_tokenizer(db: SamplesDatabase, out_db: SamplesDatabase, tokenizer) -> None:
"""
  Re-tokenize samples with the provided tokenizer to update the encoded fields of an old db.
"""
pool = multiprocessing.Pool()
inp_data = db.get_data
bar = progressbar.ProgressBar(max_value = len(inp_data))
f = functools.partial(update_tokenizer, tokenizer = tokenizer)
with out_db.Session(commit = True) as s:
for idx, dp in bar(enumerate(pool.imap_unordered(f, inp_data))):
dp.id = idx
s.add(dp)
      if (idx + 1) % 5000 == 0: # Commit periodically, in batches of 5000 rows.
s.commit()
s.commit()
pool.close()
return
def ContentHash_worker(sample: Sample) -> typing.Tuple[str, Sample]:
"""
Return new sample along with content hash of code.
"""
try:
return opencl.ContentHash(sample.text), sample
except Exception as e:
l.logger().warn(e)
return None
def to_unique_samples(db: SamplesDatabase, out_db: SamplesDatabase) -> None:
"""
Read input database, pass through deterministic re-writer and keep only unique samples.
"""
pool = multiprocessing.Pool()
inp_data = [x for x in db.get_data]
visited = set()
data = []
try:
for sha, sample in tqdm.tqdm(pool.imap_unordered(ContentHash_worker, inp_data), total = len(inp_data), desc = "Unique-fy samples database"):
if sha not in visited:
visited.add(sha)
data.append(sample)
except Exception as e:
l.logger().error(e)
pool.terminate()
raise e
pool.close()
with out_db.Session() as s:
idx = 0
for dp in tqdm.tqdm(data, total = len(data), desc = "Adding to DB"):
new_dp = get_sample(dp)
new_dp.id = idx
idx += 1
s.add(new_dp)
s.commit()
return
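# Usage sketch for to_unique_samples; paths are placeholders. Deduplication keys
# on opencl.ContentHash, i.e. a hash of the deterministically re-written source.
def _example_deduplicate_samples() -> None:
  """Example only: keep a single copy of each unique sample."""
  inp_db = SamplesDatabase(url = "sqlite:///{}".format(pathlib.Path("./samples.db").resolve()), must_exist = True)
  out_db = SamplesDatabase(url = "sqlite:///{}".format(pathlib.Path("./unique_samples.db").resolve()), must_exist = False)
  to_unique_samples(inp_db, out_db)
  return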
def extract_features(db: SamplesDatabase, out_db: SamplesDatabase) -> None:
inp_data = [x for x in db.get_data]
out_data = []
pool = multiprocessing.Pool()
for dp in tqdm.tqdm(pool.imap_unordered(run_extractors, inp_data), total = len(inp_data)):
out_data.append(dp)
with out_db.Session() as s:
idx = 0
for new_dp in out_data:
new_dp.id = idx
s.add(new_dp)
idx += 1
s.commit()
return
def initMain(*args, **kwargs):
l.initLogger("samples_database")
if not FLAGS.sample_merged_path:
raise ValueError("Specify out path for merged database")
out_path = pathlib.Path(FLAGS.sample_merged_path).absolute()
if out_path.suffix != '.db':
raise ValueError("sample_merged_path must end in a valid database name (.db extension): {}")
out_path.parent.mkdir(exist_ok = True, parents = True)
out_db = SamplesDatabase(url = "sqlite:///{}".format(str(out_path)), must_exist = False)
db_paths = [pathlib.Path(p).absolute() for p in FLAGS.sample_mergeable_databases.replace(" ", "").split(",")]
for p in db_paths:
if not p.exists():
raise FileNotFoundError(p)
dbs = [SamplesDatabase(url = "sqlite:///{}".format(str(p)), must_exist = True) for p in db_paths]
# tokenizer_path = pathlib.Path(FLAGS.tokenizer_path).resolve()
# if not tokenizer_path.exists():
# raise FileNotFoundError(tokenizer_path)
# tokenizer = tokenizers.TokenizerBase.FromFile(tokenizer_path)
merge_databases(dbs, out_db)
# modernize_samples_db(dbs[0], out_db)
# modernize_clgen_tokenizer(dbs[0], out_db, tokenizer)
# to_unique_samples(dbs[0], out_db)
# extract_features(dbs[0], out_db)
return
if __name__ == "__main__":
app.run(initMain)
sys.exit(0)
| 18,419 | 37.375 | 175 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/samplers/samplers.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Samplers for BenchPress language models.
A Sampler is an object which, when passed to a model's Sample() method,
determines the shape of the generated samples.
"""
import os
import time
import datetime
import typing
import pathlib
import pickle
from absl import flags
from sqlalchemy.ext import declarative
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import commit
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import benchmarks
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.proto import sampler_pb2
from deeplearning.benchpress.proto import internal_pb2
from deeplearning.benchpress.models import lm_data_generator
from deeplearning.benchpress.active_models import active_models
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import sqlutil
FLAGS = flags.FLAGS
Base = declarative.declarative_base()
def AssertConfigIsValid(config: sampler_pb2.Sampler) -> sampler_pb2.Sampler:
"""Assert that a sampler configuration contains no invalid values.
Args:
config: A sampler configuration proto.
Returns:
The sampler configuration proto.
Raises:
UserError: If there are configuration errors.
"""
try:
if config.HasField("start_text"):
pbutil.AssertFieldConstraint(
config,
"start_text",
lambda s: len(s),
"Sampler.start_text must be a string",
)
elif config.HasField("sample_corpus"):
if config.sample_corpus.HasField("corpus_config"):
if config.sample_corpus.corpus_config.HasField("normal"):
pbutil.AssertFieldIsSet(config.sample_corpus.corpus_config, "normal")
elif config.sample_corpus.corpus_config.HasField("online"):
pbutil.AssertFieldIsSet(config.sample_corpus.corpus_config, "online")
elif config.sample_corpus.corpus_config.HasField("active"):
pbutil.AssertFieldIsSet(config.sample_corpus.corpus_config.active, "active_search_depth")
pbutil.AssertFieldIsSet(config.sample_corpus.corpus_config.active, "active_search_width")
pbutil.AssertFieldConstraint(
config.sample_corpus.corpus_config.active,
"active_dropout_prob",
lambda x: x >= 0 and x <= 1,
"Active dropout rate must be a float between in range [0.0, 1.0]",
)
pbutil.AssertFieldConstraint(
config.sample_corpus.corpus_config.active,
"batch_size_per_feed",
lambda x : config.batch_size % x == 0,
"batch_size {} must be a multiple of batch_size_per_feed".format(
config.batch_size
)
)
pbutil.AssertFieldConstraint(
config.sample_corpus.corpus_config.active,
"feature_space",
lambda x : x in set(["HiddenState"] + list(extractor.extractors.keys())),
"feature_space can only be one of {}".format(', '.join(["HiddenState"] + list(extractor.extractors.keys())))
)
if config.sample_corpus.corpus_config.active.HasField("target"):
pbutil.AssertFieldConstraint(
config.sample_corpus.corpus_config.active,
"target",
lambda x : x in set(benchmarks.targets.keys()),
"target can only be one of {}".format(', '.join(list(benchmarks.targets.keys())))
)
elif config.sample_corpus.corpus_config.active.HasField("active_learner"):
active_models.AssertConfigIsValid(config.sample_corpus.corpus_config.active.active_learner)
else:
raise ValueError(config.sample_corpus.corpus_config.active)
else:
raise ValueError("Sampling type is undefined: {}".format(config.sample_corpus.corpus_config))
pbutil.AssertFieldIsSet(config.sample_corpus.corpus_config, "max_predictions_per_seq")
pbutil.AssertFieldIsSet(config.sample_corpus.corpus_config, "masked_lm_prob")
pbutil.AssertFieldIsSet(config.sample_corpus.corpus_config, "mask_technique")
if config.sample_corpus.corpus_config.HasField("mask"):
pbutil.AssertFieldIsSet(
config.sample_corpus.corpus_config.mask,
"random_placed_mask",
)
elif config.sample_corpus.corpus_config.HasField("hole"):
if config.sample_corpus.corpus_config.hole.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
config.sample_corpus.corpus_config.hole,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a hole's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
config.sample_corpus.corpus_config.hole,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if config.sample_corpus.corpus_config.hole.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
config.sample_corpus.corpus_config.hole.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
config.sample_corpus.corpus_config.hole.normal_distribution,
"variance",
)
elif not config.sample_corpus.corpus_config.hole.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
elif config.sample_corpus.corpus_config.HasField("mask_seq"):
if config.sample_corpus.corpus_config.mask_seq.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
config.sample_corpus.corpus_config.mask_seq,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a mask_seq's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
config.sample_corpus.corpus_config.mask_seq,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if config.sample_corpus.corpus_config.mask_seq.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
config.sample_corpus.corpus_config.mask_seq.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
config.sample_corpus.corpus_config.mask_seq.normal_distribution,
"variance",
)
elif not config.sample_corpus.corpus_config.mask_seq.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
else:
raise ValueError("sample_corpus has no corpus_config field.")
if config.sample_corpus.HasField("corpus"):
corpuses.AssertConfigIsValid(config.sample_corpus.corpus)
else:
pbutil.AssertFieldIsSet(
config.sample_corpus,
"start_text"
)
elif ((not config.HasField("train_set"))
and (not config.HasField("validation_set"))
and (not config.HasField("sample_set"))
and (not config.HasField("live_sampling"))):
raise ValueError(config)
pbutil.AssertFieldConstraint(
config, "batch_size", lambda x: 0 < x, "Sampler.batch_size must be > 0"
)
pbutil.AssertFieldConstraint(
config,
"sequence_length",
lambda x: 0 < x,
"Sampler.sequence_length must be > 0",
)
pbutil.AssertFieldConstraint(
config,
"temperature_micros",
lambda x: 0 < x,
"Sampler.temperature_micros must be > 0",
)
return config
except pbutil.ProtoValueError as e:
raise ValueError(e)
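# A minimal sketch of a config that passes AssertConfigIsValid; the concrete
# field values are illustrative only, not a recommended configuration.
def _example_minimal_sampler_config() -> sampler_pb2.Sampler:
  """Example only: a fixed start-text sampler with the mandatory scalar fields set."""
  config = sampler_pb2.Sampler(
    start_text = "kernel void A(global int* a){",
    batch_size = 32,
    sequence_length = 512,
    temperature_micros = 800000, # i.e. temperature 0.8
  )
  return AssertConfigIsValid(config)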
class TerminationCriterionBase(object):
"""Base class for TerminationCriterion objects.
A TerminationCriterion is an object with a single public function
SampleIsComplete(), which accepts as its sole argument a sample-in-progress,
and returns whether to stop sampling.
"""
def Specialize(self, tokenizer: tokenizers.TokenizerBase) -> None:
"""Specialize a termination criteria to a vocabulary.
This enables the termination criteria to set state specialized to a specific
encoding vocabulary. This is guaranteed to be called before
SampleIsComplete(), and ensures that the vocabulary used for all sample
arguments to SampleIsComplete() is from this vocabulary.
Args:
      tokenizer: A tokenizer to specialize to.
"""
pass
def SampleIsComplete(self, sample_in_progress: typing.List[str]) -> bool:
"""Determine whether to stop sampling.
Args:
sample_in_progress: A sample in progress, as a sequence of decoded tokens.
Returns:
True if the sample is "complete", else False to continue sampling.
"""
raise NotImplementedError("abstract class")
class MaxlenTerminationCriterion(TerminationCriterionBase):
"""A termination criterion which limits the maximum length of a sample."""
def __init__(self, config: sampler_pb2.MaxTokenLength):
try:
self.max_len = pbutil.AssertFieldConstraint(
config,
"maximum_tokens_in_sample",
lambda x: x > 1,
"MaxTokenLength.maximum_tokens_in_sample must be > 0",
)
except pbutil.ProtoValueError as e:
raise ValueError(e)
def SampleIsComplete(self, sample_in_progress: typing.List[str]) -> bool:
"""Determine whether to stop sampling."""
return len(sample_in_progress) >= self.max_len
class SymmetricalTokenDepthCriterion(TerminationCriterionBase):
"""A termination criterion which counts symmetrical token depth.
  This is a generalization of bracket (i.e. { }) depth counting for C-syntax
programming languages. When sampling to generate a C function, the sample
is not "started" until the first { token is reached, and it is complete once
the final } token has been emitted to close the function. In between those
two tokens, there may be additional { } characters which increase and decrease
the "depth" of the scope, respectively.
"""
def __init__(self, config: sampler_pb2.SymmetricalTokenDepth):
try:
self.left_token = pbutil.AssertFieldConstraint(
config,
"depth_increase_token",
lambda s: len(s) > 0,
"SymmetricalTokenDepth.depth_increase_token must be a string",
)
self.right_token = pbutil.AssertFieldConstraint(
config,
"depth_decrease_token",
lambda s: len(s) > 0,
"SymmetricalTokenDepth.depth_decrease_token must be a string",
)
except pbutil.ProtoValueError as e:
raise ValueError(e)
if self.left_token == self.right_token:
raise ValueError("SymmetricalTokenDepth tokens must be different")
def Specialize(self, tokenizer: tokenizers.TokenizerBase) -> None:
"""Specialize a termination criteria to a vocabulary.
This enables the termination criteria to set state specialized to a specific
encoding vocabulary. This is guaranteed to be called before
SampleIsComplete(), and ensures that the vocabulary used for all sample
arguments to SampleIsComplete() is from this vocabulary.
Args:
      tokenizer: A tokenizer to specialize to.
Raises:
InvalidSymtokTokens: If the depth tokens can't be encoded, or they encode
to more than one token.
"""
try:
left = tokenizer.TokenizeString(self.left_token)
right = tokenizer.TokenizeString(self.right_token)
if len(left) > 1 or len(right) > 1:
raise ValueError(
"Sampler symmetrical depth tokens do not encode to a single "
"token using the corpus vocabulary"
)
except ValueError:
raise ValueError(
"Sampler symmetrical depth tokens cannot be encoded using the "
"corpus vocabulary"
)
def SampleIsComplete(self, sample_in_progress: typing.List[str]) -> bool:
"""Determine whether to stop sampling."""
if len(sample_in_progress) == 0:
return False
if not sample_in_progress[-1] == self.right_token:
return False
return self.GetTokenDepth(sample_in_progress) == 0
def GetTokenDepth(self, sample_in_progress: typing.List[str]) -> int:
"""Calculate the symmetrical token depth.
    The symmetrical token depth is the difference between the number of left and
    right tokens seen so far. If a right token appears before any left token the
    depth is reported as 0 (so sampling aborts), and if no left token has been
    seen yet the depth is reported as -1 (the sample has not yet "started").
"""
left_token_count = sample_in_progress.count(self.left_token)
right_token_count = sample_in_progress.count(self.right_token)
    # We have descended into negative depth, so abort.
if right_token_count and not left_token_count:
return 0
# We haven't started balancing the tokens yet.
if not left_token_count:
return -1
return left_token_count - right_token_count
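# A behavioural sketch of the depth criterion; the token lists are illustrative
# and not produced by a real tokenizer.
def _example_symmetrical_depth_criterion() -> None:
  criterion = SymmetricalTokenDepthCriterion(
    sampler_pb2.SymmetricalTokenDepth(
      depth_increase_token = "{",
      depth_decrease_token = "}",
    )
  )
  # Not complete: the sample does not yet end with the closing brace.
  assert not criterion.SampleIsComplete(["void", "A", "(", ")", "{", "int", "x", ";"])
  # Complete: the last token is '}' and the brace depth returns to zero.
  assert criterion.SampleIsComplete(["void", "A", "(", ")", "{", "int", "x", ";", "}"])
  return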
def GetTerminationCriteria(
config: typing.List[sampler_pb2.SampleTerminationCriterion],
) -> typing.List[TerminationCriterionBase]:
"""Build a list of termination criteria from config protos.
Args:
config: A list of SampleTerminationCriterion protos.
Returns:
A list of TerminationCriterion instances.
Raises:
UserError: In case of invalid configs.
InternalError: If any of the termination criteria are unrecognized.
"""
terminators = []
for criterion in config:
if criterion.HasField("maxlen"):
terminators.append(MaxlenTerminationCriterion(criterion.maxlen))
elif criterion.HasField("symtok"):
terminators.append(SymmetricalTokenDepthCriterion(criterion.symtok))
else:
raise SystemError("Unknown Sampler.termination_criteria")
return terminators
class Sampler(object):
"""BenchPress sampler for models.
Please note sampler instances should be treated as immutable. Upon
instantiation, a sampler's properties are used to determine its hash. If you
modify a property after instantiation, the hash will be out of date, which
can lead to bad things happening.
"""
@property
def is_active(self):
if self.config.HasField("sample_corpus"):
return self.config.sample_corpus.corpus_config.HasField("active")
else:
return False
@property
def has_features(self):
return self.config.sample_corpus.corpus_config.active.feature_space != "HiddenState"
@property
def has_active_learning(self):
if not self.is_active:
return False
return self.config.sample_corpus.corpus_config.active.HasField("active_learner")
@property
def is_online(self):
if self.config.HasField("sample_corpus"):
return self.config.sample_corpus.corpus_config.HasField("online")
else:
return False
@property
def is_live(self):
return self.config.HasField("live_sampling")
@property
def isFixedStr(self):
if self.config.HasField("sample_corpus"):
return self.config.sample_corpus.HasField("start_text")
else:
return self.config.HasField("start_text") and not (
self.config.HasField("train_set") or
self.config.HasField("validation_set") or
self.config.HasField("sample_set") or
self.config.HasField("sample_corpus")
)
def __init__(self,
config : sampler_pb2.Sampler,
sample_db_name : str = "samples.db",
model_hash : str = None,
):
"""Instantiate a sampler.
Args:
config: A Sampler message.
Raises:
TypeError: If the config argument is not a Sampler proto.
UserError: If the config contains invalid values.
"""
if not isinstance(config, sampler_pb2.Sampler):
t = type(config).__name__
raise TypeError(f"Config must be a Sampler proto. Received: '{t}'")
self.config = sampler_pb2.Sampler()
self.config.CopyFrom(AssertConfigIsValid(config))
self.hash = self._ComputeHash(self.config, model_hash)
self.terminators = GetTerminationCriteria(self.config.termination_criteria)
if config.HasField("start_text"):
self.start_text = self.config.start_text
else:
self.start_text = ""
self.temperature = self.config.temperature_micros / 1e6
self.batch_size = self.config.batch_size
self.sequence_length = self.config.sequence_length
self.sample_db_name = sample_db_name
# Create the necessary cache directories.
if environment.WORLD_RANK == 0:
self.cache = cache.mkcache("sampler", self.hash)
self.cache.path.mkdir(exist_ok = True, parents = True)
else:
while not cache.cachepath("sampler", self.hash).exists():
time.sleep(0.5)
self.cache = cache.mkcache("sampler", self.hash)
self.samples_directory = self.cache.path / "samples"
if environment.WORLD_RANK == 0:
self.samples_directory.mkdir(exist_ok = True)
self.corpus_directory = None
self.sample_corpus = None
if self.config.HasField("sample_corpus"):
l.logger().warn("This path not having a model hash, prohibits the sampler with working with multiple models.")
self.corpus_directory = self.cache.path / "sample_corpus"
if environment.WORLD_RANK == 0:
self.corpus_directory.mkdir(exist_ok = True)
if self.config.sample_corpus.HasField("corpus"):
self.sample_corpus = corpuses.Corpus(self.config.sample_corpus.corpus)
self.sample_corpus.Create()
self.symlinkSampleCorpus(
pathlib.Path(self.sample_corpus.encoded.url[len("sqlite:///") :]).parent
)
text_data = [
self.sample_corpus.tokenizer.tokensToString(x) for x in self.sample_corpus.GetTrainingData()
]
else:
self.start_text = self.config.sample_corpus.start_text
text_data = [self.start_text]
# Text data is dumped in order to specialize with all different model tokenizers.
if environment.WORLD_RANK == 0:
with open(self.cache.path / "sample_corpus" / "text_corpus.pkl", 'wb') as outf:
pickle.dump(text_data, outf)
if self.has_active_learning:
self.active_learner = active_models.Model(
config.sample_corpus.corpus_config.active.active_learner,
self.cache.path,
)
if config.sample_corpus.corpus_config.active.feature_space != self.active_learner.downstream_task.feature_space:
raise ValueError("Feature space {} does not match downstream task {}".format(
config.sample_corpus.corpus_config.active.feature_space,
self.active_learner.downstream_task
)
)
if environment.WORLD_RANK == 0:
meta = internal_pb2.SamplerMeta()
meta.config.CopyFrom(self.config)
pbutil.ToFile(meta, path = self.cache.path / "META.pbtxt")
commit.saveCommit(self.cache.path)
if self.config.HasField("description"):
with open(self.cache.path / self.config.description, 'w') as outf:
outf.write("")
# Set in Specialize().
self.encoded_start_text = None
self.tokenized_start_text = None
def setStartText(self, start_text: str):
"""
    Assign the current start_text used to sample. This function lazily assigns self.start_text
    and is used when sampling from a tf_record dataset instead of a simple fixed string. It is
    used in conjunction with the BERT data generator.
"""
self.start_text = start_text
return
def Create(self) -> None:
if not self.has_active_learning:
return None
else:
self.active_learner.Train()
return
def Specialize(self, tokenizer: tokenizers.TokenizerBase) -> None:
"""Specialize a sampler a vocabulary.
This enables the sampler to set state specialized to a specific encoding
vocabulary. This is guaranteed to be called before SampleIsComplete(), and
ensures that the vocabulary used for all sample arguments to
SampleIsComplete() is from this vocabulary.
Args:
      tokenizer: A tokenizer to specialize to.
Raises:
InvalidStartText: If the start_text cannot be encoded using the
vocabulary.
UserError: In case the sampler cannot be specialized to this vocabulary.
"""
try:
self.encoded_start_text = tokenizer.TokenizeString(self.start_text)
self.tokenized_start_text = tokenizer.AtomizeString(self.start_text)
except ValueError:
raise ValueError(
"Sampler start text cannot be encoded using the corpus vocabulary: "
f"'{self.start_text}'"
)
if len(self.encoded_start_text) > self.sequence_length:
raise ValueError(
"Encoded sampler start text must be less than sampler sequence "
f"length. Sampler sequence length={self.sequence_length}, encoded "
f"start text length={len(self.encoded_start_text)}"
)
l.logger().info("Sampling: '{}'\n".format(self.start_text))
[terminator.Specialize(tokenizer) for terminator in self.terminators]
def symlinkModelDB(self,
db_path : pathlib.Path,
model_hash: int,
) -> None:
"""
Create symbolic link entry in sampler workspace. In one
model's workspace, there is one sampler.db for each different
sampler. Each sampler holds a directory of all models it has
sampled with symbolic links created in this function.
"""
if environment.WORLD_RANK == 0:
assert os.path.isdir(db_path), "Parent path of database is not an existing path!"
(self.samples_directory / model_hash).mkdir(exist_ok = True)
for file in db_path.iterdir():
symlink = self.samples_directory / model_hash / file.name
if not symlink.is_symlink():
os.symlink(
os.path.relpath(
db_path / file.name,
self.samples_directory / model_hash
),
symlink
)
distrib.barrier()
return
def symlinkSampleCorpus(self,
corpus_path : pathlib.Path,
) -> None:
"""
When sample corpus has been selected, creates a symlink
of the sampled encoded corpus to the dataset 'sample_corpus'
directory of the sampler.
"""
assert os.path.isdir(corpus_path), "Parent path of database is not an existing path!"
symlink = self.corpus_directory / "corpus"
if not symlink.is_symlink():
os.symlink(
os.path.relpath(
corpus_path,
self.corpus_directory,
),
symlink,
)
return
def SampleIsComplete(self, sample_in_progress: typing.List[str]) -> bool:
"""Determine whether to stop sampling.
Args:
sample_in_progress: A sample in progress, as a sequence of decoded tokens.
Returns:
True if the sample is "complete", else False to continue sampling.
"""
return any(t.SampleIsComplete(sample_in_progress) for t in self.terminators)
@staticmethod
def _ComputeHash(config: sampler_pb2.Sampler, model_hash: str = None) -> str:
"""Compute sampler hash.
The hash is computed from the serialized representation of the config
proto.
"""
return crypto.sha1(config.SerializeToString() + (model_hash.encode('utf-8') if model_hash is not None else "".encode('utf-8')))
def __eq__(self, rhs) -> bool:
if not isinstance(rhs, Sampler):
return False
return rhs.hash == self.hash
def __ne__(self, rhs) -> bool:
return not self.__eq__(rhs)
| 24,541 | 37.527473 | 131 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/samplers/sample_observers.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the SampleObserver interface and concrete subclasses."""
import pathlib
from deeplearning.benchpress.proto import model_pb2
from absl import flags
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import fs
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.features import extractor
FLAGS = flags.FLAGS
class SampleObserver(object):
"""An observer that is notified when new samples are produced.
During sampling of a model, sample observers are notified for each new
sample produced. Additionally, sample observers determine when to terminate
sampling.
"""
def Specialize(self, model, sampler) -> None:
"""Specialize the sample observer to a model and sampler combination.
This enables the observer to set state specialized to a specific model and
sampler. This is guaranteed to be called before OnSample(), and
    sets the model and sampler for each subsequent call to OnSample(),
until the next call to Specialize().
Subclasses do not need to override this method.
Args:
model: The model that is being sampled.
sampler: The sampler that is being used.
"""
pass
def OnSample(self, sample: model_pb2.Sample) -> bool:
"""Sample notification callback.
Args:
sample: The newly created sample message.
Returns:
True if sampling should continue, else False. Batching of samples means
that returning False does not guarantee that sampling will terminate
immediately, and OnSample() may be called again.
"""
raise NotImplementedError("abstract class")
def endSample(self) -> None:
pass
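# A minimal sketch of a custom observer: it accumulates the token count of every
# sample it is notified about and never asks for sampling to stop.
class _ExampleTokenCountObserver(SampleObserver):
  """Example only; not used anywhere in the pipeline."""
  def __init__(self):
    self.total_tokens = 0
  def OnSample(self, sample: model_pb2.Sample) -> bool:
    self.total_tokens += sample.num_tokens
    return True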
class MaxSampleCountObserver(SampleObserver):
"""An observer that terminates sampling after a finite number of samples."""
def __init__(self, min_sample_count: int):
if min_sample_count <= 0:
raise ValueError(
f"min_sample_count must be >= 1. Received: {min_sample_count}"
)
self._sample_count = 0
self._min_sample_count = min_sample_count
def OnSample(self, sample: model_pb2.Sample) -> bool:
"""Sample receive callback. Returns True if sampling should continue."""
self._sample_count += 1
return self._sample_count < self._min_sample_count
class SaveSampleTextObserver(SampleObserver):
"""An observer that creates a file of the sample text for each sample."""
def __init__(self, path: pathlib.Path):
self.path = pathlib.Path(path)
self.path.mkdir(parents=True, exist_ok=True)
def OnSample(self, sample: model_pb2.Sample) -> bool:
"""Sample receive callback. Returns True if sampling should continue."""
sample_id = crypto.sha256_str(sample.text)
path = self.path / f"{sample_id}.txt"
fs.Write(path, sample.text.encode("utf-8"))
return True
class PrintSampleObserver(SampleObserver):
"""An observer that prints the text of each sample that is generated."""
def OnSample(self, sample: model_pb2.Sample) -> bool:
"""Sample receive callback. Returns True if sampling should continue."""
print(f"=== BENCHPRESS SAMPLE ===\n\n{sample.text}\n")
return True
class InMemorySampleSaver(SampleObserver):
"""An observer that saves all samples in-memory."""
def __init__(self):
self.samples = []
def OnSample(self, sample: model_pb2.Sample) -> bool:
"""Sample receive callback. Returns True if sampling should continue."""
self.samples.append(sample)
return True
class SamplesDatabaseObserver(SampleObserver):
"""A sample observer that imports samples to a database.
  The observer buffers the records that it receives and commits them to the
database in batches.
"""
def __init__(
self,
path: pathlib.Path,
must_exist: bool = False,
flush_secs: int = 30,
plot_sample_status = False,
commit_sample_frequency: int = 1024,
):
self.db = samples_database.SamplesDatabase("sqlite:///{}".format(str(path)), must_exist = must_exist)
self.sample_id = self.db.count
self.visited = set(self.db.get_hash_entries)
self.flush_queue = []
self.plot_sample_status = plot_sample_status
if self.plot_sample_status:
self.saturation_monitor = monitors.CumulativeHistMonitor(path.parent, "cumulative_sample_count")
def OnSample(self, sample: model_pb2.Sample) -> bool:
"""Sample receive callback."""
# with self.db.get_session(commit = True) as session:
db_sample = samples_database.Sample(
**samples_database.Sample.FromProto(self.sample_id + len(self.flush_queue), sample)
)
if db_sample.sha256 not in self.visited:
self.flush_queue.append(db_sample)
self.visited.add(db_sample.sha256)
if len(self.flush_queue) >= 4096:
with self.db.get_session(commit = True) as s:
for sample in self.flush_queue:
s.add(sample)
if self.plot_sample_status:
self.saturation_monitor.register(sample.id)
self.saturation_monitor.plot()
s.commit()
self.sample_id += len(self.flush_queue)
self.flush_queue = []
return True
def endSample(self) -> None:
"""Write final summed data about sampling session."""
## Flush final queue, if exists.
with self.db.get_session(commit = True) as s:
for sample in self.flush_queue:
s.add(sample)
if self.plot_sample_status:
self.saturation_monitor.register(sample.id)
self.saturation_monitor.plot()
s.commit()
self.sample_id += len(self.flush_queue)
self.flush_queue = []
# Create feature vector plots
db_path = pathlib.Path(self.db.url[len("sqlite:///"):]).parent
# feature_monitor = monitors.CategoricalDistribMonitor(db_path, "samples_feature_vector_distribution")
feature_monitors = {
ftype: monitors.CategoricalDistribMonitor(
db_path,
"{}_distribution".format(ftype)
)
for ftype in extractor.extractors.keys()
}
# for sample in self.db.correct_samples:
# if sample.feature_vector:
# feature_monitor.register({l.split(':')[0:-1]: float(l.split(':')[-1]) for l in sample.feature_vector.split('\n')}) # This used to work only for Grewe. Needs expanding, see lm_data_generator.
# feature_monitor.plot()
for sample in self.db.correct_samples:
if sample.feature_vector:
features = extractor.RawToDictFeats(sample.feature_vector)
for ftype, fvector in features.items():
feature_monitors[ftype].register(fvector)
for mon in feature_monitors.values():
mon.plot()
with self.db.get_session() as session:
compiled_count = session.query(samples_database.Sample.compile_status).filter_by(compile_status = 1).count()
try:
r = [
'compilation rate: {}'.format(compiled_count / self.sample_id),
'total compilable samples: {}'.format(compiled_count),
'average feature vector: \n{}'.format('\n'.join(["{}:\n{}".format(ft, fm.getStrData()) for ft, fm in feature_monitors.items()]))
]
except ZeroDivisionError:
r = [
'compilation rate: +/-inf',
'total compilable samples: {}'.format(compiled_count),
'average feature vector: \n{}'.format('\n'.join(["{}:\n{}".format(ft, fm.getStrData()) for ft, fm in feature_monitors.items()]))
]
with self.db.get_session(commit = True) as session:
exists = session.query(samples_database.SampleResults.key).filter_by(key = "meta").scalar() is not None
if exists:
entry = session.query(samples_database.SampleResults ).filter_by(key = "meta").first()
entry.results = "\n".join(r)
else:
session.add(samples_database.SampleResults(key = "meta", results = "\n".join(r)))
return
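# Construction sketch (the database path is a placeholder):
#   observer = SamplesDatabaseObserver(pathlib.Path("./samples.db"), plot_sample_status = False)
# Each OnSample() call buffers a deduplicated samples_database.Sample row and flushes
# to the database in batches of 4096; endSample() flushes the remainder, plots the
# per-feature-space distributions and writes a "meta" results row.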
class LegacySampleCacheObserver(SampleObserver):
"""Backwards compatability implementation of the old sample caching behavior.
In previous versions of BenchPress, model sampling would silently (and always)
create sample protobufs in the sampler cache, located at:
BENCHPRESS_CACHE/models/MODEL/samples/SAMPLER
This sample observer provides equivalent behavior.
"""
def __init__(self):
self.cache_path = None
def Specialize(self, model, sampler) -> None:
"""Specialize observer to a model and sampler combination."""
self.cache_path = model.SamplerCache(sampler)
self.cache_path.mkdir(exist_ok=True)
def OnSample(self, sample: model_pb2.Sample) -> bool:
"""Sample receive callback. Returns True if sampling should continue."""
sample_id = crypto.sha256_str(sample.text)
sample_path = self.cache_path / f"{sample_id}.pbtxt"
pbutil.ToFile(sample, sample_path)
return True
| 9,558 | 36.486275 | 201 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/clsmith.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluation script for clsmith mutation program.
"""
import typing
import tempfile
import subprocess
import multiprocessing
import pathlib
import json
import datetime
import sqlite3
import functools
import os
import tqdm
import math
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from absl import flags
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.preprocessors import c
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.experiments import public
FLAGS = flags.FLAGS
CLSMITH = environment.CLSMITH
CLSMITH_INCLUDE = environment.CLSMITH_INCLUDE
Base = declarative.declarative_base()
class CLSmithSample(Base, sqlutil.ProtoBackedMixin):
"""A database row representing a BenchPress sample.
This is the clgen.CLSmithSample protocol buffer in SQL format.
"""
__tablename__ = "clsmith_samples"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# unique hash of sample text
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
# String-format generated kernel
sample : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# String-format generated header file
include : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# encoded sample text
encoded_sample : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Whether the generated sample compiles or not.
compile_status : bool = sql.Column(sql.Boolean, nullable = False)
# CLSmithSample's vector of features.
feature_vector : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Length of total sequence in number of tokens
num_tokens : int = sql.Column(sql.Integer, nullable = False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
id : int,
sample : str,
include : str,
encoded_sample : str,
compile_status : bool,
feature_vector : str,
num_tokens : int,
) -> "CLSmithSample":
"""
Do you want to use CLSmithDatabase as a means to store only code
without much fuss ? This function is for you!
"""
return CLSmithSample(**{
"id" : id,
"sha256" : crypto.sha256_str(sample),
"sample" : sample,
"include" : include,
"encoded_sample" : encoded_sample,
"compile_status" : compile_status,
"feature_vector" : feature_vector,
"num_tokens" : num_tokens,
"date_added" : datetime.datetime.utcnow(),
})
class CLSmithDatabase(sqlutil.Database):
"""A database of BenchPress samples."""
def __init__(self, url: str, must_exist: bool = False):
super(CLSmithDatabase, self).__init__(url, Base, must_exist = must_exist)
@property
def count(self):
"""Number of samples in DB."""
with self.Session() as s:
count = s.query(CLSmithSample).count()
return count
def get_features(self, sequence_length: int = None) -> typing.List[str]:
"""
Get feature vectors of training instances within the specified sequence length.
"""
with self.Session() as session:
if sequence_length:
return [x.feature_vector for x in session.query(CLSmithSample).filter(CLSmithSample.num_tokens <= sequence_length).limit(100000).offset(0).all()]
else:
return [x.feature_vector for x in session.query(CLSmithSample).limit(100000).offset(0).all()]
def get_data_features(self, tokenizer, sequence_length: int = None) -> typing.List[typing.Tuple[str, str, str]]:
"""
Collect list of source with features
"""
with self.Session() as session:
if sequence_length:
return [(x.sample, x.include, x.feature_vector) for x in session.query(CLSmithSample).filter(CLSmithSample.num_tokens <= sequence_length).limit(100000).offset(0).all()]
else:
return [(x.sample, x.include, x.feature_vector) for x in session.query(CLSmithSample).limit(100000).offset(0).all()]
def execute_clsmith(idx: int, tokenizer, timeout_seconds: int = 15) -> typing.List[CLSmithSample]:
"""
Execute clsmith and return sample.
"""
try:
tdir = pathlib.Path(FLAGS.local_filesystem).resolve()
except Exception:
tdir = None
extra_args = ["-include{}".format(pathlib.Path(CLSMITH_INCLUDE) / "CLSmith.h")]
with tempfile.NamedTemporaryFile("w", prefix = "clsmith_", suffix = ".cl", dir = tdir) as f:
cmd =[
"timeout",
"-s9",
str(timeout_seconds),
CLSMITH,
"-o",
str(f.name)
]
process = subprocess.Popen(
cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
universal_newlines = True,
)
try:
stdout, stderr = process.communicate()
except TimeoutError:
return None
contentfile = open(str(f.name), 'r').read()
try:
ks = opencl.ExtractSingleKernelsHeaders(
opencl.StripDoubleUnderscorePrefixes(
c.StripIncludes(contentfile),
)
)
except ValueError as e:
l.logger().error(contentfile)
raise e
samples = []
for kernel, include in ks:
encoded_sample = tokenizer.AtomizeString(kernel)
try:
stdout = opencl.Compile(kernel, header_file = include, extra_args = extra_args)
compile_status = True
except ValueError as e:
stdout = str(e)
compile_status = False
samples.append(
CLSmithSample.FromArgs(
id = idx,
sample = stdout,
include = include,
encoded_sample = ','.join(encoded_sample),
compile_status = compile_status,
feature_vector = extractor.ExtractRawFeatures(kernel, header_file = include, extra_args = extra_args),
num_tokens = len(encoded_sample)
)
)
return samples
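# The subprocess launched above is roughly equivalent to the shell command
#   timeout -s9 15 $CLSMITH -o /tmp/clsmith_XXXX.cl
# after which the generated file is split into (kernel, header) pairs and each kernel
# is compiled and feature-extracted with "-include $CLSMITH_INCLUDE/CLSmith.h".
# Direct call sketch (the tokenizer must be supplied by the caller):
#   samples = execute_clsmith(0, tokenizer, timeout_seconds = 15)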
@public.evaluator
def GenerateCLSmith(**kwargs) -> None:
"""
  Generate kernels with the CLSmith random program generator and store them,
  along with their compile status and feature vectors, into a CLSmithDatabase.
"""
clsmith_path = kwargs.get('clsmith_path', '')
tokenizer = kwargs.get('tokenizer')
if not pathlib.Path(CLSMITH).exists():
raise FileNotFoundError("CLSmith executable not found: {}".format(CLSMITH))
# Initialize clsmith database
clsmith_db = CLSmithDatabase(url = "sqlite:///{}".format(str(pathlib.Path(clsmith_path).resolve())), must_exist = False)
  chunk_size = 1000
  it = 0
  while True:
f = functools.partial(execute_clsmith, tokenizer = tokenizer, timeout_seconds = 15)
pool = multiprocessing.Pool()
try:
entries = []
for samples in tqdm.tqdm(pool.imap_unordered(f, range(chunk_size)), total = chunk_size, desc = "Generate CLSmith Samples {}".format(it), leave = False):
if samples:
for sample in samples:
entries.append(sample)
db_idx = clsmith_db.count
with clsmith_db.Session(commit = True) as s:
for entry in entries:
exists = s.query(CLSmithSample.sha256).filter_by(sha256 = entry.sha256).scalar() is not None
if not exists:
entry.id = db_idx
s.add(entry)
db_idx += 1
s.commit()
except KeyboardInterrupt as e:
pool.terminate()
break
except Exception as e:
l.logger().error(e)
pool.terminate()
raise e
pool.close()
it += 1
return
| 8,518 | 33.630081 | 176 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/workers.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper module the provides range of worker functions for experiments.
"""
import typing
import pathlib
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import feature_sampler
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import logging as l
def ContentHash(db_feat: typing.Tuple[str, str]) -> typing.Tuple[str, typing.Dict[str, float]]:
"""
  Multiprocessing worker that computes the content hash of a file and returns it
  together with the file's feature dictionary.
"""
if len(db_feat) == 2:
src, feats = db_feat
include = None
else:
    src, include, feats = db_feat
try:
return opencl.ContentHash(src), extractor.RawToDictFeats(feats)
except Exception as e:
l.logger().warn(e)
return None
def ContentFeat(db_feat: typing.Tuple[str, str]) -> typing.Dict[str, float]:
"""
  Multiprocessing worker that extracts the feature dictionary of a file
  and returns it.
"""
if len(db_feat) == 2:
_, feats = db_feat
else:
_, _, feats = db_feat
try:
return extractor.RawToDictFeats(feats)
except Exception as e:
l.logger().warn(e)
return None
def ExtractAndCalculate(src_incl : typing.Tuple[str, str],
target_features : typing.Dict[str, float],
feature_space : str
) -> typing.Tuple[str, str, float]:
"""
Extract features for source code and calculate distance from target.
Returns:
Tuple of source code with distance.
"""
src, incl = src_incl
f = extractor.ExtractFeatures(src, [feature_space], header_file = incl, extra_args = ["-include{}".format(pathlib.Path(environment.CLSMITH_INCLUDE) / "CLSmith.h")] if incl else [""])
if feature_space in f and f[feature_space]:
return src, incl, feature_sampler.calculate_distance(f[feature_space], target_features, feature_space)
return None
def IRExtractAndCalculate(bytecode : str,
target_features : typing.Dict[str, float],
feature_space : str
) -> typing.Tuple[str, str, float]:
"""
Extract features for source code and calculate distance from target.
Returns:
Tuple of source code with distance.
"""
f = extractor.ExtractIRFeatures(bytecode, [feature_space])
if feature_space in f and f[feature_space]:
return bytecode, "", feature_sampler.calculate_distance(f[feature_space], target_features, feature_space)
return None
def FeatureExtractor(src_incl: typing.Tuple[str, str]) -> typing.Tuple[str, str, str]:
"""
Extracts Raw features for all feat spaces and returns tuple of source and features.
"""
src, incl = src_incl
try:
return src, incl, extractor.ExtractRawFeatures(src, header_file = incl, extra_args = ["-include{}".format(pathlib.Path(environment.CLSMITH_INCLUDE) / "CLSmith.h")] if incl else [""])
except ValueError:
return src, incl, ""
def IRFeatureExtractor(bytecode: str) -> typing.Tuple[str, str, str]:
"""
Extracts Raw features for all feat spaces and returns tuple of source and features.
"""
try:
return bytecode, "", extractor.ExtractIRRawFeatures(bytecode)
except ValueError:
return bytecode, "", ""
def SortedDistances(data: typing.List[typing.Tuple[str, str, typing.Dict[str, float]]],
target_features: typing.Dict[str, float],
feature_space: str
) -> typing.List[float]:
"""
Return list of euclidean distances from target features in ascending order.
"""
return sorted([feature_sampler.calculate_distance(dp, target_features, feature_space) for _, _, dp in data])
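# An illustrative sketch; the feature-space name and feature keys below are
# assumptions, and the concrete distances depend on feature_sampler.calculate_distance.
def _example_sorted_distances() -> typing.List[float]:
  target = {"comp": 4.0, "mem": 2.0}
  data = [
    ("kernel void A(){}", "", {"comp": 4.0, "mem": 2.0}), # identical to the target
    ("kernel void B(){}", "", {"comp": 9.0, "mem": 7.0}), # further away
  ]
  return SortedDistances(data, target, "GreweFeatures")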
def SortedSrcDistances(data: typing.List[typing.Tuple[str, typing.Dict[str, float]]],
target_features: typing.Dict[str, float],
feature_space: str
) -> typing.List[typing.Tuple[str, str, float]]:
"""
Return list of pairs of euclidean distances from target features with source code in ascending order.
"""
return sorted([(src, include, feature_sampler.calculate_distance(dp, target_features, feature_space)) for src, include, dp in data], key = lambda x: x[2])
def SortedSrcFeatsDistances(data: typing.List[typing.Tuple[str, typing.Dict[str, float]]],
target_features: typing.Dict[str, float],
feature_space: str
) -> typing.List[typing.Tuple[str, str, typing.Dict[str, float], float]]:
"""
Return list of pairs of euclidean distances from target features with source code and features in ascending order.
"""
return sorted([(src, include, dp, feature_sampler.calculate_distance(dp, target_features, feature_space)) for src, include, dp in data], key = lambda x: x[3])
| 5,437 | 39.58209 | 186 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/public.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the decorator for marking an evaluator function."""
import typing
EvaluatorFunction = typing.Callable[..., None]
def evaluator(func: EvaluatorFunction) -> EvaluatorFunction:
"""A decorator which marks a function as an evaluator.
Args:
    func: The evaluator function to decorate.
  Returns:
    The decorated evaluator function.
  Raises:
    InternalError: If the function being wrapped does not have the signature
      'def func(**kwargs) -> None:' (only the '-> None' return annotation is checked).
"""
type_hints = typing.get_type_hints(func)
if not type_hints == {"return": type(None)}:
raise SystemError(
f"Preprocessor {func.__name__} does not have signature "
f'"def {func.__name__}(text: str) -> str".'
f"or"
f'"def {func.__name__}(text: str) -> typing.List[str]".'
)
func.__dict__["is_evaluator"] = True
return func
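# Declaration sketch; mirrors how concrete evaluators (e.g. GenerateCLSmith in
# experiments.clsmith) are decorated elsewhere in this package:
#   @evaluator
#   def MyEvaluator(**kwargs) -> None:
#     """Consume the db groups / targets passed through kwargs and report results."""
#     return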
| 1,462 | 33.023256 | 76 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/evaluators.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluators - result fetchers for samples across different techniques.
"""
import typing
import sys
import pathlib
import tqdm
import multiprocessing
from deeplearning.benchpress.proto import evaluator_pb2
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import feature_sampler
from deeplearning.benchpress.features import evaluate_cand_database
from deeplearning.benchpress.features import active_feed_database
from deeplearning.benchpress.corpuses import benchmarks
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.experiments import log_file
from deeplearning.benchpress.experiments import benchmark_analysis
from deeplearning.benchpress.experiments import distance_score
from deeplearning.benchpress.experiments import comp_vs_mem
from deeplearning.benchpress.experiments import cldrive
from deeplearning.benchpress.experiments import clsmith
from deeplearning.benchpress.experiments import mutec
from deeplearning.benchpress.experiments import srciror
from deeplearning.benchpress.experiments import workers
from deeplearning.benchpress.experiments.grewe import api as grewe_api
from deeplearning.benchpress.experiments.turing import analysis
from deeplearning.benchpress.util import logging as l
from absl import app, flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"evaluator_config",
"",
"Set path to evaluator config file",
)
class DBGroup(object):
"""
Class representation of a group of databases evaluated.
"""
@property
def get_data(self) -> typing.List[str]:
"""
Get concatenated data of all databases.
"""
if self.data:
return self.data
else:
self.data = []
for db in self.databases:
if self.db_type == encoded.EncodedContentFiles or self.db_type == clsmith.CLSmithDatabase:
self.data += db.get_data(self.size_limit)
else:
self.data += db.get_data
return self.data
def __init__(self, group_name: str, db_type: str, databases: typing.List[pathlib.Path], tokenizer = None, size_limit: int = None):
self.group_name = group_name
self.db_type = {
"SamplesDatabase" : samples_database.SamplesDatabase,
"ActiveFeedDatabase" : active_feed_database.ActiveFeedDatabase,
"SearchCandidateDatabase" : evaluate_cand_database.SearchCandidateDatabase,
"EncodedContentFiles" : encoded.EncodedContentFiles,
"CLSmithDatabase" : clsmith.CLSmithDatabase,
}[db_type]
self.databases = [self.db_type("sqlite:///{}".format(pathlib.Path(p).resolve()), must_exist = True) for p in databases]
self.features = {ext: None for ext in extractor.extractors.keys()}
self.data_features = {ext: None for ext in extractor.extractors.keys()}
self.unique_data_features = {ext: None for ext in extractor.extractors.keys()}
self.tokenizer = tokenizer
self.size_limit = size_limit
self.data = None
return
def get_features(self, feature_space: str) -> typing.List[typing.Dict[str, float]]:
"""
    Return the features for a specific feature space, computing and caching them on first use.
"""
if not self.features[feature_space]:
self.features[feature_space] = []
for db in self.databases:
db_feats = db.get_features(self.tokenizer, self.size_limit) if (self.db_type == encoded.EncodedContentFiles or self.db_type == clsmith.CLSmithDatabase) else db.get_features
for x in db_feats:
try:
feats = extractor.RawToDictFeats(x)
          except Exception as e:
            l.logger().warn(x)
            continue
if feature_space in feats and feats[feature_space]:
self.features[feature_space].append(feats[feature_space])
return self.features[feature_space]
def get_data_features(self, feature_space: str, use_mp = True, target_name: str = None) -> typing.List[typing.Tuple[str, typing.Dict[str, float]]]:
"""
    Return (source, include, features) tuples for a feature space, computing and caching them on first use.
"""
if not self.data_features[feature_space] or target_name is not None:
self.data_features[feature_space] = []
for db in self.databases:
if self.db_type in {encoded.EncodedContentFiles, clsmith.CLSmithDatabase}:
db_feats = db.get_data_features(self.tokenizer, self.size_limit)
elif self.db_type == active_feed_database.ActiveFeedDatabase:
db_feats = db.get_data_features(target_name)
else:
db_feats = db.get_data_features
if use_mp:
try:
pool = multiprocessing.Pool()
            for inp, feats in tqdm.tqdm(zip(db_feats, pool.imap(workers.ContentFeat, db_feats)), total = len(db_feats), desc = "{} data".format(self.group_name)): # ordered imap keeps results aligned with db_feats
if len(inp) == 2:
src, _ = inp
include = ""
else:
src, include, _ = inp
if feature_space in feats and feats[feature_space]:
self.data_features[feature_space].append((src, include, feats[feature_space]))
pool.close()
except Exception as e:
l.logger().error(e)
pool.terminate()
raise e
else:
for inp in tqdm.tqdm(db_feats, total = len(db_feats), desc = "{} data".format(self.group_name)):
feats = workers.ContentFeat(inp)
if len(inp) == 2:
src, _ = inp
include = ""
else:
src, include, _ = inp
if feature_space in feats and feats[feature_space]:
self.data_features[feature_space].append((src, include, feats[feature_space]))
return self.data_features[feature_space]
def get_unique_data_features(self, feature_space: str, use_mp = True) -> typing.List[typing.Tuple[str, typing.Dict[str, float]]]:
"""
    Return unique (source, include, features) tuples for a feature space, computing and caching them on first use.
"""
if not self.unique_data_features[feature_space]:
self.unique_data_features[feature_space] = []
visited = set()
for db in self.databases:
db_feats = db.get_data_features(self.tokenizer, self.size_limit) if (self.db_type == encoded.EncodedContentFiles or self.db_type == clsmith.CLSmithDatabase) else db.get_data_features
if use_mp:
try:
pool = multiprocessing.Pool()
            for inp, (sha, feats) in tqdm.tqdm(zip(db_feats, pool.imap(workers.ContentHash, db_feats)), total = len(db_feats), desc = "{} unique data".format(self.group_name)): # ordered imap keeps results aligned with db_feats
if len(inp) == 2:
src, _ = inp
include = ""
else:
src, include, _ = inp
if sha not in visited:
visited.add(sha)
if feature_space in feats and feats[feature_space]:
self.unique_data_features[feature_space].append((src, include, feats[feature_space]))
except Exception as e:
l.logger().error(e)
pool.terminate()
raise e
pool.close()
else:
for inp in db_feats:
sha, feats = workers.ContentHash(inp)
if len(inp) == 2:
src, _ = inp
include = ""
else:
src, include, _ = inp
if sha not in visited:
visited.add(sha)
if feature_space in feats and feats[feature_space]:
self.unique_data_features[feature_space].append((src, include, feats[feature_space]))
return self.unique_data_features[feature_space]
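# Construction sketch; the group name and database path are placeholders.
def _example_db_group() -> DBGroup:
  """Example only: wrap a single SamplesDatabase file into an evaluation group."""
  return DBGroup(
    group_name = "BenchPress",
    db_type = "SamplesDatabase",
    databases = [pathlib.Path("./samples.db")],
  )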
class Benchmark(typing.NamedTuple):
path : pathlib.Path
name : str
full_name : str
contents : str
features : typing.Dict[str, float]
class TargetBenchmarks(object):
"""
Class representation of target benchmarks.
"""
@classmethod
def shorten_benchmark_name(cls, benchmark_name: str) -> str:
"""
Pretty-printing rules for benchmark names.
"""
return benchmark_name.replace(
".cl", ""
).replace(
"_kernels", ""
).replace(
"_kernel", ""
).replace(
"kernel_", ""
).replace(
"particle", "prtcl"
).replace(
"1024", ""
).replace(
"track_", ""
).replace(
"_opencl", ""
)
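  # For example, "particle_kernel.cl" -> "prtcl" and "track_ellipse_kernel.cl" -> "ellipse",
  # following the replacement chain above.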
def __init__(self, target: str):
self.target = target
self.benchmark_cfs = benchmarks.yield_cl_kernels(pathlib.Path(benchmarks.targets[self.target]).resolve())
self.benchmarks = {ext: [] for ext in extractor.extractors.keys()}
l.logger().info("Loaded {} {} benchmarks".format(len(self.benchmark_cfs), self.target))
return
def get_benchmarks(self, feature_space: str, reduced_git_corpus = None):
"""
Get or set and get benchmarks with their features for a feature space.
"""
self.benchmarks = {ext: [] for ext in extractor.extractors.keys()}
for p, k, h in self.benchmark_cfs:
features = extractor.ExtractFeatures(k, [feature_space], header_file = h, use_aux_headers = False)
if features[feature_space]:
if reduced_git_corpus:
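          # Drop benchmarks that already have an exact feature match (distance 0) in the reduced GitHub corpus.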
closest_git = sorted(
[
(cf, feature_sampler.calculate_distance(fts, features[feature_space], feature_space))
for cf, _, fts in reduced_git_corpus
], key = lambda x: x[1])[0]
if closest_git[1] == 0:
continue
## Benchmark name shortener.
full_name = p.name
benchmark_name = self.shorten_benchmark_name(p.name)
self.benchmarks[feature_space].append(
Benchmark(
p,
benchmark_name,
full_name,
k,
features[feature_space],
)
)
self.benchmarks[feature_space] = benchmarks.resolve_benchmark_names(self.benchmarks[feature_space])
l.logger().info("Extracted features for {} {} benchmarks".format(len(self.benchmarks[feature_space]), self.target))
return self.benchmarks[feature_space]
def AssertIfValid(config: evaluator_pb2.Evaluation):
"""
Parse config file and check for validity.
"""
pathlib.Path(config.workspace).resolve().mkdir(exist_ok = True, parents = True)
for ev in config.evaluator:
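    # Each evaluator entry must set one of the specialized fields checked below; otherwise a ValueError is raised.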
if ev.HasField("k_average_score"):
### KAverageScore
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
for dbs in ev.k_average_score.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
if dbs.db_type in {"EncodedContentFiles", "CLSmithDatabase"}:
pbutil.AssertFieldIsSet("tokenizer")
# Specialized fields.
pbutil.AssertFieldConstraint(
ev.k_average_score,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.k_average_score.target),
)
pbutil.AssertFieldIsSet(ev.k_average_score, "feature_space")
pbutil.AssertFieldConstraint(
ev.k_average_score,
"top_k",
lambda x: x > 0,
"top-K factor must be positive",
)
elif ev.HasField("min_score"):
### MinScore
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
for dbs in ev.min_score.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
if dbs.db_type in {"EncodedContentFiles", "CLSmithDatabase"}:
pbutil.AssertFieldIsSet("tokenizer")
# Specialized fields.
pbutil.AssertFieldConstraint(
ev.min_score,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.min_score.target),
)
pbutil.AssertFieldIsSet(ev.min_score, "feature_space")
elif ev.HasField("analyze_target"):
### AnalyzeTarget
# DB groups
for dbs in ev.analyze_target.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
for target in ev.analyze_target.targets:
assert target in benchmarks.targets, target
elif ev.HasField("token_size_distribution"):
### TokenSizeDistribution
# DB groups
for dbs in ev.token_size_distribution.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
elif ev.HasField("llvm_instcount_distribution"):
### LLVMInstCountDistribution
# DB groups
for dbs in ev.llvm_instcount_distribution.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
elif ev.HasField("pca_samples_features"):
### PCASamplesFeatures
# DB groups
for dbs in ev.pca_samples_features.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldIsSet(ev.pca_samples_features, "feature_space")
elif ev.HasField("features_distribution"):
      ### FeaturesDistribution
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
# DB groups
for dbs in ev.features_distribution.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldConstraint(
ev.features_distribution,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.features_distribution.target),
)
pbutil.AssertFieldIsSet(ev.features_distribution, "feature_space")
pbutil.AssertFieldConstraint(
ev.features_distribution,
"top_k",
lambda x: x > 0,
"top-K factor must be positive",
)
elif ev.HasField("human_likeness"):
      ### HumanLikeness
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
# DB groups
for dbs in ev.human_likeness.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldConstraint(
ev.human_likeness,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.human_likeness.target),
)
pbutil.AssertFieldConstraint(
ev.human_likeness,
"top_k",
lambda x: x > 0,
"top-K factor must be positive",
)
elif ev.HasField("human_likeness_analysis"):
      ### HumanLikenessAnalysis
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
# Human or AI database.
pbutil.AssertFieldIsSet(ev.human_likeness_analysis, "human_or_ai_db")
elif ev.HasField("log_file"):
### LogFile
# DB groups
for dbs in ev.log_file.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
elif ev.HasField("comp_mem_grewe"):
### CompMemGrewe
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
pbutil.AssertFieldIsSet(config, "tokenizer")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
for dbs in ev.comp_mem_grewe.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldConstraint(
ev.comp_mem_grewe,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.comp_mem_grewe.target),
)
elif ev.HasField("topk_cldrive"):
### TopKCLDrive
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
pbutil.AssertFieldIsSet(config, "tokenizer")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
for dbs in ev.topk_cldrive.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldConstraint(
ev.topk_cldrive,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.topk_cldrive.target),
)
pbutil.AssertFieldIsSet(ev.topk_cldrive, "feature_space")
pbutil.AssertFieldIsSet(ev.topk_cldrive, "cldrive_cache")
if not pathlib.Path(ev.topk_cldrive.cldrive_cache).resolve().exists():
l.logger().warn("CLDrive cache not found in {}. Will create one from scratch.".format(ev.topk_cldrive.cldrive_cache))
pbutil.AssertFieldConstraint(
ev.topk_cldrive,
"top_k",
lambda x: x > 0,
"top-K factor must be positive",
)
elif ev.HasField("mutec_vs_benchpress"):
### MutecVsBenchPress
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
pbutil.AssertFieldIsSet(config, "tokenizer")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
if ev.mutec_vs_benchpress.HasField("db_group"):
raise ValueError("db_group is a placeholder for mutec_vs_benchpress evaluator and should not be used.")
for dbs in [ev.mutec_vs_benchpress.seed, ev.mutec_vs_benchpress.benchpress]:
        for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldIsSet(ev.mutec_vs_benchpress, "mutec_cache")
if not pathlib.Path(ev.mutec_vs_benchpress.mutec_cache).resolve().exists():
l.logger().warn("Mutec cache not found in {}. Will create one from scratch.".format(ev.mutec_vs_benchpress.mutec_cache))
pbutil.AssertFieldConstraint(
ev.mutec_vs_benchpress,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.mutec_vs_benchpress.target),
)
pbutil.AssertFieldIsSet(ev.mutec_vs_benchpress, "feature_space")
pbutil.AssertFieldConstraint(
ev.mutec_vs_benchpress,
"top_k",
lambda x: x > 0,
"top-K factor must be positive",
)
pbutil.AssertFieldConstraint(
ev.mutec_vs_benchpress,
"beam_width",
lambda x: x > 0,
"beam width factor must be positive",
)
elif ev.HasField("srciror_src_vs_benchpress"):
      ### SRCIROR_srcVsBenchPress
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
pbutil.AssertFieldIsSet(config, "tokenizer")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
if ev.srciror_src_vs_benchpress.HasField("db_group"):
raise ValueError("db_group is a placeholder for srciror_src_vs_benchpress evaluator and should not be used.")
for dbs in [ev.srciror_src_vs_benchpress.seed, ev.srciror_src_vs_benchpress.benchpress]:
        for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldIsSet(ev.srciror_src_vs_benchpress, "srciror_src_cache")
if not pathlib.Path(ev.srciror_src_vs_benchpress.srciror_src_cache).resolve().exists():
l.logger().warn("Mutec cache not found in {}. Will create one from scratch.".format(ev.srciror_src_vs_benchpress.srciror_src_cache))
pbutil.AssertFieldConstraint(
ev.srciror_src_vs_benchpress,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.srciror_src_vs_benchpress.target),
)
pbutil.AssertFieldIsSet(ev.srciror_src_vs_benchpress, "feature_space")
pbutil.AssertFieldConstraint(
ev.srciror_src_vs_benchpress,
"top_k",
lambda x: x > 0,
"top-K factor must be positive",
)
pbutil.AssertFieldConstraint(
ev.srciror_src_vs_benchpress,
"beam_width",
lambda x: x > 0,
"beam width factor must be positive",
)
elif ev.HasField("srciror_ir_vs_benchpress"):
      ### SRCIROR_IRVsBenchPress
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
pbutil.AssertFieldIsSet(config, "tokenizer")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
if ev.srciror_ir_vs_benchpress.HasField("db_group"):
raise ValueError("db_group is a placeholder for srciror_ir_vs_benchpress evaluator and should not be used.")
for dbs in [ev.srciror_ir_vs_benchpress.seed, ev.srciror_ir_vs_benchpress.benchpress]:
        for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldIsSet(ev.srciror_ir_vs_benchpress, "srciror_ir_cache")
if not pathlib.Path(ev.srciror_ir_vs_benchpress.srciror_ir_cache).resolve().exists():
l.logger().warn("Mutec cache not found in {}. Will create one from scratch.".format(ev.srciror_ir_vs_benchpress.srciror_ir_cache))
pbutil.AssertFieldConstraint(
ev.srciror_ir_vs_benchpress,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.srciror_ir_vs_benchpress.target),
)
pbutil.AssertFieldIsSet(ev.srciror_ir_vs_benchpress, "feature_space")
pbutil.AssertFieldConstraint(
ev.srciror_ir_vs_benchpress,
"top_k",
lambda x: x > 0,
"top-K factor must be positive",
)
pbutil.AssertFieldConstraint(
ev.srciror_ir_vs_benchpress,
"beam_width",
lambda x: x > 0,
"beam width factor must be positive",
)
elif ev.HasField("generate_clsmith"):
# Generic Fields
pbutil.AssertFieldIsSet(config, "tokenizer")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# Specialized fields.
pbutil.AssertFieldIsSet(ev.generate_clsmith, "clsmith_db")
if not pathlib.Path(ev.generate_clsmith.clsmith_db).resolve().exists():
l.logger().warn("CLSmith samples DB not found in {}. Will create one from scratch.".format(ev.generate_clsmith.clsmith_db))
elif ev.HasField("grewe_top_k_csv"):
      ### GreweTopKCSV
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
pbutil.AssertFieldIsSet(config, "tokenizer")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
for dbs in ev.grewe_top_k_csv.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldIsSet(ev.grewe_top_k_csv, "cldrive_cache")
if not pathlib.Path(ev.grewe_top_k_csv.cldrive_cache).resolve().exists():
l.logger().warn("CLDrive cache not found in {}. Will create one from scratch.".format(ev.grewe_top_k_csv.cldrive_cache))
pbutil.AssertFieldConstraint(
ev.grewe_top_k_csv,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.grewe_top_k_csv.target),
)
pbutil.AssertFieldConstraint(
ev.grewe_top_k_csv,
"top_k",
lambda x: x > 0,
"top-K factor must be positive",
)
elif ev.HasField("grewe_csv"):
      ### GreweCSV
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
pbutil.AssertFieldIsSet(config, "tokenizer")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
for dbs in ev.grewe_csv.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
pbutil.AssertFieldIsSet(ev.grewe_csv, "cldrive_cache")
if not pathlib.Path(ev.grewe_csv.cldrive_cache).resolve().exists():
l.logger().warn("CLDrive cache not found in {}. Will create one from scratch.".format(ev.grewe_csv.cldrive_cache))
elif ev.HasField("train_grewe"):
### TrainGrewe
# Generic fields
pbutil.AssertFieldIsSet(config, "workspace")
# CSV groups
pbutil.AssertFieldIsSet(ev.train_grewe, "grewe_baseline")
p = pathlib.Path(ev.train_grewe.grewe_baseline)
if not p.exists():
raise FileNotFoundError(p)
for c in ev.train_grewe.csv:
pbutil.AssertFieldIsSet(c, "name")
pbutil.AssertFieldIsSet(c, "path")
p = pathlib.Path(c.path)
if not p.exists():
raise FileNotFoundError(p)
elif ev.HasField("feature_space_cov_label"):
### FeatureSpaceCovLabel
# Generic fields
pbutil.AssertFieldIsSet(config, "workspace")
# CSV groups
pbutil.AssertFieldIsSet(ev.feature_space_cov_label, "grewe_baseline")
p = pathlib.Path(ev.feature_space_cov_label.grewe_baseline)
if not p.exists():
raise FileNotFoundError(p)
for c in ev.feature_space_cov_label.csv:
pbutil.AssertFieldIsSet(c, "name")
pbutil.AssertFieldIsSet(c, "path")
p = pathlib.Path(c.path)
if not p.exists():
raise FileNotFoundError(p)
elif ev.HasField("feature_space_cov_group"):
### FeatureSpaceCovGroup
# Generic fields
pbutil.AssertFieldIsSet(config, "workspace")
# CSV groups
pbutil.AssertFieldIsSet(ev.feature_space_cov_group, "grewe_baseline")
p = pathlib.Path(ev.feature_space_cov_group.grewe_baseline)
if not p.exists():
raise FileNotFoundError(p)
for c in ev.feature_space_cov_group.csv:
pbutil.AssertFieldIsSet(c, "name")
pbutil.AssertFieldIsSet(c, "path")
p = pathlib.Path(c.path)
if not p.exists():
raise FileNotFoundError(p)
elif ev.HasField("analyze_beam_search"):
### AnalyzeBeamSearch
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
for dbs in ev.analyze_beam_search.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldConstraint(
ev.analyze_beam_search,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.analyze_beam_search.target),
)
pbutil.AssertFieldIsSet(ev.analyze_beam_search, "feature_space")
elif ev.HasField("gen_distance_distribution"):
### GenDistanceDistribution
# Generic Fields
pbutil.AssertFieldIsSet(config, "workspace")
if not pathlib.Path(config.tokenizer).resolve().exists():
raise FileNotFoundError(pathlib.Path(config.tokenizer).resolve())
# DB groups
for dbs in ev.gen_distance_distribution.db_group:
for db in dbs.database:
p = pathlib.Path(db).resolve()
if not p.exists():
raise FileNotFoundError(p)
if dbs.HasField("size_limit"):
pbutil.AssertFieldConstraint(
dbs,
"size_limit",
lambda x : x > 0,
"Size limit must be a positive integer, {}".format(dbs.size_limit)
)
# Specialized fields.
pbutil.AssertFieldConstraint(
ev.gen_distance_distribution,
"target",
lambda x: x in benchmarks.targets,
"target {} not found".format(ev.gen_distance_distribution.target),
)
pbutil.AssertFieldIsSet(ev.gen_distance_distribution, "feature_space")
pbutil.AssertFieldIsSet(ev.gen_distance_distribution, "generation_id")
else:
raise ValueError(ev)
return config
def ConfigFromFlags() -> evaluator_pb2.Evaluation:
"""
Parse evaluator config path and return config.
"""
config_path = pathlib.Path(FLAGS.evaluator_config)
if not config_path.is_file():
raise FileNotFoundError (f"Evaluation --evaluator_config file not found: '{config_path}'")
config = pbutil.FromFile(config_path, evaluator_pb2.Evaluation())
return AssertIfValid(config)
def main(config: evaluator_pb2.Evaluation):
"""
Run the evaluators iteratively.
"""
evaluation_map = {
evaluator_pb2.LogFile : log_file.LogFile,
evaluator_pb2.KAverageScore : distance_score.KAverageScore,
evaluator_pb2.MinScore : distance_score.MinScore,
evaluator_pb2.AnalyzeTarget : benchmark_analysis.AnalyzeTarget,
evaluator_pb2.TokenSizeDistribution : benchmark_analysis.TokenSizeDistribution,
evaluator_pb2.LLVMInstCountDistribution : benchmark_analysis.LLVMInstCountDistribution,
evaluator_pb2.PCASamplesFeatures : benchmark_analysis.PCASamplesFeatures,
evaluator_pb2.FeaturesDistribution : benchmark_analysis.FeaturesDistribution,
evaluator_pb2.HumanLikeness : benchmark_analysis.HumanLikeness,
evaluator_pb2.HumanLikenessAnalysis : analysis.HumanLikenessAnalysis,
evaluator_pb2.CompMemGrewe : comp_vs_mem.CompMemGrewe,
evaluator_pb2.TopKCLDrive : cldrive.TopKCLDrive,
evaluator_pb2.MutecVsBenchPress : mutec.MutecVsBenchPress,
evaluator_pb2.SRCIROR_srcVsBenchPress : srciror.SRCIRORVsBenchPress,
evaluator_pb2.SRCIROR_IRVsBenchPress : srciror.SRCIRORVsBenchPress,
evaluator_pb2.GenerateCLSmith : clsmith.GenerateCLSmith,
evaluator_pb2.GreweTopKCSV : grewe_api.GreweTopKCSV,
evaluator_pb2.GreweCSV : grewe_api.GreweCSV,
evaluator_pb2.TrainGrewe : grewe_api.TrainGrewe,
evaluator_pb2.FeatureSpaceCovLabel : grewe_api.FeatureSpaceCovLabel,
evaluator_pb2.FeatureSpaceCovGroup : grewe_api.FeatureSpaceCovGroup,
evaluator_pb2.AnalyzeBeamSearch : distance_score.AnalyzeBeamSearch,
evaluator_pb2.GenDistanceDistribution : distance_score.GenDistanceDistribution,
}
db_cache = {}
target_cache = {}
feature_spaces = []
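  # DB groups and target benchmark suites are cached so each is loaded and featurized only once, even when several evaluators reference them.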
for ev in config.evaluator:
kw_args = {
"db_groups" : [],
"tokenizer" : tokenizers.TokenizerBase.FromFile(pathlib.Path(config.tokenizer).resolve()) if config.HasField("tokenizer") else None,
"workspace_path" : pathlib.Path(config.workspace).resolve() if config.HasField("workspace") else None,
}
if ev.HasField("k_average_score"):
sev = ev.k_average_score
kw_args['top_k'] = sev.top_k
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(
dbs.group_name,
dbs.db_type,
dbs.database,
tokenizer = kw_args['tokenizer'] if not dbs.HasField("tokenizer") else dbs.tokenizer,
size_limit = size_limit
)
kw_args['db_groups'].append(db_cache[key])
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("min_score"):
sev = ev.min_score
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(
dbs.group_name,
dbs.db_type,
dbs.database,
tokenizer = kw_args['tokenizer'] if not dbs.HasField("tokenizer") else dbs.tokenizer,
size_limit = size_limit
)
kw_args['db_groups'].append(db_cache[key])
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("analyze_target"):
sev = ev.analyze_target
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("features_distribution"):
sev = ev.features_distribution
kw_args['top_k'] = sev.top_k
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("human_likeness"):
sev = ev.human_likeness
kw_args['top_k'] = sev.top_k
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("human_likeness_analysis"):
sev = ev.human_likeness_analysis
# Pass human or AI DB to kwargs.
kw_args["human_likeness_data"] = sev.human_or_ai_db
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("token_size_distribution"):
sev = ev.token_size_distribution
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("llvm_instcount_distribution"):
sev = ev.llvm_instcount_distribution
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("pca_samples_features"):
sev = ev.pca_samples_features
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("log_file"):
sev = ev.log_file
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("comp_mem_grewe"):
sev = ev.comp_mem_grewe
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("topk_cldrive"):
sev = ev.topk_cldrive
kw_args['top_k'] = sev.top_k
kw_args['cldrive_cache'] = sev.cldrive_cache
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("mutec_vs_benchpress"):
sev = ev.mutec_vs_benchpress
kw_args['top_k'] = sev.top_k
kw_args['mutec_cache'] = sev.mutec_cache
kw_args['beam_width'] = sev.beam_width
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for name, dbs in [('seed', sev.seed), ('benchpress', sev.benchpress)]:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args[name] = db_cache[key]
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("srciror_src_vs_benchpress") or ev.HasField("srciror_ir_vs_benchpress"):
if ev.HasField("srciror_src_vs_benchpress"):
sev = ev.srciror_src_vs_benchpress
kw_args['srciror_cache'] = sev.srciror_src_cache
kw_args['mutation_level'] = "src"
else:
sev = ev.srciror_ir_vs_benchpress
kw_args['srciror_cache'] = sev.srciror_ir_cache
kw_args['mutation_level'] = "IR"
      # srciror_cache has already been set per mutation level above; do not overwrite it here.
      kw_args['top_k'] = sev.top_k
      kw_args['beam_width'] = sev.beam_width
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for name, dbs in [('seed', sev.seed), ('benchpress', sev.benchpress)]:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args[name] = db_cache[key]
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
# Gather plotter configuration
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
elif ev.HasField("generate_clsmith"):
sev = ev.generate_clsmith
kw_args['clsmith_path'] = sev.clsmith_db
elif ev.HasField("grewe_top_k_csv"):
sev = ev.grewe_top_k_csv
kw_args['top_k'] = sev.top_k
kw_args['cldrive_cache'] = sev.cldrive_cache
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
kw_args['feature_space'] = "GreweFeatures"
elif ev.HasField("grewe_csv"):
sev = ev.grewe_csv
kw_args['cldrive_cache'] = sev.cldrive_cache
# Gather target benchmarks and cache them
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
kw_args['feature_space'] = "GreweFeatures"
elif ev.HasField("train_grewe"):
sev = ev.train_grewe
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
kw_args['grewe_baseline'] = pathlib.Path(sev.grewe_baseline).resolve()
kw_args['csv_groups'] = []
for c in sev.csv:
kw_args['csv_groups'].append({'name': c.name, 'path': pathlib.Path(c.path).resolve()})
elif ev.HasField("feature_space_cov_label"):
sev = ev.feature_space_cov_label
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
kw_args['grewe_baseline'] = pathlib.Path(sev.grewe_baseline).resolve()
kw_args['csv_groups'] = []
for c in sev.csv:
kw_args['csv_groups'].append({'name': c.name, 'path': pathlib.Path(c.path).resolve()})
elif ev.HasField("feature_space_cov_group"):
sev = ev.feature_space_cov_group
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
kw_args['grewe_baseline'] = pathlib.Path(sev.grewe_baseline).resolve()
kw_args['csv_groups'] = []
for c in sev.csv:
kw_args['csv_groups'].append({'name': c.name, 'path': pathlib.Path(c.path).resolve()})
elif ev.HasField("analyze_beam_search"):
sev = ev.analyze_beam_search
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
elif ev.HasField("gen_distance_distribution"):
sev = ev.gen_distance_distribution
if sev.HasField("plot_config"):
kw_args['plot_config'] = pbutil.ToJson(sev.plot_config)
# Gather target benchmarks and cache them
if isinstance(sev.target, list):
kw_args["targets"] = []
for t in sev.target:
if t not in target_cache:
target_cache[t] = TargetBenchmarks(t)
kw_args["targets"].append(target_cache[t])
else:
if sev.target not in target_cache:
target_cache[sev.target] = TargetBenchmarks(sev.target)
kw_args["targets"] = target_cache[sev.target]
for dbs in sev.db_group:
key = dbs.group_name + ''.join(dbs.database)
if key not in db_cache:
size_limit = dbs.size_limit if dbs.HasField("size_limit") else None
db_cache[key] = DBGroup(dbs.group_name, dbs.db_type, dbs.database, tokenizer = kw_args['tokenizer'], size_limit = size_limit)
kw_args['db_groups'].append(db_cache[key])
# Gather feature spaces if applicable.
if sev.HasField("feature_space"):
kw_args['feature_space'] = sev.feature_space
if sev.HasField("generation_id"):
kw_args['generation_id'] = sev.generation_id
else:
raise NotImplementedError(ev)
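    # Dispatch on the type of the specialized sub-message to the matching evaluator implementation.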
evaluation_map[type(sev)](**kw_args)
return
def initMain(*args, **kwargs):
l.initLogger(name = "evaluators")
config = ConfigFromFlags()
main(config)
return
if __name__ == "__main__":
app.run(initMain)
sys.exit(0)
| 57,585 | 40.911208 | 190 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/log_file.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Log file evaluation.
"""
from deeplearning.benchpress.experiments import public
@public.evaluator
def LogFile(**kwargs) -> None:
"""
Write benchmarks and target stats in log file.
"""
db_groups = kwargs.get('db_groups')
target = kwargs.get('targets')
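  # Stub evaluator: the arguments are collected to keep the public evaluator signature uniform, but the log-file report is not implemented yet.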
raise NotImplementedError
return | 910 | 31.535714 | 74 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/comp_vs_mem.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluation script for mem vs comp Grewe features against multiple database groups.
"""
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.experiments import public
@public.evaluator
def CompMemGrewe(**kwargs) -> None:
"""
Compare Computation vs Memory instructions for each database group
and target benchmarks.
"""
db_groups = kwargs.get('db_groups')
target = kwargs.get('targets')
plot_config = kwargs.get('plot_config')
workspace_path = kwargs.get('workspace_path') / "comp_vs_mem_grewe"
workspace_path.mkdir(exist_ok = True, parents = True)
feature_space = "GreweFeatures"
groups = {
target.target: {
'data' : [],
'names' : [],
}
}
for dbg in db_groups:
if dbg.db_type != samples_database.SamplesDatabase:
raise ValueError("CompMemGrewe requires SamplesDatabase but received", dbg.db_type)
groups[dbg.group_name] = {
'data' : [],
'names' : []
}
for b in target.get_benchmarks(feature_space):
groups[target.target]['data'].append([b.features['comp'], b.features['mem']])
groups[target.target]['names'].append(b.name)
unique = set()
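  # De-duplicate identical (comp, mem) points so repeated samples do not pile up in the scatter plot.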
for dbg in db_groups:
for feats in dbg.get_features(feature_space):
if "{}-{}".format(feats['comp'], feats['mem']) not in unique:
groups[dbg.group_name]["data"].append([feats['comp'], feats['mem']])
groups[dbg.group_name]['names'].append("")
unique.add("{}-{}".format(feats['comp'], feats['mem']))
plotter.GroupScatterPlot(
groups = groups,
plot_name = "comp_mem_{}".format('-'.join([str(x) for x in groups.keys()])),
path = workspace_path,
**plot_config if plot_config else {}
)
return | 2,382 | 34.567164 | 89 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/mutec.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluation script for mutec mutation program.
"""
import typing
import glob
import tempfile
import subprocess
import pathlib
import json
import os
import tqdm
import functools
import math
import multiprocessing
from absl import flags
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.preprocessors import clang
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.experiments import workers
from deeplearning.benchpress.experiments import public
from deeplearning.benchpress.experiments import clsmith
FLAGS = flags.FLAGS
MUTEC = environment.MUTEC
CLSMITH_INCLUDE = environment.CLSMITH_INCLUDE
## Some hard limits in order to finish the experiments this year.
# max amount of mutants per input source.
PER_INPUT_HARD_LIMIT = 1000
SEARCH_DEPTH_HARD_LIMIT = 30
def generate_mutants(src: str, incl: str, timeout_seconds: int = 45) -> typing.Set[typing.Tuple[str, str]]:
"""
Collect all mutants from src and return them
"""
try:
tdir = pathlib.Path(FLAGS.local_filesystem).resolve()
except Exception:
tdir = None
with tempfile.NamedTemporaryFile("w", prefix="mutec_src", suffix='.cl', dir = tdir) as f:
try:
f.write(src)
f.flush()
except UnicodeDecodeError:
return []
except UnicodeEncodeError:
return []
if incl:
with open("/tmp/mutec_src_temp_header.h", 'w') as f:
f.write(incl)
f.flush()
# Fix compile_commands.json for source file.
base_path = pathlib.Path(f.name).resolve().parent
compile_command = {
'directory' : str(base_path),
'arguments' : [str(clang.CLANG), f.name] +
["-S", "-emit-llvm", "-o", "-"] +
opencl.GetClangArgs(use_shim = False, use_aux_headers = False, extra_args = ["-include{}".format(pathlib.Path(CLSMITH_INCLUDE) / "CLSmith.h")] if incl else [""]) +
["-include/tmp/mutec_src_temp_header.h" if incl else ""],
'file' : str(f.name)
}
with open(base_path / "compile_commands.json", 'w') as ccf:
json.dump([compile_command], ccf)
# Construct and execute mutec command
mutec_cmd = [
"timeout",
"-s9",
str(timeout_seconds),
MUTEC,
str(f.name),
"-o",
str(base_path)
]
process = subprocess.Popen(
mutec_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
try:
stdout, stderr = process.communicate()
except TimeoutError:
pass
os.remove(str(base_path / "compile_commands.json"))
mutec_paths = glob.glob("{}.mutec*".format(f.name))
templates = glob.glob("{}.code_template".format(f.name))
mutants = set([(open(x, 'r').read(), incl) for x in mutec_paths[:PER_INPUT_HARD_LIMIT]])
for m in mutec_paths:
os.remove(m)
for m in templates:
os.remove(m)
os.remove("/tmp/mutec_src_temp_header.h")
return mutants
def beam_mutec(srcs : typing.List[typing.Tuple[str, str, float]],
target_features : typing.Dict[str, float],
feat_space : str,
beam_width : int,
mutec_cache : samples_database.SamplesDatabase,
) -> typing.List[typing.Tuple[str, float]]:
"""
Run generational beam search over starting github kernels
to minimize distance from target features.
"""
better_score = True
total_beams, beam, closest = set(), [], []
gen_id = 0
while better_score:
cands = set()
## Generate mutants for current generation.
for src, incl, dist in tqdm.tqdm(srcs, total = len(srcs), desc = "Mutec candidates {}".format(gen_id), leave = False):
cands.update(generate_mutants(src, incl)) ### This should collect all mutants and return them, out of a single source.
## Extract their features and calculate distances.
pool = multiprocessing.Pool()
f = functools.partial(
workers.ExtractAndCalculate,
target_features = target_features,
feature_space = feat_space,
)
# total.update(cands)
try:
for cand in tqdm.tqdm(pool.imap_unordered(f, cands), total = len(cands), desc = "Extract Features {}".format(gen_id), leave = False):
if cand:
beam.append(cand)
except Exception as e:
l.logger().error(e)
pool.terminate()
raise e
pool.close()
## Sort by distance in ascending order. If score is better, keep doing beam search
## srcs are included to the outputs, in order to keep them if the offsprings are worse.
closest = sorted(beam + srcs, key = lambda x: x[2])[:beam_width]
total_beams.update([(x, y) for x, y, _ in closest])
min_length = min(len(closest), len(srcs))
if sum([x for _, _, x in closest[:min_length]]) < sum([x for _, _, x in srcs[:min_length]]) and gen_id < SEARCH_DEPTH_HARD_LIMIT:
srcs = closest
beam = []
else:
better_score = False
gen_id += 1
## Store all mutants in database.
with mutec_cache.Session(commit = True) as s:
pool = multiprocessing.Pool()
try:
idx = mutec_cache.count
for dp in tqdm.tqdm(pool.imap_unordered(workers.FeatureExtractor, total_beams), total = len(total_beams), desc = "Add mutants to DB", leave = False):
if dp:
src, incl, feats = dp
try:
_ = opencl.Compile(src, header_file = incl, extra_args = ["-include{}".format(pathlib.Path(environment.CLSMITH_INCLUDE) / "CLSmith.h")] if incl else [""])
compiles = True
except ValueError:
compiles = False
sample = samples_database.Sample.FromArgsLite(idx, incl + src, feats, compiles)
exists = s.query(samples_database.Sample.sha256).filter_by(sha256 = sample.sha256).scalar() is not None
if not exists:
s.add(sample)
idx += 1
except Exception as e:
l.logger().error(e)
pool.terminate()
raise e
pool.close()
s.commit()
return closest
@public.evaluator
def MutecVsBenchPress(**kwargs) -> None:
"""
Compare mutec mutation tool on github's database against BenchPress.
Comparison is similar to KAverageScore comparison.
"""
seed = kwargs.get('seed')
benchpress = kwargs.get('benchpress')
mutec_cache = kwargs.get('mutec_cache', '')
target = kwargs.get('targets')
feature_space = kwargs.get('feature_space')
top_k = kwargs.get('top_k')
beam_width = kwargs.get('beam_width')
unique_code = kwargs.get('unique_code', False)
plot_config = kwargs.get('plot_config')
workspace_path = kwargs.get('workspace_path') / "mutec_vs_benchpress" / feature_space
workspace_path.mkdir(exist_ok = True, parents = True)
if not pathlib.Path(MUTEC).exists():
raise FileNotFoundError("Mutec executable not found: {}".format(MUTEC))
if seed.db_type != encoded.EncodedContentFiles and seed.db_type != clsmith.CLSmithDatabase:
raise ValueError("Scores require EncodedContentFiles or CLSmithDatabase but received", seed.db_type)
if benchpress.db_type != samples_database.SamplesDatabase:
raise ValueError("BenchPress scores require SamplesDatabase but received", benchpress.db_type)
## Load database and checkpoint of targets.
mutec_db = samples_database.SamplesDatabase(url = "sqlite:///{}".format(pathlib.Path(mutec_cache).resolve()), must_exist = False)
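  # Benchmark names already processed in a previous run are read back from the results table so the search can resume instead of repeating work.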
done = set()
with mutec_db.Session(commit = True) as s:
res = s.query(samples_database.SampleResults).filter_by(key = feature_space).first()
if res is not None:
done.update([str(x) for x in res.results.split('\n')])
s.commit()
## Initialize dictionary.
groups = {}
groups["Mutec"] = ([], [])
groups[seed.group_name] = ([], [])
groups[benchpress.group_name] = ([], [])
## Fix fetching data functions.
if unique_code:
git_get_data = lambda x: seed.get_unique_data_features(x)
bp_get_data = lambda x: benchpress.get_unique_data_features(x)
else:
git_get_data = lambda x: seed.get_data_features(x)
bp_get_data = lambda x: benchpress.get_data_features(x)
## Run engine on mutec.
benchmarks = target.get_benchmarks(feature_space)
for benchmark in tqdm.tqdm(benchmarks, total = len(benchmarks), desc = "Benchmarks"):
## This has already been searched for.
if benchmark.name in done:
continue
    ## Sorted list of (src, include, distance) tuples, closest to the target benchmark first.
closest = workers.SortedSrcDistances(git_get_data(feature_space), benchmark.features, feature_space)
    ## If CLSmith is too slow here, an alternative is to collect only the features first
    ## and fetch the code later, just for the entries that make it into the beam.
# Split source and distances lists.
git_dist = [x for _, _, x in closest]
## If distances are already minimized, nothing to do.
if sum(git_dist[:top_k]) == 0:
continue
l.logger().info(benchmark.name)
closest_mutec_src = beam_mutec([(src, inc, dist) for src, inc, dist in closest[:beam_width] if dist > 0], benchmark.features, feature_space, beam_width, mutec_db)[:top_k] # tuple of (src, distance)
closest_mutec_dist = [x for _, _, x in closest_mutec_src]
assert len(closest_mutec_dist) == len(git_dist[:top_k])
## If mutec has provided a better score
if sum(closest_mutec_dist) < sum(git_dist[:top_k]):
l.logger().info("Score reduced from {} to {}".format(sum(git_dist[:top_k]), sum(closest_mutec_dist)))
l.logger().info("Best score from {} to {}".format(git_dist[0], closest_mutec_dist[0]))
with mutec_db.Session(commit = True) as s:
res = s.query(samples_database.SampleResults).filter_by(key = feature_space).first()
if res is not None:
res.results = res.results + "\n" + benchmark.name
else:
s.add(samples_database.SampleResults(key = feature_space, results = benchmark.name))
s.commit()
# Compute target's distance from O(0,0)
target_origin_dist = math.sqrt(sum([x**2 for x in benchmark.features.values()]))
mutec_avg_dist = sum(closest_mutec_dist) / top_k
groups["Mutec"][0].append(benchmark.name)
groups["Mutec"][1].append(100 * ((target_origin_dist - mutec_avg_dist) / target_origin_dist))
# Compute target's distance from O(0,0)
git_avg_dist = sum(git_dist[:top_k]) / top_k
groups[seed.group_name][0].append(benchmark.name)
groups[seed.group_name][1].append(100 * ((target_origin_dist - git_avg_dist) / target_origin_dist))
## Run engine on benchpress.
benchmarks = target.get_benchmarks(feature_space)
for benchmark in tqdm.tqdm(benchmarks, total = len(benchmarks), desc = "Benchpress"):
## Run only for benchmarks mutec has improved.
if benchmark.name in groups["Mutec"][0]:
l.logger().info(benchmark.name)
distances = workers.SortedDistances(bp_get_data(feature_space), benchmark.features, feature_space)
# Compute target's distance from O(0,0)
target_origin_dist = math.sqrt(sum([x**2 for x in benchmark.features.values()]))
avg_dist = sum(distances[:top_k]) / len(distances[:top_k])
groups[benchpress.group_name][0].append(benchmark.name)
groups[benchpress.group_name][1].append(100 * ((target_origin_dist - avg_dist) / target_origin_dist))
plotter.GrouppedBars(
groups = groups,
plot_name = "mutec_avg_{}_{}_{}".format(top_k, seed.group_name, feature_space.replace("Features", " Features")),
path = workspace_path,
**plot_config if plot_config else {},
)
return
| 12,379 | 37.209877 | 202 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/distance_score.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Top-K or min distance of database groups against target benchmark suites.
"""
import json
import tqdm
import typing
import math
from deeplearning.benchpress.features import active_feed_database
from deeplearning.benchpress.features import evaluate_cand_database
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.experiments import public
from deeplearning.benchpress.experiments import clsmith
from deeplearning.benchpress.experiments import workers
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import logging as l
@public.evaluator
def KAverageScore(**kwargs) -> None:
"""
Compare the average of top-K closest per target benchmark
for all different database groups.
"""
db_groups = kwargs.get('db_groups')
target = kwargs.get('targets')
feature_space = kwargs.get('feature_space')
top_k = kwargs.get('top_k')
unique_code = kwargs.get('unique_code', False)
plot_config = kwargs.get('plot_config')
workspace_path = kwargs.get('workspace_path') / "{}_avg_score".format(top_k) / feature_space
workspace_path.mkdir(exist_ok = True, parents = True)
groups = {}
  # Keep the reduced GitHub corpus as the benchmark-filtering baseline even when the GitHub group itself is not plotted.
reduced_git = None
for dbg in db_groups:
if dbg.group_name == "GitHub-768-inactive" or dbg.group_name == "GitHub-768":
reduced_git = dbg.get_data_features(feature_space)
break
benchmarks = target.get_benchmarks(feature_space, reduced_git_corpus = reduced_git)
target_origin_dists = {}
for dbg in db_groups:
if dbg.group_name == "GitHub-768-inactive":
# Skip baseline DB group.
continue
if not (
dbg.db_type == samples_database.SamplesDatabase or
dbg.db_type == encoded.EncodedContentFiles or
dbg.db_type == clsmith.CLSmithDatabase or
dbg.db_type == active_feed_database.ActiveFeedDatabase
):
raise ValueError("Scores require SamplesDatabase or EncodedContentFiles but received", dbg.db_type)
groups[dbg.group_name] = ([], [], [])
for benchmark in tqdm.tqdm(benchmarks, total = len(benchmarks), desc = "Benchmarks"):
groups[dbg.group_name][0].append(benchmark.name)
# Find shortest distances.
if unique_code:
raise NotImplementedError
get_data = lambda x: dbg.get_unique_data_features(x, target_name = benchmark.full_name)
else:
get_data = lambda x: dbg.get_data_features(x)
src_distances = workers.SortedSrcDistances(get_data(feature_space), benchmark.features, feature_space)
distances = [d for _, _, d in src_distances]
# Compute target's distance from O(0,0)
if len(distances) == 0:
l.logger().error("{}-{}: Empty list, make sure this is expected.".format(benchmark.name, dbg.group_name))
continue
assert len(distances) != 0, "Sorted src list for {} is empty!".format(dbg.group_name)
avg_dist = sum(distances[:top_k]) / top_k
if benchmark.name in target_origin_dists:
target_origin_dists[benchmark.name] = max(target_origin_dists[benchmark.name], avg_dist)
else:
target_origin_dists[benchmark.name] = max(math.sqrt(sum([x**2 for x in benchmark.features.values()])), avg_dist)
groups[dbg.group_name][1].append(avg_dist)
groups[dbg.group_name][2].append([s for s, _, _ in src_distances[:top_k]])
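  # The raw distances gathered above are turned into a "relative proximity" percentage
  # below: 100 * (normalizer - dist) / normalizer, where the normalizer is the larger of
  # the benchmark's Euclidean distance from the origin and any group's average distance,
  # so scores stay in [0, 100] and an exact feature match scores 100%.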
averages = {}
abs_average = {}
counters = {}
for group_name, tup in groups.items():
bench_names, raw_dists, _ = tup
averages[group_name] = 0.0
abs_average[group_name] = 0.0
counters[group_name] = 0
for idx, (bench_name, raw_dist) in enumerate(zip(bench_names, raw_dists)):
groups[group_name][1][idx] = 100 * ( (target_origin_dists[bench_name] - raw_dist ) / target_origin_dists[bench_name])
averages[group_name] += ( (target_origin_dists[bench_name] - raw_dist ) / target_origin_dists[bench_name])
abs_average[group_name] += raw_dist
if ( (target_origin_dists[bench_name] - raw_dist ) / target_origin_dists[bench_name]) == 1.0:
counters[group_name] += 1
averages[group_name] = averages[group_name] / len(bench_names)
abs_average[group_name] = abs_average[group_name] / len(bench_names)
l.logger().info(feature_space)
l.logger().info("Average euclidean distance:\n{}".format(abs_average))
l.logger().info("Average relative proxmity:\n{}".format(averages))
l.logger().info("Exact target feature matches: {}".format(counters))
plotter.GrouppedBars(
groups = {dbname: (c[0], c[1]) for dbname, c in groups.items()},
plot_name = "avg_{}_dist_{}_{}".format(top_k, feature_space.replace("Features", "Features"), '-'.join([dbg.group_name for dbg in db_groups])),
path = workspace_path,
**plot_config if plot_config else {},
)
"""
## Grewe
groups["BenchDirect"]['data'] = [[267*2048, 73.56], [266*1024, 77.79], [512*290, 81.56], [256*289, 82.94], [128*272, 85.30], [64*282, 87.62], [32*151, 96.24]]
groups["BenchPress"]['data'] = [[2048*286, 76.79], [1024*306, 83.62], [512*325, 88.27], [256*326, 91.47], [128*333, 95.53], [64*338, 97.30], [32*236, 99.13]]
groups["BenchDirect"]['names'] = [2048, 1024, 512, 256, 128, 64, 32]
groups["BenchPress"]['names'] = [2048, 1024, 512, 256, 128, 64, 32]
time_speedup = [100*abs(round((x[0]-y[0])) / y[0]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
acc_speedup = [100*abs(round((x[1]-y[1])) / y[1]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
time_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], time_speedup)]
acc_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], acc_speedup)]
print(time_speedup)
print(acc_speedup)
plt.GroupScatterPlot(groups, plot_name="grewe")
plt.GroupScatterPlot({"time_speedup": {'data': time_speedup, 'names': []}, "accuracy_improvement": {'data': acc_speedup, 'names': []}}, plot_name="grewe_speedup")
## Autophase
groups["BenchDirect"]['data'] = [[262*2048, 41.02], [262*1024, 44.7], [512*267, 52.36], [256*262, 54.60], [128*254, 58.02], [64*230, 61.09], [32*164, 83.59]]
groups["BenchPress"]['data'] = [[2048*292, 48.88], [1024*297, 50.84], [512*302, 57.38], [256*307, 57.63], [128*312, 71.32], [64*312, 74.27], [32*254, 83.59]]
time_speedup = [100*abs(round((x[0]-y[0])) / y[0]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
acc_speedup = [100*abs(round((x[1]-y[1])) / y[1]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
time_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], time_speedup)]
acc_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], acc_speedup)]
print(time_speedup)
print(acc_speedup)
plt.GroupScatterPlot(groups, plot_name="autophase")
plt.GroupScatterPlot({"time_speedup": {'data': time_speedup, 'names': []}, "accuracy_improvement": {'data': acc_speedup, 'names': []}}, plot_name="autophase_speedup")
## Instcount
groups["BenchDirect"]['data'] = [[252*2048, 30.73], [257*1024, 34.36], [512*262, 36.32], [256*259, 39.89], [128*265, 41.96], [64*257, 46.21], [32*163, 48.33]]
groups["BenchPress"]['data'] = [[2048*301, 32.63], [1024*307, 40.09], [512*302, 40.49], [256*307, 52.89], [128*307, 56.41], [64*312, 57.77], [32*208, 69.11]]
time_speedup = [100*abs(round((x[0]-y[0])) / y[0]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
acc_speedup = [100*abs(round((x[1]-y[1])) / y[1]) for x, y in zip(groups["BenchDirect"]["data"], groups["BenchPress"]["data"])]
time_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], time_speedup)]
acc_speedup = [[x, y] for x, y in zip([2048, 1024, 512, 256, 128, 64, 32], acc_speedup)]
print(time_speedup)
print(acc_speedup)
plt.GroupScatterPlot(groups, plot_name="instcount")
plt.GroupScatterPlot({"time_speedup": {'data': time_speedup, 'names': []}, "accuracy_improvement": {'data': acc_speedup, 'names': []}}, plot_name="instcount_speedup")
"""
return groups
@public.evaluator
def MinScore(**kwargs) -> None:
"""
Compare the closest sample per target benchmark
for all different database groups.
"""
if 'top_k' in kwargs:
del kwargs['top_k']
return KAverageScore(top_k = 1, unique_code = False, **kwargs)
@public.evaluator
def AnalyzeBeamSearch(**kwargs) -> None:
"""
Analyze active feed databases and provide statistics
on distance convergence from target.
Two types of plots are exported:
1. For each target benchmark, a radar plot with its features, along with the closest candidate per db group.
2. For each target benchmark, a convergence line per generation for all db groups is shown.
Also, a final converge distribution line per db group is exported for all target benchmarks.
"""
db_groups = kwargs.get('db_groups')
target = kwargs.get('targets')
feature_space = kwargs.get('feature_space')
plot_config = kwargs.get('plot_config')
workspace_path = kwargs.get('workspace_path') / "analyze_beam_search" / feature_space
workspace_path.mkdir(exist_ok = True, parents = True)
def feats_to_list(feats: typing.Dict[str, float]) -> typing.Tuple[typing.List, typing.List]:
k, v = list(feats.keys()), list(feats.values())
k, v = zip(*sorted(zip(k, v)))
k, v = list(k), list(v)
return k, v
stats = {}
benchmarks = target.get_benchmarks(feature_space)
for benchmark in tqdm.tqdm(benchmarks, total = len(benchmarks), desc = "Benchmarks"):
keys, vals = feats_to_list(benchmark.features)
radar_features = {}
generations_score = {}
radar_features[benchmark.name] = [
vals,
keys,
]
for dbg in db_groups:
if not dbg.db_type == active_feed_database.ActiveFeedDatabase:
raise ValueError("Beam search analysis requires ActiveFeedDatabase, but received {}", dbg.db_type)
data = [dp for dp in dbg.get_data if target.shorten_benchmark_name(dp.target_benchmark.split('\n')[0]) == "// {}".format(benchmark.name)]
if len(data) == 0:
l.logger().warn("{} not found in {}, here are the features: {}".format(benchmark.name, dbg.group_name, benchmark.features))
continue
closest = sorted(data, key = lambda dp: dp.sample_quality)[0]
      dict_feats = {':'.join(feat.split(':')[:-1]) : float(feat.split(':')[-1]) for feat in closest.output_features.split('\n')}
keys, vals = feats_to_list(dict_feats)
radar_features[dbg.group_name] = [
vals,
keys
]
score_gens = {}
if dbg.group_name not in stats:
stats[dbg.group_name] = {
"zero_distance" : 0,
"total_epochs" : 0,
"best_distance" : [],
"singleshot_distance": [],
"total_benchmarks" : len(benchmarks),
}
stats[dbg.group_name]["best_distance"].append(math.inf)
for dp in data:
if dp.generation_id not in score_gens:
score_gens[dp.generation_id] = dp.sample_quality
stats[dbg.group_name]['best_distance'][-1] = dp.sample_quality
stats[dbg.group_name]['singleshot_distance'].append(dp.sample_quality)
else:
score_gens[dp.generation_id] = min(score_gens[dp.generation_id], dp.sample_quality)
stats[dbg.group_name]['best_distance'][-1] = score_gens[dp.generation_id]
stats[dbg.group_name]['total_epochs'] += len(list(score_gens.keys()))
if stats[dbg.group_name]['best_distance'][-1] == 0:
stats[dbg.group_name]['zero_distance'] += 1
generations_score[dbg.group_name] = {
'data': [[idx, v] for idx, v in score_gens.items()],
'names': [x for x, _ in score_gens.items()]
}
## Benchmark characterization.
plotter.GrouppedRadar(
groups = radar_features,
plot_name = "feeds_radar_{}_{}_{}".format(feature_space, benchmark.name, '-'.join([dbg.group_name.replace("BenchPress", "BP").replace("BenchDirect", "BD") for dbg in db_groups])),
path = workspace_path / "radar",
title = benchmark.name,
# **plot_config if plot_config else {},
)
## Score convergence per generation.
plotter.GroupScatterPlot(
groups = generations_score,
plot_name = "Beam_generation_{}_{}_{}".format(feature_space, benchmark.name, '-'.join([dbg.group_name.replace("BenchPress", "BP").replace("BenchDirect", "BD") for dbg in db_groups])),
path = workspace_path / "scatter",
mode = "lines+markers",
title = "{}, {}".format(feature_space, benchmark.name),
**plot_config if plot_config else {},
)
plotter.GrouppedBars(
groups = {
'#zero_distanced': (
list(stats.keys()),
[x['zero_distance'] for x in stats.values()],
)
},
plot_name = "zero_distances_{}_{}".format(feature_space, '-'.join([dbg.group_name.replace("BenchPress", "BP").replace("BenchDirect", "BD") for dbg in db_groups])),
path = workspace_path / "stats",
# **plot_config if plot_config else {},
)
plotter.GrouppedBars(
groups = {
'#total_epochs': (
list(stats.keys()),
[x['total_epochs'] for x in stats.values()],
)
},
plot_name = "total_epochs_{}_{}".format(feature_space, '-'.join([dbg.group_name.replace("BenchPress", "BP").replace("BenchDirect", "BD") for dbg in db_groups])),
path = workspace_path / "stats",
**plot_config if plot_config else {},
)
# base_dist = distributions.GenericDistribution(
# samples = [int(x*10) for x in stats['Base']['best_distance']],
# log_path = workspace_path,
# set_name = "Base_best_dist_distr_{}".format(feature_space)
# )
# feat_dist = distributions.GenericDistribution(
# samples = [int(x*10) for x in stats['Feature_Head']['best_distance']],
# log_path = workspace_path,
# set_name = "FeatHead_best_dist_distr_{}".format(feature_space)
# )
# base_dist.plot()
# feat_dist.plot()
# (base_dist - feat_dist).plot()
# single_base_dist = distributions.GenericDistribution(
# samples = [int(x*10) for x in stats['Base']['singleshot_distance']],
# log_path = workspace_path,
# set_name = "Base_single_dist_distr_{}".format(feature_space)
# )
# single_feat_dist = distributions.GenericDistribution(
# samples = [int(x*10) for x in stats['Feature_Head']['singleshot_distance']],
# log_path = workspace_path,
# set_name = "FeatHead_single_dist_distr_{}".format(feature_space)
# )
# single_base_dist.plot()
# single_feat_dist.plot()
# (single_base_dist - single_feat_dist).plot()
return
@public.evaluator
def GenDistanceDistribution(**kwargs) -> None:
"""
For a given beam search generation, calculate the distance distribution from the given target benchmark.
Compare against multiple db_groups.
"""
db_groups = kwargs.get('db_groups')
feature_space = kwargs.get('feature_space')
plot_config = kwargs.get('plot_config')
generation_id = kwargs.get('generation_id')
workspace_path = kwargs.get('workspace_path') / "gen_distance_distr" / feature_space
workspace_path.mkdir(exist_ok = True, parents = True)
"""
groups = {
target: {
db_group_name: [sample_score]
}
}
"""
groups = {}
for dbg in db_groups:
## Flattened list of scores distribution, sorted by target -> group
if not dbg.db_type == evaluate_cand_database.SearchCandidateDatabase:
raise ValueError("Beam search analysis requires SearchCandidateDatabase, but received {}", dbg.db_type)
benchmarks = [x for x in dbg.get_data if x.generation_id == generation_id]
for b in benchmarks:
target = b.target_benchmark.split('\n')[0].replace("// ", "")
if target not in groups:
groups[target] = {dbg.group_name: []}
elif dbg.group_name not in groups[target]:
groups[target][dbg.group_name] = []
groups[target][dbg.group_name] += [b.sample_score]*b.frequency
stats = {}
  for target, target_groups in groups.items():
    distrs = []
    stats[target] = {}
    for name, data in target_groups.items():
d = distributions.GenericDistribution([int(round(x)) for x in data if x < float('inf')], workspace_path, "{}-{}".format(target, name))
d.plot()
distrs.append(d)
stats[target][name] = {}
stats[target][name]['average'] = d.average
stats[target][name]['median'] = d.median
stats[target][name]['min'] = d.min
stats[target][name]['max'] = d.max
if len(distrs) == 2:
diff = distrs[0] - distrs[1]
stats[target]["likelihood"] = distrs[0] - distrs[1] < 0
stats[target]["closer_than_the_minimum"] = distrs[0] <= distrs[1].min
stats[target]["closer_than_2"] = distrs[0] <= distrs[1].get_sorted_index(2)
stats[target]["closer_than_4"] = distrs[0] <= distrs[1].get_sorted_index(4)
stats[target]["closer_than_8"] = distrs[0] <= distrs[1].get_sorted_index(8)
stats[target]["closer_than_16"] = distrs[0] <= distrs[1].get_sorted_index(16)
stats[target]["closer_than_32"] = distrs[0] <= distrs[1].get_sorted_index(32)
diff.plot()
l.logger().info(feature_space)
l.logger().info("Avg closer than 1: {}".format(sum([x['closer_than_the_minimum'] for y, x in stats.items()]) / len(stats.keys())))
l.logger().info("Avg closer than 2: {}".format(sum([x['closer_than_2'] for y, x in stats.items()]) / len(stats.keys())))
l.logger().info("Avg closer than 4: {}".format(sum([x['closer_than_4'] for y, x in stats.items()]) / len(stats.keys())))
l.logger().info("Avg closer than 8: {}".format(sum([x['closer_than_8'] for y, x in stats.items()]) / len(stats.keys())))
l.logger().info("Avg closer than 16: {}".format(sum([x['closer_than_16'] for y, x in stats.items()]) / len(stats.keys())))
l.logger().info("Avg closer than 32: {}".format(sum([x['closer_than_32'] for y, x in stats.items()]) / len(stats.keys())))
with open(workspace_path / "stats.json", 'w') as outf:
json.dump(stats, outf, indent = 2)
return
| 18,825 | 45.830846 | 189 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/srciror.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluation script for the SRCIROR mutation tool.
"""
import typing
import glob
import tempfile
import subprocess
import pathlib
import json
import os
import tqdm
import functools
import math
import multiprocessing
from absl import flags
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.experiments import workers
from deeplearning.benchpress.experiments import public
from deeplearning.benchpress.experiments import clsmith
FLAGS = flags.FLAGS
try:
SRCIROR_SRC = environment.SRCIROR_SRC
SRCIROR_IR = environment.SRCIROR_IR
SRCIROR_BASE = pathlib.Path(SRCIROR_SRC).resolve().parent
CLSMITH_INCLUDE = environment.CLSMITH_INCLUDE
except Exception:
pass
## Some hard limits in order to finish the experiments this year.
# max amount of mutants per input source.
PER_INPUT_HARD_LIMIT = 1000
SEARCH_DEPTH_HARD_LIMIT = 30
def generate_IR_mutants(src: str, incl: str, timeout_seconds: int = 45) -> typing.Set[typing.Tuple[pathlib.Path, str]]:
"""
Collect all mutants from src and return them
"""
if incl:
with open(SRCIROR_BASE / "incl.h", 'w') as f:
f.write(incl)
f.flush()
with open(SRCIROR_BASE / "test.c", 'w') as f:
try:
f.write(src)
f.flush()
except UnicodeDecodeError:
return []
except UnicodeEncodeError:
return []
  # Construct and execute the SRCIROR command
srciror_cmd = (["timeout", "-s9", str(timeout_seconds), "bash", SRCIROR_IR]
+ opencl.GetClangArgs(use_shim = False, use_aux_headers = False, extra_args = ["-include{}".format(pathlib.Path(CLSMITH_INCLUDE) / "CLSmith.h")] if incl else [""])
+ ["-include/tmp/mutec_src_temp_header.h" if incl else ""])
process = subprocess.Popen(
srciror_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
try:
stdout, stderr = process.communicate()
except TimeoutError:
pass
os.remove(str(SRCIROR_BASE / "test.c"))
os.remove(str(SRCIROR_BASE / "incl.h"))
srciror_ir_paths = glob.glob(str(SRCIROR_BASE / "test-*.ll"))
mutants = set()
for path in srciror_ir_paths[:PER_INPUT_HARD_LIMIT]:
try:
mutants.add((opencl.HumanReadableBytecode(path), incl))
except ValueError:
continue
return mutants
def generate_src_mutants(src: str, incl: str, timeout_seconds: int = 45) -> typing.Set[typing.Tuple[str, str]]:
"""
Collect all mutants from src and return them
"""
if incl:
with open(SRCIROR_BASE / "incl.h", 'w') as f:
f.write(incl)
f.flush()
src = "#include \"incl.h\"\n" + src
with open(SRCIROR_BASE / "test.c", 'w') as f:
try:
f.write(src)
f.flush()
except UnicodeDecodeError:
return []
except UnicodeEncodeError:
return []
  # Construct and execute the SRCIROR command
srciror_cmd = (["timeout", "-s9", str(timeout_seconds), "bash", SRCIROR_SRC]
+ opencl.GetClangArgs(use_shim = False, use_aux_headers = False, extra_args = ["-include{}".format(pathlib.Path(CLSMITH_INCLUDE) / "CLSmith.h")] if incl else [""])
+ ["-include/tmp/mutec_src_temp_header.h" if incl else ""])
process = subprocess.Popen(
srciror_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
try:
stdout, stderr = process.communicate()
except TimeoutError:
pass
os.remove(str(SRCIROR_BASE / "test.c"))
if incl:
os.remove(str(SRCIROR_BASE / "incl.h"))
srciror_src_paths = glob.glob(str(SRCIROR_BASE / "test.*.c"))
mutants = set([(open(x, 'r').read(), incl) for x in srciror_src_paths[:PER_INPUT_HARD_LIMIT]])
for m in srciror_src_paths:
os.remove(m)
return mutants
def beam_srciror(srcs : typing.List[typing.Tuple[str, str, float]],
target_features : typing.Dict[str, float],
feat_space : str,
beam_width : int,
srciror_cache : samples_database.SamplesDatabase,
src_mode : bool = True,
) -> typing.List[typing.Tuple[str, float]]:
"""
Run generational beam search over starting github kernels
to minimize distance from target features.
"""
better_score = True
total_beams, beam, closest = set(), [], []
gen_id = 0
if src_mode:
generate_mutants = lambda x, y: generate_src_mutants(x, y)
ext_func = functools.partial(
workers.ExtractAndCalculate,
target_features = target_features,
feature_space = feat_space,
)
db_func = workers.FeatureExtractor
else:
generate_mutants = lambda x, y: generate_IR_mutants(x, y)
ext_func = functools.partial(
workers.IRExtractAndCalculate,
target_features = target_features,
feature_space = feat_space,
)
db_func = workers.IRFeatureExtractor
while better_score:
cands = set()
## Generate mutants for current generation.
for src, incl, dist in tqdm.tqdm(srcs, total = len(srcs), desc = "SRCIROR_src candidates {}".format(gen_id), leave = False):
cands.update(generate_mutants(src, incl)) ### This should collect all mutants and return them, out of a single source.
## Extract their features and calculate distances.
pool = multiprocessing.Pool()
# total.update(cands)
try:
for cand in tqdm.tqdm(pool.imap_unordered(ext_func, cands), total = len(cands), desc = "Extract Features {}".format(gen_id), leave = False):
if cand:
beam.append(cand)
except Exception as e:
l.logger().error(e)
pool.terminate()
raise e
pool.close()
## Sort by distance in ascending order. If score is better, keep doing beam search
## srcs are included to the outputs, in order to keep them if the offsprings are worse.
closest = sorted(beam + srcs, key = lambda x: x[2])[:beam_width]
total_beams.update([(x, y) for x, y, _ in closest])
min_length = min(len(closest), len(srcs))
if sum([x for _, _, x in closest[:min_length]]) < sum([x for _, _, x in srcs[:min_length]]) and gen_id < SEARCH_DEPTH_HARD_LIMIT:
srcs = closest
beam = []
else:
better_score = False
gen_id += 1
## Store all mutants in database.
with srciror_cache.Session(commit = True) as s:
pool = multiprocessing.Pool()
try:
idx = srciror_cache.count
for dp in tqdm.tqdm(pool.imap_unordered(db_func, total_beams), total = len(total_beams), desc = "Add mutants to DB", leave = False):
if dp:
src, incl, feats = dp
sample = samples_database.Sample.FromArgsLite(idx, incl + src, feats)
exists = s.query(samples_database.Sample.sha256).filter_by(sha256 = sample.sha256).scalar() is not None
if not exists:
s.add(sample)
idx += 1
except Exception as e:
l.logger().error(e)
pool.terminate()
raise e
pool.close()
s.commit()
return closest
@public.evaluator
def SRCIRORVsBenchPress(**kwargs) -> None:
"""
  Compare the SRCIROR mutation tool on GitHub's database against BenchPress.
Comparison is similar to KAverageScore comparison.
"""
seed = kwargs.get('seed')
benchpress = kwargs.get('benchpress')
srciror_cache = kwargs.get('srciror_cache', '')
mutation_level = kwargs.get('mutation_level')
target = kwargs.get('targets')
feature_space = kwargs.get('feature_space')
top_k = kwargs.get('top_k')
beam_width = kwargs.get('beam_width')
unique_code = kwargs.get('unique_code', False)
plot_config = kwargs.get('plot_config')
workspace_path = kwargs.get('workspace_path') / "srciror_vs_benchpress" / feature_space
workspace_path.mkdir(exist_ok = True, parents = True)
if mutation_level == 'src':
if not pathlib.Path(SRCIROR_SRC).exists():
raise FileNotFoundError("SRCIROR_src executable not found: {}".format(SRCIROR_SRC))
else:
if not pathlib.Path(SRCIROR_IR).exists():
raise FileNotFoundError("SRCIROR_IR executable not found: {}".format(SRCIROR_IR))
if seed.db_type != encoded.EncodedContentFiles and seed.db_type != clsmith.CLSmithDatabase:
raise ValueError("Scores require EncodedContentFiles or CLSmithDatabase but received", seed.db_type)
if benchpress.db_type != samples_database.SamplesDatabase:
raise ValueError("BenchPress scores require SamplesDatabase but received", benchpress.db_type)
if seed.db_type == clsmith.CLSmithDatabase:
if not pathlib.Path(CLSMITH_INCLUDE).exists():
raise FileNotFoundError("CLSMITH_INCLUDE folder does not exist: {}".format(CLSMITH_INCLUDE))
## Load database and checkpoint of targets.
mutec_db = samples_database.SamplesDatabase(url = "sqlite:///{}".format(pathlib.Path(srciror_cache).resolve()), must_exist = False)
done = set()
with mutec_db.Session(commit = True) as s:
res = s.query(samples_database.SampleResults).filter_by(key = feature_space).first()
if res is not None:
done.update([str(x) for x in res.results.split('\n')])
s.commit()
## Initialize dictionary.
groups = {}
groups["SRCIROR_{}".format(mutation_level)] = ([], [])
groups[seed.group_name] = ([], [])
groups[benchpress.group_name] = ([], [])
## Fix fetching data functions.
if unique_code:
git_get_data = lambda x: seed.get_unique_data_features(x)
bp_get_data = lambda x: benchpress.get_unique_data_features(x)
else:
git_get_data = lambda x: seed.get_data_features(x)
bp_get_data = lambda x: benchpress.get_data_features(x)
  ## Run engine on SRCIROR.
benchmarks = target.get_benchmarks(feature_space)
for benchmark in tqdm.tqdm(benchmarks, total = len(benchmarks), desc = "Benchmarks"):
## This has already been searched for.
if benchmark.name in done:
continue
    ## Tuple of closest src, distance from target benchmark.
closest = workers.SortedSrcDistances(git_get_data(feature_space), benchmark.features, feature_space)
## IF CLsmith takes too long here, collect only features, then for the beam size go and fetch
## the code.
# Split source and distances lists.
git_dist = [x for _, _, x in closest]
## If distances are already minimized, nothing to do.
if sum(git_dist[:top_k]) == 0:
continue
l.logger().info(benchmark.name)
closest_mutec_src = beam_srciror([(src, inc, dist) for src, inc, dist in closest[:beam_width] if dist > 0], benchmark.features, feature_space, beam_width, mutec_db)[:top_k] # tuple of (src, distance)
closest_mutec_dist = [x for _, _, x in closest_mutec_src]
assert len(closest_mutec_dist) == len(git_dist[:top_k])
    ## If SRCIROR has provided a better score
if sum(closest_mutec_dist) < sum(git_dist[:top_k]):
l.logger().info("Score reduced from {} to {}".format(sum(git_dist[:top_k]), sum(closest_mutec_dist)))
l.logger().info("Best score from {} to {}".format(git_dist[0], closest_mutec_dist[0]))
with mutec_db.Session(commit = True) as s:
res = s.query(samples_database.SampleResults).filter_by(key = feature_space).first()
if res is not None:
res.results = res.results + "\n" + benchmark.name
else:
s.add(samples_database.SampleResults(key = feature_space, results = benchmark.name))
s.commit()
# Compute target's distance from O(0,0)
target_origin_dist = math.sqrt(sum([x**2 for x in benchmark.features.values()]))
mutec_avg_dist = sum(closest_mutec_dist) / top_k
groups["SRCIROR_{}".format(mutation_level)][0].append(benchmark.name)
groups["SRCIROR_{}".format(mutation_level)][1].append(100 * ((target_origin_dist - mutec_avg_dist) / target_origin_dist))
# Compute target's distance from O(0,0)
git_avg_dist = sum(git_dist[:top_k]) / top_k
groups[seed.group_name][0].append(benchmark.name)
groups[seed.group_name][1].append(100 * ((target_origin_dist - git_avg_dist) / target_origin_dist))
## Run engine on benchpress.
benchmarks = target.get_benchmarks(feature_space)
for benchmark in tqdm.tqdm(benchmarks, total = len(benchmarks), desc = "Benchpress"):
## Run only for benchmarks mutec has improved.
if benchmark.name in groups["SRCIROR_{}".format(mutation_level)][0]:
l.logger().info(benchmark.name)
distances = workers.SortedDistances(bp_get_data(feature_space), benchmark.features, feature_space)
# Compute target's distance from O(0,0)
target_origin_dist = math.sqrt(sum([x**2 for x in benchmark.features.values()]))
avg_dist = sum(distances[:top_k]) / len(distances[:top_k])
groups[benchpress.group_name][0].append(benchmark.name)
groups[benchpress.group_name][1].append(100 * ((target_origin_dist - avg_dist) / target_origin_dist))
plotter.GrouppedBars(
groups = groups,
plot_name = "srciror_src_avg_{}_{}_{}".format(top_k, seed.group_name, feature_space.replace("Features", " Features")),
path = workspace_path,
**plot_config if plot_config else {},
)
return
| 13,962 | 36.942935 | 204 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/cldrive.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluation script for kernel execution using cldrive or similar drivers.
"""
import datetime
import sqlite3
import tqdm
import pickle
import math
import pathlib
import typing
import pandas as pd
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import distributions
# from deeplearning.benchpress.util import cldrive_server
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.experiments import workers
from deeplearning.benchpress.experiments import public
from absl import flags
Base = declarative.declarative_base()
FLAGS = flags.FLAGS
# flags.DEFINE_string(
# "remote_cldrive_cache",
# None,
# "Set reachable address of cldrive cache. If None, cldrive cache is considered to reside in the local machine."
# "If set, computation happens in local machine, caching in get is de-activated and computed samples are sent to remote."
# )
class Data(Base):
__tablename__ = "sampling_results"
"""
DB Table for concentrated validation results.
"""
key : str = sql.Column(sql.String(1024), primary_key=True)
results : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
class CLDriveSample(Base, sqlutil.ProtoBackedMixin):
"""
A database row representing a CLDrive execution trace.
"""
__tablename__ = "cldrive_samples"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# unique hash of cldrive execution.
sha256 : str = sql.Column(sql.String(64), nullable = False, index = True)
# Global size of execution
global_size : int = sql.Column(sql.Integer, nullable = False)
# Local size of execution
local_size : int = sql.Column(sql.Integer, nullable = False)
# Executed source code
source : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Code features, possibly directly derived from extractos.
features : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Name of dataset where this sample comes from.
dataset : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# cpu transfer time of kernel
cpu_transfer_time_ns : str = sql.Column(sql.Integer, nullable = False)
# cpu execution time of kernel
cpu_kernel_time_ns : str = sql.Column(sql.Integer, nullable = False)
# gpu transfer time of kernel
gpu_transfer_time_ns : str = sql.Column(sql.Integer, nullable = False)
# gpu execution time of kernel
gpu_kernel_time_ns : str = sql.Column(sql.Integer, nullable = False)
# amount of transferred bytes
transferred_bytes : int = sql.Column(sql.Integer, nullable = False)
# Whether cldrive executes correctly or not.
status : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Date
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
id : int,
global_size : int,
local_size : int,
source : str,
grewe_features : typing.Dict[str, float],
dataset : str,
cpu_transfer_time_ns : typing.List[int],
cpu_kernel_time_ns : typing.List[int],
gpu_transfer_time_ns : typing.List[int],
gpu_kernel_time_ns : typing.List[int],
transferred_bytes : int,
status : str,
) -> typing.Dict[str, typing.Any]:
return CLDriveSample(**{
"sha256" : crypto.sha256_str(source + dataset + str(global_size) + str(local_size)),
"global_size" : global_size,
"local_size" : local_size,
"source" : source,
"features" : "GreweFeatures:\n{}".format('\n'.join(["{}:{}".format(k,v) for k, v in grewe_features.items()])),
"dataset" : dataset,
"cpu_transfer_time_ns" : '\n'.join([str(int(x)) for x in cpu_transfer_time_ns if x != 'nan' and x != '']),
"cpu_kernel_time_ns" : '\n'.join([str(int(x)) for x in cpu_kernel_time_ns if x != 'nan' and x != '']),
"gpu_transfer_time_ns" : '\n'.join([str(int(x)) for x in gpu_transfer_time_ns if x != 'nan' and x != '']),
"gpu_kernel_time_ns" : '\n'.join([str(int(x)) for x in gpu_kernel_time_ns if x != 'nan' and x != '']),
"transferred_bytes" : transferred_bytes,
"status" : status,
"date_added" : datetime.datetime.utcnow(),
})
class CLDriveExecutions(sqlutil.Database):
"""A database of CLDrive Execution samples."""
@property
def count(self):
"""Number of cldrive traces in DB."""
with self.Session() as s:
count = s.query(CLDriveSample).count()
return count
@property
def status_cache(self):
"""Return list of tuples [hash, status]"""
if self._status_cache is None:
with self.Session() as s:
self._status_cache = {f.sha256: f.status for f in s.query(CLDriveSample).yield_per(1000)}
return self._status_cache
# @property
# def get_session(self):
# """
# Return the correct session for the cache.
# """
# if FLAGS.remote_cldrive_cache is None:
# return self.Session
# else:
# return self.remote_session
def __init__(self, url: str, must_exist: bool = False):
super(CLDriveExecutions, self).__init__(url, Base, must_exist = must_exist)
self._status_cache = None
# if FLAGS.remote_cldrive_cache is not None:
# self.remote_session = cldrive_server.RemoteSession(FLAGS.remote_cldrive_cache)
def add_entry(self,
src : str,
grewe_features : typing.Dict[str, float],
dataset : str,
status : str,
global_size : int,
local_size : int,
df : pd.DataFrame,
include : str = ""
) -> None:
"""
Adds execution entries from pandas dataframe.
"""
sha = crypto.sha256_str(include + src + dataset + str(global_size) + str(local_size))
try:
with self.Session(commit = True) as session:
entry = session.query(CLDriveSample).filter_by(sha256 = sha).first()
if entry is None:
if status in {"CPU", "GPU"}:
idx = 0
transferred_bytes = float('NaN')
while idx < len(df.transferred_bytes) and math.isnan(transferred_bytes):
try:
transferred_bytes = int(df.transferred_bytes[idx])
except ValueError:
idx += 1
try:
session.add(
CLDriveSample.FromArgs(
id = self.count,
global_size = global_size,
local_size = local_size,
source = include + src,
grewe_features = grewe_features,
dataset = dataset,
cpu_transfer_time_ns = list(df[df['device'].str.contains("CPU")].transfer_time_ns),
cpu_kernel_time_ns = list(df[df['device'].str.contains("CPU")].kernel_time_ns),
gpu_transfer_time_ns = list(df[df['device'].str.contains("GPU")].transfer_time_ns),
gpu_kernel_time_ns = list(df[df['device'].str.contains("GPU")].kernel_time_ns),
transferred_bytes = transferred_bytes,
status = status,
)
)
except ValueError as e:
l.logger().warn(e)
l.logger().warn("Skip adding entry,")
pass
else:
session.add(
CLDriveSample.FromArgs(
id = self.count,
global_size = global_size,
local_size = local_size,
source = include + src,
grewe_features = grewe_features,
dataset = dataset,
cpu_transfer_time_ns = [],
cpu_kernel_time_ns = [],
gpu_transfer_time_ns = [],
gpu_kernel_time_ns = [],
transferred_bytes = -1,
status = status,
)
)
if self._status_cache is not None:
assert sha not in self._status_cache, "{} should not be in DB".format(sha)
self._status_cache[sha] = status
elif status in {"CPU", "GPU"}:
entry.cpu_transfer_time_ns = (entry.cpu_transfer_time_ns + "\n" if entry.cpu_transfer_time_ns != '' else '') + '\n'.join([str(x) for x in df[df['device'].str.contains("CPU")].transfer_time_ns if x != 'nan' and x != ''])
entry.cpu_kernel_time_ns = (entry.cpu_kernel_time_ns + "\n" if entry.cpu_kernel_time_ns != '' else '') + '\n'.join([str(x) for x in df[df['device'].str.contains("CPU")].kernel_time_ns if x != 'nan' and x != ''])
entry.gpu_transfer_time_ns = (entry.gpu_transfer_time_ns + "\n" if entry.gpu_transfer_time_ns != '' else '') + '\n'.join([str(x) for x in df[df['device'].str.contains("GPU")].transfer_time_ns if x != 'nan' and x != ''])
entry.gpu_kernel_time_ns = (entry.gpu_kernel_time_ns + "\n" if entry.gpu_kernel_time_ns != '' else '') + '\n'.join([str(x) for x in df[df['device'].str.contains("GPU")].kernel_time_ns if x != 'nan' and x != ''])
session.commit()
except Exception as e:
raise e
return
def get_entry(self,
src : str,
dataset : str,
global_size : int,
local_size : int,
include : str = "",
) -> "CLDriveSample":
"""
Fetch row from DB, if exists.
"""
sha = crypto.sha256_str(include + src + dataset + str(global_size) + str(local_size))
try:
with self.Session() as session:
entry = session.query(CLDriveSample).filter_by(sha256 = sha).first()
if entry is not None:
return entry
else:
return None
except Exception as e:
l.logger().error(e)
return None
def update_and_get(self,
src : str,
grewe_features : typing.Dict[str, float],
dataset : str,
global_size : int,
local_size : int,
num_runs : int,
timeout : int = 0,
include : str = "",
extra_args : typing.List[str] = [],
) -> "CLDriveSample":
"""
Add or update incoming entry by running CLDrive and pinging the database.
"""
df, label = opencl.CLDriveDataFrame(
src,
header_file = include,
num_runs = num_runs,
gsize = global_size,
lsize = local_size,
extra_args = extra_args,
timeout = timeout,
)
self.add_entry(include + src, grewe_features, dataset, label, global_size, local_size, df)
    return self.get_entry(src, dataset, global_size, local_size, include = include)
def get_valid_data(self, dataset: str = None) -> typing.List[CLDriveSample]:
"""
Return all valid entries, labelled either as CPU or GPU.
"""
with self.Session() as session:
if dataset:
return session.query(
CLDriveSample
).filter(
sql.and_(
CLDriveSample.dataset == dataset,
CLDriveSample.status.in_({"CPU", "GPU"})
)
).yield_per(1000)
else:
return session.query(
CLDriveSample
).filter(
CLDriveSample.status.in_({"CPU", "GPU"})
).yield_per(1000)
@classmethod
def reduce_execution_times(cls, exec_times: typing.List[typing.Any]) -> float:
"""
This is the centralized method that reduces a list of execution time distribution, to a single float.
Policy can be average, median or anything else, but it has to happen through this method only, for consistency.
"""
def is_float(x):
try:
float(x)
return True
except ValueError:
return False
    def median_of_list(lst: typing.List) -> float:
      """
      Return the median of a list of numbers.
      """
      if len(lst) == 0:
        return None
      lst = sorted(lst)
      mid = len(lst) // 2
      if len(lst) % 2 == 0:
        return (lst[mid - 1] + lst[mid]) / 2
      else:
        return lst[mid]
if isinstance(exec_times, str):
return median_of_list([int(float(x)) for x in exec_times.split('\n') if x != 'nan' and is_float(x)])
    elif isinstance(exec_times, pd.Series):
      return exec_times.median()
else:
raise NotImplementedError("Unsupported type: {}".format(type(exec_times)))
def get_execution_times_ms(self, src: str, dataset: str, global_size: int, local_size: int) -> typing.Tuple[typing.List[int], typing.List[int], typing.List[int], typing.List[int]]:
"""
Search code by hash and return lists with all different execution times.
"""
sha = crypto.sha256_str(src + dataset + str(global_size) + str(local_size))
ctt, ckt, gtt, gkt = [], [], [], []
with self.Session() as session:
entry = session.query(CLDriveSample).filter_by(sha256 = sha).first()
if entry is None:
return None
else:
ctt = [int(x) // 1000 for x in entry.cpu_transfer_time_ns.split('\n')]
ckt = [int(x) // 1000 for x in entry.cpu_kernel_time_ns.split('\n')]
gtt = [int(x) // 1000 for x in entry.gpu_transfer_time_ns.split('\n')]
gkt = [int(x) // 1000 for x in entry.gpu_kernel_time_ns.split('\n')]
return ctt, ckt, gtt, gkt
def ComputeLabel(cpu_transfer : typing.List[int],
cpu_execute : typing.List[int],
gpu_transfer : typing.List[int],
gpu_execute : typing.List[int],
workspace : pathlib.Path,
) -> typing.Dict[str, float]:
"""
Collects execution metrics of kernels, computes statistical
distribution of execution times and returns optimal device
to execute with certainty metrics.
"""
cput_dist = distributions.GenericDistribution(cpu_transfer, workspace, "cpu_transfer_time")
cpue_dist = distributions.GenericDistribution(cpu_execute, workspace, "cpu_execution_time")
gput_dist = distributions.GenericDistribution(gpu_transfer, workspace, "gpu_transfer_time")
gpue_dist = distributions.GenericDistribution(gpu_execute, workspace, "gpu_execution_time")
## P[CPUt + CPUe] and P[GPUt + GPUe].
cpu_dist = cput_dist + cpue_dist
gpu_dist = gput_dist + gpue_dist
## P[CPU - GPU]
  dist = cpu_dist - gpu_dist
return {
"CPU": round(100 * (dist < 0), 2),
"GPU": round(100 * (dist > 0), 2),
}
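# Illustrative reading of ComputeLabel (hypothetical numbers): if the summed CPU
# transfer+kernel time distribution lies almost entirely below the GPU one, the
# returned dictionary is close to {"CPU": 100.0, "GPU": 0.0}, i.e. the percentages
# express how certain we are that each device is the faster choice.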
@public.evaluator
def TopKCLDrive(**kwargs) -> None:
"""
Collect top-K samples per database group for each target benchmark.
"""
db_groups = kwargs.get('db_groups')
cldrive_cache = kwargs.get('cldrive_cache', '')
target = kwargs.get('targets')
feature_space = kwargs.get('feature_space')
top_k = kwargs.get('top_k')
unique_code = kwargs.get('unique_code', False)
plot_config = kwargs.get('plot_config')
workspace_path = kwargs.get('workspace_path') / "topk_cldrive"
workspace_path.mkdir(exist_ok = True, parents = True)
groups = {}
gsize, lsize = [2**10, 2**15, 2**20], [2**10] # 1024 is max local size for GTX1080.
cldrive_db = CLDriveExecutions(url = "sqlite:///{}".format(pathlib.Path(cldrive_cache).resolve()), must_exist = False)
# For each db group -> for each target -> k samples -> 1) benchmark.name 2) distance 3) label.
for dbg in db_groups:
l.logger().info("Running {} on cldrive".format(dbg.group_name))
if not (dbg.db_type == samples_database.SamplesDatabase or dbg.db_type == encoded.EncodedContentFiles):
raise ValueError("Scores require SamplesDatabase or EncodedContentFiles but received", dbg.db_type)
if unique_code:
get_data = lambda x: dbg.get_unique_data_features(x)
else:
get_data = lambda x: dbg.get_data_features(x)
## Unpack and collect benchmarks
benchmarks = target.get_benchmarks(feature_space)
for idx, benchmark in enumerate(tqdm.tqdm(benchmarks, total = len(benchmarks), desc = "Benchmarks")):
# if feature_space == "AutophaseFeatures" and (idx < 25 or benchmark.name == "particle_double.cl-1" or benchmark.name == "particle_double.cl-3" or benchmark.name == "particle_naive.cl" or benchmark.name == "particle_single.cl-1"):
# continue
closest_src = None
for gs in gsize:
for ls in lsize:
if ls > gs:
continue
## Run cldrive on benchmark.
benchmark_label = "TimeOut"
nruns = 50
bench_runs = nruns
try:
df, benchmark_label = opencl.CLDriveDataFrame(benchmark.contents, num_runs = bench_runs, gsize = gs, lsize = ls, timeout = 200)
except TimeoutError:
pass
cldrive_db.add_entry(benchmark.contents, {}, target.target, benchmark_label, gs, ls, df)
if benchmark_label not in {"CPU", "GPU"}:
continue
times = cldrive_db.get_execution_times_ms(benchmark.contents, target.target, gs, ls)
if times:
ctt, ckt, gtt, gkt = times
prob_labels = ComputeLabel(ctt, ckt, gtt, gkt, workspace_path)
else:
raise ValueError("Why can you not find a file you just inserted ?")
## Fix dictionary entry.
config = "g{}-l{}".format(gs, ls)
if config not in groups:
groups[config] = {}
if dbg.group_name not in groups[config]:
groups[config][dbg.group_name] = ([], [], [], [])
groups[config][dbg.group_name][0].append(
{
'benchmark_name' : benchmark.name,
'benchmark_label' : "CPU:{}/GPU:{}".format(prob_labels['CPU'], prob_labels['GPU']),
'benchmark_contents' : benchmark.contents
}
)
## Get unique contentfiles of database group.
if closest_src is None:
l.logger().info(benchmark.name)
closest_src = workers.SortedSrcDistances(get_data(feature_space), benchmark.features, feature_space)
l.logger().info("global size: {}, local size: {}".format(gs, ls))
l.logger().error("Benchmark label: {}, prob: {}".format(benchmark_label, "CPU:{}/GPU:{}".format(prob_labels['CPU'], prob_labels['GPU'])))
cand_idx = 0
for idx, (src, incl, dist) in enumerate(closest_src):
if cand_idx >= top_k:
break
label = "TimeOut"
c_runs = nruns
try:
df, label = opencl.CLDriveDataFrame(incl + src, num_runs = c_runs, gsize = gs, lsize = ls, timeout = 200)
except TimeoutError:
pass
cldrive_db.add_entry(incl + src, {}, dbg.group_name, label, gs, ls, df)
if label not in {"CPU", "GPU"}:
continue
times = cldrive_db.get_execution_times_ms(incl + src, dbg.group_name, gs, ls)
if times:
ctt, ckt, gtt, gkt = times
prob_labels = ComputeLabel(ctt, ckt, gtt, gkt, workspace_path)
else:
raise ValueError("Why can you not find a file you just inserted ?")
l.logger().error("Label: {}, probs: {}, distance: {}".format(label, "CPU:{}/GPU:{}".format(prob_labels['CPU'], prob_labels['GPU']), dist))
if len(groups[config][dbg.group_name][1]) - 1 < idx:
groups[config][dbg.group_name][1].append([dist])
groups[config][dbg.group_name][2].append(["CPU:{}/GPU:{}".format(prob_labels['CPU'], prob_labels['GPU'])])
groups[config][dbg.group_name][3].append([incl + src])
else:
groups[config][dbg.group_name][1][idx].append(dist)
groups[config][dbg.group_name][2][idx].append("CPU:{}/GPU:{}".format(prob_labels['CPU'], prob_labels['GPU']))
groups[config][dbg.group_name][3][idx].append(incl + src)
cand_idx += 1
# Some thoughts: Maybe a dedicated plot to show distribution of execution times, etc. ?
# In here you basically need the label.
# Compute target's distance from O(0,0)
# target_origin_dist = math.sqrt(sum([x**2 for x in benchmark.features.values()]))
# avg_dist = sum([x[1] for x in closest_src]) / top_k
# groups[config][dbg.group_name][1].append(100 * ((target_origin_dist - avg_dist) / target_origin_dist))
print(groups)
with open("./data_{}.pkl".format(feature_space), 'wb') as inf:
pickle.dump(groups, inf)
return
| 21,995 | 42.214145 | 236 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/benchmark_analysis.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Target benchmark analysis evaluator.
"""
import tqdm
import pickle
import json
import sklearn
import sklearn.preprocessing
from sklearn.decomposition import PCA
from deeplearning.benchpress.experiments import public
from deeplearning.benchpress.experiments import clsmith
from deeplearning.benchpress.experiments import workers
from deeplearning.benchpress.experiments import distance_score
from deeplearning.benchpress.experiments.turing import server
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.preprocessors import c
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.util import plotter
@public.evaluator
def AnalyzeTarget(**kwargs) -> None:
"""
Analyze requested target benchmark suites.
"""
targets = kwargs.get('targets')
tokenizer = kwargs.get('tokenizer')
workspace_path = kwargs.get('workspace_path') / "analyze_target"
workspace_path.mkdir(exist_ok = True, parents = True)
raise NotImplementedError
return
@public.evaluator
def TokenSizeDistribution(**kwargs) -> None:
"""
Plot token size distribution among multiple SamplesDatabases.
"""
db_groups = kwargs.get('db_groups')
plot_config = kwargs.get('plot_config')
workspace_path = kwargs.get('workspace_path') / "token_size_distr"
workspace_path.mkdir(exist_ok = True, parents = True)
names = []
token_lens = []
for dbg in db_groups:
if dbg.db_type != samples_database.SamplesDatabase:
raise ValueError("Token size distribution requires SamplesDatabase. Received {}".format(dbg.db_type))
lens = []
for db in dbg.databases:
lens += db.get_compilable_num_tokens
names.append(dbg.group_name)
token_lens.append(lens)
plotter.RelativeDistribution(
x = names,
y = token_lens,
plot_name = "{}_token_dist".format('-'.join(names)),
path = workspace_path,
x_name = "Token Length",
**plot_config if plot_config else {},
)
return
@public.evaluator
def LLVMInstCountDistribution(**kwargs) -> None:
"""
Plot LLVM Instruction count distribution among functions in SamplesDatabase dbs.
"""
db_groups = kwargs.get('db_groups')
plot_config = kwargs.get('plot_config')
workspace_path = kwargs.get('workspace_path') / "llvm_instcount_distr"
workspace_path.mkdir(exist_ok = True, parents = True)
names = []
token_lens = []
for dbg in db_groups:
if dbg.db_type != samples_database.SamplesDatabase:
raise ValueError("Token size distribution requires SamplesDatabase. Received {}".format(dbg.db_type))
lens = []
for db in dbg.databases:
lens += [x[1]["InstCountFeatures"]["TotalInsts"] for x in db.get_samples_features if "InstCountFeatures" in x[1]]
names.append(dbg.group_name)
token_lens.append(lens)
plotter.RelativeDistribution(
x = names,
y = token_lens,
plot_name = "{}_llvm_inst".format('-'.join(names)),
path = workspace_path,
x_name = "LLVM IR Instructions Length (-O1)"
**plot_config if plot_config else {},
)
return
@public.evaluator
def PCASamplesFeatures(**kwargs) -> None:
"""
Plot PCA-ed features of different SamplesDatabase samples.
"""
db_groups = kwargs.get('db_groups')
plot_config = kwargs.get('plot_config')
feature_space = kwargs.get('feature_space')
workspace_path = kwargs.get('workspace_path') / "pca_samples_feat" / feature_space
workspace_path.mkdir(exist_ok = True, parents = True)
indexed_data = {}
full_data = []
scaler = sklearn.preprocessing.StandardScaler()
i = 0
for dbg in db_groups:
if dbg.db_type != samples_database.SamplesDatabase:
raise ValueError("Token size distribution requires SamplesDatabase. Received {}".format(dbg.db_type))
ds = []
for db in dbg.databases:
ds += [x for _, x in db.get_samples_features if feature_space in x]
indexed_data[dbg.group_name] = {}
indexed_data[dbg.group_name]['start'] = i
for x in ds:
vals = list(x[feature_space].values())
if vals:
i += 1
full_data.append([float(y) for y in vals])
indexed_data[dbg.group_name]['end'] = i
# scaled = scaler.fit_transform(full_data)
reduced = PCA(2).fit_transform(full_data)
groups = {}
for dbg in db_groups:
groups[dbg.group_name] = {
"names" : [],
"data" : reduced[indexed_data[dbg.group_name]['start']: indexed_data[dbg.group_name]['end']],
}
plotter.GroupScatterPlot(
groups = groups,
title = "PCA-2 {}".format(feature_space.replace("Features", " Features")),
plot_name = "pca2_{}_{}".format(feature_space, '-'.join([str(x) for x in groups.keys()])),
path = workspace_path,
**plot_config if plot_config else {},
)
return
@public.evaluator
def FeaturesDistribution(**kwargs) -> None:
"""
Plot distribution of features per feature dimension per database group.
"""
db_groups = kwargs.get('db_groups')
target = kwargs.get('targets')
feature_space = kwargs.get('feature_space')
top_k = kwargs.get('top_k')
unique_code = kwargs.get('unique_code', False)
plot_config = kwargs.get('plot_config')
workspace_path = kwargs.get('workspace_path') / "features_distr" / feature_space
workspace_path.mkdir(exist_ok = True, parents = True)
data = {}
# You need this if you want to have the same (github) baseline but when github is not plotted.
reduced_git = None
for dbg in db_groups:
if dbg.group_name == "GitHub-768-inactive" or dbg.group_name == "GitHub-768":
reduced_git = dbg.get_data_features(feature_space)
break
radar_groups = {}
max_fvals = {}
benchmarks = target.get_benchmarks(feature_space, reduced_git_corpus = reduced_git)
for idx, dbg in enumerate(db_groups):
if dbg.group_name == "GitHub-768-inactive":
# Skip baseline DB group.
continue
if not (dbg.db_type == samples_database.SamplesDatabase or dbg.db_type == encoded.EncodedContentFiles or dbg.db_type == clsmith.CLSmithDatabase):
raise ValueError("Scores require SamplesDatabase or EncodedContentFiles but received", dbg.db_type)
data[dbg.group_name] = {}
for benchmark in tqdm.tqdm(benchmarks, total = len(benchmarks), desc = "Benchmarks"):
# Find shortest distances.
if unique_code:
get_data = lambda x: dbg.get_unique_data_features(x)
else:
get_data = lambda x: dbg.get_data_features(x)
if idx == 0:
if target.target not in data:
data[target.target] = {}
for k, v in benchmark.features.items():
if k not in data[target.target]:
data[target.target][k] = [v]
else:
data[target.target][k].append(v)
if k not in max_fvals:
max_fvals[k] = v
else:
max_fvals[k] = max(max_fvals[k], v)
if "{}_{}".format(benchmark.name, feature_space) not in radar_groups:
radar_groups["{}_{}".format(benchmark.name, feature_space)] = {}
if target.target not in radar_groups["{}_{}".format(benchmark.name, feature_space)]:
keys, vals = zip(*sorted(zip(list(benchmark.features.keys()), list(benchmark.features.values()))))
keys, vals = list(keys), list(vals)
radar_groups["{}_{}".format(benchmark.name, feature_space)][target.target] = [vals, keys]
ret = workers.SortedSrcFeatsDistances(get_data(feature_space), benchmark.features, feature_space)[:top_k]
for _, _, fvec, _ in ret:
for k, v in fvec.items():
if k not in data[dbg.group_name]:
data[dbg.group_name][k] = [v]
else:
data[dbg.group_name][k].append(v)
if k not in max_fvals:
max_fvals[k] = v
else:
max_fvals[k] = max(max_fvals[k], v)
keys, vals = zip(*sorted(zip(list(fvec.keys()), list(fvec.values()))))
keys, vals = list(keys), list(vals)
if dbg.group_name not in radar_groups["{}_{}".format(benchmark.name, feature_space)]:
radar_groups["{}_{}".format(benchmark.name, feature_space)][dbg.group_name] = [vals, keys]
plotter.GrouppedViolins(
data = data,
plot_name = "feat_distr_{}_dist_{}_{}".format(top_k, feature_space.replace("Features", " Features"), '-'.join([dbg.group_name for dbg in db_groups])),
path = workspace_path,
**plot_config if plot_config else {},
)
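  # Radar values are normalized per feature dimension below: every value is divided by
  # the maximum observed for that dimension across all groups and benchmarks (dimensions
  # whose maximum is 0 are pinned to 1.0), so all radar axes share a common [0, 1] scale.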
for benchmark, groups in radar_groups.items():
for k, (values, thetas) in groups.items():
for idx, (val, theta) in enumerate(zip(values, thetas)):
if max_fvals[theta] > 0:
radar_groups[benchmark][k][0][idx] = radar_groups[benchmark][k][0][idx] / max_fvals[theta]
else:
radar_groups[benchmark][k][0][idx] = 1.0
plotter.GrouppedRadar(
groups,
plot_name = "radar_{}_{}_{}".format(benchmark, feature_space, '-'.join([dbg.group_name for dbg in db_groups])),
path = workspace_path,
title = "{}".format(benchmark)
)
return
@public.evaluator
def HumanLikeness(**kwargs) -> None:
"""
Initialize a dashboard webpage that creates a Turing quiz for users.
Human or Robot ?
"""
workspace_path = kwargs.get('workspace_path') / "human_likely"
workspace_path.mkdir(exist_ok = True, parents = True)
db_groups = kwargs.get('db_groups')
blob_name = "human_like_{}".format('_'.join([kwargs.get('targets').target] + [dbg.group_name for dbg in db_groups]))
def preprocess(text):
r = opencl.ExtractOnlySingleKernels(
opencl.InvertKernelSpecifier(
opencl.StripDoubleUnderscorePrefixes(
opencl.ClangPreprocessWithShim(
c.StripIncludes(text)))))
if len(r) > 0:
return opencl.ClangFormat(opencl.SequentialNormalizeIdentifiers(r[0]))
else:
return None
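  # preprocess() above runs the code through the OpenCL preprocessing pipeline in order:
  # strip #includes, Clang-preprocess with the shim header, strip "__" prefixes, invert
  # the kernel specifier, keep only single-kernel sources, then normalize identifiers and
  # ClangFormat the first surviving kernel (or return None if nothing survives).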
if not (workspace_path / "{}.json".format(blob_name)).exists():
data = {}
for feat_space in {"GreweFeatures", "AutophaseFeatures", "InstCountFeatures"}:
kwargs["feature_space"] = feat_space
groups = distance_score.MinScore(**kwargs)
for db_name, code in groups.items():
if db_name not in data:
data[db_name] = {
"label": "human" if db_name=="GitHub" else "robot",
"code" : set([preprocess(s) for b in code[2] for s in b]),
}
else:
data[db_name]["code"].update([preprocess(s) for b in code[2] for s in b])
for db_name in data.keys():
data[db_name]["code"] = list([x for x in data[db_name]["code"] if x])
with open(workspace_path / "{}.pkl".format(blob_name), 'wb') as outf:
pickle.dump(data, outf)
with open(workspace_path / "{}.json".format(blob_name), 'w') as outf:
json.dump(data, outf, indent = 2)
server.serve(databases = json.load(open(workspace_path / "{}.json".format(blob_name), 'r')), workspace_path = workspace_path, http_port=40822)
return
| 11,531 | 36.320388 | 154 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/turing/db.py | # coding=utf-8
# Copyright 2023 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluation script for kernel execution using cldrive or similar drivers.
"""
import pathlib
import datetime
import sqlite3
import typing
import json
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.mutable import MutableDict
from deeplearning.benchpress.util import sqlutil
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import logging as l
from absl import flags
from absl import app as absl_app
Base = declarative.declarative_base()
FLAGS = flags.FLAGS
flags.DEFINE_string(
"out_results_db",
None,
"Set path to out results DB."
)
flags.DEFINE_string(
"in_results_db",
None,
"Set comma-separated paths for input DBs to be merged."
)
class QuizResult(Base, sqlutil.ProtoBackedMixin):
"""
A database row representing a single quiz result.
"""
__tablename__ = "quiz_result"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# dataset name
dataset : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# code
code : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Human or Robot
label : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Prediction from user.
prediction : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# User ID that made prediction.
user_id : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Ip of the user.
  user_ip : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# User was software engineer ?
engineer : bool = sql.Column(sql.Boolean, unique = False, nullable = False)
# Date the quiz was performed.
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
               dataset : str,
               code : str,
               label : str,
               prediction : str,
               user_id : str,
               user_ip : str,
engineer : bool,
) -> "QuizResult":
return QuizResult(**{
"dataset" : dataset,
"code" : code,
"label" : label,
"prediction" : prediction,
"user_id" : user_id,
"user_ip" : user_ip,
"engineer" : engineer,
"date_added" : datetime.datetime.utcnow(),
})
class UserSession(Base, sqlutil.ProtoBackedMixin):
"""
A database with statistics, indexed by the unique User ID.
"""
__tablename__ = "user_session"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# unique hash of cldrive execution.
user_id : str = sql.Column(sql.String(64), nullable = False, index = True)
# Ips of one user.
user_ip : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Software engineer or not ?
engineer : bool = sql.Column(sql.Boolean, unique = False, nullable = False)
# Save the schedule for that user
schedule : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Frequency distribution of encountered datasets
dataset_distr : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Frequency distribution of oracle labels
label_distr : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Predicted labels distribution per dataset
prediction_distr : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Total predictions made
num_predictions : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Accumulated session for this user.
session : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Date the quiz was performed.
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
user_id : str,
engineer : bool,
schedule : typing.List[str],
user_ip : typing.List[str] = [],
dataset_distr : typing.Dict[str, int] = {},
label_distr : typing.Dict[str, int] = {"human": 0, "robot": 0},
prediction_distr : typing.Dict[str, typing.Dict[str, typing.Any]] = {},
num_predictions : typing.Dict[str, int] = {},
session : typing.List[typing.Dict[str, typing.Any]] = [],
) -> 'UserSession':
l.logger().critical(prediction_distr)
return UserSession(**{
"user_id" : user_id,
"user_ip" : json.dumps(user_ip, indent = 2),
"engineer" : engineer,
"schedule" : json.dumps(schedule, indent = 2),
"dataset_distr" : json.dumps(dataset_distr, indent = 2),
"label_distr" : json.dumps(label_distr, indent = 2),
"prediction_distr" : json.dumps(prediction_distr, indent = 2),
"session" : json.dumps(session, indent = 2),
"num_predictions" : json.dumps(num_predictions, indent = 2),
"date_added" : datetime.datetime.utcnow(),
})
class TuringSession(Base, sqlutil.ProtoBackedMixin):
"""
A database with high level statistics of all sessions.
"""
__tablename__ = "turing_session"
# entry id
id : int = sql.Column(sql.Integer, primary_key = True)
# Total number of participants by unique ids.
num_user_ids : int = sql.Column(sql.Integer, nullable = False)
# A list of all user IDs.
user_ids : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Ips of one user.
num_user_ips : int = sql.Column(sql.Integer, nullable = False)
# A list of all user IPs.
user_ips : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Engineers distribution
engineer_distr : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Total predictions made per engineer and non engineer
num_predictions : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Predictions distribution per engineer and non engineer per dataset with accuracies.
prediction_distr : str = sql.Column(sqlutil.ColumnTypes.UnboundedUnicodeText(), nullable = False)
# Date of assigned session.
date_added : datetime.datetime = sql.Column(sql.DateTime, nullable=False)
@classmethod
def FromArgs(cls,
num_user_ids : int = 0,
user_ids : typing.List[str] = [],
num_user_ips : int = 0,
user_ips : typing.List[str] = [],
engineer_distr : typing.Dict[str, int] = {"engineer": 0, "non-engineer": 0},
num_predictions : typing.Dict[str, int] = {"engineer": {}, "non-engineer": {}},
prediction_distr : typing.Dict[str, typing.Dict[str, typing.Any]] = {"engineer": {}, "non-engineer": {}},
) -> "TuringSession":
return TuringSession(**{
"num_user_ids" : num_user_ids,
"user_ids" : json.dumps(user_ids, indent = 2),
"num_user_ips" : num_user_ips,
"user_ips" : json.dumps(user_ips, indent = 2),
"engineer_distr" : json.dumps(engineer_distr, indent = 2),
"num_predictions" : json.dumps(num_predictions, indent = 2),
"prediction_distr" : json.dumps(prediction_distr, indent = 2),
"date_added" : datetime.datetime.utcnow(),
})
class TuringDB(sqlutil.Database):
"""A database of CLDrive Execution samples."""
@property
def count_users(self):
"""Number of cldrive traces in DB."""
with self.Session() as s:
count = s.query(UserSession).count()
return count
@property
def count_quiz(self):
"""Number of cldrive traces in DB."""
with self.Session() as s:
count = s.query(QuizResult).count()
return count
def __init__(self, url: str, must_exist: bool = False):
super(TuringDB, self).__init__(url, Base, must_exist = must_exist)
self._status_cache = None
def get_quizzes(self) -> typing.List[QuizResult]:
"""
Return a list of all quizzes.
"""
with self.Session() as s:
return s.query(QuizResult).all()
def get_users(self) -> typing.List[UserSession]:
"""
Return a list of all user sessions.
"""
with self.Session() as s:
return s.query(UserSession).all()
def get_session(self) -> TuringSession:
"""
Return DB's session.
"""
with self.Session() as s:
return s.query(TuringSession).first()
  def get_user_accuracy(self, user_id: str, min_attempts: int) -> typing.Tuple[typing.Optional[float], int]:
    """
    Return (accuracy, total attempts) for a player; accuracy is None until they
    have made at least min_attempts predictions.
    """
with self.Session() as s:
user = s.query(UserSession).filter_by(user_id = user_id).first()
correct, total = 0, 0
for dataset, data in json.loads(user.prediction_distr).items():
for label, amount in data["predictions"].items():
total += amount
if label == data["label"]:
correct += amount
if total >= min_attempts:
return correct / total, total
else:
return None, total
def get_prediction_distr(self) -> typing.Dict[str, typing.Any]:
"""
Return turing_session.prediction_distr
"""
with self.Session() as s:
return json.loads(s.query(TuringSession.prediction_distr).all()[0][0])
def get_user_prediction_distr(self) -> typing.Dict[str, typing.List[typing.Dict[str, typing.Any]]]:
"""
Group users to eng/non-eng, each category has a list of prediction_distr one per user.
"""
with self.Session() as s:
return {
"engineer": [json.loads(x[0]) for x in s.query(UserSession.prediction_distr).filter_by(engineer = 1).all()],
"non-engineer": [json.loads(x[0]) for x in s.query(UserSession.prediction_distr).filter_by(engineer = 0).all()],
}
def is_engineer(self, user_id: str) -> bool:
"""
Return bool value of engineer status.
"""
with self.Session() as s:
user = s.query(UserSession).filter_by(user_id = user_id).first()
if user:
return user.engineer
else:
return None
def get_schedule(self, user_id: str) -> typing.List[str]:
"""
Return assigned schedule.
"""
with self.Session() as s:
user = s.query(UserSession).filter_by(user_id = user_id).first()
if user:
return json.loads(user.schedule)
else:
return None
def init_session(self) -> None:
"""
TuringSession table must have only one entry.
If no entries exist, initialize one.
"""
with self.Session(commit = True) as s:
exists = s.query(TuringSession).scalar() is not None
if not exists:
s.add(TuringSession.FromArgs())
def update_session(self, **kwargs) -> None:
"""
Update session table with any new kwargs
"""
with self.Session(commit = True) as s:
      session = s.query(TuringSession).first()
      if session is None:
        # The singleton row may not exist yet; create it within this session so the
        # updates below operate on a real object instead of None.
        session = TuringSession.FromArgs()
        s.add(session)
for key, value in kwargs.items():
if key == "user_ids":
usr_ids = json.loads(session.user_ids)
if value not in usr_ids:
session.user_ids = json.dumps(usr_ids + [value], indent = 2)
session.num_user_ids += 1
elif key == "user_ips":
usr_ips = json.loads(session.user_ips)
if value not in usr_ips:
session.user_ips = json.dumps(usr_ips + [value], indent = 2)
session.num_user_ips += 1
elif key == "engineer_distr":
eng_dist = json.loads(session.engineer_distr)
if value:
eng_dist["engineer"] += 1
else:
eng_dist["non-engineer"] += 1
session.engineer_distr = json.dumps(eng_dist, indent = 2)
elif key == "num_predictions":
pred_dist = json.loads(session.num_predictions)
engineer = "engineer" if kwargs.get("engineer") else "non-engineer"
dname, freq = value
if dname not in pred_dist[engineer]:
pred_dist[engineer][dname] = freq
else:
pred_dist[engineer][dname] += freq
session.num_predictions = json.dumps(pred_dist, indent = 2)
elif key == "prediction_distr":
cur_distr = json.loads(session.prediction_distr)
for eng, attrs in value.items():
engineer = "engineer" if eng else "non-engineer"
for dname, attrs2 in attrs.items():
if dname not in cur_distr[engineer]:
cur_distr[engineer][dname] = {
"label": attrs2["label"],
"predictions": {
"human": 0,
"robot": 0,
}
}
cur_distr[engineer][dname]["predictions"][attrs2["predictions"]] += 1
session.prediction_distr = json.dumps(cur_distr, indent = 2)
return
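  # Usage sketch (hypothetical values, not called anywhere in this module): the
  # loosely-typed kwargs accepted by update_session(), mirroring the shapes passed
  # by add_quiz() and the quiz web server.
  def _example_update_session(self) -> None:
    self.update_session(
      user_ids         = "00000000-0000-0000-0000-000000000000",  # one user id per call
      user_ips         = "127.0.0.1",                              # one ip per call
      engineer_distr   = True,                                     # bumps the engineer counter
      engineer         = True,                                     # only consulted by num_predictions
      num_predictions  = ["GitHub", 1],                            # (dataset name, frequency delta)
      prediction_distr = {True: {"GitHub": {"label": "human", "predictions": "robot"}}},
    )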
def update_user(self, user_id: str, **kwargs) -> None:
"""
Add or update existing user.
"""
with self.Session(commit = True) as s:
user = s.query(UserSession).filter_by(user_id = user_id).first()
if user is None:
s.add(UserSession.FromArgs(
user_id = user_id,
**kwargs
)
)
session = s.query(TuringSession).first()
is_engineer = kwargs.get("engineer")
cur_eng_dist = json.loads(session.engineer_distr)
if is_engineer:
cur_eng_dist["engineer"] += 1
else:
cur_eng_dist["non-engineer"] += 1
        session.engineer_distr = json.dumps(cur_eng_dist, indent = 2)
else:
for key, value in kwargs.items():
if key == "user_ip":
usr_ip = json.loads(user.user_ip)
if value not in usr_ip:
user.user_ip = json.dumps(usr_ip + [value], indent = 2)
elif key == "engineer":
l.logger().warn("Engineer has already been set to {}. I am not updating that.".format(user.engineer))
elif key == "schedule":
user.schedule = json.dumps(value, indent = 2)
elif key == "dataset_distr":
cur_distr = json.loads(user.dataset_distr)
for k, v in value.items():
if k not in cur_distr:
cur_distr[k] = v
else:
cur_distr[k] += v
user.dataset_distr = json.dumps(cur_distr, indent = 2)
elif key == "label_distr":
cur_distr = json.loads(user.label_distr)
for k, v in value.items():
cur_distr[k] += v
user.label_distr = json.dumps(cur_distr, indent = 2)
elif key == "prediction_distr":
cur_distr = json.loads(user.prediction_distr)
for dname, attrs in value.items():
if dname not in cur_distr:
cur_distr[dname] = {
"label": attrs["label"],
"predictions": {
"human": 0,
"robot": 0,
}
}
for k, v in attrs["predictions"].items():
cur_distr[dname]["predictions"][k] += v
user.prediction_distr = json.dumps(cur_distr, indent = 2)
elif key == "num_predictions":
cur_num_preds = json.loads(user.num_predictions)
for k, v in value.items():
if k not in cur_num_preds:
cur_num_preds[k] = v
else:
cur_num_preds[k] += v
user.num_predictions = json.dumps(cur_num_preds, indent = 2)
elif key == "session":
user.session = json.dumps(json.loads(user.session) + value, indent = 2)
return
def add_quiz(self,
               dataset    : str,
               code       : str,
               label      : str,
               prediction : str,
               user_id    : str,
               user_ip    : str,
engineer : bool,
schedule : typing.List[str],
) -> int:
"""
Add new quiz instance to DB
"""
with self.Session(commit = True) as s:
s.add(QuizResult.FromArgs(
dataset = dataset,
code = code,
label = label,
prediction = prediction,
user_id = user_id,
user_ip = user_ip,
engineer = engineer,
)
)
self.update_user(
user_id = user_id,
dataset_distr = {dataset: 1},
label_distr = {label: 1, "human" if label == "robot" else "robot" : 0},
engineer = engineer,
schedule = schedule,
prediction_distr = {
dataset: {
"label": label,
"predictions": {prediction: 1, "human" if prediction == "robot" else "robot" : 0},
}
},
num_predictions = {dataset: 1},
session = [{
"dataset" : dataset,
"code" : code,
"label" : label,
"prediction" : prediction,
}]
)
self.update_session(
engineer = engineer,
num_predictions = [dataset, 1],
prediction_distr = {
engineer: {
dataset: {
"label": label,
"predictions": prediction
}
}
}
)
return 0
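# Usage sketch (hypothetical path, kernel and user details; not called anywhere in
# this module): recording a single quiz answer with the TuringDB API above.
# add_quiz() also updates the per-user and aggregate statistics tables.
def _example_record_quiz() -> None:
  results = TuringDB(url = "sqlite:///{}".format(pathlib.Path("turing_results.db").resolve()))
  results.init_session()
  results.add_quiz(
    dataset    = "BenchPress",
    code       = "kernel void A(global int* a) { a[get_global_id(0)]++; }",
    label      = "robot",
    prediction = "human",
    user_id    = "00000000-0000-0000-0000-000000000000",
    user_ip    = "127.0.0.1",
    engineer   = True,
    schedule   = ["GitHub", "BenchPress"],
  )
  assert results.count_quiz >= 1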
def merge_quiz(in_dbs: typing.List[TuringDB], out_db: TuringDB) -> None:
data = []
for db in in_dbs:
data += db.get_quizzes()
with out_db.Session(commit = True) as s:
for dp in data:
s.add(
QuizResult(
**{
"dataset" : dp.dataset,
"code" : dp.code,
"label" : dp.label,
"prediction" : dp.prediction,
"user_id" : dp.user_id,
"user_ip" : dp.user_ip,
"engineer" : dp.engineer,
"date_added" : dp.date_added,
}
)
)
return
def merge_user(in_dbs: typing.List[TuringDB], out_db: TuringDB) -> None:
data = []
for db in in_dbs:
data += db.get_users()
with out_db.Session(commit = True) as s:
for dp in data:
s.add(
UserSession(
**{
"user_id" : dp.user_id,
"user_ip" : dp.user_ip,
"engineer" : dp.engineer,
"schedule" : dp.schedule,
"dataset_distr" : dp.dataset_distr,
"label_distr" : dp.label_distr,
"prediction_distr" : dp.prediction_distr,
"session" : dp.session,
"num_predictions" : dp.num_predictions,
"date_added" : dp.date_added,
}
)
)
return
def merge_session(in_dbs: typing.List[TuringDB], out_db: TuringDB) -> None:
data = None
for db in in_dbs:
new_s = db.get_session()
if data is None:
data = new_s
else:
data.num_user_ids = data.num_user_ids + new_s.num_user_ids
data.user_ids = json.dumps(json.loads(data.user_ids) + json.loads(new_s.user_ids))
data.num_user_ips = data.num_user_ips + new_s.num_user_ips
data.user_ips = json.dumps(json.loads(data.user_ips) + json.loads(new_s.user_ips))
## engineer_distr
e1, e2 = json.loads(data.engineer_distr), json.loads(new_s.engineer_distr)
e1['engineer'] += e2['engineer']
e1['non-engineer'] += e2['non-engineer']
data.engineer_distr = json.dumps(e1)
## num_predictions.
e1, e2 = json.loads(data.num_predictions), json.loads(new_s.num_predictions)
x1, x2 = json.loads(data.prediction_distr), json.loads(new_s.prediction_distr)
out = {}
out2 = {}
keys = {"GitHub", "BenchPress_directed", "CLgen", "CLSmith", "BenchPress"}
      # 'grp' names the engineer/non-engineer bucket and avoids shadowing the logger alias 'l'.
      for grp in {"engineer", "non-engineer"}:
        out[grp] = {}
        out2[grp] = {}
        for k in keys:
          out[grp][k] = 0
          out2[grp][k] = {
            "label": "human" if k == "GitHub" else "robot",
            "predictions": {
              "human": 0,
              "robot": 0,
            }
          }
          if k in e1[grp]:
            out[grp][k] += e1[grp][k]
            out2[grp][k]["predictions"]["human"] += x1[grp][k]["predictions"]["human"]
            out2[grp][k]["predictions"]["robot"] += x1[grp][k]["predictions"]["robot"]
          if k in e2[grp]:
            out[grp][k] += e2[grp][k]
            out2[grp][k]["predictions"]["human"] += x2[grp][k]["predictions"]["human"]
            out2[grp][k]["predictions"]["robot"] += x2[grp][k]["predictions"]["robot"]
data.num_predictions = json.dumps(out)
data.prediction_distr = json.dumps(out2)
with out_db.Session(commit = True) as s:
s.add(
TuringSession(
**{
"num_user_ids" : data.num_user_ids,
"user_ids" : data.user_ids,
"num_user_ips" : data.num_user_ips,
"user_ips" : data.user_ips,
"engineer_distr" : data.engineer_distr,
"num_predictions" : data.num_predictions,
"prediction_distr" : data.prediction_distr,
"date_added" : data.date_added,
}
)
)
return
def merge_results(in_dbs: typing.List[TuringDB], out_db: TuringDB):
merge_quiz(in_dbs, out_db)
merge_user(in_dbs, out_db)
merge_session(in_dbs, out_db)
return
def main(*args, **kwargs) -> None:
if FLAGS.out_results_db is None:
raise ValueError("Please set out results db path")
if FLAGS.in_results_db is None:
raise ValueError("Please set path for input DBs")
out_db = TuringDB(url = "sqlite:///{}".format(pathlib.Path(FLAGS.out_results_db).resolve()))
in_dbs = [TuringDB(url = "sqlite:///{}".format(pathlib.Path(p).resolve()), must_exist = True) for p in FLAGS.in_results_db.split(',')]
merge_results(in_dbs, out_db)
if __name__ == "__main__":
absl_app.run(main)
| 23,786 | 36.937799 | 136 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/turing/server.py | # coding=utf-8
# Copyright 2023 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import portpicker
import pathlib
import waitress
import subprocess
import typing
import flask
import uuid
import json
import sys
import numpy as np
from absl import app as absl_app
from deeplearning.benchpress.experiments.turing import db
from deeplearning.benchpress.util import logging as l
app = flask.Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = -1
class FlaskHandler(object):
def __init__(self):
self.databases = None
self.workspace = None
self.session_db = None
self.schedule = None
self.user_cache = None
return
def set_params(self, databases: typing.Dict[str, typing.Tuple[str, typing.List[str]]], workspace: pathlib.Path) -> None:
self.databases = databases
self.workspace = workspace
self.session_db = db.TuringDB(url = "sqlite:///{}".format(workspace / "turing_results.db"))
self.session_db.init_session()
self.user_cache = {}
return
def get_cookie(self, key: str) -> typing.Any:
resp = flask.request.cookies.get(key)
if resp is None:
return resp
elif key == "schedule":
return resp.split(',')
elif key == "engineer":
return bool(resp)
elif key in {"user_id", "user_ip"}:
return str(resp)
elif key == "quiz_cache":
try:
return json.loads(resp)
except Exception as e:
l.logger().error(resp)
raise e
else:
raise ValueError(key)
def set_cookie(self, resp, **kwargs) -> None:
extra_args = {}
for key, val in kwargs.items():
if key == "schedule":
pr_val = ','.join(val)
elif key in {"user_id", "user_ip", "engineer"}:
pr_val = str(val)
elif key == "quiz_cache":
expires = kwargs.get("expires")
if expires is not None:
extra_args["expires"] = expires
pr_val = json.dumps(val)
else:
continue
resp.set_cookie(key, pr_val, **extra_args)
return
handler = FlaskHandler()
@app.route('/submit_quiz', methods = ["POST"])
def submit_quiz():
"""
Capture quiz submission and redirect.
"""
## Save entry to databases right here.
l.logger().error("Submit quiz.")
prediction = "human" if "human" in flask.request.form else "robot"
user_id = handler.get_cookie("user_id")
if user_id is None or user_id not in handler.user_cache:
return flask.redirect(flask.url_for('index'))
user_ip = handler.user_cache[user_id].get("user_ip", None)
engineer = handler.user_cache[user_id].get("engineer", None)
schedule = handler.user_cache[user_id].get("schedule", None)
quiz_cache = handler.user_cache[user_id].get("quiz_cache", None)
try:
handler.session_db.add_quiz(
dataset = quiz_cache["dataset"],
code = quiz_cache["code"],
label = quiz_cache["label"],
prediction = prediction,
user_id = user_id,
user_ip = user_ip,
engineer = engineer,
schedule = schedule,
)
  except TypeError as e:
    l.logger().error("Failed to store quiz result: {}".format(e))
    raise e
## Clear cache for current quiz.
del handler.user_cache[user_id]["quiz_cache"]
return flask.redirect(flask.url_for('quiz'))
@app.route('/submit_quiz', methods = ["GET", "PUT"])
def submit_quiz_override():
l.logger().error("quiz override.")
return flask.redirect(flask.url_for('quiz'))
@app.route('/quiz')
def quiz():
"""
Give a quiz.
Cookies:
gets:
schedule
user_id
user_ip
engineer
sets:
cached_session (All data for a single quiz result.)
"""
l.logger().info("quiz")
## Read cache. IF quiz exists in cache, force user to answer this.
user_id = handler.get_cookie("user_id")
l.logger().warn(handler.user_cache)
set_user_id = False
if user_id is None or user_id not in handler.user_cache:
return flask.redirect(flask.url_for('index'))
quiz_cache = handler.user_cache[user_id].get("quiz_cache", None)
if quiz_cache is not None:
l.logger().error("Cached quiz.")
resp = flask.make_response(
flask.render_template(
"quiz.html",
data = quiz_cache["code"]
)
)
else:
l.logger().error("New quiz.")
## Avoid new users going directly to quiz URL.
## Get schedule from cookies.
schedule = handler.user_cache[user_id].get("schedule", None)
    ## The round-robin/dropout selection sketched below is disabled; draw a dataset
    ## uniformly at random. np.random.randint's upper bound is exclusive, so
    ## len(schedule) covers every index.
    dataset = schedule[np.random.RandomState().randint(0, len(schedule))]
    dropout = True
# if dropout <= 0.3:
# ## Pick a random dataset instead.
# dropout = True
# dataset = schedule[np.random.RandomState().randint(0, len(schedule) - 1)]
# else:
# ## Pop database.
# dropout = False
# dataset = schedule.pop(0)
label, data = handler.databases[dataset]["label"], handler.databases[dataset]["code"]
## Sample datapoint.
    code = data[np.random.RandomState().randint(0, len(data))]
if not dropout:
## RR-add to the end.
schedule.append(dataset)
## Update cookies.
resp = flask.make_response(
flask.render_template(
"quiz.html",
data = code
)
)
handler.user_cache[user_id]["schedule"] = schedule
handler.user_cache[user_id]["quiz_cache"] = {"dataset": dataset, "code": code, "label": label}
if set_user_id:
handler.set_cookie(resp, user_id = user_id)
return resp
@app.route('/submit_engineer', methods = ["POST"])
def submit_engineer():
"""
Read input from engineer survey question.
Cookies:
gets:
user_id
user_ip
schedule
sets:
engineer
"""
l.logger().critical("submit engineer")
user_id = handler.get_cookie("user_id")
if user_id is None:
user_id = str(uuid.uuid4())
handler.user_cache[user_id] = {}
handler.set_cookie(resp, user_id = user_id)
engineer = handler.user_cache[user_id].get("engineer", None)
if engineer is not None:
l.logger().critical("skip engineer")
return flask.redirect(flask.url_for('index'))
user_ip = handler.user_cache[user_id].get("user_ip", None)
schedule = handler.user_cache[user_id].get("schedule", None)
engineer = "yes" in flask.request.form
## TODO: Save the engineer information associated with user id.
handler.session_db.update_user(
user_id = user_id,
user_ip = user_ip,
schedule = schedule,
engineer = engineer,
)
handler.session_db.update_session(
user_ids = str(user_id),
user_ips = user_ip,
engineer_distr = engineer,
)
handler.user_cache[user_id]["engineer"] = engineer
return flask.redirect(flask.url_for('quiz'))
@app.route('/submit_engineer', methods = ["GET", "PUT"])
def submit_engineer_override():
l.logger().critical("submit engineer override")
return flask.redirect(flask.url_for('index'))
@app.route('/start')
def start():
"""
Ask if person knows software. Drops here if engineer not in cookies.
Cookies:
gets:
user_id
user_ip
schedule
engineer (for the purpose of redirecting to index. Avoid re-answering.)
sets:
schedule
"""
## Create a round robin schedule of held databases.
l.logger().info("Start")
user_id = handler.get_cookie("user_id")
if user_id is None or user_id not in handler.user_cache:
return flask.redirect(flask.url_for('index'))
engineer = handler.user_cache[user_id].get("engineer", None)
l.logger().error(engineer)
l.logger().critical(handler.user_cache)
if engineer is not None:
return flask.redirect(flask.url_for('index'))
user_ip = handler.user_cache[user_id].get("user_ip", None)
schedule = handler.user_cache[user_id].get("schedule", None)
print("Cookie schedule: ", schedule)
if schedule is None:
schedule = list(handler.databases.keys())
if "GitHub" in schedule:
new_git = (2*len(schedule) - 5) / 3 ## Github must have a 40% chance.
if (new_git + 1 ) / (len(schedule) + new_git) < 0.35:
new_git += 1
schedule += ["GitHub"] * int(round(new_git))
np.random.RandomState().shuffle(schedule)
handler.user_cache[user_id]["schedule"] = schedule
l.logger().info("User schedule: {}".format(schedule))
handler.session_db.update_session(
user_ips = user_ip,
)
return flask.make_response(flask.render_template("start.html"))
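# Derivation of the GitHub weighting used in start(): with n datasets in the initial
# schedule (GitHub included once), adding g extra GitHub copies gives a GitHub share
# of (1 + g) / (n + g); solving (1 + g) / (n + g) = 0.4 yields g = (2n - 5) / 3, the
# `new_git` formula above. Worked example (hypothetical n): n = 5 -> g = 5/3 ~ 1.67,
# rounded to 2 extra copies, so GitHub appears 3 times in 7 scheduled quizzes (~43%).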
@app.route('/score')
def score():
"""
Check player's current accuracy.
"""
user_id = handler.get_cookie("user_id")
if user_id is None or user_id not in handler.user_cache:
return flask.redirect(flask.url_for('index'))
last_total = handler.user_cache[user_id].get("last_total", 0)
accuracy, total = handler.session_db.get_user_accuracy(user_id = user_id, min_attempts = 10)
if accuracy is None or (0 < total - last_total < 10):
return flask.make_response(flask.render_template("score_null.html"))
else:
handler.user_cache[user_id]["last_total"] = total
return flask.make_response(flask.render_template("score.html", data = "{}%".format(int(100 * accuracy))))
@app.route('/submit', methods = ["POST"])
def submit():
"""
START submit button in homepage.
Cookies:
gets:
engineer
"""
l.logger().info("Submit")
user_id = handler.get_cookie("user_id")
if user_id is None or user_id not in handler.user_cache:
l.logger().critical(user_id)
l.logger().critical(handler.user_cache)
return flask.redirect(flask.url_for('index'))
if "start" in flask.request.form:
engineer = handler.user_cache[user_id].get("engineer", None)
l.logger().error("Software cookie: {}".format(engineer))
if engineer is None:
resp = flask.make_response(flask.redirect(flask.url_for('start')))
else:
resp = flask.make_response(flask.redirect(flask.url_for('quiz')))
else:
resp = flask.make_response(flask.redirect(flask.url_for('index')))
return resp
@app.route('/')
def index():
"""
Render the home page of the test.
Cookies:
gets:
user_id
sets:
user_id
user_ip
"""
l.logger().info("Index")
## Create response
resp = flask.make_response(flask.render_template("index.html"))
## Load user id, or create a new one if no cookie exists.
user_id = handler.get_cookie("user_id")
if user_id is None:
# Create user ID.
user_id = str(uuid.uuid4())
handler.set_cookie(resp, user_id = user_id)
handler.user_cache[user_id] = {}
else:
is_engineer = handler.session_db.is_engineer(user_id = user_id)
schedule = handler.session_db.get_schedule(user_id = user_id)
if is_engineer is not None:
handler.user_cache[user_id] = {
'engineer': is_engineer,
'schedule': schedule,
}
else:
handler.user_cache[user_id] = {}
## Assign a new IP anyway.
user_ip = flask.request.access_route[0]
handler.user_cache[user_id]["user_ip"] = user_ip
l.logger().warn("User login user id: {}, user_ip: {}".format(user_id, user_ip))
return resp
def serve(databases: typing.Dict[str, typing.Tuple[str, typing.List[str]]],
workspace_path: pathlib.Path,
http_port: int = None,
host_address: str = '0.0.0.0'
) -> None:
"""
Serving function for Turing test quiz dashboard.
Receive a list of databases. Each entry specifies:
a) Name of database
b) Data
c) Human or Robot
"""
try:
if http_port is None:
http_port = portpicker.pick_unused_port()
## Setup handler.
handler.set_params(databases, workspace_path)
## Pretty print hostname.
hostname = subprocess.check_output(
["hostname", "-i"],
stderr = subprocess.STDOUT,
).decode("utf-8").replace("\n", "").split(' ')
if len(hostname) == 2:
ips = "ipv4: {}, ipv6: {}".format(hostname[1], hostname[0])
else:
ips = "ipv4: {}".format(hostname[0])
l.logger().warn("Server Public IP: {}:{}".format(ips, http_port))
waitress.serve(app, host = host_address, port = http_port, threads = 32)
except KeyboardInterrupt:
return
except Exception as e:
raise e
return
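# Sketch of the 'databases' structure expected by serve() (hypothetical kernels):
# keys are dataset names, quiz() reads entry["label"] ("human" or "robot") and
# samples code strings from entry["code"].
_EXAMPLE_DATABASES = {
  "GitHub": {
    "label": "human",
    "code" : ["kernel void A(global int* a) { a[get_global_id(0)] += 1; }"],
  },
  "BenchPress": {
    "label": "robot",
    "code" : ["kernel void B(global float* b) { b[get_global_id(0)] *= 2.0f; }"],
  },
}
# e.g. serve(databases = _EXAMPLE_DATABASES, workspace_path = pathlib.Path("/tmp/turing"), http_port = 40822)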
def main(*args, **kwargs):
dbs = json.load(open(sys.argv[1], 'r'))
serve(
databases = dbs,
workspace_path = pathlib.Path(sys.argv[2]).resolve(),
http_port = 40822,
)
if __name__ == "__main__":
absl_app.run(main)
## ./benchpress ./get_human_likely/human_or_robot.json ./get_human_likely/test
| 12,964 | 30.240964 | 122 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/turing/analysis.py | # coding=utf-8
# Copyright 2023 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Analysis of Human or AI experiment.
"""
import pathlib
import json
from deeplearning.benchpress.experiments import public
from deeplearning.benchpress.experiments.turing import db
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import plotter
@public.evaluator
def HumanLikenessAnalysis(**kwargs) -> None:
"""
Analyse Human or AI experiment.
"""
workspace = kwargs.get("workspace_path")
str_path = kwargs.get("human_likeness_data")
path = pathlib.Path(str_path).resolve()
if not path.exists():
raise FileNotFoundError(path)
data = db.TuringDB(url = "sqlite:///{}".format(path), must_exist = True)
"""
Evaluate the results from the database.
"""
"""
1. Get distribution of scores per dataset (distribute score per user / per dataset.)
a. Get average score per dataset group.
b. Get confidence interval.
"""
prediction_distr = data.get_prediction_distr()
labels = {
"engineer": {
"human": [[], []],
"AI" : [[], []]
},
"non-engineer": {
"human": [[], []],
"AI" : [[], []]
}
}
for label in labels.keys():
for dset, values in prediction_distr[label].items():
if values["predictions"]["human"] > 0:
labels[label]["human"][0].append(dset)
labels[label]["human"][1].append(values["predictions"]["human"])
if values["predictions"]["robot"] > 0:
labels[label]["AI"][0].append(dset)
labels[label]["AI"][1].append(values["predictions"]["robot"])
plotter.GrouppedBars(
labels[label],
plot_name = "{}_scores_per_set".format(label),
path = workspace / "scores_per_set" / label,
)
# plotter.GrouppedBars(
# {
# label: [
# labels["engineer"][label][0] + labels["non-engineer"][label][0],
# labels["engineer"][label][1] + labels["non-engineer"][label][1]
# ]
# for label in labels["engineer"].keys()
# },
# plot_name = "Total_scores_per_set",
# path = workspace / "scores_per_set",
# )
unique_datasets = set(prediction_distr["engineer"].keys())
unique_datasets.update(set(prediction_distr["non-engineer"].keys()))
## Distributions
user_prediction_distr = data.get_user_prediction_distr()
distrs = {
"engineer": {},
"non-engineer": {},
}
for label in labels.keys():
for dset in unique_datasets:
distrs[label][dset] = distributions.GenericDistribution(
[
int(100 * user[dset]["predictions"]["human"] / (user[dset]["predictions"]["robot"] + user[dset]["predictions"]["human"]))
for user in user_prediction_distr[label] if dset in user
],
log_path = workspace / "distributions" / label / dset,
set_name = "{}_{}_distrib".format(label, dset)
)
distrs[label][dset].plot()
"""
2. Conditioned probabilities:
a. Score distribution on robots, given score on human.
b. Score distribution on human, given score on robots.
c. Score distribution on human and robots, given total ratio of human/robot selections.
"""
"""
3. Measure correlation between score on human and score on GitHub.
Plot scatter:
x axis: Github score
y axis: AI-dataset score.
One datapoint: One user that has given answers to both Github and AI-dataset.
"""
ai_datasets = set([x for x in unique_datasets if x != "GitHub"])
correlation_data_steps = {}
num_predictions = 2
while True:
    correlation_data = {
      "engineer": {ai: {'data': [], 'names': [], 'frequency': []} for ai in ai_datasets},
      "non-engineer": {ai: {'data': [], 'names': [], 'frequency': []} for ai in ai_datasets},
    }
keep_looping = False
for label in labels.keys():
for user in user_prediction_distr[label]:
total = sum([x['predictions']['human'] + x['predictions']['robot'] for x in user.values()])
for ai_set in ai_datasets:
if ai_set in user and "GitHub" in user and total >= num_predictions:
keep_looping = True
dp = [
user["GitHub"]["predictions"]["human"] / (user["GitHub"]["predictions"]["human"] + user["GitHub"]["predictions"]["robot"]),
user[ai_set]["predictions"]["robot"] / (user[ai_set]["predictions"]["robot"] + user[ai_set]["predictions"]["human"])
]
# if ai_set not in correlation_data[label]:
# correlation_data[label][ai_set] = {
# 'data': [dp],
# 'names': [""],
# 'frequency': [1],
# }
# else:
if dp in correlation_data[label][ai_set]['data']:
idx = correlation_data[label][ai_set]['data'].index(dp)
correlation_data[label][ai_set]['frequency'][idx] += 1
else:
correlation_data[label][ai_set]['data'].append(dp)
correlation_data[label][ai_set]['names'].append("")
correlation_data[label][ai_set]['frequency'].append(1)
if not keep_looping:
break
else:
correlation_data_steps[num_predictions] = correlation_data
num_predictions += 1
for label in correlation_data.keys():
correlation_data = {
"x=y": {
'data': [[x/100, x/100] for x in range(0, 105, 5)],
'names': [[""] for x in range(0, 105, 5)],
'frequency': [1 for x in range(0, 105, 5)]
}
}
step_cov_corrs = {}
for step in correlation_data_steps.keys():
if len([x for y in correlation_data_steps[step][label].values() for x in y['data']]) > 0:
correlation_data_steps[step][label].update(correlation_data)
"""
Print the distribution of scores on AI given scores on Github.
"""
plotter.SliderGroupScatterPlot(
{
s: v[label]
for s, v in correlation_data_steps.items()
if len([x for y in v[label].values() for x in y['data']]) > 0
},
"AI_vs_Human_correlation",
path = workspace / "score_correlation" / label / "scatter",
x_name = "Score on GitHub",
y_name = "Score on AI",
**kwargs,
)
averages = {}
for name, values in correlation_data_steps[2][label].items():
if name == "x=y":
continue
averages[name] = {}
for dp in values["data"]:
x, y = dp
if x not in averages[name]:
averages[name][x] = [y]
else:
averages[name][x].append(y)
averages[name] = [[x, sum(y) / len(y)] for x, y in averages[name].items()]
averages[name] = sorted(averages[name], key = lambda x: x[0])
"""
Print the average distribution of scores in AI given scores on Github.
"""
x = [[x[0] for x in data] for dname, data in averages.items()]
y = [[y[1] for y in data] for dname, data in averages.items()]
names = list(averages.keys())
plotter.MultiScatterLine(
x = x,
y = y,
names = names,
plot_name = "Avg_AI_vs_Human_correlation",
path = workspace / "score_correlation" / label / "scatter_avg",
x_name = "Score on GitHub",
y_name = "Avg Score on AI",
**kwargs,
)
"""
Find the covariance and correlation between score on each AI and score on GitHub.
"""
for step_id, corr_data in correlation_data_steps.items():
step_cov_corrs[step_id] = {
'covariance': ([], []),
'correlation': ([], []),
}
for name, values in corr_data[label].items():
if name == "x=y":
continue
xx = [x for x, _ in values["data"]]
yy = [y for _, y in values["data"]]
n = name
if len(xx) > 0 and len(yy) > 0:
gitd = distributions.GenericDistribution(
[int(100*i) for i in xx],
workspace / "score_correlation" / label / "distr",
set_name = "score_on_git_with_{}_distr".format(n)
)
aid = distributions.GenericDistribution(
[int(i*100) for i in yy],
workspace / "score_correlation" / label / "distr",
set_name = "score_on_{}_distr".format(n)
)
# gitd.plot()
# aid.plot()
# (aid - gitd).plot()
step_cov_corrs[step_id]['covariance'][0].append(n)
step_cov_corrs[step_id]['covariance'][1].append(gitd.cov(aid))
step_cov_corrs[step_id]['correlation'][0].append(n)
step_cov_corrs[step_id]['correlation'][1].append(gitd.corr(aid))
plotter.SliderGrouppedBars(
{
k: {'covariance': v['covariance']}
for k, v in step_cov_corrs.items()
if len(v['covariance'][0]) > 0
},
plot_name = "Cov_AI_vs_Human",
path = workspace / "score_correlation" / label / "stats",
**kwargs,
)
plotter.SliderGrouppedBars(
{
k: {'correlation': v['correlation']}
for k, v in step_cov_corrs.items()
if len(v['correlation'][0]) > 0
},
plot_name = "Corr_AI_vs_Human",
path = workspace / "score_correlation" / label / "stats",
**kwargs,
)
return
| 9,592 | 35.2 | 137 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/grewe/model.py | #
# cgo13 - Implementation of the autotuner from:
#
# Grewe, D., Wang, Z., & O'Boyle, M. F. P. M. (2013). Portable
# Mapping of Data Parallel Programs to OpenCL for Heterogeneous
# Systems. In CGO. IEEE.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import re
from collections import defaultdict
import numpy as np
import pandas as pd
from scipy import stats
from sklearn import model_selection as cross_validation
from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from deeplearning.benchpress.util import fs
def flatten(lists):
"""
Flatten a list of lists.
"""
return [item for sublist in lists for item in sublist]
def geomean(array):
"""
Return the mean value of a list of divisible numbers.
"""
n = len(array)
if n < 1:
return 0
elif n == 1:
return array[0]
return stats.mstats.gmean(array)
def ingroup(getgroup, d, group):
"""return true if d is in group"""
return getgroup(d) == group
def getsuite(d):
"""fetch benchmark suite"""
return re.match(r"^[a-zA-Z-]+-[0-9\.]+", d["benchmark"]).group(0)
def getbenchmark(d):
"""fetch benchmark name"""
return re.sub(r"-[^-]+$", "", d["benchmark"])
def getprog(d):
"""fetch program name"""
return re.match(r"^[a-zA-Z-]+-[0-9\.]+-[^-]+-", d["benchmark"]).group(0)
def getclass(d):
"""fetch optimal device name"""
return d["oracle"]
class DataFilter(object):
@staticmethod
def from_str(string):
pass
class GetGroup(object):
@staticmethod
def from_str(string):
pass
def normalize(array):
"""normalize array values to range [0,1]"""
factor = np.amax(array)
return np.copy(array) / factor
class LabelledData(object):
"""dataset with group and normalized features"""
@staticmethod
def from_csv(path, group_by=None):
getgroup = {
"class": getclass,
"suite": getsuite,
"prog": getprog,
"benchmark": getbenchmark,
}.get(group_by, lambda x: "None")
data = pd.read_csv(path)
data["Group"] = [getgroup(d) for d in data.to_dict(orient='records')]
data["F1_norm"] = normalize(data["F1:transfer/(comp+mem)"])
data["F2_norm"] = normalize(data["F2:coalesced/mem"])
data["F3_norm"] = normalize(data["F3:(localmem/mem)*avgws"])
data["F4_norm"] = normalize(data["F4:comp/mem"])
return data
class UnLabelledData(object):
"""dataset without oracle device results"""
@staticmethod
def from_csv(path):
    # 'smith' is not imported in this module, so read the csv from the given path directly.
    data = pd.read_csv(path,
                       names=["benchmark", "dataset", "kernel",
                              "wgsize", "transfer", "runtime", "ci"])
return data
def norm_feature_distance(f1, f2):
"""
Distance between two features (as dicts).
"""
d1 = abs(f1["F1_norm"] - f2["F1_norm"])
d2 = abs(f1["F2_norm"] - f2["F2_norm"])
d3 = abs(f1["F3_norm"] - f2["F3_norm"])
d4 = abs(f1["F4_norm"] - f2["F4_norm"])
return math.sqrt(d1 * d1 + d2 * d2 + d3 * d3 + d4 * d4)
def eigens_distance(f1, f2):
"""
Distance between two features (as dicts).
"""
d1 = abs(f1["E1"] - f2["E1"])
d2 = abs(f1["E2"] - f2["E2"])
d3 = abs(f1["E3"] - f2["E3"])
d4 = abs(f1["E4"] - f2["E4"])
return math.sqrt(d1 * d1 + d2 * d2 + d3 * d3 + d4 * d4)
def nearest_neighbours(data1, data2, same_class=False,
distance=norm_feature_distance):
"""
Find the minimum distances between datapoints.
Returns list of tuples, where each tuple is in the form:
(distance, index_of_closest, same_oracle)
"""
dists, indices, sameoracles = [], [], []
for d1 in data1.to_dict(orient="record"):
mindist, index, sameoracle = float('inf'), None, False
for i, d2 in enumerate(data2.to_dict(orient="record")):
if not d1 == d2:
dist = distance(d1, d2)
if ((not same_class) or
(same_class and d1["oracle"] == d2["oracle"])):
if dist < mindist and i not in indices:
mindist = dist
index = i
sameoracle = d1["oracle"] == d2["oracle"]
dists.append(mindist)
indices.append(index)
sameoracles.append(sameoracle)
return zip(dists, indices, sameoracles)
def cgo13_features(d):
"""features used in CGO'13"""
return np.array([
d["F1:transfer/(comp+mem)"],
d["F2:coalesced/mem"],
d["F3:(localmem/mem)*avgws"],
d["F4:comp/mem"]
]).T
def raw_features(d):
"""all raw features"""
return np.array([
d["comp"],
d["rational"],
d["mem"],
d["localmem"],
d["coalesced"],
d["atomic"],
d["transfer"],
d["wgsize"]
]).T
def static_features(d):
"""static features"""
return np.array([
d["comp"],
d["mem"],
d["localmem"],
d["coalesced"],
]).T
def get_static_features(D):
"""static features from table"""
return np.array([
D["comp"].values,
D["mem"].values,
D["localmem"].values,
D["coalesced"].values,
], dtype=float).T
def extended_static_features(d):
"""static features with branching"""
return np.array([
d["comp"],
d["rational"],
d["mem"],
d["localmem"],
d["coalesced"],
]).T
def getlabels(d):
"""fetch optimal device name"""
return d["oracle"]
class Metrics(object):
"""classification result metrics"""
def __init__(self, prefix, data, predicted, model=None):
self._prefix = prefix
self._data = data
self._predicted = predicted
self._model = model
@property
def prefix(self):
return self._prefix
@property
def data(self):
return self._data
@property
def predicted(self):
return self._predicted
@property
def oracles(self):
return [float(x) for x in self.data["speedup"]]
@property
def oracle(self):
try:
return self._oracle
except AttributeError:
assert (len(self.speedups) == len(self.oracles))
self._oracle = self.speedup / geomean(self.oracles)
return self._oracle
@property
def y_test(self):
return self.data["oracle"]
@property
def accuracy(self):
try:
return self._accuracy
except AttributeError:
self._accuracy = accuracy_score(self.y_test, self.predicted)
return self._accuracy
@property
def speedups(self):
try:
return self._speedups
except AttributeError:
speedups = []
for d, p in zip(self.data.to_dict(orient="records"),
self.predicted):
if d["oracle"] == p:
speedups.append(d["speedup"])
else:
speedups.append(d["penalty"])
self._speedups = np.array(speedups)
return self._speedups
@property
def speedup(self):
try:
return self._speedup
except AttributeError:
self._speedup = geomean(self.speedups)
return self._speedup
@property
def groups(self):
try:
return self._groups
except AttributeError:
self._groups = sorted(set(self.data["Group"]))
return self._groups
@property
def n(self):
return len(self.speedups)
@property
def model(self):
return self._model
def export_model(self, out_basename):
try:
outfile = fs.path(str(out_basename) + ".dot")
tree.export_graphviz(self.model, out_file=outfile,
max_depth=5, filled=True, rounded=True,
class_names=["CPU", "GPU"],
feature_names=["F1", "F2", "F3", "F4"])
print("export model to '{}'".format(outfile))
except Exception:
pass
header = ", ".join([
"classifier",
"accuracy",
"speedup",
"oracle"
])
def __repr__(self):
return ", ".join([
self.prefix,
"{:.2f}%".format(self.accuracy * 100),
"{:.2f}".format(self.speedup),
"{:.0f}%".format(self.oracle * 100)
])
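# Worked example of the Metrics semantics above (hypothetical numbers): each test kernel
# contributes d["speedup"] when the predicted device matches the oracle and d["penalty"]
# otherwise. For per-kernel speedups [2.0, 1.0, 0.5], Metrics.speedup = geomean(...) = 1.0,
# and Metrics.oracle divides that by the geomean of the oracle speedups, i.e. the fraction
# of the achievable (oracle) performance that the classifier recovers.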
def getgroups(data, getgroup):
"""get list of group names"""
return sorted(list(set([getgroup(d) for d in
data.to_dict(orient="records")])))
def pairwise_groups_indices(data, getgroup):
"""
"""
groups = getgroups(data, getgroup)
group_indices = defaultdict(list)
for i, d in enumerate(data.to_dict(orient="records")):
group_indices[getgroup(d)].append(i)
groupnames, pairs = [], []
for j in range(len(groups)):
for i in range(len(groups)):
l, r = groups[j], groups[i]
groupnames.append((l, r))
li, ri = group_indices[l], group_indices[r]
pairs.append((li, ri))
return groupnames, pairs
def l1o_groups_indices(data, getgroup):
"""
"""
groups = getgroups(data, getgroup)
group_indices = defaultdict(list)
for i, d in enumerate(data.to_dict(orient="records")):
group_indices[getgroup(d)].append(i)
groupnames, pairs = [], []
for j in range(len(groups)):
l = groups[j]
groupnames.append((l, ", ".join([x for x in groups if x != l])))
pairs.append(([item for sublist in
[group_indices[x] for x in groups if x != l]
for item in sublist],
group_indices[l]))
return groupnames, pairs
def run_fold_indices(prefix, clf, data, train_index, test_index,
features=cgo13_features):
X_train = features(data)[train_index]
y_train = getlabels(data)[train_index]
clf.fit(X_train, y_train)
X_test = features(data)[test_index]
predicted = clf.predict(X_test)
predicted_data = data.ix[test_index]
return Metrics(prefix, predicted_data, predicted, clf)
def run_test(prefix, clf, train, test, features=cgo13_features):
X_train = features(train)
y_train = getlabels(train)
clf.fit(X_train, y_train)
X_test = features(test)
predicted = clf.predict(X_test)
return Metrics(prefix, test, predicted, clf)
def run_xval(prefix, clf, data, cv, features=cgo13_features, seed=1):
X = features(data)
y = getlabels(data)
predicted = cross_validation.cross_val_predict(clf, X, y, cv=cv)
return Metrics(prefix, data, predicted, clf)
def model(seed=204):
"""CGO'13 model"""
return DecisionTreeClassifier(
random_state=seed, splitter="best", criterion="entropy")
original_pair = [0, 0]
synthetics_pair = [0, 0]
def leave_one_benchmark_out(clf, get_features, D, benchmark, synthetics = False, is_clgen = False):
# Create data masks. For training we exclude all results from
# the test benchmark.
test_mask = D["benchmark"].str.contains(r"^" + benchmark)
if synthetics:
train_mask = D["benchmark"].str.contains(".cl")
else:
train_mask = ~test_mask
import math
# Create training and testing data:
X_train = get_features(D[train_mask])
for id1, seq in enumerate(X_train):
for id2, el in enumerate(seq):
if math.isnan(el) or math.isinf(el):
X_train[id1][id2] = 0.0
y_train = getclass(D[train_mask])
D_test = D[test_mask]
X_test = get_features(D_test)
for id1, seq in enumerate(X_test):
for id2, el in enumerate(seq):
if math.isnan(el) or math.isinf(el):
X_test[id1][id2] = 0.0
y_test = getclass(D_test)
# Train classifier:
clf.fit(X_train, y_train)
# Make predictions
predicted = clf.predict(X_test)
D_out = []
total = 0
correct = 0
for d, y, p in zip(D_test.to_dict('records'), y_test, predicted):
d["p"] = p
d["p_correct"] = 1 if y == p else 0
if y == p:
correct += 1
total += 1
D_out.append(d)
global original_pair
global synthetics_pair
if is_clgen:
synthetics_pair[0] += correct
synthetics_pair[1] += total
else:
original_pair[0] += correct
original_pair[1] += total
# if is_clgen:
# print("######")
# print("CLgen", synthetics_pair)
# print("Benchmarks", original_pair)
# input()
# Return a list of dicts
return D_out
def get_benchmark_names(data, prefix=None):
"""names of benchmarks"""
if prefix:
return sorted(set([
re.match(r"^([^0-9]+-[0-9\.]+-[^-]+)", b).group(1)
for b in data["benchmark"] if b.startswith(prefix)
]))
else:
return sorted(set([
re.match(r"^([^0-9]+-[0-9\.]+-[^-]+)", b).group(1)
for b in data["benchmark"]
]))
def xval_benchmarks(clf, data, **benchmark_name_opts):
"""cross-validate across benchmarks"""
benchmark_names = get_benchmark_names(data, **benchmark_name_opts)
return pd.DataFrame(
flatten([leave_one_benchmark_out(clf, cgo13_features, data, b)
for b in benchmark_names]))
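# Usage sketch (hypothetical csv path; not called anywhere in this module): the typical
# flow through the helpers above. The returned frame gains "p" (predicted device) and
# "p_correct" columns from leave_one_benchmark_out().
def _example_xval(csv_path="training.csv"):
  data = LabelledData.from_csv(csv_path, group_by="suite")
  clf = model(seed=204)                 # CGO'13 decision tree
  results = xval_benchmarks(clf, data)  # leave-one-benchmark-out cross-validation
  return results["p_correct"].mean()    # fraction of correctly predicted devices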
def classification(train, classifier="DecisionTree",
test=None, supplementary=None,
with_raw_features=False, only_raw_features=False,
group_by=None, samegroup_xval=False, l1o=False, **kwargs):
  if with_raw_features:
    # cgo13_with_raw_features is not defined in this module; selecting this option
    # will raise a NameError unless it is provided elsewhere.
    getfeatures = cgo13_with_raw_features
elif only_raw_features:
getfeatures = raw_features
else:
getfeatures = cgo13_features
seed = kwargs.get("seed", 0)
# Get classifier
classifiers = {
"DecisionTree": DecisionTreeClassifier(
random_state=seed, criterion="entropy", splitter="best"),
"NaiveBayes": GaussianNB(),
"NearestNeighbour": KNeighborsClassifier(n_neighbors=1)
}
lookup_table = {
"DecisionTree": classifiers["DecisionTree"],
"NaiveBayes": classifiers["NaiveBayes"],
"NearestNeighbour": classifiers["NearestNeighbour"],
"dt": classifiers["DecisionTree"],
"nb": classifiers["NaiveBayes"],
"nn": classifiers["NearestNeighbour"]
}
clf = lookup_table.get(classifier, None)
if clf is None:
raise Exception(
"unkown classifier '{}'. Possible values: {{{}}}"
.format(classifier, ",".join(sorted(lookup_table.keys()))))
if test is not None:
return run_test(classifier, clf, train, test, features=getfeatures)
elif group_by:
# Cross-validation over some grouping
getgroup = {
"suite": getsuite,
"benchmark": getbenchmark,
}.get(group_by, None)
if group_by and not getgroup:
raise Exception("Unkown group type '{}'".format(group_by))
groups = sorted(getgroups(train, getgroup))
if l1o:
groupnames, folds = l1o_groups_indices(train, getgroup)
results = [None] * len(groups)
else:
groupnames, folds = pairwise_groups_indices(train, getgroup)
results = [[None] * len(groups) for x in range(len(groups))]
for gpname, fold in zip(groupnames, folds):
train_group, test_group = gpname
train_index, test_index = fold
# If samegroup_xval option is true, then cross-validate on
# training data.
if samegroup_xval and train_group == test_group:
train2 = train.ix[train_index]
metrics = classification(
train2, with_raw_features=with_raw_features,
only_raw_features=only_raw_features,
classifier=classifier, **kwargs)
else:
if supplementary is not None:
# If we have supplementary data, then copy data
# and append training.
train2 = train.ix[train_index]
train2 = train2.append(supplementary)
X_train = getfeatures(train2)
y_train = getlabels(train2)
clf.fit(X_train, y_train)
X_test = getfeatures(train)[test_index]
predicted = clf.predict(X_test)
predicted_data = train.ix[test_index]
metrics = Metrics(
classifier, predicted_data, predicted, clf)
else:
metrics = run_fold_indices(classifier, clf, train,
train_index, test_index,
features=getfeatures)
train_index = groups.index(train_group)
if l1o:
results[train_index] = metrics
else:
test_index = groups.index(test_group)
results[train_index][test_index] = metrics
return results
else:
# plain old cross-validation
#
# Get the number of folds to use. If "nfold=n", then perform
# leave-one-out cross validation.
nfolds = kwargs.get("nfolds", 10)
if nfolds == "n":
nfolds = len(train)
else:
nfolds = int(nfolds)
    folds = cross_validation.KFold(n_splits=nfolds,
                                   shuffle=True, random_state=seed)
return run_xval(classifier, clf, train, folds, features=getfeatures) | 16,346 | 24.743307 | 99 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/grewe/api.py | # Copyright (c) Foivos Tsimpourlas.
#
# BenchPress is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BenchPress is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
API to communicate with legacy 'preamble.py' and 'model.py'
of Grewe's et al. predictive model (CGO 2013).
This API is used to convert modernized database groups
to the expected csv files by the script and also fill in
missing cldrive data.
"""
import sys
import pathlib
import typing
import math
import tqdm
import pandas as pd
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.corpuses import benchmarks
from deeplearning.benchpress.experiments import public
from deeplearning.benchpress.experiments import workers
from deeplearning.benchpress.experiments import cldrive
from deeplearning.benchpress.experiments import clsmith
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.experiments.grewe import preamble
from absl import app, flags
FLAGS = flags.FLAGS
"""
1. You may insert database groups as usual to convert to csv
2. You need to introduce a systematic way to insert the amd/nvidia/clgen csv's from clgen's artifacts.
a) Could be protobuf path arguments pointing to results workspace
"""
def DataFrameSchema() -> typing.List[str]:
"""
Return index list of dataframe.
"""
return [
"benchmark",
"dataset",
"comp",
"rational",
"mem",
"localmem",
"coalesced",
"atomic",
"transfer",
"wgsize",
"F1:transfer/(comp+mem)",
"F2:coalesced/mem",
"F3:(localmem/mem)*avgws",
"F4:comp/mem",
"oracle",
"runtime",
"speedup",
"penalty",
"runtime_cpu",
"ci_cpu",
"ci_mean_cpu",
"runtime_gpu",
"ci_gpu",
"ci_mean_gpu",
"kernel_nlines",
"kernel_size"
]
def ToDataFrameRow(name : str,
grewe_feats : typing.Dict[str, float],
transferred_bytes : int,
global_size : int,
local_size : int,
label : str,
cpu_transfer_time_ns : int,
cpu_kernel_time_ns : int,
gpu_transfer_time_ns : int,
gpu_kernel_time_ns : int,
) -> pd.DataFrame:
"""
Convert a samples DB to a csv with the same columns found in paper's artifact.
"""
return [
name,
global_size,
grewe_feats['comp'],
grewe_feats['rational'],
grewe_feats['mem'],
grewe_feats['localmem'],
grewe_feats['coalesced'],
grewe_feats['atomic'],
transferred_bytes,
local_size,
transferred_bytes / (grewe_feats['comp'] + grewe_feats['mem']) if (grewe_feats['comp'] + grewe_feats['mem']) > 0 else 0.0,
grewe_feats["F2:coalesced/mem"],
(grewe_feats['localmem'] / grewe_feats['mem']) * local_size if grewe_feats['mem'] > 0 else 0.0,
grewe_feats["F4:comp/mem"],
label,
min(cpu_transfer_time_ns + cpu_kernel_time_ns, gpu_transfer_time_ns + gpu_kernel_time_ns) / (10**6),
max((cpu_transfer_time_ns + cpu_kernel_time_ns) / (gpu_transfer_time_ns + gpu_kernel_time_ns), (gpu_transfer_time_ns + gpu_kernel_time_ns) / (cpu_transfer_time_ns + cpu_kernel_time_ns)),
min((cpu_transfer_time_ns + cpu_kernel_time_ns) / (gpu_transfer_time_ns + gpu_kernel_time_ns), (gpu_transfer_time_ns + gpu_kernel_time_ns) / (cpu_transfer_time_ns + cpu_kernel_time_ns)),
(cpu_transfer_time_ns + cpu_kernel_time_ns) / 10**6,
cpu_transfer_time_ns / 10**6,
cpu_kernel_time_ns / 10**6,
(gpu_transfer_time_ns + gpu_kernel_time_ns) / 10**6,
gpu_transfer_time_ns / 10**6,
gpu_kernel_time_ns / 10**6,
0,
0
]
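# Worked example of the feature columns built above (hypothetical counts): with comp = 10,
# mem = 5, localmem = 2, transferred_bytes = 4096 and local_size = 64, the row holds
# F1 = 4096 / (10 + 5) ~ 273.07 and F3 = (2 / 5) * 64 = 25.6, while F2 (coalesced/mem)
# and F4 (comp/mem) are copied directly from the Grewe feature dictionary.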
def CSVPathToFrame(csv_path: pathlib.Path) -> pd.DataFrame:
"""
Receive a csv path and return a dataframe.
"""
return pd.read_csv(csv_path)
def DriveSource(src : str,
include : str,
group_name : str,
feats : typing.Dict[str, float],
cldrive_db : cldrive.CLDriveExecutions,
name : str = None,
no_cache : bool = False,
extra_args : typing.List[str] = [],
) -> typing.Generator:
"""
  For a given source code, drive it through CLDrive and yield ready dataframe rows.
  Args:
    src        : source code to process.
    include    : header file contents passed along with the kernel.
    group_name : name of the database group the source belongs to.
    feats      : Grewe feature vector of the source code.
    cldrive_db : caches CLDrive executions of source code.
    name       : optional kernel name used in the generated rows.
    no_cache   : if True, re-execute and refresh cached entries.
    extra_args : extra compiler arguments forwarded to CLDrive.
"""
# for gsize in tqdm.tqdm([2**6, 2**7, 2**8, 2**10, 2**12, 2**14, 2**16, 2**18, 2**20], desc = "gsize", leave = False):
for gsize in tqdm.tqdm([2**1, 2**2, 2**4, 2**8, 2**10, 2**12, 2**14, 2**16, 2**18, 2**20], desc = "gsize", leave = False):
for lsize in tqdm.tqdm([2**1, 2**2, 2**3, 2**4, 2**5, 2**6, 2**7, 2**8, 2**9, 2**10], desc = "lsize", leave = False):
if lsize > gsize:
continue
sha = crypto.sha256_str(include + src + group_name + str(gsize) + str(lsize))
if sha in cldrive_db.status_cache:
if no_cache:
cached = cldrive_db.update_and_get(
src,
feats,
group_name,
gsize,
lsize,
num_runs = 10000,
timeout = 60,
include = include,
extra_args = extra_args
)
else:
cached = cldrive_db.get_entry(
src,
group_name,
gsize,
lsize,
include = include
)
if cached.status in {"CPU", "GPU"}:
yield ToDataFrameRow(
name = "{}.cl".format(sha) if name is None else name,
grewe_feats = feats,
transferred_bytes = cached.transferred_bytes,
global_size = gsize,
local_size = lsize,
label = cached.status,
cpu_transfer_time_ns = cldrive_db.reduce_execution_times(cached.cpu_transfer_time_ns),
cpu_kernel_time_ns = cldrive_db.reduce_execution_times(cached.cpu_kernel_time_ns),
gpu_transfer_time_ns = cldrive_db.reduce_execution_times(cached.gpu_transfer_time_ns),
gpu_kernel_time_ns = cldrive_db.reduce_execution_times(cached.gpu_kernel_time_ns),
)
else:
yield None
else:
df, label = opencl.CLDriveDataFrame(
src,
header_file = include,
num_runs = 10000,
gsize = gsize,
lsize = lsize,
extra_args = extra_args,
timeout = 60
)
cldrive_db.add_entry(
src,
feats,
group_name,
label,
gsize,
lsize,
df,
include = include
)
if label not in {"CPU", "GPU"}:
yield None
else:
idx = 0
transferred_bytes = float('NaN')
while idx < len(df.transferred_bytes) and math.isnan(transferred_bytes):
try:
transferred_bytes = int(df.transferred_bytes[idx])
except ValueError:
idx += 1
yield ToDataFrameRow(
name = "{}.cl".format(sha) if name is None else name,
grewe_feats = feats,
transferred_bytes = transferred_bytes,
global_size = gsize,
local_size = lsize,
label = label,
cpu_transfer_time_ns = cldrive_db.reduce_execution_times(df[df['device'].str.contains("CPU")].transfer_time_ns),
cpu_kernel_time_ns = cldrive_db.reduce_execution_times(df[df['device'].str.contains("CPU")].kernel_time_ns),
gpu_transfer_time_ns = cldrive_db.reduce_execution_times(df[df['device'].str.contains("GPU")].transfer_time_ns),
gpu_kernel_time_ns = cldrive_db.reduce_execution_times(df[df['device'].str.contains("GPU")].kernel_time_ns),
)
@public.evaluator
def GreweTopKCSV(**kwargs) -> None:
"""
  Sample the top-K closest candidates of each DB group to each target benchmark,
  drive them through CLDrive, and store the results to CSV.
"""
db_groups = kwargs.get('db_groups')
cldrive_cache = kwargs.get('cldrive_cache', '')
target = kwargs.get('targets')
top_k = kwargs.get('top_k')
unique_code = kwargs.get('unique_code', False)
workspace = kwargs.get('workspace_path')
tokenizer = kwargs.get('tokenizer')
cldrive_db = cldrive.CLDriveExecutions(url = "sqlite:///{}".format(pathlib.Path(cldrive_cache).resolve()), must_exist = False)
for dbg in tqdm.tqdm(db_groups, desc = "DB Groups", leave = True):
l.logger().info("Running {} on cldrive".format(dbg.group_name))
    if not (dbg.db_type == samples_database.SamplesDatabase or dbg.db_type == encoded.EncodedContentFiles or dbg.db_type == clsmith.CLSmithDatabase):
      raise ValueError("Scores require SamplesDatabase, EncodedContentFiles or CLSmithDatabase but received", dbg.db_type)
if dbg.db_type == clsmith.CLSmithDatabase:
extra_args = ["-include{}".format(pathlib.Path(clsmith.CLSMITH_INCLUDE) / "CLSmith.h")]
else:
extra_args = []
datapoints = []
out_path = workspace / "{}.csv".format(dbg.group_name)
if unique_code:
get_data = lambda: dbg.get_unique_data_features("GreweFeatures", use_mp = False)
else:
get_data = lambda: dbg.get_data_features("GreweFeatures", use_mp = False)
## Unpack and collect benchmarks
benchmarks = target.get_benchmarks("GreweFeatures")
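    # For every target benchmark, walk candidates in order of increasing Grewe
    # feature distance and drive them through CLDrive until top_k candidates
    # have produced at least one valid (CPU/GPU labelled) row each.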
for benchmark in tqdm.tqdm(benchmarks, total = len(benchmarks), desc = "Benchmarks"):
top_k_idx = 0
top_k_bar = tqdm.tqdm(total = top_k, desc = "Top K cands", leave = False)
for (src, incl, feats, dist) in tqdm.tqdm(workers.SortedSrcFeatsDistances(get_data(), benchmark.features, "GreweFeatures"), desc = "Sorted Data", leave = False):
toggle = False
if dbg.db_type == clsmith.CLSmithDatabase:
src = "#include \"CLSmith.h\"\n" + src
for row in DriveSource(src, incl, dbg.group_name, feats, cldrive_db, extra_args = extra_args):
if row:
toggle = True
datapoints.append(row)
if toggle:
top_k_idx += 1
top_k_bar.update(1)
if top_k_idx >= top_k:
break
frame = pd.DataFrame(datapoints, columns = DataFrameSchema())
frame.to_csv(out_path)
return
@public.evaluator
def GreweCSV(**kwargs) -> None:
"""
Convert database groups to CSV files that are supported by Grewe's predictive model.
"""
db_groups = kwargs.get('db_groups')
cldrive_cache = kwargs.get('cldrive_cache', '')
unique_code = kwargs.get('unique_code', False)
workspace = kwargs.get('workspace_path')
tokenizer = kwargs.get('tokenizer')
cldrive_db = cldrive.CLDriveExecutions(url = "sqlite:///{}".format(pathlib.Path(cldrive_cache).resolve()), must_exist = False)
for dbg in tqdm.tqdm(db_groups, desc = "DB Groups", leave = True):
if not (dbg.db_type == samples_database.SamplesDatabase or dbg.db_type == encoded.EncodedContentFiles or dbg.db_type == clsmith.CLSmithDatabase):
raise ValueError("Scores require SamplesDatabase or EncodedContentFiles but received", dbg.db_type)
if dbg.db_type == clsmith.CLSmithDatabase:
extra_args = ["-I{}".format(pathlib.Path(clsmith.CLSMITH_INCLUDE))]
else:
extra_args = []
datapoints = []
out_path = workspace / "{}.csv".format(dbg.group_name)
if unique_code:
get_data = lambda: dbg.get_unique_data_features("GreweFeatures", use_mp = False)
else:
get_data = lambda: dbg.get_data_features("GreweFeatures", use_mp = False)
for (src, incl, feats) in tqdm.tqdm(get_data(), desc = "Src", leave = True):
if dbg.db_type == clsmith.CLSmithDatabase:
src = "#include \"CLSmith.h\"\n" + src
for row in DriveSource(src, incl, dbg.group_name, feats, cldrive_db, extra_args = extra_args):
if row:
datapoints.append(row)
frame = pd.DataFrame(datapoints, columns = DataFrameSchema())
frame.to_csv(out_path)
return
@public.evaluator
def TrainGrewe(**kwargs) -> None:
"""
  Collect CSV files in the same format expected by 'preamble.py'
  and train the Grewe et al. predictive model.
"""
grewe_baseline = kwargs.get('grewe_baseline')
csv_groups = kwargs.get('csv_groups')
plot_config = kwargs.get('plot_config')
workspace = kwargs.get('workspace_path')
speedups = {}
accuracies = {}
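  # For every CSV group, cross-validate the Grewe et al. model on the GPGPU
  # baseline alone and again with the group's kernels added, recording the
  # speedups and precision/recall/TNR of both configurations.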
for group in csv_groups:
R, base, enhanced, base_precision, base_recall, base_tnr, enhanced_precision, enhanced_recall, enhanced_tnr = preamble.plot_speedups_with_clgen(
open(grewe_baseline, 'r'),
open(group['path'], 'r'),
synth_bench_name = group['name'],
)
if "GPGPU" not in speedups:
speedups["GPGPU"] = base
accuracies["GPGPU"] = {
'precision': base_precision,
'recall' : base_recall,
'tnr' : base_tnr,
}
speedups["GPGPU+{}".format(group['name'])] = enhanced
accuracies["GPGPU+{}".format(group['name'])] = {
'precision': enhanced_precision,
'recall' : enhanced_recall,
'tnr' : enhanced_tnr,
}
l.logger().info("Predictive model speedup vs GPU static mapping for different datasets:")
for k, v in speedups.items():
l.logger().info("{}: {}x speedup".format(k, round(v, 2)))
plotter.MultiScatterLine(
x = [[x for x in range(10)], [x for x in range(10)]],
y = [preamble.bp_al, preamble.bp_pl],
names = ["BenchPress_Active", "BenchPress_Passive"],
plot_name = "Active_vs_Passive_speedup",
path = workspace,
**plot_config if plot_config else {}
)
return
@public.evaluator
def FeatureSpaceCovLabel(**kwargs) -> None:
"""
For each baseline + ground truth, collect
all Grewe datapoints from CSV and plot the feature
space coverage. Points are colored based on label, CPU or GPU.
"""
grewe_baseline = kwargs.get('grewe_baseline')
csv_groups = kwargs.get('csv_groups')
plot_config = kwargs.get('plot_config')
workspace = kwargs.get('workspace_path')
base_df = CSVPathToFrame(grewe_baseline)
base_map = {
'CPU': base_df[base_df['oracle'] == 'CPU'].values.tolist(),
'GPU': base_df[base_df['oracle'] == 'GPU'].values.tolist(),
}
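  # Plot the GPGPU baseline on its own first, then re-plot each CSV group
  # together with the baseline so their feature-space coverage can be compared.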
tsne_mon = monitors.TSNEMonitor(
cache_path = workspace,
set_name = 'GPGPU',
)
for k in {'CPU', 'GPU'}:
for dp in base_map[k]:
sample = (
dp[2:14],
k,
)
tsne_mon.register(sample)
tsne_mon.plot()
for group in csv_groups:
group_df = CSVPathToFrame(group['path'])
group_map = {
'CPU': group_df[group_df['oracle'] == 'CPU'].values.tolist(),
'GPU': group_df[group_df['oracle'] == 'GPU'].values.tolist(),
}
tsne_mon = monitors.TSNEMonitor(
cache_path = workspace,
set_name = group['name'],
)
for k in {'CPU', 'GPU'}:
for dp in group_map[k] + base_map[k]:
sample = (
dp[2:14],
k,
)
tsne_mon.register(sample)
tsne_mon.plot()
return
@public.evaluator
def FeatureSpaceCovGroup(**kwargs) -> None:
"""
For each baseline + ground truth, collect
all Grewe datapoints from CSV and plot the feature
space coverage.
Points are colored based on group, everything is plotted
in one figure.
"""
grewe_baseline = kwargs.get('grewe_baseline')
csv_groups = kwargs.get('csv_groups')
plot_config = kwargs.get('plot_config')
workspace = kwargs.get('workspace_path')
base_df = CSVPathToFrame(grewe_baseline)
groups = {}
csv_data = {
'GPGPU_CPU': [(dp[10:14], "{}-{}-{}".format(dp[0], dp[1], dp[9])) for dp in base_df[base_df['oracle'] == 'CPU'].values.tolist()],
'GPGPU_GPU': [(dp[10:14], "{}-{}-{}".format(dp[0], dp[1], dp[9])) for dp in base_df[base_df['oracle'] == 'GPU'].values.tolist()],
}
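  # Each t-SNE point packs the derived feature columns (dp[10:14], assuming the
  # CSV follows the DataFrameSchema() column order) together with a
  # "<benchmark>-<dataset>-<wgsize>" tag so individual kernels can be identified.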
tsne_mon = monitors.TSNEMonitor(
cache_path = workspace,
set_name = 'Benchmarks_without_derived_split',
)
ranges = [
[167, 250],
[500, 533],
[50, 167],
]
print(len(base_df[base_df["F1:transfer/(comp+mem)"] == 64]) / len(base_df))
for idx, f_dim in enumerate(range(11, 14)):
samples = [int((dp[f_dim] * 100)) for dp in base_df.values.tolist()]
d = distributions.GenericDistribution(samples, log_path = workspace, set_name = "GPGPU_distr_{}".format(f_dim))
print("P[X inside range] = {}%".format(100 * (1 - (d < ranges[idx][0]) - (d > ranges[idx][1]))))
d.plot()
for k in ['GPGPU_CPU', 'GPGPU_GPU']:
for dp, name in csv_data[k]:
tsne_mon.register((dp, k, name))
for group in csv_groups:
# Run the predictive model and plot per group-predicted_label.
# R, base, enhanced, base_precision, base_recall, base_tnr, enhanced_precision, enhanced_recall, enhanced_tnr = preamble.plot_speedups_with_clgen(
# open(grewe_baseline, 'r'),
# open(group['path'], 'r'),
# synth_bench_name = group['name'],
# )
# b_mask = R["training"] == "Grewe et al."
# bs_mask = R["training"] == "w. {}".format(group['name'])
# groups['GPGPU_correct'] = [[dp[13], dp[10]] for dp in R[b_mask][R[b_mask]['oracle'] == R[b_mask]['p']].values.tolist()]
# groups['GPGPU_wrong'] = [[dp[13], dp[10]] for dp in R[b_mask][R[b_mask]['oracle'] != R[b_mask]['p']].values.tolist()]
# groups['{}_correct'.format(group['name'])] = [[dp[13], dp[10]] for dp in R[bs_mask][R[bs_mask]['oracle'] == R[bs_mask]['p']].values.tolist()]
# groups['{}_wrong'.format(group['name'])] = [[dp[13], dp[10]] for dp in R[bs_mask][R[bs_mask]['oracle'] != R[bs_mask]['p']].values.tolist()]
group_df = CSVPathToFrame(group['path'])
csv_data["{}_CPU".format(group['name'])] = [(dp[10:14], "{}-{}-{}".format(dp[0], dp[1], dp[9])) for dp in group_df[group_df['oracle'] == 'CPU'].values.tolist()]
csv_data["{}_GPU".format(group['name'])] = [(dp[10:14], "{}-{}-{}".format(dp[0], dp[1], dp[9])) for dp in group_df[group_df['oracle'] == 'GPU'].values.tolist()]
for k in ['{}_CPU'.format(group['name']), '{}_GPU'.format(group['name'])]:
for dp, name in csv_data[k]:
tsne_mon.register((dp, k, name))
tsne_mon.plot()
# runtime runtime_cpu runtime_gpu
# npb-3.3-BT-exact_rhs5 16 4 4 8 5 2 0 768 [ 8 64 0.25 5 0.5 ] CPU 0.036555 0.039456282295465 0.036221266726596 0.036555 0.028247 0.008308 0.039122 0.031148 0.007974 0 0
# npb-3.3-SP-rhs_norm 64 10 5 6 4 1 0 1024 [ 8 64 0.167 5.33333333333333 1.67 ] GPU 0.029257 0.032185415898677 0.027501519665707 0.030429 0.019423 0.011006 0.029257 0.021179 0.008078 0 0
# shoc-1.1.5-S3D-ratt2_kernel 16384 2426 0 702 0 0 0 262144 32 [83.8056265984655 0 0 3.46] GPU 0.169562 0.48240488604774 0.174838721962068 0.487672 0.060578 0.427094 0.169562 0.055309 0.114253 0 0
# npb-3.3-LU-setbv3 64 14 3 4 0 0 0 1536 8 [85.3333333333333 0 0 3.5 ] CPU 0.037712 0.041938274881693 0.034214350094968 0.037712 0.026837 0.010875 0.03844 0.031063 0.007377 0 0
# norm_0, norm_1 = 0, 0
# for k, v in groups.items():
# for dp in v:
# norm_0 = max(norm_0, dp[0])
# norm_1 = max(norm_1, dp[1])
# for k, v in groups.items():
# for idx, dp in enumerate(v):
# groups[k][idx][0] = groups[k][idx][0] / norm_0
# groups[k][idx][1] = groups[k][idx][1] / norm_1
# norm_0, norm_1 = 0, 0
# for k, v in csv_data.items():
# for dp in v:
# norm_0 = max(norm_0, dp[0])
# norm_1 = max(norm_1, dp[1])
# for k, v in csv_data.items():
# for idx, dp in enumerate(v):
# csv_data[k][idx][0] = csv_data[k][idx][0] / norm_0
# csv_data[k][idx][1] = csv_data[k][idx][1] / norm_1
# for group in csv_groups:
# tsne_mon = monitors.TSNEMonitor(
# cache_path = workspace,
# set_name = 'GPGPU',
# )
# for k in ['GPGPU_correct', 'GPGPU_wrong']:
# for dp in groups[k]:
# tsne_mon.register((dp, k))
# tsne_mon.plot()
# tsne_mon = monitors.TSNEMonitor(
# cache_path = workspace,
# set_name = '{}'.format(group['name']),
# )
# for k in ['{}_correct'.format(group['name']), '{}_wrong'.format(group['name'])]:
# for dp in groups[k]:
# tsne_mon.register((dp, k))
# tsne_mon.plot()
# plot_groups = {}
# for k, v in groups.items():
# plot_groups[k] = {
# 'data': v,
# 'names': []
# }
# plotter.GroupScatterPlot(
# groups = plot_groups,
# plot_name = "test",
# path = workspace,
# title = "test",
# )
# plot_groups = {}
# for k, v in csv_data.items():
# plot_groups[k] = {
# 'data': v,
# 'names': []
# }
# plotter.GroupScatterPlot(
# groups = plot_groups,
# plot_name = "test2",
# path = workspace,
# title = "test2",
# )
# for k, l in groups.items():
# for dp in l:
# tsne_mon.register((dp, k))
# tsne_mon.plot()
return
def fetch_gpgpu_cummins_benchmarks(gpgpu_path: pathlib.Path, cldrive_path: pathlib.Path, out_path: pathlib.Path) -> None:
"""
  Parse the GPGPU folder, isolate and collect all kernel instances,
  drive them through CLDrive and save the results to a CSV.
Args:
gpgpu_path:
Root path where the GPGPU benchmarks are located.
cldrive_path:
Path where the CLDrive database is located.
out_path:
Path where the CSV is going to be exported.
"""
if isinstance(gpgpu_path, str):
gpgpu_path = pathlib.Path(gpgpu_path)
if isinstance(cldrive_path, str):
cldrive_path = pathlib.Path(cldrive_path)
if isinstance(out_path, str):
out_path = pathlib.Path(out_path)
kernels = benchmarks.yield_cl_kernels(gpgpu_path)
gpgpu_benchmarks = []
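  # Keep only kernels that compile; benchmark_worker extracts the Grewe feature
  # vector for every surviving kernel.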
for k in kernels:
try:
_ = opencl.Compile(k[1])
b = benchmarks.benchmark_worker(k, "GreweFeatures")
gpgpu_benchmarks.append(b)
except ValueError:
pass
l.logger().info("Fetched {} GPGPU benchmarks. {} compiled successfully.".format(len(kernels), len(gpgpu_benchmarks)))
datapoints = []
cldrive_db = cldrive.CLDriveExecutions(url = "sqlite:///{}".format(pathlib.Path(cldrive_path).resolve()), must_exist = False)
for k in tqdm.tqdm(gpgpu_benchmarks, total = len(gpgpu_benchmarks), desc = "Benchmark"):
name = '-'.join(str(k.path).split("gpgpu/")[-1].split('/'))
for row in DriveSource(k.contents, "", "GPGPU_benchmarks", k.features, cldrive_db, name = name, no_cache = True):
if row:
datapoints.append(row)
frame = pd.DataFrame(datapoints, columns = DataFrameSchema())
frame.to_csv(out_path)
return
def main(*args, **kwargs):
fetch_gpgpu_cummins_benchmarks(sys.argv[1], sys.argv[2], sys.argv[3])
return
if __name__ == "__main__":
app.run(main)
exit(0)
| 23,483 | 36.335453 | 209 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/experiments/grewe/preamble.py | # preamble
import warnings
import typing
import pathlib
import re
from collections import Counter
# code for the paper:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from numpy.random import RandomState
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from deeplearning.benchpress.experiments.grewe import model
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import plotter
def Finalize(
output: typing.Optional[typing.Union[str, pathlib.Path]] = None,
figsize=None,
tight=True,
**savefig_opts,
):
"""Finalise a plot.
  Save or show the plot, then close it.
  Args:
    output: Path to save figure to. If not given, plot is shown.
    figsize: Figure size in inches.
    tight: Whether to apply a tight layout before rendering.
**savefig_opts: Any additional arguments to pass to
plt.savefig(). Only required if output is not None.
"""
# Set figure size.
if figsize is not None:
plt.gcf().set_size_inches(*figsize)
# Set plot layout.
if tight:
plt.tight_layout()
if output is None:
plt.show()
else:
output = pathlib.Path(output)
output.parent.mkdir(parents=True, exist_ok=True)
plt.savefig(str(output), **savefig_opts)
plt.close()
def mean(array):
"""
  Return the arithmetic mean of a list of numbers (0 for an empty list).
"""
n = len(array)
if n < 1:
return 0
elif n == 1:
return array[0]
return sum(array) / n
# plotting config:
sns.set(style="ticks", color_codes=True)
plt.style.use(["seaborn-white", "seaborn-paper"])
# tables config:
pd.set_option('display.max_rows', 15)
def line_word_char_count(path):
"""count words, lines, chars in file"""
num_lines = 0
num_words = 0
num_chars = 0
with open(path) as infile:
for line in infile:
words = line.split()
num_lines += 1
num_words += len(words)
num_chars += len(line)
return num_lines, num_words, num_chars
def rand_jitter(arr, factor=0.01, randomstate=RandomState(204)):
"""apply jitter to array"""
stdev = factor * (max(arr) - min(arr))
return arr + randomstate.randn(len(arr)) * stdev
def scatter_with_jitter(plt, x, y, **kwargs):
"""scatter x,y values with jitter"""
jitter_opts = kwargs.get("jitter_opts", {})
if "jitter_opts" in kwargs:
kwargs.pop("jitter_opts")
return plt.scatter(rand_jitter(x, **jitter_opts),
rand_jitter(y, **jitter_opts), **kwargs)
def shortlabels(groups):
"""shorten benchmark suite names"""
return [escape_suite_name(re.sub("-.+$", "", x)) for x in groups]
def shortbenchmark(benchmark):
"""short benchmark name"""
return benchmark.split('-')[-1]
def escape_benchmark_name(g):
"""escape benchmark name for display"""
c = g.split('-')
return escape_suite_name(g) + "." + c[-2]
def plot_pca(X, B_out, Bother=None, pca=None):
"""plot PCA projection of feature space"""
def jitter_opts(randomstate):
return {"factor": .075, "randomstate": RandomState(randomstate)}
# size and opacity
plot_opts = {"s": 85, "alpha": .65}
# apply jitter and repack
x, y = zip(*X)
x = rand_jitter(x, **jitter_opts(204))
y = rand_jitter(y, **jitter_opts(205))
X = list(zip(x, y))
# group by correct or not
correct = [x for x, b in zip(X, B_out.to_dict('records')) if b["p_correct"]]
incorrect = [x for x, b in zip(X, B_out.to_dict('records')) if
not b["p_correct"]]
if Bother is not None:
additional = pca.transform(get_raw_features(Bother))
scatter_with_jitter(plt, *zip(*additional), color="g", marker="o",
label="Additional", jitter_opts=jitter_opts(206),
**plot_opts)
plt.scatter(*zip(*incorrect),
color="r", marker="v", label='Incorrect', **plot_opts)
plt.scatter(*zip(*correct),
color="b", marker="^", label='Correct', **plot_opts)
# no tick labels
ax = plt.gca()
ax.set_xticklabels([])
ax.set_yticklabels([])
# axis labels
plt.xlabel(r"Principle Component 1 $\rightarrow$", ha="right")
plt.ylabel(r"Principle Component 2 $\rightarrow$", ha="right")
# position axis labels at end of axis
ax.xaxis.set_label_coords(1, -.025)
ax.yaxis.set_label_coords(-.025, 1)
# show legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
ax.get_legend().draw_frame(True)
return ax
def get_our_model():
"""return extended model"""
return KNeighborsClassifier(1)
def get_our_features(D):
"""return extended featureset"""
return np.array([
D["comp"].values,
D["rational"].values,
D["mem"].values,
D["localmem"].values,
D["coalesced"].values,
D["transfer"].values,
D["wgsize"].values,
(D["transfer"].values / (D["comp"].values + D["mem"].values)),
(D["coalesced"].values / D["mem"].values),
((D["localmem"].values / D["mem"].values) * D["wgsize"].values),
(D["comp"].values / D["mem"].values),
]).T
def get_raw_features(D):
"""return raw feature values"""
return np.array([
D["comp"].values,
D["rational"].values,
D["mem"].values,
D["localmem"].values,
D["coalesced"].values,
D["atomic"].values,
D["transfer"].values,
D["wgsize"].values,
]).T
def get_cgo13_features(D):
"""return features used in CGO'13"""
return np.array([
(D["transfer"].values / (D["comp"].values + D["mem"].values)),
(D["coalesced"].values / D["mem"].values),
((D["localmem"].values / D["mem"].values) * D["wgsize"].values),
(D["comp"].values / D["mem"].values),
]).T
def readfile(path):
"""read file to string"""
with open(path) as infile:
return ''.join(infile.readlines())
def escape_suite_name(g):
"""format benchmark suite name for display"""
c = g.split('-')
if (c[0] == "amd" or c[0] == "npb" or c[0] == "nvidia" or c[0] == "shoc"):
return c[0].upper()
else:
return c[0].capitalize()
def get_nearest_neighbour_distance(F1, F2):
"""return nearest-neighbour distances from F1 to F2"""
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute').fit(F2)
distances, indices = nbrs.kneighbors(F1)
return distances
def plot_speedups_with_clgen(benchmarks_data, clgen_data, synth_bench_name = "CLgen", suite=""):
"""
  Plot speedups of predictive models trained with and without synthetic (clgen) kernels.
  Returns the results frame, the baseline and enhanced geomean speedups, and the
  precision/recall/TNR of the baseline and enhanced models.
"""
# datasets: B - benchmarks, S - synthetics, BS - benchmarks + synthetics:
B = pd.read_csv(benchmarks_data)
B["group"] = ["B"] * len(B)
S = pd.read_csv(clgen_data)
S["group"] = ["S"] * len(S)
BS = pd.concat((B, S))
# find the ZeroR. This is the device which is most frequently optimal
Bmask = B[B["benchmark"].str.contains(suite)]
zeror = Counter(Bmask["oracle"]).most_common(1)[0][0]
zeror_runtime = "runtime_" + zeror.lower()
# get the names of the benchmarks, in the form: $suite-$version-$benchmark
benchmark_names = sorted(set([
re.match(r"^([^0-9]+-[0-9\.]+-[^-]+)-", b).group(1)
for b in B["benchmark"] if b.startswith(suite)
]))
B_out, S_out, BS_out = [], [], []
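  # Leave-one-benchmark-out cross-validation: every benchmark is held out in
  # turn and the model is trained three ways (benchmarks only, synthetics only,
  # benchmarks plus synthetics), collecting predictions in B_out, S_out and BS_out.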
for benchmark in benchmark_names:
clf = model.model()
features = get_cgo13_features
# cross validate on baseline
B_out += model.leave_one_benchmark_out(clf, features, B, benchmark)
# reset model
clf = model.model()
S_out += model.leave_one_benchmark_out(clf, features, BS, benchmark, synthetics = True, is_clgen = True)
clf = model.model()
    # repeat cross-validation with synthetic kernels
BS_out += model.leave_one_benchmark_out(clf, features, BS, benchmark, synthetics = False, is_clgen = True)
# create results frame
R_out = []
for b, s, bs in zip(B_out, S_out, BS_out):
# get runtimes of device using predicted device
b_p_runtime = b["runtime_" + b["p"].lower()]
s_p_runtime = s["runtime_" + s["p"].lower()]
bs_p_runtime = bs["runtime_" + bs["p"].lower()]
best_possible_p = b["runtime_" + b["oracle"].lower()]
# speedup is the ratio of runtime using the predicted device
# over runtime using ZeroR device
b["p_speedup"] = b[zeror_runtime] / b_p_runtime
s["p_speedup"] = s[zeror_runtime] / s_p_runtime
bs["p_speedup"] = bs[zeror_runtime] / bs_p_runtime
b["best_p_speedup"] = b[zeror_runtime] / best_possible_p
b["opt_runtime"] = b_p_runtime
s["opt_runtime"] = s_p_runtime
bs["opt_runtime"] = bs_p_runtime
# print(b_p_runtime, s_p_runtime, bs_p_runtime, b[zeror_runtime], s[zeror_runtime], bs[zeror_runtime])
if "training" in benchmarks_data:
# $benchmark
group = escape_benchmark_name(b["benchmark"])
else:
# $benchmark.$dataset
group = re.sub(r"[^-]+-[0-9\.]+-([^-]+)-.+", r"\1",
b["benchmark"]) + "." + str(b["dataset"])
b["group"] = group
s["group"] = group
bs["group"] = group
# set the training data type
b["training"] = "Grewe et al."
s["training"] = "Only {}".format(synth_bench_name)
bs["training"] = "w. {}".format(synth_bench_name)
R_out.append(b)
R_out.append(s)
R_out.append(bs)
R = pd.DataFrame(R_out)
b_mask = R["training"] == "Grewe et al."
s_mask = R["training"] == "Only {}".format(synth_bench_name)
bs_mask = R["training"] == "w. {}".format(synth_bench_name)
b_gpu = (len(R[b_mask][R[b_mask]["oracle"] == "GPU"]), len(R[b_mask][R[b_mask]["oracle"] == "GPU"][R[b_mask]["p"] == "GPU"]), len(R[b_mask][R[b_mask]["oracle"] == "GPU"][R[b_mask]["p"] == "CPU"]))
b_cpu = (len(R[b_mask][R[b_mask]["oracle"] == "CPU"]), len(R[b_mask][R[b_mask]["oracle"] == "CPU"][R[b_mask]["p"] == "GPU"]), len(R[b_mask][R[b_mask]["oracle"] == "CPU"][R[b_mask]["p"] == "CPU"]))
s_gpu = (len(R[s_mask][R[s_mask]["oracle"] == "GPU"]), len(R[s_mask][R[s_mask]["oracle"] == "GPU"][R[s_mask]["p"] == "GPU"]), len(R[s_mask][R[s_mask]["oracle"] == "GPU"][R[s_mask]["p"] == "CPU"]))
s_cpu = (len(R[s_mask][R[s_mask]["oracle"] == "CPU"]), len(R[s_mask][R[s_mask]["oracle"] == "CPU"][R[s_mask]["p"] == "GPU"]), len(R[s_mask][R[s_mask]["oracle"] == "CPU"][R[s_mask]["p"] == "CPU"]))
bs_gpu = (len(R[bs_mask][R[bs_mask]["oracle"] == "GPU"]), len(R[bs_mask][R[bs_mask]["oracle"] == "GPU"][R[bs_mask]["p"] == "GPU"]), len(R[bs_mask][R[bs_mask]["oracle"] == "GPU"][R[bs_mask]["p"] == "CPU"]))
bs_cpu = (len(R[bs_mask][R[bs_mask]["oracle"] == "CPU"]), len(R[bs_mask][R[bs_mask]["oracle"] == "CPU"][R[bs_mask]["p"] == "GPU"]), len(R[bs_mask][R[bs_mask]["oracle"] == "CPU"][R[bs_mask]["p"] == "CPU"]))
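  # Each tuple above is (total, predicted GPU, predicted CPU) for the rows whose
  # oracle label matches the suffix, one per training configuration.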
base_p = b_gpu[0]
base_tp = b_gpu[1] # Predict GPU is GPU
base_fn = b_gpu[2] # Predict CPU is GPU
base_n = b_cpu[0]
base_tn = b_cpu[2] # Predicts CPU is CPU
base_fp = b_cpu[1] # Predicts GPU is CPU
enhanced_p = bs_gpu[0]
enhanced_tp = bs_gpu[1]
enhanced_fn = bs_gpu[2]
enhanced_n = bs_cpu[0]
enhanced_tn = bs_cpu[2]
enhanced_fp = bs_cpu[1]
base_precision = base_tp / (base_tp + base_fp)
base_recall = base_tp / (base_p)
base_tnr = base_tn / (base_tn + base_fp)
enhanced_precision = enhanced_tp / (enhanced_tp + enhanced_fp)
enhanced_recall = enhanced_tp / (enhanced_p)
enhanced_tnr = enhanced_tn / (enhanced_tn + enhanced_fp)
# print("{} GPU Oracle Grewe: {} GPU / {} CPU".format(b_gpu[0], b_gpu[1], b_gpu[2]))
# print("{} GPU Oracle Only {}: {} GPU / {} CPU".format(s_gpu[0], synth_bench_name, s_gpu[1], s_gpu[2]))
# print("{} GPU Oracle Grewe + {}: {} GPU / {} CPU".format(bs_gpu[0], synth_bench_name, bs_gpu[1], bs_gpu[2]))
# print()
# print("{} CPU Oracle Grewe: {} GPU / {} CPU".format(b_cpu[0], b_cpu[1], b_cpu[2]))
# print("{} CPU Oracle Only {}: {} GPU / {} CPU".format(s_cpu[0], synth_bench_name, s_cpu[1], s_cpu[2]))
# print("{} CPU Oracle Grewe + {}: {} GPU / {} CPU".format(bs_cpu[0], synth_bench_name, bs_cpu[1], bs_cpu[2]))
B_speedup = mean(R[b_mask].groupby(["group"])["p_speedup"].mean())
S_speedup = mean(R[s_mask].groupby(["group"])["p_speedup"].mean())
BS_speedup = mean(R[bs_mask].groupby(["group"])["p_speedup"].mean())
B_runtimes = R[b_mask]["opt_runtime"].mean()
S_runtimes = R[s_mask]["opt_runtime"].mean()
BS_runtimes = R[bs_mask]["opt_runtime"].mean()
groups = {
"Benchmarks": {},
"Bench+Synth": {},
"Synthetics": {},
}
bench_times = 0.0
benchsynth_times = 0.0
synth_times = 0.0
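  # Bucket the per-row speedups into integer bins to build a frequency
  # histogram for each training configuration.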
for x in R[b_mask]["p_speedup"]:
x = int(x)
if x not in groups["Benchmarks"]:
groups["Benchmarks"][x] = 1
else:
groups["Benchmarks"][x] += 1
for x in R[bs_mask]["p_speedup"]:
x = int(x)
if x not in groups["Bench+Synth"]:
groups["Bench+Synth"][x] = 1
else:
groups["Bench+Synth"][x] += 1
for x in R[s_mask]["p_speedup"]:
x = int(x)
if x not in groups["Synthetics"]:
groups["Synthetics"][x] = 1
else:
groups["Synthetics"][x] += 1
for k, v in groups.items():
groups[k] = (list(v.keys()), list(v.values()))
plotter.GrouppedBars(
    groups = groups, # Dict[str, Tuple[List[int], List[int]]]
plot_name = "speedup_distribution",
path = pathlib.Path("."),
title = "Speedup distribution frequency",
x_name = "Speedup absolute value",
)
b_distr = distributions.GenericDistribution([int(x) for x in R[b_mask]["p_speedup"]], "plots", "benchmarks")
s_distr = distributions.GenericDistribution([int(x) for x in R[s_mask]["p_speedup"]], "plots", "synthetics")
bs_distr = distributions.GenericDistribution([int(x) for x in R[bs_mask]["p_speedup"]], "plots", "synthetics_benchmarks")
b_distr.plot()
s_distr.plot()
bs_distr.plot()
(s_distr - b_distr).plot()
(bs_distr - b_distr).plot()
base = float(model.geomean([x for x in R[b_mask]["p_speedup"]]))
enhanced = float(model.geomean([x for x in R[bs_mask]["p_speedup"]]))
print(" #. benchmarks: ",
len(set(B["benchmark"])), "kernels,", len(B), "observations")
print(" #. synthetic: ",
len(set(S["benchmark"])), "kernels,", len(S), "observations")
print()
print(" ZeroR device: {}".format(zeror))
print()
print(" Speedup of Grewe et al.: {:.2f} x".format(B_speedup))
print(" Speedup w. {}: {:.2f} x".format(synth_bench_name, BS_speedup))
print(" Speedup Only {}: {:.2f} x".format(synth_bench_name, S_speedup))
print(" Geo Speedup of Grewe et al.: {:.2f} x".format(base))
print(" Geo Speedup w. {}: {:.2f} x".format(synth_bench_name, enhanced))
print(" Geo Speedup Only {}: {:.2f} x".format(synth_bench_name, model.geomean([x for x in R[s_mask]["p_speedup"]])))
print(" Best speedup {}: {:.2f} x".format("Best", model.geomean([x for x in R[b_mask]["best_p_speedup"]])))
bft = [x.p_speedup for idx, x in R[b_mask].iterrows() if x.group == "FT.B"]
sft = [x.p_speedup for idx, x in R[s_mask].iterrows() if x.group == "FT.B"]
bsft = [x.p_speedup for idx, x in R[bs_mask].iterrows() if x.group == "FT.B"]
R = R.append({ # average bars
"group": "Average",
"p_speedup": B_speedup,
"training": "Grewe et al."
}, ignore_index=True)
R = R.append({
"group": "Average",
"p_speedup": BS_speedup,
"training": "w. {}".format(synth_bench_name)
}, ignore_index=True)
R["p_speedup"] -= 1 # negative offset so that bars start at 1
# colors
palette = sns.cubehelix_palette(len(set(R["training"])),
rot=-.4, light=.85, dark=.35)
ax = sns.barplot(
x="group", y="p_speedup", data=R, ci=None, hue="training",
palette=palette)
plt.ylabel("Speedup")
plt.xlabel("")
plt.axhline(y=0, color="k", lw=1) # speedup line
plt.axvline(x=plt.xlim()[1] - 1, color="k", lw=1,
linestyle="--") # average line
ax.get_legend().set_title("") # no legend title
plt.legend(loc='upper right')
ax.get_legend().draw_frame(True)
# plot shape and size
figsize = (3*9, 3*2.2)
if "nvidia" in benchmarks_data:
    typecast = int
    plt.ylim(-1, 16)
  elif "training" in benchmarks_data:
    typecast = float
figsize = (3*7, 3*3.2)
else:
typecast = float
# counter negative offset:
ax.set_yticklabels([typecast(i) + 1 for i in ax.get_yticks()])
plt.setp(ax.get_xticklabels(), rotation=90)
Finalize(output = "plot.png", figsize=figsize, tight=True)
## Return predictive model's speedup when A) trained on gpgpu and B) gpgpu+synthetics
return R, base, enhanced, base_precision, base_recall, base_tnr, enhanced_precision, enhanced_recall, enhanced_tnr
def _compare_clfs(clf1, get_features1, clf2, get_features2, D1, D2, benchmark):
"""cross-validate across all benchmarks using CGO13 model and our own, with
and without synthetic benchmarks. Report per-platform speedup of our model
over CGO13"""
test1_mask = D1["benchmark"].str.contains(r"^" + benchmark)
test2_mask = D2["benchmark"].str.contains(r"^" + benchmark)
assert (len(D1[test1_mask]) == len(D2[test2_mask]))
# create data masks. For training we exclude all results from benchmark
train1_mask = ~test1_mask
train2_mask = ~test2_mask
# create training and testing data
X1_train = get_features1(D1.loc[train1_mask])
X2_train = get_features2(D2.loc[train2_mask])
y1_train = model.getlabels(D1[train1_mask])
y2_train = model.getlabels(D2[train2_mask])
D1_test = D1[test1_mask]
D2_test = D2[test2_mask]
X1_test = get_features1(D1.loc[test1_mask])
X2_test = get_features2(D2.loc[test2_mask])
y1_test = model.getlabels(D1_test)
y2_test = model.getlabels(D2_test)
clf1.fit(X1_train, y1_train) # train classifiers
clf2.fit(X2_train, y2_train)
predicted1 = clf1.predict(X1_test) # make predictions
predicted2 = clf2.predict(X2_test)
D_out = []
for d, y, p1, p2 in zip(D1_test.to_dict('records'), y1_test,
predicted1, predicted2):
d["p1"], d["p2"] = p1, p2
D_out.append(d)
return D_out # return a list of dicts
def plot_speedups_extended_model_2platform(platform_a, platform_b):
"""
Plot speedup of extended model over Grewe et al for 2 platforms
"""
aB = pd.read_csv(platform_a[0])
aB["synthetic"] = np.zeros(len(aB))
bB = pd.read_csv(platform_b[0])
bB["synthetic"] = np.zeros(len(bB))
B = pd.concat((aB, bB))
aS = pd.read_csv(platform_a[1])
aS["synthetic"] = np.ones(len(aS))
bS = pd.read_csv(platform_b[1])
bS["synthetic"] = np.ones(len(bS))
S = pd.concat((aS, bS))
aBS = pd.concat((aB, aS))
bBS = pd.concat((bB, bS))
BS = pd.concat((B, S))
assert (len(B) == len(aB) + len(bB)) # sanity checks
assert (len(S) == len(aS) + len(bS))
assert (len(BS) == len(aBS) + len(bBS))
# get benchmark names: <suite>-<benchmark>
benchmark_names = sorted(set([
re.match(r"^([^0-9]+-[0-9\.]+-[^-]+)", b).group(1)
for b in B["benchmark"]
]))
# perform cross-validation
B_out = []
for i, benchmark in enumerate(benchmark_names):
cgo13_clf, our_clf = model.model(), get_our_model()
cgo13_features, our_features = get_cgo13_features, get_our_features
# cross validate on Grewe et al. and our model
tmp = _compare_clfs(cgo13_clf, cgo13_features, our_clf, our_features,
aBS, aBS, benchmark)
for d in tmp: d["platform"] = "AMD Tahiti 7970"
B_out += tmp
# reset models
cgo13_clf, our_clf = model.model(), get_our_model()
# same as before, on other platform:
tmp = _compare_clfs(cgo13_clf, cgo13_features, our_clf, our_features,
bBS, bBS, benchmark)
for d in tmp: d["platform"] = "NVIDIA GTX 970"
B_out += tmp
# create results frame
R_out = []
# get runtimes of device using predicted device
for b in B_out:
p1_runtime = b["runtime_" + b["p1"].lower()]
p2_runtime = b["runtime_" + b["p2"].lower()]
# speedup is the ratio of runtime using our predicted device
# over runtime using CGO13 predicted device.
b["p_speedup"] = p2_runtime / p1_runtime
# get the benchmark name
b["group"] = escape_benchmark_name(b["benchmark"])
R_out.append(b)
R = pd.DataFrame(R_out)
improved = R[R["p_speedup"] > 1]
Amask = R["platform"] == "AMD Tahiti 7970"
Bmask = R["platform"] == "NVIDIA GTX 970"
a = R[Amask]
b = R[Bmask]
a_speedups = a.groupby(["group"])["p_speedup"].mean()
b_speedups = b.groupby(["group"])["p_speedup"].mean()
a_speedup = mean(a_speedups)
b_speedup = mean(b_speedups)
assert (len(R) == len(a) + len(b)) # sanity-check
print(" #. benchmarks: ",
len(set(B["benchmark"])), "kernels,", len(B), "observations")
print(" #. synthetic: ",
len(set(S["benchmark"])), "kernels,", len(S), "observations")
print()
print(" Speedup on AMD: {:.2f} x".format(a_speedup))
print(" Speedup on NVIDIA: {:.2f} x".format(b_speedup))
palette = sns.cubehelix_palette(
len(set(R["platform"])), start=4, rot=.8, light=.8, dark=.3)
R = R.append({ # average bars
"group": "Average",
"p_speedup": a_speedup,
"platform": "AMD Tahiti 7970"
}, ignore_index=True)
R = R.append({
"group": "Average",
"p_speedup": b_speedup,
"platform": "NVIDIA GTX 970"
}, ignore_index=True)
R["p_speedup"] -= 1 # negative offset so that bars start at 1
ax = sns.barplot(x="group", y="p_speedup", hue="platform", data=R,
palette=palette, ci=None)
plt.ylabel("Speedup over Grewe et al.");
plt.xlabel("")
plt.axhline(y=0, color="k", lw=1)
plt.axvline(x=plt.xlim()[1] - 1, color="k", lw=1, linestyle="--")
plt.ylim(-1, 9)
plt.setp(ax.get_xticklabels(), rotation=90) # rotate x ticks
ax.get_legend().set_title("") # legend
plt.legend(loc='upper right')
# counter negative offset
ax.set_yticklabels([int(i) + 1 for i in ax.get_yticks()])
ax.get_legend().draw_frame(True)
Finalize(figsize=(9, 4), tight=True)
def plot_speedups_extended_model(benchmarks_data, clgen_data):
"""
Plots speedups of extended model over Grewe et al
Returns: speedup
"""
B = pd.read_csv(benchmarks_data)
B["synthetic"] = np.zeros(len(B))
S = pd.read_csv(clgen_data)
S["synthetic"] = np.ones(len(S))
BS = pd.concat((B, S))
assert (len(BS) == len(B) + len(S))
# get benchmark names: <suite>-<benchmark>
benchmark_names = sorted(set([
re.match(r"^([^0-9]+-[0-9\.]+-[^-]+)", b).group(1)
for b in B["benchmark"]
]))
# perform cross-validation
B_out = []
for i, benchmark in enumerate(benchmark_names):
cgo13_clf, our_clf = model.model(), get_our_model()
cgo13_features, our_features = get_cgo13_features, get_our_features
# cross validate on Grewe et al. and our model
tmp = _compare_clfs(cgo13_clf, cgo13_features, our_clf, our_features,
BS, BS, benchmark)
B_out += tmp
# create results frame
R_out = []
# get runtimes of device using predicted device
for b in B_out:
p1_runtime = b["runtime_" + b["p1"].lower()]
p2_runtime = b["runtime_" + b["p2"].lower()]
# speedup is the ratio of runtime using our predicted device
# over runtime using CGO13 predicted device.
b["p_speedup"] = p2_runtime / p1_runtime
# get the benchmark name
b["group"] = escape_benchmark_name(b["benchmark"])
R_out.append(b)
R = pd.DataFrame(R_out)
improved = R[R["p_speedup"] > 1]
speedups = R.groupby(["group"])["p_speedup"].mean()
speedup = mean(speedups)
print(" #. benchmarks: ",
len(set(B["benchmark"])), "kernels,", len(B), "observations")
print(" #. synthetic: ",
len(set(S["benchmark"])), "kernels,", len(S), "observations")
print()
print(" Speedup: {:.2f} x".format(speedup))
palette = sns.cubehelix_palette(1, start=4, rot=.8, light=.8, dark=.3)
R = R.append({ # average bar
"group": "Average",
"p_speedup": speedup
}, ignore_index=True)
R["p_speedup"] -= 1 # negative offset so that bars start at 1
ax = sns.barplot(x="group", y="p_speedup", data=R,
palette=palette, ci=None)
plt.ylabel("Speedup over Grewe et al.");
plt.xlabel("")
plt.axhline(y=0, color="k", lw=1)
plt.axvline(x=plt.xlim()[1] - 1, color="k", lw=1, linestyle="--")
plt.ylim(-1, 9)
plt.setp(ax.get_xticklabels(), rotation=90) # rotate x ticks
# counter negative offset
ax.set_yticklabels([int(i) + 1 for i in ax.get_yticks()])
Finalize(figsize=(7, 3.7), tight=True)
return speedup
# Saved values from cldrive cache.
bp_al = [0.06, 0.01, 0.04, 0.05, 0.06, 0.07, 0.06, 0.06, 0.07, 0.06]
bp_pl = [-0.08, -0.06, -0.03, -0.1, 0.01, 0.0, 0.01, -0.01, 0.0, 0.0]
| 24,555 | 31.353096 | 207 | py |
DeepGlow | DeepGlow-main/example.py | from DeepGlow import Emulator
import numpy as np
model = Emulator(simtype='ism')
observing_times=np.array([1e5,1e6,1e7])
observing_frequencies = np.array([1e9,1e12,1e15])
GRB_params = np.array([0,-1,0,0,0.1,0.1,2.2,-2,-2,0])
flux_values = model.flux(params=GRB_params,t_obs=observing_times,nu_obs=observing_frequencies)
print(flux_values)
# [5.75068180e-01, 8.58790301e-01, 5.39014321e-05]
| 393 | 29.307692 | 94 | py |