repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
seq2seq | seq2seq-master/bin/tools/generate_vocab.py | #! /usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#pylint: disable=invalid-name
"""
Generate vocabulary for a tokenized text file.
"""
import sys
import argparse
import collections
import logging
parser = argparse.ArgumentParser(
description="Generate vocabulary for a tokenized text file.")
parser.add_argument(
"--min_frequency",
dest="min_frequency",
type=int,
default=0,
help="Minimum frequency of a word to be included in the vocabulary.")
parser.add_argument(
"--max_vocab_size",
dest="max_vocab_size",
type=int,
help="Maximum number of tokens in the vocabulary")
parser.add_argument(
    "--downcase",
    dest="downcase",
    action="store_true",
    help="If set, downcase all text before processing.",
    default=False)
parser.add_argument(
"infile",
nargs="?",
type=argparse.FileType("r"),
default=sys.stdin,
help="Input tokenized text file to be processed.")
parser.add_argument(
"--delimiter",
dest="delimiter",
type=str,
default=" ",
help="Delimiter character for tokenizing. Use \" \" and \"\" for word and char level respectively."
)
args = parser.parse_args()
# Counter for all tokens in the vocabulary
cnt = collections.Counter()
for line in args.infile:
if args.downcase:
line = line.lower()
if args.delimiter == "":
tokens = list(line.strip())
else:
tokens = line.strip().split(args.delimiter)
tokens = [_ for _ in tokens if len(_) > 0]
cnt.update(tokens)
logging.info("Found %d unique tokens in the vocabulary.", len(cnt))
# Filter tokens below the frequency threshold
if args.min_frequency > 0:
filtered_tokens = [(w, c) for w, c in cnt.most_common()
if c > args.min_frequency]
cnt = collections.Counter(dict(filtered_tokens))
logging.info("Found %d unique tokens with frequency > %d.",
len(cnt), args.min_frequency)
# Sort tokens by 1. frequency 2. lexically to break ties
word_with_counts = cnt.most_common()
word_with_counts = sorted(
word_with_counts, key=lambda x: (x[1], x[0]), reverse=True)
# Take only max-vocab
if args.max_vocab_size is not None:
word_with_counts = word_with_counts[:args.max_vocab_size]
for word, count in word_with_counts:
print("{}\t{}".format(word, count))
| 2,816 | 28.652632 | 103 | py |
seq2seq | seq2seq-master/bin/tools/profile.py | #! /usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script to generates model profiling information
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import six
#pylint: disable=E0611
from google.protobuf import text_format
import tensorflow as tf
from tensorflow.contrib.tfprof import model_analyzer
from tensorflow.contrib.tfprof.python.tools.tfprof import tfprof_logger
from tensorflow import gfile
from tensorflow.tools.tfprof import tfprof_log_pb2
from tensorflow.python.framework import op_def_registry # pylint: disable=E0611
from tensorflow.python.framework.ops import RegisterShape # pylint: disable=E0611
from tensorflow.python.framework import common_shapes # pylint: disable=E0611
# Import custom ops
from seq2seq.decoders.attention import att_sum_bahdanau, att_sum_dot
tf.flags.DEFINE_string("model_dir", None, "path to model directory")
FLAGS = tf.flags.FLAGS
CUSTOM_OP_FUNCTIONS = [att_sum_bahdanau, att_sum_dot]
def _register_function_ops(func_list):
"""Registers custom ops in the default graph. This is needed
Because our checkpoint is saved with ops that are not part of Tensorflow."""
op_dict = op_def_registry.get_registered_ops()
for func in func_list:
#pylint: disable=W0212
func._create_definition_if_needed()
op_def = func._definition.signature
op_dict[op_def.name] = op_def
RegisterShape(op_def.name)(common_shapes.unknown_shape)
def load_metadata(model_dir):
"""Loads RunMetadata, Graph and OpLog from files
"""
# Import RunMetadata
run_meta_path = os.path.join(model_dir, "metadata/run_meta")
run_meta = tf.RunMetadata()
if gfile.Exists(run_meta_path):
with gfile.GFile(run_meta_path, "rb") as file:
run_meta.MergeFromString(file.read())
print("Loaded RunMetadata from {}".format(run_meta_path))
else:
print("RunMetadata does not exist a {}. Skipping.".format(run_meta_path))
# Import Graph
graph_def_path = os.path.join(model_dir, "graph.pbtxt")
graph = tf.Graph()
if gfile.Exists(graph_def_path):
with graph.as_default():
_register_function_ops(CUSTOM_OP_FUNCTIONS)
graph_def = tf.GraphDef()
with gfile.GFile(graph_def_path, "rb") as file:
text_format.Parse(file.read(), graph_def)
tf.import_graph_def(graph_def, name="")
print("Loaded Graph from {}".format(graph_def_path))
else:
print("Graph does not exist a {}. Skipping.".format(graph_def_path))
# Import OpLog
op_log_path = os.path.join(model_dir, "metadata/tfprof_log")
op_log = tfprof_log_pb2.OpLog()
if gfile.Exists(op_log_path):
with gfile.GFile(op_log_path, "rb") as file:
op_log.MergeFromString(file.read())
print("Loaded OpLog from {}".format(op_log_path))
else:
print("OpLog does not exist a {}. Skipping.".format(op_log_path))
return run_meta, graph, op_log
def merge_default_with_oplog(graph, op_log=None, run_meta=None):
"""Monkeypatch. There currently is a bug in tfprof_logger that
prevents it from being used with Python 3. So we override the method
manually until the fix comes in.
"""
tmp_op_log = tfprof_log_pb2.OpLog()
# pylint: disable=W0212
logged_ops = tfprof_logger._get_logged_ops(graph, run_meta)
if not op_log:
tmp_op_log.log_entries.extend(logged_ops.values())
else:
all_ops = dict()
for entry in op_log.log_entries:
all_ops[entry.name] = entry
for op_name, entry in six.iteritems(logged_ops):
if op_name in all_ops:
all_ops[op_name].types.extend(entry.types)
if entry.float_ops > 0 and all_ops[op_name].float_ops == 0:
all_ops[op_name].float_ops = entry.float_ops
else:
all_ops[op_name] = entry
tmp_op_log.log_entries.extend(all_ops.values())
return tmp_op_log
def param_analysis_options(output_dir):
"""Options for model parameter analysis
"""
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options["select"] = ["params", "bytes"]
options["order_by"] = "params"
options["account_type_regexes"] = ["Variable"]
if output_dir:
options["dump_to_file"] = os.path.join(output_dir, "params.txt")
return "scope", options
def micro_analysis_options(output_dir):
"""Options for microsecond analysis
"""
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options["select"] = ["micros", "device"]
options["min_micros"] = 1000
options["account_type_regexes"] = [".*"]
options["order_by"] = "micros"
if output_dir:
options["dump_to_file"] = os.path.join(output_dir, "micro.txt")
return "graph", options
def flops_analysis_options(output_dir):
"""Options for FLOPS analysis
"""
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options["select"] = ["float_ops", "micros", "device"]
options["min_float_ops"] = 1
options["order_by"] = "float_ops"
options["account_type_regexes"] = [".*"]
if output_dir:
options["dump_to_file"] = os.path.join(output_dir, "flops.txt")
return "scope", options
def device_analysis_options(output_dir):
"""Options for device placement analysis
"""
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options["select"] = ["device", "float_ops", "micros"]
options["order_by"] = "name"
options["account_type_regexes"] = [".*"]
if output_dir:
options["dump_to_file"] = os.path.join(output_dir, "device.txt")
return "scope", options
def main(_argv):
"""Main functions. Runs all anaylses."""
# pylint: disable=W0212
tfprof_logger._merge_default_with_oplog = merge_default_with_oplog
FLAGS.model_dir = os.path.abspath(os.path.expanduser(FLAGS.model_dir))
output_dir = os.path.join(FLAGS.model_dir, "profile")
gfile.MakeDirs(output_dir)
run_meta, graph, op_log = load_metadata(FLAGS.model_dir)
param_arguments = [
param_analysis_options(output_dir),
      micro_analysis_options(output_dir),
flops_analysis_options(output_dir),
device_analysis_options(output_dir),
]
for tfprof_cmd, params in param_arguments:
model_analyzer.print_model_analysis(
graph=graph,
run_meta=run_meta,
op_log=op_log,
tfprof_cmd=tfprof_cmd,
tfprof_options=params)
if params["dump_to_file"] != "":
print("Wrote {}".format(params["dump_to_file"]))
if __name__ == '__main__':
tf.app.run()
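# Usage sketch (not part of the original script; the model directory path is
# hypothetical). Assuming the directory contains graph.pbtxt and the metadata/
# files written during training, this dumps params.txt, micro.txt, flops.txt
# and device.txt into <model_dir>/profile:
#
#   python profile.py --model_dir ~/models/my_seq2seq_model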
| 6,966 | 32.820388 | 81 | py |
seq2seq | seq2seq-master/bin/tools/generate_toy_data.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions to generate various toy datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import numpy as np
import io
PARSER = argparse.ArgumentParser(description="Generates toy datasets.")
PARSER.add_argument(
"--vocab_size", type=int, default=100, help="size of the vocabulary")
PARSER.add_argument(
"--num_examples", type=int, default=10000, help="number of examples")
PARSER.add_argument(
"--min_len", type=int, default=5, help="minimum sequence length")
PARSER.add_argument(
"--max_len", type=int, default=40, help="maximum sequence length")
PARSER.add_argument(
"--type",
type=str,
default="copy",
choices=["copy", "reverse"],
help="Type of dataet to generate. One of \"copy\" or \"reverse\"")
PARSER.add_argument(
"--output_dir",
type=str,
help="path to the output directory",
required=True)
ARGS = PARSER.parse_args()
VOCABULARY = list([str(x) for x in range(ARGS.vocab_size - 1)])
VOCABULARY += ["笑"]
def make_copy(num_examples, min_len, max_len):
"""
Generates a dataset where the target is equal to the source.
Sequence lengths are chosen randomly from [min_len, max_len].
Args:
num_examples: Number of examples to generate
min_len: Minimum sequence length
max_len: Maximum sequence length
Returns:
An iterator of (source, target) string tuples.
"""
for _ in range(num_examples):
turn_length = np.random.choice(np.arange(min_len, max_len + 1))
source_tokens = np.random.choice(
list(VOCABULARY), size=turn_length, replace=True)
target_tokens = source_tokens
yield " ".join(source_tokens), " ".join(target_tokens)
def make_reverse(num_examples, min_len, max_len):
"""
Generates a dataset where the target is equal to the source reversed.
Sequence lengths are chosen randomly from [min_len, max_len].
Args:
num_examples: Number of examples to generate
min_len: Minimum sequence length
max_len: Maximum sequence length
Returns:
An iterator of (source, target) string tuples.
"""
for _ in range(num_examples):
turn_length = np.random.choice(np.arange(min_len, max_len + 1))
source_tokens = np.random.choice(
list(VOCABULARY), size=turn_length, replace=True)
target_tokens = source_tokens[::-1]
yield " ".join(source_tokens), " ".join(target_tokens)
def write_parallel_text(sources, targets, output_prefix):
"""
Writes two files where each line corresponds to one example
    - [output_prefix]/sources.txt
    - [output_prefix]/targets.txt
Args:
sources: Iterator of source strings
targets: Iterator of target strings
    output_prefix: Directory the output files are written to
"""
source_filename = os.path.abspath(os.path.join(output_prefix, "sources.txt"))
target_filename = os.path.abspath(os.path.join(output_prefix, "targets.txt"))
with io.open(source_filename, "w", encoding='utf8') as source_file:
for record in sources:
source_file.write(record + "\n")
print("Wrote {}".format(source_filename))
with io.open(target_filename, "w", encoding='utf8') as target_file:
for record in targets:
target_file.write(record + "\n")
print("Wrote {}".format(target_filename))
def main():
"""Main function"""
if ARGS.type == "copy":
generate_fn = make_copy
elif ARGS.type == "reverse":
generate_fn = make_reverse
# Generate dataset
examples = list(generate_fn(ARGS.num_examples, ARGS.min_len, ARGS.max_len))
try:
os.makedirs(ARGS.output_dir)
except OSError:
if not os.path.isdir(ARGS.output_dir):
raise
# Write train data
train_sources, train_targets = zip(*examples)
write_parallel_text(train_sources, train_targets, ARGS.output_dir)
if __name__ == "__main__":
main()
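# Usage sketch (not part of the original script; the output path is
# hypothetical). This writes sources.txt and targets.txt, one space-delimited
# example per line, into the output directory:
#
#   python generate_toy_data.py --type reverse --num_examples 10000 \
#       --max_len 20 --output_dir /tmp/toy_reverse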
| 4,493 | 29.364865 | 79 | py |
seq2seq | seq2seq-master/bin/data/cnn_daily_mail_summarization/process_story.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processes a CNN/Daily Mail story file into a format that can
be used for summarization.
"""
import fileinput
import re
def process_story(text):
"""Processed a story text into an (article, summary) tuple.
"""
# Split by highlights
elements = text.split("@highlight")
elements = [_.strip() for _ in elements]
story_text = elements[0]
highlights = elements[1:]
# Join all highlights into a single blob
highlights_joined = "; ".join(highlights)
highlights_joined = re.sub(r"\s+", " ", highlights_joined)
highlights_joined = highlights_joined.strip()
# Remove newlines from story
# story_text = story_text.replace("\n", " ")
story_text = re.sub(r"\s+", " ", story_text)
story_text = story_text.strip()
return story_text, highlights_joined
def main(*args, **kwargs):
"""Program entry point"""
story_text = "\n".join(list(fileinput.input()))
story, highlights = process_story(story_text)
if story and highlights:
print("{}\t{}".format(story, highlights))
if __name__ == '__main__':
main()
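# Usage sketch (not part of the original script; the file name is
# hypothetical). The script reads a story file from stdin or its arguments and
# prints a single tab-separated "<article>\t<highlights>" line:
#
#   python process_story.py stories/example.story >> summarization_train.tsv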
| 1,667 | 27.271186 | 74 | py |
seq2seq | seq2seq-master/seq2seq/losses.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operations related to calculating sequence losses.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def cross_entropy_sequence_loss(logits, targets, sequence_length):
"""Calculates the per-example cross-entropy loss for a sequence of logits and
  masks out all losses past the sequence length.
Args:
logits: Logits of shape `[T, B, vocab_size]`
targets: Target classes of shape `[T, B]`
sequence_length: An int32 tensor of shape `[B]` corresponding
to the length of each input
Returns:
A tensor of shape [T, B] that contains the loss per example, per time step.
"""
with tf.name_scope("cross_entropy_sequence_loss"):
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=targets)
# Mask out the losses we don't care about
loss_mask = tf.sequence_mask(
tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])
return losses
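def _example_usage():
  """Illustrative sketch only (not part of the original module). Shows the
  expected time-major shapes for `cross_entropy_sequence_loss`; all sizes
  below are made up.
  """
  logits = tf.zeros([20, 32, 1000])  # [T, B, vocab_size]
  targets = tf.zeros([20, 32], dtype=tf.int32)  # [T, B]
  lengths = tf.fill([32], 20)  # [B], true length of each example
  # Positions past each example's length contribute zero loss.
  return cross_entropy_sequence_loss(logits, targets, lengths)  # [T, B]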
| 1,666 | 34.468085 | 79 | py |
seq2seq | seq2seq-master/seq2seq/graph_module.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All graph components that create Variables should inherit from this
base class defined in this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class GraphModule(object):
"""
Convenience class that makes it easy to share variables.
  Each instance of this class creates its own set of variables, but
each subsequent execution of an instance will re-use its variables.
Graph components that define variables should inherit from this class
and implement their logic in the `_build` method.
"""
def __init__(self, name):
"""
Initialize the module. Each subclass must call this constructor with a name.
Args:
name: Name of this module. Used for `tf.make_template`.
"""
self.name = name
self._template = tf.make_template(name, self._build, create_scope_now_=True)
# Docstrings for the class should be the docstring for the _build method
self.__doc__ = self._build.__doc__
# pylint: disable=E1101
self.__call__.__func__.__doc__ = self._build.__doc__
def _build(self, *args, **kwargs):
"""Subclasses should implement their logic here.
"""
raise NotImplementedError
def __call__(self, *args, **kwargs):
# pylint: disable=missing-docstring
return self._template(*args, **kwargs)
def variable_scope(self):
"""Returns the proper variable scope for this module.
"""
return tf.variable_scope(self._template.variable_scope)
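class _ExampleProjectionModule(GraphModule):
  """Minimal subclass sketch (not part of the original module); the name and
  layer size are made up. Variables are created on the first call and re-used
  on subsequent calls because `__call__` goes through `tf.make_template`.
  """
  def __init__(self, output_size=128, name="example_projection"):
    super(_ExampleProjectionModule, self).__init__(name)
    self._output_size = output_size
  def _build(self, inputs):
    # A single shared linear projection of the inputs.
    return tf.contrib.layers.fully_connected(
        inputs, self._output_size, activation_fn=None)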
| 2,091 | 32.206349 | 80 | py |
seq2seq | seq2seq-master/seq2seq/global_vars.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of global variables.
"""
SYNC_REPLICAS_OPTIMIZER = None
| 648 | 33.157895 | 74 | py |
seq2seq | seq2seq-master/seq2seq/graph_utils.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utility function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def templatemethod(name_):
"""This decorator wraps a method with `tf.make_template`. For example,
  @templatemethod("my_method")
def my_method():
# Create variables
"""
def template_decorator(func):
"""Inner decorator function"""
def func_wrapper(*args, **kwargs):
"""Inner wrapper function"""
templated_func = tf.make_template(name_, func)
return templated_func(*args, **kwargs)
return func_wrapper
return template_decorator
def add_dict_to_collection(dict_, collection_name):
"""Adds a dictionary to a graph collection.
Args:
dict_: A dictionary of string keys to tensor values
collection_name: The name of the collection to add the dictionary to
"""
key_collection = collection_name + "_keys"
value_collection = collection_name + "_values"
for key, value in dict_.items():
tf.add_to_collection(key_collection, key)
tf.add_to_collection(value_collection, value)
def get_dict_from_collection(collection_name):
"""Gets a dictionary from a graph collection.
Args:
collection_name: A collection name to read a dictionary from
Returns:
A dictionary with string keys and tensor values
"""
key_collection = collection_name + "_keys"
value_collection = collection_name + "_values"
keys = tf.get_collection(key_collection)
values = tf.get_collection(value_collection)
return dict(zip(keys, values))
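def _example_usage():
  """Illustrative sketch only (not part of the original module); the tensor
  and collection names are made up.
  """
  source_tokens = tf.placeholder(tf.string, [None, None])
  # Store tensors by name in one part of the graph-building code ...
  add_dict_to_collection({"source_tokens": source_tokens}, "features")
  # ... and look them up again elsewhere without passing them around.
  features = get_dict_from_collection("features")
  return features["source_tokens"]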
| 2,141 | 28.342466 | 74 | py |
seq2seq | seq2seq-master/seq2seq/configurable.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abstract base class for objects that are configurable using
a parameters dictionary.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
from pydoc import locate
import six
import yaml
import tensorflow as tf
class abstractstaticmethod(staticmethod): #pylint: disable=C0111,C0103
"""Decorates a method as abstract and static"""
__slots__ = ()
def __init__(self, function):
super(abstractstaticmethod, self).__init__(function)
function.__isabstractmethod__ = True
__isabstractmethod__ = True
def _create_from_dict(dict_, default_module, *args, **kwargs):
"""Creates a configurable class from a dictionary. The dictionary must have
"class" and "params" properties. The class can be either fully qualified, or
it is looked up in the modules passed via `default_module`.
"""
class_ = locate(dict_["class"]) or getattr(default_module, dict_["class"])
params = {}
if "params" in dict_:
params = dict_["params"]
instance = class_(params, *args, **kwargs)
return instance
def _maybe_load_yaml(item):
"""Parses `item` only if it is a string. If `item` is a dictionary
it is returned as-is.
"""
if isinstance(item, six.string_types):
return yaml.load(item)
elif isinstance(item, dict):
return item
else:
raise ValueError("Got {}, expected YAML string or dict", type(item))
def _deep_merge_dict(dict_x, dict_y, path=None):
"""Recursively merges dict_y into dict_x.
"""
if path is None: path = []
for key in dict_y:
if key in dict_x:
if isinstance(dict_x[key], dict) and isinstance(dict_y[key], dict):
_deep_merge_dict(dict_x[key], dict_y[key], path + [str(key)])
elif dict_x[key] == dict_y[key]:
pass # same leaf value
else:
dict_x[key] = dict_y[key]
else:
dict_x[key] = dict_y[key]
return dict_x
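# Worked example (a sketch): merging dict_y = {"a": {"d": 3}, "c": 9} into
# dict_x = {"a": {"b": 1}, "c": 2} yields {"a": {"b": 1, "d": 3}, "c": 9};
# nested dicts are merged recursively and conflicting leaves take dict_y's value.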
def _parse_params(params, default_params):
"""Parses parameter values to the types defined by the default parameters.
Default parameters are used for missing values.
"""
# Cast parameters to correct types
if params is None:
params = {}
result = copy.deepcopy(default_params)
for key, value in params.items():
    # Raise on unknown parameters so configuration typos are caught early
if key not in default_params:
raise ValueError("%s is not a valid model parameter" % key)
# Param is a dictionary
if isinstance(value, dict):
default_dict = default_params[key]
if not isinstance(default_dict, dict):
raise ValueError("%s should not be a dictionary", key)
if default_dict:
value = _parse_params(value, default_dict)
else:
# If the default is an empty dict we do not typecheck it
# and assume it's done downstream
pass
if value is None:
continue
if default_params[key] is None:
result[key] = value
else:
result[key] = type(default_params[key])(value)
return result
@six.add_metaclass(abc.ABCMeta)
class Configurable(object):
"""Interface for all classes that are configurable
via a parameters dictionary.
Args:
params: A dictionary of parameters.
mode: A value in tf.contrib.learn.ModeKeys
"""
def __init__(self, params, mode):
self._params = _parse_params(params, self.default_params())
self._mode = mode
self._print_params()
def _print_params(self):
"""Logs parameter values"""
classname = self.__class__.__name__
tf.logging.info("Creating %s in mode=%s", classname, self._mode)
tf.logging.info("\n%s", yaml.dump({classname: self._params}))
@property
def mode(self):
"""Returns a value in tf.contrib.learn.ModeKeys.
"""
return self._mode
@property
def params(self):
"""Returns a dictionary of parsed parameters.
"""
return self._params
@abstractstaticmethod
def default_params():
"""Returns a dictionary of default parameters. The default parameters
are used to define the expected type of passed parameters. Missing
parameter values are replaced with the defaults returned by this method.
"""
raise NotImplementedError
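class _ToyEncoder(Configurable):
  """Minimal subclass sketch (not part of the original module); the class name
  and parameter values are made up. Constructor params are type-cast against
  these defaults, e.g. {"num_units": "256"} becomes {"num_units": 256}, and
  unknown keys raise a ValueError.
  """
  @staticmethod
  def default_params():
    return {"num_units": 128, "dropout_keep_prob": 1.0}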
| 4,758 | 29.120253 | 78 | py |
seq2seq | seq2seq-master/seq2seq/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
seq2seq library base module
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from seq2seq.graph_module import GraphModule
from seq2seq import contrib
from seq2seq import data
from seq2seq import decoders
from seq2seq import encoders
from seq2seq import global_vars
from seq2seq import graph_utils
from seq2seq import inference
from seq2seq import losses
from seq2seq import metrics
from seq2seq import models
from seq2seq import test
from seq2seq import training
| 1,110 | 29.861111 | 74 | py |
seq2seq | seq2seq-master/seq2seq/training/hooks.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Collection of tf.train.SessionRunHooks
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import os
import numpy as np
import six
import yaml
import tensorflow as tf
from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer # pylint: disable=E0611
from tensorflow.python.training import session_manager # pylint: disable=E0611
from tensorflow.python.client import timeline # pylint: disable=E0611
from tensorflow import gfile
from seq2seq.configurable import Configurable, abstractstaticmethod
from seq2seq import graph_utils, global_vars
FLAGS = tf.flags.FLAGS
@six.add_metaclass(abc.ABCMeta)
class TrainingHook(tf.train.SessionRunHook, Configurable):
"""Abstract base class for training hooks.
"""
def __init__(self, params, model_dir, run_config):
tf.train.SessionRunHook.__init__(self)
Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.TRAIN)
self._model_dir = model_dir
self._run_config = run_config
@property
def model_dir(self):
"""Returns the directory model checkpoints are written to.
"""
return os.path.abspath(self._model_dir)
@property
def is_chief(self):
"""Returns true if and only if the current process is the chief.
This is used for distributed training.
"""
return self._run_config.is_chief
@abstractstaticmethod
def default_params():
raise NotImplementedError()
class MetadataCaptureHook(TrainingHook):
"""A hook to capture metadata for a single step.
Useful for performance debugging. It performs a full trace and saves
run_metadata and Chrome timeline information to a file.
Args:
    step: The step number to trace. The hook is only enabled for this step.
"""
def __init__(self, params, model_dir, run_config):
super(MetadataCaptureHook, self).__init__(params, model_dir, run_config)
self._active = False
self._done = False
self._global_step = None
self._output_dir = os.path.abspath(self.model_dir)
@staticmethod
def default_params():
return {"step": 10}
def begin(self):
self._global_step = tf.train.get_global_step()
def before_run(self, _run_context):
if not self.is_chief or self._done:
return
if not self._active:
return tf.train.SessionRunArgs(self._global_step)
else:
tf.logging.info("Performing full trace on next step.")
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) #pylint: disable=E1101
return tf.train.SessionRunArgs(self._global_step, options=run_options)
def after_run(self, _run_context, run_values):
if not self.is_chief or self._done:
return
step_done = run_values.results
if self._active:
tf.logging.info("Captured full trace at step %s", step_done)
# Create output directory
gfile.MakeDirs(self._output_dir)
# Save run metadata
trace_path = os.path.join(self._output_dir, "run_meta")
with gfile.GFile(trace_path, "wb") as trace_file:
trace_file.write(run_values.run_metadata.SerializeToString())
tf.logging.info("Saved run_metadata to %s", trace_path)
# Save timeline
timeline_path = os.path.join(self._output_dir, "timeline.json")
with gfile.GFile(timeline_path, "w") as timeline_file:
tl_info = timeline.Timeline(run_values.run_metadata.step_stats)
tl_chrome = tl_info.generate_chrome_trace_format(show_memory=True)
timeline_file.write(tl_chrome)
tf.logging.info("Saved timeline to %s", timeline_path)
# Save tfprof op log
tf.contrib.tfprof.tfprof_logger.write_op_log(
graph=tf.get_default_graph(),
log_dir=self._output_dir,
run_meta=run_values.run_metadata)
tf.logging.info("Saved op log to %s", self._output_dir)
self._active = False
self._done = True
self._active = (step_done >= self.params["step"])
class TrainSampleHook(TrainingHook):
"""Occasionally samples predictions from the training run and prints them.
Params:
every_n_secs: Sample predictions every N seconds.
If set, `every_n_steps` must be None.
every_n_steps: Sample predictions every N steps.
If set, `every_n_secs` must be None.
sample_dir: Optional, a directory to write samples to.
delimiter: Join tokens on this delimiter. Defaults to space.
"""
#pylint: disable=missing-docstring
def __init__(self, params, model_dir, run_config):
super(TrainSampleHook, self).__init__(params, model_dir, run_config)
self._sample_dir = os.path.join(self.model_dir, "samples")
self._timer = SecondOrStepTimer(
every_secs=self.params["every_n_secs"],
every_steps=self.params["every_n_steps"])
self._pred_dict = {}
self._should_trigger = False
self._iter_count = 0
self._global_step = None
self._source_delimiter = self.params["source_delimiter"]
self._target_delimiter = self.params["target_delimiter"]
@staticmethod
def default_params():
return {
"every_n_secs": None,
"every_n_steps": 1000,
"source_delimiter": " ",
"target_delimiter": " "
}
def begin(self):
self._iter_count = 0
self._global_step = tf.train.get_global_step()
self._pred_dict = graph_utils.get_dict_from_collection("predictions")
# Create the sample directory
if self._sample_dir is not None:
gfile.MakeDirs(self._sample_dir)
def before_run(self, _run_context):
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
if self._should_trigger:
fetches = {
"predicted_tokens": self._pred_dict["predicted_tokens"],
"target_words": self._pred_dict["labels.target_tokens"],
"target_len": self._pred_dict["labels.target_len"]
}
return tf.train.SessionRunArgs([fetches, self._global_step])
return tf.train.SessionRunArgs([{}, self._global_step])
def after_run(self, _run_context, run_values):
result_dict, step = run_values.results
self._iter_count = step
if not self._should_trigger:
return None
# Convert dict of lists to list of dicts
result_dicts = [
dict(zip(result_dict, t)) for t in zip(*result_dict.values())
]
# Print results
result_str = ""
result_str += "Prediction followed by Target @ Step {}\n".format(step)
result_str += ("=" * 100) + "\n"
for result in result_dicts:
target_len = result["target_len"]
predicted_slice = result["predicted_tokens"][:target_len - 1]
target_slice = result["target_words"][1:target_len]
result_str += self._target_delimiter.encode("utf-8").join(
predicted_slice).decode("utf-8") + "\n"
result_str += self._target_delimiter.encode("utf-8").join(
target_slice).decode("utf-8") + "\n\n"
result_str += ("=" * 100) + "\n\n"
tf.logging.info(result_str)
if self._sample_dir:
filepath = os.path.join(self._sample_dir,
"samples_{:06d}.txt".format(step))
with gfile.GFile(filepath, "w") as file:
file.write(result_str)
self._timer.update_last_triggered_step(self._iter_count - 1)
class PrintModelAnalysisHook(TrainingHook):
"""Writes the parameters of the model to a file and stdout.
"""
#pylint: disable=missing-docstring
def __init__(self, params, model_dir, run_config):
super(PrintModelAnalysisHook, self).__init__(params, model_dir, run_config)
self._filename = os.path.join(self.model_dir, "model_analysis.txt")
@staticmethod
def default_params():
return {}
def begin(self):
# Dump to file on the chief worker
if self.is_chief:
opts = tf.contrib.tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
opts['dump_to_file'] = os.path.abspath(self._filename)
tf.contrib.tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(), tfprof_options=opts)
# Print the model analysis
with gfile.GFile(self._filename) as file:
tf.logging.info(file.read())
class VariableRestoreHook(TrainingHook):
"""A hooks that restored variables from a given checkpoints.
Params:
prefix: Variables matching this prefix are restored.
checkpoint_path: Path to the checkpoint to restore variables from.
"""
def __init__(self, params, model_dir, run_config):
super(VariableRestoreHook, self).__init__(params, model_dir, run_config)
self._saver = None
@staticmethod
def default_params():
return {"prefix": "", "checkpoint_path": ""}
def begin(self):
variables = tf.contrib.framework.get_variables(scope=self.params["prefix"])
def varname_in_checkpoint(name):
"""Removes the prefix from the variable name.
"""
prefix_parts = self.params["prefix"].split("/")
checkpoint_prefix = "/".join(prefix_parts[:-1])
return name.replace(checkpoint_prefix + "/", "")
target_names = [varname_in_checkpoint(_.op.name) for _ in variables]
restore_map = {k: v for k, v in zip(target_names, variables)}
tf.logging.info("Restoring variables: \n%s",
yaml.dump({k: v.op.name
for k, v in restore_map.items()}))
self._saver = tf.train.Saver(restore_map)
def after_create_session(self, session, coord):
self._saver.restore(session, self.params["checkpoint_path"])
tf.logging.info("Successfully restored all variables")
class DelayStartHook(TrainingHook, tf.train.GlobalStepWaiterHook):
"""Delays the start of the current worker process until global step
K * task_id is reached. K is a parameter.
"""
def __init__(self, params, model_dir, run_config):
TrainingHook.__init__(self, params, model_dir, run_config)
self._task_id = self._run_config.task_id
self._delay_k = self.params["delay_k"]
self._wait_until_step = int(self._delay_k * self._task_id)
tf.train.GlobalStepWaiterHook.__init__(self, self._wait_until_step)
@staticmethod
def default_params():
return {"delay_k": 500}
class SyncReplicasOptimizerHook(TrainingHook):
"""A SessionRunHook handles ops related to SyncReplicasOptimizer."""
def __init__(self, params, model_dir, run_config):
super(SyncReplicasOptimizerHook, self).__init__(
params, model_dir, run_config)
self._sync_optimizer = None
self._num_tokens = -1
self._local_init_op = None
self._ready_for_local_init_op = None
self._q_runner = None
self._init_tokens_op = None
@staticmethod
def default_params():
return {}
def begin(self):
if global_vars.SYNC_REPLICAS_OPTIMIZER is not None:
self._sync_optimizer = global_vars.SYNC_REPLICAS_OPTIMIZER
else:
return
if self._sync_optimizer._gradients_applied is False: # pylint: disable=protected-access
raise ValueError(
"SyncReplicasOptimizer.apply_gradient should be called before using "
"the hook.")
if self.is_chief:
self._local_init_op = self._sync_optimizer.chief_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = self._sync_optimizer.get_chief_queue_runner()
self._init_tokens_op = self._sync_optimizer.get_init_tokens_op(
self._num_tokens)
else:
self._local_init_op = self._sync_optimizer.local_step_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = None
self._init_tokens_op = None
def after_create_session(self, session, coord):
"""Runs SyncReplicasOptimizer initialization ops."""
if not self._sync_optimizer:
return
tf.logging.info("Found SyncReplicasOptimizer. Initializing.")
local_init_success, msg = session_manager._ready( # pylint: disable=protected-access
self._ready_for_local_init_op, session,
"Model is not ready for SyncReplicasOptimizer local init.")
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for SyncReplicasOptimizer "
"local_init. Init op: %s, error: %s" %
(self._local_init_op.name, msg))
session.run(self._local_init_op)
if self._init_tokens_op is not None:
session.run(self._init_tokens_op)
if self._q_runner is not None:
self._q_runner.create_threads(
session, coord=coord, daemon=True, start=True)
| 13,075 | 33.776596 | 105 | py |
seq2seq | seq2seq-master/seq2seq/training/utils.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous training utility functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import os
from collections import defaultdict
from pydoc import locate
import json
import tensorflow as tf
from tensorflow import gfile
from seq2seq.contrib import rnn_cell
class TrainOptions(object):
"""A collection of options that are passed to the training script
and can be saved to perform inference later.
Args:
    model_class: Name of the model class.
    model_params: A dictionary of parameters passed to the model.
"""
def __init__(self, model_class, model_params):
self._model_class = model_class
self._model_params = model_params
@property
def model_class(self):
"""Returns the training task parameters"""
return self._model_class
@property
def model_params(self):
"""Returns the training task class"""
return self._model_params
@staticmethod
def path(model_dir):
"""Returns the path to the options file.
Args:
model_dir: The model directory
"""
return os.path.join(model_dir, "train_options.json")
def dump(self, model_dir):
"""Dumps the options to a file in the model directory.
Args:
model_dir: Path to the model directory. The options will be
dumped into a file in this directory.
"""
gfile.MakeDirs(model_dir)
options_dict = {
"model_class": self.model_class,
"model_params": self.model_params,
}
with gfile.GFile(TrainOptions.path(model_dir), "wb") as file:
file.write(json.dumps(options_dict).encode("utf-8"))
@staticmethod
def load(model_dir):
""" Loads options from the given model directory.
Args:
model_dir: Path to the model directory.
"""
with gfile.GFile(TrainOptions.path(model_dir), "rb") as file:
options_dict = json.loads(file.read().decode("utf-8"))
options_dict = defaultdict(None, options_dict)
return TrainOptions(
model_class=options_dict["model_class"],
model_params=options_dict["model_params"])
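# Usage sketch (not part of the original module; the paths and values below
# are made up):
#
#   opts = TrainOptions(model_class="AttentionSeq2Seq",
#                       model_params={"embedding.dim": 256})
#   opts.dump("/tmp/model_dir")    # writes /tmp/model_dir/train_options.json
#   restored = TrainOptions.load("/tmp/model_dir")
#   restored.model_params["embedding.dim"]  # => 256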
def cell_from_spec(cell_classname, cell_params):
"""Create a RNN Cell instance from a JSON string.
Args:
cell_classname: Name of the cell class, e.g. "BasicLSTMCell".
cell_params: A dictionary of parameters to pass to the cell constructor.
Returns:
A RNNCell instance.
"""
cell_params = cell_params.copy()
# Find the cell class
cell_class = locate(cell_classname) or getattr(rnn_cell, cell_classname)
# Make sure additional arguments are valid
cell_args = set(inspect.getargspec(cell_class.__init__).args[1:])
for key in cell_params.keys():
if key not in cell_args:
raise ValueError(
"""{} is not a valid argument for {} class. Available arguments
are: {}""".format(key, cell_class.__name__, cell_args))
# Create cell
return cell_class(**cell_params)
def get_rnn_cell(cell_class,
cell_params,
num_layers=1,
dropout_input_keep_prob=1.0,
dropout_output_keep_prob=1.0,
residual_connections=False,
residual_combiner="add",
residual_dense=False):
"""Creates a new RNN Cell
Args:
cell_class: Name of the cell class, e.g. "BasicLSTMCell".
cell_params: A dictionary of parameters to pass to the cell constructor.
num_layers: Number of layers. The cell will be wrapped with
`tf.contrib.rnn.MultiRNNCell`
dropout_input_keep_prob: Dropout keep probability applied
to the input of cell *at each layer*
dropout_output_keep_prob: Dropout keep probability applied
to the output of cell *at each layer*
residual_connections: If true, add residual connections
between all cells
Returns:
An instance of `tf.contrib.rnn.RNNCell`.
"""
cells = []
for _ in range(num_layers):
cell = cell_from_spec(cell_class, cell_params)
if dropout_input_keep_prob < 1.0 or dropout_output_keep_prob < 1.0:
cell = tf.contrib.rnn.DropoutWrapper(
cell=cell,
input_keep_prob=dropout_input_keep_prob,
output_keep_prob=dropout_output_keep_prob)
cells.append(cell)
if len(cells) > 1:
final_cell = rnn_cell.ExtendedMultiRNNCell(
cells=cells,
residual_connections=residual_connections,
residual_combiner=residual_combiner,
residual_dense=residual_dense)
else:
final_cell = cells[0]
return final_cell
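def _example_get_rnn_cell():
  """Illustrative sketch only (not part of the original module); the cell type
  and sizes are made up. Builds a 2-layer LSTM with dropout applied to each
  layer's input.
  """
  return get_rnn_cell(
      cell_class="BasicLSTMCell",
      cell_params={"num_units": 512},
      num_layers=2,
      dropout_input_keep_prob=0.8,
      dropout_output_keep_prob=1.0)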
def create_learning_rate_decay_fn(decay_type,
decay_steps,
decay_rate,
start_decay_at=0,
stop_decay_at=1e9,
min_learning_rate=None,
staircase=False):
"""Creates a function that decays the learning rate.
Args:
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
start_decay_at: Don't decay before this step
stop_decay_at: Don't decay after this step
min_learning_rate: Don't decay below this number
decay_type: A decay function name defined in `tf.train`
staircase: Whether to apply decay in a discrete staircase,
as opposed to continuous, fashion.
Returns:
A function that takes (learning_rate, global_step) as inputs
and returns the learning rate for the given step.
Returns `None` if decay_type is empty or None.
"""
if decay_type is None or decay_type == "":
return None
start_decay_at = tf.to_int32(start_decay_at)
stop_decay_at = tf.to_int32(stop_decay_at)
def decay_fn(learning_rate, global_step):
"""The computed learning rate decay function.
"""
global_step = tf.to_int32(global_step)
decay_type_fn = getattr(tf.train, decay_type)
decayed_learning_rate = decay_type_fn(
learning_rate=learning_rate,
global_step=tf.minimum(global_step, stop_decay_at) - start_decay_at,
decay_steps=decay_steps,
decay_rate=decay_rate,
staircase=staircase,
name="decayed_learning_rate")
final_lr = tf.train.piecewise_constant(
x=global_step,
boundaries=[start_decay_at],
values=[learning_rate, decayed_learning_rate])
if min_learning_rate:
final_lr = tf.maximum(final_lr, min_learning_rate)
return final_lr
return decay_fn
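def _example_decay_fn():
  """Illustrative sketch only (not part of the original module); all values
  are made up. Exponential decay by 5% every 1000 steps, starting at step 5000
  and floored at 1e-6.
  """
  return create_learning_rate_decay_fn(
      decay_type="exponential_decay",
      decay_steps=1000,
      decay_rate=0.95,
      start_decay_at=5000,
      min_learning_rate=1e-6,
      staircase=True)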
def create_input_fn(pipeline,
batch_size,
bucket_boundaries=None,
allow_smaller_final_batch=False,
scope=None):
"""Creates an input function that can be used with tf.learn estimators.
  Note that you must pass "factory functions" for both the data provider and
featurizer to ensure that everything will be created in the same graph.
Args:
pipeline: An instance of `seq2seq.data.InputPipeline`.
batch_size: Create batches of this size. A queue to hold a
reasonable number of batches in memory is created.
bucket_boundaries: int list, increasing non-negative numbers.
      If None, no bucketing is performed.
Returns:
An input function that returns `(feature_batch, labels_batch)`
tuples when called.
"""
def input_fn():
"""Creates features and labels.
"""
with tf.variable_scope(scope or "input_fn"):
data_provider = pipeline.make_data_provider()
features_and_labels = pipeline.read_from_data_provider(data_provider)
if bucket_boundaries:
_, batch = tf.contrib.training.bucket_by_sequence_length(
input_length=features_and_labels["source_len"],
bucket_boundaries=bucket_boundaries,
tensors=features_and_labels,
batch_size=batch_size,
keep_input=features_and_labels["source_len"] >= 1,
dynamic_pad=True,
capacity=5000 + 16 * batch_size,
allow_smaller_final_batch=allow_smaller_final_batch,
name="bucket_queue")
else:
batch = tf.train.batch(
tensors=features_and_labels,
enqueue_many=False,
batch_size=batch_size,
dynamic_pad=True,
capacity=5000 + 16 * batch_size,
allow_smaller_final_batch=allow_smaller_final_batch,
name="batch_queue")
# Separate features and labels
features_batch = {k: batch[k] for k in pipeline.feature_keys}
if set(batch.keys()).intersection(pipeline.label_keys):
labels_batch = {k: batch[k] for k in pipeline.label_keys}
else:
labels_batch = None
return features_batch, labels_batch
return input_fn
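# Usage sketch (not part of the original module): `pipeline` is assumed to be
# an already-constructed seq2seq.data.InputPipeline instance; the batch size
# and bucket boundaries are illustrative.
#
#   input_fn = create_input_fn(pipeline, batch_size=32,
#                              bucket_boundaries=[8, 16, 24, 32])
#   features, labels = input_fn()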
| 9,354 | 30.819728 | 78 | py |
seq2seq | seq2seq-master/seq2seq/training/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operatations and wrappers to help with model training.
"""
from seq2seq.training import hooks
from seq2seq.training import utils
| 709 | 36.368421 | 74 | py |
seq2seq | seq2seq-master/seq2seq/models/model_base.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for models"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import tensorflow as tf
from seq2seq.configurable import Configurable
from seq2seq.training import utils as training_utils
from seq2seq import global_vars
def _flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within
the dictionary are converted to dicts.
Args:
dict_: The dictionary to flatten.
parent_key: A prefix to prepend to each key.
sep: Separator between parent and child keys, a string. For example
{ "a": { "b": 3 } } will become { "a.b": 3 } if the separator is ".".
Returns:
A new flattened dictionary.
"""
items = []
for key, value in dict_.items():
new_key = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(_flatten_dict(value, new_key, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(_flatten_dict(dict_items, new_key, sep=sep).items())
else:
items.append((new_key, value))
return dict(items)
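# Worked example (a sketch): _flatten_dict({"features": {"source_len": x}})
# returns {"features.source_len": x}; namedtuple values are expanded the same
# way using their field names.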
class ModelBase(Configurable):
"""Abstract base class for models.
Args:
params: A dictionary of hyperparameter values
name: A name for this model to be used as a variable scope
"""
def __init__(self, params, mode, name):
self.name = name
Configurable.__init__(self, params, mode)
def _clip_gradients(self, grads_and_vars):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = tf.clip_by_global_norm(
gradients, self.params["optimizer.clip_gradients"])
return list(zip(clipped_gradients, variables))
def _create_optimizer(self):
"""Creates the optimizer"""
name = self.params["optimizer.name"]
optimizer = tf.contrib.layers.OPTIMIZER_CLS_NAMES[name](
learning_rate=self.params["optimizer.learning_rate"],
**self.params["optimizer.params"])
# Optionally wrap with SyncReplicasOptimizer
if self.params["optimizer.sync_replicas"] > 0:
optimizer = tf.train.SyncReplicasOptimizer(
opt=optimizer,
replicas_to_aggregate=self.params[
"optimizer.sync_replicas_to_aggregate"],
total_num_replicas=self.params["optimizer.sync_replicas"])
# This is really ugly, but we need to do this to make the optimizer
# accessible outside of the model.
global_vars.SYNC_REPLICAS_OPTIMIZER = optimizer
return optimizer
def _build_train_op(self, loss):
"""Creates the training operation"""
learning_rate_decay_fn = training_utils.create_learning_rate_decay_fn(
decay_type=self.params["optimizer.lr_decay_type"] or None,
decay_steps=self.params["optimizer.lr_decay_steps"],
decay_rate=self.params["optimizer.lr_decay_rate"],
start_decay_at=self.params["optimizer.lr_start_decay_at"],
stop_decay_at=self.params["optimizer.lr_stop_decay_at"],
min_learning_rate=self.params["optimizer.lr_min_learning_rate"],
staircase=self.params["optimizer.lr_staircase"])
optimizer = self._create_optimizer()
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=self.params["optimizer.learning_rate"],
learning_rate_decay_fn=learning_rate_decay_fn,
clip_gradients=self._clip_gradients,
optimizer=optimizer,
summaries=["learning_rate", "loss", "gradients", "gradient_norm"])
return train_op
@staticmethod
def default_params():
"""Returns a dictionary of default parameters for this model."""
return {
"optimizer.name": "Adam",
"optimizer.learning_rate": 1e-4,
"optimizer.params": {}, # Arbitrary parameters for the optimizer
"optimizer.lr_decay_type": "",
"optimizer.lr_decay_steps": 100,
"optimizer.lr_decay_rate": 0.99,
"optimizer.lr_start_decay_at": 0,
"optimizer.lr_stop_decay_at": tf.int32.max,
"optimizer.lr_min_learning_rate": 1e-12,
"optimizer.lr_staircase": False,
"optimizer.clip_gradients": 5.0,
"optimizer.sync_replicas": 0,
"optimizer.sync_replicas_to_aggregate": 0,
}
def batch_size(self, features, labels):
"""Returns the batch size for a batch of examples"""
raise NotImplementedError()
def __call__(self, features, labels, params):
"""Creates the model graph. See the model_fn documentation in
tf.contrib.learn.Estimator class for a more detailed explanation.
"""
with tf.variable_scope("model"):
with tf.variable_scope(self.name):
return self._build(features, labels, params)
def _build(self, features, labels, params):
"""Subclasses should implement this method. See the `model_fn` documentation
in tf.contrib.learn.Estimator class for a more detailed explanation.
"""
raise NotImplementedError
| 5,752 | 36.601307 | 80 | py |
seq2seq | seq2seq-master/seq2seq/models/seq2seq_model.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for models"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import tensorflow as tf
from seq2seq import graph_utils
from seq2seq import losses as seq2seq_losses
from seq2seq.contrib.seq2seq.decoder import _transpose_batch_time
from seq2seq.data import vocab
from seq2seq.graph_utils import templatemethod
from seq2seq.decoders.beam_search_decoder import BeamSearchDecoder
from seq2seq.inference import beam_search
from seq2seq.models.model_base import ModelBase, _flatten_dict
class Seq2SeqModel(ModelBase):
"""Base class for seq2seq models with embeddings
"""
def __init__(self, params, mode, name):
super(Seq2SeqModel, self).__init__(params, mode, name)
self.source_vocab_info = None
if "vocab_source" in self.params and self.params["vocab_source"]:
self.source_vocab_info = vocab.get_vocab_info(self.params["vocab_source"])
self.target_vocab_info = None
if "vocab_target" in self.params and self.params["vocab_target"]:
self.target_vocab_info = vocab.get_vocab_info(self.params["vocab_target"])
@staticmethod
def default_params():
params = ModelBase.default_params()
params.update({
"source.max_seq_len": 50,
"source.reverse": True,
"target.max_seq_len": 50,
"embedding.dim": 100,
"embedding.init_scale": 0.04,
"embedding.share": False,
"inference.beam_search.beam_width": 0,
"inference.beam_search.length_penalty_weight": 0.0,
"inference.beam_search.choose_successors_fn": "choose_top_k",
"optimizer.clip_embed_gradients": 0.1,
"vocab_source": "",
"vocab_target": "",
})
return params
def _clip_gradients(self, grads_and_vars):
"""In addition to standard gradient clipping, also clips embedding
gradients to a specified value."""
grads_and_vars = super(Seq2SeqModel, self)._clip_gradients(grads_and_vars)
clipped_gradients = []
variables = []
for gradient, variable in grads_and_vars:
if "embedding" in variable.name:
tmp = tf.clip_by_norm(
gradient.values, self.params["optimizer.clip_embed_gradients"])
gradient = tf.IndexedSlices(tmp, gradient.indices, gradient.dense_shape)
clipped_gradients.append(gradient)
variables.append(variable)
return list(zip(clipped_gradients, variables))
def _create_predictions(self, decoder_output, features, labels, losses=None):
"""Creates the dictionary of predictions that is returned by the model.
"""
predictions = {}
# Add features and, if available, labels to predictions
predictions.update(_flatten_dict({"features": features}))
if labels is not None:
predictions.update(_flatten_dict({"labels": labels}))
if losses is not None:
predictions["losses"] = _transpose_batch_time(losses)
# Decoders returns output in time-major form [T, B, ...]
# Here we transpose everything back to batch-major for the user
output_dict = collections.OrderedDict(
zip(decoder_output._fields, decoder_output))
decoder_output_flat = _flatten_dict(output_dict)
decoder_output_flat = {
k: _transpose_batch_time(v)
for k, v in decoder_output_flat.items()
}
predictions.update(decoder_output_flat)
# If we predict the ids also map them back into the vocab and process them
if "predicted_ids" in predictions.keys():
vocab_tables = graph_utils.get_dict_from_collection("vocab_tables")
target_id_to_vocab = vocab_tables["target_id_to_vocab"]
predicted_tokens = target_id_to_vocab.lookup(
tf.to_int64(predictions["predicted_ids"]))
# Raw predicted tokens
predictions["predicted_tokens"] = predicted_tokens
return predictions
def batch_size(self, features, labels):
"""Returns the batch size of the curren batch based on the passed
features.
"""
return tf.shape(features["source_ids"])[0]
@property
@templatemethod("source_embedding")
def source_embedding(self):
"""Returns the embedding used for the source sequence.
"""
return tf.get_variable(
name="W",
shape=[self.source_vocab_info.total_size, self.params["embedding.dim"]],
initializer=tf.random_uniform_initializer(
-self.params["embedding.init_scale"],
self.params["embedding.init_scale"]))
@property
@templatemethod("target_embedding")
def target_embedding(self):
"""Returns the embedding used for the target sequence.
"""
if self.params["embedding.share"]:
return self.source_embedding
return tf.get_variable(
name="W",
shape=[self.target_vocab_info.total_size, self.params["embedding.dim"]],
initializer=tf.random_uniform_initializer(
-self.params["embedding.init_scale"],
self.params["embedding.init_scale"]))
@templatemethod("encode")
def encode(self, features, labels):
"""Encodes the inputs.
"""
raise NotImplementedError()
@templatemethod("decode")
def decode(self, encoder_output, features, labels):
"""Runs decoding based on the encoder outputs.
"""
raise NotImplementedError()
def _get_beam_search_decoder(self, decoder):
"""Wraps a decoder into a Beam Search decoder.
Args:
decoder: The original decoder
Returns:
A BeamSearchDecoder with the same interfaces as the original decoder.
"""
config = beam_search.BeamSearchConfig(
beam_width=self.params["inference.beam_search.beam_width"],
vocab_size=self.target_vocab_info.total_size,
eos_token=self.target_vocab_info.special_vocab.SEQUENCE_END,
length_penalty_weight=self.params[
"inference.beam_search.length_penalty_weight"],
choose_successors_fn=getattr(
beam_search,
self.params["inference.beam_search.choose_successors_fn"]))
return BeamSearchDecoder(decoder=decoder, config=config)
@property
def use_beam_search(self):
"""Returns true iff the model should perform beam search.
"""
return self.params["inference.beam_search.beam_width"] > 1
def _preprocess(self, features, labels):
"""Model-specific preprocessing for features and labels:
- Creates vocabulary lookup tables for source and target vocab
- Converts tokens into vocabulary ids
"""
# Create vocabulary lookup for source
source_vocab_to_id, source_id_to_vocab, source_word_to_count, _ = \
vocab.create_vocabulary_lookup_table(self.source_vocab_info.path)
    # Create vocabulary lookup for target
target_vocab_to_id, target_id_to_vocab, target_word_to_count, _ = \
vocab.create_vocabulary_lookup_table(self.target_vocab_info.path)
    # Add vocab tables to graph collection so that we can access them in
# other places.
graph_utils.add_dict_to_collection({
"source_vocab_to_id": source_vocab_to_id,
"source_id_to_vocab": source_id_to_vocab,
"source_word_to_count": source_word_to_count,
"target_vocab_to_id": target_vocab_to_id,
"target_id_to_vocab": target_id_to_vocab,
"target_word_to_count": target_word_to_count
}, "vocab_tables")
# Slice source to max_len
if self.params["source.max_seq_len"] is not None:
features["source_tokens"] = features["source_tokens"][:, :self.params[
"source.max_seq_len"]]
features["source_len"] = tf.minimum(features["source_len"],
self.params["source.max_seq_len"])
# Look up the source ids in the vocabulary
features["source_ids"] = source_vocab_to_id.lookup(features[
"source_tokens"])
# Maybe reverse the source
if self.params["source.reverse"] is True:
features["source_ids"] = tf.reverse_sequence(
input=features["source_ids"],
seq_lengths=features["source_len"],
seq_dim=1,
batch_dim=0,
name=None)
features["source_len"] = tf.to_int32(features["source_len"])
tf.summary.histogram("source_len", tf.to_float(features["source_len"]))
if labels is None:
return features, None
labels = labels.copy()
# Slices targets to max length
if self.params["target.max_seq_len"] is not None:
labels["target_tokens"] = labels["target_tokens"][:, :self.params[
"target.max_seq_len"]]
labels["target_len"] = tf.minimum(labels["target_len"],
self.params["target.max_seq_len"])
# Look up the target ids in the vocabulary
labels["target_ids"] = target_vocab_to_id.lookup(labels["target_tokens"])
labels["target_len"] = tf.to_int32(labels["target_len"])
tf.summary.histogram("target_len", tf.to_float(labels["target_len"]))
# Keep track of the number of processed tokens
num_tokens = tf.reduce_sum(labels["target_len"])
num_tokens += tf.reduce_sum(features["source_len"])
    token_counter_var = tf.Variable(0, name="tokens_counter")
total_tokens = tf.assign_add(token_counter_var, num_tokens)
tf.summary.scalar("num_tokens", total_tokens)
with tf.control_dependencies([total_tokens]):
features["source_tokens"] = tf.identity(features["source_tokens"])
# Add to graph collection for later use
graph_utils.add_dict_to_collection(features, "features")
if labels:
graph_utils.add_dict_to_collection(labels, "labels")
return features, labels
def compute_loss(self, decoder_output, _features, labels):
"""Computes the loss for this model.
    Returns a tuple `(losses, loss)`, where `losses` are the per-timestep
    losses of shape [T, B] and `loss` is a single scalar tensor to minimize.
"""
#pylint: disable=R0201
    # Calculate the loss per timestep and example, shape [T, B] (time-major)
losses = seq2seq_losses.cross_entropy_sequence_loss(
logits=decoder_output.logits[:, :, :],
targets=tf.transpose(labels["target_ids"][:, 1:], [1, 0]),
sequence_length=labels["target_len"] - 1)
# Calculate the average log perplexity
loss = tf.reduce_sum(losses) / tf.to_float(
tf.reduce_sum(labels["target_len"] - 1))
return losses, loss
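  # Shape sketch (illustrative, assuming batch size B and target length T):
  # decoder_output.logits is time-major [T-1, B, vocab_size], the target ids
  # are transposed to [T-1, B] to match, so `losses` is time-major [T-1, B]
  # and `loss` is a scalar averaged over all non-padded target tokens.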
def _build(self, features, labels, params):
# Pre-process features and labels
features, labels = self._preprocess(features, labels)
encoder_output = self.encode(features, labels)
    decoder_output, _ = self.decode(encoder_output, features, labels)
if self.mode == tf.contrib.learn.ModeKeys.INFER:
predictions = self._create_predictions(
decoder_output=decoder_output, features=features, labels=labels)
loss = None
train_op = None
else:
losses, loss = self.compute_loss(decoder_output, features, labels)
train_op = None
if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
train_op = self._build_train_op(loss)
predictions = self._create_predictions(
decoder_output=decoder_output,
features=features,
labels=labels,
losses=losses)
# We add "useful" tensors to the graph collection so that we
    # can easily find them in our hooks/monitors.
graph_utils.add_dict_to_collection(predictions, "predictions")
return predictions, loss, train_op
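# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): the hyperparameter dict
# consumed by Seq2SeqModel subclasses mirrors `default_params()` above. The
# values and vocabulary paths below are arbitrary examples, not defaults.
# ---------------------------------------------------------------------------
def _example_seq2seq_params():
  """Returns an example hyperparameter dict for a Seq2SeqModel subclass."""
  return {
      "source.max_seq_len": 40,                # truncate source sequences
      "source.reverse": True,                  # feed the source reversed
      "target.max_seq_len": 40,
      "embedding.dim": 128,
      "embedding.share": False,                # separate source/target embeddings
      "inference.beam_search.beam_width": 5,   # > 1 enables beam search
      "vocab_source": "/path/to/vocab_source.txt",   # hypothetical path
      "vocab_target": "/path/to/vocab_target.txt",   # hypothetical path
  }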
| 11,905 | 35.860681 | 80 | py |
seq2seq | seq2seq-master/seq2seq/models/bridges.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of bridges between encoder and decoder. A bridge defines
how encoder information is passed to the decoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
from pydoc import locate
import six
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest # pylint: disable=E0611
from seq2seq.configurable import Configurable
def _total_tensor_depth(tensor):
"""Returns the size of a tensor without the first (batch) dimension"""
return np.prod(tensor.get_shape().as_list()[1:])
@six.add_metaclass(abc.ABCMeta)
class Bridge(Configurable):
"""An abstract bridge class. A bridge defines how state is passed
between encoder and decoder.
All logic is contained in the `_create` method, which returns an
initial state for the decoder.
Args:
    encoder_outputs: A namedtuple that corresponds to the encoder outputs.
decoder_state_size: An integer or tuple of integers defining the
state size of the decoder.
"""
def __init__(self, encoder_outputs, decoder_state_size, params, mode):
Configurable.__init__(self, params, mode)
self.encoder_outputs = encoder_outputs
self.decoder_state_size = decoder_state_size
self.batch_size = tf.shape(
nest.flatten(self.encoder_outputs.final_state)[0])[0]
def __call__(self):
"""Runs the bridge function.
Returns:
An initial decoder_state tensor or tuple of tensors.
"""
return self._create()
@abc.abstractmethod
def _create(self):
""" Implements the logic for this bridge.
This function should be implemented by child classes.
Returns:
      An initial decoder state tensor or a tuple of tensors.
"""
raise NotImplementedError("Must be implemented by child class")
class ZeroBridge(Bridge):
"""A bridge that does not pass any information between encoder and decoder
and sets the initial decoder state to 0. The input function is not modified.
"""
@staticmethod
def default_params():
return {}
def _create(self):
zero_state = nest.map_structure(
lambda x: tf.zeros([self.batch_size, x], dtype=tf.float32),
self.decoder_state_size)
return zero_state
class PassThroughBridge(Bridge):
"""Passes the encoder state through to the decoder as-is. This bridge
can only be used if encoder and decoder have the exact same state size, i.e.
use the same RNN cell.
"""
@staticmethod
def default_params():
return {}
def _create(self):
nest.assert_same_structure(self.encoder_outputs.final_state,
self.decoder_state_size)
return self.encoder_outputs.final_state
class InitialStateBridge(Bridge):
"""A bridge that creates an initial decoder state based on the output
of the encoder. This state is created by passing the encoder outputs
through an additional layer to match them to the decoder state size.
The input function remains unmodified.
Args:
    encoder_outputs: A namedtuple that corresponds to the encoder outputs.
decoder_state_size: An integer or tuple of integers defining the
state size of the decoder.
bridge_input: Which attribute of the `encoder_outputs` to use for the
initial state calculation. For example, "final_state" means that
`encoder_outputs.final_state` will be used.
activation_fn: An optional activation function for the extra
      layer inserted between encoder and decoder. A string with the full
      module path of a function, e.g. "tensorflow.tanh".
"""
def __init__(self, encoder_outputs, decoder_state_size, params, mode):
super(InitialStateBridge, self).__init__(encoder_outputs,
decoder_state_size, params, mode)
if not hasattr(encoder_outputs, self.params["bridge_input"]):
raise ValueError("Invalid bridge_input not in encoder outputs.")
self._bridge_input = getattr(encoder_outputs, self.params["bridge_input"])
self._activation_fn = locate(self.params["activation_fn"])
@staticmethod
def default_params():
return {
"bridge_input": "final_state",
"activation_fn": "tensorflow.identity",
}
def _create(self):
    # Concatenate bridge inputs along the depth dimension
bridge_input = nest.map_structure(
lambda x: tf.reshape(x, [self.batch_size, _total_tensor_depth(x)]),
self._bridge_input)
bridge_input_flat = nest.flatten([bridge_input])
bridge_input_concat = tf.concat(bridge_input_flat, 1)
state_size_splits = nest.flatten(self.decoder_state_size)
total_decoder_state_size = sum(state_size_splits)
    # Pass bridge inputs through a fully connected layer
initial_state_flat = tf.contrib.layers.fully_connected(
inputs=bridge_input_concat,
num_outputs=total_decoder_state_size,
activation_fn=self._activation_fn)
# Shape back into required state size
initial_state = tf.split(initial_state_flat, state_size_splits, axis=1)
return nest.pack_sequence_as(self.decoder_state_size, initial_state)
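# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): wiring an InitialStateBridge
# by hand. `FakeEncoderOutput` is a stand-in for the encoder output namedtuple
# used elsewhere in this package; only the `final_state` attribute is needed
# for the default "bridge_input". Shapes and sizes are arbitrary examples.
# ---------------------------------------------------------------------------
def _example_initial_state_bridge(batch_size=4, encoder_units=32,
                                  decoder_units=64):
  """Builds an InitialStateBridge on a dummy encoder state (sketch only)."""
  import collections
  fake_encoder_output = collections.namedtuple("FakeEncoderOutput",
                                               ["final_state"])
  encoder_outputs = fake_encoder_output(
      final_state=tf.zeros([batch_size, encoder_units]))
  bridge = InitialStateBridge(
      encoder_outputs=encoder_outputs,
      decoder_state_size=decoder_units,   # a plain integer state size
      params={},                          # fall back to default_params()
      mode=tf.contrib.learn.ModeKeys.TRAIN)
  # Calling the bridge returns the initial decoder state, here a tensor of
  # shape [batch_size, decoder_units].
  return bridge()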
| 5,728 | 33.305389 | 78 | py |
seq2seq | seq2seq-master/seq2seq/models/image2seq.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of an image-to-sequence (Image2Seq) model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from seq2seq import graph_utils
from seq2seq.data import vocab
from seq2seq.graph_utils import templatemethod
from seq2seq.models.model_base import ModelBase
from seq2seq.models.attention_seq2seq import AttentionSeq2Seq
class Image2Seq(AttentionSeq2Seq):
"""A model that encodes an image and produces a sequence
of tokens.
"""
def __init__(self, params, mode, name="image_seq2seq"):
super(Image2Seq, self).__init__(params, mode, name)
self.params["source.reverse"] = False
self.params["embedding.share"] = False
@staticmethod
def default_params():
params = ModelBase.default_params()
params.update({
"attention.class": "AttentionLayerBahdanau",
"attention.params": {
"num_units": 128
},
"bridge.class": "seq2seq.models.bridges.ZeroBridge",
"bridge.params": {},
"encoder.class": "seq2seq.encoders.InceptionV3Encoder",
"encoder.params": {}, # Arbitrary parameters for the encoder
"decoder.class": "seq2seq.decoders.AttentionDecoder",
"decoder.params": {}, # Arbitrary parameters for the decoder
"target.max_seq_len": 50,
"embedding.dim": 100,
"inference.beam_search.beam_width": 0,
"inference.beam_search.length_penalty_weight": 0.0,
"inference.beam_search.choose_successors_fn": "choose_top_k",
"vocab_target": "",
})
return params
@templatemethod("encode")
def encode(self, features, _labels):
encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode)
return encoder_fn(features["image"])
def batch_size(self, features, _labels):
return tf.shape(features["image"])[0]
def _preprocess(self, features, labels):
"""Model-specific preprocessing for features and labels:
- Creates vocabulary lookup tables for target vocab
- Converts tokens into vocabulary ids
    - Prepends a special "SEQUENCE_START" token to the target
    - Appends a special "SEQUENCE_END" token to the target
"""
    # Create vocabulary lookup for target
target_vocab_to_id, target_id_to_vocab, target_word_to_count, _ = \
vocab.create_vocabulary_lookup_table(self.target_vocab_info.path)
    # Add vocab tables to graph collection so that we can access them in
# other places.
graph_utils.add_dict_to_collection({
"target_vocab_to_id": target_vocab_to_id,
"target_id_to_vocab": target_id_to_vocab,
"target_word_to_count": target_word_to_count
}, "vocab_tables")
if labels is None:
return features, None
labels = labels.copy()
# Slices targets to max length
if self.params["target.max_seq_len"] is not None:
labels["target_tokens"] = labels["target_tokens"][:, :self.params[
"target.max_seq_len"]]
labels["target_len"] = tf.minimum(labels["target_len"],
self.params["target.max_seq_len"])
# Look up the target ids in the vocabulary
labels["target_ids"] = target_vocab_to_id.lookup(labels["target_tokens"])
labels["target_len"] = tf.to_int32(labels["target_len"])
tf.summary.histogram("target_len", tf.to_float(labels["target_len"]))
# Add to graph collection for later use
graph_utils.add_dict_to_collection(features, "features")
if labels:
graph_utils.add_dict_to_collection(labels, "labels")
return features, labels
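# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): Image2Seq expects a
# `features` dict with an "image" entry holding a batch of images. The
# 299x299x3 shape below is an assumption based on the default
# InceptionV3Encoder; the actual shape depends on the configured encoder.
# ---------------------------------------------------------------------------
def _example_image_features(batch_size=2):
  """Builds a dummy features dict for Image2Seq (sketch only)."""
  return {
      "image": tf.zeros([batch_size, 299, 299, 3], dtype=tf.float32),
  }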
| 4,190 | 34.516949 | 77 | py |
seq2seq | seq2seq-master/seq2seq/models/attention_seq2seq.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sequence to Sequence model with attention
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import tensorflow as tf
from seq2seq import decoders
from seq2seq.models.basic_seq2seq import BasicSeq2Seq
class AttentionSeq2Seq(BasicSeq2Seq):
"""Sequence2Sequence model with attention mechanism.
Args:
source_vocab_info: An instance of `VocabInfo`
for the source vocabulary
target_vocab_info: An instance of `VocabInfo`
for the target vocabulary
params: A dictionary of hyperparameters
"""
def __init__(self, params, mode, name="att_seq2seq"):
super(AttentionSeq2Seq, self).__init__(params, mode, name)
@staticmethod
def default_params():
params = BasicSeq2Seq.default_params().copy()
params.update({
"attention.class": "AttentionLayerBahdanau",
"attention.params": {}, # Arbitrary attention layer parameters
"bridge.class": "seq2seq.models.bridges.ZeroBridge",
"encoder.class": "seq2seq.encoders.BidirectionalRNNEncoder",
"encoder.params": {}, # Arbitrary parameters for the encoder
"decoder.class": "seq2seq.decoders.AttentionDecoder",
"decoder.params": {} # Arbitrary parameters for the decoder
})
return params
def _create_decoder(self, encoder_output, features, _labels):
attention_class = locate(self.params["attention.class"]) or \
getattr(decoders.attention, self.params["attention.class"])
attention_layer = attention_class(
params=self.params["attention.params"], mode=self.mode)
# If the input sequence is reversed we also need to reverse
# the attention scores.
reverse_scores_lengths = None
if self.params["source.reverse"]:
reverse_scores_lengths = features["source_len"]
if self.use_beam_search:
reverse_scores_lengths = tf.tile(
input=reverse_scores_lengths,
multiples=[self.params["inference.beam_search.beam_width"]])
return self.decoder_class(
params=self.params["decoder.params"],
mode=self.mode,
vocab_size=self.target_vocab_info.total_size,
attention_values=encoder_output.attention_values,
attention_values_length=encoder_output.attention_values_length,
attention_keys=encoder_output.outputs,
attention_fn=attention_layer,
reverse_scores_lengths=reverse_scores_lengths)
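# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): overriding the attention
# mechanism through the model parameters. The class names are taken from the
# defaults above; the layer size is an arbitrary example value.
# ---------------------------------------------------------------------------
def _example_attention_overrides():
  """Returns example overrides for AttentionSeq2Seq.default_params()."""
  return {
      "attention.class": "AttentionLayerBahdanau",   # or "AttentionLayerDot"
      "attention.params": {"num_units": 128},
      "encoder.class": "seq2seq.encoders.BidirectionalRNNEncoder",
      "decoder.class": "seq2seq.decoders.AttentionDecoder",
  }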
| 3,073 | 35.595238 | 74 | py |
seq2seq | seq2seq-master/seq2seq/models/basic_seq2seq.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of a basic seq2seq model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import tensorflow as tf
from seq2seq.contrib.seq2seq import helper as tf_decode_helper
from seq2seq.models.seq2seq_model import Seq2SeqModel
from seq2seq.graph_utils import templatemethod
from seq2seq.models import bridges
class BasicSeq2Seq(Seq2SeqModel):
"""Basic Sequence2Sequence model with a unidirectional encoder and decoder.
The last encoder state is used to initialize the decoder and thus both
must share the same type of RNN cell.
Args:
source_vocab_info: An instance of `VocabInfo`
for the source vocabulary
target_vocab_info: An instance of `VocabInfo`
for the target vocabulary
params: A dictionary of hyperparameters
"""
def __init__(self, params, mode, name="basic_seq2seq"):
super(BasicSeq2Seq, self).__init__(params, mode, name)
self.encoder_class = locate(self.params["encoder.class"])
self.decoder_class = locate(self.params["decoder.class"])
@staticmethod
def default_params():
params = Seq2SeqModel.default_params().copy()
params.update({
"bridge.class": "seq2seq.models.bridges.InitialStateBridge",
"bridge.params": {},
"encoder.class": "seq2seq.encoders.UnidirectionalRNNEncoder",
"encoder.params": {}, # Arbitrary parameters for the encoder
"decoder.class": "seq2seq.decoders.BasicDecoder",
"decoder.params": {} # Arbitrary parameters for the decoder
})
return params
def _create_bridge(self, encoder_outputs, decoder_state_size):
"""Creates the bridge to be used between encoder and decoder"""
bridge_class = locate(self.params["bridge.class"]) or \
getattr(bridges, self.params["bridge.class"])
return bridge_class(
encoder_outputs=encoder_outputs,
decoder_state_size=decoder_state_size,
params=self.params["bridge.params"],
mode=self.mode)
def _create_decoder(self, _encoder_output, _features, _labels):
"""Creates a decoder instance based on the passed parameters."""
return self.decoder_class(
params=self.params["decoder.params"],
mode=self.mode,
vocab_size=self.target_vocab_info.total_size)
def _decode_train(self, decoder, bridge, _encoder_output, _features, labels):
"""Runs decoding in training mode"""
target_embedded = tf.nn.embedding_lookup(self.target_embedding,
labels["target_ids"])
helper_train = tf_decode_helper.TrainingHelper(
inputs=target_embedded[:, :-1],
sequence_length=labels["target_len"] - 1)
decoder_initial_state = bridge()
return decoder(decoder_initial_state, helper_train)
def _decode_infer(self, decoder, bridge, _encoder_output, features, labels):
"""Runs decoding in inference mode"""
batch_size = self.batch_size(features, labels)
if self.use_beam_search:
batch_size = self.params["inference.beam_search.beam_width"]
target_start_id = self.target_vocab_info.special_vocab.SEQUENCE_START
helper_infer = tf_decode_helper.GreedyEmbeddingHelper(
embedding=self.target_embedding,
start_tokens=tf.fill([batch_size], target_start_id),
end_token=self.target_vocab_info.special_vocab.SEQUENCE_END)
decoder_initial_state = bridge()
return decoder(decoder_initial_state, helper_infer)
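  # Illustrative note: inference decoding starts from SEQUENCE_START tokens
  # and stops when SEQUENCE_END is produced (or when the decoder reaches its
  # maximum decode length). With beam search enabled, the decoder's batch
  # dimension is the beam width rather than the input batch size.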
@templatemethod("encode")
def encode(self, features, labels):
source_embedded = tf.nn.embedding_lookup(self.source_embedding,
features["source_ids"])
encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode)
return encoder_fn(source_embedded, features["source_len"])
@templatemethod("decode")
def decode(self, encoder_output, features, labels):
decoder = self._create_decoder(encoder_output, features, labels)
if self.use_beam_search:
decoder = self._get_beam_search_decoder(decoder)
bridge = self._create_bridge(
encoder_outputs=encoder_output,
decoder_state_size=decoder.cell.state_size)
if self.mode == tf.contrib.learn.ModeKeys.INFER:
return self._decode_infer(decoder, bridge, encoder_output, features,
labels)
else:
return self._decode_train(decoder, bridge, encoder_output, features,
labels)
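# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): instantiating a BasicSeq2Seq
# with a small GRU encoder/decoder. The cell configuration mirrors the one
# used in the pipeline tests; the vocabulary paths are hypothetical and must
# point to real vocabulary files for the model to build.
# ---------------------------------------------------------------------------
def _example_basic_seq2seq(mode=tf.contrib.learn.ModeKeys.TRAIN):
  """Creates a small BasicSeq2Seq instance (sketch only)."""
  params = {
      "embedding.dim": 10,
      "encoder.params": {"rnn_cell": {"cell_class": "GRUCell",
                                      "cell_params": {"num_units": 8}}},
      "decoder.params": {"rnn_cell": {"cell_class": "GRUCell",
                                      "cell_params": {"num_units": 8}}},
      "vocab_source": "/path/to/vocab_source.txt",   # hypothetical path
      "vocab_target": "/path/to/vocab_target.txt",   # hypothetical path
  }
  return BasicSeq2Seq(params=params, mode=mode)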
| 5,092 | 39.420635 | 79 | py |
seq2seq | seq2seq-master/seq2seq/models/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains various Encoder-Decoder models
"""
from seq2seq.models.basic_seq2seq import BasicSeq2Seq
from seq2seq.models.attention_seq2seq import AttentionSeq2Seq
from seq2seq.models.image2seq import Image2Seq
import seq2seq.models.bridges
import seq2seq.models.model_base
| 863 | 36.565217 | 74 | py |
seq2seq | seq2seq-master/seq2seq/test/pipeline_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test cases for the end-to-end training and inference pipeline.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import imp
import os
import shutil
import tempfile
import yaml
import numpy as np
import tensorflow as tf
from tensorflow import gfile
from seq2seq.test import utils as test_utils
BIN_FOLDER = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../bin"))
def _clear_flags():
"""Resets Tensorflow's FLAG values"""
#pylint: disable=W0212
tf.app.flags.FLAGS = tf.app.flags._FlagValues()
tf.app.flags._global_parser = argparse.ArgumentParser()
class PipelineTest(tf.test.TestCase):
"""Tests training and inference scripts.
"""
def setUp(self):
super(PipelineTest, self).setUp()
self.output_dir = tempfile.mkdtemp()
self.bin_folder = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../bin"))
tf.contrib.framework.get_or_create_global_step()
def tearDown(self):
shutil.rmtree(self.output_dir, ignore_errors=True)
super(PipelineTest, self).tearDown()
def test_train_infer(self):
"""Tests training and inference scripts.
"""
# Create dummy data
sources_train, targets_train = test_utils.create_temp_parallel_data(
sources=["a a a a", "b b b b", "c c c c", "笑 笑 笑 笑"],
targets=["b b b b", "a a a a", "c c c c", "泣 泣 泣 泣"])
sources_dev, targets_dev = test_utils.create_temp_parallel_data(
sources=["a a", "b b", "c c c", "笑 笑 笑"],
targets=["b b", "a a", "c c c", "泣 泣 泣"])
vocab_source = test_utils.create_temporary_vocab_file(["a", "b", "c", "笑"])
vocab_target = test_utils.create_temporary_vocab_file(["a", "b", "c", "泣"])
_clear_flags()
tf.reset_default_graph()
train_script = imp.load_source("seq2seq.test.train_bin",
os.path.join(BIN_FOLDER, "train.py"))
# Set training flags
tf.app.flags.FLAGS.output_dir = self.output_dir
tf.app.flags.FLAGS.hooks = """
- class: PrintModelAnalysisHook
- class: MetadataCaptureHook
- class: TrainSampleHook
"""
tf.app.flags.FLAGS.metrics = """
- class: LogPerplexityMetricSpec
- class: BleuMetricSpec
- class: RougeMetricSpec
params:
rouge_type: rouge_1/f_score
"""
tf.app.flags.FLAGS.model = "AttentionSeq2Seq"
tf.app.flags.FLAGS.model_params = """
attention.params:
num_units: 10
vocab_source: {}
vocab_target: {}
""".format(vocab_source.name, vocab_target.name)
tf.app.flags.FLAGS.batch_size = 2
# We pass a few flags via a config file
config_path = os.path.join(self.output_dir, "train_config.yml")
with gfile.GFile(config_path, "w") as config_file:
yaml.dump({
"input_pipeline_train": {
"class": "ParallelTextInputPipeline",
"params": {
"source_files": [sources_train.name],
"target_files": [targets_train.name],
}
},
"input_pipeline_dev": {
"class": "ParallelTextInputPipeline",
"params": {
"source_files": [sources_dev.name],
"target_files": [targets_dev.name],
}
},
"train_steps": 50,
"model_params": {
"embedding.dim": 10,
"decoder.params": {
"rnn_cell": {
"cell_class": "GRUCell",
"cell_params": {
"num_units": 8
}
}
},
"encoder.params": {
"rnn_cell": {
"cell_class": "GRUCell",
"cell_params": {
"num_units": 8
}
}
}
}
}, config_file)
tf.app.flags.FLAGS.config_paths = config_path
# Run training
tf.logging.set_verbosity(tf.logging.INFO)
train_script.main([])
# Make sure a checkpoint was written
expected_checkpoint = os.path.join(self.output_dir,
"model.ckpt-50.data-00000-of-00001")
self.assertTrue(os.path.exists(expected_checkpoint))
# Reset flags and import inference script
_clear_flags()
tf.reset_default_graph()
infer_script = imp.load_source("seq2seq.test.infer_bin",
os.path.join(BIN_FOLDER, "infer.py"))
# Set inference flags
attention_dir = os.path.join(self.output_dir, "att")
tf.app.flags.FLAGS.model_dir = self.output_dir
tf.app.flags.FLAGS.input_pipeline = """
class: ParallelTextInputPipeline
params:
source_files:
- {}
target_files:
- {}
""".format(sources_dev.name, targets_dev.name)
tf.app.flags.FLAGS.batch_size = 2
tf.app.flags.FLAGS.checkpoint_path = os.path.join(self.output_dir,
"model.ckpt-50")
# Use DecodeText Task
tf.app.flags.FLAGS.tasks = """
- class: DecodeText
- class: DumpAttention
params:
output_dir: {}
""".format(attention_dir)
# Make sure inference runs successfully
infer_script.main([])
# Make sure attention scores and visualizations exist
self.assertTrue(
os.path.exists(os.path.join(attention_dir, "attention_scores.npz")))
self.assertTrue(os.path.exists(os.path.join(attention_dir, "00002.png")))
# Load attention scores and assert shape
scores = np.load(os.path.join(attention_dir, "attention_scores.npz"))
self.assertIn("arr_0", scores)
self.assertEqual(scores["arr_0"].shape[1], 3)
self.assertIn("arr_1", scores)
self.assertEqual(scores["arr_1"].shape[1], 3)
self.assertIn("arr_2", scores)
self.assertEqual(scores["arr_2"].shape[1], 4)
self.assertIn("arr_3", scores)
self.assertEqual(scores["arr_3"].shape[1], 4)
# Test inference with beam search
_clear_flags()
tf.reset_default_graph()
infer_script = imp.load_source("seq2seq.test.infer_bin",
os.path.join(BIN_FOLDER, "infer.py"))
# Set inference flags
tf.app.flags.FLAGS.model_dir = self.output_dir
tf.app.flags.FLAGS.input_pipeline = """
class: ParallelTextInputPipeline
params:
source_files:
- {}
target_files:
- {}
""".format(sources_dev.name, targets_dev.name)
tf.app.flags.FLAGS.batch_size = 2
tf.app.flags.FLAGS.checkpoint_path = os.path.join(self.output_dir,
"model.ckpt-50")
tf.app.flags.FLAGS.model_params = """
inference.beam_search.beam_width: 5
"""
tf.app.flags.FLAGS.tasks = """
- class: DecodeText
params:
postproc_fn: seq2seq.data.postproc.decode_sentencepiece
- class: DumpBeams
params:
file: {}
""".format(os.path.join(self.output_dir, "beams.npz"))
# Run inference w/ beam search
infer_script.main([])
self.assertTrue(os.path.exists(os.path.join(self.output_dir, "beams.npz")))
if __name__ == "__main__":
tf.test.main()
| 7,879 | 31.697095 | 79 | py |
seq2seq | seq2seq-master/seq2seq/test/losses_test.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for loss-related operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from seq2seq import losses as seq2seq_losses
import tensorflow as tf
import numpy as np
class CrossEntropySequenceLossTest(tf.test.TestCase):
"""
  Tests for `seq2seq.losses.cross_entropy_sequence_loss`.
"""
def setUp(self):
super(CrossEntropySequenceLossTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_size = 4
self.sequence_length = 10
self.vocab_size = 50
def test_op(self):
logits = np.random.randn(self.sequence_length, self.batch_size,
self.vocab_size)
logits = logits.astype(np.float32)
sequence_length = np.array([1, 2, 3, 4])
targets = np.random.randint(0, self.vocab_size,
[self.sequence_length, self.batch_size])
losses = seq2seq_losses.cross_entropy_sequence_loss(logits, targets,
sequence_length)
with self.test_session() as sess:
losses_ = sess.run(losses)
# Make sure all losses not past the sequence length are > 0
np.testing.assert_array_less(np.zeros_like(losses_[:1, 0]), losses_[:1, 0])
np.testing.assert_array_less(np.zeros_like(losses_[:2, 1]), losses_[:2, 1])
np.testing.assert_array_less(np.zeros_like(losses_[:3, 2]), losses_[:3, 2])
# Make sure all losses past the sequence length are 0
np.testing.assert_array_equal(losses_[1:, 0], np.zeros_like(losses_[1:, 0]))
np.testing.assert_array_equal(losses_[2:, 1], np.zeros_like(losses_[2:, 1]))
np.testing.assert_array_equal(losses_[3:, 2], np.zeros_like(losses_[3:, 2]))
if __name__ == "__main__":
tf.test.main()
| 2,389 | 35.212121 | 80 | py |
seq2seq | seq2seq-master/seq2seq/test/hooks_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SessionRunHooks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import tempfile
import shutil
import time
import tensorflow as tf
from tensorflow.python.training import monitored_session # pylint: disable=E0611
from tensorflow import gfile
from seq2seq import graph_utils
from seq2seq.training import hooks
class TestPrintModelAnalysisHook(tf.test.TestCase):
"""Tests the `PrintModelAnalysisHook` hook"""
def test_begin(self):
model_dir = tempfile.mkdtemp()
outfile = tempfile.NamedTemporaryFile()
tf.get_variable("weigths", [128, 128])
hook = hooks.PrintModelAnalysisHook(
params={}, model_dir=model_dir, run_config=tf.contrib.learn.RunConfig())
hook.begin()
with gfile.GFile(os.path.join(model_dir, "model_analysis.txt")) as file:
file_contents = file.read().strip()
self.assertEqual(file_contents.decode(), "_TFProfRoot (--/16.38k params)\n"
" weigths (128x128, 16.38k/16.38k params)")
outfile.close()
class TestTrainSampleHook(tf.test.TestCase):
"""Tests `TrainSampleHook` class.
"""
def setUp(self):
super(TestTrainSampleHook, self).setUp()
self.model_dir = tempfile.mkdtemp()
self.sample_dir = os.path.join(self.model_dir, "samples")
# The hook expects these collections to be in the graph
pred_dict = {}
pred_dict["predicted_tokens"] = tf.constant([["Hello", "World", "笑w"]])
pred_dict["labels.target_tokens"] = tf.constant([["Hello", "World", "笑w"]])
pred_dict["labels.target_len"] = tf.constant(2),
graph_utils.add_dict_to_collection(pred_dict, "predictions")
def tearDown(self):
super(TestTrainSampleHook, self).tearDown()
shutil.rmtree(self.model_dir)
def test_sampling(self):
hook = hooks.TrainSampleHook(
params={"every_n_steps": 10}, model_dir=self.model_dir,
run_config=tf.contrib.learn.RunConfig())
global_step = tf.contrib.framework.get_or_create_global_step()
no_op = tf.no_op()
hook.begin()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
#pylint: disable=W0212
mon_sess = monitored_session._HookedSession(sess, [hook])
# Should trigger for step 0
sess.run(tf.assign(global_step, 0))
mon_sess.run(no_op)
outfile = os.path.join(self.sample_dir, "samples_000000.txt")
with open(outfile, "rb") as readfile:
self.assertIn("Prediction followed by Target @ Step 0",
readfile.read().decode("utf-8"))
# Should not trigger for step 9
sess.run(tf.assign(global_step, 9))
mon_sess.run(no_op)
outfile = os.path.join(self.sample_dir, "samples_000009.txt")
self.assertFalse(os.path.exists(outfile))
# Should trigger for step 10
sess.run(tf.assign(global_step, 10))
mon_sess.run(no_op)
outfile = os.path.join(self.sample_dir, "samples_000010.txt")
with open(outfile, "rb") as readfile:
self.assertIn("Prediction followed by Target @ Step 10",
readfile.read().decode("utf-8"))
class TestMetadataCaptureHook(tf.test.TestCase):
"""Test for the MetadataCaptureHook"""
def setUp(self):
super(TestMetadataCaptureHook, self).setUp()
self.model_dir = tempfile.mkdtemp()
def tearDown(self):
super(TestMetadataCaptureHook, self).tearDown()
shutil.rmtree(self.model_dir)
def test_capture(self):
global_step = tf.contrib.framework.get_or_create_global_step()
# Some test computation
some_weights = tf.get_variable("weigths", [2, 128])
computation = tf.nn.softmax(some_weights)
hook = hooks.MetadataCaptureHook(
params={"step": 5}, model_dir=self.model_dir,
run_config=tf.contrib.learn.RunConfig())
hook.begin()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
#pylint: disable=W0212
mon_sess = monitored_session._HookedSession(sess, [hook])
# Should not trigger for step 0
sess.run(tf.assign(global_step, 0))
mon_sess.run(computation)
self.assertEqual(gfile.ListDirectory(self.model_dir), [])
# Should trigger *after* step 5
sess.run(tf.assign(global_step, 5))
mon_sess.run(computation)
self.assertEqual(gfile.ListDirectory(self.model_dir), [])
mon_sess.run(computation)
self.assertEqual(
set(gfile.ListDirectory(self.model_dir)),
set(["run_meta", "tfprof_log", "timeline.json"]))
if __name__ == "__main__":
tf.test.main()
| 5,319 | 33.322581 | 81 | py |
seq2seq | seq2seq-master/seq2seq/test/decoder_test.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Cases for decoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.decoders import BasicDecoder, AttentionDecoder, AttentionLayerDot
from seq2seq.decoders import beam_search_decoder
from seq2seq.inference import beam_search
from seq2seq.contrib.seq2seq import helper as decode_helper
class DecoderTests(object):
"""
A collection of decoder tests. This class should be inherited together with
`tf.test.TestCase`.
"""
def __init__(self):
self.batch_size = 4
self.sequence_length = 16
self.input_depth = 10
self.vocab_size = 100
self.max_decode_length = 20
def create_decoder(self, helper, mode):
"""Creates the decoder module.
This must be implemented by child classes and instantiate the appropriate
decoder to be tested.
"""
raise NotImplementedError
def test_with_fixed_inputs(self):
inputs = tf.random_normal(
[self.batch_size, self.sequence_length, self.input_depth])
seq_length = tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length
helper = decode_helper.TrainingHelper(
inputs=inputs, sequence_length=seq_length)
decoder_fn = self.create_decoder(
helper=helper, mode=tf.contrib.learn.ModeKeys.TRAIN)
initial_state = decoder_fn.cell.zero_state(
self.batch_size, dtype=tf.float32)
decoder_output, _ = decoder_fn(initial_state, helper)
#pylint: disable=E1101
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
decoder_output_ = sess.run(decoder_output)
np.testing.assert_array_equal(
decoder_output_.logits.shape,
[self.sequence_length, self.batch_size, self.vocab_size])
np.testing.assert_array_equal(decoder_output_.predicted_ids.shape,
[self.sequence_length, self.batch_size])
return decoder_output_
def test_gradients(self):
inputs = tf.random_normal(
[self.batch_size, self.sequence_length, self.input_depth])
seq_length = tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length
labels = np.random.randint(0, self.vocab_size,
[self.batch_size, self.sequence_length])
helper = decode_helper.TrainingHelper(
inputs=inputs, sequence_length=seq_length)
decoder_fn = self.create_decoder(
helper=helper, mode=tf.contrib.learn.ModeKeys.TRAIN)
initial_state = decoder_fn.cell.zero_state(
self.batch_size, dtype=tf.float32)
decoder_output, _ = decoder_fn(initial_state, helper)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=decoder_output.logits, labels=labels)
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
grads_and_vars = optimizer.compute_gradients(tf.reduce_mean(losses))
#pylint: disable=E1101
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
grads_and_vars_ = sess.run(grads_and_vars)
for grad, _ in grads_and_vars_:
self.assertFalse(np.isnan(grad).any())
return grads_and_vars_
def test_with_dynamic_inputs(self):
embeddings = tf.get_variable("W_embed", [self.vocab_size, self.input_depth])
helper = decode_helper.GreedyEmbeddingHelper(
embedding=embeddings, start_tokens=[0] * self.batch_size, end_token=-1)
decoder_fn = self.create_decoder(
helper=helper, mode=tf.contrib.learn.ModeKeys.INFER)
initial_state = decoder_fn.cell.zero_state(
self.batch_size, dtype=tf.float32)
decoder_output, _ = decoder_fn(initial_state, helper)
#pylint: disable=E1101
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
decoder_output_ = sess.run(decoder_output)
np.testing.assert_array_equal(
decoder_output_.logits.shape,
[self.max_decode_length, self.batch_size, self.vocab_size])
np.testing.assert_array_equal(decoder_output_.predicted_ids.shape,
[self.max_decode_length, self.batch_size])
def test_with_beam_search(self):
self.batch_size = 1
# Batch size for beam search must be 1.
config = beam_search.BeamSearchConfig(
beam_width=10,
vocab_size=self.vocab_size,
eos_token=self.vocab_size - 2,
length_penalty_weight=0.6,
choose_successors_fn=beam_search.choose_top_k)
embeddings = tf.get_variable("W_embed", [self.vocab_size, self.input_depth])
helper = decode_helper.GreedyEmbeddingHelper(
embedding=embeddings,
start_tokens=[0] * config.beam_width,
end_token=-1)
decoder_fn = self.create_decoder(
helper=helper, mode=tf.contrib.learn.ModeKeys.INFER)
decoder_fn = beam_search_decoder.BeamSearchDecoder(
decoder=decoder_fn, config=config)
initial_state = decoder_fn.cell.zero_state(
self.batch_size, dtype=tf.float32)
decoder_output, _ = decoder_fn(initial_state, helper)
#pylint: disable=E1101
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
decoder_output_ = sess.run(decoder_output)
np.testing.assert_array_equal(
decoder_output_.predicted_ids.shape,
[self.max_decode_length, 1, config.beam_width])
np.testing.assert_array_equal(
decoder_output_.beam_search_output.beam_parent_ids.shape,
[self.max_decode_length, 1, config.beam_width])
np.testing.assert_array_equal(
decoder_output_.beam_search_output.scores.shape,
[self.max_decode_length, 1, config.beam_width])
np.testing.assert_array_equal(
decoder_output_.beam_search_output.original_outputs.predicted_ids.shape,
[self.max_decode_length, 1, config.beam_width])
np.testing.assert_array_equal(
decoder_output_.beam_search_output.original_outputs.logits.shape,
[self.max_decode_length, 1, config.beam_width, self.vocab_size])
return decoder_output
class BasicDecoderTest(tf.test.TestCase, DecoderTests):
"""Tests the `BasicDecoder` class.
"""
def setUp(self):
tf.test.TestCase.setUp(self)
tf.logging.set_verbosity(tf.logging.INFO)
DecoderTests.__init__(self)
def create_decoder(self, helper, mode):
params = BasicDecoder.default_params()
params["max_decode_length"] = self.max_decode_length
decoder = BasicDecoder(params=params, mode=mode, vocab_size=self.vocab_size)
return decoder
class AttentionDecoderTest(tf.test.TestCase, DecoderTests):
"""Tests the `AttentionDecoder` class.
"""
def setUp(self):
tf.test.TestCase.setUp(self)
tf.logging.set_verbosity(tf.logging.INFO)
DecoderTests.__init__(self)
self.attention_dim = 64
self.input_seq_len = 10
def create_decoder(self, helper, mode):
attention_fn = AttentionLayerDot(
params={"num_units": self.attention_dim},
mode=tf.contrib.learn.ModeKeys.TRAIN)
attention_values = tf.convert_to_tensor(
np.random.randn(self.batch_size, self.input_seq_len, 32),
dtype=tf.float32)
attention_keys = tf.convert_to_tensor(
np.random.randn(self.batch_size, self.input_seq_len, 32),
dtype=tf.float32)
params = AttentionDecoder.default_params()
params["max_decode_length"] = self.max_decode_length
return AttentionDecoder(
params=params,
mode=mode,
vocab_size=self.vocab_size,
attention_keys=attention_keys,
attention_values=attention_values,
attention_values_length=np.arange(self.batch_size) + 1,
attention_fn=attention_fn)
def test_attention_scores(self):
decoder_output_ = self.test_with_fixed_inputs()
np.testing.assert_array_equal(
decoder_output_.attention_scores.shape,
[self.sequence_length, self.batch_size, self.input_seq_len])
# Make sure the attention scores sum to 1 for each step
scores_sum = np.sum(decoder_output_.attention_scores, axis=2)
np.testing.assert_array_almost_equal(
scores_sum, np.ones([self.sequence_length, self.batch_size]))
if __name__ == "__main__":
tf.test.main()
| 8,788 | 35.020492 | 80 | py |
seq2seq | seq2seq-master/seq2seq/test/train_utils_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Cases for Training utils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tempfile
import tensorflow as tf
import numpy as np
from seq2seq.contrib import rnn_cell
from seq2seq.data import input_pipeline
from seq2seq.test import utils as test_utils
from seq2seq.training import utils as training_utils
class TestGetRNNCell(tf.test.TestCase):
"""Tests the get_rnn_cell function.
"""
def test_single_layer(self):
cell = training_utils.get_rnn_cell(
cell_class="BasicLSTMCell", cell_params={"num_units": 16}, num_layers=1)
self.assertIsInstance(cell, tf.contrib.rnn.BasicLSTMCell)
self.assertEqual(cell.output_size, 16)
def test_multi_layer(self):
cell = training_utils.get_rnn_cell(
cell_class="BasicLSTMCell", cell_params={"num_units": 16}, num_layers=2)
self.assertIsInstance(cell, rnn_cell.ExtendedMultiRNNCell)
self.assertEqual(cell.output_size, 16)
def test_full_class_path(self):
cell = training_utils.get_rnn_cell(
cell_class="tensorflow.contrib.rnn.BasicRNNCell",
cell_params={"num_units": 16},
num_layers=1)
self.assertIsInstance(cell, tf.contrib.rnn.BasicRNNCell)
self.assertEqual(cell.output_size, 16)
def test_dropout(self):
cell = training_utils.get_rnn_cell(
cell_class="BasicLSTMCell",
cell_params={"num_units": 16},
num_layers=1,
dropout_input_keep_prob=0.5)
self.assertIsInstance(cell, tf.contrib.rnn.DropoutWrapper)
self.assertEqual(cell.output_size, 16)
def test_extra_args(self):
# Invalid args should raise a ValueError
with self.assertRaises(ValueError):
training_utils.get_rnn_cell(
cell_class="LSTMCell",
cell_params={"num_units": 16,
"use_peepholesERROR": True},
num_layers=1)
cell = training_utils.get_rnn_cell(
cell_class="LSTMCell",
cell_params={"num_units": 8,
"use_peepholes": True,
"forget_bias": 0.5},
num_layers=1)
self.assertIsInstance(cell, tf.contrib.rnn.LSTMCell)
#pylint: disable=E1101,W0212
self.assertEqual(cell._use_peepholes, True)
self.assertEqual(cell._forget_bias, 0.5)
self.assertEqual(cell.output_size, 8)
class TestTrainOptions(tf.test.TestCase):
"""Tests reading and writing of training options"""
def setUp(self):
super(TestTrainOptions, self).setUp()
self.model_dir = tempfile.mkdtemp()
self.model_params = {"num_layers": 4}
self.model_class = "AttentionSeq2Seq"
def test_read_write(self):
saved_opts = training_utils.TrainOptions(
model_class=self.model_class, model_params=self.model_params)
saved_opts.dump(self.model_dir)
loaded_opt = training_utils.TrainOptions.load(model_dir=self.model_dir)
self.assertEqual(saved_opts.model_params, loaded_opt.model_params)
self.assertEqual(saved_opts.model_class, loaded_opt.model_class)
class TestInputFn(tf.test.TestCase):
"""Tests create_input_fn"""
def _test_with_args(self, **kwargs):
"""Helper function to test create_input_fn with keyword arguments"""
sources_file, targets_file = test_utils.create_temp_parallel_data(
sources=["Hello World ."], targets=["Goodbye ."])
pipeline = input_pipeline.ParallelTextInputPipeline(
params={
"source_files": [sources_file.name],
"target_files": [targets_file.name]
},
mode=tf.contrib.learn.ModeKeys.TRAIN)
input_fn = training_utils.create_input_fn(pipeline=pipeline, **kwargs)
features, labels = input_fn()
with self.test_session() as sess:
with tf.contrib.slim.queues.QueueRunners(sess):
features_, labels_ = sess.run([features, labels])
self.assertEqual(
set(features_.keys()), set(["source_tokens", "source_len"]))
self.assertEqual(set(labels_.keys()), set(["target_tokens", "target_len"]))
def test_without_buckets(self):
self._test_with_args(batch_size=10)
  def test_with_buckets(self):
self._test_with_args(batch_size=10, bucket_boundaries=[0, 5, 10])
class TestLRDecay(tf.test.TestCase):
"""Tests learning rate decay function.
"""
def test_no_decay(self):
decay_fn = training_utils.create_learning_rate_decay_fn(
decay_type=None, decay_steps=5, decay_rate=2.0)
self.assertEqual(decay_fn, None)
decay_fn = training_utils.create_learning_rate_decay_fn(
decay_type="", decay_steps=5, decay_rate=2.0)
self.assertEqual(decay_fn, None)
def test_decay_without_min(self):
decay_fn = training_utils.create_learning_rate_decay_fn(
decay_type="exponential_decay",
decay_steps=10,
decay_rate=0.9,
start_decay_at=100,
stop_decay_at=1000,
staircase=False)
initial_lr = 1.0
with self.test_session() as sess:
# Should not decay before start_decay_at
np.testing.assert_equal(sess.run(decay_fn(initial_lr, 50)), initial_lr)
# Proper decay
np.testing.assert_almost_equal(
sess.run(decay_fn(initial_lr, 115)), initial_lr * 0.9**(15.0 / 10.0))
# Should not decay past stop_decay_at
np.testing.assert_almost_equal(
sess.run(decay_fn(initial_lr, 5000)), initial_lr * 0.9**(
(1000.0 - 100.0) / 10.0))
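  # Illustrative note (derived from the assertions above): with
  # "exponential_decay" and staircase=False the expected schedule is
  #   lr(step) = initial_lr * decay_rate ** ((step - start_decay_at) / decay_steps)
  # where `step` is effectively clipped to [start_decay_at, stop_decay_at].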
def test_decay_with_min(self):
decay_fn = training_utils.create_learning_rate_decay_fn(
decay_type="exponential_decay",
decay_steps=10,
decay_rate=0.9,
start_decay_at=100,
stop_decay_at=1000.0,
min_learning_rate=0.01,
staircase=False)
initial_lr = 1.0
with self.test_session() as sess:
# Should not decay past min_learning_rate
np.testing.assert_almost_equal(sess.run(decay_fn(initial_lr, 900)), 0.01)
if __name__ == '__main__':
tf.test.main()
| 6,539 | 32.88601 | 80 | py |
seq2seq | seq2seq-master/seq2seq/test/vocab_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for input-related operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.data import vocab
from seq2seq.test import utils as test_utils
class VocabInfoTest(tf.test.TestCase):
"""Tests VocabInfo class"""
def setUp(self):
super(VocabInfoTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.vocab_list = ["Hello", ".", "Bye"]
self.vocab_file = test_utils.create_temporary_vocab_file(self.vocab_list)
def tearDown(self):
super(VocabInfoTest, self).tearDown()
self.vocab_file.close()
def test_vocab_info(self):
vocab_info = vocab.get_vocab_info(self.vocab_file.name)
self.assertEqual(vocab_info.vocab_size, 3)
self.assertEqual(vocab_info.path, self.vocab_file.name)
self.assertEqual(vocab_info.special_vocab.UNK, 3)
self.assertEqual(vocab_info.special_vocab.SEQUENCE_START, 4)
self.assertEqual(vocab_info.special_vocab.SEQUENCE_END, 5)
self.assertEqual(vocab_info.total_size, 6)
class CreateVocabularyLookupTableTest(tf.test.TestCase):
"""
Tests Vocabulary lookup table operations.
"""
def test_without_counts(self):
vocab_list = ["Hello", ".", "笑"]
vocab_file = test_utils.create_temporary_vocab_file(vocab_list)
vocab_to_id_table, id_to_vocab_table, _, vocab_size = \
vocab.create_vocabulary_lookup_table(vocab_file.name)
self.assertEqual(vocab_size, 6)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
ids = vocab_to_id_table.lookup(
tf.convert_to_tensor(["Hello", ".", "笑", "??", "xxx"]))
ids = sess.run(ids)
np.testing.assert_array_equal(ids, [0, 1, 2, 3, 3])
words = id_to_vocab_table.lookup(
tf.convert_to_tensor(
[0, 1, 2, 3], dtype=tf.int64))
words = sess.run(words)
np.testing.assert_array_equal(
np.char.decode(words.astype("S"), "utf-8"),
["Hello", ".", "笑", "UNK"])
def test_with_counts(self):
vocab_list = ["Hello", ".", "笑"]
vocab_counts = [100, 200, 300]
vocab_file = test_utils.create_temporary_vocab_file(vocab_list,
vocab_counts)
vocab_to_id_table, id_to_vocab_table, word_to_count_table, vocab_size = \
vocab.create_vocabulary_lookup_table(vocab_file.name)
self.assertEqual(vocab_size, 6)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
ids = vocab_to_id_table.lookup(
tf.convert_to_tensor(["Hello", ".", "笑", "??", "xxx"]))
ids = sess.run(ids)
np.testing.assert_array_equal(ids, [0, 1, 2, 3, 3])
words = id_to_vocab_table.lookup(
tf.convert_to_tensor(
[0, 1, 2, 3], dtype=tf.int64))
words = sess.run(words)
np.testing.assert_array_equal(
np.char.decode(words.astype("S"), "utf-8"),
["Hello", ".", "笑", "UNK"])
counts = word_to_count_table.lookup(
tf.convert_to_tensor(["Hello", ".", "笑", "??", "xxx"]))
counts = sess.run(counts)
np.testing.assert_array_equal(counts, [100, 200, 300, -1, -1])
if __name__ == "__main__":
tf.test.main()
| 4,111 | 32.430894 | 77 | py |
seq2seq | seq2seq-master/seq2seq/test/attention_test.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for attention functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.decoders.attention import AttentionLayerDot
from seq2seq.decoders.attention import AttentionLayerBahdanau
class AttentionLayerTest(tf.test.TestCase):
"""
Tests the AttentionLayer module.
"""
def setUp(self):
super(AttentionLayerTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_size = 8
self.attention_dim = 128
self.input_dim = 16
self.seq_len = 10
self.state_dim = 32
def _create_layer(self):
"""Creates the attention layer. Should be implemented by child classes"""
raise NotImplementedError
def _test_layer(self):
"""Tests Attention layer with a given score type"""
inputs_pl = tf.placeholder(tf.float32, (None, None, self.input_dim))
inputs_length_pl = tf.placeholder(tf.int32, [None])
state_pl = tf.placeholder(tf.float32, (None, self.state_dim))
attention_fn = self._create_layer()
scores, context = attention_fn(
query=state_pl,
keys=inputs_pl,
values=inputs_pl,
values_length=inputs_length_pl)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {}
feed_dict[inputs_pl] = np.random.randn(self.batch_size, self.seq_len,
self.input_dim)
feed_dict[state_pl] = np.random.randn(self.batch_size, self.state_dim)
feed_dict[inputs_length_pl] = np.arange(self.batch_size) + 1
scores_, context_ = sess.run([scores, context], feed_dict)
np.testing.assert_array_equal(scores_.shape,
[self.batch_size, self.seq_len])
np.testing.assert_array_equal(context_.shape,
[self.batch_size, self.input_dim])
for idx, batch in enumerate(scores_, 1):
# All scores that are padded should be zero
np.testing.assert_array_equal(batch[idx:], np.zeros_like(batch[idx:]))
# Scores should sum to 1
scores_sum = np.sum(scores_, axis=1)
np.testing.assert_array_almost_equal(scores_sum, np.ones([self.batch_size]))
class AttentionLayerDotTest(AttentionLayerTest):
"""Tests the AttentionLayerDot class"""
def _create_layer(self):
return AttentionLayerDot(
params={"num_units": self.attention_dim},
mode=tf.contrib.learn.ModeKeys.TRAIN)
def test_layer(self):
self._test_layer()
class AttentionLayerBahdanauTest(AttentionLayerTest):
"""Tests the AttentionLayerBahdanau class"""
def _create_layer(self):
return AttentionLayerBahdanau(
params={"num_units": self.attention_dim},
mode=tf.contrib.learn.ModeKeys.TRAIN)
def test_layer(self):
self._test_layer()
if __name__ == "__main__":
tf.test.main()
| 3,532 | 31.412844 | 80 | py |
seq2seq | seq2seq-master/seq2seq/test/pooling_encoder_test.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Cases for PoolingEncoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.encoders import PoolingEncoder
class PoolingEncoderTest(tf.test.TestCase):
"""
Tests the PoolingEncoder class.
"""
def setUp(self):
super(PoolingEncoderTest, self).setUp()
self.batch_size = 4
self.sequence_length = 16
self.input_depth = 10
self.mode = tf.contrib.learn.ModeKeys.TRAIN
def _test_with_params(self, params):
"""Tests the encoder with a given parameter configuration"""
inputs = tf.random_normal(
[self.batch_size, self.sequence_length, self.input_depth])
example_length = tf.ones(
self.batch_size, dtype=tf.int32) * self.sequence_length
encode_fn = PoolingEncoder(params, self.mode)
encoder_output = encode_fn(inputs, example_length)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
encoder_output_ = sess.run(encoder_output)
np.testing.assert_array_equal(
encoder_output_.outputs.shape,
[self.batch_size, self.sequence_length, self.input_depth])
np.testing.assert_array_equal(
encoder_output_.attention_values.shape,
[self.batch_size, self.sequence_length, self.input_depth])
np.testing.assert_array_equal(encoder_output_.final_state.shape,
[self.batch_size, self.input_depth])
def test_encode_with_pos(self):
self._test_with_params({
"position_embeddings.enable": True,
"position_embeddings.num_positions": self.sequence_length
})
def test_encode_without_pos(self):
self._test_with_params({
"position_embeddings.enable": False,
"position_embeddings.num_positions": 0
})
if __name__ == "__main__":
tf.test.main() | 2,502 | 31.506494 | 74 | py |
seq2seq | seq2seq-master/seq2seq/test/rnn_encoder_test.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Cases for RNN encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.encoders import rnn_encoder
class UnidirectionalRNNEncoderTest(tf.test.TestCase):
"""
Tests the UnidirectionalRNNEncoder class.
"""
def setUp(self):
super(UnidirectionalRNNEncoderTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_size = 4
self.sequence_length = 16
self.input_depth = 10
self.mode = tf.contrib.learn.ModeKeys.TRAIN
self.params = rnn_encoder.UnidirectionalRNNEncoder.default_params()
self.params["rnn_cell"]["cell_params"]["num_units"] = 32
self.params["rnn_cell"]["cell_class"] = "BasicLSTMCell"
def test_encode(self):
inputs = tf.random_normal(
[self.batch_size, self.sequence_length, self.input_depth])
example_length = tf.ones(
self.batch_size, dtype=tf.int32) * self.sequence_length
encode_fn = rnn_encoder.UnidirectionalRNNEncoder(self.params, self.mode)
encoder_output = encode_fn(inputs, example_length)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
encoder_output_ = sess.run(encoder_output)
np.testing.assert_array_equal(encoder_output_.outputs.shape,
[self.batch_size, self.sequence_length, 32])
self.assertIsInstance(encoder_output_.final_state,
tf.contrib.rnn.LSTMStateTuple)
np.testing.assert_array_equal(encoder_output_.final_state.h.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(encoder_output_.final_state.c.shape,
[self.batch_size, 32])
class BidirectionalRNNEncoderTest(tf.test.TestCase):
"""
Tests the BidirectionalRNNEncoder class.
"""
def setUp(self):
super(BidirectionalRNNEncoderTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_size = 4
self.sequence_length = 16
self.input_depth = 10
self.params = rnn_encoder.BidirectionalRNNEncoder.default_params()
self.params["rnn_cell"]["cell_params"]["num_units"] = 32
self.params["rnn_cell"]["cell_class"] = "BasicLSTMCell"
self.mode = tf.contrib.learn.ModeKeys.TRAIN
def test_encode(self):
inputs = tf.random_normal(
[self.batch_size, self.sequence_length, self.input_depth])
example_length = tf.ones(
self.batch_size, dtype=tf.int32) * self.sequence_length
encode_fn = rnn_encoder.BidirectionalRNNEncoder(self.params, self.mode)
encoder_output = encode_fn(inputs, example_length)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
encoder_output_ = sess.run(encoder_output)
np.testing.assert_array_equal(
encoder_output_.outputs.shape,
[self.batch_size, self.sequence_length, 32 * 2])
self.assertIsInstance(encoder_output_.final_state[0],
tf.contrib.rnn.LSTMStateTuple)
self.assertIsInstance(encoder_output_.final_state[1],
tf.contrib.rnn.LSTMStateTuple)
np.testing.assert_array_equal(encoder_output_.final_state[0].h.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(encoder_output_.final_state[0].c.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(encoder_output_.final_state[1].h.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(encoder_output_.final_state[1].c.shape,
[self.batch_size, 32])
class StackBidirectionalRNNEncoderTest(tf.test.TestCase):
"""
Tests the StackBidirectionalRNNEncoder class.
"""
def setUp(self):
super(StackBidirectionalRNNEncoderTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_size = 4
self.sequence_length = 16
self.input_depth = 10
self.mode = tf.contrib.learn.ModeKeys.TRAIN
def _test_encode_with_params(self, params):
"""Tests the StackBidirectionalRNNEncoder with a specific cell"""
inputs = tf.random_normal(
[self.batch_size, self.sequence_length, self.input_depth])
example_length = tf.ones(
self.batch_size, dtype=tf.int32) * self.sequence_length
encode_fn = rnn_encoder.StackBidirectionalRNNEncoder(params, self.mode)
encoder_output = encode_fn(inputs, example_length)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
encoder_output_ = sess.run(encoder_output)
output_size = encode_fn.params["rnn_cell"]["cell_params"]["num_units"]
np.testing.assert_array_equal(
encoder_output_.outputs.shape,
[self.batch_size, self.sequence_length, output_size * 2])
return encoder_output_
def test_encode_with_single_cell(self):
encoder_output_ = self._test_encode_with_params({
"rnn_cell": {
"num_layers": 1,
"cell_params": {
"num_units": 32
}
}
})
self.assertIsInstance(encoder_output_.final_state[0][0],
tf.contrib.rnn.LSTMStateTuple)
self.assertIsInstance(encoder_output_.final_state[1][0],
tf.contrib.rnn.LSTMStateTuple)
np.testing.assert_array_equal(encoder_output_.final_state[0][0].h.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(encoder_output_.final_state[0][0].c.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(encoder_output_.final_state[1][0].h.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(encoder_output_.final_state[1][0].c.shape,
[self.batch_size, 32])
def test_encode_with_multi_cell(self):
encoder_output_ = self._test_encode_with_params({
"rnn_cell": {
"num_layers": 4,
"cell_params": {
"num_units": 32
}
}
})
for layer_idx in range(4):
self.assertIsInstance(encoder_output_.final_state[0][layer_idx],
tf.contrib.rnn.LSTMStateTuple)
self.assertIsInstance(encoder_output_.final_state[1][layer_idx],
tf.contrib.rnn.LSTMStateTuple)
np.testing.assert_array_equal(
encoder_output_.final_state[0][layer_idx].h.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(
encoder_output_.final_state[0][layer_idx].c.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(
encoder_output_.final_state[1][layer_idx].h.shape,
[self.batch_size, 32])
np.testing.assert_array_equal(
encoder_output_.final_state[1][layer_idx].c.shape,
[self.batch_size, 32])
if __name__ == "__main__":
tf.test.main()
| 7,638 | 36.446078 | 78 | py |
seq2seq | seq2seq-master/seq2seq/test/bridges_test.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for Encoder-Decoder bridges.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest # pylint: disable=E0611
from seq2seq.encoders.encoder import EncoderOutput
from seq2seq.models.bridges import ZeroBridge, InitialStateBridge
from seq2seq.models.bridges import PassThroughBridge
DecoderOutput = namedtuple("DecoderOutput", ["predicted_ids"])
class BridgeTest(tf.test.TestCase):
"""Abstract class for bridge tests"""
def setUp(self):
super(BridgeTest, self).setUp()
self.batch_size = 4
self.encoder_cell = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)])
self.decoder_cell = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.LSTMCell(16), tf.contrib.rnn.GRUCell(8)])
final_encoder_state = nest.map_structure(
lambda x: tf.convert_to_tensor(
value=np.random.randn(self.batch_size, x),
dtype=tf.float32),
self.encoder_cell.state_size)
self.encoder_outputs = EncoderOutput(
outputs=tf.convert_to_tensor(
value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
attention_values=tf.convert_to_tensor(
value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
attention_values_length=np.full([self.batch_size], 10),
final_state=final_encoder_state)
def _create_bridge(self):
"""Creates the bridge class to be tests. Must be implemented by
child classes"""
raise NotImplementedError()
def _assert_correct_outputs(self):
"""Asserts bridge outputs are correct. Must be implemented by
child classes"""
raise NotImplementedError()
def _run(self, scope=None, **kwargs):
"""Runs the bridge with the given arguments
"""
with tf.variable_scope(scope or "bridge"):
bridge = self._create_bridge(**kwargs)
initial_state = bridge()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
initial_state_ = sess.run(initial_state)
return initial_state_
class TestZeroBridge(BridgeTest):
"""Tests for the ZeroBridge class"""
def _create_bridge(self, **kwargs):
return ZeroBridge(
encoder_outputs=self.encoder_outputs,
decoder_state_size=self.decoder_cell.state_size,
params=kwargs,
mode=tf.contrib.learn.ModeKeys.TRAIN)
def _assert_correct_outputs(self, initial_state_):
initial_state_flat_ = nest.flatten(initial_state_)
for element in initial_state_flat_:
np.testing.assert_array_equal(element, np.zeros_like(element))
def test_zero_bridge(self):
self._assert_correct_outputs(self._run())
class TestPassThroughBridge(BridgeTest):
"""Tests for the ZeroBridge class"""
def _create_bridge(self, **kwargs):
return PassThroughBridge(
encoder_outputs=self.encoder_outputs,
decoder_state_size=self.decoder_cell.state_size,
params=kwargs,
mode=tf.contrib.learn.ModeKeys.TRAIN)
def _assert_correct_outputs(self, initial_state_):
nest.assert_same_structure(initial_state_, self.decoder_cell.state_size)
nest.assert_same_structure(initial_state_, self.encoder_outputs.final_state)
encoder_state_flat = nest.flatten(self.encoder_outputs.final_state)
with self.test_session() as sess:
encoder_state_flat_ = sess.run(encoder_state_flat)
initial_state_flat_ = nest.flatten(initial_state_)
for e_dec, e_enc in zip(initial_state_flat_, encoder_state_flat_):
np.testing.assert_array_equal(e_dec, e_enc)
def test_passthrough_bridge(self):
self.decoder_cell = self.encoder_cell
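    # PassThroughBridge hands the encoder's final state to the decoder
    # unchanged, so both cells must share the same state structure; reusing
    # the encoder cell here keeps the two structures identical.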
self._assert_correct_outputs(self._run())
class TestInitialStateBridge(BridgeTest):
"""Tests for the InitialStateBridge class"""
def _create_bridge(self, **kwargs):
return InitialStateBridge(
encoder_outputs=self.encoder_outputs,
decoder_state_size=self.decoder_cell.state_size,
params=kwargs,
mode=tf.contrib.learn.ModeKeys.TRAIN)
def _assert_correct_outputs(self, initial_state_):
nest.assert_same_structure(initial_state_, self.decoder_cell.state_size)
def test_with_final_state(self):
self._assert_correct_outputs(self._run(bridge_input="final_state"))
def test_with_outputs(self):
self._assert_correct_outputs(self._run(bridge_input="outputs"))
def test_with_activation_fn(self):
self._assert_correct_outputs(
self._run(
bridge_input="final_state", activation_fn="tanh"))
if __name__ == "__main__":
tf.test.main()
| 5,316 | 32.866242 | 80 | py |
seq2seq | seq2seq-master/seq2seq/test/utils.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various testing utilities
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tempfile
import tensorflow as tf
def create_temp_parallel_data(sources, targets):
"""
  Creates a pair of temporary parallel text files.
  Args:
    sources: List of source sentences
    targets: List of target sentences
Returns:
A tuple (sources_file, targets_file).
"""
file_source = tempfile.NamedTemporaryFile()
file_target = tempfile.NamedTemporaryFile()
file_source.write("\n".join(sources).encode("utf-8"))
file_source.flush()
file_target.write("\n".join(targets).encode("utf-8"))
file_target.flush()
return file_source, file_target
def create_temp_tfrecords(sources, targets):
"""
Creates a temporary TFRecords file.
Args:
    sources: List of source sentences
    targets: List of target sentences
  Returns:
    A temporary file object containing the serialized TFRecord examples.
"""
output_file = tempfile.NamedTemporaryFile()
writer = tf.python_io.TFRecordWriter(output_file.name)
for source, target in zip(sources, targets):
ex = tf.train.Example()
#pylint: disable=E1101
ex.features.feature["source"].bytes_list.value.extend(
[source.encode("utf-8")])
ex.features.feature["target"].bytes_list.value.extend(
[target.encode("utf-8")])
writer.write(ex.SerializeToString())
writer.close()
return output_file
def create_temporary_vocab_file(words, counts=None):
"""
Creates a temporary vocabulary file.
Args:
    words: List of words in the vocabulary
    counts: Optional list of word counts written next to each word
Returns:
A temporary file object with one word per line
"""
vocab_file = tempfile.NamedTemporaryFile()
if counts is None:
for token in words:
vocab_file.write((token + "\n").encode("utf-8"))
else:
for token, count in zip(words, counts):
vocab_file.write("{}\t{}\n".format(token, count).encode("utf-8"))
vocab_file.flush()
return vocab_file
| 2,557 | 26.804348 | 74 | py |
seq2seq | seq2seq-master/seq2seq/test/models_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Models
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import yaml
import numpy as np
import tensorflow as tf
from seq2seq.data import vocab, input_pipeline
from seq2seq.training import utils as training_utils
from seq2seq.test import utils as test_utils
from seq2seq.models import BasicSeq2Seq, AttentionSeq2Seq
TEST_PARAMS = yaml.load("""
embedding.dim: 5
encoder.params:
rnn_cell:
dropout_input_keep_prob: 0.8
num_layers: 2
residual_connections: True,
cell_class: LSTMCell
cell_params:
num_units: 4
decoder.params:
rnn_cell:
num_layers: 2
cell_class: LSTMCell
cell_params:
num_units: 4
""")
class EncoderDecoderTests(tf.test.TestCase):
"""Base class for EncoderDecoder tests. Tests for specific classes should
inherit from this and tf.test.TestCase.
"""
def setUp(self):
super(EncoderDecoderTests, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_size = 2
self.input_depth = 4
self.sequence_length = 10
# Create vocabulary
self.vocab_list = [str(_) for _ in range(10)]
self.vocab_list += ["笑う", "泣く", "了解", "はい", "^_^"]
self.vocab_size = len(self.vocab_list)
self.vocab_file = test_utils.create_temporary_vocab_file(self.vocab_list)
self.vocab_info = vocab.get_vocab_info(self.vocab_file.name)
tf.contrib.framework.get_or_create_global_step()
def tearDown(self):
self.vocab_file.close()
def create_model(self, _mode, _params=None):
"""Creates model class to be tested. Subclasses must implement this method.
"""
self.skipTest("Base module should not be tested.")
def _create_example(self):
"""Creates example data for a test"""
source = np.random.randn(self.batch_size, self.sequence_length,
self.input_depth)
source_len = np.random.randint(0, self.sequence_length, [self.batch_size])
target_len = np.random.randint(0, self.sequence_length * 2,
[self.batch_size])
target = np.random.randn(self.batch_size,
np.max(target_len), self.input_depth)
labels = np.random.randint(0, self.vocab_size,
[self.batch_size, np.max(target_len) - 1])
example_ = namedtuple(
"Example", ["source", "source_len", "target", "target_len", "labels"])
return example_(source, source_len, target, target_len, labels)
def _test_pipeline(self, mode, params=None):
"""Helper function to test the full model pipeline.
"""
# Create source and target example
source_len = self.sequence_length + 5
target_len = self.sequence_length + 10
source = " ".join(np.random.choice(self.vocab_list, source_len))
target = " ".join(np.random.choice(self.vocab_list, target_len))
sources_file, targets_file = test_utils.create_temp_parallel_data(
sources=[source], targets=[target])
# Build model graph
model = self.create_model(mode, params)
input_pipeline_ = input_pipeline.ParallelTextInputPipeline(
params={
"source_files": [sources_file.name],
"target_files": [targets_file.name]
},
mode=mode)
input_fn = training_utils.create_input_fn(
pipeline=input_pipeline_, batch_size=self.batch_size)
features, labels = input_fn()
fetches = model(features, labels, None)
fetches = [_ for _ in fetches if _ is not None]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
with tf.contrib.slim.queues.QueueRunners(sess):
fetches_ = sess.run(fetches)
sources_file.close()
targets_file.close()
return model, fetches_
def test_train(self):
model, fetches_ = self._test_pipeline(tf.contrib.learn.ModeKeys.TRAIN)
predictions_, loss_, _ = fetches_
target_len = self.sequence_length + 10 + 2
max_decode_length = model.params["target.max_seq_len"]
expected_decode_len = np.minimum(target_len, max_decode_length)
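    # The "+ 2" in target_len accounts for the SEQUENCE_START/SEQUENCE_END
    # markers wrapped around the target; the decoder emits one prediction per
    # target token except the start marker, which is why the expected shapes
    # below use expected_decode_len - 1.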
np.testing.assert_array_equal(predictions_["logits"].shape, [
self.batch_size, expected_decode_len - 1,
model.target_vocab_info.total_size
])
np.testing.assert_array_equal(predictions_["losses"].shape,
[self.batch_size, expected_decode_len - 1])
np.testing.assert_array_equal(predictions_["predicted_ids"].shape,
[self.batch_size, expected_decode_len - 1])
self.assertFalse(np.isnan(loss_))
def test_infer(self):
model, fetches_ = self._test_pipeline(tf.contrib.learn.ModeKeys.INFER)
predictions_, = fetches_
pred_len = predictions_["predicted_ids"].shape[1]
np.testing.assert_array_equal(predictions_["logits"].shape, [
self.batch_size, pred_len, model.target_vocab_info.total_size
])
np.testing.assert_array_equal(predictions_["predicted_ids"].shape,
[self.batch_size, pred_len])
def test_infer_beam_search(self):
self.batch_size = 1
beam_width = 10
model, fetches_ = self._test_pipeline(
mode=tf.contrib.learn.ModeKeys.INFER,
params={"inference.beam_search.beam_width": 10})
predictions_, = fetches_
pred_len = predictions_["predicted_ids"].shape[1]
vocab_size = model.target_vocab_info.total_size
np.testing.assert_array_equal(predictions_["predicted_ids"].shape,
[1, pred_len, beam_width])
np.testing.assert_array_equal(
predictions_["beam_search_output.beam_parent_ids"].shape,
[1, pred_len, beam_width])
np.testing.assert_array_equal(
predictions_["beam_search_output.scores"].shape,
[1, pred_len, beam_width])
np.testing.assert_array_equal(
predictions_["beam_search_output.original_outputs.predicted_ids"].shape,
[1, pred_len, beam_width])
np.testing.assert_array_equal(
predictions_["beam_search_output.original_outputs.logits"].shape,
[1, pred_len, beam_width, vocab_size])
class TestBasicSeq2Seq(EncoderDecoderTests):
"""Tests the seq2seq.models.BasicSeq2Seq model.
"""
def setUp(self):
super(TestBasicSeq2Seq, self).setUp()
def create_model(self, mode, params=None):
params_ = BasicSeq2Seq.default_params().copy()
params_.update(TEST_PARAMS)
params_.update({
"vocab_source": self.vocab_file.name,
"vocab_target": self.vocab_file.name,
"bridge.class": "PassThroughBridge"
})
params_.update(params or {})
return BasicSeq2Seq(params=params_, mode=mode)
class TestAttentionSeq2Seq(EncoderDecoderTests):
"""Tests the seq2seq.models.AttentionSeq2Seq model.
"""
def setUp(self):
super(TestAttentionSeq2Seq, self).setUp()
self.encoder_rnn_cell = tf.contrib.rnn.LSTMCell(32)
self.decoder_rnn_cell = tf.contrib.rnn.LSTMCell(32)
self.attention_dim = 128
def create_model(self, mode, params=None):
params_ = AttentionSeq2Seq.default_params().copy()
params_.update(TEST_PARAMS)
params_.update({
"source.reverse": True,
"vocab_source": self.vocab_file.name,
"vocab_target": self.vocab_file.name,
})
params_.update(params or {})
return AttentionSeq2Seq(params=params_, mode=mode)
if __name__ == "__main__":
tf.test.main()
| 8,137 | 33.927039 | 80 | py |
seq2seq | seq2seq-master/seq2seq/test/metrics_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from seq2seq.metrics import bleu
from seq2seq.metrics import rouge
from seq2seq.metrics.metric_specs import BleuMetricSpec
from seq2seq.metrics.metric_specs import RougeMetricSpec
class TestMosesBleu(tf.test.TestCase):
"""Tests using the Moses multi-bleu script to calcualte BLEU score
"""
def _test_multi_bleu(self, hypotheses, references, lowercase, expected_bleu):
#pylint: disable=R0201
"""Runs a multi-bleu test."""
result = bleu.moses_multi_bleu(
hypotheses=hypotheses, references=references, lowercase=lowercase)
np.testing.assert_almost_equal(result, expected_bleu, decimal=2)
def test_multi_bleu(self):
self._test_multi_bleu(
hypotheses=np.array([
"The brown fox jumps over the dog 笑",
"The brown fox jumps over the dog 2 笑"
]),
references=np.array([
"The quick brown fox jumps over the lazy dog 笑",
"The quick brown fox jumps over the lazy dog 笑"
]),
lowercase=False,
expected_bleu=46.51)
def test_empty(self):
self._test_multi_bleu(
hypotheses=np.array([]),
references=np.array([]),
lowercase=False,
expected_bleu=0.00)
def test_multi_bleu_lowercase(self):
self._test_multi_bleu(
hypotheses=np.array([
"The brown fox jumps over The Dog 笑",
"The brown fox jumps over The Dog 2 笑"
]),
references=np.array([
"The quick brown fox jumps over the lazy dog 笑",
"The quick brown fox jumps over the lazy dog 笑"
]),
lowercase=True,
expected_bleu=46.51)
class TestTextMetricSpec(tf.test.TestCase):
"""Abstract class for testing TextMetricSpecs
based on hypotheses and references"""
def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
"""Tests a MetricSpec"""
predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
labels = {"target_tokens": tf.placeholder(dtype=tf.string)}
value, update_op = metric_spec.create_metric_ops(None, labels, predictions)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
scores = []
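      # The metric accumulates state across update_op calls, so each score
      # appended below is the corpus-level value over all (hypothesis,
      # reference) pairs seen so far; that is what expected_scores encodes,
      # e.g. a corpus BLEU of 100.0 after the first, identical pair.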
for hyp, ref in zip(hyps, refs):
hyp = hyp.split(" ")
ref = ref.split(" ")
sess.run(update_op, {
predictions["predicted_tokens"]: [hyp],
labels["target_tokens"]: [ref]
})
scores.append(sess.run(value))
for score, expected in zip(scores, expected_scores):
      np.testing.assert_almost_equal(score, expected, decimal=2)
class TestBleuMetricSpec(TestTextMetricSpec):
"""Tests the `BleuMetricSpec`"""
def test_bleu(self):
metric_spec = BleuMetricSpec({})
return self._test_metric_spec(
metric_spec=metric_spec,
hyps=["A B C D E F", "A B C D E F"],
refs=["A B C D E F", "A B A D E F"],
expected_scores=[100.0, 69.19])
class TestRougeMetricSpec(TestTextMetricSpec):
"""Tests the `RougeMetricSpec`"""
def test_rouge_1_f_score(self):
metric_spec = RougeMetricSpec({"rouge_type": "rouge_1/f_score"})
self._test_metric_spec(
metric_spec=metric_spec,
hyps=["A B C D E F", "A B C D E F"],
refs=["A B C D E F", "A B A D E F"],
expected_scores=[1.0, 0.954])
self._test_metric_spec(
metric_spec=metric_spec,
hyps=[],
refs=[],
expected_scores=[0.0])
self._test_metric_spec(
metric_spec=metric_spec,
hyps=["A"],
refs=["B"],
expected_scores=[0.0])
def test_rouge_2_f_score(self):
metric_spec = RougeMetricSpec({"rouge_type": "rouge_2/f_score"})
self._test_metric_spec(
metric_spec=metric_spec,
hyps=["A B C D E F", "A B C D E F"],
refs=["A B C D E F", "A B A D E F"],
expected_scores=[1.0, 0.8])
self._test_metric_spec(
metric_spec=metric_spec,
hyps=[],
refs=[],
expected_scores=[0.0])
self._test_metric_spec(
metric_spec=metric_spec,
hyps=["A"],
refs=["B"],
expected_scores=[0.0])
def test_rouge_l_f_score(self):
metric_spec = RougeMetricSpec({"rouge_type": "rouge_l/f_score"})
self._test_metric_spec(
metric_spec=metric_spec,
hyps=["A B C D E F", "A B C D E F"],
refs=["A B C D E F", "A B A D E F"],
expected_scores=[1.0, 0.916])
self._test_metric_spec(
metric_spec=metric_spec,
hyps=[],
refs=[],
expected_scores=[0.0])
self._test_metric_spec(
metric_spec=metric_spec,
hyps=["A"],
refs=["B"],
expected_scores=[0.0])
class TestRougeMetric(tf.test.TestCase):
"""Tests the RougeMetric"""
def test_rouge(self):
#pylint: disable=R0201
hypotheses = np.array([
"The brown fox jumps over the dog 笑",
"The brown fox jumps over the dog 2 笑"
])
references = np.array([
"The quick brown fox jumps over the lazy dog 笑",
"The quick brown fox jumps over the lazy dog 笑"
])
output = rouge.rouge(hypotheses, references)
# pyrouge result: 0.84926
np.testing.assert_almost_equal(output["rouge_1/f_score"], 0.865, decimal=2)
# pyrouge result: 0.55238
np.testing.assert_almost_equal(output["rouge_2/f_score"], 0.548, decimal=2)
# pyrouge result 0.84926
np.testing.assert_almost_equal(output["rouge_l/f_score"], 0.852, decimal=2)
if __name__ == "__main__":
tf.test.main()
| 6,445 | 29.695238 | 79 | py |
seq2seq | seq2seq-master/seq2seq/test/data_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for input-related operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tempfile
import tensorflow as tf
import numpy as np
from seq2seq.data import split_tokens_decoder
from seq2seq.data.parallel_data_provider import make_parallel_data_provider
class SplitTokensDecoderTest(tf.test.TestCase):
"""Tests the SplitTokensDecoder class
"""
def test_decode(self):
decoder = split_tokens_decoder.SplitTokensDecoder(
delimiter=" ",
tokens_feature_name="source_tokens",
length_feature_name="source_len")
self.assertEqual(decoder.list_items(), ["source_tokens", "source_len"])
data = tf.constant("Hello world ! 笑w")
decoded_tokens = decoder.decode(data, ["source_tokens"])
decoded_length = decoder.decode(data, ["source_len"])
decoded_both = decoder.decode(data, decoder.list_items())
with self.test_session() as sess:
decoded_tokens_ = sess.run(decoded_tokens)[0]
decoded_length_ = sess.run(decoded_length)[0]
decoded_both_ = sess.run(decoded_both)
self.assertEqual(decoded_length_, 4)
np.testing.assert_array_equal(
np.char.decode(decoded_tokens_.astype("S"), "utf-8"),
["Hello", "world", "!", "笑w"])
self.assertEqual(decoded_both_[1], 4)
np.testing.assert_array_equal(
np.char.decode(decoded_both_[0].astype("S"), "utf-8"),
["Hello", "world", "!", "笑w"])
class ParallelDataProviderTest(tf.test.TestCase):
"""Tests the ParallelDataProvider class
"""
def setUp(self):
super(ParallelDataProviderTest, self).setUp()
# Our data
self.source_lines = ["Hello", "World", "!", "笑"]
self.target_lines = ["1", "2", "3", "笑"]
self.source_to_target = dict(zip(self.source_lines, self.target_lines))
# Create two parallel text files
self.source_file = tempfile.NamedTemporaryFile()
self.target_file = tempfile.NamedTemporaryFile()
self.source_file.write("\n".join(self.source_lines).encode("utf-8"))
self.source_file.flush()
self.target_file.write("\n".join(self.target_lines).encode("utf-8"))
self.target_file.flush()
def tearDown(self):
super(ParallelDataProviderTest, self).tearDown()
self.source_file.close()
self.target_file.close()
def test_reading(self):
num_epochs = 50
data_provider = make_parallel_data_provider(
data_sources_source=[self.source_file.name],
data_sources_target=[self.target_file.name],
num_epochs=num_epochs,
shuffle=True)
item_keys = list(data_provider.list_items())
item_values = data_provider.get(item_keys)
items_dict = dict(zip(item_keys, item_values))
self.assertEqual(
set(item_keys),
set(["source_tokens", "source_len", "target_tokens", "target_len"]))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
with tf.contrib.slim.queues.QueueRunners(sess):
item_dicts_ = [sess.run(items_dict) for _ in range(num_epochs * 3)]
for item_dict in item_dicts_:
item_dict["target_tokens"] = np.char.decode(
item_dict["target_tokens"].astype("S"), "utf-8")
item_dict["source_tokens"] = np.char.decode(
item_dict["source_tokens"].astype("S"), "utf-8")
# Source is Data + SEQUENCE_END
self.assertEqual(item_dict["source_len"], 2)
self.assertEqual(item_dict["source_tokens"][-1], "SEQUENCE_END")
# Target is SEQUENCE_START + Data + SEQUENCE_END
self.assertEqual(item_dict["target_len"], 3)
self.assertEqual(item_dict["target_tokens"][0], "SEQUENCE_START")
self.assertEqual(item_dict["target_tokens"][-1], "SEQUENCE_END")
# Make sure data is aligned
source_joined = " ".join(item_dict["source_tokens"][:-1])
expected_target = self.source_to_target[source_joined]
np.testing.assert_array_equal(
item_dict["target_tokens"],
["SEQUENCE_START"] + expected_target.split(" ") + ["SEQUENCE_END"])
def test_reading_without_targets(self):
num_epochs = 50
data_provider = make_parallel_data_provider(
data_sources_source=[self.source_file.name],
data_sources_target=None,
num_epochs=num_epochs,
shuffle=True)
item_keys = list(data_provider.list_items())
item_values = data_provider.get(item_keys)
items_dict = dict(zip(item_keys, item_values))
self.assertEqual(set(item_keys), set(["source_tokens", "source_len"]))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
with tf.contrib.slim.queues.QueueRunners(sess):
item_dicts_ = [sess.run(items_dict) for _ in range(num_epochs * 3)]
for item_dict in item_dicts_:
self.assertEqual(item_dict["source_len"], 2)
item_dict["source_tokens"] = np.char.decode(
item_dict["source_tokens"].astype("S"), "utf-8")
self.assertEqual(item_dict["source_tokens"][-1], "SEQUENCE_END")
if __name__ == "__main__":
tf.test.main()
| 5,789 | 34.740741 | 77 | py |
seq2seq | seq2seq-master/seq2seq/test/rnn_cell_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for custom RNN cells.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from seq2seq.contrib import rnn_cell
import numpy as np
class ExtendedMultiRNNCellTest(tf.test.TestCase):
"""Tests the ExtendedMultiRNNCell"""
def test_without_residuals(self):
inputs = tf.constant(np.random.randn(1, 2))
state = (tf.constant(np.random.randn(1, 2)),
tf.constant(np.random.randn(1, 2)))
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
standard_cell = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.GRUCell(2) for _ in range(2)], state_is_tuple=True)
res_standard = standard_cell(inputs, state, scope="standard")
test_cell = rnn_cell.ExtendedMultiRNNCell(
[tf.contrib.rnn.GRUCell(2) for _ in range(2)])
res_test = test_cell(inputs, state, scope="test")
with self.test_session() as sess:
sess.run([tf.global_variables_initializer()])
res_standard_, res_test_, = sess.run([res_standard, res_test])
# Make sure it produces the same results as the standard cell
self.assertAllClose(res_standard_[0], res_test_[0])
self.assertAllClose(res_standard_[1][0], res_test_[1][0])
self.assertAllClose(res_standard_[1][1], res_test_[1][1])
def _test_with_residuals(self, inputs, **kwargs):
"""Runs the cell in a session"""
inputs = tf.convert_to_tensor(inputs)
state = (tf.constant(np.random.randn(1, 2)),
tf.constant(np.random.randn(1, 2)))
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
test_cell = rnn_cell.ExtendedMultiRNNCell(
[tf.contrib.rnn.GRUCell(2) for _ in range(2)],
residual_connections=True,
**kwargs)
res_test = test_cell(inputs, state, scope="test")
with self.test_session() as sess:
sess.run([tf.global_variables_initializer()])
return sess.run(res_test)
def _test_constant_shape(self, combiner):
"""Tests a residual combiner whose shape doesn't change
with depth"""
inputs = np.random.randn(1, 2)
with tf.variable_scope("same_input_size"):
res_ = self._test_with_residuals(inputs, residual_combiner=combiner)
self.assertEqual(res_[0].shape, (1, 2))
self.assertEqual(res_[1][0].shape, (1, 2))
self.assertEqual(res_[1][1].shape, (1, 2))
inputs = np.random.randn(1, 5)
with tf.variable_scope("diff_input_size"):
res_ = self._test_with_residuals(inputs, residual_combiner=combiner)
self.assertEqual(res_[0].shape, (1, 2))
self.assertEqual(res_[1][0].shape, (1, 2))
self.assertEqual(res_[1][1].shape, (1, 2))
with tf.variable_scope("same_input_size_dense"):
res_ = self._test_with_residuals(
inputs, residual_combiner=combiner, residual_dense=True)
self.assertEqual(res_[0].shape, (1, 2))
self.assertEqual(res_[1][0].shape, (1, 2))
self.assertEqual(res_[1][1].shape, (1, 2))
inputs = np.random.randn(1, 5)
with tf.variable_scope("diff_input_size_dense"):
res_ = self._test_with_residuals(
inputs, residual_combiner=combiner, residual_dense=True)
self.assertEqual(res_[0].shape, (1, 2))
self.assertEqual(res_[1][0].shape, (1, 2))
self.assertEqual(res_[1][1].shape, (1, 2))
def test_residuals_mean(self):
self._test_constant_shape(combiner="mean")
def test_residuals_add(self):
self._test_constant_shape(combiner="add")
def test_residuals_concat(self):
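    # With the "concat" combiner each layer's output is concatenated with its
    # input(s) instead of being summed, so the output depth grows with every
    # layer and the expected shapes below are simply the sums of the
    # contributing sizes (e.g. 5 + 2 + 2 for a size-5 input through two
    # 2-unit GRU cells).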
inputs = np.random.randn(1, 2)
with tf.variable_scope("same_input_size"):
res_ = self._test_with_residuals(inputs, residual_combiner="concat")
self.assertEqual(res_[0].shape, (1, 6))
self.assertEqual(res_[1][0].shape, (1, 2))
self.assertEqual(res_[1][1].shape, (1, 2))
inputs = np.random.randn(1, 5)
with tf.variable_scope("diff_input_size"):
res_ = self._test_with_residuals(inputs, residual_combiner="concat")
self.assertEqual(res_[0].shape, (1, 5 + 2 + 2))
self.assertEqual(res_[1][0].shape, (1, 2))
self.assertEqual(res_[1][1].shape, (1, 2))
inputs = np.random.randn(1, 2)
with tf.variable_scope("same_input_size_dense"):
res_ = self._test_with_residuals(
inputs, residual_combiner="concat", residual_dense=True)
self.assertEqual(res_[0].shape, (1, 2 + 4 + 2))
self.assertEqual(res_[1][0].shape, (1, 2))
self.assertEqual(res_[1][1].shape, (1, 2))
inputs = np.random.randn(1, 5)
with tf.variable_scope("diff_input_size_dense"):
res_ = self._test_with_residuals(
inputs, residual_combiner="concat", residual_dense=True)
self.assertEqual(res_[0].shape, (1, 2 + (5 + 2) + 5))
self.assertEqual(res_[1][0].shape, (1, 2))
self.assertEqual(res_[1][1].shape, (1, 2))
if __name__ == "__main__":
tf.test.main()
| 5,578 | 37.475862 | 77 | py |
seq2seq | seq2seq-master/seq2seq/test/input_pipeline_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for input-related operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
import yaml
from seq2seq.data import input_pipeline
from seq2seq.test import utils as test_utils
class TestInputPipelineDef(tf.test.TestCase):
"""Tests InputPipeline string definitions"""
def test_without_extra_args(self):
pipeline_def = yaml.load("""
class: ParallelTextInputPipeline
params:
source_files: ["file1"]
target_files: ["file2"]
num_epochs: 1
shuffle: True
""")
pipeline = input_pipeline.make_input_pipeline_from_def(
pipeline_def, tf.contrib.learn.ModeKeys.TRAIN)
self.assertIsInstance(pipeline, input_pipeline.ParallelTextInputPipeline)
#pylint: disable=W0212
self.assertEqual(pipeline.params["source_files"], ["file1"])
self.assertEqual(pipeline.params["target_files"], ["file2"])
self.assertEqual(pipeline.params["num_epochs"], 1)
self.assertEqual(pipeline.params["shuffle"], True)
def test_with_extra_args(self):
pipeline_def = yaml.load("""
class: ParallelTextInputPipeline
params:
source_files: ["file1"]
target_files: ["file2"]
num_epochs: 1
shuffle: True
""")
pipeline = input_pipeline.make_input_pipeline_from_def(
def_dict=pipeline_def,
mode=tf.contrib.learn.ModeKeys.TRAIN,
num_epochs=5,
shuffle=False)
self.assertIsInstance(pipeline, input_pipeline.ParallelTextInputPipeline)
#pylint: disable=W0212
self.assertEqual(pipeline.params["source_files"], ["file1"])
self.assertEqual(pipeline.params["target_files"], ["file2"])
self.assertEqual(pipeline.params["num_epochs"], 5)
self.assertEqual(pipeline.params["shuffle"], False)
class TFRecordsInputPipelineTest(tf.test.TestCase):
"""
  Tests the TFRecordInputPipeline.
"""
def setUp(self):
super(TFRecordsInputPipelineTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
def test_pipeline(self):
tfrecords_file = test_utils.create_temp_tfrecords(
sources=["Hello World . 笑"], targets=["Bye 泣"])
pipeline = input_pipeline.TFRecordInputPipeline(
params={
"files": [tfrecords_file.name],
"source_field": "source",
"target_field": "target",
"num_epochs": 5,
"shuffle": False
},
mode=tf.contrib.learn.ModeKeys.TRAIN)
data_provider = pipeline.make_data_provider()
features = pipeline.read_from_data_provider(data_provider)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
with tf.contrib.slim.queues.QueueRunners(sess):
res = sess.run(features)
self.assertEqual(res["source_len"], 5)
self.assertEqual(res["target_len"], 4)
np.testing.assert_array_equal(
np.char.decode(res["source_tokens"].astype("S"), "utf-8"),
["Hello", "World", ".", "笑", "SEQUENCE_END"])
np.testing.assert_array_equal(
np.char.decode(res["target_tokens"].astype("S"), "utf-8"),
["SEQUENCE_START", "Bye", "泣", "SEQUENCE_END"])
class ParallelTextInputPipelineTest(tf.test.TestCase):
"""
  Tests the ParallelTextInputPipeline.
"""
def setUp(self):
super(ParallelTextInputPipelineTest, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
def test_pipeline(self):
file_source, file_target = test_utils.create_temp_parallel_data(
sources=["Hello World . 笑"], targets=["Bye 泣"])
pipeline = input_pipeline.ParallelTextInputPipeline(
params={
"source_files": [file_source.name],
"target_files": [file_target.name],
"num_epochs": 5,
"shuffle": False
},
mode=tf.contrib.learn.ModeKeys.TRAIN)
data_provider = pipeline.make_data_provider()
features = pipeline.read_from_data_provider(data_provider)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
with tf.contrib.slim.queues.QueueRunners(sess):
res = sess.run(features)
self.assertEqual(res["source_len"], 5)
self.assertEqual(res["target_len"], 4)
np.testing.assert_array_equal(
np.char.decode(res["source_tokens"].astype("S"), "utf-8"),
["Hello", "World", ".", "笑", "SEQUENCE_END"])
np.testing.assert_array_equal(
np.char.decode(res["target_tokens"].astype("S"), "utf-8"),
["SEQUENCE_START", "Bye", "泣", "SEQUENCE_END"])
if __name__ == "__main__":
tf.test.main()
| 5,357 | 32.074074 | 77 | py |
seq2seq | seq2seq-master/seq2seq/test/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests and testing utilities
"""
from seq2seq.test import utils
| 643 | 34.777778 | 74 | py |
seq2seq | seq2seq-master/seq2seq/test/beam_search_test.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for Beam Search and related functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.inference import beam_search
class TestGatherTree(tf.test.TestCase):
"""Tests the gather_tree function"""
def test_gather_tree(self):
predicted_ids = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
parent_ids = np.array([[0, 0, 0], [0, 1, 1], [2, 1, 2]])
expected_result = np.array([[2, 2, 2], [6, 5, 6], [7, 8, 9]])
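    # expected_result can be traced by hand: rows are time steps, columns are
    # beams. The final step is kept as-is; each earlier token is found by
    # following the parent pointers backwards. For example, beam 0 has parent
    # 2 at the final step and that beam's parent at step 1 is 1, so its
    # step-0 token is predicted_ids[0][1] = 2.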
res = beam_search.gather_tree(
tf.convert_to_tensor(predicted_ids), tf.convert_to_tensor(parent_ids))
with self.test_session() as sess:
res_ = sess.run(res)
np.testing.assert_array_equal(expected_result, res_)
class TestLengthNorm(tf.test.TestCase):
"""Tests the length normalization score"""
def test_length_norm(self):
#log_probs_ = np.ones([2, 3]) / 3.0
lengths_ = np.array([[1, 2, 3], [3, 3, 3]])
penalty_factor_ = 0.6
length_pen = beam_search.length_penalty(
sequence_lengths=tf.convert_to_tensor(lengths_),
penalty_factor=penalty_factor_)
with self.test_session() as sess:
length_pen_ = sess.run(length_pen)
np.testing.assert_almost_equal(length_pen_[0, 0], 1.0, decimal=5)
np.testing.assert_almost_equal(length_pen_[0, 1], 1.0969027, decimal=4)
np.testing.assert_almost_equal(length_pen_[0, 2], 1.1884017, decimal=4)
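      # These expected values are consistent with the GNMT-style length
      # penalty ((5 + length) / 6) ** penalty_factor, e.g.
      # ((5 + 2) / 6) ** 0.6 ~= 1.0969.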
class TestBeamStep(tf.test.TestCase):
"""Tests a single step of beam search
"""
def setUp(self):
super(TestBeamStep, self).setUp()
self.state_size = 10
config = beam_search.BeamSearchConfig(
beam_width=3,
vocab_size=5,
eos_token=0,
length_penalty_weight=0.6,
choose_successors_fn=beam_search.choose_top_k)
self.config = config
def test_step(self):
beam_state = beam_search.BeamSearchState(
log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)),
lengths=tf.constant(
2, shape=[self.config.beam_width], dtype=tf.int32),
finished=tf.zeros(
[self.config.beam_width], dtype=tf.bool))
logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
logits_[0, 2] = 1.9
logits_[0, 3] = 2.1
logits_[1, 3] = 3.1
logits_[1, 4] = 0.9
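    # All beams start from equal log-probs, so the three best flattened
    # (beam, token) pairs are (1, 3), (0, 3) and (0, 2); the predicted_ids and
    # beam_parent_ids assertions below check exactly that ordering.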
logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
outputs, next_beam_state = beam_search.beam_search_step(
time_=2, logits=logits, beam_state=beam_state, config=self.config)
with self.test_session() as sess:
outputs_, next_state_, state_, log_probs_ = sess.run(
[outputs, next_beam_state, beam_state, log_probs])
np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 2])
np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0])
np.testing.assert_array_equal(next_state_.lengths, [3, 3, 3])
np.testing.assert_array_equal(next_state_.finished, [False, False, False])
expected_log_probs = state_.log_probs[[1, 0, 0]]
expected_log_probs[0] += log_probs_[1, 3]
expected_log_probs[1] += log_probs_[0, 3]
expected_log_probs[2] += log_probs_[0, 2]
np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
def test_step_with_eos(self):
beam_state = beam_search.BeamSearchState(
log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)),
lengths=tf.convert_to_tensor(
[2, 1, 2], dtype=tf.int32),
finished=tf.constant(
[False, True, False], dtype=tf.bool))
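    # Beam 1 enters this step already finished, so it should keep emitting the
    # EOS token with its length frozen; after re-ordering it reappears as the
    # new beam 0 with predicted id 0 and length 1, which the assertions below
    # verify.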
logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
logits_[0, 2] = 1.1
logits_[1, 2] = 1.0
logits_[2, 2] = 1.0
logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
outputs, next_beam_state = beam_search.beam_search_step(
time_=2, logits=logits, beam_state=beam_state, config=self.config)
with self.test_session() as sess:
outputs_, next_state_, state_, log_probs_ = sess.run(
[outputs, next_beam_state, beam_state, log_probs])
np.testing.assert_array_equal(outputs_.predicted_ids, [0, 2, 2])
np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 2])
np.testing.assert_array_equal(next_state_.lengths, [1, 3, 3])
np.testing.assert_array_equal(next_state_.finished, [True, False, False])
expected_log_probs = state_.log_probs[outputs_.beam_parent_ids]
expected_log_probs[1] += log_probs_[0, 2]
expected_log_probs[2] += log_probs_[2, 2]
np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
def test_step_with_new_eos(self):
beam_state = beam_search.BeamSearchState(
log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)),
lengths=tf.constant(
2, shape=[self.config.beam_width], dtype=tf.int32),
finished=tf.zeros(
[self.config.beam_width], dtype=tf.bool))
logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
logits_[0, 0] = 1.9
logits_[0, 3] = 2.1
logits_[1, 3] = 3.1
logits_[1, 4] = 0.9
logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits)
outputs, next_beam_state = beam_search.beam_search_step(
time_=2, logits=logits, beam_state=beam_state, config=self.config)
with self.test_session() as sess:
outputs_, next_state_, state_, log_probs_ = sess.run(
[outputs, next_beam_state, beam_state, log_probs])
np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 0])
np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0])
np.testing.assert_array_equal(next_state_.lengths, [3, 3, 2])
np.testing.assert_array_equal(next_state_.finished, [False, False, True])
expected_log_probs = state_.log_probs[[1, 0, 0]]
expected_log_probs[0] += log_probs_[1, 3]
expected_log_probs[1] += log_probs_[0, 3]
expected_log_probs[2] += log_probs_[0, 0]
np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
class TestEosMasking(tf.test.TestCase):
"""Tests EOS masking used in beam search
"""
def test_eos_masking(self):
probs = tf.constant([[-.2, -.2, -.2, -.2, -.2], [-.3, -.3, -.3, 3, 0],
[5, 6, 0, 0, 0]])
eos_token = 0
previously_finished = tf.constant([0, 1, 0], dtype=tf.float32)
masked = beam_search.mask_probs(probs, eos_token, previously_finished)
with self.test_session() as sess:
probs = sess.run(probs)
masked = sess.run(masked)
np.testing.assert_array_equal(probs[0], masked[0])
np.testing.assert_array_equal(probs[2], masked[2])
np.testing.assert_equal(masked[1][0], 0)
np.testing.assert_approx_equal(masked[1][1], np.finfo('float32').min)
np.testing.assert_approx_equal(masked[1][2], np.finfo('float32').min)
np.testing.assert_approx_equal(masked[1][3], np.finfo('float32').min)
np.testing.assert_approx_equal(masked[1][4], np.finfo('float32').min)
if __name__ == "__main__":
tf.test.main()
| 7,776 | 36.752427 | 79 | py |
seq2seq | seq2seq-master/seq2seq/test/conv_encoder_test.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Cases for ConvEncoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.encoders import ConvEncoder
class ConvEncoderTest(tf.test.TestCase):
"""
Tests the ConvEncoder class.
"""
def setUp(self):
super(ConvEncoderTest, self).setUp()
self.batch_size = 4
self.sequence_length = 16
self.input_depth = 10
self.mode = tf.contrib.learn.ModeKeys.TRAIN
def _test_with_params(self, params):
"""Tests the encoder with a given parameter configuration"""
inputs = tf.random_normal(
[self.batch_size, self.sequence_length, self.input_depth])
example_length = tf.ones(
self.batch_size, dtype=tf.int32) * self.sequence_length
encode_fn = ConvEncoder(params, self.mode)
encoder_output = encode_fn(inputs, example_length)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
encoder_output_ = sess.run(encoder_output)
att_value_units = encode_fn.params["attention_cnn.units"]
output_units = encode_fn.params["output_cnn.units"]
np.testing.assert_array_equal(
encoder_output_.outputs.shape,
[self.batch_size, self.sequence_length, att_value_units])
np.testing.assert_array_equal(
encoder_output_.attention_values.shape,
[self.batch_size, self.sequence_length, output_units])
np.testing.assert_array_equal(
encoder_output_.final_state.shape,
[self.batch_size, output_units])
def test_encode_with_pos(self):
self._test_with_params({
"position_embeddings.enable": True,
"position_embeddings.num_positions": self.sequence_length,
"attention_cnn.units": 5,
"output_cnn.units": 6
})
if __name__ == "__main__":
tf.test.main()
| 2,480 | 30.807692 | 74 | py |
seq2seq | seq2seq-master/seq2seq/test/example_config_test.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Cases for example configuration files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from pydoc import locate
import yaml
import tensorflow as tf
from tensorflow import gfile
from seq2seq.test.models_test import EncoderDecoderTests
from seq2seq import models
EXAMPLE_CONFIG_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../example_configs"))
def _load_model_from_config(config_path, hparam_overrides, vocab_file, mode):
"""Loads model from a configuration file"""
with gfile.GFile(config_path) as config_file:
config = yaml.load(config_file)
model_cls = locate(config["model"]) or getattr(models, config["model"])
model_params = config["model_params"]
if hparam_overrides:
model_params.update(hparam_overrides)
# Change the max decode length to make the test run faster
model_params["decoder.params"]["max_decode_length"] = 5
model_params["vocab_source"] = vocab_file
model_params["vocab_target"] = vocab_file
return model_cls(params=model_params, mode=mode)
class ExampleConfigTest(object):
"""Interface for configuration-based tests"""
def __init__(self, *args, **kwargs):
super(ExampleConfigTest, self).__init__(*args, **kwargs)
self.vocab_file = None
def _config_path(self):
"""Returns the path to the configuration to be tested"""
raise NotImplementedError()
def create_model(self, mode, params=None):
"""Creates the model"""
return _load_model_from_config(
config_path=self._config_path(),
hparam_overrides=params,
vocab_file=self.vocab_file.name,
mode=mode)
class TestNMTLarge(ExampleConfigTest, EncoderDecoderTests):
"""Tests nmt_large.yml"""
def _config_path(self):
return os.path.join(EXAMPLE_CONFIG_DIR, "nmt_large.yml")
class TestNMTMedium(ExampleConfigTest, EncoderDecoderTests):
"""Tests nmt_medium.yml"""
def _config_path(self):
return os.path.join(EXAMPLE_CONFIG_DIR, "nmt_medium.yml")
class TestNMTSmall(ExampleConfigTest, EncoderDecoderTests):
"""Tests nmt_small.yml"""
def _config_path(self):
return os.path.join(EXAMPLE_CONFIG_DIR, "nmt_small.yml")
class TestNMTConv(ExampleConfigTest, EncoderDecoderTests):
"""Tests nmt_small.yml"""
def _config_path(self):
return os.path.join(EXAMPLE_CONFIG_DIR, "nmt_conv.yml")
if __name__ == "__main__":
tf.test.main()
| 3,079 | 28.902913 | 77 | py |
seq2seq | seq2seq-master/seq2seq/decoders/rnn_decoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base class for sequence decoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
from collections import namedtuple
import six
import tensorflow as tf
from tensorflow.python.util import nest # pylint: disable=E0611
from seq2seq.graph_module import GraphModule
from seq2seq.configurable import Configurable
from seq2seq.contrib.seq2seq.decoder import Decoder, dynamic_decode
from seq2seq.encoders.rnn_encoder import _default_rnn_cell_params
from seq2seq.encoders.rnn_encoder import _toggle_dropout
from seq2seq.training import utils as training_utils
class DecoderOutput(
namedtuple("DecoderOutput", ["logits", "predicted_ids", "cell_output"])):
"""Output of an RNN decoder.
Note that we output both the logits and predictions because during
dynamic decoding the predictions may not correspond to max(logits).
For example, we may be sampling from the logits instead.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class RNNDecoder(Decoder, GraphModule, Configurable):
"""Base class for RNN decoders.
Args:
    cell: An instance of `tf.contrib.rnn.RNNCell`
helper: An instance of `tf.contrib.seq2seq.Helper` to assist decoding
initial_state: A tensor or tuple of tensors used as the initial cell
state.
name: A name for this module
"""
def __init__(self, params, mode, name):
GraphModule.__init__(self, name)
Configurable.__init__(self, params, mode)
self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)
self.cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
# Not initialized yet
self.initial_state = None
self.helper = None
@abc.abstractmethod
def initialize(self, name=None):
raise NotImplementedError
@abc.abstractmethod
def step(self, name=None):
raise NotImplementedError
@property
def batch_size(self):
return tf.shape(nest.flatten([self.initial_state])[0])[0]
def _setup(self, initial_state, helper):
"""Sets the initial state and helper for the decoder.
"""
self.initial_state = initial_state
self.helper = helper
def finalize(self, outputs, final_state):
"""Applies final transformation to the decoder output once decoding is
finished.
"""
#pylint: disable=R0201
return (outputs, final_state)
@staticmethod
def default_params():
return {
"max_decode_length": 100,
"rnn_cell": _default_rnn_cell_params(),
"init_scale": 0.04,
}
def _build(self, initial_state, helper):
if not self.initial_state:
self._setup(initial_state, helper)
scope = tf.get_variable_scope()
scope.set_initializer(tf.random_uniform_initializer(
-self.params["init_scale"],
self.params["init_scale"]))
maximum_iterations = None
if self.mode == tf.contrib.learn.ModeKeys.INFER:
maximum_iterations = self.params["max_decode_length"]
outputs, final_state = dynamic_decode(
decoder=self,
output_time_major=True,
impute_finished=False,
maximum_iterations=maximum_iterations)
return self.finalize(outputs, final_state)
| 3,795 | 30.114754 | 77 | py |
seq2seq | seq2seq-master/seq2seq/decoders/basic_decoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A basic sequence decoder that performs a softmax based on the RNN state.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from seq2seq.decoders.rnn_decoder import RNNDecoder, DecoderOutput
class BasicDecoder(RNNDecoder):
"""Simple RNN decoder that performed a softmax operations on the cell output.
"""
def __init__(self, params, mode, vocab_size, name="basic_decoder"):
super(BasicDecoder, self).__init__(params, mode, name)
self.vocab_size = vocab_size
def compute_output(self, cell_output):
"""Computes the decoder outputs."""
return tf.contrib.layers.fully_connected(
inputs=cell_output, num_outputs=self.vocab_size, activation_fn=None)
@property
def output_size(self):
return DecoderOutput(
logits=self.vocab_size,
predicted_ids=tf.TensorShape([]),
cell_output=self.cell.output_size)
@property
def output_dtype(self):
return DecoderOutput(
logits=tf.float32, predicted_ids=tf.int32, cell_output=tf.float32)
def initialize(self, name=None):
finished, first_inputs = self.helper.initialize()
return finished, first_inputs, self.initial_state
def step(self, time_, inputs, state, name=None):
cell_output, cell_state = self.cell(inputs, state)
logits = self.compute_output(cell_output)
sample_ids = self.helper.sample(
time=time_, outputs=logits, state=cell_state)
outputs = DecoderOutput(
logits=logits, predicted_ids=sample_ids, cell_output=cell_output)
finished, next_inputs, next_state = self.helper.next_inputs(
time=time_, outputs=outputs, state=cell_state, sample_ids=sample_ids)
return (outputs, next_state, next_inputs, finished)
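# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, never called by
# library code). It shows how a BasicDecoder is typically wired to a training
# helper and an initial cell state. The TrainingHelper import and all shapes
# below are assumptions made for this example only.
def _example_basic_decoder_usage():
  """Hedged sketch: decodes random embedded inputs with a BasicDecoder."""
  from seq2seq.contrib.seq2seq.helper import TrainingHelper
  batch_size, max_len, embed_dim, vocab_size = 4, 7, 16, 50
  inputs = tf.random_normal([batch_size, max_len, embed_dim])
  lengths = tf.fill([batch_size], max_len)
  helper = TrainingHelper(
      inputs=inputs, sequence_length=lengths, time_major=False)
  decoder = BasicDecoder(
      params={}, mode=tf.contrib.learn.ModeKeys.TRAIN, vocab_size=vocab_size)
  initial_state = decoder.cell.zero_state(batch_size, dtype=tf.float32)
  # Calling the decoder runs dynamic decoding and returns a
  # (DecoderOutput, final_state) tuple.
  outputs, final_state = decoder(initial_state, helper)
  return outputs, final_state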
| 2,398 | 35.348485 | 79 | py |
seq2seq | seq2seq-master/seq2seq/decoders/attention_decoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A basic sequence decoder that performs a softmax based on the RNN state.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import tensorflow as tf
from seq2seq.decoders.rnn_decoder import RNNDecoder
from seq2seq.contrib.seq2seq.helper import CustomHelper
class AttentionDecoderOutput(
namedtuple("DecoderOutput", [
"logits", "predicted_ids", "cell_output", "attention_scores",
"attention_context"
])):
"""Augmented decoder output that also includes the attention scores.
"""
pass
class AttentionDecoder(RNNDecoder):
"""An RNN Decoder that uses attention over an input sequence.
Args:
    cell: An instance of `tf.contrib.rnn.RNNCell`
helper: An instance of `tf.contrib.seq2seq.Helper` to assist decoding
initial_state: A tensor or tuple of tensors used as the initial cell
state.
vocab_size: Output vocabulary size, i.e. number of units
in the softmax layer
attention_keys: The sequence used to calculate attention scores.
A tensor of shape `[B, T, ...]`.
attention_values: The sequence to attend over.
A tensor of shape `[B, T, input_dim]`.
attention_values_length: Sequence length of the attention values.
An int32 Tensor of shape `[B]`.
    attention_fn: The attention function to use. This function maps from
`(state, inputs)` to `(attention_scores, attention_context)`.
For an example, see `seq2seq.decoder.attention.AttentionLayer`.
    reverse_scores_lengths: Optional, an array of sequence lengths. If set,
      reverse the attention scores in the output. This is used when
a reversed source sequence is fed as an input but you want to
return the scores in non-reversed order.
"""
def __init__(self,
params,
mode,
vocab_size,
attention_keys,
attention_values,
attention_values_length,
attention_fn,
reverse_scores_lengths=None,
name="attention_decoder"):
super(AttentionDecoder, self).__init__(params, mode, name)
self.vocab_size = vocab_size
self.attention_keys = attention_keys
self.attention_values = attention_values
self.attention_values_length = attention_values_length
self.attention_fn = attention_fn
self.reverse_scores_lengths = reverse_scores_lengths
@property
def output_size(self):
return AttentionDecoderOutput(
logits=self.vocab_size,
predicted_ids=tf.TensorShape([]),
cell_output=self.cell.output_size,
attention_scores=tf.shape(self.attention_values)[1:-1],
attention_context=self.attention_values.get_shape()[-1])
@property
def output_dtype(self):
return AttentionDecoderOutput(
logits=tf.float32,
predicted_ids=tf.int32,
cell_output=tf.float32,
attention_scores=tf.float32,
attention_context=tf.float32)
def initialize(self, name=None):
finished, first_inputs = self.helper.initialize()
# Concat empty attention context
attention_context = tf.zeros([
tf.shape(first_inputs)[0],
self.attention_values.get_shape().as_list()[-1]
])
first_inputs = tf.concat([first_inputs, attention_context], 1)
return finished, first_inputs, self.initial_state
def compute_output(self, cell_output):
"""Computes the decoder outputs."""
# Compute attention
att_scores, attention_context = self.attention_fn(
query=cell_output,
keys=self.attention_keys,
values=self.attention_values,
values_length=self.attention_values_length)
# TODO: Make this a parameter: We may or may not want this.
# Transform attention context.
# This makes the softmax smaller and allows us to synthesize information
# between decoder state and attention context
# see https://arxiv.org/abs/1508.04025v5
softmax_input = tf.contrib.layers.fully_connected(
inputs=tf.concat([cell_output, attention_context], 1),
num_outputs=self.cell.output_size,
activation_fn=tf.nn.tanh,
scope="attention_mix")
# Softmax computation
logits = tf.contrib.layers.fully_connected(
inputs=softmax_input,
num_outputs=self.vocab_size,
activation_fn=None,
scope="logits")
return softmax_input, logits, att_scores, attention_context
def _setup(self, initial_state, helper):
self.initial_state = initial_state
def att_next_inputs(time, outputs, state, sample_ids, name=None):
"""Wraps the original decoder helper function to append the attention
context.
"""
finished, next_inputs, next_state = helper.next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name)
next_inputs = tf.concat([next_inputs, outputs.attention_context], 1)
return (finished, next_inputs, next_state)
self.helper = CustomHelper(
initialize_fn=helper.initialize,
sample_fn=helper.sample,
next_inputs_fn=att_next_inputs)
def step(self, time_, inputs, state, name=None):
cell_output, cell_state = self.cell(inputs, state)
cell_output_new, logits, attention_scores, attention_context = \
self.compute_output(cell_output)
if self.reverse_scores_lengths is not None:
attention_scores = tf.reverse_sequence(
input=attention_scores,
seq_lengths=self.reverse_scores_lengths,
seq_dim=1,
batch_dim=0)
sample_ids = self.helper.sample(
time=time_, outputs=logits, state=cell_state)
outputs = AttentionDecoderOutput(
logits=logits,
predicted_ids=sample_ids,
cell_output=cell_output_new,
attention_scores=attention_scores,
attention_context=attention_context)
finished, next_inputs, next_state = self.helper.next_inputs(
time=time_, outputs=outputs, state=cell_state, sample_ids=sample_ids)
return (outputs, next_state, next_inputs, finished)
| 6,750 | 34.531579 | 77 | py |
seq2seq | seq2seq-master/seq2seq/decoders/beam_search_decoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A decoder that uses beam search. Can only be used for inference, not
training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import tensorflow as tf
from tensorflow.python.util import nest # pylint: disable=E0611
from seq2seq.inference import beam_search
from seq2seq.decoders.rnn_decoder import RNNDecoder
class FinalBeamDecoderOutput(
namedtuple("FinalBeamDecoderOutput",
["predicted_ids", "beam_search_output"])):
"""Final outputs returned by the beam search after all decoding is finished.
Args:
predicted_ids: The final prediction. A tensor of shape
`[T, 1, beam_width]`.
beam_search_output: An instance of `BeamDecoderOutput` that describes
the state of the beam search.
"""
pass
class BeamDecoderOutput(
namedtuple("BeamDecoderOutput", [
"logits", "predicted_ids", "log_probs", "scores", "beam_parent_ids",
"original_outputs"
])):
"""Structure for the output of a beam search decoder. This class is used
to define the output at each step as well as the final output of the decoder.
If used as the final output, a time dimension `T` is inserted after the
beam_size dimension.
Args:
logits: Logits at the current time step of shape `[beam_size, vocab_size]`
predicted_ids: Chosen softmax predictions at the current time step.
An int32 tensor of shape `[beam_size]`.
log_probs: Total log probabilities of all beams at the current time step.
      A float32 tensor of shape `[beam_size]`.
scores: Total scores of all beams at the current time step. This differs
from log probabilities in that the score may add additional processing
such as length normalization. A float32 tensor of shape `[beam_size]`.
beam_parent_ids: The indices of the beams that are being continued.
An int32 tensor of shape `[beam_size]`.
"""
pass
class BeamSearchDecoder(RNNDecoder):
"""The BeamSearchDecoder wraps another decoder to perform beam search instead
  of greedy selection. This decoder must be used with a batch size of 1, which
will result in an effective batch size of `beam_width`.
Args:
    decoder: An instance of `RNNDecoder` to be used with beam search.
config: A `BeamSearchConfig` that defines beam search decoding parameters.
"""
def __init__(self, decoder, config):
super(BeamSearchDecoder, self).__init__(decoder.params, decoder.mode,
decoder.name)
self.decoder = decoder
self.config = config
def __call__(self, *args, **kwargs):
with self.decoder.variable_scope():
return self._build(*args, **kwargs)
@property
def output_size(self):
return BeamDecoderOutput(
logits=self.decoder.vocab_size,
predicted_ids=tf.TensorShape([]),
log_probs=tf.TensorShape([]),
scores=tf.TensorShape([]),
beam_parent_ids=tf.TensorShape([]),
original_outputs=self.decoder.output_size)
@property
def output_dtype(self):
return BeamDecoderOutput(
logits=tf.float32,
predicted_ids=tf.int32,
log_probs=tf.float32,
scores=tf.float32,
beam_parent_ids=tf.int32,
original_outputs=self.decoder.output_dtype)
@property
def batch_size(self):
return self.config.beam_width
def initialize(self, name=None):
finished, first_inputs, initial_state = self.decoder.initialize()
# Create beam state
beam_state = beam_search.create_initial_beam_state(config=self.config)
return finished, first_inputs, (initial_state, beam_state)
def finalize(self, outputs, final_state):
# Gather according to beam search result
predicted_ids = beam_search.gather_tree(outputs.predicted_ids,
outputs.beam_parent_ids)
# We're using a batch size of 1, so we add an extra dimension to
# convert tensors to [1, beam_width, ...] shape. This way Tensorflow
# doesn't confuse batch_size with beam_width
outputs = nest.map_structure(lambda x: tf.expand_dims(x, 1), outputs)
final_outputs = FinalBeamDecoderOutput(
predicted_ids=tf.expand_dims(predicted_ids, 1),
beam_search_output=outputs)
return final_outputs, final_state
def _build(self, initial_state, helper):
# Tile initial state
initial_state = nest.map_structure(
lambda x: tf.tile(x, [self.batch_size, 1]), initial_state)
self.decoder._setup(initial_state, helper) #pylint: disable=W0212
return super(BeamSearchDecoder, self)._build(self.decoder.initial_state,
self.decoder.helper)
def step(self, time_, inputs, state, name=None):
decoder_state, beam_state = state
# Call the original decoder
(decoder_output, decoder_state, _, _) = self.decoder.step(time_, inputs,
decoder_state)
# Perform a step of beam search
bs_output, beam_state = beam_search.beam_search_step(
time_=time_,
logits=decoder_output.logits,
beam_state=beam_state,
config=self.config)
# Shuffle everything according to beam search result
decoder_state = nest.map_structure(
lambda x: tf.gather(x, bs_output.beam_parent_ids), decoder_state)
decoder_output = nest.map_structure(
lambda x: tf.gather(x, bs_output.beam_parent_ids), decoder_output)
next_state = (decoder_state, beam_state)
outputs = BeamDecoderOutput(
logits=tf.zeros([self.config.beam_width, self.config.vocab_size]),
predicted_ids=bs_output.predicted_ids,
log_probs=beam_state.log_probs,
scores=bs_output.scores,
beam_parent_ids=bs_output.beam_parent_ids,
original_outputs=decoder_output)
finished, next_inputs, next_state = self.decoder.helper.next_inputs(
time=time_,
outputs=decoder_output,
state=next_state,
sample_ids=bs_output.predicted_ids)
next_inputs.set_shape([self.batch_size, None])
return (outputs, next_state, next_inputs, finished)
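# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, never called by
# library code). Beam search wraps an already-constructed RNNDecoder; the
# caller is assumed to supply the decoder and a BeamSearchConfig (see
# seq2seq.inference.beam_search), neither of which is created here.
def _example_wrap_with_beam_search(decoder, config):
  """Hedged sketch: wraps an existing decoder for beam search decoding."""
  # The wrapped decoder is used like the original one, but the effective
  # batch size becomes `config.beam_width`.
  return BeamSearchDecoder(decoder=decoder, config=config)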
| 6,788 | 35.896739 | 79 | py |
seq2seq | seq2seq-master/seq2seq/decoders/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of decoders and decoder-related functions.
"""
from seq2seq.decoders.rnn_decoder import *
from seq2seq.decoders.attention import *
from seq2seq.decoders.basic_decoder import *
from seq2seq.decoders.attention_decoder import *
| 816 | 37.904762 | 74 | py |
seq2seq | seq2seq-master/seq2seq/decoders/attention.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Implementations of attention layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import six
import tensorflow as tf
from tensorflow.python.framework import function # pylint: disable=E0611
from seq2seq.graph_module import GraphModule
from seq2seq.configurable import Configurable
@function.Defun(
tf.float32,
tf.float32,
tf.float32,
func_name="att_sum_bahdanau",
noinline=True)
def att_sum_bahdanau(v_att, keys, query):
"""Calculates a batch- and timweise dot product with a variable"""
return tf.reduce_sum(v_att * tf.tanh(keys + tf.expand_dims(query, 1)), [2])
@function.Defun(tf.float32, tf.float32, func_name="att_sum_dot", noinline=True)
def att_sum_dot(keys, query):
"""Calculates a batch- and timweise dot product"""
return tf.reduce_sum(keys * tf.expand_dims(query, 1), [2])
@six.add_metaclass(abc.ABCMeta)
class AttentionLayer(GraphModule, Configurable):
"""
Attention layer according to https://arxiv.org/abs/1409.0473.
Params:
num_units: Number of units used in the attention layer
"""
def __init__(self, params, mode, name="attention"):
GraphModule.__init__(self, name)
Configurable.__init__(self, params, mode)
@staticmethod
def default_params():
return {"num_units": 128}
@abc.abstractmethod
def score_fn(self, keys, query):
"""Computes the attention score"""
raise NotImplementedError
def _build(self, query, keys, values, values_length):
"""Computes attention scores and outputs.
Args:
query: The query used to calculate attention scores.
In seq2seq this is typically the current state of the decoder.
A tensor of shape `[B, ...]`
keys: The keys used to calculate attention scores. In seq2seq, these
are typically the outputs of the encoder and equivalent to `values`.
A tensor of shape `[B, T, ...]` where each element in the `T`
dimension corresponds to the key for that value.
values: The elements to compute attention over. In seq2seq, this is
typically the sequence of encoder outputs.
A tensor of shape `[B, T, input_dim]`.
values_length: An int32 tensor of shape `[B]` defining the sequence
length of the attention values.
Returns:
A tuple `(scores, context)`.
      `scores` is a vector of length `T` where each element is the
normalized "score" of the corresponding `inputs` element.
`context` is the final attention layer output corresponding to
the weighted inputs.
      A tensor of shape `[B, input_dim]`.
"""
values_depth = values.get_shape().as_list()[-1]
# Fully connected layers to transform both keys and query
# into a tensor with `num_units` units
att_keys = tf.contrib.layers.fully_connected(
inputs=keys,
num_outputs=self.params["num_units"],
activation_fn=None,
scope="att_keys")
att_query = tf.contrib.layers.fully_connected(
inputs=query,
num_outputs=self.params["num_units"],
activation_fn=None,
scope="att_query")
scores = self.score_fn(att_keys, att_query)
# Replace all scores for padded inputs with tf.float32.min
num_scores = tf.shape(scores)[1]
scores_mask = tf.sequence_mask(
lengths=tf.to_int32(values_length),
maxlen=tf.to_int32(num_scores),
dtype=tf.float32)
scores = scores * scores_mask + ((1.0 - scores_mask) * tf.float32.min)
# Normalize the scores
scores_normalized = tf.nn.softmax(scores, name="scores_normalized")
# Calculate the weighted average of the attention inputs
# according to the scores
context = tf.expand_dims(scores_normalized, 2) * values
context = tf.reduce_sum(context, 1, name="context")
context.set_shape([None, values_depth])
return (scores_normalized, context)
class AttentionLayerDot(AttentionLayer):
"""An attention layer that calculates attention scores using
a dot product.
"""
def score_fn(self, keys, query):
return att_sum_dot(keys, query)
class AttentionLayerBahdanau(AttentionLayer):
"""An attention layer that calculates attention scores using
a parameterized multiplication."""
def score_fn(self, keys, query):
v_att = tf.get_variable(
"v_att", shape=[self.params["num_units"]], dtype=tf.float32)
return att_sum_bahdanau(v_att, keys, query)
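# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, never called by
# library code). It shows the shapes expected when calling an attention layer
# directly; the sizes below are arbitrary assumptions for this example.
def _example_attention_layer_usage():
  """Hedged sketch: dot-product attention over random encoder outputs."""
  batch_size, time_steps, depth, query_depth = 4, 10, 32, 16
  keys = tf.random_normal([batch_size, time_steps, depth])
  values = keys  # in seq2seq the values are typically the encoder outputs
  query = tf.random_normal([batch_size, query_depth])
  values_length = tf.fill([batch_size], time_steps)
  attention = AttentionLayerDot(
      params={"num_units": 64}, mode=tf.contrib.learn.ModeKeys.TRAIN)
  # scores: [batch_size, time_steps], context: [batch_size, depth]
  scores, context = attention(query, keys, values, values_length)
  return scores, context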
| 5,054 | 32.476821 | 79 | py |
seq2seq | seq2seq-master/seq2seq/metrics/metric_specs.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of MetricSpecs for training and evaluation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import abc
import numpy as np
import six
import tensorflow as tf
from tensorflow.contrib import metrics
from tensorflow.contrib.learn import MetricSpec
from seq2seq.data import postproc
from seq2seq.configurable import Configurable
from seq2seq.metrics import rouge
from seq2seq.metrics import bleu
def accumulate_strings(values, name="strings"):
"""Accumulates strings into a vector.
Args:
values: A 1-d string tensor that contains values to add to the accumulator.
Returns:
A tuple (value_tensor, update_op).
"""
tf.assert_type(values, tf.string)
strings = tf.Variable(
name=name,
initial_value=[],
dtype=tf.string,
trainable=False,
collections=[],
validate_shape=True)
value_tensor = tf.identity(strings)
update_op = tf.assign(
ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
return value_tensor, update_op
@six.add_metaclass(abc.ABCMeta)
class TextMetricSpec(Configurable, MetricSpec):
"""Abstract class for text-based metrics calculated based on
hypotheses and references. Subclasses must implement `metric_fn`.
Args:
name: A name for the metric
    separator: A separator used to join predicted tokens. Defaults to space.
eos_token: A string token used to find the end of a sequence. Hypotheses
      and references will be sliced until this token is found.
"""
def __init__(self, params, name):
# We don't call the super constructor on purpose
#pylint: disable=W0231
"""Initializer"""
Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.EVAL)
self._name = name
self._eos_token = self.params["eos_token"]
self._sos_token = self.params["sos_token"]
self._separator = self.params["separator"]
self._postproc_fn = None
if self.params["postproc_fn"]:
self._postproc_fn = locate(self.params["postproc_fn"])
if self._postproc_fn is None:
raise ValueError("postproc_fn not found: {}".format(
self.params["postproc_fn"]))
@property
def name(self):
"""Name of the metric"""
return self._name
@staticmethod
def default_params():
return {
"sos_token": "SEQUENCE_START",
"eos_token": "SEQUENCE_END",
"separator": " ",
"postproc_fn": "",
}
def create_metric_ops(self, _inputs, labels, predictions):
"""Creates (value, update_op) tensors
"""
with tf.variable_scope(self._name):
# Join tokens into single strings
predictions_flat = tf.reduce_join(
predictions["predicted_tokens"], 1, separator=self._separator)
labels_flat = tf.reduce_join(
labels["target_tokens"], 1, separator=self._separator)
sources_value, sources_update = accumulate_strings(
values=predictions_flat, name="sources")
targets_value, targets_update = accumulate_strings(
values=labels_flat, name="targets")
metric_value = tf.py_func(
func=self._py_func,
inp=[sources_value, targets_value],
Tout=tf.float32,
name="value")
with tf.control_dependencies([sources_update, targets_update]):
update_op = tf.identity(metric_value, name="update_op")
return metric_value, update_op
def _py_func(self, hypotheses, references):
"""Wrapper function that converts tensors to unicode and slices
them until the EOS token is found.
"""
# Deal with byte chars
if hypotheses.dtype.kind == np.dtype("U"):
hypotheses = np.char.encode(hypotheses, "utf-8")
if references.dtype.kind == np.dtype("U"):
references = np.char.encode(references, "utf-8")
# Convert back to unicode object
hypotheses = [_.decode("utf-8") for _ in hypotheses]
references = [_.decode("utf-8") for _ in references]
# Slice all hypotheses and references up to SOS -> EOS
sliced_hypotheses = [postproc.slice_text(
_, self._eos_token, self._sos_token) for _ in hypotheses]
sliced_references = [postproc.slice_text(
_, self._eos_token, self._sos_token) for _ in references]
# Apply postprocessing function
if self._postproc_fn:
sliced_hypotheses = [self._postproc_fn(_) for _ in sliced_hypotheses]
sliced_references = [self._postproc_fn(_) for _ in sliced_references]
return self.metric_fn(sliced_hypotheses, sliced_references) #pylint: disable=E1102
def metric_fn(self, hypotheses, references):
"""Calculates the value of the metric.
Args:
hypotheses: A python list of strings, each corresponding to a
single hypothesis/example.
references: A python list of strings, each corresponds to a single
reference. Must have the same number of elements of `hypotheses`.
Returns:
A float value.
"""
raise NotImplementedError()
class BleuMetricSpec(TextMetricSpec):
"""Calculates BLEU score using the Moses multi-bleu.perl script.
"""
def __init__(self, params):
super(BleuMetricSpec, self).__init__(params, "bleu")
def metric_fn(self, hypotheses, references):
return bleu.moses_multi_bleu(hypotheses, references, lowercase=False)
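# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, never called by
# library code). Besides being used as a MetricSpec, metric_fn can be
# exercised directly on plain python strings for quick sanity checks; the
# sentences below are toy assumptions.
def _example_bleu_metric_fn():
  """Hedged sketch: scores a single hypothesis against a single reference."""
  spec = BleuMetricSpec(params={})
  return spec.metric_fn(
      hypotheses=["the cat sat on the mat"],
      references=["the cat sat on the mat"])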
class RougeMetricSpec(TextMetricSpec):
"""Calculates BLEU score using the Moses multi-bleu.perl script.
"""
def __init__(self, params, **kwargs):
if not params["rouge_type"]:
raise ValueError("You must provide a rouge_type for ROUGE")
super(RougeMetricSpec, self).__init__(
params, params["rouge_type"], **kwargs)
self._rouge_type = self.params["rouge_type"]
@staticmethod
def default_params():
params = TextMetricSpec.default_params()
params.update({
"rouge_type": "",
})
return params
def metric_fn(self, hypotheses, references):
if not hypotheses or not references:
return np.float32(0.0)
return np.float32(rouge.rouge(hypotheses, references)[self._rouge_type])
class LogPerplexityMetricSpec(MetricSpec, Configurable):
"""A MetricSpec to calculate straming log perplexity"""
def __init__(self, params):
"""Initializer"""
# We don't call the super constructor on purpose
#pylint: disable=W0231
Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.EVAL)
@staticmethod
def default_params():
return {}
@property
def name(self):
"""Name of the metric"""
return "log_perplexity"
def create_metric_ops(self, _inputs, labels, predictions):
"""Creates the metric op"""
loss_mask = tf.sequence_mask(
lengths=tf.to_int32(labels["target_len"] - 1),
maxlen=tf.to_int32(tf.shape(predictions["losses"])[1]))
return metrics.streaming_mean(predictions["losses"], loss_mask)
| 7,492 | 31.158798 | 86 | py |
seq2seq | seq2seq-master/seq2seq/metrics/bleu.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BLEU metric implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import subprocess
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
def moses_multi_bleu(hypotheses, references, lowercase=False):
"""Calculate the bleu score for hypotheses and references
  using the MOSES multi-bleu.perl script.
Args:
hypotheses: A numpy array of strings where each string is a single example.
references: A numpy array of strings where each string is a single example.
lowercase: If true, pass the "-lc" flag to the multi-bleu script
Returns:
The BLEU score as a float32 value.
"""
if np.size(hypotheses) == 0:
return np.float32(0.0)
# Get MOSES multi-bleu script
try:
multi_bleu_path, _ = urllib.request.urlretrieve(
"https://raw.githubusercontent.com/moses-smt/mosesdecoder/"
"master/scripts/generic/multi-bleu.perl")
os.chmod(multi_bleu_path, 0o755)
except: #pylint: disable=W0702
tf.logging.info("Unable to fetch multi-bleu.perl script, using local.")
metrics_dir = os.path.dirname(os.path.realpath(__file__))
bin_dir = os.path.abspath(os.path.join(metrics_dir, "..", "..", "bin"))
multi_bleu_path = os.path.join(bin_dir, "tools/multi-bleu.perl")
# Dump hypotheses and references to tempfiles
hypothesis_file = tempfile.NamedTemporaryFile()
hypothesis_file.write("\n".join(hypotheses).encode("utf-8"))
hypothesis_file.write(b"\n")
hypothesis_file.flush()
reference_file = tempfile.NamedTemporaryFile()
reference_file.write("\n".join(references).encode("utf-8"))
reference_file.write(b"\n")
reference_file.flush()
# Calculate BLEU using multi-bleu script
with open(hypothesis_file.name, "r") as read_pred:
bleu_cmd = [multi_bleu_path]
if lowercase:
bleu_cmd += ["-lc"]
bleu_cmd += [reference_file.name]
try:
bleu_out = subprocess.check_output(
bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT)
bleu_out = bleu_out.decode("utf-8")
bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1)
bleu_score = float(bleu_score)
except subprocess.CalledProcessError as error:
if error.output is not None:
tf.logging.warning("multi-bleu.perl script returned non-zero exit code")
tf.logging.warning(error.output)
bleu_score = np.float32(0.0)
# Close temp files
hypothesis_file.close()
reference_file.close()
return np.float32(bleu_score)
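# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, never called by
# library code). The sentences below are toy assumptions; identical
# hypothesis/reference pairs should score close to 100. Running it requires
# perl and the multi-bleu.perl script to be reachable.
def _example_moses_multi_bleu():
  """Hedged sketch: scores a toy hypothesis against a toy reference."""
  hypotheses = np.array(["The brown fox jumps over the dog"])
  references = np.array(["The brown fox jumps over the dog"])
  return moses_multi_bleu(hypotheses, references, lowercase=True)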
| 3,202 | 33.074468 | 80 | py |
seq2seq | seq2seq-master/seq2seq/metrics/rouge.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ROUGe metric implementation.
This is a modified and slightly extended version of
https://github.com/miso-belica/sumy/blob/dev/sumy/evaluation/rouge.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import numpy as np
#pylint: disable=C0103
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
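# Illustrative example (not part of the original module, never called by
# library code): for the token list ["the", "cat", "sat"], _get_ngrams(2, ...)
# returns the bigram set {("the", "cat"), ("cat", "sat")}.
def _example_get_ngrams():
  """Hedged sketch: bigrams of a toy token list."""
  return _get_ngrams(2, ["the", "cat", "sat"])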
def _split_into_words(sentences):
"""Splits multiple sentences into words and flattens the result"""
return list(itertools.chain(*[_.split(" ") for _ in sentences]))
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
words = _split_into_words(sentences)
return _get_ngrams(n, words)
def _len_lcs(x, y):
"""
Returns the length of the Longest Common Subsequence between sequences x
and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""
Computes the length of the longest common subsequence (lcs) between two
  strings. The implementation below uses a dynamic programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
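# Illustrative example (not part of the original module, never called by
# library code): for x = ["a", "b", "c", "d"] and y = ["a", "c", "d"] the
# longest common subsequence is ("a", "c", "d"), so _len_lcs(x, y) == 3.
def _example_len_lcs():
  """Hedged sketch: LCS length of two toy token sequences."""
  return _len_lcs(["a", "b", "c", "d"], ["a", "c", "d"])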
def _recon_lcs(x, y):
"""
  Returns the Longest Common Subsequence between x and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns:
sequence: LCS of x and y
"""
i, j = len(x), len(y)
table = _lcs(x, y)
def _recon(i, j):
"""private recon calculation"""
if i == 0 or j == 0:
return []
elif x[i - 1] == y[j - 1]:
return _recon(i - 1, j - 1) + [(x[i - 1], i)]
elif table[i - 1, j] > table[i, j - 1]:
return _recon(i - 1, j)
else:
return _recon(i, j - 1)
recon_tuple = tuple(map(lambda x: x[0], _recon(i, j)))
return recon_tuple
def rouge_n(evaluated_sentences, reference_sentences, n=2):
"""
Computes ROUGE-N of two text collections of sentences.
  Source: http://research.microsoft.com/en-us/um/people/cyl/download/
papers/rouge-working-note-v1.3.1.pdf
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
    reference_sentences: The sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
A tuple (f1, precision, recall) for ROUGE-N
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
reference_ngrams = _get_word_ngrams(n, reference_sentences)
reference_count = len(reference_ngrams)
evaluated_count = len(evaluated_ngrams)
# Gets the overlapping ngrams between evaluated and reference
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if evaluated_count == 0:
precision = 0.0
else:
precision = overlapping_count / evaluated_count
if reference_count == 0:
recall = 0.0
else:
recall = overlapping_count / reference_count
f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
# return overlapping_count / reference_count
return f1_score, precision, recall
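# ---------------------------------------------------------------------------
# Illustrative example (not part of the original module, never called by
# library code). With the toy sentences below most hypothesis bigrams also
# appear in the reference, so ROUGE-2 precision and recall are both high.
def _example_rouge_n():
  """Hedged sketch: ROUGE-2 (f1, precision, recall) for a toy pair."""
  hypothesis = ["the cat was found under the bed"]
  reference = ["the cat was under the bed"]
  return rouge_n(hypothesis, reference, n=2)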
def _f_p_r_lcs(llcs, m, n):
"""
Computes the LCS-based F-measure score
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta**2)) * r_lcs * p_lcs
denom = r_lcs + ((beta**2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs, p_lcs, r_lcs
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (sentence level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
    reference_sentences: The sentences from the reference set
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
reference_words = _split_into_words(reference_sentences)
evaluated_words = _split_into_words(evaluated_sentences)
m = len(reference_words)
n = len(evaluated_words)
lcs = _len_lcs(evaluated_words, reference_words)
return _f_p_r_lcs(lcs, m, n)
def _union_lcs(evaluated_sentences, reference_sentence):
"""
Returns LCS_u(r_i, C) which is the LCS score of the union longest common
subsequence between reference sentence ri and candidate summary C. For example
if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and
c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is
“w1 w2” and the longest common subsequence of r_i and c2 is “w1 w3 w5”. The
union longest common subsequence of r_i, c1, and c2 is “w1 w2 w3 w5” and
LCS_u(r_i, C) = 4/5.
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
float: LCS_u(r_i, C)
ValueError:
Raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
lcs_union = set()
reference_words = _split_into_words([reference_sentence])
combined_lcs_length = 0
for eval_s in evaluated_sentences:
evaluated_words = _split_into_words([eval_s])
lcs = set(_recon_lcs(reference_words, evaluated_words))
combined_lcs_length += len(lcs)
lcs_union = lcs_union.union(lcs)
union_lcs_count = len(lcs_union)
union_lcs_value = union_lcs_count / combined_lcs_length
return union_lcs_value
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the summarizer
    reference_sentences: The sentences from the reference set
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
# total number of words in reference sentences
m = len(_split_into_words(reference_sentences))
# total number of words in evaluated sentences
n = len(_split_into_words(evaluated_sentences))
union_lcs_sum_across_all_references = 0
for ref_s in reference_sentences:
union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,
ref_s)
return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)
def rouge(hypotheses, references):
"""Calculates average rouge scores for a list of hypotheses and
references"""
# Filter out hyps that are of 0 length
# hyps_and_refs = zip(hypotheses, references)
# hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]
# hypotheses, references = zip(*hyps_and_refs)
# Calculate ROUGE-1 F1, precision, recall scores
rouge_1 = [
rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)
]
rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))
# Calculate ROUGE-2 F1, precision, recall scores
rouge_2 = [
rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)
]
rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))
# Calculate ROUGE-L F1, precision, recall scores
rouge_l = [
rouge_l_sentence_level([hyp], [ref])
for hyp, ref in zip(hypotheses, references)
]
rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))
return {
"rouge_1/f_score": rouge_1_f,
"rouge_1/r_score": rouge_1_r,
"rouge_1/p_score": rouge_1_p,
"rouge_2/f_score": rouge_2_f,
"rouge_2/r_score": rouge_2_r,
"rouge_2/p_score": rouge_2_p,
"rouge_l/f_score": rouge_l_f,
"rouge_l/r_score": rouge_l_r,
"rouge_l/p_score": rouge_l_p,
}
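# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, never called by
# library code). `rouge` expects parallel lists of hypothesis and reference
# strings; the toy corpus below is an assumption for this example.
def _example_rouge():
  """Hedged sketch: averaged ROUGE scores over a toy two-pair corpus."""
  hypotheses = ["the cat was found under the bed", "a quick brown fox"]
  references = ["the cat was under the bed", "the quick brown fox"]
  scores = rouge(hypotheses, references)
  return (scores["rouge_1/f_score"],
          scores["rouge_2/f_score"],
          scores["rouge_l/f_score"])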
| 10,972 | 28.980874 | 80 | py |
seq2seq | seq2seq-master/seq2seq/metrics/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Collection of metric-related functions
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| 733 | 35.7 | 74 | py |
seq2seq | seq2seq-master/seq2seq/encoders/image_encoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image encoder classes
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets.inception_v3 \
import inception_v3_base
from seq2seq.encoders.encoder import Encoder, EncoderOutput
class InceptionV3Encoder(Encoder):
"""
  An encoder that runs images through an InceptionV3 convolutional network.
  The flattened spatial feature maps are used as attention values and the
  pooled feature vector as the final state.
Params:
resize_height: Resize the image to this height before feeding it
into the convolutional network.
resize_width: Resize the image to this width before feeding it
into the convolutional network.
"""
def __init__(self, params, mode, name="image_encoder"):
super(InceptionV3Encoder, self).__init__(params, mode, name)
@staticmethod
def default_params():
return {
"resize_height": 299,
"resize_width": 299,
}
def encode(self, inputs):
inputs = tf.image.resize_images(
images=inputs,
size=[self.params["resize_height"], self.params["resize_width"]],
method=tf.image.ResizeMethod.BILINEAR)
outputs, _ = inception_v3_base(tf.to_float(inputs))
output_shape = outputs.get_shape() #pylint: disable=E1101
shape_list = output_shape.as_list()
    # Take attention over output elements in the width and height dimensions:
# Shape: [B, W*H, ...]
outputs_flat = tf.reshape(outputs, [shape_list[0], -1, shape_list[-1]])
    # Final state is the pooled output, flattened
    # Shape: [B, output_depth]
    final_state = tf.contrib.slim.avg_pool2d(
        outputs, output_shape[1:3], padding="VALID", scope="pool")
    final_state = tf.contrib.slim.flatten(final_state, scope="flatten")
return EncoderOutput(
outputs=outputs_flat,
final_state=final_state,
attention_values=outputs_flat,
attention_values_length=tf.shape(outputs_flat)[1])
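# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, never called by
# library code). The batch size and input resolution below are arbitrary
# assumptions; images are resized internally before the InceptionV3 network.
def _example_inception_encoder_usage():
  """Hedged sketch: encodes a batch of random images."""
  images = tf.random_uniform([2, 320, 320, 3], maxval=255)
  encoder = InceptionV3Encoder(
      params={}, mode=tf.contrib.learn.ModeKeys.TRAIN)
  # Returns an EncoderOutput whose outputs/attention_values are the flattened
  # spatial feature maps.
  return encoder(images)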
| 2,469 | 31.5 | 75 | py |
seq2seq | seq2seq-master/seq2seq/encoders/rnn_encoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of RNN encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import rnn
from seq2seq.encoders.encoder import Encoder, EncoderOutput
from seq2seq.training import utils as training_utils
def _unpack_cell(cell):
"""Unpack the cells because the stack_bidirectional_dynamic_rnn
expects a list of cells, one per layer."""
if isinstance(cell, tf.contrib.rnn.MultiRNNCell):
return cell._cells #pylint: disable=W0212
else:
return [cell]
def _default_rnn_cell_params():
"""Creates default parameters used by multiple RNN encoders.
"""
return {
"cell_class": "BasicLSTMCell",
"cell_params": {
"num_units": 128
},
"dropout_input_keep_prob": 1.0,
"dropout_output_keep_prob": 1.0,
"num_layers": 1,
"residual_connections": False,
"residual_combiner": "add",
"residual_dense": False
}
def _toggle_dropout(cell_params, mode):
"""Disables dropout during eval/inference mode
"""
cell_params = copy.deepcopy(cell_params)
if mode != tf.contrib.learn.ModeKeys.TRAIN:
cell_params["dropout_input_keep_prob"] = 1.0
cell_params["dropout_output_keep_prob"] = 1.0
return cell_params
class UnidirectionalRNNEncoder(Encoder):
"""
A unidirectional RNN encoder. Stacking should be performed as
part of the cell.
Args:
cell: An instance of tf.contrib.rnn.RNNCell
name: A name for the encoder
"""
def __init__(self, params, mode, name="forward_rnn_encoder"):
super(UnidirectionalRNNEncoder, self).__init__(params, mode, name)
self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)
@staticmethod
def default_params():
return {
"rnn_cell": _default_rnn_cell_params(),
"init_scale": 0.04,
}
def encode(self, inputs, sequence_length, **kwargs):
scope = tf.get_variable_scope()
scope.set_initializer(tf.random_uniform_initializer(
-self.params["init_scale"],
self.params["init_scale"]))
cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
outputs, state = tf.nn.dynamic_rnn(
cell=cell,
inputs=inputs,
sequence_length=sequence_length,
dtype=tf.float32,
**kwargs)
return EncoderOutput(
outputs=outputs,
final_state=state,
attention_values=outputs,
attention_values_length=sequence_length)
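# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, never called by
# library code). Shapes are arbitrary assumptions; with empty params the
# encoder falls back to the defaults above (a single BasicLSTMCell).
def _example_unidirectional_encoder_usage():
  """Hedged sketch: encodes a batch of random embedded sequences."""
  batch_size, time_steps, embed_dim = 4, 10, 32
  inputs = tf.random_normal([batch_size, time_steps, embed_dim])
  lengths = tf.fill([batch_size], time_steps)
  encoder = UnidirectionalRNNEncoder(
      params={}, mode=tf.contrib.learn.ModeKeys.TRAIN)
  # Returns an EncoderOutput(outputs, final_state, attention_values,
  # attention_values_length) namedtuple.
  return encoder(inputs, lengths)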
class BidirectionalRNNEncoder(Encoder):
"""
A bidirectional RNN encoder. Uses the same cell for both the
forward and backward RNN. Stacking should be performed as part of
the cell.
Args:
cell: An instance of tf.contrib.rnn.RNNCell
name: A name for the encoder
"""
def __init__(self, params, mode, name="bidi_rnn_encoder"):
super(BidirectionalRNNEncoder, self).__init__(params, mode, name)
self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)
@staticmethod
def default_params():
return {
"rnn_cell": _default_rnn_cell_params(),
"init_scale": 0.04,
}
def encode(self, inputs, sequence_length, **kwargs):
scope = tf.get_variable_scope()
scope.set_initializer(tf.random_uniform_initializer(
-self.params["init_scale"],
self.params["init_scale"]))
cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
outputs, states = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
inputs=inputs,
sequence_length=sequence_length,
dtype=tf.float32,
**kwargs)
# Concatenate outputs and states of the forward and backward RNNs
outputs_concat = tf.concat(outputs, 2)
return EncoderOutput(
outputs=outputs_concat,
final_state=states,
attention_values=outputs_concat,
attention_values_length=sequence_length)
class StackBidirectionalRNNEncoder(Encoder):
"""
A stacked bidirectional RNN encoder. Uses the same cell for both the
forward and backward RNN. Stacking should be performed as part of
the cell.
Args:
cell: An instance of tf.contrib.rnn.RNNCell
name: A name for the encoder
"""
def __init__(self, params, mode, name="stacked_bidi_rnn_encoder"):
super(StackBidirectionalRNNEncoder, self).__init__(params, mode, name)
self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)
@staticmethod
def default_params():
return {
"rnn_cell": _default_rnn_cell_params(),
"init_scale": 0.04,
}
def encode(self, inputs, sequence_length, **kwargs):
scope = tf.get_variable_scope()
scope.set_initializer(tf.random_uniform_initializer(
-self.params["init_scale"],
self.params["init_scale"]))
cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
cells_fw = _unpack_cell(cell_fw)
cells_bw = _unpack_cell(cell_bw)
result = rnn.stack_bidirectional_dynamic_rnn(
cells_fw=cells_fw,
cells_bw=cells_bw,
inputs=inputs,
dtype=tf.float32,
sequence_length=sequence_length,
**kwargs)
outputs_concat, _output_state_fw, _output_state_bw = result
final_state = (_output_state_fw, _output_state_bw)
return EncoderOutput(
outputs=outputs_concat,
final_state=final_state,
attention_values=outputs_concat,
attention_values_length=sequence_length)
| 6,202 | 29.55665 | 76 | py |
seq2seq | seq2seq-master/seq2seq/encoders/encoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abstract base class for encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from collections import namedtuple
import six
from seq2seq.configurable import Configurable
from seq2seq.graph_module import GraphModule
EncoderOutput = namedtuple(
"EncoderOutput",
"outputs final_state attention_values attention_values_length")
@six.add_metaclass(abc.ABCMeta)
class Encoder(GraphModule, Configurable):
"""Abstract encoder class. All encoders should inherit from this.
Args:
params: A dictionary of hyperparameters for the encoder.
name: A variable scope for the encoder graph.
"""
def __init__(self, params, mode, name):
GraphModule.__init__(self, name)
Configurable.__init__(self, params, mode)
def _build(self, inputs, *args, **kwargs):
return self.encode(inputs, *args, **kwargs)
@abc.abstractmethod
def encode(self, *args, **kwargs):
"""
Encodes an input sequence.
Args:
inputs: The inputs to encode. A float32 tensor of shape [B, T, ...].
      sequence_length: The length of each input. An int32 tensor of shape [B].
Returns:
An `EncoderOutput` tuple containing the outputs and final state.
"""
raise NotImplementedError
| 1,874 | 28.296875 | 78 | py |
seq2seq | seq2seq-master/seq2seq/encoders/pooling_encoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An encoder that pools over embeddings, as described in
https://arxiv.org/abs/1611.02344.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pydoc import locate
import numpy as np
import tensorflow as tf
from seq2seq.encoders.encoder import Encoder, EncoderOutput
def position_encoding(sentence_size, embedding_size):
"""
Position Encoding described in section 4.1 of
End-To-End Memory Networks (https://arxiv.org/abs/1503.08895).
Args:
sentence_size: length of the sentence
embedding_size: dimensionality of the embeddings
Returns:
A numpy array of shape [sentence_size, embedding_size] containing
the fixed position encodings for each sentence position.
"""
encoding = np.ones((sentence_size, embedding_size), dtype=np.float32)
ls = sentence_size + 1
le = embedding_size + 1
for k in range(1, le):
for j in range(1, ls):
encoding[j-1, k-1] = (1.0 - j/float(ls)) - (
k / float(le)) * (1. - 2. * j/float(ls))
return encoding
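# A small, hedged illustration (added, not part of the original file): the
# encoding depends only on the two sizes, so it can be precomputed once and
# reused for every batch.
#
#   pe = position_encoding(sentence_size=5, embedding_size=8)
#   assert pe.shape == (5, 8)   # one row of encodings per sentence position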
def _create_position_embedding(embedding_dim, num_positions, lengths, maxlen):
"""Creates position embeddings.
Args:
embedding_dim: Dimensionality of the embeddings. An integer.
num_positions: The number of positions to be embedded. For example,
if you have inputs of length up to 100, this should be 100. An integer.
lengths: The lengths of the inputs to create position embeddings for.
An int32 tensor of shape `[batch_size]`.
maxlen: The maximum length of the input sequence to create position
embeddings for. An int32 tensor.
Returns:
A tensor of shape `[batch_size, maxlen, embedding_dim]` that contains
embeddings for each position. All elements past `lengths` are zero.
"""
# Create constant position encodings
position_encodings = tf.constant(
position_encoding(num_positions, embedding_dim),
name="position_encoding")
# Slice to size of current sequence
pe_slice = position_encodings[:maxlen, :]
# Replicate encodings for each element in the batch
batch_size = tf.shape(lengths)[0]
pe_batch = tf.tile([pe_slice], [batch_size, 1, 1])
# Mask out positions that are padded
positions_mask = tf.sequence_mask(
lengths=lengths, maxlen=maxlen, dtype=tf.float32)
positions_embed = pe_batch * tf.expand_dims(positions_mask, 2)
return positions_embed
class PoolingEncoder(Encoder):
"""An encoder that pools over embeddings, as described in
https://arxiv.org/abs/1611.02344. The encoder supports optional positions
embeddings and a configurable pooling window.
Params:
dropout_keep_prob: Dropout keep probability applied to the embeddings.
pooling_fn: The 1-d pooling function to use, e.g.
`tensorflow.layers.average_pooling1d`.
pool_size: The pooling window, passed as `pool_size` to
the pooling function.
strides: The stride during pooling, passed as `strides`
      to the pooling function.
position_embeddings.enable: If true, add position embeddings to the
inputs before pooling.
position_embeddings.combiner_fn: Function used to combine the
position embeddings with the inputs. For example, `tensorflow.add`.
position_embeddings.num_positions: Size of the position embedding matrix.
This should be set to the maximum sequence length of the inputs.
"""
def __init__(self, params, mode, name="pooling_encoder"):
super(PoolingEncoder, self).__init__(params, mode, name)
self._pooling_fn = locate(self.params["pooling_fn"])
self._combiner_fn = locate(self.params["position_embeddings.combiner_fn"])
@staticmethod
def default_params():
return {
"dropout_keep_prob": 0.8,
"pooling_fn": "tensorflow.layers.average_pooling1d",
"pool_size": 5,
"strides": 1,
"position_embeddings.enable": True,
"position_embeddings.combiner_fn": "tensorflow.multiply",
"position_embeddings.num_positions": 100,
}
def encode(self, inputs, sequence_length):
if self.params["position_embeddings.enable"]:
positions_embed = _create_position_embedding(
embedding_dim=inputs.get_shape().as_list()[-1],
num_positions=self.params["position_embeddings.num_positions"],
lengths=sequence_length,
maxlen=tf.shape(inputs)[1])
inputs = self._combiner_fn(inputs, positions_embed)
# Apply dropout
inputs = tf.contrib.layers.dropout(
inputs=inputs,
keep_prob=self.params["dropout_keep_prob"],
is_training=self.mode == tf.contrib.learn.ModeKeys.TRAIN)
outputs = self._pooling_fn(
inputs=inputs,
pool_size=self.params["pool_size"],
strides=self.params["strides"],
padding="SAME")
# Final state is the average representation of the pooled embeddings
final_state = tf.reduce_mean(outputs, 1)
return EncoderOutput(
outputs=outputs,
final_state=final_state,
attention_values=inputs,
attention_values_length=sequence_length)
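# --- Usage sketch (added for illustration, not part of the original file) ---
# A hedged example of overriding the default pooling behaviour via `params`;
# `embedded_inputs` and `lengths` are hypothetical tensors from the caller.
#
#   encoder = PoolingEncoder(
#       params={"pooling_fn": "tensorflow.layers.max_pooling1d",
#               "pool_size": 3},
#       mode=tf.contrib.learn.ModeKeys.TRAIN)
#   encoder_output = encoder(embedded_inputs, lengths)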
| 5,639 | 35.387097 | 78 | py |
seq2seq | seq2seq-master/seq2seq/encoders/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of encoders"""
import seq2seq.encoders.encoder
import seq2seq.encoders.rnn_encoder
from seq2seq.encoders.rnn_encoder import *
from seq2seq.encoders.image_encoder import *
from seq2seq.encoders.pooling_encoder import PoolingEncoder
from seq2seq.encoders.conv_encoder import ConvEncoder
| 877 | 37.173913 | 74 | py |
seq2seq | seq2seq-master/seq2seq/encoders/conv_encoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A deep convolutional encoder, as described in
https://arxiv.org/abs/1611.02344.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pydoc import locate
import tensorflow as tf
from seq2seq.encoders.encoder import Encoder, EncoderOutput
from seq2seq.encoders.pooling_encoder import _create_position_embedding
class ConvEncoder(Encoder):
"""A deep convolutional encoder, as described in
https://arxiv.org/abs/1611.02344. The encoder supports optional positions
embeddings.
Params:
attention_cnn.units: Number of units in `cnn_a`. Same in each layer.
attention_cnn.kernel_size: Kernel size for `cnn_a`.
attention_cnn.layers: Number of layers in `cnn_a`.
embedding_dropout_keep_prob: Dropout keep probability
applied to the embeddings.
output_cnn.units: Number of units in `cnn_c`. Same in each layer.
output_cnn.kernel_size: Kernel size for `cnn_c`.
output_cnn.layers: Number of layers in `cnn_c`.
position_embeddings.enable: If true, add position embeddings to the
inputs before pooling.
position_embeddings.combiner_fn: Function used to combine the
position embeddings with the inputs. For example, `tensorflow.add`.
position_embeddings.num_positions: Size of the position embedding matrix.
This should be set to the maximum sequence length of the inputs.
"""
def __init__(self, params, mode, name="conv_encoder"):
super(ConvEncoder, self).__init__(params, mode, name)
self._combiner_fn = locate(self.params["position_embeddings.combiner_fn"])
@staticmethod
def default_params():
return {
"attention_cnn.units": 512,
"attention_cnn.kernel_size": 3,
"attention_cnn.layers": 15,
"embedding_dropout_keep_prob": 0.8,
"output_cnn.units": 256,
"output_cnn.kernel_size": 3,
"output_cnn.layers": 5,
"position_embeddings.enable": True,
"position_embeddings.combiner_fn": "tensorflow.multiply",
"position_embeddings.num_positions": 100,
}
def encode(self, inputs, sequence_length):
if self.params["position_embeddings.enable"]:
positions_embed = _create_position_embedding(
embedding_dim=inputs.get_shape().as_list()[-1],
num_positions=self.params["position_embeddings.num_positions"],
lengths=sequence_length,
maxlen=tf.shape(inputs)[1])
inputs = self._combiner_fn(inputs, positions_embed)
# Apply dropout to embeddings
inputs = tf.contrib.layers.dropout(
inputs=inputs,
keep_prob=self.params["embedding_dropout_keep_prob"],
is_training=self.mode == tf.contrib.learn.ModeKeys.TRAIN)
with tf.variable_scope("cnn_a"):
cnn_a_output = inputs
for layer_idx in range(self.params["attention_cnn.layers"]):
next_layer = tf.contrib.layers.conv2d(
inputs=cnn_a_output,
num_outputs=self.params["attention_cnn.units"],
kernel_size=self.params["attention_cnn.kernel_size"],
padding="SAME",
activation_fn=None)
# Add a residual connection, except for the first layer
if layer_idx > 0:
next_layer += cnn_a_output
cnn_a_output = tf.tanh(next_layer)
with tf.variable_scope("cnn_c"):
cnn_c_output = inputs
for layer_idx in range(self.params["output_cnn.layers"]):
next_layer = tf.contrib.layers.conv2d(
inputs=cnn_c_output,
num_outputs=self.params["output_cnn.units"],
kernel_size=self.params["output_cnn.kernel_size"],
padding="SAME",
activation_fn=None)
# Add a residual connection, except for the first layer
if layer_idx > 0:
next_layer += cnn_c_output
cnn_c_output = tf.tanh(next_layer)
final_state = tf.reduce_mean(cnn_c_output, 1)
return EncoderOutput(
outputs=cnn_a_output,
final_state=final_state,
attention_values=cnn_c_output,
attention_values_length=sequence_length)
| 4,663 | 37.229508 | 78 | py |
seq2seq | seq2seq-master/seq2seq/data/postproc.py | # -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of commonly used post-processing functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def strip_bpe(text):
"""Deodes text that was processed using BPE from
https://github.com/rsennrich/subword-nmt"""
return text.replace("@@ ", "").strip()
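# Hedged example (added, not part of the original file):
#   strip_bpe("th@@ is is a te@@ st") -> "this is a test"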
def decode_sentencepiece(text):
"""Decodes text that uses https://github.com/google/sentencepiece encoding.
Assumes that pieces are separated by a space"""
return "".join(text.split(" ")).replace("▁", " ").strip()
def slice_text(text,
eos_token="SEQUENCE_END",
sos_token="SEQUENCE_START"):
"""Slices text from SEQUENCE_START to SEQUENCE_END, not including
these special tokens.
"""
eos_index = text.find(eos_token)
text = text[:eos_index] if eos_index > -1 else text
sos_index = text.find(sos_token)
text = text[sos_index+len(sos_token):] if sos_index > -1 else text
return text.strip()
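# Hedged example (added, not part of the original file):
#   slice_text("SEQUENCE_START how are you SEQUENCE_END PAD") -> "how are you"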
| 1,617 | 34.173913 | 77 | py |
seq2seq | seq2seq-master/seq2seq/data/sequence_example_decoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A decoder for tf.SequenceExample"""
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import data_decoder
class TFSEquenceExampleDecoder(data_decoder.DataDecoder):
"""A decoder for TensorFlow Examples.
Decoding Example proto buffers is comprised of two stages: (1) Example parsing
and (2) tensor manipulation.
In the first stage, the tf.parse_example function is called with a list of
FixedLenFeatures and SparseLenFeatures. These instances tell TF how to parse
the example. The output of this stage is a set of tensors.
In the second stage, the resulting tensors are manipulated to provide the
requested 'item' tensors.
To perform this decoding operation, an ExampleDecoder is given a list of
ItemHandlers. Each ItemHandler indicates the set of features for stage 1 and
contains the instructions for post_processing its tensors for stage 2.
"""
def __init__(self, context_keys_to_features, sequence_keys_to_features,
items_to_handlers):
"""Constructs the decoder.
Args:
      context_keys_to_features: a dictionary from TF-SequenceExample context
        keys to either tf.VarLenFeature or tf.FixedLenFeature instances. See
        tensorflow's parsing_ops.py.
      sequence_keys_to_features: a dictionary from TF-SequenceExample feature
        list keys to either tf.VarLenFeature or tf.FixedLenFeature instances.
items_to_handlers: a dictionary from items (strings) to ItemHandler
instances. Note that the ItemHandler's are provided the keys that they
use to return the final item Tensors.
"""
self._context_keys_to_features = context_keys_to_features
self._sequence_keys_to_features = sequence_keys_to_features
self._items_to_handlers = items_to_handlers
def list_items(self):
"""See base class."""
return list(self._items_to_handlers.keys())
def decode(self, serialized_example, items=None):
"""Decodes the given serialized TF-example.
Args:
serialized_example: a serialized TF-example tensor.
items: the list of items to decode. These must be a subset of the item
keys in self._items_to_handlers. If `items` is left as None, then all
of the items in self._items_to_handlers are decoded.
Returns:
      The decoded items, a list of tensors.
"""
context, sequence = tf.parse_single_sequence_example(
serialized_example, self._context_keys_to_features,
self._sequence_keys_to_features)
# Merge context and sequence features
example = {}
example.update(context)
example.update(sequence)
all_features = {}
all_features.update(self._context_keys_to_features)
all_features.update(self._sequence_keys_to_features)
# Reshape non-sparse elements just once:
for k, value in all_features.items():
if isinstance(value, tf.FixedLenFeature):
example[k] = tf.reshape(example[k], value.shape)
if not items:
items = self._items_to_handlers.keys()
outputs = []
for item in items:
handler = self._items_to_handlers[item]
keys_to_tensors = {key: example[key] for key in handler.keys}
outputs.append(handler.tensors_to_item(keys_to_tensors))
return outputs
| 3,625 | 39.288889 | 80 | py |
seq2seq | seq2seq-master/seq2seq/data/parallel_data_provider.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Data Provder that reads parallel (aligned) data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import data_provider
from tensorflow.contrib.slim.python.slim.data import parallel_reader
from seq2seq.data import split_tokens_decoder
def make_parallel_data_provider(data_sources_source,
data_sources_target,
reader=tf.TextLineReader,
num_samples=None,
source_delimiter=" ",
target_delimiter=" ",
**kwargs):
"""Creates a DataProvider that reads parallel text data.
Args:
data_sources_source: A list of data sources for the source text files.
data_sources_target: A list of data sources for the target text files.
Can be None for inference mode.
    num_samples: Optional, the number of records in the dataset.
    source_delimiter: Split tokens in the source data on this delimiter.
      Defaults to space.
    target_delimiter: Split tokens in the target data on this delimiter.
      Defaults to space.
kwargs: Additional arguments (shuffle, num_epochs, etc) that are passed
to the data provider
Returns:
A DataProvider instance
"""
decoder_source = split_tokens_decoder.SplitTokensDecoder(
tokens_feature_name="source_tokens",
length_feature_name="source_len",
append_token="SEQUENCE_END",
delimiter=source_delimiter)
dataset_source = tf.contrib.slim.dataset.Dataset(
data_sources=data_sources_source,
reader=reader,
decoder=decoder_source,
num_samples=num_samples,
items_to_descriptions={})
dataset_target = None
if data_sources_target is not None:
decoder_target = split_tokens_decoder.SplitTokensDecoder(
tokens_feature_name="target_tokens",
length_feature_name="target_len",
prepend_token="SEQUENCE_START",
append_token="SEQUENCE_END",
delimiter=target_delimiter)
dataset_target = tf.contrib.slim.dataset.Dataset(
data_sources=data_sources_target,
reader=reader,
decoder=decoder_target,
num_samples=num_samples,
items_to_descriptions={})
return ParallelDataProvider(
dataset1=dataset_source, dataset2=dataset_target, **kwargs)
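# --- Usage sketch (added for illustration, not part of the original file) ---
# A hedged example; the file paths are hypothetical. The returned provider is
# queue-based, so it is normally consumed inside a session with queue runners
# started.
#
#   provider = make_parallel_data_provider(
#       data_sources_source=["data/train.sources.txt"],
#       data_sources_target=["data/train.targets.txt"],
#       shuffle=True)
#   items = provider.list_items()
#   tensors = provider.get(list(items))
#   # zip(items, tensors) yields the "source_tokens", "source_len",
#   # "target_tokens" and "target_len" tensors.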
class ParallelDataProvider(data_provider.DataProvider):
"""Creates a ParallelDataProvider. This data provider reads two datasets
in parallel, keeping them aligned.
Args:
dataset1: The first dataset. An instance of the Dataset class.
dataset2: The second dataset. An instance of the Dataset class.
Can be None. If None, only `dataset1` is read.
num_readers: The number of parallel readers to use.
shuffle: Whether to shuffle the data sources and common queue when
reading.
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
common_queue_capacity: The capacity of the common queue.
common_queue_min: The minimum number of elements in the common queue after
a dequeue.
seed: The seed to use if shuffling.
"""
def __init__(self,
dataset1,
dataset2,
shuffle=True,
num_epochs=None,
common_queue_capacity=4096,
common_queue_min=1024,
seed=None):
if seed is None:
seed = np.random.randint(10e8)
_, data_source = parallel_reader.parallel_read(
dataset1.data_sources,
reader_class=dataset1.reader,
num_epochs=num_epochs,
num_readers=1,
shuffle=False,
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
seed=seed)
data_target = ""
if dataset2 is not None:
_, data_target = parallel_reader.parallel_read(
dataset2.data_sources,
reader_class=dataset2.reader,
num_epochs=num_epochs,
num_readers=1,
shuffle=False,
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
seed=seed)
# Optionally shuffle the data
if shuffle:
shuffle_queue = tf.RandomShuffleQueue(
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
dtypes=[tf.string, tf.string],
seed=seed)
enqueue_ops = []
enqueue_ops.append(shuffle_queue.enqueue([data_source, data_target]))
tf.train.add_queue_runner(
tf.train.QueueRunner(shuffle_queue, enqueue_ops))
data_source, data_target = shuffle_queue.dequeue()
# Decode source items
items = dataset1.decoder.list_items()
tensors = dataset1.decoder.decode(data_source, items)
if dataset2 is not None:
# Decode target items
items2 = dataset2.decoder.list_items()
tensors2 = dataset2.decoder.decode(data_target, items2)
# Merge items and results
items = items + items2
tensors = tensors + tensors2
super(ParallelDataProvider, self).__init__(
items_to_tensors=dict(zip(items, tensors)),
num_samples=dataset1.num_samples)
| 5,826 | 33.684524 | 78 | py |
seq2seq | seq2seq-master/seq2seq/data/vocab.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vocabulary related functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow import gfile
SpecialVocab = collections.namedtuple("SpecialVocab",
["UNK", "SEQUENCE_START", "SEQUENCE_END"])
class VocabInfo(
collections.namedtuple("VocbabInfo",
["path", "vocab_size", "special_vocab"])):
"""Convenience structure for vocabulary information.
"""
@property
def total_size(self):
"""Returns size the the base vocabulary plus the size of extra vocabulary"""
return self.vocab_size + len(self.special_vocab)
def get_vocab_info(vocab_path):
"""Creates a `VocabInfo` instance that contains the vocabulary size and
the special vocabulary for the given file.
Args:
vocab_path: Path to a vocabulary file with one word per line.
Returns:
A VocabInfo tuple.
"""
with gfile.GFile(vocab_path) as file:
vocab_size = sum(1 for _ in file)
special_vocab = get_special_vocab(vocab_size)
return VocabInfo(vocab_path, vocab_size, special_vocab)
def get_special_vocab(vocabulary_size):
"""Returns the `SpecialVocab` instance for a given vocabulary size.
"""
return SpecialVocab(*range(vocabulary_size, vocabulary_size + 3))
def create_vocabulary_lookup_table(filename, default_value=None):
"""Creates a lookup table for a vocabulary file.
Args:
    filename: Path to a vocabulary file containing one word per line.
Each word is mapped to its line number.
default_value: UNK tokens will be mapped to this id.
If None, UNK tokens will be mapped to [vocab_size]
Returns:
A tuple (vocab_to_id_table, id_to_vocab_table,
word_to_count_table, vocab_size). The vocab size does not include
the UNK token.
"""
if not gfile.Exists(filename):
raise ValueError("File does not exist: {}".format(filename))
# Load vocabulary into memory
with gfile.GFile(filename) as file:
vocab = list(line.strip("\n") for line in file)
vocab_size = len(vocab)
has_counts = len(vocab[0].split("\t")) == 2
if has_counts:
vocab, counts = zip(*[_.split("\t") for _ in vocab])
counts = [float(_) for _ in counts]
vocab = list(vocab)
else:
counts = [-1. for _ in vocab]
# Add special vocabulary items
special_vocab = get_special_vocab(vocab_size)
vocab += list(special_vocab._fields)
vocab_size += len(special_vocab)
counts += [-1. for _ in list(special_vocab._fields)]
if default_value is None:
default_value = special_vocab.UNK
tf.logging.info("Creating vocabulary lookup table of size %d", vocab_size)
vocab_tensor = tf.constant(vocab)
count_tensor = tf.constant(counts, dtype=tf.float32)
vocab_idx_tensor = tf.range(vocab_size, dtype=tf.int64)
# Create ID -> word mapping
id_to_vocab_init = tf.contrib.lookup.KeyValueTensorInitializer(
vocab_idx_tensor, vocab_tensor, tf.int64, tf.string)
id_to_vocab_table = tf.contrib.lookup.HashTable(id_to_vocab_init, "UNK")
# Create word -> id mapping
vocab_to_id_init = tf.contrib.lookup.KeyValueTensorInitializer(
vocab_tensor, vocab_idx_tensor, tf.string, tf.int64)
vocab_to_id_table = tf.contrib.lookup.HashTable(vocab_to_id_init,
default_value)
# Create word -> count mapping
word_to_count_init = tf.contrib.lookup.KeyValueTensorInitializer(
vocab_tensor, count_tensor, tf.string, tf.float32)
word_to_count_table = tf.contrib.lookup.HashTable(word_to_count_init, -1)
return vocab_to_id_table, id_to_vocab_table, word_to_count_table, vocab_size
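# --- Usage sketch (added for illustration, not part of the original file) ---
# A hedged example; "vocab.txt" is a hypothetical path to a file with one token
# (optionally followed by a tab and a count) per line. The returned tables are
# HashTable ops and must be initialized before use.
#
#   vocab_to_id, id_to_vocab, word_to_count, size = \
#       create_vocabulary_lookup_table("vocab.txt")
#   ids = vocab_to_id.lookup(tf.constant(["hello", "world"]))
#   with tf.Session() as sess:
#     sess.run(tf.tables_initializer())
#     print(sess.run(ids))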
| 4,274 | 33.2 | 80 | py |
seq2seq | seq2seq-master/seq2seq/data/split_tokens_decoder.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A decoder that splits a string into tokens and returns the
individual tokens and the length.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import data_decoder
class SplitTokensDecoder(data_decoder.DataDecoder):
"""A DataProvider that splits a string tensor into individual tokens and
returns the tokens and the length.
Optionally prepends or appends special tokens.
Args:
delimiter: Delimiter to split on. Must be a single character.
tokens_feature_name: A descriptive feature name for the token values
length_feature_name: A descriptive feature name for the length value
"""
def __init__(self,
delimiter=" ",
tokens_feature_name="tokens",
length_feature_name="length",
prepend_token=None,
append_token=None):
self.delimiter = delimiter
self.tokens_feature_name = tokens_feature_name
self.length_feature_name = length_feature_name
self.prepend_token = prepend_token
self.append_token = append_token
def decode(self, data, items):
decoded_items = {}
# Split tokens
tokens = tf.string_split([data], delimiter=self.delimiter).values
# Optionally prepend a special token
if self.prepend_token is not None:
tokens = tf.concat([[self.prepend_token], tokens], 0)
# Optionally append a special token
if self.append_token is not None:
tokens = tf.concat([tokens, [self.append_token]], 0)
decoded_items[self.length_feature_name] = tf.size(tokens)
decoded_items[self.tokens_feature_name] = tokens
return [decoded_items[_] for _ in items]
def list_items(self):
return [self.tokens_feature_name, self.length_feature_name]
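# --- Usage sketch (added for illustration, not part of the original file) ---
# A hedged example of what the decoder above produces for a single string
# tensor; the input literal is illustrative only.
#
#   decoder = SplitTokensDecoder(
#       tokens_feature_name="source_tokens",
#       length_feature_name="source_len",
#       append_token="SEQUENCE_END")
#   tokens, length = decoder.decode(
#       tf.constant("hello world"), ["source_tokens", "source_len"])
#   # tokens evaluates to ["hello", "world", "SEQUENCE_END"], length to 3.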
| 2,456 | 34.1 | 74 | py |
seq2seq | seq2seq-master/seq2seq/data/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of input-related utlities.
"""
from seq2seq.data import input_pipeline
from seq2seq.data import parallel_data_provider
from seq2seq.data import postproc
from seq2seq.data import split_tokens_decoder
from seq2seq.data import vocab
| 821 | 36.363636 | 74 | py |
seq2seq | seq2seq-master/seq2seq/data/input_pipeline.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of input pipelines.
An input pipeline defines how to read and parse data. It produces a tuple
of (features, labels) that can be read by tf.learn estimators.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import sys
import six
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from seq2seq.configurable import Configurable
from seq2seq.data import split_tokens_decoder, parallel_data_provider
from seq2seq.data.sequence_example_decoder import TFSEquenceExampleDecoder
def make_input_pipeline_from_def(def_dict, mode, **kwargs):
"""Creates an InputPipeline object from a dictionary definition.
Args:
def_dict: A dictionary defining the input pipeline.
It must have "class" and "params" that correspond to the class
name and constructor parameters of an InputPipeline, respectively.
mode: A value in tf.contrib.learn.ModeKeys
Returns:
A new InputPipeline object
"""
if not "class" in def_dict:
raise ValueError("Input Pipeline definition must have a class property.")
class_ = def_dict["class"]
if not hasattr(sys.modules[__name__], class_):
raise ValueError("Invalid Input Pipeline class: {}".format(class_))
pipeline_class = getattr(sys.modules[__name__], class_)
# Constructor arguments
params = {}
if "params" in def_dict:
params.update(def_dict["params"])
params.update(kwargs)
return pipeline_class(params=params, mode=mode)
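# --- Usage sketch (added for illustration, not part of the original file) ---
# A hedged example of a pipeline definition dictionary; the file paths are
# hypothetical. "class" must name one of the InputPipeline subclasses defined
# below in this module.
#
#   pipeline = make_input_pipeline_from_def(
#       def_dict={
#           "class": "ParallelTextInputPipeline",
#           "params": {
#               "source_files": ["data/train.sources.txt"],
#               "target_files": ["data/train.targets.txt"],
#           },
#       },
#       mode=tf.contrib.learn.ModeKeys.TRAIN)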
@six.add_metaclass(abc.ABCMeta)
class InputPipeline(Configurable):
"""Abstract InputPipeline class. All input pipelines must inherit from this.
An InputPipeline defines how data is read, parsed, and separated into
features and labels.
Params:
shuffle: If true, shuffle the data.
num_epochs: Number of times to iterate through the dataset. If None,
iterate forever.
"""
def __init__(self, params, mode):
Configurable.__init__(self, params, mode)
@staticmethod
def default_params():
return {
"shuffle": True,
"num_epochs": None,
}
def make_data_provider(self, **kwargs):
"""Creates DataProvider instance for this input pipeline. Additional
keyword arguments are passed to the DataProvider.
"""
raise NotImplementedError("Not implemented.")
@property
def feature_keys(self):
"""Defines the features that this input pipeline provides. Returns
a set of strings.
"""
return set()
@property
def label_keys(self):
"""Defines the labels that this input pipeline provides. Returns
a set of strings.
"""
return set()
@staticmethod
def read_from_data_provider(data_provider):
"""Utility function to read all available items from a DataProvider.
"""
item_values = data_provider.get(list(data_provider.list_items()))
items_dict = dict(zip(data_provider.list_items(), item_values))
return items_dict
class ParallelTextInputPipeline(InputPipeline):
"""An input pipeline that reads two parallel (line-by-line aligned) text
files.
Params:
source_files: An array of file names for the source data.
target_files: An array of file names for the target data. These must
be aligned to the `source_files`.
source_delimiter: A character to split the source text on. Defaults
to " " (space). For character-level training this can be set to the
empty string.
target_delimiter: Same as `source_delimiter` but for the target text.
"""
@staticmethod
def default_params():
params = InputPipeline.default_params()
params.update({
"source_files": [],
"target_files": [],
"source_delimiter": " ",
"target_delimiter": " ",
})
return params
def make_data_provider(self, **kwargs):
decoder_source = split_tokens_decoder.SplitTokensDecoder(
tokens_feature_name="source_tokens",
length_feature_name="source_len",
append_token="SEQUENCE_END",
delimiter=self.params["source_delimiter"])
dataset_source = tf.contrib.slim.dataset.Dataset(
data_sources=self.params["source_files"],
reader=tf.TextLineReader,
decoder=decoder_source,
num_samples=None,
items_to_descriptions={})
dataset_target = None
if len(self.params["target_files"]) > 0:
decoder_target = split_tokens_decoder.SplitTokensDecoder(
tokens_feature_name="target_tokens",
length_feature_name="target_len",
prepend_token="SEQUENCE_START",
append_token="SEQUENCE_END",
delimiter=self.params["target_delimiter"])
dataset_target = tf.contrib.slim.dataset.Dataset(
data_sources=self.params["target_files"],
reader=tf.TextLineReader,
decoder=decoder_target,
num_samples=None,
items_to_descriptions={})
return parallel_data_provider.ParallelDataProvider(
dataset1=dataset_source,
dataset2=dataset_target,
shuffle=self.params["shuffle"],
num_epochs=self.params["num_epochs"],
**kwargs)
@property
def feature_keys(self):
return set(["source_tokens", "source_len"])
@property
def label_keys(self):
return set(["target_tokens", "target_len"])
class TFRecordInputPipeline(InputPipeline):
"""An input pipeline that reads a TFRecords containing both source
and target sequences.
Params:
files: An array of file names to read from.
source_field: The TFRecord feature field containing the source text.
target_field: The TFRecord feature field containing the target text.
source_delimiter: A character to split the source text on. Defaults
to " " (space). For character-level training this can be set to the
empty string.
target_delimiter: Same as `source_delimiter` but for the target text.
"""
@staticmethod
def default_params():
params = InputPipeline.default_params()
params.update({
"files": [],
"source_field": "source",
"target_field": "target",
"source_delimiter": " ",
"target_delimiter": " ",
})
return params
def make_data_provider(self, **kwargs):
splitter_source = split_tokens_decoder.SplitTokensDecoder(
tokens_feature_name="source_tokens",
length_feature_name="source_len",
append_token="SEQUENCE_END",
delimiter=self.params["source_delimiter"])
splitter_target = split_tokens_decoder.SplitTokensDecoder(
tokens_feature_name="target_tokens",
length_feature_name="target_len",
prepend_token="SEQUENCE_START",
append_token="SEQUENCE_END",
delimiter=self.params["target_delimiter"])
keys_to_features = {
self.params["source_field"]: tf.FixedLenFeature((), tf.string),
self.params["target_field"]: tf.FixedLenFeature(
(), tf.string, default_value="")
}
items_to_handlers = {}
items_to_handlers["source_tokens"] = tfexample_decoder.ItemHandlerCallback(
keys=[self.params["source_field"]],
func=lambda dict: splitter_source.decode(
dict[self.params["source_field"]], ["source_tokens"])[0])
items_to_handlers["source_len"] = tfexample_decoder.ItemHandlerCallback(
keys=[self.params["source_field"]],
func=lambda dict: splitter_source.decode(
dict[self.params["source_field"]], ["source_len"])[0])
items_to_handlers["target_tokens"] = tfexample_decoder.ItemHandlerCallback(
keys=[self.params["target_field"]],
func=lambda dict: splitter_target.decode(
dict[self.params["target_field"]], ["target_tokens"])[0])
items_to_handlers["target_len"] = tfexample_decoder.ItemHandlerCallback(
keys=[self.params["target_field"]],
func=lambda dict: splitter_target.decode(
dict[self.params["target_field"]], ["target_len"])[0])
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
dataset = tf.contrib.slim.dataset.Dataset(
data_sources=self.params["files"],
reader=tf.TFRecordReader,
decoder=decoder,
num_samples=None,
items_to_descriptions={})
return tf.contrib.slim.dataset_data_provider.DatasetDataProvider(
dataset=dataset,
shuffle=self.params["shuffle"],
num_epochs=self.params["num_epochs"],
**kwargs)
@property
def feature_keys(self):
return set(["source_tokens", "source_len"])
@property
def label_keys(self):
return set(["target_tokens", "target_len"])
class ImageCaptioningInputPipeline(InputPipeline):
"""An input pipeline that reads a TFRecords containing both source
and target sequences.
Params:
files: An array of file names to read from.
source_field: The TFRecord feature field containing the source text.
target_field: The TFRecord feature field containing the target text.
source_delimiter: A character to split the source text on. Defaults
to " " (space). For character-level training this can be set to the
empty string.
target_delimiter: Same as `source_delimiter` but for the target text.
"""
@staticmethod
def default_params():
params = InputPipeline.default_params()
params.update({
"files": [],
"image_field": "image/data",
"image_format": "jpg",
"caption_ids_field": "image/caption_ids",
"caption_tokens_field": "image/caption",
})
return params
def make_data_provider(self, **kwargs):
context_keys_to_features = {
self.params["image_field"]: tf.FixedLenFeature(
[], dtype=tf.string),
"image/format": tf.FixedLenFeature(
[], dtype=tf.string, default_value=self.params["image_format"]),
}
sequence_keys_to_features = {
self.params["caption_ids_field"]: tf.FixedLenSequenceFeature(
[], dtype=tf.int64),
self.params["caption_tokens_field"]: tf.FixedLenSequenceFeature(
[], dtype=tf.string)
}
items_to_handlers = {
"image": tfexample_decoder.Image(
image_key=self.params["image_field"],
format_key="image/format",
channels=3),
"target_ids":
tfexample_decoder.Tensor(self.params["caption_ids_field"]),
"target_tokens":
tfexample_decoder.Tensor(self.params["caption_tokens_field"]),
"target_len": tfexample_decoder.ItemHandlerCallback(
keys=[self.params["caption_tokens_field"]],
func=lambda x: tf.size(x[self.params["caption_tokens_field"]]))
}
decoder = TFSEquenceExampleDecoder(
context_keys_to_features, sequence_keys_to_features, items_to_handlers)
dataset = tf.contrib.slim.dataset.Dataset(
data_sources=self.params["files"],
reader=tf.TFRecordReader,
decoder=decoder,
num_samples=None,
items_to_descriptions={})
return tf.contrib.slim.dataset_data_provider.DatasetDataProvider(
dataset=dataset,
shuffle=self.params["shuffle"],
num_epochs=self.params["num_epochs"],
**kwargs)
@property
def feature_keys(self):
return set(["image"])
@property
def label_keys(self):
return set(["target_tokens", "target_ids", "target_len"])
| 12,004 | 32.347222 | 79 | py |
seq2seq | seq2seq-master/seq2seq/contrib/experiment.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A patched tf.learn Experiment class to handle GPU memory
sharing issues.
"""
import tensorflow as tf
class Experiment(tf.contrib.learn.Experiment):
"""A patched tf.learn Experiment class to handle GPU memory
sharing issues."""
def __init__(self, train_steps_per_iteration=None, *args, **kwargs):
super(Experiment, self).__init__(*args, **kwargs)
self._train_steps_per_iteration = train_steps_per_iteration
def _has_training_stopped(self, eval_result):
"""Determines whether the training has stopped."""
if not eval_result:
return False
global_step = eval_result.get(tf.GraphKeys.GLOBAL_STEP)
return global_step and self._train_steps and (
global_step >= self._train_steps)
def continuous_train_and_eval(self,
continuous_eval_predicate_fn=None):
"""Interleaves training and evaluation.
The frequency of evaluation is controlled by the `train_steps_per_iteration`
(via constructor). The model will be first trained for
`train_steps_per_iteration`, and then be evaluated in turns.
This differs from `train_and_evaluate` as follows:
1. The procedure will have train and evaluation in turns. The model
      will be trained for a number of steps (usually smaller than `train_steps`
      if provided) and then be evaluated. `train_and_evaluate` will train the
      model for `train_steps` (no small training iterations).
2. Due to the different approach this schedule takes, it leads to two
differences in resource control. First, the resources (e.g., memory) used
by training will be released before evaluation (`train_and_evaluate` takes
double resources). Second, more checkpoints will be saved as a checkpoint
      is generated at the end of each small training iteration.
Args:
continuous_eval_predicate_fn: A predicate function determining whether to
continue after each iteration. `predicate_fn` takes the evaluation
results as its arguments. At the beginning of evaluation, the passed
eval results will be None so it's expected that the predicate function
handles that gracefully. When `predicate_fn` is not specified, this will
run in an infinite loop or exit when global_step reaches `train_steps`.
Returns:
A tuple of the result of the `evaluate` call to the `Estimator` and the
export results using the specified `ExportStrategy`.
Raises:
ValueError: if `continuous_eval_predicate_fn` is neither None nor
callable.
"""
if (continuous_eval_predicate_fn is not None and
not callable(continuous_eval_predicate_fn)):
raise ValueError(
"`continuous_eval_predicate_fn` must be a callable, or None.")
eval_result = None
# Set the default value for train_steps_per_iteration, which will be
# overriden by other settings.
train_steps_per_iteration = 1000
if self._train_steps_per_iteration is not None:
train_steps_per_iteration = self._train_steps_per_iteration
elif self._train_steps is not None:
# train_steps_per_iteration = int(self._train_steps / 10)
train_steps_per_iteration = min(
self._min_eval_frequency, self._train_steps)
while (not continuous_eval_predicate_fn or
continuous_eval_predicate_fn(eval_result)):
if self._has_training_stopped(eval_result):
# Exits once max steps of training is satisfied.
tf.logging.info("Stop training model as max steps reached")
break
tf.logging.info("Training model for %s steps", train_steps_per_iteration)
self._estimator.fit(
input_fn=self._train_input_fn,
steps=train_steps_per_iteration,
monitors=self._train_monitors)
tf.logging.info("Evaluating model now.")
eval_result = self._estimator.evaluate(
input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name="one_pass",
hooks=self._eval_hooks)
return eval_result, self._maybe_export(eval_result)
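# --- Usage sketch (added for illustration, not part of the original file) ---
# A hedged example of constructing the patched Experiment; the estimator,
# input functions and step counts below are hypothetical placeholders.
#
#   experiment = Experiment(
#       train_steps_per_iteration=1000,
#       estimator=my_estimator,
#       train_input_fn=my_train_input_fn,
#       eval_input_fn=my_eval_input_fn,
#       train_steps=100000)
#   experiment.continuous_train_and_eval()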
| 4,680 | 39.704348 | 80 | py |
seq2seq | seq2seq-master/seq2seq/contrib/rnn_cell.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of RNN Cells
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import inspect
import tensorflow as tf
from tensorflow.python.ops import array_ops # pylint: disable=E0611
from tensorflow.python.util import nest # pylint: disable=E0611
from tensorflow.contrib.rnn import MultiRNNCell # pylint: disable=E0611
# Import all cell classes from Tensorflow
TF_CELL_CLASSES = [
x for x in tf.contrib.rnn.__dict__.values()
if inspect.isclass(x) and issubclass(x, tf.contrib.rnn.RNNCell)
]
for cell_class in TF_CELL_CLASSES:
setattr(sys.modules[__name__], cell_class.__name__, cell_class)
class ExtendedMultiRNNCell(MultiRNNCell):
"""Extends the Tensorflow MultiRNNCell with residual connections"""
def __init__(self,
cells,
residual_connections=False,
residual_combiner="add",
residual_dense=False):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
      state_is_tuple: Always True for this cell; accepted and returned states
        are n-tuples, where `n = len(cells)`.
residual_connections: If true, add residual connections between all cells.
        This requires all cells to have the same output_size. Also, if the
input size is not equal to the cell output size, a linear transform
is added before the first layer.
residual_combiner: One of "add" or "concat". To create inputs for layer
t+1 either "add" the inputs from the prev layer or concat them.
residual_dense: Densely connect each layer to all other layers
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
super(ExtendedMultiRNNCell, self).__init__(cells, state_is_tuple=True)
assert residual_combiner in ["add", "concat", "mean"]
self._residual_connections = residual_connections
self._residual_combiner = residual_combiner
self._residual_dense = residual_dense
def __call__(self, inputs, state, scope=None):
"""Run this multi-layer cell on inputs, starting from state."""
if not self._residual_connections:
return super(ExtendedMultiRNNCell, self).__call__(
inputs, state, (scope or "extended_multi_rnn_cell"))
with tf.variable_scope(scope or "extended_multi_rnn_cell"):
# Adding Residual connections are only possible when input and output
# sizes are equal. Optionally transform the initial inputs to
# `cell[0].output_size`
if self._cells[0].output_size != inputs.get_shape().as_list()[1] and \
(self._residual_combiner in ["add", "mean"]):
inputs = tf.contrib.layers.fully_connected(
inputs=inputs,
num_outputs=self._cells[0].output_size,
activation_fn=None,
scope="input_transform")
# Iterate through all layers (code from MultiRNNCell)
cur_inp = inputs
prev_inputs = [cur_inp]
new_states = []
for i, cell in enumerate(self._cells):
with tf.variable_scope("cell_%d" % i):
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s" %
(len(self.state_size), state))
cur_state = state[i]
next_input, new_state = cell(cur_inp, cur_state)
# Either combine all previous inputs or only the current input
input_to_combine = prev_inputs[-1:]
if self._residual_dense:
input_to_combine = prev_inputs
# Add Residual connection
if self._residual_combiner == "add":
next_input = next_input + sum(input_to_combine)
if self._residual_combiner == "mean":
combined_mean = tf.reduce_mean(tf.stack(input_to_combine), 0)
next_input = next_input + combined_mean
elif self._residual_combiner == "concat":
next_input = tf.concat([next_input] + input_to_combine, 1)
cur_inp = next_input
prev_inputs.append(cur_inp)
new_states.append(new_state)
new_states = (tuple(new_states)
if self._state_is_tuple else array_ops.concat(new_states, 1))
return cur_inp, new_states
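# --- Usage sketch (added for illustration, not part of the original file) ---
# A hedged example of stacking GRU cells with additive residual connections;
# the sizes are illustrative only. With the "add" or "mean" combiners all
# stacked cells must share the same output size.
#
#   cells = [tf.contrib.rnn.GRUCell(128) for _ in range(4)]
#   stacked_cell = ExtendedMultiRNNCell(
#       cells,
#       residual_connections=True,
#       residual_combiner="add",
#       residual_dense=False)
#   # stacked_cell can be used like any RNNCell, e.g. with tf.nn.dynamic_rnn.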
| 5,167 | 40.344 | 80 | py |
seq2seq | seq2seq-master/seq2seq/contrib/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 40.214286 | 74 | py |
seq2seq | seq2seq-master/seq2seq/contrib/seq2seq/helper.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
IMPORTANT: This code is taken directly from Tensorflow
(https://github.com/tensorflow/tensorflow) and is copied temporarily
until it is available in a packaged Tensorflow version on pypi.
TODO(dennybritz): Delete this code when it becomes available in TF.
A library of helpers for use with SamplingDecoders.
"""
# pylint: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.distributions.python.ops import bernoulli
from tensorflow.contrib.distributions.python.ops import categorical
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
from seq2seq.contrib.seq2seq import decoder
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Helper interface. Helper instances are used by SamplingDecoder."""
@abc.abstractproperty
def batch_size(self):
"""Returns a scalar int32 tensor."""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sample_noise = random_ops.random_uniform(
[self.batch_size], seed=self._scheduling_seed)
select_sample = (self._sampling_probability > select_sample_noise)
sample_id_sampler = categorical.Categorical(logits=outputs)
return array_ops.where(
select_sample,
sample_id_sampler.sample(seed=self._seed),
array_ops.tile([-1], [self.batch_size]))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
where_sampling_flat = array_ops.reshape(where_sampling, [-1])
where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
inputs_not_sampling = array_ops.gather(
base_next_inputs, where_not_sampling_flat)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
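# Illustrative sketch (added for clarity, not part of the original file):
# wiring a decoder up with scheduled sampling. The names `decoder_inputs`,
# `target_lengths` and `embedding_matrix` are assumptions, not symbols
# defined in this module.
#
#   helper = ScheduledEmbeddingTrainingHelper(
#       inputs=decoder_inputs,            # [batch, time, embed_dim]
#       sequence_length=target_lengths,   # [batch]
#       embedding=embedding_matrix,       # or a callable: ids -> embeddings
#       sampling_probability=0.25,        # feed back model samples 25% of the time
#       seed=1234)
#
# With sampling_probability=0.0 this degenerates to plain teacher forcing;
# with 1.0 every next input is drawn from the model's own predictions.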
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_input_layer=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
      inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_input_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output to create
the next input.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
if (next_input_layer is not None and not isinstance(next_input_layer,
layers_base._Layer)): # pylint: disable=protected-access
raise TypeError("next_input_layer must be a Layer, received: %s" %
type(next_input_layer))
self._next_input_layer = next_input_layer
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
return math_ops.cast(
sampler.sample(sample_shape=self.batch_size, seed=self._seed),
dtypes.bool)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_input_layer is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_input_layer(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
      ValueError: if `start_tokens` is not a 1D vector or `end_token` is not
        a scalar.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
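# Illustrative sketch (added for clarity, not part of the original file):
# greedy decoding at inference time. `embedding_matrix`, `batch_size`,
# `START_ID` and `EOS_ID` are assumed to be supplied by the caller.
#
#   helper = GreedyEmbeddingHelper(
#       embedding=embedding_matrix,
#       start_tokens=array_ops.fill([batch_size], START_ID),
#       end_token=EOS_ID)
#
# Each step embeds the argmax prediction as the next input, and a sequence is
# marked finished once it emits EOS_ID.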
| 20,852 | 39.491262 | 115 | py |
seq2seq | seq2seq-master/seq2seq/contrib/seq2seq/decoder.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
IMPORTANT: This code is taken directly from Tensorflow
(https://github.com/tensorflow/tensorflow) and is copied temporarily
until it is available in a packaged Tensorflow version on pypi.
TODO(dennybritz): Delete this code when it becomes available in TF.
Seq2seq layer operations for use in neural networks.
"""
# pylint: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = ["Decoder", "dynamic_decode"]
def _transpose_batch_time(x):
"""Transpose the batch and time dimensions of a Tensor.
Retains as much of the static shape information as possible.
Args:
x: A tensor of rank 2 or higher.
Returns:
x transposed along the first two dimensions.
Raises:
ValueError: if `x` is rank 1 or lower.
"""
x_static_shape = x.get_shape()
if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2, but saw shape: %s" %
(x, x_static_shape))
x_rank = array_ops.rank(x)
x_t = array_ops.transpose(
x, array_ops.concat(
([1, 0], math_ops.range(2, x_rank)), axis=0))
x_t.set_shape(
tensor_shape.TensorShape([
x_static_shape[1].value, x_static_shape[0].value
]).concatenate(x_static_shape[2:]))
return x_t
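# Shape-level example (added for clarity, not part of the original file):
#   [batch, time, depth]    = [32, 50, 512]    -> [50, 32, 512]
#   [batch, time, ...extra] = [32, 50, 4, 128] -> [50, 32, 4, 128]
# Only the first two dimensions are swapped; trailing dimensions and their
# static shape information are preserved.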
@six.add_metaclass(abc.ABCMeta)
class Decoder(object):
"""An RNN Decoder abstract interface object."""
@property
def batch_size(self):
"""The batch size of the inputs returned by `sample`."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape` object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
@abc.abstractmethod
def initialize(self, name=None):
"""Called before any decoding iterations.
Args:
name: Name scope for any created operations.
Returns:
`(finished, first_inputs, initial_state)`.
"""
raise NotImplementedError
@abc.abstractmethod
def step(self, time, inputs, state, name=None):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor.
inputs: Input (possibly nested tuple of) tensor[s] for this time step.
state: State (possibly nested tuple of) tensor[s] from previous time step.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
raise NotImplementedError
def _create_zero_outputs(size, dtype, batch_size):
"""Create a zero outputs Tensor structure."""
def _t(s):
return (s if isinstance(s, ops.Tensor) else constant_op.constant(
tensor_shape.TensorShape(s).as_list(),
dtype=dtypes.int32,
name="zero_suffix_shape"))
def _create(s, d):
return array_ops.zeros(
array_ops.concat(
([batch_size], _t(s)), axis=0), dtype=d)
return nest.map_structure(_create, size, dtype)
def dynamic_decode(decoder,
output_time_major=False,
impute_finished=False,
maximum_iterations=None,
parallel_iterations=32,
swap_memory=False,
scope=None):
"""Perform dynamic decoding with `decoder`.
Args:
decoder: A `Decoder` instance.
output_time_major: Python boolean. Default: `False` (batch major). If
`True`, outputs are returned as time major tensors (this mode is faster).
Otherwise, outputs are returned as batch major tensors (this adds extra
time to the computation).
impute_finished: Python boolean. If `True`, then states for batch
entries which are marked as finished get copied through and the
corresponding outputs get zeroed out. This causes some slowdown at
each time step, but ensures that the final state and outputs have
the correct values and that backprop ignores time steps that were
marked as finished.
maximum_iterations: `int32` scalar, maximum allowed number of decoding
steps. Default is `None` (decode until the decoder is fully done).
parallel_iterations: Argument passed to `tf.while_loop`.
swap_memory: Argument passed to `tf.while_loop`.
scope: Optional variable scope to use.
Returns:
`(final_outputs, final_state)`.
Raises:
TypeError: if `decoder` is not an instance of `Decoder`.
ValueError: if maximum_iterations is provided but is not a scalar.
"""
if not isinstance(decoder, Decoder):
raise TypeError("Expected decoder to be type Decoder, but saw: %s" %
type(decoder))
with variable_scope.variable_scope(scope or "decoder") as varscope:
# Properly cache variable values inside the while_loop
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
if maximum_iterations is not None:
maximum_iterations = ops.convert_to_tensor(
maximum_iterations, dtype=dtypes.int32, name="maximum_iterations")
if maximum_iterations.get_shape().ndims != 0:
raise ValueError("maximum_iterations must be a scalar")
initial_finished, initial_inputs, initial_state = decoder.initialize()
zero_outputs = _create_zero_outputs(decoder.output_size,
decoder.output_dtype,
decoder.batch_size)
if maximum_iterations is not None:
initial_finished = math_ops.logical_or(
initial_finished, 0 >= maximum_iterations)
initial_time = constant_op.constant(0, dtype=dtypes.int32)
def _shape(batch_size, from_shape):
if not isinstance(from_shape, tensor_shape.TensorShape):
return tensor_shape.TensorShape(None)
else:
batch_size = tensor_util.constant_value(
ops.convert_to_tensor(
batch_size, name="batch_size"))
return tensor_shape.TensorShape([batch_size]).concatenate(from_shape)
def _create_ta(s, d):
return tensor_array_ops.TensorArray(
dtype=d,
size=0,
dynamic_size=True,
element_shape=_shape(decoder.batch_size, s))
initial_outputs_ta = nest.map_structure(_create_ta, decoder.output_size,
decoder.output_dtype)
def condition(unused_time, unused_outputs_ta, unused_state, unused_inputs,
finished):
return math_ops.logical_not(math_ops.reduce_all(finished))
def body(time, outputs_ta, state, inputs, finished):
"""Internal while_loop body.
Args:
time: scalar int32 tensor.
outputs_ta: structure of TensorArray.
state: (structure of) state tensors and TensorArrays.
inputs: (structure of) input tensors.
finished: 1-D bool tensor.
Returns:
`(time + 1, outputs_ta, next_state, next_inputs, next_finished)`.
"""
(next_outputs, decoder_state, next_inputs,
decoder_finished) = decoder.step(time, inputs, state)
next_finished = math_ops.logical_or(decoder_finished, finished)
if maximum_iterations is not None:
next_finished = math_ops.logical_or(
next_finished, time + 1 >= maximum_iterations)
nest.assert_same_structure(state, decoder_state)
nest.assert_same_structure(outputs_ta, next_outputs)
nest.assert_same_structure(inputs, next_inputs)
# Zero out output values past finish
if impute_finished:
emit = nest.map_structure(
lambda out, zero: array_ops.where(finished, zero, out),
next_outputs,
zero_outputs)
else:
emit = next_outputs
# Copy through states past finish
def _maybe_copy_state(new, cur):
# TensorArrays and scalar states get passed through.
if isinstance(cur, tensor_array_ops.TensorArray):
pass_through = True
else:
new.set_shape(cur.shape)
pass_through = (new.shape.ndims == 0)
return new if pass_through else array_ops.where(finished, cur, new)
if impute_finished:
next_state = nest.map_structure(
_maybe_copy_state, decoder_state, state)
else:
next_state = decoder_state
outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
outputs_ta, emit)
return (time + 1, outputs_ta, next_state, next_inputs, next_finished)
res = control_flow_ops.while_loop(
condition,
body,
loop_vars=[
initial_time, initial_outputs_ta, initial_state, initial_inputs,
initial_finished
],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
final_outputs_ta = res[1]
final_state = res[2]
final_outputs = nest.map_structure(lambda ta: ta.stack(), final_outputs_ta)
if not output_time_major:
final_outputs = nest.map_structure(_transpose_batch_time, final_outputs)
return final_outputs, final_state
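# Illustrative sketch (added for clarity, not part of the original file):
# typical use of dynamic_decode with a concrete Decoder implementation.
# `my_decoder` is a hypothetical object implementing the Decoder interface.
#
#   final_outputs, final_state = dynamic_decode(
#       decoder=my_decoder,
#       output_time_major=False,   # return [batch, time, ...] tensors
#       impute_finished=True,      # zero outputs / copy state past finish
#       maximum_iterations=100)    # hard cap on the number of decode steps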
| 10,401 | 34.023569 | 80 | py |
seq2seq | seq2seq-master/seq2seq/contrib/seq2seq/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 40.214286 | 74 | py |
seq2seq | seq2seq-master/seq2seq/inference/inference.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generates model predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from seq2seq.training import utils as training_utils
def create_inference_graph(model, input_pipeline, batch_size=32):
"""Creates a graph to perform inference.
Args:
    model: The model instance used to make predictions.
input_pipeline: An instance of `InputPipeline` that defines
how to read and parse data.
batch_size: The batch size used for inference
Returns:
The return value of the model function, typically a tuple of
(predictions, loss, train_op).
"""
# TODO: This doesn't really belong here.
# How to get rid of this?
if hasattr(model, "use_beam_search"):
if model.use_beam_search:
tf.logging.info("Setting batch size to 1 for beam search.")
batch_size = 1
input_fn = training_utils.create_input_fn(
pipeline=input_pipeline,
batch_size=batch_size,
allow_smaller_final_batch=True)
# Build the graph
features, labels = input_fn()
return model(features=features, labels=labels, params=None)
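# Illustrative sketch (added for clarity, not part of the original file):
# `model` and `input_pipeline` are assumed to be constructed elsewhere, e.g.
# from the configuration stored with a training checkpoint.
#
#   predictions, loss, train_op = create_inference_graph(
#       model=model,
#       input_pipeline=input_pipeline,
#       batch_size=32)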
| 1,715 | 30.2 | 74 | py |
seq2seq | seq2seq-master/seq2seq/inference/beam_search.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""In-Graph Beam Search Implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest # pylint: disable=E0611
class BeamSearchState(
namedtuple("BeamSearchState", ["log_probs", "finished", "lengths"])):
"""State for a single step of beam search.
Args:
log_probs: The current log probabilities of all beams
finished: A boolean vector that specifies which beams are finished
lengths: Lengths of all beams
"""
pass
class BeamSearchStepOutput(
namedtuple("BeamSearchStepOutput",
["scores", "predicted_ids", "beam_parent_ids"])):
"""Outputs for a single step of beam search.
Args:
scores: Score for each beam, a float32 vector
    predicted_ids: predictions for this step, an int32 vector
beam_parent_ids: an int32 vector containing the beam indices of the
continued beams from the previous step
"""
pass
class BeamSearchConfig(
namedtuple("BeamSearchConfig", [
"beam_width", "vocab_size", "eos_token", "length_penalty_weight",
"choose_successors_fn"
])):
"""Configuration object for beam search.
Args:
beam_width: Number of beams to use, an integer
vocab_size: Output vocabulary size
eos_token: The id of the EOS token, used to mark beams as "done"
length_penalty_weight: Weight for the length penalty factor. 0.0 disables
the penalty.
choose_successors_fn: A function used to choose beam successors based
on their scores. Maps from (scores, config) => (chosen scores, chosen_ids)
"""
pass
def gather_tree_py(values, parents):
"""Gathers path through a tree backwards from the leave nodes. Used
to reconstruct beams given their parents."""
beam_length = values.shape[0]
num_beams = values.shape[1]
res = np.zeros_like(values)
res[-1, :] = values[-1, :]
for beam_id in range(num_beams):
parent = parents[-1][beam_id]
for level in reversed(range(beam_length - 1)):
res[level, beam_id] = values[level][parent]
parent = parents[level][parent]
return np.array(res).astype(values.dtype)
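# Worked example (added for clarity, not part of the original file), tracing
# the loops above by hand:
#
#   values  = np.array([[1, 2],
#                       [3, 4],
#                       [5, 6]])   # [beam_length=3, num_beams=2]
#   parents = np.array([[0, 0],
#                       [1, 0],
#                       [0, 1]])
#   gather_tree_py(values, parents)
#   # -> [[2, 1],
#   #     [3, 4],
#   #     [5, 6]]
#
# Each column of the result is a full beam, reconstructed by walking the
# parent pointers backwards from the last time step.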
def gather_tree(values, parents):
"""Tensor version of gather_tree_py"""
res = tf.py_func(
func=gather_tree_py, inp=[values, parents], Tout=values.dtype)
res.set_shape(values.get_shape().as_list())
return res
def create_initial_beam_state(config):
"""Creates an instance of `BeamState` that can be used on the first
call to `beam_step`.
Args:
config: A BeamSearchConfig
Returns:
An instance of `BeamState`.
"""
return BeamSearchState(
log_probs=tf.zeros([config.beam_width]),
finished=tf.zeros(
[config.beam_width], dtype=tf.bool),
lengths=tf.zeros(
[config.beam_width], dtype=tf.int32))
def length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty according to
https://arxiv.org/abs/1609.08144
Args:
sequence_lengths: The sequence length of all hypotheses, a tensor
of shape [beam_size, vocab_size].
penalty_factor: A scalar that weights the length penalty.
Returns:
    The length penalty factor, a tensor of shape [beam_size].
"""
return tf.div((5. + tf.to_float(sequence_lengths))**penalty_factor, (5. + 1.)
**penalty_factor)
def hyp_score(log_probs, sequence_lengths, config):
"""Calculates scores for beam search hypotheses.
"""
  # Calculate the length penalty
  length_penalty_ = length_penalty(
      sequence_lengths=sequence_lengths,
      penalty_factor=config.length_penalty_weight)
  score = log_probs / length_penalty_
return score
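# Added note (not part of the original file): combining the two functions
# above, a hypothesis is ranked by the GNMT-style score
#
#   score = log_prob / ((5 + length) / 6) ** length_penalty_weight
#
# so with length_penalty_weight = 0.0 the denominator is 1 and beams are
# ordered purely by total log probability, while larger weights favour
# longer hypotheses.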
def choose_top_k(scores_flat, config):
"""Chooses the top-k beams as successors.
"""
next_beam_scores, word_indices = tf.nn.top_k(scores_flat, k=config.beam_width)
return next_beam_scores, word_indices
def nest_map(inputs, map_fn, name=None):
"""Applies a function to (possibly nested) tuple of tensors.
"""
if nest.is_sequence(inputs):
inputs_flat = nest.flatten(inputs)
y_flat = [map_fn(_) for _ in inputs_flat]
outputs = nest.pack_sequence_as(inputs, y_flat)
else:
outputs = map_fn(inputs)
if name:
outputs = tf.identity(outputs, name=name)
return outputs
def mask_probs(probs, eos_token, finished):
"""Masks log probabilities such that finished beams
allocate all probability mass to eos. Unfinished beams remain unchanged.
Args:
    probs: Log probabilities of shape `[beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to
finished: A boolean tensor of shape `[beam_width]` that specifies which
elements in the beam are finished already.
Returns:
A tensor of shape `[beam_width, vocab_size]`, where unfinished beams
stay unchanged and finished beams are replaced with a tensor that has all
probability on the EOS token.
"""
vocab_size = tf.shape(probs)[1]
finished_mask = tf.expand_dims(tf.to_float(1. - tf.to_float(finished)), 1)
# These examples are not finished and we leave them
non_finished_examples = finished_mask * probs
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = tf.one_hot(
eos_token,
vocab_size,
dtype=tf.float32,
on_value=0.,
off_value=tf.float32.min)
finished_examples = (1. - finished_mask) * finished_row
return finished_examples + non_finished_examples
def beam_search_step(time_, logits, beam_state, config):
"""Performs a single step of Beam Search Decoding.
Args:
time_: Beam search time step, should start at 0. At time 0 we assume
that all beams are equal and consider only the first beam for
continuations.
logits: Logits at the current time step. A tensor of shape `[B, vocab_size]`
beam_state: Current state of the beam search. An instance of `BeamState`
config: An instance of `BeamSearchConfig`
Returns:
A new beam state.
"""
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
# Calculate the total log probs for the new hypotheses
# Final Shape: [beam_width, vocab_size]
probs = tf.nn.log_softmax(logits)
probs = mask_probs(probs, config.eos_token, previously_finished)
total_probs = tf.expand_dims(beam_state.log_probs, 1) + probs
# Calculate the continuation lengths
# We add 1 to all continuations that are not EOS and were not
# finished previously
lengths_to_add = tf.one_hot([config.eos_token] * config.beam_width,
config.vocab_size, 0, 1)
add_mask = (1 - tf.to_int32(previously_finished))
lengths_to_add = tf.expand_dims(add_mask, 1) * lengths_to_add
new_prediction_lengths = tf.expand_dims(prediction_lengths,
1) + lengths_to_add
# Calculate the scores for each beam
scores = hyp_score(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
config=config)
scores_flat = tf.reshape(scores, [-1])
# During the first time step we only consider the initial beam
scores_flat = tf.cond(
tf.convert_to_tensor(time_) > 0, lambda: scores_flat, lambda: scores[0])
# Pick the next beams according to the specified successors function
next_beam_scores, word_indices = config.choose_successors_fn(scores_flat,
config)
next_beam_scores.set_shape([config.beam_width])
word_indices.set_shape([config.beam_width])
# Pick out the probs, beam_ids, and states according to the chosen predictions
total_probs_flat = tf.reshape(total_probs, [-1], name="total_probs_flat")
next_beam_probs = tf.gather(total_probs_flat, word_indices)
next_beam_probs.set_shape([config.beam_width])
next_word_ids = tf.mod(word_indices, config.vocab_size)
next_beam_ids = tf.div(word_indices, config.vocab_size)
# Append new ids to current predictions
next_finished = tf.logical_or(
tf.gather(beam_state.finished, next_beam_ids),
tf.equal(next_word_ids, config.eos_token))
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged
# 2. Beams that are now finished (EOS predicted) remain unchanged
# 3. Beams that are not yet finished have their length increased by 1
lengths_to_add = tf.to_int32(tf.not_equal(next_word_ids, config.eos_token))
lengths_to_add = (1 - tf.to_int32(next_finished)) * lengths_to_add
next_prediction_len = tf.gather(beam_state.lengths, next_beam_ids)
next_prediction_len += lengths_to_add
next_state = BeamSearchState(
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished)
output = BeamSearchStepOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
beam_parent_ids=next_beam_ids)
return output, next_state
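# Illustrative sketch (added for clarity, not part of the original file): how
# the pieces above fit together for a single decoding step. The placeholder
# stands in for the decoder logits at the current time step; nothing here is
# executed at import time.
def _example_beam_search_step():
  config = BeamSearchConfig(
      beam_width=5,
      vocab_size=1000,
      eos_token=2,
      length_penalty_weight=0.6,
      choose_successors_fn=choose_top_k)
  state = create_initial_beam_state(config)
  logits = tf.placeholder(tf.float32, [config.beam_width, config.vocab_size])
  return beam_search_step(time_=0, logits=logits, beam_state=state,
                          config=config)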
| 9,614 | 33.339286 | 80 | py |
seq2seq | seq2seq-master/seq2seq/inference/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules related to running model inference.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from seq2seq.inference.inference import *
import seq2seq.inference.beam_search
| 817 | 34.565217 | 74 | py |
seq2seq | seq2seq-master/seq2seq/tasks/inference_task.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abstract base class for inference tasks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import six
import tensorflow as tf
from seq2seq import graph_utils
from seq2seq.configurable import Configurable, abstractstaticmethod
def unbatch_dict(dict_):
"""Converts a dictionary of batch items to a batch/list of
dictionary items.
"""
batch_size = list(dict_.values())[0].shape[0]
for i in range(batch_size):
yield {key: value[i] for key, value in dict_.items()}
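# Example (added for clarity, not part of the original file): a batch of two
# predictions becomes two per-example dictionaries.
#
#   batch = {"tokens": np.array([["a", "b"], ["c", "d"]]),
#            "length": np.array([2, 2])}
#   list(unbatch_dict(batch))
#   # -> [{"tokens": array(["a", "b"]), "length": 2},
#   #     {"tokens": array(["c", "d"]), "length": 2}]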
@six.add_metaclass(abc.ABCMeta)
class InferenceTask(tf.train.SessionRunHook, Configurable):
"""
Abstract base class for inference tasks. Defines the logic used to make
predictions for a specific type of task.
Params:
model_class: The model class to instantiate. If undefined,
re-uses the class used during training.
model_params: Model hyperparameters. Specified hyperparameters will
overwrite those used during training.
Args:
params: See Params above.
"""
def __init__(self, params):
Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.INFER)
self._predictions = None
def begin(self):
self._predictions = graph_utils.get_dict_from_collection("predictions")
@abstractstaticmethod
def default_params():
raise NotImplementedError()
| 1,982 | 28.597015 | 75 | py |
seq2seq | seq2seq-master/seq2seq/tasks/dump_beams.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Task where both the input and output sequence are plain text.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict
class DumpBeams(InferenceTask):
"""Defines inference for tasks where both the input and output sequences
are plain text.
Params:
file: File to write beam search information to.
"""
def __init__(self, params):
super(DumpBeams, self).__init__(params)
self._beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []
}
if not self.params["file"]:
raise ValueError("Must specify file for DumpBeams")
@staticmethod
def default_params():
params = {}
params.update({"file": "",})
return params
def before_run(self, _run_context):
fetches = {}
fetches["beam_search_output.predicted_ids"] = self._predictions[
"beam_search_output.predicted_ids"]
fetches["beam_search_output.beam_parent_ids"] = self._predictions[
"beam_search_output.beam_parent_ids"]
fetches["beam_search_output.scores"] = self._predictions[
"beam_search_output.scores"]
fetches["beam_search_output.log_probs"] = self._predictions[
"beam_search_output.log_probs"]
return tf.train.SessionRunArgs(fetches)
def after_run(self, _run_context, run_values):
fetches_batch = run_values.results
for fetches in unbatch_dict(fetches_batch):
self._beam_accum["predicted_ids"].append(fetches[
"beam_search_output.predicted_ids"])
self._beam_accum["beam_parent_ids"].append(fetches[
"beam_search_output.beam_parent_ids"])
self._beam_accum["scores"].append(fetches["beam_search_output.scores"])
self._beam_accum["log_probs"].append(fetches[
"beam_search_output.log_probs"])
def end(self, _session):
np.savez(self.params["file"], **self._beam_accum)
| 2,654 | 31.777778 | 77 | py |
seq2seq | seq2seq-master/seq2seq/tasks/decode_text.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Task where both the input and output sequence are plain text.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from pydoc import locate
import numpy as np
import tensorflow as tf
from tensorflow import gfile
from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict
def _get_prediction_length(predictions_dict):
"""Returns the length of the prediction based on the index
of the first SEQUENCE_END token.
"""
tokens_iter = enumerate(predictions_dict["predicted_tokens"])
return next(((i + 1) for i, _ in tokens_iter if _ == "SEQUENCE_END"),
len(predictions_dict["predicted_tokens"]))
def _get_unk_mapping(filename):
"""Reads a file that specifies a mapping from source to target tokens.
  The file must contain lines of the form <source>\t<target>.
Args:
filename: path to the mapping file
Returns:
A dictionary that maps from source -> target tokens.
"""
with gfile.GFile(filename, "r") as mapping_file:
lines = mapping_file.readlines()
mapping = dict([_.split("\t")[0:2] for _ in lines])
mapping = {k.strip(): v.strip() for k, v in mapping.items()}
return mapping
def _unk_replace(source_tokens,
predicted_tokens,
attention_scores,
mapping=None):
"""Replaces UNK tokens with tokens from the source or a
provided mapping based on the attention scores.
Args:
source_tokens: A numpy array of strings.
predicted_tokens: A numpy array of strings.
attention_scores: A numeric numpy array
of shape `[prediction_length, source_length]` that contains
the attention scores.
mapping: If not provided, an UNK token is replaced with the
source token that has the highest attention score. If provided
    the token is instead replaced with `mapping[chosen_source_token]`.
Returns:
A new `predicted_tokens` array.
"""
result = []
for token, scores in zip(predicted_tokens, attention_scores):
if token == "UNK":
max_score_index = np.argmax(scores)
chosen_source_token = source_tokens[max_score_index]
new_target = chosen_source_token
if mapping is not None and chosen_source_token in mapping:
new_target = mapping[chosen_source_token]
result.append(new_target)
else:
result.append(token)
return np.array(result)
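# Example (added for clarity, not part of the original file): a predicted
# "UNK" is replaced by the source token it attended to most strongly.
#
#   source_tokens    = np.array(["ich", "bin", "hier"])
#   predicted_tokens = np.array(["i", "am", "UNK"])
#   attention_scores = np.array([[0.8, 0.1, 0.1],
#                                [0.1, 0.8, 0.1],
#                                [0.1, 0.1, 0.8]])
#   _unk_replace(source_tokens, predicted_tokens, attention_scores)
#   # -> array(["i", "am", "hier"])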
class DecodeText(InferenceTask):
"""Defines inference for tasks where both the input and output sequences
are plain text.
Params:
delimiter: Character by which tokens are delimited. Defaults to space.
unk_replace: If true, enable unknown token replacement based on attention
scores.
unk_mapping: If `unk_replace` is true, this can be the path to a file
defining a dictionary to improve UNK token replacement. Refer to the
documentation for more details.
dump_attention_dir: Save attention scores and plots to this directory.
dump_attention_no_plot: If true, only save attention scores, not
attention plots.
dump_beams: Write beam search debugging information to this file.
"""
def __init__(self, params):
super(DecodeText, self).__init__(params)
self._unk_mapping = None
self._unk_replace_fn = None
if self.params["unk_mapping"] is not None:
self._unk_mapping = _get_unk_mapping(self.params["unk_mapping"])
if self.params["unk_replace"]:
self._unk_replace_fn = functools.partial(
_unk_replace, mapping=self._unk_mapping)
self._postproc_fn = None
if self.params["postproc_fn"]:
self._postproc_fn = locate(self.params["postproc_fn"])
if self._postproc_fn is None:
raise ValueError("postproc_fn not found: {}".format(
self.params["postproc_fn"]))
@staticmethod
def default_params():
params = {}
params.update({
"delimiter": " ",
"postproc_fn": "",
"unk_replace": False,
"unk_mapping": None,
})
return params
def before_run(self, _run_context):
fetches = {}
fetches["predicted_tokens"] = self._predictions["predicted_tokens"]
fetches["features.source_len"] = self._predictions["features.source_len"]
fetches["features.source_tokens"] = self._predictions[
"features.source_tokens"]
if "attention_scores" in self._predictions:
fetches["attention_scores"] = self._predictions["attention_scores"]
return tf.train.SessionRunArgs(fetches)
def after_run(self, _run_context, run_values):
fetches_batch = run_values.results
for fetches in unbatch_dict(fetches_batch):
# Convert to unicode
fetches["predicted_tokens"] = np.char.decode(
fetches["predicted_tokens"].astype("S"), "utf-8")
predicted_tokens = fetches["predicted_tokens"]
# If we're using beam search we take the first beam
if np.ndim(predicted_tokens) > 1:
predicted_tokens = predicted_tokens[:, 0]
fetches["features.source_tokens"] = np.char.decode(
fetches["features.source_tokens"].astype("S"), "utf-8")
source_tokens = fetches["features.source_tokens"]
source_len = fetches["features.source_len"]
if self._unk_replace_fn is not None:
# We slice the attention scores so that we do not
        # accidentally replace UNK with a SEQUENCE_END token
attention_scores = fetches["attention_scores"]
attention_scores = attention_scores[:, :source_len - 1]
predicted_tokens = self._unk_replace_fn(
source_tokens=source_tokens,
predicted_tokens=predicted_tokens,
attention_scores=attention_scores)
sent = self.params["delimiter"].join(predicted_tokens).split(
"SEQUENCE_END")[0]
# Apply postproc
if self._postproc_fn:
sent = self._postproc_fn(sent)
sent = sent.strip()
print(sent)
| 6,520 | 33.502646 | 77 | py |
seq2seq | seq2seq-master/seq2seq/tasks/dump_attention.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Task where both the input and output sequence are plain text.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import gfile
from seq2seq.tasks.decode_text import _get_prediction_length
from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict
def _get_scores(predictions_dict):
"""Returns the attention scores, sliced by source and target length.
"""
prediction_len = _get_prediction_length(predictions_dict)
source_len = predictions_dict["features.source_len"]
return predictions_dict["attention_scores"][:prediction_len, :source_len]
def _create_figure(predictions_dict):
"""Creates and returns a new figure that visualizes
  attention scores for a single model prediction.
"""
# Find out how long the predicted sequence is
target_words = list(predictions_dict["predicted_tokens"])
prediction_len = _get_prediction_length(predictions_dict)
# Get source words
source_len = predictions_dict["features.source_len"]
source_words = predictions_dict["features.source_tokens"][:source_len]
# Plot
fig = plt.figure(figsize=(8, 8))
plt.imshow(
X=predictions_dict["attention_scores"][:prediction_len, :source_len],
interpolation="nearest",
cmap=plt.cm.Blues)
plt.xticks(np.arange(source_len), source_words, rotation=45)
plt.yticks(np.arange(prediction_len), target_words, rotation=-45)
fig.tight_layout()
return fig
class DumpAttention(InferenceTask):
"""Defines inference for tasks where both the input and output sequences
are plain text.
Params:
delimiter: Character by which tokens are delimited. Defaults to space.
unk_replace: If true, enable unknown token replacement based on attention
scores.
unk_mapping: If `unk_replace` is true, this can be the path to a file
defining a dictionary to improve UNK token replacement. Refer to the
documentation for more details.
dump_attention_dir: Save attention scores and plots to this directory.
dump_attention_no_plot: If true, only save attention scores, not
attention plots.
dump_beams: Write beam search debugging information to this file.
"""
def __init__(self, params):
super(DumpAttention, self).__init__(params)
self._attention_scores_accum = []
self._idx = 0
if not self.params["output_dir"]:
raise ValueError("Must specify output_dir for DumpAttention")
@staticmethod
def default_params():
params = {}
params.update({"output_dir": "", "dump_plots": True})
return params
def begin(self):
super(DumpAttention, self).begin()
gfile.MakeDirs(self.params["output_dir"])
def before_run(self, _run_context):
fetches = {}
fetches["predicted_tokens"] = self._predictions["predicted_tokens"]
fetches["features.source_len"] = self._predictions["features.source_len"]
fetches["features.source_tokens"] = self._predictions[
"features.source_tokens"]
fetches["attention_scores"] = self._predictions["attention_scores"]
return tf.train.SessionRunArgs(fetches)
def after_run(self, _run_context, run_values):
fetches_batch = run_values.results
for fetches in unbatch_dict(fetches_batch):
# Convert to unicode
fetches["predicted_tokens"] = np.char.decode(
fetches["predicted_tokens"].astype("S"), "utf-8")
fetches["features.source_tokens"] = np.char.decode(
fetches["features.source_tokens"].astype("S"), "utf-8")
if self.params["dump_plots"]:
output_path = os.path.join(self.params["output_dir"],
"{:05d}.png".format(self._idx))
_create_figure(fetches)
plt.savefig(output_path)
plt.close()
tf.logging.info("Wrote %s", output_path)
self._idx += 1
self._attention_scores_accum.append(_get_scores(fetches))
def end(self, _session):
scores_path = os.path.join(self.params["output_dir"],
"attention_scores.npz")
np.savez(scores_path, *self._attention_scores_accum)
tf.logging.info("Wrote %s", scores_path)
| 4,850 | 34.152174 | 77 | py |
seq2seq | seq2seq-master/seq2seq/tasks/__init__.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of task types.
"""
from seq2seq.tasks.inference_task import InferenceTask
from seq2seq.tasks.decode_text import DecodeText
from seq2seq.tasks.dump_attention import DumpAttention
from seq2seq.tasks.dump_beams import DumpBeams
| 817 | 36.181818 | 74 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/configure.py | #!/usr/bin/env python3
# coding: utf-8
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Top-level configuration file for Gunyah build system.
This module constructs an instance of AbstractBuildGraph, and passes it to the
real build system which is in tools/build.
The AbstractBuildGraph class provides an interface which can be used to
declare a build graph consisting of template rules, targets which are build
using those rules, and variables that are substituted into rule commands and
subsequent variable definitions. Implementations of this interface are
provided for Ninja and SCons.
If run as a standalone script, this module generates a Ninja rules file. If
called from a SConstruct or SConscript, it sets up a SCons build that has the
same semantics as the Ninja build. The SConstruct or SConscript will typically
contain the following code:
import configure
env = Environment(tools={}, SCANNERS=[], BUILDERS={}, ENV={...})
configure.SConsBuild(env, Builder, Action, arguments=ARGUMENTS)(...)
"""
import os
import sys
import abc
import re
import runpy
import json
class ClangCompDB(object):
def __init__(self, path, var_subst):
self.path = path
self.var_subst = var_subst
self.commands = []
def add_command(self, command, i, o, **local_env):
self.commands.append({
'directory': os.getcwd(),
'command': command,
'file': i,
'output': o,
'local_env': dict(local_env),
})
def finalise(self):
for c in self.commands:
c['command'] = self.var_subst(c['command'], **c['local_env'])
del c['local_env']
d = os.path.dirname(self.path)
if d:
try:
os.makedirs(d)
except OSError as e:
import errno
if e.errno != errno.EEXIST:
raise
with open(self.path, 'w') as f:
json.dump(self.commands, f)
class AbstractBuildGraph(object):
__metaclass__ = abc.ABCMeta
def __init__(self, _parent=None, arguments=None, build_dir=None):
self._variants = []
if _parent is None:
self._is_variant = False
self._arguments = {} if arguments is None else dict(arguments)
self._build_dir = 'build' if build_dir is None else build_dir
self._env = {}
self._compdbs = {}
self._rule_compdbs = {}
self._rule_commands = {}
self._rule_byproducts = {}
self.add_gen_source(__file__)
else:
self._is_variant = True
assert arguments is None
self._arguments = _parent._arguments
assert build_dir is not None
self._build_dir = build_dir
# Make local copies of the parent's environment and rules.
self._env = dict(_parent._env)
self._compdbs = dict(_parent._compdbs)
self._rule_compdbs = dict(_parent._rule_compdbs)
self._rule_commands = dict(_parent._rule_commands)
self._rule_byproducts = dict(_parent._rule_byproducts)
def __call__(self, **kwargs):
for k in kwargs.keys():
self.add_env(k, kwargs[k], replace=True)
if not self._variants:
try:
runpy.run_module("tools.build", init_globals={'graph': self})
except SystemExit as e:
if e.code:
raise
for c in self._compdbs.values():
c.finalise()
def get_argument(self, key, default=None):
"""
Return the value of a command-line argument, or the specified default.
"""
return self._arguments.get(key, default)
@abc.abstractproperty
def root_dir(self):
"""
The root directory from which build commands are run.
This is either absolute, relative to the root directory of the
repository, or empty. It should be useda as the start= argument of an
os.path.relpath() call for any path that is specified on a command
line outside of the ${in} or ${out} variables (e.g. include search
directories).
"""
raise NotImplementedError
@property
def build_dir(self):
"""
The base directory that should be used for build outputs.
This is always a path relative to the working directory.
"""
return self._build_dir
def add_variant(self, build_dir):
"""
Create a variant build, and return the build object.
This may be called once or more before calling the object itself. If it
is called at least once before calling the object, the parent build
will not generate any build rules of its own; instead it will be
configured to only execute its own variants. If the build generator
calls this itself, it is responsible for making the distinction
between variant builds and the top level.
The specified directory is used as the build directory for the
variant.
"""
variant = self._make_variant(build_dir)
self._variants.append(variant)
return variant
@abc.abstractmethod
def _make_variant(self, build_dir):
# Create a new build object for a variant based on this one.
#
# This is the default implementation, but it is likely to need to be
# overridden, so subclasses must explicitly call the default through
# super if they want to use it.
return type(self)(build_dir=build_dir, _parent=self)
@abc.abstractmethod
def add_env(self, name, value, replace=False):
"""
Add an environment variable.
The value will be automatically substituted in future value arguments
to add_env() and command arguments to add_target(), if named in those
arguments in sh style, i.e. $name or ${name}.
If the optional replace argument is true, then replacing an existing
variable is allowed; otherwise it will raise KeyError.
"""
self._env[name] = self._var_subst(value)
@abc.abstractmethod
def append_env(self, name, value, separator=' '):
"""
Append to an environment variable.
This is like add_env(), except that if the variable is already set,
the given value will be appended to it. By default the values are
separated by spaces, but the optional separator argument can be used
to replace this.
"""
if name in self._env:
self._env[name] += separator
else:
self._env[name] = ''
self._env[name] += self._var_subst(value)
@abc.abstractmethod
def get_env(self, name):
"""
Fetch an environment variable.
This will return the value of the named environment variable, which
may have been either set by add_env() or append_env(), or else passed
to the build system from the external environment.
If the named value is unknown, this method throws KeyError.
"""
return self._env[name]
@abc.abstractmethod
def add_rule(self, name, command, depfile=None, depfile_external=False,
compdbs=None, restat=False):
"""
Add a build rule.
The rule name must be unique, and must be a valid Python identifier
that does not begin with an underscore.
The command will be run to build targets that use this rule. The
target name will be substituted for $out or ${out}, and the space
separated input names will be substituted for $in or ${in}.
If depfile is set, then it is assumed to be the name of a
Makefile-style dependency file produced as a side-effect of running
the command, and will be read (if it exists) to detect implicit
dependencies (included headers, etc). The target name will be
substituted for $out or ${out} in this name.
If the depfile is not generated by the commands in the rule it self,
then depfile_external should be set to true, otherwise the depfile will
be added to the list of byproducts.
If compdbs is set to a list of targets, then targets using this rule
will be added to the compilation databases represented by those
targets. The targets must be compilation databases created by calling
add_compdb().
"""
compdbs = self._expand_target_list(compdbs)
rule_compdbs = []
for c in compdbs:
if c not in self._compdbs:
raise KeyError("Not a compdb target: {:s}".format(c))
rule_compdbs.append(self._compdbs[c])
if rule_compdbs:
self._rule_compdbs[name] = tuple(rule_compdbs)
self._rule_commands[name] = command
if depfile and not depfile_external:
self._rule_byproducts[name] = depfile
@abc.abstractmethod
def add_target(self, targets, rule, sources=None, depends=None,
requires=None, byproducts=None, always=False, **local_env):
"""
Build one or more targets using a previously created build rule.
The named rule must be one that has previously been set up with
add_rule(). That rule's command will be invoked with the given target
and sources.
The targets, sources, depends, requires and byproducts arguments are
all lists of file paths relative to the top level build directory. The
depends and requires lists may contain names of alias targets, created
with the add_alias() method; otherwise, all elements of these lists
must be regular files that either exist in the source tree or will be
created during the build. If any of these arguments is specified as a
string, it is treated as a whitespace-separated list.
The targets are files created by the rule. These must be regular
files; directories are not allowed. If any target is in a directory
other than the top-level build directory, then that directory will be
automatically created before the rule is run. When the rule is run,
the list of targets will be substituted for the ${out} variable in the
rule command.
The listed sources are added as explicit input dependencies and are
        substituted for the ${in} variable in the rule command. Like the
        targets, this may be given either as a list or as a
        whitespace-separated string.
If a depends list is provided, it specifies additional implicit
dependencies of this target. These behave the same as sources, except
that they are not included in the substitution of ${in}.
If a requires list is provided, it specifies order-only dependencies
of this target. These are dependencies that are not named on the
command line, and will not trigger a rebuild if they are newer than
one of the targets.
If a byproducts list is provided, it specifies additional products of
compilation that are generated along with the primary target. These
behave the same as targets, except that they are not included in the
substitution of ${out}.
If the "always" keyword is set to True, the target will be rebuilt
every time it is used as a dependency.
Any other keyword arguments are added temporarily to the environment
while building this specific target, overriding any variables
currently in the environment. Variable expansion is performed on the
values in this dictionary. Variables may be appended by expanding
their previous value in the new value. However, do not locally
override a variable if its value is substituted in local overrides of
_other_ variables; the effect of doing so is unspecified, and may vary
between runs of an otherwise unchanged build.
"""
sources = self._expand_target_list(sources)
targets = self._expand_target_list(targets)
local_env = {
name: self._var_subst(value) for name, value in local_env.items()
}
local_env['in'] = ' '.join(getattr(n, 'abspath', str(n))
for n in sources)
local_env['out'] = ' '.join(getattr(n, 'abspath', str(n))
for n in targets)
cmd = self._rule_commands[rule]
for compdb in self._rule_compdbs.get(rule, ()):
for s in sources:
compdb.add_command(cmd, s, targets[0], **local_env)
@abc.abstractmethod
def add_alias(self, alias, targets):
"""
Add an alias (phony) target.
This method creates a target that does not correspond to a file in the
build directory, with dependencies on a specific list of other
targets. It may be used to create aliases like "all", "install", etc.,
which may then be named on the command line, as default targets, or as
dependencies, like any other target.
However, due to a misfeature in SCons, if you need to name an alias in
a dependency list before defining it, you must wrap the alias's name
with a call to the future_alias() method.
"""
raise NotImplementedError
def future_alias(self, alias):
"""
Get a reference to an alias that may not have been defined yet.
If it is necessary to name an alias in a dependency list prior to
defining it, you must pass the name of the alias to this method and
add the result to the dependency list. This is because SCons can't
retroactively change a dependency from a file (the default) to an
alias.
"""
return alias
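    # A minimal usage sketch (assumed names): an alias can be named in a
    # dependency list before it is defined by wrapping it with future_alias().
    #
    #   graph.add_target(['out/image.elf'], 'link', sources=objects,
    #                    requires=[graph.future_alias('headers')])
    #   graph.add_alias('headers', generated_headers)
    #   graph.add_alias('all', ['out/image.elf'])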
@abc.abstractmethod
def add_default_target(self, target, alias=False):
"""
Add a default target.
Targets named this way will be built if no target is specified on the
command line.
This can be called more than once; the effects are cumulative. If it
is not called, the fallback is to build all targets that are not used
as sources for other targets, and possibly also all other targets.
"""
raise NotImplementedError
@abc.abstractmethod
def add_gen_source(self, source):
"""
Add a generator source.
Future builds will re-run the generator script if the named file
changes. The base class calls this for the top-level generator script.
It may also be called for indirect dependencies of the generator
(Python modules, configuration files, etc).
"""
raise NotImplementedError
@abc.abstractmethod
def add_gen_output(self, output):
"""
Add a generator output.
The generator script may produce additional outputs that build commands
depend on. By declaring these outputs, the generator script can be
re-run if the named file is missing or out of date.
"""
raise NotImplementedError
def add_compdb(self, target, form='clang'):
"""
Add a compilation database target.
        If a form is specified, it must name one of the supported kinds of
        compilation database:
* 'clang' for Clang JSON
The default is 'clang'.
If a rule is attached to this compdb target, then all targets built
using that rule will be written into the database file.
This target becomes an implicit output of the build graph generation.
"""
if form == 'clang':
compdb = ClangCompDB(target, self._var_subst)
else:
raise NotImplementedError("Unknown compdb form: " + repr(form))
self._compdbs[target] = compdb
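    # A minimal usage sketch (the database file name is an assumption):
    #
    #   graph.add_compdb('compile_commands.json', form='clang')
    #
    # Rules attached to this database (via the compdbs argument of
    # add_rule()) then have every target they build recorded in
    # compile_commands.json.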
def _expand_target_list(self, target_list):
"""
This is used to preprocess lists of targets, sources, etc.
"""
if target_list is None:
return ()
elif isinstance(target_list, str):
return tuple(target_list.split())
else:
return tuple(target_list)
def _var_subst(self, s, **local_env):
def shrepl(match):
name = match.group(2) or match.group(3)
if name in local_env:
return local_env[name]
try:
return self.get_env(name)
except KeyError:
return ''
shvars = re.compile(r'\$((\w+)\b|{(\w+)})')
n = 1
while n:
s, n = shvars.subn(shrepl, s)
return s
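    # Worked example of the expansion above (illustrative, assuming the
    # environment maps CC to 'clang'): _var_subst('${CC} -o $out', out='a.o')
    # keeps substituting until no '$' references remain and returns
    # 'clang -o a.o'; variables that are neither local nor in the environment
    # expand to the empty string.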
class NinjaBuild(AbstractBuildGraph):
def __init__(self, ninja_file, **kwargs):
self._lines = ['# Autogenerated, do not edit']
self._gen_sources = set()
self._gen_outputs = set()
self._env_names = set()
self._rule_names = {'phony'}
self._ninja_file = ninja_file
self._subninja_files = []
rules_dir = os.path.dirname(ninja_file) or '.'
try:
os.makedirs(rules_dir)
except FileExistsError:
pass
self._mkdir_cache = {'.', rules_dir}
self._mkdir_targets = []
super(NinjaBuild, self).__init__(**kwargs)
def _make_variant(self, build_dir):
ninja_file = os.path.join(build_dir, 'rules.ninja')
self._subninja_files.append(ninja_file)
self._lines.append('')
self._lines.append('subninja ' + ninja_file)
variant = type(self)(ninja_file, build_dir=build_dir, _parent=self)
# Shadowed state
variant._env_names = set(self._env_names)
variant._rule_names = set(self._rule_names)
# Shared state
variant._gen_sources = self._gen_sources
return variant
@property
def _all_ninja_files(self):
return (self._ninja_file,) + tuple(f for v in self._variants
for f in v._all_ninja_files)
@property
def _all_byproducts(self):
byproducts = tuple(self._compdbs.keys())
byproducts += tuple(self._gen_outputs)
byproducts += tuple(f for v in self._variants
for f in v._all_byproducts)
return byproducts
@property
def _phony_always(self):
return os.path.join('tools', 'build', '.should-not-exist')
def __call__(self, gen_cmd=None, **kwargs):
super(NinjaBuild, self).__call__(**kwargs)
if not self._is_variant:
# Add a rule at the top level to rerun the generator script
assert gen_cmd is not None
self.add_rule('_gen_rules', gen_cmd, generator=True, restat=True)
self.add_target(self._all_ninja_files, '_gen_rules',
depends=sorted(self._gen_sources),
byproducts=self._all_byproducts)
# Add a phony rule for always-built targets
self.add_alias(self._phony_always, [])
# Add phony rules for all of the generator sources, so Ninja
# does not fail if one of them disappears (e.g. if a module
# is renamed, or an older branch is checked out)
for f in sorted(self._gen_sources):
self.add_alias(f, [])
# Add a rule and targets for all of the automatically created parent
# directories. We do this in deepest-first order at the end of the
# build file because ninja -t clean always processes targets in the
# order they appear, so it might otherwise fail to remove directories
# that will become empty later.
self.add_rule('_mkdir', 'mkdir -p ${out}')
for d in reversed(self._mkdir_targets):
self.add_target([d], '_mkdir', _is_auto_dir=True)
# Write out the rules file
with open(self._ninja_file, 'w') as f:
f.write('\n'.join(self._lines) + '\n')
@property
def root_dir(self):
"""
The root directory from which build commands are run.
This is either absolute, relative to the root directory of the
repository, or empty. It should be used as the start= argument of an
os.path.relpath() call for any path that is specified on a command
line outside of the ${in} or ${out} variables (e.g. include search
directories).
For Ninja, it is simply the empty string.
"""
return ''
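    # Usage sketch (illustrative): callers normalise any path that appears
    # outside ${in}/${out} against root_dir, exactly as the docstring above
    # suggests, e.g.
    #
    #   include_flag = '-I' + os.path.relpath(include_dir,
    #                                         start=graph.root_dir)
    #
    # which for Ninja effectively leaves repository-relative paths unchanged
    # when the build is run from the repository root.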
def add_env(self, name, value, replace=False):
if name in self._env_names and not replace:
raise KeyError("Duplicate definition of env ${name}"
.format(name=name))
super(NinjaBuild, self).add_env(name, value, replace=replace)
self._env_names.add(name)
self._lines.append('')
self._lines.append('{name} = {value}'.format(**locals()))
def append_env(self, name, value, separator=' '):
if name in self._env_names:
self._lines.append('')
self._lines.append('{name} = ${{{name}}}{separator}{value}'
.format(**locals()))
super(NinjaBuild, self).append_env(name, value, separator)
else:
self.add_env(name, value)
def get_env(self, name):
try:
return super(NinjaBuild, self).get_env(name)
except KeyError:
return os.environ[name]
def add_rule(self, name, command, depfile=None, depfile_external=False,
compdbs=None, generator=False, restat=False):
if name in self._rule_names:
raise KeyError("Duplicate definition of rule {name}"
.format(name=name))
super(NinjaBuild, self).add_rule(name, command, depfile=depfile,
depfile_external=depfile_external,
compdbs=compdbs)
self._rule_names.add(name)
self._lines.append('')
self._lines.append('rule ' + name)
self._lines.append(' command = ' + command)
self._lines.append(' description = ' + name + ' ${out}')
if depfile is not None:
self._lines.append(' depfile = ' + depfile)
if generator:
self._lines.append(' generator = true')
if restat:
self._lines.append(' restat = true')
def add_target(self, targets, rule, sources=None, depends=None,
requires=None, byproducts=None, always=False,
_is_auto_dir=False, **local_env):
super(NinjaBuild, self).add_target(
targets, rule, sources=sources, depends=depends,
requires=requires, byproducts=byproducts, **local_env)
targets = self._expand_target_list(targets)
sources = self._expand_target_list(sources)
depends = self._expand_target_list(depends)
requires = self._expand_target_list(requires)
byproducts = self._expand_target_list(byproducts)
if rule in self._rule_byproducts:
depsfile = re.sub(r'\$(out\b|{out})', targets[0],
self._rule_byproducts[rule])
byproducts = byproducts + (depsfile,)
if not _is_auto_dir:
# Automatically add a dependency on the parent directory of each
# target that is not at the top level
for t in targets:
target_dir = os.path.dirname(os.path.normpath(t))
if target_dir:
self._mkdir(target_dir)
requires = requires + (target_dir,)
self._lines.append('')
build_line = 'build ' + ' '.join(targets)
if byproducts:
build_line += ' | '
build_line += ' '.join(byproducts)
build_line += ' : ' + rule
if sources:
build_line += ' '
build_line += ' '.join(sources)
if depends:
build_line += ' | '
build_line += ' '.join(depends)
if always:
build_line += ' ' + self._phony_always + ' '
if requires:
build_line += ' || '
build_line += ' '.join(requires)
self._lines.append(build_line)
for name in sorted(local_env.keys()):
self._lines.append(' {} = {}'.format(name, local_env[name]))
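    # Shape of the statement emitted above (illustrative values): explicit
    # outputs, '|' byproducts, the rule name, explicit sources, '|' implicit
    # dependencies, '||' order-only dependencies, then indented per-build
    # variable overrides, e.g.
    #
    #   build obj/foo.o | obj/foo.o.d : cc src/foo.c | include/foo.h || obj
    #     cflags = -O2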
def add_alias(self, alias, targets):
targets = self._expand_target_list(targets)
self._lines.append('')
self._lines.append('build ' + self._escape(alias) + ' : phony ' +
' '.join(targets))
def add_default_target(self, target, alias=False):
self._lines.append('')
self._lines.append('default ' + self._escape(target))
def add_gen_source(self, source):
self._gen_sources.add(os.path.normpath(source))
def add_gen_output(self, output):
self._gen_outputs.add(os.path.normpath(output))
def _mkdir(self, target_dir):
if target_dir in self._mkdir_cache:
return
# Always add parent directories first, if any. This ensures that
# the _mkdir_targets list is ordered with the deepest directories
# last.
parent = os.path.dirname(target_dir)
if parent:
self._mkdir(parent)
self._mkdir_cache.add(target_dir)
self._mkdir_targets.append(target_dir)
def _escape(self, path):
return re.sub(r'([ \n:$])', r'$\1', path)
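    # For example (illustrative): _escape('my file:v1') returns
    # 'my$ file$:v1', since Ninja paths must escape spaces, colons, '$' and
    # newlines with a leading '$'.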
def _expand_target_list(self, target_list):
return tuple(self._escape(s)
for s in super(NinjaBuild, self)
._expand_target_list(target_list))
class SConsBuild(AbstractBuildGraph):
def __init__(self, env, Builder, Action, _parent=None, **kwargs):
self.env = env
self.Builder = Builder
self.Action = Action
self._rule_depfiles = {}
self._root_dir = env.Dir('#.')
if _parent is None:
self._default_targets = []
else:
self._default_targets = _parent._default_targets
super(SConsBuild, self).__init__(_parent=_parent, **kwargs)
def __call__(self, **kwargs):
super(SConsBuild, self).__call__(**kwargs)
return self._default_targets
@property
def root_dir(self):
"""
The root directory from which build commands are run.
This is either absolute, relative to the root directory of the
        repository, or empty. It should be used as the start= argument of an
os.path.relpath() call for any path that is specified on a command
line outside of the ${in} or ${out} variables (e.g. include search
directories).
For SCons, it is the root-relative path of the current directory.
"""
return os.path.relpath(self._root_dir.abspath)
def _make_variant(self, build_dir):
return type(self)(self.env.Clone(), self.Builder, self.Action,
build_dir=build_dir, _parent=self)
def add_env(self, name, value, replace=False):
if not replace and name in self.env:
raise KeyError("Duplicate definition of env ${name}"
.format(name=name))
self.env.Replace(**{name: value})
super(SConsBuild, self).add_env(name, value, replace=replace)
def append_env(self, name, value, separator=' '):
if name in self.env:
self.env.Append(**{name: separator + value})
else:
self.env.Replace(**{name: value})
super(SConsBuild, self).append_env(name, value, separator)
def get_env(self, name):
try:
return super(SConsBuild, self).get_env(name)
except KeyError:
return self.env['ENV'][name]
def add_rule(self, name, command, depfile=None, depfile_external=False,
compdbs=None, restat=False):
if 'Rule_' + name in self.env['BUILDERS']:
raise KeyError("Duplicate definition of rule {name}"
.format(name=name))
super(SConsBuild, self).add_rule(name, command, depfile=depfile,
depfile_external=depfile_external,
compdbs=compdbs)
# Replace the Ninja-style $in/$out variables with $SOURCES / $TARGETS
command = re.sub(r'\$(in\b|{in})', '${SOURCES}', command)
command = re.sub(r'\$(out\b|{out})', '${TARGETS}', command)
description = name + ' ${TARGETS}'
builder = self.Builder(action=self.Action(command, description))
self.env.Append(BUILDERS={'Rule_' + name: builder})
if depfile is not None:
self._rule_depfiles[name] = depfile
def add_target(self, targets, rule, sources=None, depends=None,
requires=None, byproducts=None, always=None, **local_env):
super(SConsBuild, self).add_target(
targets, rule, sources=sources, depends=depends,
requires=requires, byproducts=byproducts, **local_env)
targets = self._expand_target_list(targets)
sources = self._expand_target_list(sources)
depends = self._expand_target_list(depends)
requires = self._expand_target_list(requires)
byproducts = self._expand_target_list(byproducts)
if rule in self._rule_byproducts:
depsfile = re.sub(r'\$(out\b|{out})', targets[0],
self._rule_byproducts[rule])
byproducts = byproducts + (depsfile,)
tnodes = getattr(self.env, 'Rule_' + rule)(
target=targets, source=sources, **local_env)
if depends:
self.env.Depends(tnodes, depends)
if requires:
self.env.Requires(tnodes, requires)
if byproducts:
self.env.SideEffect(byproducts, targets)
# side-effects are not cleaned by default
self.env.Clean(targets, byproducts)
if always:
self.env.AlwaysBuild(targets)
if rule in self._rule_depfiles:
depfile = re.sub(r'\$(out\b|{out})', targets[0],
self._rule_depfiles[rule])
# Note: this is slightly broken; if the depfile is created by the
# rule that it affects, SCons will spuriously rebuild everything
# that uses it on the _second_ run after a clean. This appears to
# be a deliberate feature; the SCons maintainers are ideologically
# opposed to compiler generated depfiles. Ninja handles them
# correctly.
saved_dir = self.env.fs.getcwd()
try:
# Change to the root directory, so the depends in the depfile
# will be interpreted relative to it.
self.env.fs.chdir(self._root_dir, change_os_dir=False)
# Note that depfile must be a plain path, not a File node, and
# SCons will directly call open() on it. So it must be
# relative to the repository, not to self._root_dir.
self.env.ParseDepends(depfile)
finally:
self.env.fs.chdir(saved_dir, change_os_dir=False)
def add_alias(self, alias, targets):
targets = self._expand_target_list(targets)
self.env.Alias(alias, targets)
def future_alias(self, alias):
return self.env.Alias(alias)
def add_default_target(self, target, alias=False):
if not alias:
try:
target = self.env.Entry(target)
except ValueError:
pass
self.env.Default(target)
self._default_targets.append(target)
def add_gen_source(self, source):
# Don't care about these, SCons regenerates on every run anyway
pass
def add_gen_output(self, output):
# Don't care about these, SCons regenerates on every run anyway
pass
if __name__ == '__main__':
# Called stand-alone; generate a Ninja file.
import pipes
build = NinjaBuild('build.ninja',
arguments=dict(a.split('=', 1) for a in sys.argv[1:]))
build(gen_cmd=' '.join((pipes.quote(arg) for arg in sys.argv)))
| 32,251 | 38.331707 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/__init__.py | 0 | 0 | 0 | py |
|
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/objects/object_gen.py | #!/usr/bin/env python3
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from Cheetah.Template import Template
import argparse
import subprocess
import sys
class Object:
__slots__ = 'name', 'config'
def __init__(self, name):
items = name.split(',')
self.name = items[0]
self.config = items[1:]
def __str__(self):
return self.name
def type_enum(self):
return "OBJECT_TYPE_{:s}".format(self.name.upper())
def rcu_destroy_enum(self):
return "RCU_UPDATE_CLASS_{:s}_DESTROY".format(self.name.upper())
def main():
args = argparse.ArgumentParser()
mode_args = args.add_mutually_exclusive_group(required=True)
mode_args.add_argument('-t', '--template',
type=argparse.FileType('r', encoding="utf-8"),
help="Template file used to generate output")
args.add_argument('-o', '--output',
type=argparse.FileType('w', encoding="utf-8"),
default=sys.stdout, help="Write output to file")
args.add_argument("-f", "--formatter",
help="specify clang-format to format the code")
args.add_argument('input', metavar='INPUT', nargs='+', action='append',
help="List of objects to process")
options = args.parse_args()
object_list = [Object(o) for group in options.input for o in group]
output = "// Automatically generated. Do not modify.\n"
output += "\n"
ns = {'object_list': object_list}
output += str(Template(file=options.template, searchList=ns))
if options.formatter:
ret = subprocess.run([options.formatter], input=output.encode("utf-8"),
stdout=subprocess.PIPE)
output = ret.stdout.decode("utf-8")
if ret.returncode != 0:
raise Exception("failed to format output:\n ", ret.stderr)
options.output.write(output)
if __name__ == '__main__':
main()
| 2,038 | 28.550725 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/events/event_gen.py | #!/usr/bin/env python3
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# 2019 Cog Systems Pty Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
import sys
import logging
import subprocess
import inspect
import pickle
if __name__ == '__main__' and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from utils import genfile
else:
from ..utils import genfile
logger = logging.getLogger(__name__)
def main():
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
)
__loc__ = os.path.relpath(os.path.realpath(
os.path.dirname(os.path.join(os.getcwd(), os.path.dirname(__file__)))))
args = argparse.ArgumentParser()
mode_args = args.add_mutually_exclusive_group(required=True)
mode_args.add_argument('-t', '--template',
type=argparse.FileType('r', encoding='utf-8'),
help="Template file used to generate output")
mode_args.add_argument('--dump-tree', action='store_true',
help="Print the parse tree and exit")
mode_args.add_argument('-P', '--dump-pickle',
type=genfile.GenFileType('wb'),
help="Dump the IR to a Python pickle")
args.add_argument('-m', '--module', default=None,
help="Constrain output to a particular module")
args.add_argument('-I', '--extra-include', action='append', default=[],
help="Extra headers to include")
args.add_argument('-d', "--deps", type=genfile.GenFileType('w'),
help="Write implicit dependencies to Makefile",
default=None)
args.add_argument('-o', '--output', type=genfile.GenFileType('w'),
default=sys.stdout, help="Write output to file")
args.add_argument("-f", "--formatter",
help="specify clang-format to format the code")
args.add_argument('-p', '--load-pickle', type=argparse.FileType('rb'),
help="Load the IR from a Python pickle")
args.add_argument('input', metavar='INPUT', nargs='*',
type=argparse.FileType('r', encoding='utf-8'),
help="Event DSL files to process")
options = args.parse_args()
if options.input and options.load_pickle:
logger.error("Cannot specify both inputs and --load-pickle")
args.print_usage()
sys.exit(1)
elif options.input:
from lark import Lark, Visitor
from parser import TransformToIR
grammar_file = os.path.join(__loc__, 'grammars', 'events_dsl.lark')
parser = Lark.open(grammar_file, parser='lalr', start='start',
propagate_positions=True)
modules = {}
events = {}
transformer = TransformToIR(modules, events)
for f in options.input:
tree = parser.parse(f.read())
class FilenameVisitor(Visitor):
def __init__(self, filename):
self.filename = filename
def __default__(self, tree):
tree.meta.filename = self.filename
FilenameVisitor(f.name).visit(tree)
if options.dump_tree:
print(tree.pretty(), file=options.output)
transformer.transform(tree)
if options.dump_tree:
return 0
errors = transformer.errors
for m in modules.values():
errors += m.resolve(events)
for m in modules.values():
errors += m.finalise()
if errors:
logger.error("Found %d errors, exiting...", errors)
sys.exit(1)
elif options.load_pickle:
modules = pickle.load(options.load_pickle)
else:
logger.error("Must specify inputs or --load-pickle")
args.print_usage()
sys.exit(1)
if options.dump_pickle:
pickle.dump(modules, options.dump_pickle, protocol=-1)
else:
from Cheetah.Template import Template
try:
module = modules[options.module]
except KeyError:
logger.error("Specified module '%s' is unknown", options.module)
sys.exit(1)
ns = [module, {'extra_includes': options.extra_include}]
template = Template(file=options.template, searchList=ns)
result = str(template)
if options.formatter:
ret = subprocess.run([options.formatter],
input=result.encode("utf-8"),
stdout=subprocess.PIPE)
result = ret.stdout.decode("utf-8")
if ret.returncode != 0:
logger.error("Error formatting output", result)
sys.exit(1)
options.output.write(result)
if options.deps is not None:
deps = set()
if options.input:
deps.add(grammar_file)
for m in sys.modules.values():
try:
f = inspect.getsourcefile(m)
except TypeError:
continue
if f is None:
continue
f = os.path.relpath(f)
if f.startswith('../'):
continue
deps.add(f)
if options.dump_pickle:
out_name = options.dump_pickle.name
else:
out_name = options.output.name
options.deps.write(out_name + ' : ')
options.deps.write(' '.join(sorted(deps)))
options.deps.write('\n')
options.deps.close()
if __name__ == '__main__':
main()
| 5,648 | 32.229412 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/events/parser.py | # © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# 2019 Cog Systems Pty Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
from lark import Transformer, v_args
from ir import (
Include, Symbol, Type, ConstExpr, Priority, Result, ExpectedArgs, Param,
Selectors, SelectorParam, CountParam, Module, Event, HandledEvent,
MultiEvent, SetupEvent, SelectorEvent, Subscription, Optional, Public,
Handler, Constant, Unwinder, Success, LockAnnotation, LockName, NoReturn)
import collections
import logging
import math
logger = logging.getLogger(__name__)
class TransformToIR(Transformer):
def __init__(self, module_dict, event_dict):
super().__init__()
self.module_dict = module_dict
self.event_dict = event_dict
self.cur_event_dict = {}
self.errors = 0
def _add_without_duplicates(self, type_string, in_dict, new):
if new.name in in_dict:
existing = in_dict[new.name]
new_meta = new.name.meta
old_meta = existing.name.meta
logger.error("%s:%d:%d: error: duplicate definition of %s '%s'",
new_meta.filename, new_meta.line, new_meta.column,
type_string, new.name)
logger.info("%s:%d:%d: note: previous definition of %s '%s'",
old_meta.filename, old_meta.line,
old_meta.column, type_string, new.name)
self.errors += 1
else:
in_dict[new.name] = new
def _general_event(self, event_class, children, meta):
# Check for duplicated parameters
params = {}
for c in children:
if isinstance(c, Param):
self._add_without_duplicates('parameter', params, c)
event = event_class(children)
event.meta = meta
self._add_without_duplicates('event', self.cur_event_dict, event)
return event
@v_args(meta=True)
def module(self, children, meta):
assert not self.cur_event_dict
m = Module(children)
m.meta = meta
if m.name in self.module_dict:
self.module_dict[m.name].merge(m)
else:
self.module_dict[m.name] = m
return m
@v_args(meta=True)
def interface(self, children, meta):
interface_name = next(c for c in children if isinstance(c, Symbol))
for name, event in self.cur_event_dict.items():
if not name.startswith(interface_name + "_"):
meta = event.name.meta
logger.error("%s:%d:%d: error: incorrect name: "
"'%s' should start with '%s_'",
meta.filename, meta.line, meta.column, name,
interface_name)
self.errors += 1
self.event_dict[name] = event
self.cur_event_dict = {}
return self.module(children, meta)
def include(self, children):
return Include(''.join(str(c) for c in children))
@v_args(meta=True)
def publish_event(self, children, meta):
return self._general_event(Event, children, meta)
@v_args(meta=True)
def publish_handled_event(self, children, meta):
return self._general_event(HandledEvent, children, meta)
@v_args(meta=True)
def publish_multi_event(self, children, meta):
return self._general_event(MultiEvent, children, meta)
@v_args(meta=True)
def publish_setup_event(self, children, meta):
return self._general_event(SetupEvent, children, meta)
@v_args(meta=True)
def publish_selector_event(self, children, meta):
return self._general_event(SelectorEvent, children, meta)
@v_args(meta=True)
def symbol(self, children, meta):
sym = Symbol(*children)
sym.meta = meta
return sym
@v_args(meta=True)
def event_param(self, children, meta):
p = Param(children)
p.meta = meta
return p
@v_args(meta=True)
def selector_param(self, children, meta):
p = SelectorParam(children)
p.meta = meta
return p
@v_args(meta=True)
def count_param(self, children, meta):
p = CountParam(children)
p.meta = meta
return p
@v_args(meta=True)
def result(self, children, meta):
r = Result(children)
r.meta = meta
return r
@v_args(meta=True)
def void_result(self, children, meta):
r = Result(children, void=True)
r.meta = meta
return r
@v_args(meta=True)
def type_decl(self, children, meta):
t = Type(' '.join(str(c) for c in children))
t.meta = meta
return t
@v_args(meta=True)
def selector_const(self, children, meta):
t = Type(' '.join(str(c) for c in children))
t.meta = meta
return t
@v_args(meta=True)
def subscribe(self, children, meta):
s = Subscription(children)
s.meta = meta
return s
@v_args(meta=True)
def selector(self, children, meta):
s = Selectors(children)
s.meta = meta
return s
subscribe_public = subscribe
def optional(self, children):
return Optional()
def public(self, children):
return Public()
@v_args(meta=True)
def handler(self, children, meta):
h = Handler(*children)
h.meta = meta
return h
handler_public = handler
@v_args(meta=True)
def unwinder(self, children, meta):
u = Unwinder(*children)
u.meta = meta
return u
unwinder_public = unwinder
constant = v_args(inline=True)(Constant)
def expected_args(self, children):
args = collections.OrderedDict()
for c in children:
c.name = c
self._add_without_duplicates('argument', args, c)
return ExpectedArgs(args.values())
@v_args(meta=True)
def priority(self, children, meta):
if children[0] in ('first', 'max'):
c = Priority(math.inf)
elif children[0] in ('last', 'min'):
c = Priority(-math.inf)
elif children[0] == 'default':
c = Priority(0)
else:
c = Priority(children[0])
c.meta = meta
return c
@v_args(meta=True)
def noreturn(self, children, meta):
c = NoReturn()
c.meta = meta
return c
@v_args(meta=True)
def constexpr(self, children, meta):
c = ConstExpr(' '.join(children))
c.meta = meta
return c
@v_args(meta=True)
def success(self, children, meta):
c = Success(' '.join(children))
c.meta = meta
return c
@v_args(meta=True)
def lock_name(self, children, meta):
c = LockName(''.join(children))
c.meta = meta
return c
@v_args(meta=True)
def lock_opt(self, children, meta):
action, kind = children[0].rsplit()[-1].split('_')
c = LockAnnotation(action, kind, children[1])
c.meta = meta
return c
| 7,056 | 28.161157 | 77 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/events/ir.py | # © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# 2019 Cog Systems Pty Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
import abc
import logging
import collections
logger = logging.getLogger(__name__)
def _first_of_type(children, cls):
return next(c for c in children if isinstance(c, cls))
def _first_of_type_opt(children, cls):
try:
return _first_of_type(children, cls)
except StopIteration:
return None
def _all_of_type(children, cls):
return tuple(c for c in children if isinstance(c, cls))
class IRObject(object):
def __getstate__(self):
state = self.__dict__.copy()
if 'meta' in state:
del state['meta']
return state
class DSLError(Exception):
pass
class Include(str, IRObject):
pass
class Symbol(str, IRObject):
pass
class Type(str, IRObject):
pass
class ConstExpr(str, IRObject):
pass
class LockName(str, IRObject):
pass
class LockAnnotation(IRObject):
def __init__(self, action, kind, lock):
self.action = action
self.kind = kind
self.lock = lock
def _check_kind(self, kinds):
if self.lock in kinds:
if kinds[self.lock] != self.kind:
logger.error("%s:%d:%d: "
"error: inconsistent lock kinds for %s (%s, %s)",
self.meta.filename, self.meta.line,
self.meta.column, self.lock, self.kind,
kinds[self.lock])
raise DSLError()
else:
kinds[self.lock] = self.kind
def apply(self, acquires, releases, requires, excludes, kinds):
self._check_kind(kinds)
if self.action == 'acquire':
if self.lock in acquires:
                prev = next(iter(acquires & {self.lock}))
logger.error("%s:%d:%d: "
"error: %s previously acquired at %s:%d:%d",
self.meta.filename, self.meta.line,
self.meta.column, self.lock, prev.meta.filename,
prev.meta.line, prev.meta.column)
raise DSLError()
elif self.lock in releases:
releases.remove(self.lock)
else:
acquires.add(self.lock)
excludes.add(self.lock)
elif self.action == 'release':
if self.lock in releases:
                prev = next(iter(releases & {self.lock}))
logger.error("%s:%d:%d: "
"error: %s previously released at %s:%d:%d",
self.meta.filename, self.meta.line,
self.meta.column, self.lock, prev.meta.filename,
prev.meta.line, prev.meta.column)
raise DSLError()
elif self.lock in acquires:
acquires.remove(self.lock)
else:
releases.add(self.lock)
requires.add(self.lock)
elif self.action == 'require':
if self.lock not in acquires:
requires.add(self.lock)
elif self.action == 'exclude':
if self.lock not in releases:
excludes.add(self.lock)
else:
raise NotImplementedError(self.action)
def combine(self, actions, kinds):
self._check_kind(kinds)
actions[self.action].add(self.lock)
def unwind(self):
if self.action == 'acquire':
ret = LockAnnotation('release', self.kind, self.lock)
ret.meta = self.meta
return ret
elif self.action == 'release':
ret = LockAnnotation('acquire', self.kind, self.lock)
ret.meta = self.meta
return ret
else:
return self
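    # Worked example (illustrative lock and kind names): if the annotations
    # applied in order across an event's subscribers are
    # 'acquire_spinlock(L)' followed later by 'release_spinlock(L)', the
    # first apply() adds L to both `acquires` and `excludes`, and the second
    # removes it from `acquires` again, so the event as a whole neither
    # acquires nor releases L but is still treated as excluding it.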
class Priority(float, IRObject):
pass
class Result(IRObject):
def __init__(self, children, void=False):
try:
self.type = _first_of_type(children, Type)
self.default = _first_of_type(children, ConstExpr)
except StopIteration:
if void:
self.type = Type('void')
self.default = None
else:
raise StopIteration
class ExpectedArgs(list, IRObject):
pass
class Selectors(list, IRObject):
pass
class Param(IRObject):
def __init__(self, children):
self.name = _first_of_type(children, Symbol)
self.type = _first_of_type(children, Type)
class SelectorParam(Param):
pass
class CountParam(Param):
pass
class AbstractEvent(IRObject, metaclass=abc.ABCMeta):
def __init__(self, children):
self.name = _first_of_type(children, Symbol)
self._param_dict = collections.OrderedDict(
(c.name, c) for c in children if isinstance(c, Param))
def set_owner(self, module):
self.module_name = module.name
self.module_includes = module.includes
@abc.abstractmethod
def subscribe(self, subscription):
pass
def finalise(self):
pass
@abc.abstractproperty
def subscribers(self):
raise NotImplementedError
@abc.abstractproperty
def lock_opts(self):
raise NotImplementedError
@abc.abstractproperty
def return_type(self):
raise NotImplementedError
@abc.abstractproperty
def noreturn(self):
raise NotImplementedError
def param(self, name):
return self._param_dict[name]
@property
def params(self):
return tuple(self._param_dict.values())
@property
def param_names(self):
return tuple(p.name for p in self.params)
@property
def unused_param_names(self):
params = set(self.param_names)
for s in self.subscribers:
for h in s.all_handlers:
params -= set(h.args)
if not params:
return set()
return params
class AbstractSortedEvent(AbstractEvent):
def __init__(self, children):
super().__init__(children)
self._subscribers = []
self._lock_opts = None
def subscribe(self, subscription):
super().subscribe(subscription)
if subscription.priority is None:
subscription.priority = 0
if subscription.selectors is not None:
logger.error("%s:%d:%d: error: selector %s does not apply to "
"non-selector event %s",
subscription.selectors[0].meta.filename,
subscription.selectors[0].meta.line,
subscription.selectors[0].meta.column,
subscription.selectors[0], self.name)
raise DSLError()
if subscription.constant is not None:
logger.error("%s:%d:%d: error: constant value %s specified for "
"non-selector event %s",
subscription.constant.meta.filename,
subscription.constant.meta.line,
subscription.constant.meta.column,
subscription.constant, self.name)
raise DSLError()
self._subscribers.append(subscription)
def finalise(self):
super().finalise()
subscribers = sorted(self._subscribers,
key=lambda x: (-x.priority, x.handler))
for left, right in zip(subscribers, subscribers[1:]):
if left.priority != 0 and left.priority == right.priority:
logger.error("%s:%d:%d: error: handler %s for event %s has "
"the same nonzero priority as handler %s\n"
"%s:%d:%d: info: handler %s subscribed here",
left.priority.meta.filename,
left.priority.meta.line,
left.priority.meta.column, left.handler,
self.name, right.handler,
right.priority.meta.filename,
right.priority.meta.line,
right.priority.meta.column, right.handler)
raise DSLError()
self._subscribers = tuple(subscribers)
acquires = set()
releases = set()
requires = set()
excludes = set()
kinds = {}
for s in self._subscribers:
for lock_opt in s.lock_opts:
lock_opt.apply(acquires, releases, requires, excludes, kinds)
lock_opts = []
for lock in sorted(acquires):
lock_opts.append(LockAnnotation('acquire', kinds[lock], lock))
for lock in sorted(releases):
lock_opts.append(LockAnnotation('release', kinds[lock], lock))
for lock in sorted(requires):
lock_opts.append(LockAnnotation('require', kinds[lock], lock))
for lock in sorted(excludes):
lock_opts.append(LockAnnotation('exclude', kinds[lock], lock))
self._lock_opts = tuple(lock_opts)
noreturn = (self._subscribers and self._subscribers[-1].handler and
self._subscribers[-1].handler.noreturn)
if noreturn and self.return_type != 'void':
s = self._subscribers[-1]
n = s.handler.noreturn
logger.error("%s:%d:%d: error: last handler %s for event %s must "
"return, but is declared as noreturn",
n.meta.filename, n.meta.line, n.meta.column,
s.handler, self.name)
raise DSLError()
for s in self._subscribers[:-1]:
if s.handler is not None and s.handler.noreturn:
n = s.handler.noreturn
logger.error("%s:%d:%d: error: handler %s for event %s does "
"not return, but is not the last handler (%s)",
n.meta.filename, n.meta.line, n.meta.column,
s.handler, self.name,
self._subscribers[-1].handler)
raise DSLError()
self._noreturn = noreturn
@property
def noreturn(self):
return self._noreturn
@property
def subscribers(self):
return self._subscribers
@property
def lock_opts(self):
return self._lock_opts
class Event(AbstractSortedEvent):
@property
def return_type(self):
return 'void'
class HandledEvent(AbstractSortedEvent):
def __init__(self, children):
super().__init__(children)
self.result = _first_of_type_opt(children, Result)
@property
def return_type(self):
return self.result.type if self.result is not None else 'bool'
@property
def default(self):
return self.result.default if self.result is not None else 'false'
class MultiEvent(AbstractSortedEvent):
def __init__(self, children):
super().__init__(children)
self.count = _first_of_type(children, CountParam)
@property
def return_type(self):
return self.count.type
@property
def unused_param_names(self):
return super().unused_param_names - {self.count.name}
class SetupEvent(AbstractSortedEvent):
def __init__(self, children):
super().__init__(children)
self.result = _first_of_type(children, Result)
self.success = _first_of_type(children, Success)
self.result.name = Symbol('result')
if self.result.name in self._param_dict:
result = self._param_dict[self.result.name]
logger.error("%s:%d:%d: error: setup event must not have an "
"explicit parameter named '%s'",
result.meta.filename, result.meta.line,
result.meta.column, self.result.name)
raise DSLError()
self._result_param = Param([self.result.name, self.result.type])
def finalise(self):
super().finalise()
if self.subscribers and self.subscribers[-1].unwinder is not None:
u = self.subscribers[-1].unwinder
logger.warning("%s:%d:%d: warning: unwinder %s() is unused",
u.meta.filename, u.meta.line, u.meta.column, u.name)
@property
def return_type(self):
return self.result.type
def param(self, name):
if name == self.result.name:
return self._result_param
return super().param(name)
class SelectorEvent(AbstractEvent):
def __init__(self, children):
super().__init__(children)
self.selector = _first_of_type(children, SelectorParam)
try:
self.result = _first_of_type(children, Result)
except StopIteration:
self.result = Result([Type('bool'), ConstExpr('false')])
self._subscribers = {}
@property
def subscribers(self):
return self._subscribers.values()
def subscribe(self, subscription):
if subscription.priority is not None:
logger.error("%s:%d:%d: error: priority (%d) cannot be specified "
"for subscription to a selector event ('%s')",
subscription.priority.meta.filename,
subscription.priority.meta.line,
subscription.priority.meta.column,
subscription.priority, self.name)
raise DSLError()
if subscription.selectors is None:
logger.error("%s:%d:%d: error: no selector specified for "
"subscription to selector event '%s'",
subscription.event_name.meta.filename,
subscription.event_name.meta.line,
subscription.event_name.meta.column, self.name)
raise DSLError()
for s in self._subscribers:
for new in subscription.selectors:
if new in self._subscribers[s].selectors:
logger.error("%s:%d:%d: error: duplicate selector '%s' "
"specified for subscription to selector "
"event '%s'",
subscription.event_name.meta.filename,
subscription.event_name.meta.line,
subscription.event_name.meta.column, new,
self.name)
raise DSLError()
key = subscription.selectors[0]
self._subscribers[key] = subscription
def finalise(self):
super().finalise()
kinds = {}
actions = {
'acquire': set(),
'release': set(),
'require': set(),
'exclude': set(),
}
for s in self._subscribers.values():
for lock_opt in s.lock_opts:
lock_opt.combine(actions, kinds)
lock_opts = []
for action in actions.keys():
for lock in actions[action]:
lock_opts.append(LockAnnotation(action, kinds[lock], lock))
self._lock_opts = tuple(lock_opts)
@property
def lock_opts(self):
return self._lock_opts
@property
def return_type(self):
return self.result.type
@property
def noreturn(self):
# Note: this could be true if the selector is a enum type that is
# covered by noreturn handlers. We're not likely to ever do that.
return False
@property
def unused_param_names(self):
return super().unused_param_names - {self.selector.name}
class Optional(IRObject):
pass
class Public(IRObject):
pass
class NoReturn(IRObject):
pass
class Subscription(IRObject):
def __init__(self, children):
self.event_name = _first_of_type(children, Symbol)
self.optional = any(c for c in children if isinstance(c, Optional))
self.selectors = _first_of_type_opt(children, Selectors)
self.handler = _first_of_type_opt(children, Handler)
self.constant = _first_of_type_opt(children, Constant)
if self.handler is None and self.constant is None:
self.handler = Handler(_first_of_type_opt(children, ExpectedArgs),
_first_of_type_opt(children, NoReturn))
self.unwinder = _first_of_type_opt(children, Unwinder)
self.priority = _first_of_type_opt(children, Priority)
self.lock_opts = _all_of_type(children, LockAnnotation)
def set_owner(self, module):
self.module_name = module.name
def resolve(self, events):
try:
self.event = events[self.event_name]
except KeyError:
if not self.optional:
logger.error(
"%s:%d:%d: error: subscribed to unknown event '%s'",
self.meta.filename, self.meta.line, self.meta.column,
self.event_name)
raise DSLError()
self.event = NotImplemented
else:
self.event.subscribe(self)
for h in self.all_handlers:
h.resolve(self)
@property
def all_handlers(self):
if self.event is not NotImplemented:
if self.handler is not None:
yield self.handler
if self.unwinder is not None:
yield self.unwinder
class AbstractFunction(IRObject, metaclass=abc.ABCMeta):
def __init__(self, *children):
self.name = _first_of_type_opt(children, Symbol)
self.args = _first_of_type_opt(children, ExpectedArgs)
self.public = any(c for c in children if isinstance(c, Public))
self._noreturn = _first_of_type_opt(children, NoReturn)
def resolve(self, subscription):
self.subscription = subscription
self.module_name = subscription.module_name
self.event = subscription.event
if self.name is None:
self.name = self._default_name
if self.args is None:
self.args = self._available_params
else:
for a in self.args:
if a not in self._available_params:
logger.error(
"%s:%d:%d: error: event '%s' has no argument '%s'",
a.meta.filename, a.meta.line,
a.meta.column, self.event.name, a)
raise DSLError()
@abc.abstractproperty
def _default_name(self):
        raise NotImplementedError
@property
def _available_params(self):
return self.event.param_names
@property
def noreturn(self):
return self._noreturn
@property
def return_type(self):
return self.event.return_type if not self.noreturn else 'void'
@property
def params(self):
for a in self.args:
yield self.event.param(a)
@property
def lock_opts(self):
for opt in self.subscription.lock_opts:
yield opt
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return self.name
def __hash__(self):
"""Generate a unique hash for the function."""
return hash((self.name, self.return_type) +
tuple((p.name, p.type) for p in self.params))
class Handler(AbstractFunction):
@property
def _default_name(self):
return "{:s}_handle_{:s}".format(self.module_name, self.event.name)
class Unwinder(AbstractFunction):
@property
def _default_name(self):
return "{:s}_unwind_{:s}".format(self.module_name, self.event.name)
@property
def _available_params(self):
return (self.event.result.name,) + super()._available_params
@property
def return_type(self):
return 'void'
@property
def lock_opts(self):
for opt in self.subscription.lock_opts:
yield opt.unwind()
class Constant(str, IRObject):
def __init__(self, children):
self.value = children[0]
class Success(Constant):
pass
class Module(IRObject):
def __init__(self, children):
self.name = _first_of_type(children, Symbol)
self.includes = _all_of_type(children, Include)
self.events = _all_of_type(children, AbstractEvent)
for e in self.events:
e.set_owner(self)
self.subscriptions = _all_of_type(children, Subscription)
for s in self.subscriptions:
s.set_owner(self)
def merge(self, other):
assert self.name == other.name
self.includes += other.includes
self.events += other.events
self.subscriptions += other.subscriptions
for s in other.subscriptions:
s.set_owner(self)
def resolve(self, events):
errors = 0
for s in self.subscriptions:
try:
s.resolve(events)
except DSLError:
errors += 1
return errors
def finalise(self):
errors = 0
for e in self.events:
try:
e.finalise()
except DSLError:
errors += 1
return errors
@property
def handlers(self):
# Unique event handlers defined by this module.
#
# Each of these may be used by multiple subscriptions, either to
# different events, or to the same selector event with different
# selections, or even repeatedly for one event.
seen_handlers = dict()
for s in self.subscriptions:
for h in s.all_handlers:
if h.name in seen_handlers:
if seen_handlers[h.name] != hash(h):
logger.error("handler decl mismatch: %s",
h.name)
raise DSLError()
continue
seen_handlers[h.name] = hash(h)
yield h
@property
def declared_handlers(self):
# Unique event handlers declared by this module's events.
seen_handlers = dict()
for e in self.events:
for s in e.subscribers:
for h in s.all_handlers:
if h.name in seen_handlers:
if seen_handlers[h.name] != hash(h):
logger.error("handler decl mismatch: %s",
h.name)
raise DSLError()
continue
if h.public:
continue
seen_handlers[h.name] = hash(h)
yield h
@property
def handler_includes(self):
seen_modules = set()
seen_includes = set()
for s in self.subscriptions:
e = s.event
if e is NotImplemented:
continue
m = e.module_name
if m in seen_modules:
continue
seen_modules.add(m)
for i in e.module_includes:
if i in seen_includes:
continue
seen_includes.add(i)
yield i
@property
def simple_events(self):
return (e for e in self.events if isinstance(e, Event))
@property
def handled_events(self):
return (e for e in self.events if isinstance(e, HandledEvent))
@property
def multi_events(self):
return (e for e in self.events if isinstance(e, MultiEvent))
@property
def setup_events(self):
return (e for e in self.events if isinstance(e, SetupEvent))
@property
def selector_events(self):
return (e for e in self.events if isinstance(e, SelectorEvent))
| 23,761 | 30.809906 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/cpptest/misra_xml_to_json.py | #!/usr/bin/env python3
# coding: utf-8
#
# © 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Run as a part of gitlab CI, after Parasoft reports have been generated.
This script converts the Parasoft XML-format report to a Code Climate
compatible json file, that gitlab code quality can interpret.
"""
import xml.etree.ElementTree as ET
import json
import argparse
import sys
import os
import re
argparser = argparse.ArgumentParser(
description="Convert Parasoft XML to Code Climate JSON")
argparser.add_argument('input', type=argparse.FileType('r'), nargs='?',
default=sys.stdin, help="the Parasoft XML input")
argparser.add_argument('--output', '-o', type=argparse.FileType('w'),
default=sys.stdout, help="the Code Climate JSON output")
args = argparser.parse_args()
tree = ET.parse(args.input)
parasoft_viols = tree.findall(".//StdViol") + tree.findall(".//FlowViol")
cc_viols = []
severity_map = {
1: "blocker",
2: "critical",
3: "major",
4: "minor",
5: "info",
}
deviation_map = {
'MISRAC2012-RULE_20_12-a': [
(None, re.compile(r"parameter of potential macro 'assert'")),
],
# False positives due to __c11 builtins taking int memory order arguments
# instead of enum
'MISRAC2012-RULE_10_3-b': [
(None, re.compile(r"number '2'.*'essentially Enum'.*"
r"'__c11_atomic_load'.*'essentially signed'")),
(None, re.compile(r"number '3'.*'essentially Enum'.*"
r"'__c11_atomic_(store'|fetch_).*"
r"'essentially signed'")),
(None, re.compile(r"number '[45]'.*'essentially Enum'.*"
r"'__c11_atomic_compare_exchange_(strong|weak)'.*"
r"'essentially signed'")),
],
# False positives with unknown cause: the return value of assert_if_const()
# is always used, to determine whether to call assert_failed()
'MISRAC2012-RULE_17_7-b': [
(None, re.compile(r'"assert_if_const"')),
],
# Advisory rule which is impractical to enforce for generated accessors,
# since the type system has no information about which accessors are used.
'MISRAC2012-RULE_8_7-a': [
(re.compile(r'^build/.*/accessors\.c$'), None),
],
}
def matches_deviation(v):
rule = v.attrib['rule']
if rule not in deviation_map:
return False
msg = v.attrib['msg']
path = v.attrib['locFile'].split(os.sep, 2)[2]
def check_constraint(constraint, value):
if constraint is None:
return True
try:
return constraint.search(value)
except AttributeError:
return constraint == value
for d_path, d_msg in deviation_map[rule]:
if check_constraint(d_path, path) and check_constraint(d_msg, msg):
return True
return False
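# For example (illustrative): a MISRAC2012-RULE_20_12-a violation whose
# message contains "parameter of potential macro 'assert'" matches the first
# deviation above, so it is kept in the output but downgraded to severity
# "info" rather than the severity implied by its Parasoft level.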
cc_viols = [
({
"type": "issue",
"categories": ["Bug Risk"],
"severity": ('info' if matches_deviation(v)
else severity_map[int(v.attrib['sev'])]),
"check_name": v.attrib['rule'],
"description": (v.attrib['msg'] + '. ' +
v.attrib['rule.header'] + '. (' +
v.attrib['rule'] + ')'),
"fingerprint": v.attrib['unbViolId'],
"location": {
"path": v.attrib['locFile'].split(os.sep, 2)[2],
"lines": {
"begin": int(v.attrib['locStartln']),
"end": int(v.attrib['locEndLn'])
}
}
})
for v in parasoft_viols]
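# Sketch of one resulting Code Climate entry (field values illustrative):
#
#   {"type": "issue", "categories": ["Bug Risk"], "severity": "major",
#    "check_name": "MISRAC2012-RULE_10_4-a",
#    "description": "<msg>. <rule header>. (MISRAC2012-RULE_10_4-a)",
#    "fingerprint": "<unbViolId>",
#    "location": {"path": "hyp/core/example.c",
#                 "lines": {"begin": 42, "end": 42}}}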
args.output.write(json.dumps(cc_viols))
args.output.close()
| 3,717 | 30.243697 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/debug/tracebuf.py | #!/usr/bin/env python3
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Convert a trace buffer binary file to text form.
"""
import os
import struct
import argparse
import sys
import tempfile
import subprocess
import itertools
import math
MAP_ID = 0x14
UNMAP_ID = 0x15
TRACE_FORMAT = 1
TRACE_IDS = {
0: "INFO",
1: "WARN",
2: "HYPERCALL",
3: "DEBUG",
10: "PANIC",
11: "ASSERT_FAILED",
32: "VGIC_VIRQ_CHANGED",
33: "VGIC_DSTATE_CHANGED",
34: "VGIC_HWSTATE_CHANGED",
35: "VGIC_HWSTATE_UNCHANGED",
36: "VGIC_GICD_WRITE",
37: "VGIC_GICR_WRITE",
38: "VGIC_SGI",
39: "VGIC_ITS_COMMAND",
40: "VGIC_ROUTE",
41: "VGIC_ICC_WRITE",
42: "VGIC_ASYNC_EVENT",
48: "PSCI_PSTATE_VALIDATION",
49: "PSCI_VPM_STATE_CHANGED",
50: "PSCI_VPM_SYSTEM_SUSPEND",
51: "PSCI_VPM_SYSTEM_RESUME",
52: "PSCI_VPM_VCPU_SUSPEND",
53: "PSCI_VPM_VCPU_RESUME",
54: "PSCI_SYSTEM_SUSPEND",
55: "PSCI_SYSTEM_RESUME",
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input", nargs='?', type=argparse.FileType('rb'),
default=sys.stdin.buffer, help="Trace binary file")
image_args = parser.add_mutually_exclusive_group()
image_args.add_argument("--elf", '-e', type=argparse.FileType('rb'),
help="ELF image")
image_args.add_argument("--binary", '-b', type=argparse.FileType('rb'),
help="Binary hypervisor image")
timestamp_args = parser.add_mutually_exclusive_group()
timestamp_args.add_argument("--freq", '-f', type=int, default=19200000,
help="Timer frequency in Hz")
timestamp_args.add_argument("--ticks", '-t', action="store_true",
help="Show time in ticks instead of seconds")
parser.add_argument('-T', "--time-offset", default=0, type=float,
help="Offset to subtract from displayed timestamps"
" (in the same units as the timestamp)")
format_args = parser.add_mutually_exclusive_group()
format_args.add_argument('-s', "--sort", action="store_const",
dest='sort', const='s',
help="Sort entries by timestamp")
format_args.add_argument('-r', "--raw", action="store_const",
dest='sort', const='u',
help="Entries as positioned in trace ring buffer")
format_args.add_argument('-m', "--merge", action="store_const",
dest='sort', const='m',
help="Entries merged and sorted by timestamp")
parser.add_argument("--show-missing", action="store_true",
help="Mark invalid or overwritten log entries")
parser.add_argument('-o', "--output", default=sys.stdout,
type=argparse.FileType('w', encoding='utf-8'),
help="Output text file")
parser.set_defaults(sort='s')
args = parser.parse_args()
global image
image = ()
if args.elf is not None:
with tempfile.TemporaryDirectory() as tmpdir:
binfile = os.path.join(tmpdir, 'hyp.bin')
objcopy = os.path.join(os.environ['LLVM'], 'bin',
'llvm-objcopy')
subprocess.check_call([objcopy, '-j', '.text',
'-j', '.rodata', '-O', 'binary',
args.elf.name, binfile])
with open(binfile, 'rb') as binfile:
image = binfile.read()
elif args.binary is not None:
image = args.binary.read()
entry_iter = read_all_entries(args)
log = prepare_log(args, entry_iter)
print_log(args, log)
class Arg(int):
__cache = {}
def __new__(cls, value, strict=False):
if value in Arg.__cache:
return Arg.__cache[value]
self = super().__new__(cls, value)
self.__strict = strict
Arg.__cache[value] = self
self.__str = self.__gen_str()
return self
def __format__(self, format_spec):
if format_spec.endswith('s'):
return str(self).__format__(format_spec)
return super().__format__(format_spec)
def __gen_str(self):
try:
bs = bytearray()
assert (self & 0x1fffff) < len(image)
for i in range((self & 0x1fffff), len(image)):
b = image[i]
if b == 0:
break
bs.append(b)
if len(bs) > 512:
break
return bs.decode('utf-8')
except Exception:
if self.__strict:
raise
return '<str:{:#x}>'.format(self)
def __str__(self):
return self.__str
class LogEntry:
def __init__(self, ticks, cpu_id, string=''):
self.ticks = ticks
self.cpu_id = cpu_id
self.__str = string
def __str__(self):
return self.__str
def set_string(self, string):
self.__str = string
class Event(LogEntry):
__slots__ = ('ticks', 'trace_id', 'trace_ids', 'cpu_id', 'missing_before',
'missing_after', '__str')
def __init__(self, args, info, tag, fmt_ptr, arg0, arg1, arg2, arg3, arg4):
if info == 0:
# Empty trace slot
raise ValueError("empty slot")
ticks = info & ((1 << 56) - 1)
cpu_id = info >> 56
super().__init__(ticks, cpu_id)
self.trace_id = tag & 0xffff
if TRACE_FORMAT == 1:
self.trace_ids = (tag >> 16) & 0xffffffff
vmid = self.trace_ids & 0xffff
vcpu = (self.trace_ids >> 16) & 0xffff
caller_id = '{:#04x}:{:#02d}'.format(vmid, vcpu)
else:
self.trace_ids = 0
caller_id = ''
if self.trace_id in TRACE_IDS:
trace_id = TRACE_IDS[self.trace_id]
else:
trace_id = '{:#06x}'.format(self.trace_id)
# Try to obtain a C string at the given offset
try:
fmt = str(Arg(fmt_ptr, strict=True))
except Exception:
fmt = "? fmt_ptr {:#x}".format(fmt_ptr) + \
" args {:#x} {:#x} {:#x} {:#x} {:#x}"
# Try to format the args using the given format string
try:
msg = fmt.format(Arg(arg0), Arg(arg1), Arg(arg2), Arg(arg3),
Arg(arg4))
except Exception:
msg = ("? fmt_str {:s} args {:#x} {:#x} {:#x} {:#x} {:#x}"
.format(fmt, arg0, arg1, arg2, arg3, arg4))
if args.ticks:
rel_time = int(self.ticks - args.time_offset)
abs_time = int(self.ticks)
if args.time_offset:
ts_str = "[{:12d}/{:12d}]".format(rel_time, abs_time)
else:
ts_str = "[{:12d}]".format(abs_time)
else:
rel_time = (float(self.ticks) / args.freq) - args.time_offset
abs_time = float(self.ticks) / args.freq
if args.time_offset:
ts_str = "[{:12.6f}/{:12.6f}]".format(rel_time, abs_time)
else:
ts_str = "[{:12.6f}]".format(abs_time)
self.set_string("{:s} <{:d}> {:s} {:s} {:s}\n".format(
ts_str, self.cpu_id, caller_id, trace_id, msg))
self.missing_before = False
self.missing_after = False
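    # Entry layout assumed above (matching the unpack in read_entries()):
    # each 64-byte record is eight u64 fields -- info (bits 0..55 timestamp
    # ticks, bits 56..63 CPU id), tag (bits 0..15 trace id, bits 16..47
    # VMID/VCPU ids when TRACE_FORMAT == 1), a format-string pointer whose
    # low bits index the extracted hypervisor image, and five arguments.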
def read_entries(args):
header = args.input.read(64)
if not header or (len(header) < 64):
# Reached end of file
if header:
print("<skipped trailing bytes>\n")
raise StopIteration
magic = struct.unpack('<L', header[:4])[0]
if magic == 0x46554236: # 6BUF
endian = '<'
elif magic == 0x36425568: # FUB6
endian = '>'
else:
print("Unexpected magic number {:#x}".format(magic))
raise StopIteration
cpu_mask = struct.unpack(endian + 'QQQQ', header[8:40])
cpu_mask = cpu_mask[0] | (cpu_mask[1] << 64) | (cpu_mask[2] << 128) | \
        (cpu_mask[3] << 192)
global_buffer = cpu_mask == 0
cpus = ''
while cpu_mask != 0:
msb = cpu_mask.bit_length() - 1
cpus += '{:d}'.format(msb)
cpu_mask &= ~(1 << msb)
if cpu_mask != 0:
cpus += ','
if global_buffer:
print("Processing global buffer...")
else:
print("Processing CPU {:s} buffer...".format(cpus))
entries_max = struct.unpack(endian + 'L', header[4:8])[0]
head_index = struct.unpack(endian + 'L', header[40:44])[0]
# Check if this buffer has wrapped around. Since the older traces that
# don't implement this flag will read it as zero, to stay backwards
# compatible, we decode a 0 as "wrapped" and 1 as "unwrapped".
wrapped = True if header[44:45] == b'\x00' else False
# If wrapped around or old format, read the whole buffer, otherwise only
# read the valid entries
entry_count = entries_max if wrapped else head_index
if entry_count == 0:
# Empty buffer, skip over the unused bytes
print(" Empty buffer")
args.input.seek(entries_max * 64, 1)
return iter(())
else:
print(" Found {:d} entries. Wrapped: {}".format(entry_count, wrapped))
warn = True
entries = []
for i in range(entry_count):
trace = args.input.read(64)
if len(trace) < 64:
print(" Warning, log truncated. Read {:d} of {:d} entries".format(
i, entry_count))
break
try:
entries.append(Event(args, *struct.unpack(endian + "QQQQQQQQ",
trace)))
except ValueError:
if warn:
print(" Warning, bad input. Read {:d} of {:d} entries".format(
i, entry_count))
warn = False
pass
if args.sort == 'm':
if global_buffer:
header_string = "=== GLOBAL TRACES START ===\n"
else:
header_string = "=== CPU {:s} TRACES START ===\n".format(cpus)
else:
if global_buffer:
header_string = "=== GLOBAL TRACE ===\n"
else:
header_string = "=== CPU {:s} TRACE ===\n".format(cpus)
if not wrapped or (head_index == entries_max):
first_index = 0
else:
first_index = head_index
# Add the same timestamp as the first entry
entry_header = LogEntry(entries[first_index].ticks, 0, header_string)
if args.sort == 's':
# Split at the head index
entry_iter = itertools.chain(
[entry_header], entries[head_index:], entries[:head_index])
else:
entry_iter = itertools.chain([entry_header], entries)
if not wrapped:
# Skip over the unused bytes
if args.input.seekable():
args.input.seek((entries_max - head_index) * 64, 1)
else:
args.input.read((entries_max - head_index) * 64)
return entry_iter
def read_all_entries(args):
def entry_iters():
if args.sort == 'm':
yield [LogEntry(0, 0, "==== MERGED CPU AND GLOBAL TRACES ====\n")]
while True:
try:
yield read_entries(args)
except StopIteration:
break
return itertools.chain(*entry_iters())
def prepare_log(args, entry_iter):
if args.show_missing:
# Simple search for missing entries: look for either an invalid info
# field, or a timestamp jumping backwards.
#
# If the timestamp jumps backwards by less than 10 ticks, we assume
# that it was an out-of-order trace write due to a race to obtain a
# slot. This typically suppresses several false positives in any large
# trace buffer.
timestamp_slack = 10
last_timestamp = -math.inf
missing_entry = False
log = []
for entry in entry_iter:
if entry is None:
missing_entry = True
if log:
log[-1].missing_after = True
else:
if missing_entry:
entry.missing_before = True
missing_entry = False
timestamp = entry.ticks
if timestamp + timestamp_slack < last_timestamp:
entry.missing_before = True
if log:
log[-1].missing_after = True
last_timestamp = timestamp
log.append(entry)
else:
log = list(entry_iter)
if args.sort == 'm':
log = sorted(log, key=lambda e: e.ticks)
if len(log) == 0:
sys.exit(1)
return log
def print_log(args, log):
prev_entry = None
for entry in log:
if args.show_missing and (prev_entry is not None and (
entry.missing_before or prev_entry.missing_after)):
args.output.write("<possible missing entries>\n")
args.output.write(str(entry))
prev_entry = entry
if __name__ == "__main__":
main()
| 13,209 | 31.298289 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/hypercalls/hypercall.py | #!/usr/bin/env python3
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
from Cheetah.Template import Template
from Cheetah import ImportHooks
def xreg_range(*args):
return tuple('x{:d}'.format(r) for r in range(*args))
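# For illustration: xreg_range(0, 3) returns ('x0', 'x1', 'x2'), and
# xreg_range(8, 18) names the caller-saved registers x8..x17.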
templates_dir = os.path.join('tools', 'hypercalls', 'templates')
abis = {}
class abi():
def __init__(self, hypcall_base):
self.hypcall_base = hypcall_base
class abi_aarch64(abi):
def __init__(self, hypcall_base):
super().__init__(hypcall_base)
# The size in bytes of each machine register
self.register_size = 8
# Registers used for parameters and results. Note that we don't
# support indirect results (i.e. structs larger than 16 bytes).
self.parameter_reg = xreg_range(0, 8)
self.result_reg = xreg_range(0, 8)
# Registers clobbered by the hypervisor.
self.caller_saved_reg = xreg_range(8, 18)
@classmethod
def register_name(cls, size, index):
reg_type = "x" if size == 8 else "w"
return "{}{}".format(reg_type, index)
# HVC 0 is used for ARM SMCCC (PSCI, etc). Gunyah uses 0x6XXX
abis['aarch64'] = abi_aarch64(0x6000)
# Dictionary with all hypercalls defined
hypcall_dict = dict()
vendor_hypcall_list = []
class Variable:
def __init__(self, ctype, name, type_definition):
self.ctype = ctype
self.name = name
self.size = type_definition.size
self.category = type_definition.category
if type_definition.category == "bitfield":
self.type_name = type_definition.type_name
self.ignore = name.startswith('res0') or name.startswith(
'res1') or name.endswith('_')
self.pointer = False
try:
from ir import PointerType
d = type_definition
if isinstance(d.compound_type, PointerType):
self.pointer = True
except AttributeError:
pass
if self.ignore:
if name.startswith('res0'):
self.default = 0
elif name.startswith('res1'):
self.default = 0xffffffffffffffff
elif name.endswith('_'):
raise Exception(
"Invalid name ending with underscore: {:s}".format(name))
else:
raise Exception("Invalid ignored name {:s}".format(name))
class Hypercall:
def __init__(self, name, num, properties, abi):
self.name = name
self.num = num
self.used_regs = set()
self.inputs = []
self.input_count = 0
self.outputs = []
self.output_count = 0
self.clobbers = set()
self.abi = abis[abi]
self.properties = properties
self.hvc_num = "0x{:x}".format(self.abi.hypcall_base + num)
def check_type(self, var, role):
if var.size > self.abi.register_size:
raise Exception('Hypercall {:s}: {:s} {:s} has type {:s}, which '
'does not fit in a {:d}-byte machine register '
'(size is {:d} bytes)'.format(
self.name, role, var.name, var.ctype,
self.abi.register_size, var.size))
def add_input(self, input):
self.check_type(input, 'input')
reg = self.abi.parameter_reg[self.input_count]
self.used_regs.add(reg)
self.inputs.append((reg, input))
self.input_count += 1
def add_output(self, output):
self.check_type(output, 'output')
reg = self.abi.result_reg[self.output_count]
self.used_regs.add(reg)
self.outputs.append((reg, output))
self.output_count += 1
def finalise(self):
if 'vendor_hyp_call' in self.properties:
vendor_hypcall_list.append(self)
else:
hypcall_dict[self.num] = self
self.inputs = tuple(self.inputs)
self.outputs = tuple(self.outputs)
# Calculate register clobber list for guest interface
self.clobbers.update((x for x in self.abi.parameter_reg
if x not in self.used_regs))
self.clobbers.update((x for x in self.abi.result_reg
if x not in self.used_regs))
self.clobbers.update(self.abi.caller_saved_reg)
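# Illustrative use of the classes above (hypothetical values; the real caller
# is the hypercall generator script): a hypercall is built by adding Variable
# inputs/outputs and then finalised, e.g.
#
#   call = Hypercall("example_call", 0x10, {}, 'aarch64') # hvc_num "0x6010"
#   call.add_input(some_variable) # a Variable built from the typed IR
#   call.finalise() # registers the call and computes the clobber list
#
# finalise() places the call in hypcall_dict keyed by call number, or in
# vendor_hypcall_list if the 'vendor_hyp_call' property is set.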
ns = locals()
def apply_template(template_file):
ImportHooks.install()
template = Template(file=template_file, searchList=ns)
result = str(template)
ImportHooks.uninstall()
return result
| 4,598 | 30.285714 | 77 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/hypercalls/hypercall_gen.py | #!/usr/bin/env python3
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from hypercall import Variable, Hypercall, apply_template
from lark import Lark, Tree
import os
import sys
import argparse
import subprocess
import logging
import pickle
import inspect
# Determine the location of this script.
__loc__ = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
# The typed directory is added to the sys path so that when the pickle is
# loaded it can find the corresponding ast nodes.
typed_path = os.path.join(__loc__, '..', 'typed')
sys.path.append(typed_path)
# Silence flake8 warning about CType unused. It is required for pickle.load
from abi import AArch64ABI, CType # noqa: F401,E402
# The template directory is added to the sys path so templates can be imported
# from it.
template_path = os.path.join(__loc__, 'templates')
sys.path.append(template_path)
logger = logging.getLogger(__name__)
abi_classes = {
'aarch64': AArch64ABI,
}
primitive_types = dict()
types = dict()
used_ids = set()
used_calls = set()
class HypercallObject:
def __init__(self, name):
self.name = name
self.call_num = None
self.inputs = []
self.outputs = []
self.properties = {}
def get_constant(c):
type_parent = None
while isinstance(c, Tree):
type_parent = c.data
c = c.children[0]
assert (type_parent == 'constant_value')
return c
def get_type(c, ir):
type_parent = None
while isinstance(c, Tree):
type_parent = c.data
c = c.children[0]
if "primitive_type" in type_parent:
try:
d = primitive_types[c]
except KeyError:
logger.error("Type: %s not found", c)
sys.exit(1)
return (d.c_type_name, d)
else:
try:
d = types[c]
except KeyError:
logger.error("Type: %s not found", c)
sys.exit(1)
if not d.type_name.endswith('_t'):
c = c + '_t'
return (c, d)
logger.error("unknown type", c)
sys.exit(1)
def get_hypercalls(tree, hypercalls, hyp_num, ir):
for c in tree.children:
if isinstance(c, Tree):
if c.data == "hypercall_definition":
name = c.children[0]
if name in used_calls:
logger.error("Hypercall name: %s already used", name)
sys.exit(1)
used_calls.add(name)
new_hypercall = HypercallObject(name)
hypercalls.insert(hyp_num, new_hypercall)
get_hypercalls(c, hypercalls, hyp_num, ir)
hyp_num += 1
elif c.data == "hypercall_declaration":
if isinstance(c.children[0], Tree):
node = c.children[0]
if node.data == "declaration_call_num":
val = get_constant(node.children[0])
if hypercalls[hyp_num].call_num is not None:
logger.error("Hypercall: %s multiple call_nums",
hypercalls[hyp_num].name)
sys.exit(1)
call_num = int(str(val), base=0)
if call_num in used_ids:
logger.error("Hypercall call_num already used",
hypercalls[hyp_num].name)
sys.exit(1)
used_ids.add(call_num)
hypercalls[hyp_num].call_num = call_num
elif node.data == "declaration_sensitive":
hypercalls[hyp_num].properties['sensitive'] = True
elif node.data == "declaration_vendor_hyp_call":
if hypercalls[hyp_num].call_num is not None:
logger.error(
"Hypercall: %s call_num and "
"vendor_hyp_call",
hypercalls[hyp_num].name)
sys.exit(1)
hypercalls[hyp_num].call_num = 0
hypercalls[hyp_num].properties['vendor_hyp_call'] = \
True
else:
raise TypeError
elif isinstance(c.children[1], Tree):
identifier = str(c.children[0])
node = c.children[1]
if node.data == "declaration_input":
if len(hypercalls[hyp_num].inputs) >= 8:
logger.error("Maximum of 8 inputs per hypercall",
hypercalls[hyp_num].name)
sys.exit(1)
(t, d) = get_type(node.children[0], ir)
hypercalls[hyp_num].inputs.append(
Variable(t, identifier, d))
elif node.data == "declaration_output":
if len(hypercalls[hyp_num].outputs) >= 8:
logger.error("Maximum of 8 outputs per hypercall",
hypercalls[hyp_num].name)
sys.exit(1)
(t, d) = get_type(node.children[0], ir)
hypercalls[hyp_num].outputs.append(
Variable(t, identifier, d))
else:
raise TypeError
else:
logger.error("internal error")
sys.exit(1)
return hypercalls, hyp_num
def main():
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-o", "--output",
help="Output file (default stdout)",
type=argparse.FileType('w', encoding='utf-8'),
default=sys.stdout)
arg_parser.add_argument('-t', '--template',
type=argparse.FileType('r', encoding='utf-8'),
help="Template file used to generate output")
arg_parser.add_argument('--traceback', action="store_true",
help="Print a full traceback if an error occurs")
arg_parser.add_argument("-f", "--formatter",
help="specify clang-format to format the code")
arg_parser.add_argument("-d", "--deps", default=None,
type=argparse.FileType('w', encoding='utf-8'),
help="write implicit dependencies to a Makefile")
arg_parser.add_argument("input", metavar='INPUT', nargs="*",
type=argparse.FileType('r', encoding='utf-8'),
help="Input type DSL files to process")
arg_parser.add_argument('-p', '--load-pickle',
type=argparse.FileType('rb'),
help="Load the IR from typed Python pickle")
arg_parser.add_argument("-a", "--abi", help="specify the target machine "
"compiler ABI name", choices=abi_classes.keys(),
required=True)
options = arg_parser.parse_args()
grammar_file = os.path.join(__loc__, '..', 'grammars',
'hypercalls_dsl.lark')
parser = Lark.open(grammar_file, 'start', parser='lalr',
lexer='contextual', propagate_positions=True)
from ir import PrimitiveType
# Load typed pickle to get the types used for the inputs and output of the
# hypercall
ir = pickle.load(options.load_pickle)
for d in ir.abi_refs:
if isinstance(d, PrimitiveType):
if d.indicator not in primitive_types and d.is_public:
primitive_types[d.indicator] = d
for d in ir.definitions:
if d.indicator not in types and d.is_public:
types[d.indicator] = d
# Go through all *.hvc files, parse the content, do a top down iteration to
# get all hypercalls and get the type and size for the inputs and output
# arguments by searching in the ir of typed.pickle
hypercalls = []
hyp_num = 0
for p in options.input:
text = p.read()
parse_tree = parser.parse(text)
hypercalls, hyp_num = get_hypercalls(
parse_tree, hypercalls, hyp_num, ir)
for h in hypercalls:
hyper = Hypercall(h.name, h.call_num, h.properties, options.abi)
for i in h.inputs:
hyper.add_input(i)
for o in h.outputs:
hyper.add_output(o)
hyper.finalise()
# Apply templates to generate the output code and format it
result = apply_template(options.template)
if options.formatter and not options.template.name.endswith('.S.tmpl'):
ret = subprocess.run([options.formatter],
input=result.encode("utf-8"),
stdout=subprocess.PIPE)
result = ret.stdout.decode("utf-8")
if ret.returncode != 0:
logger.error("Error formatting output", result)
sys.exit(1)
options.output.write(result)
options.output.close()
# Write deps last to get template specific imports
if options.deps is not None:
deps = set()
for m in sys.modules.values():
try:
f = inspect.getsourcefile(m)
except TypeError:
continue
if f is None:
continue
f = os.path.relpath(f)
if f.startswith('../'):
continue
deps.add(f)
options.deps.write(options.output.name + ' : ')
options.deps.write(' '.join(sorted(deps)))
options.deps.write('\n')
options.deps.close()
if __name__ == '__main__':
main()
| 10,064 | 35.075269 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/codegen/codegen.py | #!/usr/bin/env python3
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# Simple generic code generator. Assumes that all logic is in the template
# itself and is based only on the architecture names and the preprocessor
# defines, all of which are passed on the command line.
#
# Note that it is generally bad style to have any non-trivial logic in the
# templates. Templates should import Python modules for anything complex.
# The directory containing the template file is automatically added to the
# Python path for this purpose.
from Cheetah.Template import Template
import argparse
import subprocess
import logging
import sys
import inspect
import os
import re
logger = logging.getLogger(__name__)
class DefineAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
defs = getattr(namespace, self.dest, None)
if defs is None:
defs = {}
setattr(namespace, self.dest, defs)
try:
name, val = values.split('=')
try:
val = int(val.rstrip('uU'), 0)
            except (TypeError, ValueError):
                # Leave non-integer values (e.g. strings) as they are
                pass
except ValueError:
name = values
val = True
defs[name] = val
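# Illustrative behaviour of DefineAction (hypothetical flag names): parsing
# "-D DEBUG -D LEVEL=2 -D NAME=foo" yields roughly
# {'DEBUG': True, 'LEVEL': 2, 'NAME': 'foo'}.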
def main():
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
)
args = argparse.ArgumentParser()
args.add_argument("-o", "--output", help="Output file (default: stdout)",
default=sys.stdout,
type=argparse.FileType('w', encoding='utf-8'))
args.add_argument("-D", dest='defines', help="Define config variable",
action=DefineAction)
args.add_argument("-imacros",
type=argparse.FileType('r', encoding='utf-8'),
help="parse imacros CPP file",
default=None)
args.add_argument("-a", dest='archs', help="Define architecture name",
action='append')
args.add_argument("-f", "--formatter",
help="specify clang-format to format the code")
args.add_argument('-d', "--deps",
type=argparse.FileType('w', encoding='utf-8'),
help="Write implicit dependencies to Makefile",
default=None)
args.add_argument("template", metavar="TEMPLATE",
help="Template file used to generate output",
type=argparse.FileType('r', encoding='utf-8'))
options = args.parse_args()
defines = {}
if options.defines:
defines.update(options.defines)
if options.imacros:
d = re.compile(r'#define (?P<def>\w+)(\s+\"?(?P<val>[\w0-9,\ ]+)\"?)?')
for line in options.imacros.readlines():
            match = d.search(line)
            if match is None:
                # Skip lines that are not #define directives
                continue
            define = match.group('def')
            val = match.group('val')
try:
try:
val = int(val.rstrip('uU'), 0)
except TypeError:
pass
except AttributeError:
pass
except ValueError:
pass
if define in defines:
raise Exception("multiply defined: {}\n", define)
defines[define] = val
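    # For illustration: an imacros line such as '#define CONFIG_DEBUG 1'
    # (hypothetical option name) ends up as defines['CONFIG_DEBUG'] = 1.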
sys.path.append(os.path.dirname(options.template.name))
output = str(Template(file=options.template,
searchList=(defines,
{'arch_list': options.archs})))
if options.formatter:
ret = subprocess.run([options.formatter], input=output.encode("utf-8"),
stdout=subprocess.PIPE)
output = ret.stdout.decode("utf-8")
if ret.returncode != 0:
raise Exception("failed to format output:\n", ret.stderr)
if options.deps is not None:
deps = set()
for m in sys.modules.values():
try:
f = inspect.getsourcefile(m)
except TypeError:
continue
if f is None:
continue
f = os.path.relpath(f)
if f.startswith('../'):
continue
deps.add(f)
deps.add(options.template.name)
if options.imacros:
deps.add(options.imacros.name)
options.deps.write(options.output.name + ' : ')
options.deps.write(' '.join(sorted(deps)))
options.deps.write('\n')
options.deps.close()
options.output.write(output)
options.output.close()
if __name__ == '__main__':
sys.exit(main())
| 4,655 | 31.333333 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/typed/ast_nodes.py | # © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# 2019 Cog Systems Pty Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
from lark import Tree, Token
import itertools
from ir import (
TopLevel, PrimitiveType, BitFieldDeclaration, BitFieldType, StructureType,
EnumerationType, AlternativeType, BitFieldSpecifier, DirectType,
PointerType, PrimitiveDeclaration, ArrayType, ConstantDefinition,
AlternativeDefinition, BitFieldDefinition, StructureDefinition,
EnumerationDefinition, EnumerationConstant, EnumerationExtension,
ObjectType, ObjectDeclaration, ObjectDefinition, BitFieldExtension,
ObjectExtension, Qualifier, AlignedQualifier, GroupQualifier,
AtomicQualifier, PackedQualifier, ConstantExpression, ConstantReference,
UnaryOperation, SizeofOperation, AlignofOperation, BinaryOperation,
ConditionalOperation, UnionType, UnionDefinition, UnionExtension,
StructureExtension, MinofOperation, MaxofOperation, ContainedQualifier,
WriteonlyQualifier, PureFunctionCall, LockableQualifier, GlobalDefinition,
OptimizeQualifier
)
from exceptions import DSLError
"""
The classes in the module represent nodes in the AST.
They are used only to parse the input.
All classes that inherit from CommonTree in this file will be automatically
imported into the TransformTypes transformer (excluding CommonTree itself).
"""
def toint(text):
"""
Convert value strings to integers.
Supports Python styles for decimal, hex and binary (but not octal). Also
supports (and ignores) a C-style U suffix.
"""
text = text.rstrip('uU')
if len(text) > 1 and text[0] == '0' and text[1] not in 'xXbB':
raise DSLError('Unknown base for value {:s}'.format(text), text)
return int(text, base=0)
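# For illustration: toint("0x10") == 16, toint("0b101") == 5 and
# toint("123U") == 123, while a leading zero such as "010" raises DSLError
# because the base would be ambiguous.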
class CommonTree(Tree):
"""
Common class for all AST nodes
"""
def __init__(self, program, children, meta, data=None):
if data is None:
data = self.__class__.__name__
super().__init__(data, children, meta)
self.program = program
def pass_up(self):
pass
@property
def num_children(self):
return len(self.children)
class TToken(str):
__slots__ = ['program', 'line', 'column', 'pos_in_stream']
def __new__(cls, val, token=None, program=None, line=None, column=None,
pos_in_stream=None):
self = super(TToken, cls).__new__(cls, val)
if token:
line = token.line
column = token.column
pos_in_stream = token.pos_in_stream
self.program = program
self.line = line
self.column = column
self.pos_in_stream = pos_in_stream
return self
def __reduce__(self):
return (TToken, (str(self), None, self.program, self.line, self.column,
self.pos_in_stream))
class Action:
"""
A class helps rules to register function to provide input for parent node.
Parameter:
fn: function to call if parent decide to take this action.
The signature of this actionis "def f(object)". This action can handle
the object as it wants.
name: the rule name who provides this action
passive: True indicate it needs to be specifically called
trace: the tree node who provides this action. Just for debug
"""
def __init__(self, fn, name, passive=False, trace=None):
self.name = name
self.trace = trace
self.fn = fn
self.passive = passive
def take(self, obj):
"""
Take this action, and change the specified obj
"""
if self.trace:
print("take ", self)
return self.fn(obj)
def __repr__(self):
more = ""
if self.trace:
more = "<" + str(self.trace) + ">"
return "action %s from %s%s" % (self.fn.__name__, self.name, more)
def match(self, rule_name):
return self.name == rule_name
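# Illustrative flow (simplified from the node classes below): a child node
# registers an action such as Action(self.set_type, "primitive_type"), the
# action propagates upwards via pass_up(), and an ancestor eventually calls
# action_list.take(declaration, "primitive_type") (or take_all()) to let the
# child fill in the ancestor's object.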
class ActionList:
"""
The helper to manipulate the actions.
"""
def __init__(self, actions=[]):
self.actions = actions.copy()
def __iter__(self):
return itertools.chain(self.actions)
def __iadd__(self, x):
if isinstance(x, ActionList):
self.actions += x.actions
return self
else:
raise Exception("only allowed to iadd Action List" + str(type(x)))
def __repr__(self):
ret = []
for a in self.actions:
ret.append(" " + str(a))
return "Action list: \n" + '\n'.join(ret)
def append_actions(self, action_list):
self.actions += action_list
def take_all(self, obj, accept_list=[], deny_list=[], remove=True):
if set(accept_list) & set(deny_list):
raise Exception("cannot accept/deny same name at the same time: " +
', '.join(set(accept_list) & set(deny_list)))
for a in list(self.actions):
if len(accept_list) > 0 and a.name in accept_list:
a.take(obj)
elif len(deny_list) > 0 and a.name not in deny_list:
a.take(obj)
else:
continue
if remove:
self.actions.remove(a)
def take(self, obj, name, remove=True, single=True):
"""
Take actions, can get the return value
parameters:
obj: the object who receive the action result
name: specify the action name to take
remove: indicate if need to remove the action after take it
single: indicate if need to take all actions who has the same name.
If so, just return the last action's return value.
"""
ret = None
for a in list(self.actions):
if a.name == name:
if remove:
self.actions.remove(a)
if single:
return a.take(obj)
ret = a.take(obj)
return ret
def remove_all(self, remove_list):
self.actions = [a for a in self.actions if a.name not in remove_list]
def has(self, name):
for a in self.actions:
if name == a.name:
return True
return False
def remains(self):
return len(self.actions) != 0
class CommonListener(CommonTree):
"""
Common class for all list nodes need to sync.
Order: Since it's a bottom up node, all children visitor (for read/write)
is ahead of parent, and left is ahead of right.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.actions = []
self.action_list = ActionList()
# get all actions from children
for c in self.children:
if isinstance(c, CommonListener):
self.action_list += c.pass_up()
self.init_interfaces()
def init_interfaces(self):
"""
        In this function, a node can:
* initialize all attributes which will be set by other nodes.
* set actions for parent nodes
* choose some actions to take
"""
pass
def pass_up(self):
"""
        Set up the actions of this node; parents will collect this node's
        actions and call them properly.
        By default, it just returns the remaining actions.
"""
self.action_list.append_actions(self.actions)
return self.action_list
class start(CommonListener):
"""
The root node of the the AST
"""
def init_interfaces(self):
self.intermediate_tree = TopLevel()
al = [
"bitfield_extension",
"bitfield_definition",
"object_definition",
"object_extension",
"structure_definition",
"structure_extension",
"union_definition",
"union_extension",
"enumeration_definition",
"enumeration_extension",
"alternative_definition",
"constant_definition",
"global_definition",
"declaration",
"add_abi_ref",
"add_constant_ref",
"add_type_ref",
]
self.action_list.take_all(self.intermediate_tree, al)
if self.action_list.remains():
print("Warning: untaken actions remain. ", self.action_list)
def get_intermediate_tree(self):
return self.intermediate_tree
"""
Add a pass_up method to tokens so we can treat them the same way as nodes
"""
Token.pass_up = lambda self: self
class constant_value(CommonListener):
def init_interfaces(self):
self.value = toint(self.children[0])
class constant_reference(CommonListener):
def init_interfaces(self):
self.value = ConstantReference(self.children[0])
self.actions.append(Action(self.set_symbol_ref, 'add_constant_ref'))
def set_symbol_ref(self, obj):
obj.constant_refs.append(self.value)
class constant_expression(CommonListener):
def init_interfaces(self):
self.value = ConstantExpression(self.children[0].value)
bracketed_constant_expression = constant_expression
class unary_operation(CommonListener):
def init_interfaces(self):
self.value = UnaryOperation(self.children[0], self.children[1].value)
class IConstantFromTypeOperation(CommonListener):
def init_interfaces(self):
self.type_ref = None
al = ["direct_type", "array", "pointer", "alias",
"primitive_type", "bitfield_type", "structure_type",
"union_type", "object_type", "enumeration_type",
"alternative_type"]
self.action_list.take_all(self, al)
rl = [
"pointer_has_pointer",
"object_type_set_complex",
"object_type_has_object",
"object_type_create_declaration",
]
self.action_list.remove_all(rl)
self.actions.append(Action(self.set_type_ref, 'add_type_ref'))
def set_type_ref(self, obj):
if self.type_ref is not None:
obj.type_refs.append(self.type_ref)
class sizeof_operation(IConstantFromTypeOperation):
def init_interfaces(self):
super().init_interfaces()
self.value = SizeofOperation(self.compound_type)
class alignof_operation(IConstantFromTypeOperation):
def init_interfaces(self):
super().init_interfaces()
self.value = AlignofOperation(self.compound_type)
class minof_operation(IConstantFromTypeOperation):
def init_interfaces(self):
super().init_interfaces()
self.value = MinofOperation(self.compound_type)
class maxof_operation(IConstantFromTypeOperation):
def init_interfaces(self):
super().init_interfaces()
self.value = MaxofOperation(self.compound_type)
class IPureFunction(CommonListener):
def init_interfaces(self):
self.action_list.take(self, 'constant_expression')
self.value = PureFunctionCall(self.children[0].value, self.f)
class msb_operation(IPureFunction):
def f(self, x):
return x.bit_length() - 1
class IBinaryOperation(CommonListener):
def init_interfaces(self):
self.value = BinaryOperation(self.children[1], self.children[0].value,
self.children[2].value)
class mult_operation(IBinaryOperation):
pass
class add_operation(IBinaryOperation):
pass
class shift_operation(IBinaryOperation):
pass
class relational_operation(IBinaryOperation):
pass
class equality_operation(IBinaryOperation):
pass
class IFixedBinaryOperation(CommonListener):
def init_interfaces(self):
self.value = BinaryOperation(self.operator, self.children[0].value,
self.children[1].value)
class bitwise_and_operation(IFixedBinaryOperation):
operator = "&"
class bitwise_xor_operation(IFixedBinaryOperation):
operator = "^"
class bitwise_or_operation(IFixedBinaryOperation):
operator = "|"
class logical_and_operation(IFixedBinaryOperation):
operator = "&&"
class logical_or_operation(IFixedBinaryOperation):
operator = "||"
class conditional_operation(CommonListener):
def init_interfaces(self):
self.value = ConditionalOperation(self.children[0].value,
self.children[1].value,
self.children[2].value)
class IABISpecific:
"""
Mixin that triggers a call to set_abi().
This can be used either on a declaration or on a customised type.
"""
def init_interfaces(self):
super().init_interfaces()
self.actions.append(Action(self.set_abi, 'add_abi_ref'))
def set_abi(self, obj):
if hasattr(self, 'definition'):
obj.abi_refs.add(self.definition)
elif hasattr(self, 'compound_type'):
obj.abi_refs.add(self.compound_type)
else:
raise DSLError("Cannot set ABI", self)
class primitive_type(IABISpecific, CommonListener):
def init_interfaces(self):
self.type_name = self.children[0]
self.compound_type = PrimitiveType(self.type_name)
self.actions = [Action(self.set_type, "primitive_type", self)]
super().init_interfaces()
def set_type(self, declaration):
declaration.compound_type = self.compound_type
class bitfield_type(CommonListener):
def init_interfaces(self):
self.type_name = self.children[0]
self.compound_type = BitFieldType(self.type_name)
self.actions = [Action(self.set_type, "bitfield_type")]
def set_type(self, declaration):
d = declaration
d.compound_type = self.compound_type
d.type_ref = d.compound_type
d.is_customized_type = True
class structure_type(CommonListener):
def init_interfaces(self):
self.type_name = self.children[0]
self.compound_type = StructureType(self.type_name)
self.actions = [Action(self.set_type, "structure_type")]
def set_type(self, declaration):
d = declaration
d.compound_type = self.compound_type
d.type_ref = self.compound_type
d.is_customized_type = True
class union_type(CommonListener):
def init_interfaces(self):
self.type_name = self.children[0]
self.compound_type = UnionType(self.type_name)
self.actions = [Action(self.set_type, "union_type")]
if self.action_list.has("object_type_has_object"):
raise DSLError("cannot declare an object type member in union",
self.declaration.member_name)
def set_type(self, declaration):
d = declaration
d.compound_type = self.compound_type
d.type_ref = self.compound_type
d.is_customized_type = True
class enumeration_type(CommonListener):
def init_interfaces(self):
self.type_name = self.children[0]
self.compound_type = EnumerationType(self.type_name)
self.actions = [Action(self.set_type, "enumeration_type")]
def set_type(self, declaration):
d = declaration
d.compound_type = self.compound_type
d.type_ref = d.compound_type
d.is_customized_type = True
class alternative_type(CommonListener):
def init_interfaces(self):
self.type_name = self.children[0]
self.compound_type = AlternativeType(self.type_name)
self.actions = [Action(self.set_type, "alternative_type")]
def set_type(self, declaration):
d = declaration
d.compound_type = self.compound_type
d.type_ref = self.compound_type
d.is_customized_type = True
class direct_type(CommonListener):
def init_interfaces(self):
self.compound_type = DirectType()
self.action_list.take(self.compound_type, "qualifier_list")
self.actions = [Action(self.set_type, "direct_type")]
def set_type(self, declaration):
self.compound_type.set_basic_type(declaration.compound_type)
declaration.compound_type = self.compound_type
class qualifier_list(CommonListener):
def init_interfaces(self):
self.qualifiers = set()
al = [
"basic_qualifier",
"atomic_qualifier",
"packed_qualifier",
"aligned_qualifier",
"group_qualifier",
"contained_qualifier",
"writeonly_qualifier",
"lockable_qualifier",
"optimize_qualifier",
]
self.action_list.take_all(self, al)
self.actions = [Action(self.set_qualifiers, "qualifier_list")]
def set_qualifiers(self, obj):
obj.qualifiers = self.qualifiers
class basic_qualifier(CommonListener):
def init_interfaces(self):
self.qualifier = Qualifier(self.children[0])
self.actions = [Action(self.add_qualifier, "basic_qualifier")]
def add_qualifier(self, obj):
obj.qualifiers.add(self.qualifier)
class atomic_qualifier(CommonListener):
def init_interfaces(self):
self.qualifier = AtomicQualifier(self)
self.actions = [Action(self.add_qualifier, "atomic_qualifier")]
def add_qualifier(self, obj):
obj.qualifiers.add(self.qualifier)
class packed_qualifier(CommonListener):
def init_interfaces(self):
self.qualifier = PackedQualifier(self)
self.actions = [Action(self.add_qualifier, "packed_qualifier")]
def add_qualifier(self, obj):
obj.qualifiers.add(self.qualifier)
class aligned_qualifier(CommonListener):
def init_interfaces(self):
self.action_list.take(self, 'constant_expression')
self.qualifier = AlignedQualifier(self, self.children[0].value)
self.actions = [Action(self.add_qualifier, "aligned_qualifier")]
def add_qualifier(self, obj):
obj.qualifiers.add(self.qualifier)
class group_qualifier(CommonListener):
def init_interfaces(self):
self.qualifier = GroupQualifier(self, self.children)
self.actions = [Action(self.add_qualifier, "group_qualifier")]
def add_qualifier(self, obj):
obj.qualifiers.add(self.qualifier)
class contained_qualifier(CommonListener):
def init_interfaces(self):
self.qualifier = ContainedQualifier(self)
self.actions = [Action(self.add_qualifier, "contained_qualifier")]
def add_qualifier(self, obj):
obj.qualifiers.add(self.qualifier)
class writeonly_qualifier(CommonListener):
def init_interfaces(self):
self.qualifier = WriteonlyQualifier(self)
self.actions = [Action(self.add_qualifier, "writeonly_qualifier")]
def add_qualifier(self, obj):
obj.qualifiers.add(self.qualifier)
class lockable_qualifier(CommonListener):
def init_interfaces(self):
self.qualifier = LockableQualifier(self)
self.actions = [
Action(self.add_qualifier, "lockable_qualifier"),
Action(self.set_name, "describe_lockable_type"),
]
def add_qualifier(self, obj):
obj.qualifiers.add(self.qualifier)
def set_name(self, obj):
self.qualifier.resource_name = ' '.join((obj.type_name, obj.category))
class optimize_qualifier(CommonListener):
def init_interfaces(self):
self.qualifier = OptimizeQualifier(self)
self.actions = [
Action(self.add_qualifier, "optimize_qualifier"),
Action(self.set_category, "describe_optimized_type"),
]
def add_qualifier(self, obj):
obj.qualifiers.add(self.qualifier)
def set_category(self, obj):
self.qualifier.category = obj.category
class array_size(CommonListener):
def init_interfaces(self):
self.value = self.children[0].value
self.actions = [
Action(self.set_size, "array_size"),
]
def set_size(self, obj):
obj.length = self.value
class array(CommonListener):
def init_interfaces(self):
self.compound_type = ArrayType(self)
al = ["array_size", "qualifier_list", "object_type_set_complex"]
self.action_list.take_all(self.compound_type, accept_list=al)
self.actions = [Action(self.set_type, "array")]
def set_type(self, declaration):
a = self.compound_type
a.base_type = declaration.compound_type
declaration.compound_type = a
declaration.complex_type = True
class pointer(IABISpecific, CommonListener):
def init_interfaces(self):
self.compound_type = PointerType(self)
al = [
"qualifier_list",
"pointer_has_pointer",
"object_type_set_complex"]
self.action_list.take_all(self.compound_type, accept_list=al)
self.actions = [
Action(self.mark_has_pointer, "pointer_has_pointer"),
Action(self.set_type, "pointer")
]
# Pointers to objects hide an object
rl = ["object_type_has_object"]
self.action_list.remove_all(rl)
super().init_interfaces()
def set_type(self, declaration):
self.compound_type.base_type = declaration.compound_type
declaration.compound_type = self.compound_type
declaration.complex_type = True
def mark_has_pointer(self, pointer):
pass
class declaration(CommonListener):
def init_interfaces(self):
# special case to handle object type declaration
d = self.action_list.take(self, "object_type_create_declaration")
if d is None:
self.declaration = PrimitiveDeclaration()
else:
self.declaration = d
self.action_list.take(self.declaration, 'object_noprefix')
self.declaration.member_name = self.children[0]
al = ["direct_type", "array", "pointer", "alias", "primitive_type",
"bitfield_type", "structure_type", "union_type", "object_type",
"enumeration_type", "alternative_type", "declaration_offset"]
self.action_list.take_all(self.declaration, al)
rl = ["pointer_has_pointer", "object_type_set_complex"]
self.action_list.remove_all(rl)
self.actions = [Action(self.set_declaration, "declaration")]
def set_declaration(self, obj):
obj.declarations.append(self.declaration)
self.declaration.owner = obj
if self.declaration.type_ref is not None:
obj.type_refs.append(self.declaration.type_ref)
class declaration_offset(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_offset, "declaration_offset"),
]
def set_offset(self, e):
e.offset = self.children[0].value
class enumeration_expr(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_value, "enumeration_expr"),
]
def set_value(self, e):
e.value = self.children[0].value
class enumeration_noprefix(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_noprefix, "enumeration_noprefix"),
Action(self.set_noprefix, "enumeration_attribute"),
]
def set_noprefix(self, e):
e.prefix = ''
class enumeration_explicit(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_explicit, "enumeration_attribute"),
]
def set_explicit(self, e):
e.set_explicit()
class enumeration_constant(CommonListener):
def init_interfaces(self):
name = self.children[0]
self.constant = EnumerationConstant(name)
self.action_list.take_all(self.constant,
["enumeration_noprefix",
"enumeration_expr"])
self.action_list.remove_all(["enumeration_attribute"])
self.actions = [
Action(self.add_constant, "enumeration_constant"),
]
def add_constant(self, d):
d.add_enumerator(self.constant)
class bitfield_width(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.get_width, "bitfield_width"),
]
def get_width(self, bit):
bit.width = self.children[0].value
class bitfield_bit_range(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.add_range, "bitfield_bit_range"),
]
assert (len(self.children) <= 2)
def add_range(self, specifier):
specifier.add_bit_range(self)
def get_bits(self):
bit = int(self.children[0].value)
if len(self.children) == 1:
width = 1
else:
msb = bit
bit = int(self.children[1].value)
width = msb - bit + 1
if width < 1:
raise DSLError("invalid bitifield specfier", self.children[1])
return (bit, width)
class bitfield_auto(CommonListener):
def init_interfaces(self):
self.width = None
self.actions = [
Action(self.set_bitfield_auto, "bitfield_auto"),
]
self.action_list.take(self, "bitfield_width")
def set_bitfield_auto(self, specifier):
specifier.set_type_auto(self.width)
class bitfield_bit_span(CommonListener):
def init_interfaces(self):
self.width = None
self.actions = [
Action(self.add_range, "bitfield_bit_span"),
]
self.action_list.take(self, "bitfield_width")
def add_range(self, specifier):
specifier.add_bit_range(self)
def get_bits(self):
return (int(self.children[-1].value), int(self.width))
class bitfield_others(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_bitfield_others, "bitfield_others"),
]
def set_bitfield_others(self, specifier):
specifier.set_type_others()
class bitfield_delete(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.bitfield_delete, "bitfield_delete"),
]
def bitfield_delete(self, ext):
ext.add_delete_member(self.children[0])
class bitfield_specifier(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_specifier, "bitfield_specifier"),
]
self.bitfield_specifier = BitFieldSpecifier()
al = ["bitfield_bit_range", "bitfield_auto", "bitfield_bit_span",
"bitfield_others"]
self.action_list.take_all(self.bitfield_specifier, al)
def set_specifier(self, declaration):
declaration.bitfield_specifier = self.bitfield_specifier
class bitfield_unknown(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_bitfield_member, "bitfield_member"),
]
def set_bitfield_member(self, declaration):
declaration.member_name = self.children[0]
declaration.set_ignored()
class bitfield_member(CommonListener):
def init_interfaces(self):
self.name = self.children[0]
self.actions = [
Action(self.set_bitfield_member, "bitfield_member"),
]
def set_bitfield_member(self, declaration):
declaration.member_name = self.name
class bitfield_const(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_bitfield_member, "bitfield_member"),
]
def set_bitfield_member(self, declaration):
declaration.member_name = "<const>"
class bitfield_shift(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_bitfield_shift, "bitfield_shift"),
]
def set_bitfield_shift(self, declaration):
return self.children[0].value
class bitfield_default(CommonListener):
def init_interfaces(self):
self.action_list.take(self, 'constant_expression')
self.default = self.children[0]
self.actions = [
Action(self.set_default, 'bitfield_default'),
]
def set_default(self, obj):
obj.default = self.default
class bitfield_declaration(CommonListener):
def init_interfaces(self):
if self.action_list.has("bitfield_delete"):
return
self.declaration = BitFieldDeclaration()
shift = self.action_list.take(self.declaration, "bitfield_shift")
al = ["bitfield_member", "bitfield_specifier", "bitfield_default",
"direct_type", "primitive_type", "bitfield_type",
"enumeration_type", "alternative_type", "structure_type",
"union_type", "pointer"]
self.action_list.take_all(self.declaration, al)
if self.action_list.has("object_type_has_object"):
raise DSLError("cannot declare an object type member in bitfield",
self.declaration.member_name)
if shift:
if self.action_list.has("pointer_has_pointer"):
raise DSLError(
"cannot specify shift for pointer member in bitfield",
self.declaration.member_name)
self.declaration.bitfield_specifier.set_type_shift(shift)
rl = ["pointer_has_pointer"]
self.action_list.remove_all(rl)
self.actions = [Action(self.set_declaration, "bitfield_declaration")]
def set_declaration(self, definition):
definition.declarations.append(self.declaration)
if self.declaration.type_ref is not None:
definition.type_refs.append(self.declaration.type_ref)
class public(CommonListener):
def init_interfaces(self):
self.actions = [
Action(self.set_public, self.__class__.__name__)
]
def set_public(self, definition):
definition.set_public()
class constant_definition(CommonListener):
def init_interfaces(self):
self.name = self.children[0]
self.action_list.take(self, 'constant_expression')
d = ConstantDefinition(self.name, self.children[-1].value)
self.definition = d
al = ["direct_type", "array", "pointer", "alias",
"primitive_type", "bitfield_type", "structure_type",
"union_type", "object_type", "enumeration_type",
"alternative_type", "public"]
self.action_list.take_all(self.definition, al)
rl = ["pointer_has_pointer", "object_type_set_complex"]
self.action_list.remove_all(rl)
self.actions = [
Action(self.set_definition, self.__class__.__name__),
Action(self.set_type_ref, 'add_type_ref'),
]
def set_definition(self, obj):
obj.definitions.append(self.definition)
def set_type_ref(self, obj):
if self.definition.type_ref is not None:
obj.type_refs.append(self.definition.type_ref)
class global_definition(CommonListener):
def init_interfaces(self):
self.name = self.children[0]
d = GlobalDefinition(self.name)
self.definition = d
al = ["direct_type", "array", "pointer", "alias",
"primitive_type", "bitfield_type", "structure_type",
"union_type", "object_type", "enumeration_type",
"alternative_type"]
self.action_list.take_all(self.definition, al)
rl = ["pointer_has_pointer", "object_type_set_complex"]
self.action_list.remove_all(rl)
self.actions = [
Action(self.set_definition, self.__class__.__name__),
Action(self.set_type_ref, 'add_type_ref'),
]
def set_definition(self, obj):
obj.definitions.append(self.definition)
def set_type_ref(self, obj):
if self.definition.type_ref is not None:
obj.type_refs.append(self.definition.type_ref)
class ITypeDefinition(CommonListener):
def init_interfaces(self):
self.name = self.children[0]
self.definition = None
self.actions = [Action(self.set_definition, self.__class__.__name__)]
def set_definition(self, obj):
obj.definitions.append(self.definition)
obj.type_refs += self.definition.type_refs
class alternative_definition(ITypeDefinition):
def init_interfaces(self):
super().init_interfaces()
# must have "_t" postfix
name = self.name
if name[-2:] != "_t":
raise DSLError("Invalid type name.\n"
"Type name must have _t as postfix ", name)
d = AlternativeDefinition(name)
self.definition = d
al = ["direct_type", "array", "pointer",
"primitive_type", "bitfield_type", "structure_type",
"object_type", "union_type", "enumeration_type", "public"]
self.action_list.take_all(self.definition, al)
# special case, should have only 1 type ref
if d.type_ref is not None:
d.type_refs.append(d.type_ref)
rl = ["pointer_has_pointer"]
self.action_list.remove_all(rl)
class bitfield_const_decl(CommonListener):
def init_interfaces(self):
super().init_interfaces()
self.actions = [Action(self.set_const, "bitfield_const_decl")]
def set_const(self, obj):
if obj.const:
# TODO: proper logger warnings
print("Warning: redundant bitfield const")
obj.const = True
class bitfield_set_ops_decl(CommonListener):
def init_interfaces(self):
super().init_interfaces()
self.actions = [Action(self.set_has_set_ops, "bitfield_set_ops_decl")]
def set_has_set_ops(self, obj):
obj.has_set_ops = True
class bitfield_definition(ITypeDefinition):
def init_interfaces(self):
super().init_interfaces()
if self.action_list.has("bitfield_delete"):
raise DSLError(
"delete only allowed in extend", self.name)
self.size = -1
self.const = False
d = BitFieldDefinition(self.name)
self.definition = d
self.action_list.take_all(d, [
'bitfield_declaration', 'bitfield_size', 'bitfield_const_decl',
'bitfield_set_ops_decl', 'public'])
d.update_unit_info()
class structure_definition(IABISpecific, ITypeDefinition):
def init_interfaces(self):
super().init_interfaces()
d = StructureDefinition(self.name)
self.definition = d
self.action_list.take(d, "declaration", single=False)
self.action_list.take(d, "qualifier_list")
self.action_list.take(d, "public")
self.action_list.take(d, "describe_lockable_type")
self.action_list.take(d, "describe_optimized_type")
class union_definition(ITypeDefinition):
def init_interfaces(self):
super().init_interfaces()
d = UnionDefinition(self.name)
self.definition = d
self.action_list.take(d, "declaration", single=False)
self.action_list.take(d, "qualifier_list")
self.action_list.take(d, "public")
self.action_list.take(d, "describe_lockable_type")
class enumeration_definition(IABISpecific, ITypeDefinition):
def init_interfaces(self):
super().init_interfaces()
d = EnumerationDefinition(self.name)
self.definition = d
self.action_list.take(d, "enumeration_attribute")
self.action_list.take(d, "public")
# Unused
self.action_list.remove_all(["enumeration_noprefix"])
self.action_list.take(d, "enumeration_constant", single=False)
class object_definition(IABISpecific, ITypeDefinition):
def init_interfaces(self):
super().init_interfaces()
d = ObjectDefinition(self.name)
self.definition = d
self.action_list.take(d, "declaration", single=False)
self.action_list.take(d, "qualifier_list")
self.action_list.take(d, "public")
self.action_list.take(d, "describe_lockable_type")
self.action_list.take(d, "describe_optimized_type")
rl = ["object_type_has_object"]
self.action_list.remove_all(rl)
class module_name(CommonListener):
def init_interfaces(self):
self.module_name = self.children[0]
self.actions = [Action(self.set_name, "module_name")]
def set_name(self, extension):
extension.module_name = self.module_name
class bitfield_size(CommonListener):
def init_interfaces(self):
self.actions = [Action(self.set_size, "bitfield_size")]
def set_size(self, obj):
obj.length = int(self.children[0].value)
class ITypeExtension(CommonListener):
def init_interfaces(self):
# need to check definition if allowed it in the feature
self.type_name = self.children[0]
self.extension = None
self.actions = [Action(self.set_extension, self.__class__.__name__)]
def set_extension(self, parent):
parent.type_refs.append(self.extension)
parent.type_refs += self.extension.type_refs
class bitfield_extension(ITypeExtension):
def init_interfaces(self):
super().init_interfaces()
e = BitFieldExtension(self.type_name)
self.extension = e
self.action_list.take(e, "bitfield_delete", single=False)
al = ["bitfield_declaration", "module_name"]
self.action_list.take_all(e, al)
class structure_extension(ITypeExtension):
def init_interfaces(self):
super().init_interfaces()
e = StructureExtension(self.type_name)
self.extension = e
al = ["declaration", "module_name"]
self.action_list.take_all(e, al)
class object_extension(ITypeExtension):
def init_interfaces(self):
super().init_interfaces()
e = ObjectExtension(self.type_name)
self.extension = e
rl = ["object_type_has_object"]
self.action_list.remove_all(rl)
al = ["declaration", "module_name"]
self.action_list.take_all(e, al)
class union_extension(ITypeExtension):
def init_interfaces(self):
super().init_interfaces()
e = UnionExtension(self.type_name)
self.extension = e
if self.action_list.has("object_type_has_object"):
raise DSLError("cannot declare an object type member in union",
self.declaration.member_name)
al = ["declaration", "module_name"]
self.action_list.take_all(e, al)
class enumeration_extension(ITypeExtension):
def init_interfaces(self):
super().init_interfaces()
e = EnumerationExtension(self.type_name)
self.extension = e
al = ["enumeration_constant", "module_name"]
self.action_list.take_all(e, al)
class object_type(CommonListener):
def init_interfaces(self):
self.type_name = self.children[-1]
self.compound_type = ObjectType(self.type_name)
self.actions = [
Action(self.set_type, "object_type"),
Action(self.create_declaration, "object_type_create_declaration"),
Action(self.has_object, "object_type_has_object"),
Action(self.set_complex, "object_type_set_complex"),
]
def create_declaration(self, obj):
d = ObjectDeclaration()
d.type_ref = self.compound_type
return d
def set_type(self, declaration):
declaration.compound_type = self.compound_type
declaration.type_ref = declaration.compound_type
declaration.is_customized_type = True
def has_object(self, obj):
return True
def set_complex(self, obj):
self.compound_type.complex_type = True
class object_noprefix(CommonListener):
def init_interfaces(self):
self.actions = [Action(self.set_noprefix, 'object_noprefix')]
def set_noprefix(self, obj):
obj.noprefix = True
| 39,670 | 29.031037 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/typed/exceptions.py | # © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# 2019 Cog Systems Pty Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
import re
class BaseError(Exception):
def get_context(self, text, pos, span=40):
start = max(pos - span, 0)
end = pos + span
before = text[start:pos].rsplit('\n', 1)[-1]
after = text[pos:end].split('\n', 1)[0]
before_spaces = re.sub(r'\S', ' ', before)
return before + after + '\n' + before_spaces + '^\n'
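    # For illustration, get_context("size size_t;", 5) returns roughly:
    #   size size_t;
    #        ^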
class DSLError(BaseError):
def __init__(self, message, token, expecting=None, state=None):
meta = getattr(token, 'meta', token)
self.line = getattr(meta, 'line', '?')
self.column = getattr(meta, 'column', '?')
self.pos_in_stream = getattr(meta, 'start_pos',
getattr(meta, 'pos_in_stream', None))
self.expecting = expecting
self.state = state
message = "\nError: %s\n" % message
message += "\nToken %s, at line %s col %s:\n" % (
str(token), self.line, self.column)
if isinstance(self.pos_in_stream, int):
message += '\n' + self.get_context(token.program,
self.pos_in_stream)
if expecting:
message += '\nExpecting: %s\n' % expecting
super(DSLError, self).__init__(message)
class DSLErrorWithRefs(BaseError):
def __init__(self, message, token, refs, expecting=None, state=None):
meta = getattr(token, 'meta', token)
self.line = getattr(meta, 'line', '?')
self.column = getattr(meta, 'column', '?')
self.pos_in_stream = getattr(meta, 'start_pos',
getattr(meta, 'pos_in_stream', None))
self.expecting = expecting
self.state = state
message = "\nError: %s\n" % message
message += "\nAt line %d col %d:\n" % (self.line, self.column)
message += '\n' + self.get_context(token.program, self.pos_in_stream)
message += "\nRefs:"
for r in refs:
line = getattr(r, 'line', '?')
column = getattr(r, 'column', '?')
pos = getattr(r, 'pos_in_stream', None)
message += "\nAt line %d col %d:\n" % (line, column)
message += '\n' + self.get_context(token.program, pos)
if expecting:
message += '\nExpecting: %s\n' % expecting
super(DSLErrorWithRefs, self).__init__(message)
class RangeError(DSLError):
pass
| 2,525 | 34.083333 | 77 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/typed/type_gen.py | #!/usr/bin/env python3
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# 2019 Cog Systems Pty Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
from lark import Lark, ParseError
from exceptions import RangeError, DSLError
from ir import TransformTypes
from abi import AArch64ABI
import argparse
import sys
import os
import subprocess
import inspect
import logging
import pickle
logger = logging.getLogger(__name__)
abi_classes = {
'aarch64': AArch64ABI,
}
def parse_dsl(parser, inputs, abi):
trees = []
for p in inputs:
text = p.read()
try:
parse_tree = parser.parse(text)
cur_tree = TransformTypes(text).transform(parse_tree)
trees.append(cur_tree.get_intermediate_tree())
except ParseError as e:
raise Exception("Parse error in {:s}: {:s}".format(p.name,
str(e)))
final_tree = trees.pop(0)
for t in trees:
final_tree.merge(t)
final_tree.update(abi_classes[abi]())
return final_tree
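# Illustrative flow (hypothetical file names): parse_dsl(parser,
# [open('a.tc'), open('b.tc')], 'aarch64') parses and merges both inputs into
# a single TopLevel IR, then resolves type references against the AArch64 ABI
# via update().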
def apply_template(tree, template, public_only=False):
if template is None:
code = tree.gen_output(public_only=public_only)
else:
code = tree.apply_template(template, public_only=public_only)
return code
def main():
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
)
__loc__ = os.path.realpath(
os.path.dirname(os.path.join(os.getcwd(), os.path.dirname(__file__))))
arg_parser = argparse.ArgumentParser()
mode_args = arg_parser.add_mutually_exclusive_group(required=True)
mode_args.add_argument('-P', '--dump-pickle', type=argparse.FileType('wb'),
help="Dump the IR to a Python pickle")
mode_args.add_argument("-o", "--output",
help="Output file (default stdout)",
type=argparse.FileType('w', encoding='utf-8'),
default=sys.stdout)
arg_parser.add_argument('-t', '--template',
type=argparse.FileType('r', encoding='utf-8'),
help="Template file used to generate output")
arg_parser.add_argument('--public', action='store_true',
help="Include only public API types")
arg_parser.add_argument('--traceback', action="store_true",
help="Print a full traceback if an error occurs")
arg_parser.add_argument("-a", "--abi", help="specify the target machine "
"compiler ABI name", choices=abi_classes.keys(),
required=True)
arg_parser.add_argument("-f", "--formatter",
help="specify clang-format to format the code")
arg_parser.add_argument("-d", "--deps", default=None,
type=argparse.FileType('w', encoding='utf-8'),
help="write implicit dependencies to a Makefile")
arg_parser.add_argument("input", metavar='INPUT', nargs="*",
type=argparse.FileType('r', encoding='utf-8'),
help="Input type DSL files to process")
arg_parser.add_argument('-p', '--load-pickle',
type=argparse.FileType('rb'),
help="Load the IR from a Python pickle")
options = arg_parser.parse_args()
# Calling sanity checks
if options.input and options.load_pickle:
logger.error("Cannot specify both inputs and --load-pickle")
arg_parser.print_usage()
sys.exit(1)
grammar_file = os.path.join(__loc__, 'grammars', 'typed_dsl.lark')
parser = Lark.open(grammar_file, 'start', parser='lalr',
lexer='contextual', propagate_positions=True)
if options.input:
try:
ir = parse_dsl(parser, options.input, options.abi)
except (DSLError, RangeError) as e:
if options.traceback:
import traceback
traceback.print_exc(file=sys.stderr)
else:
logger.error("Parse error", e)
sys.exit(1)
if options.dump_pickle:
pickle.dump(ir, options.dump_pickle, protocol=4)
elif options.load_pickle:
ir = pickle.load(options.load_pickle)
else:
logger.error("Must specify inputs or --load-pickle")
arg_parser.print_usage()
sys.exit(1)
if not options.dump_pickle:
result = apply_template(ir, options.template,
public_only=options.public)
if options.formatter:
ret = subprocess.run([options.formatter],
input=result.encode("utf-8"),
stdout=subprocess.PIPE)
result = ret.stdout.decode("utf-8")
if ret.returncode != 0:
logger.error("Error formatting output", result)
sys.exit(1)
options.output.write(result)
options.output.close()
if options.deps is not None:
deps = set()
deps.add(os.path.relpath(grammar_file))
if options.template is not None:
deps.add(options.template.name)
for m in sys.modules.values():
try:
f = inspect.getsourcefile(m)
except TypeError:
continue
if f is None:
continue
f = os.path.relpath(f)
if f.startswith('../'):
continue
deps.add(f)
if options.template is None:
templates = ir.get_output_templates()
for t in templates:
deps.add(os.path.relpath(t))
if options.dump_pickle:
out_name = options.dump_pickle.name
else:
out_name = options.output.name
options.deps.write(out_name + ' : ')
options.deps.write(' '.join(sorted(deps)))
options.deps.write('\n')
options.deps.close()
if __name__ == '__main__':
main()
| 6,103 | 32.911111 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/typed/ir.py | # © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# 2019 Cog Systems Pty Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
import inspect
import math
import itertools
import os
import abc
from Cheetah.Template import Template
from exceptions import DSLError
from collections import namedtuple
from functools import wraps
from lark import Transformer, Tree, Token, Discard
import tempfile
import Cheetah.ImportHooks
td = tempfile.TemporaryDirectory()
Cheetah.ImportHooks.setCacheDir(td.name)
"""
The classes in the module represent the features of the DSL language. They
form an intermediate representation and are used to generate the output. An
instance of TopLevel will contain all information necessary to generate the
output.
"""
__loc__ = os.path.realpath(
os.path.join(
os.getcwd(),
os.path.dirname(__file__)))
default_copyright = \
"© 2021 Qualcomm Innovation Center, Inc. All rights reserved.\n" \
"SPDX-License-Identifier: BSD-3-Clause"
def property_autoupdate(f):
@wraps(f)
def wrapper(self):
if not self._updated:
self._autoupdate()
return f(self)
return property(wrapper)
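# Illustrative use (hypothetical class): decorating a method with
# @property_autoupdate turns it into a read-only property that first calls
# self._autoupdate() whenever self._updated is not yet set, e.g.
#
#   class Example:
#       def __init__(self):
#           self._updated = False
#
#       def _autoupdate(self):
#           self._size = 8
#           self._updated = True
#
#       @property_autoupdate
#       def size(self):
#           return self._size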
class TopLevel:
def __init__(self):
self.declarations = []
self.definitions = []
# record if need to link type to it's definition
self.type_refs = []
self.constant_refs = []
# record the need to call set_abi() on the type
self.abi_refs = set()
def gen_output(self, public_only=False):
footer = []
code = []
extra = []
sorted_defs = []
seen = set()
def visit(d):
if d in seen:
return
seen.add(d)
for dep in sorted(d.dependencies, key=lambda x: x.indicator):
visit(dep)
sorted_defs.append(d)
# Sort, ensuring that dependencies come before dependent definitions
for d in sorted(self.definitions, key=lambda x: x.indicator):
visit(d)
assert all(d in self.definitions for d in sorted_defs)
assert all(d in sorted_defs for d in self.definitions)
for d in sorted_defs:
if public_only and not d.is_public:
continue
f = d.gen_forward_decl()
code += f
if self.definitions:
code += '\n'
for d in itertools.chain(sorted_defs, self.declarations):
if public_only and not d.is_public:
continue
c, e = d.gen_code()
code += c
extra += e
# FIXME: move to a template file
output = "// Automatically generated. Do not modify.\n//\n"
output += '// ' + '\n// '.join(default_copyright.split('\n')) + '\n\n'
output += "\n"
output += "#include <stddef.h>\n"
output += "#include <stdint.h>\n"
output += "#include <stdbool.h>\n"
output += "#include <stdalign.h>\n"
output += "#include <stdatomic.h>\n"
# output += "#include <assert.h>\n"
# output += "#include <string.h>\n"
output += "#include <stdnoreturn.h>\n"
output += "\n"
output += ' '.join(code)
output += "\n"
if public_only:
output += "#include <guest_hypresult.h>\n"
else:
output += "#include <hypresult.h>\n"
output += "\n"
output += ' '.join(extra)
output += ' '.join(footer)
return output
def get_output_templates(self):
templates = set()
# Sort, ensuring that dependencies come before dependent definitions
for d in self.definitions:
deps = d.get_template_deps()
for t in deps:
templates.add(t)
return list(templates)
def apply_template(self, template_file, public_only=False):
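        # The Cheetah template's search list exposes 'declarations',
        # 'definitions', 'primitives' and 'public_only' (see ns below).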
ns = [{
'declarations': tuple(d for d in self.declarations
if d.is_public or not public_only),
'definitions': tuple(d for d in self.definitions
if d.is_public or not public_only),
'primitives': tuple(PrimitiveType(t) for t in
PrimitiveType.c_type_names),
'public_only': public_only,
}]
template = Template(file=template_file, searchList=ns)
return str(template)
def merge(self, t):
        # TODO: need to handle all declaration type references, especially
        # objects
self.declarations += t.declarations
self.definitions += t.definitions
self.type_refs += t.type_refs
self.constant_refs += t.constant_refs
for a in t.abi_refs:
self.abi_refs.add(a)
def _handle_refs(self, abi):
# FIXME: check for duplicates
defs = {(d.type_name, d.category): d for d in self.definitions}
# Set the ABI for types whose definition depends on it
for t in self.abi_refs:
t.set_abi(abi)
# resolve type and constant references
for r in itertools.chain(self.type_refs, self.constant_refs):
k = (r.indicator, r.category)
if k in defs:
r.link(defs[k])
else:
raise DSLError("Failed to find corresponding definition for " +
r.indicator + ", category(" + r.category + ")",
r.indicator)
def update(self, abi):
"""
        Second pass: set the ABI, resolve type and constant references, and
        update the declarations and definitions.
"""
# Add unreferenced primitives
refs = set()
for t in self.abi_refs:
if t.indicator not in refs:
refs.add(t.indicator)
for t in PrimitiveType.c_type_names:
if t not in refs:
x = PrimitiveType(t)
self.abi_refs.add(x)
# link customised declarations and definitions
self._handle_refs(abi)
# trigger the update of definitions and declarations
for d in itertools.chain(self.declarations, self.definitions):
d.update()
class ICustomized:
BITFIELD = "bitfield"
OBJECT = "object"
STRUCTURE = "structure"
UNION = "union"
ENUMERATION = "enumeration"
ALTERNATIVE = "alternative"
IMPORT = "import"
CONSTANT = "constant"
GLOBAL = "global"
class ICustomizedReference(ICustomized, metaclass=abc.ABCMeta):
@abc.abstractmethod
def link(self, definition):
"""
Link the definition of the base type to this customized object.
"""
raise NotImplementedError
class IConstantExpression(metaclass=abc.ABCMeta):
"""
Interface for integer constant expressions.
"""
def __init__(self):
super().__init__()
self._cache = None
def __int__(self):
if self._cache is None:
self._cache = self.to_int()
return self._cache
def __reduce__(self):
return (int, (int(self),))
def __format__(self, format_spec):
return int(self).__format__(format_spec)
@abc.abstractmethod
def to_int(self):
"""
Convert the expression to a constant value.
The result of this method is cached after it returns a value other
than None.
"""
raise NotImplementedError
class ConstantExpression(IConstantExpression):
"""
Top-level constant expression.
"""
def __init__(self, expr):
super().__init__()
self._cache = None
self.expr = expr
def to_int(self):
return int(self.expr)
class ConstantReference(IConstantExpression, ICustomizedReference):
"""
Constant reference.
"""
def __init__(self, symbol_name):
super().__init__()
self.referenced = False
self.symbol_name = symbol_name
@property
def indicator(self):
return self.symbol_name
def to_int(self):
if self.referenced:
raise DSLError("Definition of constant is self-referential",
self.indicator)
self.referenced = True
return int(self.expr)
@property
def category(self):
return self.CONSTANT
def link(self, definition):
self.expr = definition.value
class UnaryOperation(IConstantExpression):
"""
Apply a unary operator to a constant expression.
"""
operator_map = {
'+': lambda x: x,
'-': lambda x: -x,
'~': lambda x: ~x,
'!': lambda x: int(x == 0),
}
def __init__(self, operator, expr):
super().__init__()
try:
self.func = self.operator_map[operator]
except KeyError:
raise DSLError("Unhandled unary operator", self.operator)
self.expr = expr
def to_int(self):
return self.func(int(self.expr))
class BinaryOperation(IConstantExpression):
"""
Apply a binary operator to a constant expression.
"""
operator_map = {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'/': lambda x, y: x // y,
'%': lambda x, y: x % y,
'<<': lambda x, y: x << y,
'>>': lambda x, y: x >> y,
'<': lambda x, y: int(x < y),
'>': lambda x, y: int(x > y),
'<=': lambda x, y: int(x <= y),
'>=': lambda x, y: int(x >= y),
'==': lambda x, y: int(x == y),
'!=': lambda x, y: int(x != y),
'&': lambda x, y: x & y,
'^': lambda x, y: x ^ y,
'|': lambda x, y: x | y,
'&&': lambda x, y: int(x and y),
'||': lambda x, y: int(x or y),
}
def __init__(self, operator, left_expr, right_expr):
super().__init__()
try:
self.func = self.operator_map[operator]
except KeyError:
raise DSLError("Unhandled binary operator", self.operator)
self.left_expr = left_expr
self.right_expr = right_expr
def to_int(self):
return self.func(int(self.left_expr), int(self.right_expr))
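    # Illustrative use: int(BinaryOperation('<<', ConstantExpression(1),
    # ConstantExpression(4))) evaluates to 1 << 4 == 16.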
class ConditionalOperation(IConstantExpression):
"""
Apply a conditional (ternary) operator to a constant expression.
"""
def __init__(self, cond_expr, true_expr, false_expr):
super().__init__()
self.cond_expr = cond_expr
self.true_expr = true_expr
self.false_expr = false_expr
def to_int(self):
cond = int(self.cond_expr) != 0
return int(self.true_expr) if cond else int(self.false_expr)
class TypePropertyOperation(IConstantExpression):
"""
A constant expression evaluating to some integer property of a type.
"""
def __init__(self, compound_type):
super().__init__()
self.compound_type = compound_type
def to_int(self):
try:
return getattr(
self.compound_type.basic_type.definition,
self._type_property)
except AttributeError:
return getattr(self.compound_type, self._type_property)
class SizeofOperation(TypePropertyOperation):
_type_property = 'size'
class AlignofOperation(TypePropertyOperation):
_type_property = 'alignment'
class MinofOperation(TypePropertyOperation):
_type_property = 'minimum_value'
class MaxofOperation(TypePropertyOperation):
_type_property = 'maximum_value'
class PureFunctionCall(IConstantExpression):
"""
A constant expression that applies a pure function to another expression.
"""
def __init__(self, expr, f):
super().__init__()
self.expr = expr
self.f = f
def to_int(self):
return self.f(int(self.expr))
class IType(metaclass=abc.ABCMeta):
"""
Interface for all types.
"""
def __init__(self):
super().__init__()
self.qualifiers = set()
@abc.abstractproperty
def indicator(self):
"""
Return the AST node that names the type, for use in error messages.
"""
raise NotImplementedError
@abc.abstractproperty
def is_public(self):
"""
True if this type is exposed in the public API.
"""
raise NotImplementedError
def set_const(self):
"""
Add a const qualifier, if none already exists.
This is used on members of aggregate types, which can inherit the
qualifier from the aggregate.
"""
if self.is_writeonly:
raise DSLError(
"Can't have a constant type with a writeonly member",
                self.indicator)
self.qualifiers.add(Qualifier("const"))
def gen_forward_decl(self):
"""
Generates a forward declaration (if needed) for the type.
"""
return ([])
def _gen_type(self, unqualified=False):
"""
Construct a C type name or declaration.
This returns a tuple of two lists. The first list contains tokens that
should appear to the left of the identifier if this type name is used
as a declaration; the second list contains tokens that should appear
to the right of the identifier.
For example, to declare a constant pointer to an array of 8 atomic
pointers to volatile integers:
volatile int *_Atomic (*const a)[8];
The returned value will be:
(['volatile', 'int', '*', '_Atomic', '(*', 'const'],
[')', '[', '8', ']'])
If unqualified is true, the top-level type's qualifiers will be
omitted, which is necessary when it is used as the return value of
a field accessor function.
"""
raise NotImplementedError
def gen_declaration(self, identifier):
"""
Construct a C declaration of an object of this type.
This returns a declaration (excluding any final ; or linebreak) for
an object of this type with the specified identifier.
"""
l, r = self._gen_type()
return ' '.join(itertools.chain(l, [identifier], r))
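        # e.g. if _gen_type() returns (['uint8_t'], []), gen_declaration('x')
        # produces 'uint8_t x'.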
def gen_type_name(self, unqualified=False):
"""
Construct a C type name for this type.
This returns the type name (type-name in the C grammar), which is used
for casts, sizeof, alignof, the parenthesised form of _Atomic,
_Generic, and initialisers.
"""
l, r = self._gen_type(unqualified=unqualified)
return ' '.join(l) + ' '.join(r)
@property
def basic_type(self):
return self
@property
def has_suffix_declarator(self):
"""
True if a type declarator appears to the right of the identifier.
Type specifiers and pointer type declarators appear to the left of the
identifier. Array and function declarators appear to the right of the
identifier.
"""
return self.is_array
@property
def bitsize(self):
"""
The size of this type's value in bits.
Returns None if the true range of the type is not known, which is the
case for all scalar types other than booleans, enumerations, and
non-void pointers.
Implemented only for scalar types (including pointers).
"""
raise DSLError("Non-scalar type cannot be used in this context",
self.indicator)
@property
def minimum_value(self):
"""
The minimum scalar value of this type.
Implemented only for scalar types.
"""
bits = self.size * 8 if self.bitsize is None else self.bitsize
return -1 << (bits - 1) if self.is_signed else 0
@property
def maximum_value(self):
"""
The maximum scalar value of this type.
Implemented only for scalar types.
"""
bits = self.size * 8 if self.bitsize is None else self.bitsize
return (1 << (bits - (1 if self.is_signed else 0))) - 1
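        # e.g. an 8-bit value ranges over 0..255 if unsigned and -128..127 if
        # signed.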
@abc.abstractproperty
def size(self):
"""
The size of this type in bytes.
"""
raise NotImplementedError
@property
def alignment(self):
"""
The alignment of this type in bytes, after qualifiers are applied.
"""
return max(
(q.align_bytes for q in self.qualifiers if q.is_aligned),
default=1 if self.is_packed else self.default_alignment)
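        # An explicit alignment qualifier overrides the natural alignment; a
        # packed type falls back to an alignment of 1 when no alignment
        # qualifier is present.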
@abc.abstractproperty
def default_alignment(self):
"""
The alignment of this type in bytes, if not overridden by a qualifier.
"""
raise NotImplementedError
@property
def is_const(self):
"""
Return True if current type is a const type.
"""
return any((q.is_const for q in self.qualifiers))
@property
def is_atomic(self):
"""
Return True if current type is an atomic type.
"""
return any((q.is_atomic for q in self.qualifiers))
@property
def is_writeonly(self):
"""
Return True if current type is write-only.
This is only applicable to bitfield members. It suppresses generation
of a read accessor for the member.
"""
return any((q.is_writeonly for q in self.qualifiers))
@property
def is_packed(self):
"""
Return True if the current type is packed.
This only makes sense for aggregate types and members in them.
"""
return any((q.is_packed for q in self.qualifiers))
@property
def is_contained(self):
"""
Return True if the container_of macro should be generated for the
current type.
"""
return any(q.is_contained for q in self.qualifiers)
@property
def is_ordered(self):
"""
Return True if the type should be generated exactly as ordered in its
definition.
"""
raise NotImplementedError
@property
def is_signed(self):
"""
For scalar types, returns whether the type is signed.
This is not implemented for non-scalar types.
"""
raise DSLError("Non-scalar type cannot be used in this context",
self.indicator)
@property
def accessor_basename(self):
"""
The name prefix for this type's accessors, if any.
For aggregate types which generate accessor functions, i.e.
structures, objects and bitfields, this is the type name. For all
other types, it is None.
"""
return None
@property
def is_array(self):
"""
True if this type is an array type.
"""
return False
@property
def is_pointer(self):
"""
True if this type is a pointer type.
"""
return False
@property
def pointer(self):
"""
A pointer type that points to this type.
"""
return PointerType(base_type=self)
@abc.abstractproperty
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
raise NotImplementedError
class ICustomizedType(ICustomizedReference, IType):
"""
Interface for customized type/symbol like structure, bitfield, object,
constant, etc.
"""
def __init__(self, type_suffix=''):
super().__init__()
self.definition = None
self.category = "invalid"
self.type_name = "invalid"
        # indicates this customized type is used as an array or pointer, so
        # the memory holds multiple elements or another kind of data rather
        # than a single instance of the customized type
self.complex_type = False
self.type_suffix = type_suffix
@property
def indicator(self):
return self.type_name
# TODO: separate definitions from extensions list
# turn definitions into a dict, and assert there are no duplicates created
# c_types = {}
# if type_name in c_types:
# raise DSLError("duplicate definition of...")
#
# IType.c_types[type_name] = self
def link(self, definition):
"""
Link the definition with declaration
"""
self.definition = definition
@property
def size(self):
try:
return self.definition.size
except AttributeError:
raise DSLError(self.type_name + " is not defined", self.type_name)
@property
def bitsize(self):
try:
return self.definition.bitsize
except AttributeError:
raise DSLError(self.type_name + " is not defined", self.type_name)
@property
def default_alignment(self):
try:
return self.definition.alignment
except AttributeError:
raise DSLError(self.type_name + " is not defined", self.type_name)
@property
def is_signed(self):
try:
return self.definition.is_signed
except AttributeError:
raise DSLError(self.type_name + " is not defined", self.type_name)
@property
def is_pointer(self):
try:
return self.definition.is_pointer
except AttributeError:
raise DSLError(self.type_name + " is not defined", self.type_name)
@property
def is_array(self):
try:
return self.definition.is_array
except AttributeError:
raise DSLError(self.type_name + " is not defined", self.type_name)
@property
def is_public(self):
try:
return self.definition.is_public
except AttributeError:
raise DSLError(self.type_name + " is not defined", self.type_name)
def _gen_type(self, **_):
return ([self.type_name + self.type_suffix], [])
@property
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
return (self.definition,)
class IGenCode(metaclass=abc.ABCMeta):
"""
Interface for C code generation.
    gen_code() returns (code, extra): code holds the declarations and
    definitions, while extra holds flattened output such as getter/setter
    accessors.
"""
@abc.abstractmethod
def gen_code(self):
return ([], [])
def get_template_deps(self):
return []
class IAggregation:
"""
    Interface for a definition/extension holding declarations and definitions
"""
def __init__(self):
super().__init__()
self.declarations = []
self.definitions = []
self.type_refs = []
def set_declarations(self, declarations):
self.declarations = declarations
def set_definitions(self, definitions):
self.definitions = definitions
class IUpdate():
"""
    Interface for all classes that need a second-pass scan
"""
def update(self):
"""
Update internal data, prepare to generate code
"""
pass
class IExtension(ICustomizedReference, IAggregation):
"""
Interface for extension of bit field or object
"""
def __init__(self):
super().__init__()
self.prefix = None
class ICustomizedDefinition(ICustomized, IType, IAggregation, IUpdate):
"""
    Interface for all customized definitions
"""
def __init__(self):
super().__init__()
self.type_name = "invalid"
self.type_link_cnt = 0
self._public = False
@property
def is_public(self):
return self._public
def set_public(self):
self._public = True
@property
def indicator(self):
return self.type_name
@property
def is_container(self):
"""
True if offset macros and container_of functions should be generated.
"""
return False
def gen_type_name(self, unqualified=False):
"""
Construct a C type name for this type.
"""
return self.type_name + '_t'
class IDeclaration(IUpdate):
"""
Interface for all declarations, which add members to compound types.
"""
BITFIELD = "bitfield"
OBJECT = "object"
PRIMITIVE = "primitive"
SEPARATOR = "_"
def __init__(self):
super().__init__()
self.category = self.PRIMITIVE
self.member_name = None
# FIXME: compound_type is badly named; it is really the declared type.
self.compound_type = None
        # complex_type indicates that the data stored by this declaration is
        # not just a single element of the basic type; it may hold multiple
        # elements or a different kind of data. Currently only arrays and
        # pointers make a type complex.
self.complex_type = False
self.is_customized_type = False
self.is_ignore = False
        # keeps the type reference that still needs to be resolved to its
        # corresponding definition
self.type_ref = None
# indicate which definition owns this declaration
self.owner = None
def set_ignored(self):
assert (not self.is_ignore)
self.is_ignore = True
def set_const(self):
self.compound_type.set_const()
def get_members(self, prefix=None):
"""
Return the list of members added to the enclosing aggregate.
"""
return ((self._get_member_name(prefix), self.compound_type, self),)
def _get_member_name(self, prefix):
"""
Get the name of this member, given an optional prefix.
"""
prefix = '' if prefix is None else prefix + self.SEPARATOR
member_name = prefix + self.member_name
return member_name
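        # e.g. member 'count' flattened with prefix 'foo' becomes 'foo_count'.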
class PrimitiveType(IType):
PRIMITIVE = "primitive"
c_type_names = {
'bool': 'bool',
'uint8': 'uint8_t',
'uint16': 'uint16_t',
'uint32': 'uint32_t',
'uint64': 'uint64_t',
'uintptr': 'uintptr_t',
'sint8': 'int8_t',
'sint16': 'int16_t',
'sint32': 'int32_t',
'sint64': 'int64_t',
'sintptr': 'intptr_t',
'char': 'char',
'size': 'size_t',
}
abi_type_names = {
'uregister': 'uregister_t',
'sregister': 'sregister_t',
}
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.category = self.PRIMITIVE
assert type_name in itertools.chain(self.c_type_names,
self.abi_type_names)
if type_name in self.c_type_names:
self.c_type_name = self.c_type_names[type_name]
else:
self.c_type_name = self.abi_type_names[type_name]
@property
def indicator(self):
return self.type_name
@property
def is_public(self):
return True
def set_abi(self, abi):
if self.type_name in self.abi_type_names:
self.c_type_name = abi.get_c_type_name(self.c_type_name)
ctype = abi.get_c_type(self.c_type_name)
self._is_signed = ctype.is_signed
self._size = ctype.size
self._align = ctype.align
self._bitsize = ctype.bitsize
def _gen_type(self, **_):
return ([self.c_type_name], [])
@property
def size(self):
return self._size
@property
def is_signed(self):
return self._is_signed
@property
def default_alignment(self):
return self._align
@property
def bitsize(self):
return self._bitsize
def __repr__(self):
return "PrimitiveType<{:s}>".format(self.indicator)
@property
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
return ()
class BitFieldType(ICustomizedType):
def __init__(self, type_name):
super().__init__('_t')
self.type_name = type_name
self.category = self.BITFIELD
@property
def accessor_basename(self):
"""
The name prefix for this type's accessors, if any.
For aggregate types which generate accessor functions, i.e.
structures, objects and bitfields, this is the type name. For all
other types, it is None.
"""
return self.type_name
class ObjectType(ICustomizedType):
def __init__(self, type_name):
super().__init__('_t')
self.type_name = type_name
self.category = self.OBJECT
def link(self, definition):
super().link(definition)
if not self.complex_type and definition.type_link_cnt == 0:
definition.need_export = False
elif self.complex_type:
definition.need_export = True
definition.type_link_cnt += 1
@property
def accessor_basename(self):
"""
The name prefix for this type's accessors, if any.
For aggregate types which generate accessor functions, i.e.
structures, objects and bitfields, this is the type name. For all
other types, it is None.
"""
return self.type_name
class StructureType(ICustomizedType):
def __init__(self, type_name):
super().__init__('_t')
self.type_name = type_name
self.category = self.STRUCTURE
@property
def accessor_basename(self):
"""
The name prefix for this type's accessors, if any.
For aggregate types which generate accessor functions, i.e.
structures, objects and bitfields, this is the type name. For all
other types, it is None.
"""
return self.type_name
class UnionType(ICustomizedType):
def __init__(self, type_name):
super().__init__('_t')
self.type_name = type_name
self.category = self.UNION
@property
def accessor_basename(self):
"""
The name prefix for this type's accessors, if any.
For aggregate types which generate accessor functions, i.e.
structures, objects and bitfields, this is the type name. For all
other types, it is None.
"""
return self.type_name
class EnumerationType(ICustomizedType):
def __init__(self, type_name):
super().__init__('_t')
self.type_name = type_name
self.category = self.ENUMERATION
@property
def accessor_basename(self):
"""
The name prefix for this type's accessors, if any.
For aggregate types which generate accessor functions, i.e.
structures, objects and bitfields, this is the type name. For all
other types, it is None.
"""
return self.type_name
class AlternativeType(ICustomizedType):
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.category = self.ALTERNATIVE
FieldMap = namedtuple('FieldMap', ['field_bit', 'mapped_bit', 'length'])
class BitFieldMemberMapping:
"""
Data structure that encodes member's bit mapping to the bitfield
"""
def __init__(self, shift):
self.field_shift = shift
self.field_signed = False
self.field_maps = []
def set_signed(self, signed=True):
self.field_signed = signed
def add_bit_range(self, field_bit, mapped_bit, length):
assert int(field_bit) >= 0
fmap = FieldMap(field_bit, mapped_bit, length)
self.field_maps.append(fmap)
def update(self, unit_size):
"""compact field_maps"""
i = 0
try:
while True:
a = self.field_maps[i]
b = self.field_maps[i + 1]
assert a.field_bit + a.length == b.field_bit
if (a.mapped_bit // unit_size == b.mapped_bit // unit_size) \
and (a.mapped_bit + a.length == b.mapped_bit):
assert a.field_bit >= 0
c = FieldMap(a.field_bit, a.mapped_bit,
a.length + b.length)
self.field_maps[i] = c
del self.field_maps[i + 1]
else:
i += 1
except IndexError:
pass
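        # e.g. adjacent maps (0, 0, 4) and (4, 4, 4) that land in the same
        # unit are merged into the single map (0, 0, 8).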
def __repr__(self):
ret = "BitFieldMemberMapping<"
sep = ''
for x in self.field_maps:
ret += "{:s}({:d},{:d},{:d})".format(sep, x.field_bit,
x.mapped_bit, x.length)
sep = ','
ret += ">"
return ret
class BitFieldSpecifier:
NONE = 0
RANGE = 1
OTHERS = 2
AUTO = 3
def __init__(self):
self.specifier_type = self.NONE
self.bit_length = None
self.shift = 0
self.auto_width = None
self._bit_ranges = []
self.mapping = None
def add_bit_range(self, bit_range):
assert (self.specifier_type in (self.NONE, self.RANGE))
self.specifier_type = self.RANGE
self._bit_ranges.append(bit_range)
def set_type_shift(self, shift):
assert (self.specifier_type in (self.RANGE, self.AUTO))
self.shift = shift
def set_type_auto(self, width=None):
assert (self.specifier_type is self.NONE)
self.specifier_type = self.AUTO
self.auto_width = width
def set_type_others(self):
assert (self.specifier_type is self.NONE)
self.specifier_type = self.OTHERS
@property
def bit_ranges(self):
return tuple(r.get_bits() for r in self._bit_ranges)
@property
def range_width(self):
assert self.specifier_type is self.RANGE
bits = 0
for bit, width in self.bit_ranges:
bits += width
return bits
def update_ranges(self, declaration, physical_ranges, unit_size):
shift = int(self.shift)
self.mapping = BitFieldMemberMapping(shift)
# FIXME - reserved members defaults need to be considered
# - extended registers need to have reserved ranges / defaults
# recalculated
# if declaration.is_ignore:
# print(" - skip", declaration.member_name)
# return
# Split up any ranges that cross unit boundaries
split_ranges = []
for bit, width in self.bit_ranges:
while (bit // unit_size) != (bit + width - 1) // unit_size:
split_point = ((bit // unit_size) + 1) * unit_size
split_width = split_point - bit
split_ranges.append((bit, split_width))
bit = split_point
width -= split_width
split_ranges.append((bit, width))
if self.specifier_type is self.RANGE:
field_bit = shift
for bit, width in reversed(split_ranges):
if not physical_ranges.insert_range((bit, width), declaration):
raise DSLError("bitfield member conflicts with previously "
"specified bits, freelist:\n" +
str(physical_ranges),
declaration.member_name)
self.mapping.add_bit_range(field_bit, bit, width)
field_bit += width
self.mapping.update(unit_size)
self.bit_length = field_bit
def set_signed(self, signed):
self.mapping.set_signed(signed)
class DirectType(IType):
def __init__(self):
super().__init__()
self._basic_type = None
def _gen_type(self, unqualified=False):
l, r = self._basic_type._gen_type()
ql = []
if not unqualified:
for q in self.qualifiers:
if q.is_restrict:
raise DSLError("Restrict qualifier is only for pointer",
self._basic_type.indicator)
ql.extend(q.gen_qualifier())
return (ql + l, r)
def set_basic_type(self, type):
assert self._basic_type is None
self._basic_type = type
@property
def category(self):
return self._basic_type.category
@property
def type_name(self):
return self._basic_type.type_name
@property
def definition(self):
return self._basic_type.definition
@property
def indicator(self):
return self._basic_type.indicator
@property
def basic_type(self):
return self._basic_type
@property
def size(self):
return self._basic_type.size
@property
def bitsize(self):
return self._basic_type.bitsize
@property
def default_alignment(self):
return self._basic_type.alignment
@property
def is_signed(self):
return self._basic_type.is_signed
@property
def is_public(self):
return self._basic_type.is_public
@property
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
return self._basic_type.dependencies
class ArrayType(IType):
def __init__(self, indicator):
super().__init__()
self.length = None
self.base_type = None
self._indicator = indicator
@property
def indicator(self):
return self._indicator
def _gen_type(self, **_):
l, r = self.base_type._gen_type()
return (l, ["[{:d}]".format(self.length)] + r)
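        # e.g. an array of 8 uint8 elements yields (['uint8_t'], ['[8]']),
        # which gen_declaration('x') renders as 'uint8_t x [8]'.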
@property
def size(self):
return int(self.length) * self.base_type.size
@property
def default_alignment(self):
return self.base_type.alignment
@property
def is_array(self):
"""
True if this type is an array type.
"""
return True
@property
def is_public(self):
return self.base_type.is_public
@property
def dependencies(self):
"""
Return all other types that this type definition relies on.
Note, pointers to types are not considered dependencies as they can be
forward declared.
"""
return self.base_type.dependencies
class PointerType(IType):
def __init__(self, indicator=None, base_type=None):
super().__init__()
self.base_type = base_type
self._indicator = indicator
@property
def indicator(self):
return self._indicator
def set_abi(self, abi):
self._size = abi.pointer_size
self._align = abi.pointer_align
def _gen_type(self, unqualified=False):
l, r = self.base_type._gen_type()
if unqualified:
ql = []
else:
ql = list(itertools.chain(*(
q.gen_qualifier() for q in self.qualifiers)))
if self.base_type.has_suffix_declarator:
# Needs parentheses to bind the * on the left before the base
# type's declarator on the right.
return (l + ["(*"] + ql, [")"] + r)
else:
return (l + ["*"] + ql, r)
@property
def size(self):
return self._size
@property
def bitsize(self):
return (self.size * 8) - (self.base_type.alignment - 1).bit_length()
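        # The low bits of a pointer are known to be zero because of the
        # pointed-to type's alignment; e.g. a 64-bit pointer to an
        # 8-byte-aligned type has a bitsize of 61.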
@property
def is_signed(self):
return False
@property
def default_alignment(self):
return self._align
@property
def is_pointer(self):
return True
@property
def is_public(self):
return self.base_type.is_public
@property
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
if self.base_type.is_atomic:
# Clang requires atomic-qualified types to be complete, even when
# taking a pointer to one. This is not clearly required by the
# standard and seems to be a Clang implementation quirk.
return self.base_type.dependencies
return ()
class PrimitiveDeclaration(IDeclaration):
"""
Declaration of a single member in a structure or object.
"""
def __init__(self):
super().__init__()
self.category = self.PRIMITIVE
self.offset = None
class ObjectDeclaration(PrimitiveDeclaration):
"""
Declaration of an object-typed member, which will be flattened.
"""
def __init__(self):
super().__init__()
self.category = self.OBJECT
self.noprefix = False
def get_members(self, prefix=None):
"""
Return the list of members added to the enclosing aggregate.
"""
if self.offset is not None:
raise DSLError("Object-typed member cannot have a fixed offset",
self.offset)
if self.complex_type:
return super().get_members(prefix=prefix)
prefix = None if self.noprefix else self._get_member_name(prefix)
members = tuple(self.type_ref.definition._members(prefix))
for n, t, d in members:
if d.offset is not None:
raise DSLError(
"Flattened object type cannot contain fixed offsets",
d.member_name)
return members
templates = {}
class BitFieldDeclaration(IGenCode, IDeclaration):
"""
Declaration of a field in a BitFieldDefinition.
"""
ACCESSOR_TEMPLATE = "templates/bitfield-generic-accessor.tmpl"
def __init__(self):
super().__init__()
self.category = self.BITFIELD
self.prefix = None
self.bitfield_specifier = None
self.bf_type_name = None
self.unit_type = None
self.unit_size = -1
self.ranges = None
self.default = None
self._template = None
def gen_code(self):
# validate parameters
# FIXME:
# if self.bitfield_specifier.sign_map is not None and \
# not self.is_signed:
# raise DSLError("specified sign map for unsigned type",
# self.member_name)
body = []
footer = []
        # generate accessor code into the footer (returned as "extra")
# FIXME: should be a list of templates (header, c, etc.) ?
assert self._template is not None
if 'bitfield' in templates:
template = templates['bitfield']
else:
template = Template.compile(file=open(self._template, 'r',
encoding='utf-8'))
templates['bitfield'] = template
t = template(namespaces=(self))
footer.append(str(t))
return (body, footer)
def get_members(self, prefix=None):
"""
Return the list of members added to the enclosing aggregate.
This doesn't make sense for bitfield declarations, which can never
flatten anything.
"""
raise NotImplementedError
def get_template_deps(self):
return [os.path.join(__loc__, self.ACCESSOR_TEMPLATE)]
def update(self):
super().update()
if not self.is_ignore and self.compound_type is None:
return
if self.compound_type.is_array:
raise DSLError("cannot declare an array in a bitfield",
self.member_name)
b = self.bitfield_specifier
# Allocate auto bits
if b.specifier_type is BitFieldSpecifier.AUTO:
width = b.auto_width
if width is None:
if self.compound_type.bitsize is not None:
width = self.compound_type.bitsize
else:
width = self.compound_type.size * 8
else:
width = int(width)
r = self.ranges.alloc_range(width, self)
if r is None:
raise DSLError(
"unable to allocate {:d} bits from {:s}".format(
width, repr(self.ranges)),
self.member_name)
assert (r[1] == width)
b.bit_length = width
range_shift = b.shift
unit_size = self.unit_size
bit = r[0]
while ((width > 0) and
((bit // unit_size) != (bit + width - 1) // unit_size)):
split_point = ((bit // unit_size) + 1) * unit_size
split_width = split_point - bit
b.mapping.add_bit_range(range_shift, bit, split_width)
bit = split_point
width -= split_width
range_shift += split_width
b.mapping.add_bit_range(range_shift, bit, width)
assert (b.bit_length is not None)
b.set_signed(self.compound_type.is_signed)
const = self.compound_type.is_const
writeonly = self.compound_type.is_writeonly
if const and writeonly:
raise DSLError("const and writeonly is invalid", self.member_name)
# pdb.set_trace()
member_typesize = self.compound_type.size * 8
if member_typesize < b.bit_length:
raise DSLError(
"too many bits {:d}, exceed type size {:d}".format(
b.bit_length, member_typesize),
self.member_name)
member_bitsize = self.compound_type.bitsize
if member_bitsize is not None and member_bitsize > b.bit_length:
raise DSLError(
"not enough bits {:d}, need at least {:d}".format(
b.bit_length, member_bitsize),
self.member_name)
self._template = os.path.join(__loc__, self.ACCESSOR_TEMPLATE)
@property
def indicator(self):
return self.compound_type.indicator
@property
def field_shift(self):
return self.bitfield_specifier.mapping.field_shift
@property
def field_signed(self):
return self.bitfield_specifier.mapping.field_signed
@property
def field_maps(self):
return self.bitfield_specifier.mapping.field_maps
@property
def field_length(self):
return self.bitfield_specifier.bit_length
@property
def is_const(self):
return self.compound_type.is_const
@property
def is_writeonly(self):
return self.compound_type.is_writeonly
@property
def is_nested_bitfield(self):
return (not self.compound_type.is_pointer and
self.compound_type.category == 'bitfield')
@property
def field_name(self):
return self._get_member_name(self.prefix)
def update_ranges(self, ranges, unit_size):
if self.compound_type is not None and self.compound_type.is_pointer:
# Pointers are automatically shifted left as far as possible,
# because their bitsize reflects the low bits being fixed to zero
# by the alignment of the target type. They should never have an
# explicitly specified shift.
assert self.bitfield_specifier.shift == 0
b = self.bitfield_specifier
ptr_size = self.compound_type.size * 8
ptr_bits = self.compound_type.bitsize
if b.specifier_type is BitFieldSpecifier.RANGE:
range_width = b.range_width
ptr_bits = range_width
if ptr_bits < self.compound_type.bitsize:
raise DSLError(
"too few bits {:d}, for pointer bitsize {:d}".format(
ptr_bits, self.compound_type.bitsize),
self.member_name)
if ptr_bits > ptr_size:
raise DSLError(
"too many bits {:d}, for pointer type size {:d}".format(
ptr_bits, ptr_size),
self.member_name)
self.bitfield_specifier.set_type_shift(ptr_size - ptr_bits)
self.bitfield_specifier.update_ranges(self, ranges, unit_size)
class StructurePadding:
def __init__(self, length):
super().__init__()
self._length = length
def gen_declaration(self, identifier):
return 'uint8_t {:s}[{:d}]'.format(identifier, self._length)
class StructureDefinition(IGenCode, ICustomizedDefinition):
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.category = self.STRUCTURE
self.declarations = []
self.extensions = []
self._abi = None
self._size = None
self._alignment = None
self._layout = None
self._ordered = True
def set_abi(self, abi):
"""
Set the ABI object that provides structure layout rules.
"""
self._abi = abi
def set_ordered(self, ordered):
"""
Set the structure layout rules.
"""
self._ordered = ordered
def _update_layout(self):
"""
Determine the layout of the structure.
"""
for q in self.qualifiers:
if q.is_optimized:
self.set_ordered(False)
if self.is_ordered:
member_list = iter(self._members())
else:
# Sort members by group, module and alignment
member_list = list(self._members())
def member_key(member):
member_type = member[1]
member_decl = member[2]
default_group = chr(0xff)
if hasattr(member_decl, "module_name"):
if member_decl.module_name:
default_group = '~' + member_decl.module_name
key = (default_group, -member_type.alignment)
for q in member_type.qualifiers:
if q.is_group:
key = tuple(['/'.join(q.group),
-member_type.alignment])
break
return key
            # sort the (name, type, declaration) tuples by group and alignment
            member_list = sorted(member_list, key=member_key)
packed = self.is_packed
layout = []
abi = self._abi
# The layout is (member name, member type, member offset).
offset = 0
max_alignment = 1
members = set()
for member_name, member_type, member_decl in member_list:
if member_name in members:
raise DSLError("structure {}: duplicated members".format(
self.type_name), member_decl.member_name)
members.add(member_name)
if member_decl.offset is not None:
member_pos = int(member_decl.offset)
if member_pos < offset:
raise DSLError("structure {}: "
"Fixed offset of member (@{:d}) is "
"before the end of the previous member "
"(@{:d})".format(self.type_name,
member_pos, offset),
member_decl.member_name)
elif member_pos > offset:
layout.append(('pad_to_{:s}_'.format(member_name),
StructurePadding(member_pos - offset),
member_pos))
offset = member_pos
else:
member_pos = None
if not packed:
pos = abi.layout_struct_member(offset, max_alignment,
member_type.size,
member_type.alignment)
if pos > offset:
if member_pos is not None:
raise DSLError("structure {}: "
"Padding needed after fixed offset "
"({:d} bytes)".format(self.type_name,
pos - offset),
member_decl.member_name)
layout.append(('pad_to_{:s}_'.format(member_name),
StructurePadding(pos - offset), pos))
offset = pos
layout.append((member_name, member_type, offset))
offset += member_type.size
max_alignment = max(max_alignment, member_type.alignment)
if offset != 0:
# update max_alignment for end padding
for q in self.qualifiers:
if q.is_aligned:
max_alignment = max(max_alignment, q.align_bytes)
if not packed:
# Pad the structure at the end
end = abi.layout_struct_member(
offset, max_alignment, None, None)
if end > offset:
layout.append(('pad_end_', StructurePadding(end - offset),
offset))
offset = end
self._alignment = max_alignment
self._size = offset
self._layout = tuple(layout)
def gen_forward_decl(self):
"""
Generates a forward declaration (if needed) for the type.
"""
code = []
code.append("typedef")
for q in self.qualifiers:
if q.is_aligned or q.is_packed:
pass
elif q.is_atomic or q.is_const:
code.extend(q.gen_qualifier())
        code.append("struct " + self.type_name + '_s' + ' ' +
                    self.type_name + '_t;\n')
return (code)
def gen_code(self):
if self._layout is None:
self._update_layout()
code = []
extra = []
if self._size != 0:
code.append("struct ")
for q in self.qualifiers:
if q.is_aligned or q.is_atomic or q.is_const or q.is_optimized:
pass
elif q.is_packed or q.is_lockable:
code.extend(q.gen_qualifier())
else:
raise DSLError("Invalid qualifier for structure", q.name)
        code.append(" " + self.type_name + '_s {\n')
for q in self.qualifiers:
if q.is_aligned:
code.extend(q.gen_qualifier())
for member_name, member_type, member_offset in self._layout:
code.append(member_type.gen_declaration(member_name) + ';\n')
code.append("} ")
code.append(';\n\n')
return (code, extra)
@property
def size(self):
if self._size is None:
self._update_layout()
return self._size
@property
def layout_with_padding(self):
if self._layout is None:
self._update_layout()
return self._layout
@property
def layout(self):
return ((n, t, p)
for n, t, p in self.layout_with_padding
if not isinstance(t, StructurePadding))
@property
def is_container(self):
return True
@property
def is_ordered(self):
return self._ordered
@property
def default_alignment(self):
if self._alignment is None:
self._update_layout()
return self._alignment
def _members(self, prefix=None):
for d in self.declarations:
yield from d.get_members(prefix=prefix)
for e in self.extensions:
yield from e._members(prefix=prefix)
def update(self):
"""
Update internal data, prepare to generate code
"""
used_names = set()
for member_name, member_type, _ in self._members():
if member_name in used_names:
raise DSLError("'structure {:s}': each member needs to have a"
" unique name".format(self.type_name),
member_type.type_name)
used_names.add(member_name)
@property
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
return itertools.chain(*(t.dependencies
for _, t, _ in self._members()))
class ObjectDefinition(StructureDefinition):
def __init__(self, type_name):
super().__init__(type_name)
self.category = self.OBJECT
# Object definitions are only generated for objects that are never
# embedded in any other object.
# FIXME: this should be explicit in the language
self.need_export = True
self.set_ordered(False)
def gen_code(self):
if self.need_export:
return super().gen_code()
else:
return ([], [])
def gen_forward_decl(self):
if self.need_export:
return super().gen_forward_decl()
else:
return ([])
@property
def is_container(self):
return self.need_export
def gen_type_name(self, unqualified=False):
"""
Construct a C type name for this type.
"""
if self.need_export:
return super().gen_type_name()
else:
return None
class UnionDefinition(IGenCode, ICustomizedDefinition):
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.category = self.UNION
self.declarations = []
self.extensions = []
self._size = None
self._alignment = None
def gen_forward_decl(self):
"""
Generates a forward declaration (if needed) for the type.
"""
code = []
code.append("typedef")
for q in self.qualifiers:
if q.is_aligned or q.is_lockable:
pass
elif q.is_atomic or q.is_const:
code.extend(q.gen_qualifier())
else:
raise DSLError("Invalid qualifier for union", q.name)
code.append("union " + self.type_name + '_u' +
' ' + self.type_name + '_t;\n')
return code
def gen_code(self):
code = []
code.append('union ')
for q in self.qualifiers:
if q.is_aligned or q.is_atomic or q.is_const:
pass
elif q.is_lockable:
code.extend(q.gen_qualifier())
else:
raise DSLError("Invalid qualifier for union", q.name)
code.append(" " + self.type_name + '_u' + " {\n")
align_qualifiers = tuple(q for q in self.qualifiers if q.is_aligned)
for member_name, member_type, member_decl in self._members():
if member_decl.offset is not None and int(member_decl.offset) != 0:
raise DSLError("Union member must have zero offset",
member_decl.member_name)
for q in align_qualifiers:
code.extend(q.gen_qualifier())
code.append(member_type.gen_declaration(member_name) + ';\n')
code.append("} ")
code.append(';\n\n')
return (code, [])
@property
def size(self):
if self._size is None:
self._size = max(t.size for _, t, _ in self._members())
return self._size
@property
def default_alignment(self):
if self._alignment is None:
q_align = max(
(q.align_bytes for q in self.qualifiers if q.is_aligned),
default=1)
m_align = max(t.alignment for _, t, _ in self._members())
self._alignment = max(q_align, m_align)
return self._alignment
def _members(self, prefix=None):
for d in self.declarations:
members = d.get_members(prefix=prefix)
if len(members) > 1:
raise DSLError("Unions must not contain flattened objects",
d.member_name)
yield from members
for e in self.extensions:
yield from e._members(prefix=prefix)
def update(self):
"""
Update internal data, prepare to generate code
"""
used_names = set()
for member_name, member_type, _ in self._members():
if member_name in used_names:
raise DSLError("'union {:s}': each member needs to have a"
" unique name".format(self.type_name),
member_type.type_name)
used_names.add(member_name)
@property
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
return itertools.chain(*(t.dependencies
for _, t, _ in self._members()))
class EnumerationConstant(object):
__slots__ = ['name', 'value', 'prefix']
def __init__(self, name, value=None):
self.name = name
self.value = value
self.prefix = None
enum_const = namedtuple('enum_const', ['name', 'value', 'prefix'])
class EnumerationDefinition(IGenCode, ICustomizedDefinition):
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.category = self.ENUMERATION
self._enumerators = []
self.prefix = None
self.capitalized = True
self._explicit = False
self._signed = None
self._size = None
self._bitsize = None
self._alignment = None
self._min_value = None
self._max_value = None
self._updated = False
def __getstate__(self):
"""
Temporary workaround to ensure types are updated before pickling. Auto
update should be removed entirely when issue is resolved.
"""
if not self._updated:
self._autoupdate()
return self.__dict__
def set_abi(self, abi):
"""
Set the ABI object that provides structure layout rules.
"""
self._abi = abi
def set_explicit(self):
"""
Set the enumeration as being explicit, no value allocation.
"""
self._explicit = True
def add_enumerator(self, e):
"""
Add an enumerator to the type.
"""
# If this is the first enumerator, its default value is zero
if e.value is None and len(self._enumerators) == 0:
if self._explicit:
raise DSLError("auto allocated enumerator in explicit "
"enumeration", e.name)
e.value = 0
self._enumerators.append(e)
def _autoupdate(self):
"""
Update internal data, prepare to generate code
"""
def _check_enumerator(e):
if e.name in used_names:
raise DSLError("'enumeration {:s}': each enumerator needs to"
" have a unique name".format(self.type_name),
e.name)
if e.value in used_values:
raise DSLError("'enumeration {:s}': each enumerator needs to"
" have a unique value".format(self.type_name),
e.name)
used_names.add(e.name)
used_values.add(e.value)
# Ensure constant values are resolved and not duplicates
used_names = set()
used_values = set()
for e in self._enumerators:
if e.value is not None:
e.value = int(e.value)
_check_enumerator(e)
# Auto-allocation of remaining values
last_val = None
for e in self._enumerators:
if e.value is None and self._explicit:
raise DSLError("auto allocated enumerator in explicit "
"enumeration", e.name)
if e.value is None:
assert last_val is not None
e.value = last_val + 1
_check_enumerator(e)
last_val = e.value
if not self._enumerators:
raise DSLError('Empty enumeration', self.type_name)
e_min = min(used_values)
e_max = max(used_values)
self._size, self._alignment, self._signed = \
self._abi.get_enum_properties(e_min, e_max)
self._bitsize = max(e_min.bit_length(), e_max.bit_length())
self._min_value = e_min
self._max_value = e_max
if self.prefix is None:
self.prefix = self.type_name + '_'
if self.capitalized:
self.prefix = self.prefix.upper()
# Todo: support suppressing prefix for some enumerators
for e in self._enumerators:
if e.prefix is None:
e.prefix = self.prefix
# Finalize
enumerators = [enum_const(e.name, e.value, e.prefix) for e in
self._enumerators]
self._enumerators = enumerators
self._updated = True
@property_autoupdate
def enumerators(self):
return self._enumerators
@property_autoupdate
def size(self):
return self._size
@property_autoupdate
def bitsize(self):
return self._bitsize
@property_autoupdate
def minimum_value(self):
return self._min_value
@property_autoupdate
def maximum_value(self):
return self._max_value
@property_autoupdate
def default_alignment(self):
return self._alignment
@property_autoupdate
def is_signed(self):
return self._signed
def get_enum_name(self, e):
if self.capitalized:
return e.name.upper()
else:
return e.name
def gen_code(self):
if not self._updated:
self._autoupdate()
code = []
extra = []
# generate code now
code = ['typedef', 'enum', self.type_name + '_e', '{\n']
sorted_enumerators = sorted(self._enumerators, key=lambda x: x.value)
code.append(',\n'.join(' ' + e.prefix + self.get_enum_name(e) +
' = ' + str(e.value)
for e in sorted_enumerators))
code.append('\n}')
code.append(self.type_name + '_t')
code.append(';\n\n')
for e in self._enumerators:
if e.value == self.minimum_value:
e_min = e.prefix + self.get_enum_name(e)
if e.value == self.maximum_value:
e_max = e.prefix + self.get_enum_name(e)
code.append('#define {:s}__MAX {:s}\n'.format(
self.type_name.upper(), e_max))
code.append('#define {:s}__MIN {:s}\n'.format(
self.type_name.upper(), e_min))
code.append('\n')
return (code, extra)
@property
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
return ()
class IDeclarationDefinition(IGenCode, ICustomizedDefinition, IDeclaration):
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
@property
def size(self):
return self.compound_type.size
@property
def bitsize(self):
return self.compound_type.bitsize
@property
def default_alignment(self):
return self.compound_type.alignment
@property
def is_signed(self):
return self.compound_type.is_signed
@property
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
if self.compound_type is None:
return ()
return self.compound_type.dependencies
class AlternativeDefinition(IDeclarationDefinition):
def __init__(self, type_name):
super().__init__(type_name)
self.category = self.ALTERNATIVE
def gen_code(self):
code = []
extra = []
# generate code now
decl = self.compound_type.gen_declaration(self.type_name)
code = ["typedef ", decl, ';\n']
return (code, extra)
def gen_type_name(self, unqualified=False):
"""
Construct a C type name for this type.
"""
return self.type_name
class ConstantDefinition(IDeclarationDefinition):
def __init__(self, type_name, value):
super().__init__(type_name)
self.value = value
self.category = self.CONSTANT
def gen_code(self):
code = []
extra = []
# generate code now
val = int(self.value)
if self.compound_type is not None:
t = self.compound_type.gen_type_name(unqualified=True)
cast = '({:s})'.format(t)
suffix = '' if self.compound_type.is_signed else 'U'
if val < 0 and not self.compound_type.is_signed:
val &= ((1 << (self.compound_type.size * 8)) - 1)
val = '{:d}{:s} // {:#x}'.format(val, suffix, val)
else:
val = '{:d}'.format(val)
cast = ''
code.append("#define {:s} {:s}{:s}\n".format(
self.type_name, cast, val))
return (code, extra)
class GlobalDefinition(IDeclarationDefinition):
def __init__(self, type_name):
super().__init__(type_name)
self.category = self.GLOBAL
def gen_code(self):
code = []
extra = []
# generate code now
t = self.compound_type.gen_type_name(unqualified=True)
code.append("extern {:s} {:s};\n\n".format(t, self.type_name))
return (code, extra)
class BitFieldDefinition(IGenCode, ICustomizedDefinition):
TYPE_TEMPLATE = "templates/bitfield-type.tmpl"
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.length = -1
self.unit_type = "uint64_t"
self.unit_size = -1
self.declarations = []
self.extensions = []
self.category = self.BITFIELD
self._ranges = None
self.const = False
self._signed = False
self._has_set_ops = None
self._template = None
def update_unit_info(self):
if self.length <= 8:
self.unit_size = 8
self.unit_type = "uint8_t"
elif self.length <= 16:
self.unit_size = 16
self.unit_type = "uint16_t"
elif self.length <= 32:
self.unit_size = 32
self.unit_type = "uint32_t"
elif self.length <= 64:
self.unit_size = 64
self.unit_type = "uint64_t"
else:
self.unit_size = 64
self.unit_type = "uint64_t"
@property
def size(self):
return math.ceil(self.length / self.unit_size) * self.unit_size // 8
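        # Rounded up to whole storage units; e.g. a 33-bit bitfield occupies
        # one 64-bit unit, so its size is 8 bytes.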
@property
def ranges(self):
if self._ranges is None:
self._update_layout()
return self._ranges
@property
def bitsize(self):
return max(r[1] + 1 for r in self.ranges.alloc_list)
@property
def default_alignment(self):
return self.unit_size // 8
@property
def is_signed(self):
return self._signed
@property
def _all_declarations(self):
for d in self.declarations:
yield d
for e in self.extensions:
for d in e.declarations:
yield d
@property
def fields(self):
items = []
for d in self._all_declarations:
if d.compound_type is not None:
items.append(d)
items = sorted(items, key=lambda x: x.field_maps[0].mapped_bit)
return tuple(items)
@property
def all_fields_boolean(self):
return all(m.compound_type.bitsize == 1 for m in self.fields)
@property
def has_set_ops(self):
if self._has_set_ops is None:
return self.all_fields_boolean
return self._has_set_ops
@has_set_ops.setter
def has_set_ops(self, value):
self._has_set_ops = bool(value)
def _gen_definition_code(self):
"""
Return type definition code if this type needs it.
"""
if self.ranges.range_auto:
raise Exception("unhandled auto ranges")
self.length = self.ranges.free_list[0][1] - 1
self.update_unit_info()
# generate type definition by template
ns = {
"type_name": self.type_name,
"unit_type": self.unit_type,
"unit_size": self.unit_size,
"unit_cnt": self.unit_count,
"declarations": self._all_declarations,
"init_values": self.init_values,
"compare_masks": self.compare_masks,
"boolean_masks": self.boolean_masks,
"all_fields_boolean": self.all_fields_boolean,
"has_set_ops": self.has_set_ops,
}
if 'bitfield-type' in templates:
template = templates['bitfield-type']
else:
template = Template.compile(file=open(self._template, 'r',
encoding='utf-8'))
templates['bitfield-type'] = template
t = template(namespaces=(ns))
return str(t)
@property
def unit_count(self):
return math.ceil(self.length / self.unit_size)
@property
def init_values(self):
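        # Per-unit initial values computed from the members' declared
        # defaults (one integer per storage unit).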
init_value = 0
for d in self._all_declarations:
if d.default is None:
continue
val = int(d.default.value)
if d.field_length is None and val != 0:
raise DSLError(
"bitfield others must not have a nonzero default",
d.member_name)
if val == 0:
continue
if val.bit_length() > d.field_length:
raise DSLError("Bitfield default value does not fit in field",
d.member_name)
for field_map in d.field_maps:
field_mask = (1 << field_map.length) - 1
# First mask out any reserved bits that have been replaced by a
# field in an extension.
init_value &= ~(field_mask << field_map.mapped_bit)
init_value |= (((val >> field_map.field_bit) & field_mask) <<
field_map.mapped_bit)
unit_mask = (1 << self.unit_size) - 1
return tuple((init_value >> (i * self.unit_size)) & unit_mask
for i in range(self.unit_count))
@property
def compare_masks(self):
"""
Tuple of per-unit masks of non-writeonly fields.
This is used for generating comparison operations.
"""
compare_mask = 0
for d in self.fields:
if d.is_writeonly:
continue
for field_map in d.field_maps:
field_mask = (1 << field_map.length) - 1
compare_mask |= field_mask << field_map.mapped_bit
unit_mask = (1 << self.unit_size) - 1
return tuple((compare_mask >> (i * self.unit_size)) & unit_mask
for i in range(self.unit_count))
@property
def boolean_masks(self):
"""
Tuple of per-unit masks of boolean typed fields.
This is used for generating bitwise set operations that exclude non-
boolean fields, if there are any. This allows the binary set
operations to be constructed such that any non-boolean fields from the
left hand argument are preserved in the result.
"""
boolean_mask = 0
for d in self.fields:
if d.compound_type.bitsize != 1:
continue
for field_map in d.field_maps:
field_mask = (1 << field_map.length) - 1
boolean_mask |= field_mask << field_map.mapped_bit
unit_mask = (1 << self.unit_size) - 1
return tuple((boolean_mask >> (i * self.unit_size)) & unit_mask
for i in range(self.unit_count))
def gen_code(self):
code = []
extra = []
assert self._template is not None
code.append(self._gen_definition_code())
# generate getters and setters for all declarations
for d in self._all_declarations:
if d.compound_type is None:
continue
if d.bitfield_specifier is None:
raise DSLError("each declaration needs to specify logical" +
" physical bit map", d.member_name)
else:
c, e = d.gen_code()
code += c
extra += e
return (code, extra)
def get_template_deps(self):
templates = []
for d in self._all_declarations:
if d.compound_type is None:
continue
else:
templates += d.get_template_deps()
return templates + [os.path.join(__loc__, self.TYPE_TEMPLATE)]
def update(self):
super().update()
if self._ranges is None:
self._update_layout()
self._template = os.path.join(__loc__, self.TYPE_TEMPLATE)
def _update_layout(self):
"""
Determine the layout of the bitfield.
"""
for e in self.extensions:
for name in e.delete_items:
found = False
for i, d in enumerate(self.declarations):
if str(d.member_name) == name:
del (self.declarations[i])
found = True
break
if not found:
raise DSLError("can't delete unknown member", name)
self._ranges = BitFieldRangeCollector(self.length)
for d in self._all_declarations:
d.update_ranges(self._ranges, self.unit_size)
used_names = set()
for d in self._all_declarations:
if d.is_ignore:
continue
if d.member_name in used_names:
raise DSLError("'bitfield {:s}': each member needs to"
" have a unique name".format(self.type_name),
d.member_name)
used_names.add(d.member_name)
# if bitfield is constant, update all members
if self.const:
d.set_const()
            # propagate definition information to the declarations
d.bf_type_name = self.type_name
d.unit_type = self.unit_type
d.unit_size = self.unit_size
d.ranges = self._ranges
d.update()
@property
def dependencies(self):
"""
Return all definitions that this type relies on.
Note, pointers and other non definitions are not considered
dependencies as they can be forward declared.
"""
for m in self.fields:
yield from m.compound_type.dependencies
class StructureExtension(IExtension):
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.indicator = type_name
self.module_name = None
self.category = self.STRUCTURE
def link(self, definition):
self.prefix = self.module_name
definition.extensions.append(self)
for d in self.declarations:
d.module_name = self.prefix
def _members(self, prefix=None):
if prefix is not None and self.prefix is not None:
p = prefix + "_" + self.prefix
elif prefix is not None:
p = prefix
else:
p = self.prefix
return itertools.chain(*(d.get_members(prefix=p)
for d in self.declarations))
class ObjectExtension(StructureExtension):
def __init__(self, type_name):
super().__init__(type_name)
self.category = self.OBJECT
class UnionExtension(IExtension):
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.indicator = type_name
self.module_name = None
self.definition = NotImplemented
self.category = self.UNION
def link(self, definition):
self.prefix = self.module_name
definition.extensions.append(self)
def _members(self, prefix=None):
if prefix is not None and self.prefix is not None:
p = prefix + "_" + self.prefix
elif prefix is not None:
p = prefix
else:
p = self.prefix
for d in itertools.chain(self.declarations):
members = d.get_members(prefix=p)
if len(members) > 1:
raise DSLError("Unions must not contain flattened objects",
d.member_name)
yield from members
class EnumerationExtension(IExtension):
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.indicator = type_name
self.module_name = None
self.category = self.ENUMERATION
self._enumerators = []
def add_enumerator(self, e):
"""
Add an enumerator to the extension.
"""
self._enumerators.append(e)
def link(self, definition):
for e in self._enumerators:
definition.add_enumerator(e)
class BitFieldExtension(IExtension, IGenCode, IUpdate):
def __init__(self, type_name):
super().__init__()
self.type_name = type_name
self.indicator = type_name
self.module_name = None
self.category = self.BITFIELD
self.delete_items = set()
def link(self, definition):
# FIXME check if declarations in extension overlap the original bit
# fields
# there's only one level for bitfield extension
# change the name of declaration as ModuleName__member_name
if self.module_name is not None:
self.prefix = self.module_name
# update definition's declarations list
definition.extensions.append(self)
def update(self):
super().update()
# Set the module prefix on all our declarations
if self.prefix is not None:
for d in self.declarations:
d.prefix = self.prefix
def add_delete_member(self, name):
if name in self.delete_items:
raise DSLError("delete item: {:s} duplicated".format(name), name)
self.delete_items.add(name)
def gen_code(self):
code = []
extra = []
for d in self.declarations:
c, e = d.gen_code()
code += c
extra += e
return (code, extra)
class Qualifier:
def __init__(self, name):
self.name = name
@property
def is_const(self):
return self.name == 'const'
@property
def is_writeonly(self):
return False
@property
def is_restrict(self):
return self.name == 'restrict'
@property
def is_atomic(self):
return False
@property
def is_aligned(self):
return False
@property
def is_packed(self):
return False
@property
def is_contained(self):
return False
@property
def is_lockable(self):
return False
@property
def is_optimized(self):
return False
@property
def is_group(self):
return False
def gen_qualifier(self):
return [self.name]
def __eq__(self, other):
return (str(self.name) == str(other.name))
def __hash__(self):
return hash(str(self.name))
class AlignedQualifier(Qualifier):
def __init__(self, name, align_bytes):
super().__init__(name)
self._align_bytes_expr = align_bytes
self._align_bytes = None
@property
def align_bytes(self):
if self._align_bytes is None:
align_bytes = int(self._align_bytes_expr)
if align_bytes <= 0:
raise DSLError("Alignment {:d} is not positive"
.format(align_bytes), self.name)
if align_bytes != 1 << (align_bytes - 1).bit_length():
raise DSLError("Alignment {:d} is not a power of two"
.format(align_bytes), self.name)
self._align_bytes = align_bytes
return self._align_bytes
def gen_qualifier(self):
return ["alignas({:d})".format(self.align_bytes)]
@property
def is_aligned(self):
return self.align_bytes
def __eq__(self, other):
return (id(self) == id(other))
def __hash__(self):
return hash(id(self))
class GroupQualifier(Qualifier):
def __init__(self, name, group):
super().__init__(name)
self.group = group
def gen_qualifier(self):
return [""]
@property
def is_group(self):
return True
class AtomicQualifier(Qualifier):
def gen_qualifier(self):
return ["_Atomic"]
@property
def is_atomic(self):
return True
class PackedQualifier(Qualifier):
def gen_qualifier(self):
return ["__attribute__((packed))"]
@property
def is_packed(self):
return True
class ContainedQualifier(Qualifier):
def gen_qualifier(self):
return [""]
@property
def is_contained(self):
return True
class WriteonlyQualifier(Qualifier):
def gen_qualifier(self):
return [""]
@property
def is_writeonly(self):
return True
class LockableQualifier(Qualifier):
def __init__(self, name):
super().__init__(name)
self.resource_name = None
def gen_qualifier(self):
if self.resource_name is None:
raise DSLError(
"Only structure, object and union definitions may be lockable",
self.name)
return ['__attribute__((capability("{:s}")))'
.format(self.resource_name)]
@property
def is_lockable(self):
return True
class OptimizeQualifier(Qualifier):
def __init__(self, name):
super().__init__(name)
self.category = None
def gen_qualifier(self):
if self.category is None:
raise DSLError(
"Only structure and object definitions may be optimized",
self.name)
return [""]
@property
def is_optimized(self):
return True
class BitFieldRangeCollector:
"""
    BitFieldRangeCollector manages the bit range [0, length).
    It handles allocation and tracking of bit ranges for bitfield
    declarations.
"""
def __init__(self, length):
if length == -1:
self.range_auto = True
else:
self.range_auto = False
self.free_list = [(0, length - 1 if length != -1 else 0)]
self.alloc_list = []
self.reserved_list = []
self.origLength = length
def insert_range(self, bit_range, declaration):
"""
        Check whether [start, end] lies entirely within a free range.
        If it does, remove that span from the free list (recording it in the
        allocated or reserved list) and return True. Otherwise return False.
"""
start = bit_range[0]
end = bit_range[0] + bit_range[1] - 1
if start < 0 or end < 0 or start > end:
return False
        # NOTE: this is only correct because we stop iterating as soon as the
        # target range is found.
for i, (s, e) in enumerate(self.free_list):
if s <= start and e >= end:
if declaration.is_ignore:
self.reserved_list.append((start, end, declaration))
# FIXME: check for reserved overlaps
return True
del self.free_list[i]
if s < start:
self.free_list.insert(i, (s, start - 1))
i += 1
if e > end:
self.free_list.insert(i, (end + 1, e))
self.alloc_list.append((start, end, declaration))
return True
return False
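    # Illustrative example: with free_list == [(0, 31)], inserting bit_range
    # (4, 4) (bits 4..7) for a normal declaration splits the free list into
    # [(0, 3), (8, 31)] and records (4, 7, declaration) in alloc_list.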
def alloc_range(self, length, declaration):
"""
        Allocate a contiguous range for an automatically mapped field.
        This is only used for automatic logical to physical mapping, which
        only happens for software-defined bitfields. Such bitfields rarely
        have scattered members, so contiguous free space is easy to find.
        Typical bitfield lengths are at most 64 bits (maybe 128), so there
        is no need to handle alignment here yet.
        Returns the lowest fragment that satisfies the length requirement,
        and marks it as used.
"""
# FIXME: - option to keep fragment in one word (for bitfields) ?
if self.range_auto:
self.free_list[0] = (self.free_list[0][0],
self.free_list[0][1] + length - 1)
ret = None
for (s, e) in self.free_list:
sz = e - s + 1
if sz >= length:
ret = (s, length)
self.insert_range(ret, declaration)
break
return ret
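    # Illustrative example: with free_list == [(0, 3), (8, 31)], a request of
    # length 6 skips the 4-bit fragment and returns (8, 6), marking bits
    # 8..13 as allocated.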
def is_empty(self):
return len(self.free_list) == 0
def __repr__(self):
msg = []
for s, e in self.free_list:
msg.append("[" + str(e) + ":" + str(s) + "]")
return 'Range len(%d), ranges available: %s' % \
(self.origLength, ','.join(msg))
class TransformTypes(Transformer):
"""
    Bottom-up traversal helper. It overrides Transformer to perform the
    traversal, using CommonTree as the default tree node type.
"""
def __init__(self, program):
super().__init__()
self.program = program
def __default__(self, children, meta, data):
"Default operation on tree (for override)"
import ast_nodes
return ast_nodes.CommonListener(self.program, children, meta, data)
def node_handler(self, name):
import ast_nodes
x = getattr(ast_nodes, name)
if not inspect.isclass(x):
raise AttributeError
if not issubclass(x, ast_nodes.CommonTree):
raise AttributeError
        if x is ast_nodes.CommonTree:
            # The generic base class itself is not a valid handler.
            raise AttributeError
return x
def _call_userfunc(self, tree, new_children=None):
# Assumes tree is already transformed
children = new_children if new_children is not None else tree.children
try:
f = self.node_handler(tree.data)
except AttributeError:
ret = self.__default__(children, tree.meta, tree.data)
else:
ret = f(self.program, children, tree.meta)
return ret
def _transform_tree(self, tree):
children = list(self._transform_children(tree.children))
ret = self._call_userfunc(tree, children)
return ret
def _transform_children(self, children):
import ast_nodes
for c in children:
try:
if isinstance(c, Tree):
yield self._transform_tree(c)
elif isinstance(c, Token):
yield ast_nodes.TToken(str(c), c, self.program)
else:
yield c
except Discard:
pass
| 91,723 | 28.493248 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/typed/abi.py | #!/usr/bin/env python3
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import abc
class CType:
def __init__(self, name, is_signed, size, align=None, bitsize=None):
self.name = name
self.is_signed = is_signed
self.size = size
self.align = size if align is None else align
self.bitsize = bitsize
class ABI(metaclass=abc.ABCMeta):
"""
Abstract base class for ABI definitions.
"""
def __init__(self):
basic_ctypes = (
CType('bool', False, 1, bitsize=1),
CType('uint8_t', False, 1),
CType('uint16_t', False, 2),
CType('uint32_t', False, 4),
CType('uint64_t', False, 8),
CType('uintptr_t', False, self.pointer_size,
align=self.pointer_align),
CType('int8_t', True, 1),
CType('int16_t', True, 2),
CType('int32_t', True, 4),
CType('int64_t', True, 8),
CType('intptr_t', True, self.pointer_size,
align=self.pointer_align),
CType('char', self.signed_char, 1),
CType('size_t', False, self.pointer_size,
align=self.pointer_align),
CType('uregister_t', False, self.register_size,
align=self.register_align),
)
self.c_types = {t.name: t for t in basic_ctypes}
@staticmethod
def is_power2(val):
"""Returns true if number is a power of two"""
return ((val & (val - 1)) == 0) and (val > 0)
@abc.abstractproperty
def pointer_size(self):
"""The size of a pointer, in bytes."""
raise NotImplementedError
@property
def pointer_align(self):
"""The alignment of a pointer, in bytes."""
return self.pointer_size
@abc.abstractproperty
def register_size(self):
"""The size of a register, in bytes."""
raise NotImplementedError
@property
def register_align(self):
"""The alignment of a register, in bytes."""
return self.pointer_size
@abc.abstractproperty
def signed_char(self):
"""True if the char type is signed."""
raise NotImplementedError
def get_c_type(self, type_name):
return self.c_types[type_name]
@abc.abstractmethod
def get_c_type_name(self, abi_type_name):
"""Return the c name for the abi type name."""
raise NotImplementedError
def layout_struct_member(self, current_offset, current_alignment,
next_size, next_alignment):
"""
Return the offset at which a new struct member should be placed.
In principle this is entirely implementation-defined, but nearly all
implementations use the same algorithm: add enough padding bytes to
align the next member, and no more. Any ABI that does something
different can override this.
The current_offset argument is the size of the structure in bytes up
to the end of the last member. This is always nonzero, because
structures are not allowed to be padded at the start, so this function
is only called for the second member onwards.
        The current_alignment argument is the largest alignment that has been
        seen in the members added to the struct so far.
If next_size is not None, the next_size and next_alignment arguments
are the size and alignment of the member that is being added to the
struct. The return value in this case is the offset of the new member.
If next_size is None, all members have been added and this function
should determine the amount of padding at the end of the structure.
The return value in this case is the final size of the structure.
The return value in any case should be an integer that is not less
than current_offset.
"""
if next_size is not None:
alignment = next_alignment
else:
alignment = current_alignment
assert (self.is_power2(alignment))
align_mask = alignment - 1
return (current_offset + align_mask) & ~align_mask
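    # Worked example (illustrative): appending a uint64_t (size 8, alignment
    # 8) to a struct whose current size is 1 byte returns offset 8, i.e. 7
    # padding bytes are inserted; finalising (next_size is None) a struct of
    # size 12 whose largest member alignment is 8 pads it out to 16.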
@abc.abstractmethod
def get_enum_properties(self, e_min, e_max):
"""
Returns the size, alignment and signedness of the enum.
Calculates the underlying type properties that the ABI will use for
an enum with the given enumerator value range.
"""
raise NotImplementedError
class AArch64ABI(ABI):
abi_type_map = {
'uregister_t': 'uint64_t',
'sregister_t': 'int64_t',
}
@property
def pointer_size(self):
"""The size of a pointer, in bytes."""
return 8
@property
def register_size(self):
"""The size of a register, in bytes."""
return 8
@property
def signed_char(self):
"""True if the char type is signed."""
return True
def get_c_type_name(self, abi_type_name):
"""Return the c name for the abi type name."""
assert (abi_type_name in self.abi_type_map)
return self.abi_type_map[abi_type_name]
def get_enum_properties(self, e_min, e_max):
"""
Returns the size, alignment and signedness of the enum.
Calculates the underlying type properties that the ABI will use for
an enum with the given enumerator value range.
"""
min_bits = e_min.bit_length()
max_bits = e_max.bit_length()
signed = e_min < 0
if not signed and max_bits <= 32:
return (4, 4, False)
elif signed and max_bits <= 31 and min_bits <= 32:
return (4, 4, True)
elif not signed and max_bits <= 64:
return (8, 8, False)
elif signed and max_bits <= 63 and min_bits <= 64:
return (8, 8, True)
else:
raise NotImplementedError
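    # Illustrative examples: an enumeration covering [0, 300] fits an
    # unsigned 32-bit type, giving (4, 4, False); a range of [-5, 10] needs a
    # signed type, giving (4, 4, True); [0, 2**40] falls back to
    # (8, 8, False).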
| 5,962 | 31.763736 | 78 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/misc/get_genfiles.py | #!/usr/bin/env python3
# coding: utf-8
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Simple script that parses compile_commands.json to extract generated source
and header files, for passing to cscope or other source indexing tools.
"""
import json
import sys
import os
import re
build_dir = 'build'
commands_file = 'compile_commands.json'
compile_commands = []
conf_default_weight = 60
build_preferences = {}
files = set()
incdirs = set()
include_regex = re.compile('(-iquote|-I) (\\w+[-\\{:s}\\w]+)'.format(os.sep))
imacros_regex = re.compile('(-imacros) (\\w+[-\\{:s}\\w.]+)'.format(os.sep))
for dir, dir_dirs, dir_files in os.walk('config/featureset'):
regex = re.compile('^# indexer-weight: (\\d+)')
for file in dir_files:
file_base = os.path.splitext(file)[0]
if file.endswith('.conf'):
build_preferences[file_base] = conf_default_weight
infile = os.path.join(dir, file)
with open(infile, 'r') as f:
for i, line in enumerate(f):
weights = regex.findall(line)
if weights:
build_preferences[file_base] = int(weights[0])
for dir, dir_dirs, dir_files in os.walk(build_dir):
if commands_file in dir_files:
x = os.stat(dir)
time = max(x.st_atime, x.st_mtime, x.st_ctime)
compile_commands.append((time, os.path.join(dir, commands_file)))
if not compile_commands:
print('no build found!', file=sys.stderr)
exit(1)
newest = 0.0
for time, f in compile_commands:
x = os.stat(f)
time = max(x.st_atime, x.st_mtime, x.st_ctime, time)
for p in build_preferences.keys():
if p in f:
            # Boost the timestamp to prefer these builds
time += build_preferences[p]
if time > newest:
newest = time
infile = f
if len(compile_commands) > 1:
print('warning: multiple builds found, using: {:s}'.format(
infile), file=sys.stderr)
try:
with open(infile, 'r') as f:
compile = json.loads(f.read())
for s in compile:
if s['file'].startswith(build_dir):
files.add(s['file'])
cmd = s['command']
for t, dir in include_regex.findall(cmd):
if dir.startswith(build_dir):
incdirs.add(dir)
for t, f in imacros_regex.findall(cmd):
files.add(f)
except FileNotFoundError:
exit(1)
for dir in incdirs:
try:
for f in os.listdir(dir):
filename = os.path.join(dir, f)
if filename.endswith('.h'):
files.add(filename)
except OSError:
pass
for f in files:
print(f)
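# Typical usage (the file names here are illustrative): feed the printed list
# of generated sources and headers to an indexing tool, e.g.
#   python3 tools/misc/get_genfiles.py > cscope.genfiles
#   cscope -b -q -i cscope.genfiles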
| 2,739 | 26.676768 | 77 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/registers/register_gen.py | #!/usr/bin/env python3
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from Cheetah.Template import Template
import argparse
import itertools
import subprocess
import logging
import sys
logger = logging.getLogger(__name__)
valid_access_strs = \
set([''.join(x) for x in itertools.chain.from_iterable(
itertools.combinations('oOrwRW', r) for r in range(1, 6))])
class register:
def __init__(self, name, type_name, variants=[], access='rw'):
if access in ['o', 'O']:
access += 'rw'
if access not in valid_access_strs:
logger.error("Invalid access type '%s'", access)
sys.exit(1)
self.name = name
self.type_name = type_name
self._variants = variants
self._read = 'r' in access
self._write = 'w' in access
self._volatile_read = 'R' in access
self._barrier_write = 'W' in access
self._ordered = 'O' in access
self._non_ordered = 'o' in access or 'O' not in access
@property
def variants(self):
ret = []
type_name = self.type_name[:-1] if self.type_name.endswith(
'!') else self.type_name
for v in self._variants:
if v.endswith('!'):
ret.append((v[:-1],
type_name if self.type_name.endswith(
'!') else v[:-1]))
else:
ret.append(('_'.join((self.name, v)),
type_name if self.type_name.endswith(
'!') else '_'.join((type_name, v))))
if not ret:
ret = [(self.name, type_name)]
return sorted(ret)
@property
def is_readable(self):
return self._read
@property
def is_volatile(self):
return self._volatile_read
@property
def is_writable(self):
return self._write
@property
def is_writeable_barrier(self):
return self._barrier_write
@property
def need_ordered(self):
return self._ordered
@property
def need_non_ordered(self):
return self._non_ordered
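# Register list syntax accepted by generate_accessors(), reconstructed from
# the parser below; the names in these example lines are illustrative only:
#   REG_A                       -> 'rw' accessors of type REG_A
#   REG_B <REG_B_t> [v1 v2] rW  -> variants REG_B_v1/REG_B_v2 of type
#                                  REG_B_t_v1/REG_B_t_v2, readable and
#                                  write-with-barrier
# The optional access string is built from the characters 'oOrwRW' and
# defaults to 'rw'.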
def generate_accessors(template, input, ns):
registers = {}
for line in input.splitlines():
if line.startswith('//'):
continue
tokens = line.split(maxsplit=1)
if not tokens:
continue
name = tokens[0]
assert name not in registers
if len(tokens) == 1:
registers[name] = register(name, name)
continue
args = tokens[1]
type_name = name
if args.startswith('<'):
type_name, args = args[1:].split('>', maxsplit=1)
args = args.strip()
identifiers = []
if args.startswith('['):
identifiers, args = args[1:].split(']', maxsplit=1)
identifiers = identifiers.split()
args = args.strip()
if args:
registers[name] = register(name, type_name, identifiers, args)
else:
registers[name] = register(name, type_name, identifiers)
ns['registers'] = [registers[r] for r in sorted(registers.keys())]
output = str(Template(file=template, searchList=ns))
return output
def main():
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
)
args = argparse.ArgumentParser()
mode_args = args.add_mutually_exclusive_group(required=True)
mode_args.add_argument('-t', '--template',
type=argparse.FileType('r', encoding="utf-8"),
help="Template file used to generate output")
args.add_argument('-o', '--output',
type=argparse.FileType('w', encoding="utf-8"),
default=sys.stdout, help="Write output to file")
args.add_argument("-f", "--formatter",
help="specify clang-format to format the code")
args.add_argument("input", metavar='INPUT', nargs='*',
help="Input type register file to process",
type=argparse.FileType('r', encoding="utf-8"))
options = args.parse_args()
output = ""
input = ""
for f in options.input:
input += f.read()
f.close()
output += generate_accessors(options.template, input, {})
if options.formatter:
ret = subprocess.run([options.formatter], input=output.encode("utf-8"),
stdout=subprocess.PIPE)
output = ret.stdout.decode("utf-8")
if ret.returncode != 0:
raise Exception("failed to format output:\n ", ret.stderr)
options.output.write(output)
if __name__ == '__main__':
main()
| 4,773 | 27.586826 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/elf/package_apps.py | #!/usr/bin/env python3
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import logging
import sys
from elftools.elf.elffile import ELFFile
from elftools.elf.constants import SH_FLAGS, P_FLAGS
from elftools import construct
class NewSegment():
def __init__(self, base, p_align=16):
self._data = b''
hdr = construct.lib.Container()
hdr.p_type = 'PT_LOAD'
hdr.p_flags = P_FLAGS.PF_R
hdr.p_offset = 0
hdr.p_vaddr = 0
hdr.p_paddr = 0
hdr.p_filesz = 0
hdr.p_memsz = 0
hdr.p_align = p_align
self.header = hdr
# print(self.header)
def add_data(self, data):
n = len(data)
self._data += data
self.header.p_filesz += n
self.header.p_memsz += n
# print(self.header)
class NewELF():
def __init__(self, base):
self.structs = base.structs
self.header = base.header
# print(self.header)
self.segments = []
self.sections = []
for i in range(0, base.num_segments()):
seg = base.get_segment(i)
seg._data = seg.data()
self.segments.append(seg)
# print(" ", self.segments[i].header)
for i in range(0, base.num_sections()):
sec = base.get_section(i)
sec._data = sec.data()
self.sections.append(sec)
# print(" ", self.sections[i].header)
def strip(self):
print("strip() unimplemented")
def merge_segments(self, elf):
print("merging...")
p_last = 0
# Find the end of the last segment
for seg in self.segments:
last = seg.header.p_offset + seg.header.p_filesz
if last > p_last:
p_last = last
p_adj = p_last
# Append new segments
for i in range(0, elf.num_segments()):
seg = elf.get_segment(i)
seg._data = seg.data()
p_last = (p_last + (seg.header.p_align - 1)) & \
(0xffffffffffffffff ^ (seg.header.p_align - 1))
# print(hex(p_last))
seg.header.p_offset = p_last
# print(seg.header)
self.segments.append(seg)
self.header.e_phnum += 1
p_last = p_last + seg.header.p_filesz
p_off = p_last - p_adj
# print(">>", hex(p_adj), hex(p_last), hex(p_off))
# Adjust file offsets for affected sections
for sec in self.sections:
if sec.header.sh_offset >= p_adj:
# print(sec.header)
align = sec.header.sh_addralign
if align > 1:
p_off = (p_off + (align - 1)) & \
(0xffffffffffffffff ^ (align - 1))
# print("SA", hex(sec.header.sh_offset), hex(p_off),
# hex(sec.header.sh_offset + p_off))
sec.header.sh_offset += p_off
if self.header.e_shoff >= p_adj:
self.header.e_shoff += p_off
def append_segment(self, newseg):
print("appending...")
p_last = 0
# Find the end of the last segment
for seg in self.segments:
last = seg.header.p_offset + seg.header.p_filesz
if last > p_last:
p_last = last
p_adj = p_last
# Append new segment
p_last = (p_last + (newseg.header.p_align - 1)) & \
(0xffffffffffffffff ^ (newseg.header.p_align - 1))
# print(hex(p_last))
newseg.header.p_offset = p_last
# print(newseg.header)
self.segments.append(newseg)
self.header.e_phnum += 1
p_last = p_last + newseg.header.p_filesz
p_off = p_last - p_adj
# print(">>", hex(p_adj), hex(p_last), hex(p_off))
# Adjust file offsets for affected sections
for sec in self.sections:
if sec.header.sh_offset >= p_adj:
# print(sec.header)
align = sec.header.sh_addralign
if align > 1:
p_off = (p_off + (align - 1)) & \
(0xffffffffffffffff ^ (align - 1))
# print("SA", hex(sec.header.sh_offset), hex(p_off),
# hex(sec.header.sh_offset + p_off))
sec.header.sh_offset += p_off
if self.header.e_shoff >= p_adj:
self.header.e_shoff += p_off
# Insert a segment into a pre-sorted ELF
def insert_segment(self, newseg, phys):
print("inserting...")
phys_offset = self.segments[0].header.p_paddr - \
self.segments[0].header.p_vaddr
newseg.header.p_paddr = phys
newseg.header.p_vaddr = phys - phys_offset
p_adj = 0
idx = 0
# Find the position to insert segment
for seg in self.segments:
if seg.header.p_paddr > newseg.header.p_paddr:
break
idx += 1
# print(seg, hex(seg.header.p_paddr))
last = seg.header.p_offset + seg.header.p_filesz
assert (last >= p_adj)
p_adj = last
p_prev = p_adj
# Append new segment
p_adj = (p_adj + (newseg.header.p_align - 1)) & \
(0xffffffffffffffff ^ (newseg.header.p_align - 1))
# print(hex(p_adj))
newseg.header.p_offset = p_adj
# print(newseg.header)
self.segments.insert(idx, newseg)
self.header.e_phnum += 1
p_adj = p_adj + newseg.header.p_filesz
p_off = p_adj - p_prev
# print(">>", hex(p_adj), hex(p_prev), hex(p_off))
# Update file offsets of remaining segments
for seg in self.segments[idx+1:]:
last = seg.header.p_offset + seg.header.p_filesz
assert (last >= p_prev)
p_next = seg.header.p_offset + p_off
p_adj = (p_next + (seg.header.p_align - 1)) & \
(0xffffffffffffffff ^ (seg.header.p_align - 1))
seg.header.p_offset = p_adj
p_off += p_adj - p_next
# Adjust file offsets for affected sections
for sec in self.sections:
if sec.header.sh_offset >= p_prev:
# print(sec.header)
align = sec.header.sh_addralign
if align > 1:
p_off = (p_off + (align - 1)) & \
(0xffffffffffffffff ^ (align - 1))
# print("SA", hex(sec.header.sh_offset), hex(p_off),
# hex(sec.header.sh_offset + p_off))
sec.header.sh_offset += p_off
if self.header.e_shoff >= p_prev:
self.header.e_shoff += p_off
def write(self, f):
print("writing...")
# print("EH", self.header)
# Write out the ELF header
f.seek(0)
self.structs.Elf_Ehdr.build_stream(self.header, f)
# Write out the ELF program headers
f.seek(self.header.e_phoff)
for seg in self.segments:
# print("PH", seg.header)
self.structs.Elf_Phdr.build_stream(seg.header, f)
# Write out the ELF segment data
for seg in self.segments:
f.seek(seg.header.p_offset)
f.write(seg._data)
# Write out the ELF section headers
f.seek(self.header.e_shoff)
for sec in self.sections:
# print("SH", sec.header)
self.structs.Elf_Shdr.build_stream(sec.header, f)
# Write out the ELF non-segment based sections
for sec in self.sections:
# Copy extra sections, mostly strings and debug
if sec.header.sh_flags & SH_FLAGS.SHF_ALLOC == 0:
# print("SH", sec.header)
f.seek(sec.header.sh_offset)
f.write(sec._data)
continue
def package_files(base, app, runtime, output):
base_elf = ELFFile(base)
new = NewELF(base_elf)
symtab = base_elf.get_section_by_name('.symtab')
pkg_phys = symtab.get_symbol_by_name('image_pkg_start')
if pkg_phys:
print(pkg_phys[0].name, hex(pkg_phys[0].entry.st_value))
pkg_phys = pkg_phys[0].entry.st_value
else:
logging.error("can't find symbol 'image_pkg_start'")
sys.exit(1)
# Describe the package header structure
pkg_hdr = construct.Struct(
'pkg_hdr',
construct.ULInt32('ident'),
construct.ULInt32('items'),
construct.Array(
3,
construct.Struct(
'list',
construct.ULInt32('type'),
construct.ULInt32('offset'))
),
)
hdr = construct.lib.Container()
# Initialize package header
hdr.ident = 0x47504b47 # GPKG
hdr.items = 0
items = []
for i in range(0, 3):
item = construct.lib.Container()
item.type = 0
item.offset = 0
items.append(item)
hdr.list = items
hdr_len = len(pkg_hdr.build(hdr))
# Add the runtime ELF image
run_data = runtime.read()
run_data_len = len(run_data)
pad = ((run_data_len + 0x1f) & ~0x1f) - run_data_len
if pad:
run_data += b'\0' * pad
run_data_len += pad
hdr.list[0].type = 0x1 # Runtime
hdr.list[0].offset = hdr_len
hdr.items += 1
# Add the application ELF image
app_data = app.read()
app_data_len = len(app_data)
pad = ((app_data_len + 0x1f) & ~0x1f) - app_data_len
if pad:
app_data += b'\0' * pad
app_data_len += pad
hdr.list[1].type = 0x2 # Application
hdr.list[1].offset = hdr_len + run_data_len
hdr.items += 1
# note, we align segment to 4K for signing tools
segment = NewSegment(base_elf, 4096)
segment.add_data(pkg_hdr.build(hdr))
segment.add_data(run_data)
segment.add_data(app_data)
new.insert_segment(segment, pkg_phys)
new.write(output)
def main():
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
)
args = argparse.ArgumentParser()
args.add_argument('-a', "--app",
type=argparse.FileType('rb'),
help="Input application ELF",
required=True)
args.add_argument('-r', "--runtime",
type=argparse.FileType('rb'),
help="Input runtime ELF",
required=True)
args.add_argument('-o', '--output',
type=argparse.FileType('wb'),
default=sys.stdout,
required=True,
help="Write output to file")
args.add_argument('input', metavar='INPUT', nargs=1,
type=argparse.FileType('rb'),
help="Input hypervisor ELF")
options = args.parse_args()
package_files(options.input[0], options.app, options.runtime,
options.output)
if __name__ == '__main__':
main()
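# Example invocation (illustrative file names):
#   python3 package_apps.py -a rootvm.elf -r runtime.elf -o hypvm.elf hyp.elf
# This inserts a 'GPKG' segment holding the runtime and application ELFs at
# the physical address of the hypervisor's image_pkg_start symbol.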
| 11,003 | 29.823529 | 68 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/utils/__init__.py | 0 | 0 | 0 | py |
|
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/utils/genfile.py | # coding: utf-8
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
"""
import sys
import io
import argparse
class _ReplaceFileMixin(object):
def __init__(self, name, mode, encoding=None):
super().__init__()
self._name = name
self._mode = mode
self._encoding = encoding
@property
def name(self):
return self._name
def close(self):
tmp = io.open(self._name, self._mode.replace('w', 'r'),
encoding=self._encoding)
old = tmp.read()
tmp.close()
self.seek(0)
new = self.read()
if old != new:
replace = io.open(self._name, self._mode, encoding=self._encoding)
replace.write(new)
replace.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class _ReplaceBinaryFile(_ReplaceFileMixin, io.BytesIO):
pass
class _ReplaceTextFile(_ReplaceFileMixin, io.StringIO):
pass
class _GenFileFactory(object):
def __init__(self, mode, encoding=None):
self._mode = mode
if mode not in ('w', 'wt', 'wb'):
raise ValueError("mode {:s} not supported".format(mode))
if encoding is None and 'b' not in mode:
# Default to UTF-8 for text files
encoding = 'utf-8'
self._encoding = encoding
def __call__(self, p):
if sys.hexversion < 0x03030000:
# Exclusive file creation ('x' mode) isn't available before Python
# 3.3, so fall back to just replacing the file.
return io.open(p, self._mode, encoding=self._encoding)
try:
return io.open(p, self._mode.replace('w', 'x'),
encoding=self._encoding)
except FileExistsError:
if 'b' in self._mode:
return _ReplaceBinaryFile(p, self._mode)
else:
return _ReplaceTextFile(p, self._mode, encoding=self._encoding)
class GenFileType(_GenFileFactory, argparse.FileType):
def __call__(self, p):
if p == '-':
assert 'w' in self._mode
return sys.stdout
try:
return super().__call__(p)
except OSError as e:
raise argparse.ArgumentTypeError(
"can't open {:s}: {:s}".format(p, str(e)))
def GenFile(name, mode, encoding=None):
return _GenFileFactory(mode, encoding=encoding)(name)
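# Usage sketch (the output name and data below are hypothetical):
#
#     with GenFile('generated.h', 'w') as f:
#         f.write(generated_text)
#
# An already existing output file is only rewritten when its contents
# actually change, so timestamps of unchanged generated files are preserved
# and spurious rebuilds are avoided.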
| 2,524 | 26.150538 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/build/__main__.py | # coding: utf-8
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Gunyah build system.
This module is invoked by configure.py with the global variable `graph` set to
an instance of AbstractBuildGraph, which can be used to add rules, targets and
variables to the build graph.
"""
import os
import sys
import logging
import inspect
import re
from collections import namedtuple
from io import open
from ..utils.genfile import GenFile
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
# Silence flake8 warnings about the externally-defined graph variable
graph = graph # noqa: F821
logging.basicConfig()
logger = logging.getLogger(__name__)
#
# General setup
#
def relpath(path):
return os.path.relpath(path, start=graph.root_dir)
class Items(dict):
def add(self, key, value):
if key not in self:
super(Items, self).__setitem__(key, value)
else:
raise KeyError("Item duplicate {:s}".format(key))
class module_set(set):
def __init__(self):
self.module_re = re.compile(
r'[A-Za-z][A-Za-z0-9_]*([/][A-Za-z][A-Za-z0-9_]*)*')
super(module_set, self).__init__()
def add(self, value):
if not self.module_re.fullmatch(value):
print("invalid module name:", value)
sys.exit(1)
super(module_set, self).add(value)
build_dir = graph.build_dir
config_base = 'config'
module_base = 'hyp'
arch_base = os.path.join(module_base, 'arch')
interface_base = os.path.join(module_base, 'interfaces')
conf_includes = set()
modules = module_set()
modules.add('arch')
interfaces = set()
objects = set()
external_objects = set()
guestapis = set()
types = set()
hypercalls = set()
registers = list()
test_programs = set()
sa_html = set()
asts = set()
defmap_frags = set()
event_sources = set()
modules_with_events = set()
interfaces_with_events = set()
template_engines = {}
TemplateEngine = namedtuple('TemplateEngine', ['sources', 'config'])
first_class_objects = Items()
first_class_templates = list()
template_engines['first_class_object'] = \
TemplateEngine(first_class_templates, None)
typed_templates = list()
template_engines['typed'] = \
TemplateEngine(typed_templates, None)
typed_guestapi_templates = list()
template_engines['typed_guestapi'] = \
TemplateEngine(typed_guestapi_templates, None)
hypercalls_templates = list()
template_engines['hypercalls'] = \
TemplateEngine(hypercalls_templates, None)
registers_templates = list()
template_engines['registers'] = \
TemplateEngine(registers_templates, None)
shvars_re = re.compile(r'\$((\w+)\b|{(\w+)})')
def var_subst(w):
def shrepl(match):
name = match.group(2) or match.group(3)
try:
return graph.get_env(name)
except KeyError:
logger.error("Undefined environment variable: $%s", name)
sys.exit(1)
n = 1
while n:
w, n = shvars_re.subn(shrepl, w)
return w
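# For example (illustrative value), with LLVM=/opt/llvm in the environment,
# var_subst('${LLVM}/bin/clang') returns '/opt/llvm/bin/clang'; a reference
# to an undefined variable is reported and aborts configuration.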
#
# Variant setup
#
def arch_match(arch_name):
return arch_name in target_arch_names
def process_variant_conf(variant_key, conf, basename):
graph.add_gen_source(conf)
platform = variant_key == 'platform'
featureset = variant_key == 'featureset'
allow_arch = variant_key and not platform
if platform:
if basename in target_arch_names:
logger.error("existing arch: %s", basename)
target_arch_names.append(basename)
with open(conf, 'r', encoding='utf-8') as f:
for s in f.readlines():
words = s.split()
if not words or words[0].startswith('#'):
# Skip comments or blank lines
pass
elif words[0] == 'include':
include_conf = os.path.join(config_base,
words[1] + '.conf')
if include_conf not in conf_includes:
process_variant_conf(None, include_conf, None)
conf_includes.add(include_conf)
elif featureset and words[0] == 'platforms':
global featureset_platforms
featureset_platforms = words[1:]
elif platform and words[0] == 'base_arch':
arch_conf = os.path.join(config_base, 'arch',
words[1] + '.conf')
process_variant_conf(variant_key, arch_conf, words[1])
elif platform and words[0] == 'alias_arch':
if words[1] in target_arch_names:
logger.error("Alias existing arch: %s",
words[1])
target_arch_names.append(words[1])
elif platform and words[0] == 'is_abi':
global abi_arch
if abi_arch is not None:
logger.warning("Duplicate abi definitions: %s and %s",
abi_arch, basename)
abi_arch = basename
elif platform and words[0] == 'defines_link':
global link_arch
if link_arch is not None:
logger.warning("Duplicate link definitions: %s and %s",
link_arch, basename)
link_arch = basename
elif platform and words[0] == 'target_triple':
global target_triple
if target_triple is not None:
logger.warning(
"Duplicate target triple definitions: %s and %s",
target_triple, words[1])
target_triple = words[1]
elif words[0] == 'module':
modules.add(words[1])
elif words[0] == 'flags':
variant_cflags.extend(map(var_subst, words[1:]))
elif words[0] == 'ldflags':
variant_ldflags.extend(map(var_subst, words[1:]))
elif words[0] == 'configs':
for c in map(var_subst, words[1:]):
add_global_define(c)
elif allow_arch and words[0] == 'arch_module':
if arch_match(words[1]):
modules.add(words[2])
elif allow_arch and words[0] == 'arch_flags':
if arch_match(words[1]):
variant_cflags.extend(map(var_subst, words[2:]))
elif allow_arch and words[0] == 'arch_ldflags':
if arch_match(words[1]):
variant_ldflags.extend(map(var_subst, words[2:]))
elif allow_arch and words[0] == 'arch_configs':
if arch_match(words[1]):
for c in map(var_subst, words[2:]):
add_global_define(c)
else:
# TODO: dependencies, configuration variables, etc
# Restructure this to use a proper parser first
logger.error('Unknown token "%s" in %s', words[0], conf)
sys.exit(1)
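# An illustrative variant .conf fragment handled by process_variant_conf();
# the directive names are real, the values are made up:
#
#   include misc/common
#   module vectors
#   flags -DEXAMPLE_FLAG
#   configs EXAMPLE_CONFIG=1
#   arch_module example_arch example_module
#
# Platform configs may additionally use base_arch, alias_arch, is_abi,
# defines_link and target_triple; featureset configs may use platforms.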
true_strings = ('true', 't', '1', 'yes', 'y')
false_strings = ('false', 'f', '0', 'no', 'n')
all_arg = graph.get_argument('all', 'false').lower()
if all_arg in true_strings:
default_all_variants = True
elif all_arg in false_strings:
default_all_variants = False
else:
logger.error("Argument all= must have a boolean value, not '%s'", all_arg)
sys.exit(1)
missing_variant = False
abi_arch = None
link_arch = None
target_triple = None
target_arch_names = []
variant_cflags = []
variant_cppflags = []
variant_defines = []
variant_ldflags = []
featureset_platforms = ['*']
#
# Configs sanity checking
#
configs = {}
def check_global_define(d):
try:
define, val = d.split('=')
except ValueError:
logger.warning("invalid configuration: %s", d)
if define in configs:
if configs[define] == val:
logger.warning("Duplicate configuration: %s", d)
else:
logger.error("Conflicting configuration: %s and %s",
'='.join([define, configs[define]]), d)
sys.exit(-1)
configs[define] = val
def add_global_define(d):
check_global_define(d)
variant_defines.append(d)
for variant_key in ('platform', 'featureset', 'quality'):
try:
variant_value = graph.get_env('VARIANT_' + variant_key)
except KeyError:
variant_arg = graph.get_argument(
variant_key, 'all' if default_all_variants else None)
import glob
known_variants = frozenset(
os.path.splitext(os.path.basename(f))[0]
for f in glob.iglob(os.path.join(
config_base, variant_key, '*.conf')))
if not known_variants:
logger.error('No variants known for key %s', variant_key)
sys.exit(1)
if variant_arg is None:
logger.error('No variant specified for key %s; choices: %s',
variant_key, ', '.join(known_variants))
missing_variant = True
continue
if variant_arg == 'all':
selected_variants = known_variants
else:
selected_variants = frozenset(variant_arg.split(','))
if not (selected_variants <= known_variants):
logger.error("Unknown variants specified for key %s: %s; "
"choices: %s", variant_key,
', '.join(selected_variants - known_variants),
', '.join(known_variants))
missing_variant = True
continue
for val in selected_variants:
graph.add_variant(os.path.join(build_dir, val))(**{
'VARIANT_' + variant_key: val
})
# Don't build anything until all variants are configured
sys.exit()
variant_conf = os.path.join(config_base, variant_key,
variant_value + '.conf')
process_variant_conf(variant_key, variant_conf, variant_value)
if len(featureset_platforms) == 1 and \
featureset_platforms[0] == '*':
pass
else:
if graph.get_env('VARIANT_platform') not in featureset_platforms:
            # Skip platforms not supported in the featureset
sys.exit(0)
if missing_variant:
sys.exit(1)
for a in target_arch_names:
graph.append_env('CODEGEN_ARCHS', '-a ' + a)
try:
partial_link_arg = graph.get_env('PARTIAL_LINK')
except KeyError:
partial_link_arg = graph.get_argument('partial_link', '0')
do_partial_link = partial_link_arg.lower() in true_strings
try:
sa_enabled_arg = graph.get_env('ENABLE_SA')
except KeyError:
sa_enabled_arg = graph.get_argument('enable_sa', '0')
do_sa_html = sa_enabled_arg.lower() in true_strings
#
# Match available template generators
#
def template_match(template_engine, d):
try:
return template_engines[template_engine]
except KeyError:
logger.error('Unknown template system "%s" in %s', template_engine, d)
sys.exit(1)
#
# Architecture setup
#
# Add the arch-specific include directories for asm/ headers
for arch_name in target_arch_names:
d = os.path.join(arch_base, arch_name, 'include')
graph.append_env('CPPFLAGS', '-I ' + relpath(d))
# Add the arch generic include directory for asm-generic/ headers
graph.append_env('CPPFLAGS', '-I ' + os.path.join(
relpath(arch_base), 'generic', 'include'))
# Set up for a freestanding ARMv8.2 EL2 target
graph.append_env('TARGET_CFLAGS', '-ffreestanding')
graph.append_env('TARGET_CFLAGS', '-ftls-model=local-exec')
graph.append_env('TARGET_CFLAGS', '-fpic')
if not do_partial_link:
graph.append_env('TARGET_LDFLAGS', '-pie')
# Enable stack protection by default
graph.append_env('TARGET_CFLAGS', '-fstack-protector-strong')
#
# Toolchain setup
#
graph.add_env('ROOT_DIR', os.path.realpath(graph.root_dir))
graph.add_env('BUILD_DIR', os.path.realpath(build_dir))
try:
llvm_root = graph.get_env('LLVM')
except KeyError:
logger.error(
"Please set $LLVM to the root of the prebuilt LLVM")
sys.exit(1)
# Use a QC prebuilt LLVM
graph.add_env('CLANG', os.path.join(llvm_root, 'bin', 'clang'))
graph.add_env('CLANG_MAP', os.path.join(
llvm_root, 'bin', 'clang-extdef-mapping'))
graph.add_env('FORMATTER', os.path.join(llvm_root, 'bin', 'clang-format'))
# Use Clang to compile.
graph.add_env('TARGET_TRIPLE', target_triple)
graph.add_env('TARGET_CC', '${CLANG} -target ${TARGET_TRIPLE}')
# Use Clang with LLD to link.
graph.add_env('TARGET_LD', '${TARGET_CC} -fuse-ld=lld')
# Use Clang to preprocess DSL files.
graph.add_env('CPP', '${CLANG}-cpp -target ${TARGET_TRIPLE}')
# Use C18. For the purposes of MISRA, the language is C99 and all differences
# between C99 and C18 are language extensions permitted by a project deviation
# from rule 1.2.
graph.append_env('CFLAGS', '-std=gnu18')
# Turn all warnings on as errors by default
graph.append_env('CFLAGS', '-Weverything')
graph.append_env('CFLAGS', '-Werror')
# Unused macros are expected
graph.append_env('CFLAGS', '-Wno-unused-macros')
# MISRA rule 16.4 requires default: in every switch, even if it is covered
graph.append_env('CFLAGS', '-Wno-covered-switch-default')
# No need for C++ compatibility
graph.append_env('CFLAGS', '-Wno-c++98-compat')
graph.append_env('CFLAGS', '-Wno-c++-compat')
# No need for pre-C99 compatibility; we always use C18
graph.append_env('CFLAGS', '-Wno-declaration-after-statement')
# No need for GCC compatibility
graph.append_env('CFLAGS', '-Wno-gcc-compat')
# Allow GCC's _Alignof(lvalue) as a project deviation from MISRA rule 1.2.
graph.append_env('CFLAGS', '-Wno-gnu-alignof-expression')
# Allow Clang nullability as a project deviation from MISRA rule 1.2.
graph.append_env('CFLAGS', '-Wno-nullability-extension')
# Automatically requiring negative capabilities breaks analysis of reentrant
# locks, like the preemption count.
graph.append_env('CFLAGS', '-Wno-thread-safety-negative')
# We depend on section garbage collection; otherwise there are undefined and
# unused symbols that will be pulled in and cause link failures
graph.append_env('CFLAGS', '-ffunction-sections')
graph.append_env('CFLAGS', '-fdata-sections')
if not do_partial_link:
graph.append_env('LDFLAGS', '-Wl,--gc-sections')
# Ensure that there are no symbol clashes with externally linked objects.
graph.append_env('CFLAGS', '-fvisibility=hidden')
# Generate DWARF compatible with older T32 releases
graph.append_env('CFLAGS', '-gdwarf-4')
# Catch undefined switches during type system preprocessing
graph.append_env('CPPFLAGS', '-Wundef')
graph.append_env('CPPFLAGS', '-Werror')
# Add the variant-specific flags
if variant_cflags:
graph.append_env('CFLAGS', ' '.join(variant_cflags))
if variant_cppflags:
graph.append_env('CPPFLAGS', ' '.join(variant_cppflags))
graph.append_env('CODEGEN_CONFIGS', ' '.join(variant_cppflags))
if variant_ldflags:
graph.append_env('TARGET_LDFLAGS', ' '.join(variant_ldflags))
# On scons builds, the abs path may be put into the commandline, strip it out
# of the __FILE__ macro.
root = os.path.abspath(os.curdir) + os.sep
graph.append_env('CFLAGS',
'-fmacro-prefix-map={:s}={:s}'.format(root, ''))
graph.append_env('TARGET_CPPFLAGS', '-nostdlibinc')
graph.append_env('TARGET_LDFLAGS', '-nostdlib')
graph.append_env('TARGET_LDFLAGS', '-Wl,-z,max-page-size=0x1000')
graph.append_env('TARGET_LDFLAGS', '-Wl,-z,notext')
# Build rules
compdb_file = os.path.join(build_dir, 'compile_commands.json')
graph.add_compdb(compdb_file, form='clang')
# Compile a target C file.
graph.add_rule('cc',
'$TARGET_CC $CFLAGS $CPPFLAGS $TARGET_CFLAGS $TARGET_CPPFLAGS '
'$LOCAL_CFLAGS $LOCAL_CPPFLAGS -MD -MF ${out}.d '
'-c -o ${out} ${in}',
depfile='${out}.d', compdbs=[compdb_file])
# Preprocess a DSL file.
graph.add_rule('cpp-dsl', '${CPP} $CPPFLAGS $TARGET_CPPFLAGS $LOCAL_CPPFLAGS '
'-undef $DSL_DEFINES -x c -P -MD -MF ${out}.d -MT ${out} '
'${in} > ${out}',
depfile='${out}.d')
# Link a target binary.
graph.add_rule('ld', '$TARGET_LD $LDFLAGS $TARGET_LDFLAGS $LOCAL_LDFLAGS '
'${in} -o ${out}')
# CTU rule to generate the .ast files
ctu_dir = os.path.join(build_dir, "ctu")
graph.add_env('CTU_DIR', relpath(ctu_dir))
graph.add_rule('cc-ctu-ast',
'$TARGET_CC $CFLAGS $CPPFLAGS $TARGET_CFLAGS $TARGET_CPPFLAGS '
'$LOCAL_CFLAGS $LOCAL_CPPFLAGS -DCLANG_CTU_AST '
'-MD -MF ${out}.d -Wno-unused-command-line-argument '
'-emit-ast -o${out} ${in}',
depfile='${out}.d')
graph.add_env('COMPDB_DIR', compdb_file)
# CTU rule to generate the externalDefMap files
graph.add_rule('cc-ctu-map',
'$CLANG_MAP -p $COMPDB_DIR ${in} | '
'sed -e "s/\\$$/.ast/g; s| \\+${ROOT_DIR}/| |g" > '
'${out}')
graph.add_rule('cc-ctu-all', 'cat ${in} > ${out}')
# Run the static analyzer
graph.add_rule('cc-analyze',
'$TARGET_CC $CFLAGS $CPPFLAGS $TARGET_CFLAGS $TARGET_CPPFLAGS '
'$LOCAL_CFLAGS $LOCAL_CPPFLAGS '
'-Wno-unused-command-line-argument '
'--analyze '
'-Xanalyzer -analyzer-output=html '
'-Xanalyzer -analyzer-config '
'-Xanalyzer experimental-enable-naive-ctu-analysis=true '
'-Xanalyzer -analyzer-config '
'-Xanalyzer stable-report-filename=true '
'-Xanalyzer -analyzer-config '
'-Xanalyzer unroll-loops=true '
'-Xanalyzer -analyzer-config '
'-Xanalyzer ctu-dir=$CTU_DIR '
'-Xanalyzer -analyzer-disable-checker '
'-Xanalyzer alpha.core.FixedAddr '
'-o ${out} '
'${in}')
#
# Parse the module configurations
#
def process_dir(d, handler):
conf = os.path.join(d, 'build.conf')
with open(conf, 'r', encoding='utf-8') as f:
handler(d, f)
graph.add_gen_source(conf)
def module_local_headers_gen(d):
return graph.future_alias(os.path.join(build_dir, d, 'local_headers_gen'))
def parse_module_conf(d, f):
local_env = {}
module = os.path.basename(d)
local_headers_gen = module_local_headers_gen(d)
local_headers = []
add_include_dir(get_event_local_inc_dir(module), local_env)
src_requires = (
hyptypes_header,
version_header,
sym_version_header,
registers_header,
typed_headers_gen,
event_headers_gen,
hypercalls_headers_gen,
objects_headers_gen,
local_headers_gen,
)
objs = []
have_events = False
for s in f.readlines():
words = s.split()
if not words or words[0].startswith('#'):
# Skip comments or blank lines
pass
elif words[0] == 'interface':
for w in map(var_subst, words[1:]):
interfaces.add(w)
elif words[0] == 'types':
for w in map(var_subst, words[1:]):
types.add(add_type_dsl(d, w, local_env))
elif words[0] == 'hypercalls':
for w in map(var_subst, words[1:]):
hypercalls.add(add_hypercall_dsl(d, w, local_env))
elif words[0] == 'events':
for w in map(var_subst, words[1:]):
event_sources.add(add_event_dsl(d, w, local_env))
have_events = True
elif words[0] == 'registers':
for w in map(var_subst, words[1:]):
f = os.path.join(d, w)
if f in registers:
raise KeyError("duplicate {:s}".format(f))
registers.append(f)
elif words[0] == 'local_include':
add_include(d, 'include', local_env)
elif words[0] == 'source':
for w in map(var_subst, words[1:]):
objs.append(add_source(d, w, src_requires, local_env))
elif words[0] == 'external_object':
if not do_partial_link:
for w in map(var_subst, words[1:]):
external_objects.add(w)
elif words[0] == 'flags':
add_flags(map(var_subst, words[1:]), local_env)
elif words[0] == 'configs':
for c in map(var_subst, words[1:]):
add_global_define(c)
elif words[0] == 'macros':
for w in map(var_subst, words[1:]):
add_macro_include(d, 'include', w)
elif words[0] == 'arch_types':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
types.add(add_type_dsl(
os.path.join(d, words[1]), w, local_env))
elif words[0] == 'arch_hypercalls':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
f = os.path.join(words[1], w)
hypercalls.add(add_hypercall_dsl(d, f, local_env))
elif words[0] == 'arch_events':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
event_sources.add(add_event_dsl(
os.path.join(d, words[1]), w, local_env))
have_events = True
elif words[0] == 'arch_registers':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
f = os.path.join(d, words[1], w)
if f in registers:
raise KeyError("duplicate {:s}".format(f))
registers.append(f)
elif words[0] == 'arch_local_include':
if arch_match(words[1]):
add_include(d, os.path.join(words[1], 'include'), local_env)
elif words[0] == 'arch_source':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
objs.append(add_source(os.path.join(d, words[1]),
w, src_requires, local_env))
elif words[0] == 'arch_external_object':
if not do_partial_link:
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
external_objects.add(w)
elif words[0] == 'arch_flags':
if arch_match(words[1]):
add_flags(map(var_subst, words[2:]), local_env)
elif words[0] == 'arch_configs':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
add_global_define(w)
elif words[0] == 'arch_macros':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
add_macro_include(d, 'include', w)
elif words[0] == 'first_class_object':
first_class_objects.add(words[1], words[2:])
elif words[0] == 'base_module':
for w in map(var_subst, words[1:]):
# Require the base module's generated headers
local_headers.append(module_local_headers_gen(w))
# FIXME: We can't properly determine whether there are
# local_includes or not unless we do two-pass parsing of the
# build configs, so we just add them all.
logger.disabled = True
add_include(w, 'include', local_env)
add_include(os.path.join(build_dir, w), 'include', local_env)
# FIXME: We assume module has all possible arch include dirs
for arch_name in target_arch_names:
arch_dir = os.path.join(arch_name, 'include')
add_include(w, arch_dir, local_env)
add_include(os.path.join(build_dir, w),
arch_dir, local_env)
logger.disabled = False
modules.add(os.path.relpath(w, module_base))
if w not in module_dirs:
module_dirs.append(w)
elif words[0] == 'template' and words[1] == 'simple':
for w in map(var_subst, words[2:]):
add_simple_template(d, w, src_requires, local_env,
local_headers=True, headers=local_headers,
objects=objs)
elif words[0] == 'template':
ts = template_match(words[1], d)
for w in map(var_subst, words[2:]):
if add_template(ts, d, '', w, src_requires, local_env,
module):
have_events = True
elif words[0] == 'arch_template' and words[1] == 'simple':
if arch_match(words[2]):
for w in map(var_subst, words[3:]):
add_simple_template(d, w, src_requires, local_env,
local_headers=True,
headers=local_headers,
objects=objs, arch=words[2])
elif words[0] == 'arch_template':
ts = template_match(words[1], d)
if arch_match(words[2]):
for w in map(var_subst, words[3:]):
if add_template(ts, d, words[2], w, src_requires,
local_env, module):
have_events = True
elif words[0] == 'assert_config':
test = ' '.join(words[1:])
result = eval(test, {}, configs_as_ints)
if result is True:
continue
logger.error('assert_config failed "%s" in module conf for %s',
test, d)
sys.exit(1)
else:
# TODO: dependencies, configuration variables, etc
# Restructure this to use a proper parser first
logger.error('Unknown token "%s" in module conf for %s',
words[0], d)
sys.exit(1)
if have_events:
local_headers.append(get_event_local_inc_file(module))
modules_with_events.add(module)
add_event_handlers(module)
graph.add_alias(local_headers_gen, local_headers)
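# An illustrative module build.conf fragment handled by parse_module_conf();
# the directive names are real, the file names are made up:
#
#   interface example_if
#   types example.tc
#   events example.ev
#   local_include
#   source example.c
#   arch_source example_arch example_arch.c
#
# Interfaces use the smaller directive set handled by parse_interface_conf()
# below.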
def parse_interface_conf(d, f):
local_env = {}
interface = os.path.basename(d)
have_events = False
for s in f.readlines():
words = s.split()
if not words or words[0].startswith('#'):
# Skip comments or blank lines
pass
elif words[0] == 'types':
for w in map(var_subst, words[1:]):
types.add(add_type_dsl(d, w, local_env))
elif words[0] == 'hypercalls':
for w in map(var_subst, words[1:]):
hypercalls.add(add_hypercall_dsl(d, w, local_env))
elif words[0] == 'events':
for w in map(var_subst, words[1:]):
event_sources.add(add_event_dsl(d, w, local_env))
have_events = True
elif words[0] == 'registers':
for w in map(var_subst, words[1:]):
f = os.path.join(d, w)
if f in registers:
raise KeyError("duplicate {:s}".format(f))
registers.append(f)
elif words[0] == 'macros':
for w in map(var_subst, words[1:]):
add_macro_include(d, 'include', w)
elif words[0] == 'arch_types':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
types.add(add_type_dsl(
os.path.join(d, words[1]), w, local_env))
elif words[0] == 'arch_hypercalls':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
f = os.path.join(words[1], w)
hypercalls.add(add_hypercall_dsl(d, f, local_env))
elif words[0] == 'arch_events':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
event_sources.add(add_event_dsl(
os.path.join(d, words[1]), w, local_env))
have_events = True
elif words[0] == 'arch_macros':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
                    add_macro_include(os.path.join(d, words[1]), 'include', w)
elif words[0] == 'first_class_object':
first_class_objects.add(words[1], words[2:])
elif words[0] == 'template' and words[1] == 'simple':
for w in map(var_subst, words[2:]):
                # Interfaces have no compiled sources, so there are no extra
                # requirements to pass here (src_requires only exists in
                # parse_module_conf).
                add_simple_template(d, w, None, local_env)
elif words[0] == 'template':
ts = template_match(words[1], d)
for w in map(var_subst, words[2:]):
if add_template(ts, d, '', w, None, local_env, None):
have_events = True
else:
# TODO: dependencies, configuration variables, etc
# Restructure this to use a proper parser first
logger.error('Unknown token "%s" in interface conf for %s',
words[0], d)
sys.exit(1)
if have_events:
interfaces_with_events.add(interface)
add_event_handlers(interface)
def add_include_dir(d, local_env):
if not d.startswith(build_dir):
if not os.path.isdir(d):
logger.warning("include path: '{:s}' non-existant!".format(d))
if 'LOCAL_CPPFLAGS' in local_env:
local_env['LOCAL_CPPFLAGS'] += ' '
else:
local_env['LOCAL_CPPFLAGS'] = ''
local_env['LOCAL_CPPFLAGS'] += '-iquote ' + relpath(d)
def add_include(module_dir, include, local_env):
add_include_dir(os.path.join(module_dir, include), local_env)
def add_flags(flags, local_env):
if 'LOCAL_CFLAGS' in local_env:
local_env['LOCAL_CFLAGS'] += ' '
else:
local_env['LOCAL_CFLAGS'] = ''
local_env['LOCAL_CFLAGS'] += ' '.join(flags)
def add_source_file(src, obj, requires, local_env):
file_env = local_env.copy()
if 'LOCAL_CPPFLAGS' not in file_env:
file_env['LOCAL_CPPFLAGS'] = ''
else:
file_env['LOCAL_CPPFLAGS'] += ' '
graph.add_target([obj], 'cc', [src], requires=requires,
**file_env)
objects.add(obj)
if do_sa_html and src.endswith(".c"):
ast = os.path.join(ctu_dir, src + ".ast")
graph.add_target([ast], 'cc-ctu-ast', [src], requires=requires,
**file_env)
asts.add(ast)
defmap_frag = os.path.join(ctu_dir, src + ".map")
graph.add_target([defmap_frag], 'cc-ctu-map', [src], requires=requires,
**file_env)
defmap_frags.add(defmap_frag)
sa_html_dir = obj + ".html"
graph.add_target([sa_html_dir], 'cc-analyze', [src], requires=requires,
depends=(ast_gen, defmap), **file_env)
sa_html.add(sa_html_dir)
def add_source(module_dir, src, requires, local_env):
if not src.endswith(".c") and not src.endswith(".S"):
logger.error('unknown source file type for: %s', src)
sys.exit(1)
out_dir = os.path.join(build_dir, module_dir, 'obj')
i = os.path.join(module_dir, 'src', src)
o = os.path.join(out_dir, src + '.o')
add_source_file(i, o, requires, local_env)
return o
def add_macro_include(module_dir, include, src):
graph.append_env('CPPFLAGS', '-imacros {:s}'
.format(relpath(os.path.join(module_dir, include, src))))
def add_preproc_dsl(module_dir, src, **local_env):
out_dir = os.path.join(build_dir, module_dir)
i = os.path.join(module_dir, src)
o = os.path.join(out_dir, src + '.pp')
graph.add_target([o], 'cpp-dsl', [i], **local_env)
return o
def add_type_dsl(module_dir, src, local_env):
return add_preproc_dsl(module_dir, src, DSL_DEFINES='-D__TYPED_DSL__',
**local_env)
def add_hypercall_dsl(module_dir, src, local_env):
return add_preproc_dsl(module_dir, src, DSL_DEFINES='-D__HYPERCALLS_DSL__',
**local_env)
def add_event_dsl(module_dir, src, local_env):
return add_preproc_dsl(module_dir, src, requires=(hypconstants_header,),
DSL_DEFINES='-D__EVENTS_DSL__', **local_env)
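# Record a module or interface template for later code generation; returns
# True if the template generates an events (.ev) file.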
def add_template(ts, d, arch, tmpl_file, requires, local_env, module):
ext = os.path.splitext(tmpl_file)[1]
is_event = False
is_module = module is not None
if ext == '.h' and is_module:
mod_gen_dir = os.path.join(objects_build_dir, module)
add_include(mod_gen_dir, 'include', local_env)
if ext == '.c' and not is_module:
logger.error('C template specified for interface %s', d)
sys.exit(1)
else:
ts.sources.append((d, tmpl_file, arch, requires, is_module, local_env))
if ext == '.ev':
is_event = True
return is_event
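# Generate a header or source file directly from a .tmpl template using the
# simple code generator, adding any resulting include dirs and objects.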
def add_simple_template(d, t, requires, local_env, local_headers=False,
headers=None, objects=None, arch=''):
i = os.path.join(d, arch, 'templates', t)
out_name, ext = os.path.splitext(t)
if ext != '.tmpl':
logger.warning("Template filename does not end in .tmpl: %s", t)
out_ext = os.path.splitext(out_name)[1]
if out_ext == '.h' and headers is not None:
if local_headers:
out_dir = os.path.join(build_dir, d, arch, 'include')
add_include_dir(out_dir, local_env)
else:
assert not arch
out_dir = interface_gen_dir
o = os.path.join(out_dir, out_name)
headers.append(o)
elif out_ext in ('.c', '.S') and objects is not None:
out_dir = os.path.join(build_dir, d, arch, 'src')
o = os.path.join(out_dir, out_name)
oo = o + '.o'
add_source_file(o, oo, requires, local_env)
objects.append(oo)
else:
logger.error("Unsupported template output: %s", out_name)
sys.exit(1)
graph.add_target([o], 'code_gen_asm' if out_ext == '.S' else 'code_gen',
[i])
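# Modules and interfaces that declare events need a generated event handlers
# source file; track them so each one is only added once.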
event_handler_modules = set()
def add_event_handlers(module):
if module in event_handler_modules:
return
event_handler_modules.add(module)
obj = get_event_src_file(module) + '.o'
event_src_requires = (
hyptypes_header,
typed_headers_gen,
get_event_inc_file(module),
)
add_source_file(get_event_src_file(module), obj, event_src_requires,
{})
# Header locations
interface_gen_dir = os.path.join(build_dir, 'interface', 'include')
graph.append_env('CPPFLAGS', '-I ' + relpath(interface_gen_dir))
objects_build_dir = os.path.join(build_dir, 'objects')
events_inc_dir = os.path.join(build_dir, 'events', 'include')
objects_headers_gen = graph.future_alias(
os.path.join(build_dir, 'objects_headers_gen'))
# Support for the event generator
graph.append_env('CPPFLAGS', '-I ' + relpath(events_inc_dir))
event_headers_gen = graph.future_alias(
os.path.join(build_dir, 'event_headers_gen'))
# Support for the hypercalls generator
hypercalls_headers_gen = graph.future_alias(
os.path.join(build_dir, 'hypercalls_headers_gen'))
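# Paths of the generated per-module event headers and sources.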
def get_event_local_inc_dir(module):
return os.path.join(build_dir, 'events', module, 'include')
def get_event_local_inc_file(module):
return os.path.join(get_event_local_inc_dir(module), 'event_handlers.h')
def get_event_inc_file(module):
return os.path.join(events_inc_dir, 'events', module + '.h')
def get_event_src_file(module):
return os.path.join(build_dir, 'events', 'src', module + '.c')
#
# Global generated headers depends
#
build_includes = os.path.join(build_dir, 'include')
hyptypes_header = os.path.join(build_includes, 'hyptypes.h')
hypconstants_header = os.path.join(build_includes, 'hypconstants.h')
registers_header = os.path.join(build_includes, 'hypregisters.h')
version_header = os.path.join(build_includes, 'hypversion.h')
sym_version_header = os.path.join(build_includes, 'hypsymversion.h')
graph.append_env('CPPFLAGS', '-I ' + relpath(build_includes))
typed_headers_gen = graph.future_alias(
os.path.join(build_dir, 'typed_headers_gen'))
guestapi_interface_types = os.path.join(build_dir, 'guestapi', 'include',
'guest_types.h')
#
# Hypercalls generated files
#
# FIXME: This is not hypervisor source; it should not be built here.
# Generation temporarily hard-coded here until better handling is implemented.
hypguest_interface_src = os.path.join(build_dir, 'guestapi', 'src',
'guest_interface.c')
hypguest_interface_header = os.path.join(build_dir, 'guestapi', 'include',
'guest_interface.h')
guestapis.add(hypguest_interface_header)
guestapis.add(hypguest_interface_src)
#
# Set up the simple code generator
#
codegen_script = os.path.join('tools', 'codegen', 'codegen.py')
graph.add_env('CODEGEN', relpath(codegen_script))
graph.add_rule('code_gen', '${CODEGEN} ${CODEGEN_ARCHS} ${CODEGEN_CONFIGS} '
'-f ${FORMATTER} -o ${out} -d ${out}.d ${in}',
depfile='${out}.d')
graph.add_rule('code_gen_asm', '${CODEGEN} ${CODEGEN_ARCHS} '
'${CODEGEN_CONFIGS} -o ${out} -d ${out}.d ${in}',
depfile='${out}.d')
#
# Set up the Clang static analyser
#
defmap = os.path.join(ctu_dir, "externalDefMap.txt")
ast_gen = graph.future_alias(os.path.join(build_dir, 'ast-gen'))
# Get all configs as Ints or strings
configs_as_ints = dict()
def configs_get_int(c):
try:
s = configs[c].strip('uU')
return int(s, 0)
except ValueError:
return configs[c]
for c in configs:
configs_as_ints[c] = configs_get_int(c)
#
# Collect the lists of objects, modules and interfaces
#
module_dirs = sorted(os.path.join(module_base, m) for m in modules)
for d in module_dirs:
process_dir(d, parse_module_conf)
for i in sorted(interfaces):
d = os.path.join(interface_base, i)
process_dir(d, parse_interface_conf)
#
# Collect all defines and configs
#
def mkdirs(path):
try:
os.makedirs(path)
except OSError as e:
import errno
if e.errno == errno.EEXIST:
pass
else:
raise
define_file = os.path.join(build_dir, 'config.h')
mkdirs(os.path.split(define_file)[0])
graph.add_gen_output(define_file)
with GenFile(define_file, 'w') as f:
if variant_defines:
for define_arg in variant_defines:
define, val = define_arg.split('=')
f.write(u"#define {:s} {:s}\n".format(define, val))
for i in sorted(interfaces):
f.write(u"#define INTERFACE_{:s} 1\n".format(i.upper()))
for i in sorted(modules):
i = i.replace(os.path.sep, '_')
f.write(u"#define MODULE_{:s} 1\n".format(i.upper()))
graph.append_env('CPPFLAGS', '-imacros {:s}'.format(relpath(define_file)))
graph.append_env('CODEGEN_CONFIGS',
'-imacros {:s}'.format(relpath(define_file)))
#
# Generate types and events for first class objects
#
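# Helpers that instantiate per-object templates (.c, .h, .ev, .tc) for all
# registered first class objects.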
def add_object_c_template(module, template, requires, object_str, target,
local_env):
out = os.path.join(objects_build_dir, module, target)
graph.add_target([out], 'object_gen_c', [template], OBJ=object_str,
depends=[objects_script])
add_source_file(out, out + '.o', requires, local_env)
def add_object_h_template(module, template, requires, object_str, target,
is_module, local_env):
if is_module:
out = os.path.join(objects_build_dir, module, 'include', target)
else:
out = os.path.join(objects_incl_dir, target)
    # For now, add all headers here. In future, dependencies for local headers
    # could be more constrained to the module's source files.
objects_headers.append(out)
graph.add_target([out], 'object_gen_c', [template], OBJ=object_str,
depends=[objects_script])
def add_object_event_template(module, template, object_str, target):
object_ev = os.path.join(objects_build_dir, module, target)
graph.add_target([object_ev], 'object_gen', [template], OBJ=object_str,
depends=[objects_script])
event_sources.add(object_ev)
def add_object_type_template(module, template, object_str, target):
object_tc = os.path.join(objects_build_dir, module, target)
graph.add_target([object_tc], 'object_gen', [template], OBJ=object_str,
depends=[objects_script])
types.add(object_tc)
objects_script = os.path.join('tools', 'objects', 'object_gen.py')
graph.add_env('OBJECTS', relpath(objects_script))
graph.add_rule('object_gen', '${OBJECTS} -t ${in} '
'${OBJ} -o ${out}')
graph.add_rule('object_gen_c', '${OBJECTS} -t ${in} -f ${FORMATTER} '
'${OBJ} -o ${out}')
objects_incl_dir = os.path.join(objects_build_dir, 'include')
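# Encode each first class object and its parameters as a comma-separated
# argument for the object generator.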
fc_objects = []
for x in sorted(first_class_objects):
fc_objects.append(','.join([x] + first_class_objects[x]))
fc_objects = ' '.join(fc_objects)
have_object_incl = False
objects_headers = []
for module_dir, target, arch, src_requires, is_module, local_env in \
first_class_templates:
ext = os.path.splitext(target)[1]
module = os.path.basename(module_dir)
template = os.path.join(module_dir, arch, 'templates', target + '.tmpl')
if ext == '.ev':
add_object_event_template(module, template, fc_objects, target)
elif ext == '.tc':
add_object_type_template(module, template, fc_objects, target)
elif ext == '.c':
add_object_c_template(module, template, src_requires, fc_objects,
target, local_env)
elif ext == '.h':
add_object_h_template(module, template, src_requires, fc_objects,
target, is_module, local_env)
if not is_module:
have_object_incl = True
else:
logger.error('Unsupported first_class_object target "%s" in %s',
target, module_dir)
sys.exit(1)
if have_object_incl:
graph.append_env('CPPFLAGS', '-I ' + relpath(objects_incl_dir))
# An alias target is used to order header generation before source compilation
graph.add_alias(objects_headers_gen, objects_headers)
#
# Setup the types generator
#
types_script = os.path.join('tools', 'typed', 'type_gen.py')
types_pickle = os.path.join(build_dir, 'types.pickle')
graph.add_rule('types_parse', '${TYPED} -a ${ABI} -d ${out}.d '
'${in} -P ${out}', depfile='${out}.d')
graph.add_target([types_pickle], 'types_parse', sorted(types), ABI=abi_arch)
graph.add_env('TYPED', relpath(types_script))
graph.add_rule('gen_types', '${TYPED} -a ${ABI} -f ${FORMATTER} -d ${out}.d '
'-p ${in} -o ${out}', depfile='${out}.d')
graph.add_target(hyptypes_header, 'gen_types', types_pickle, ABI=abi_arch)
# gen guest type
graph.add_rule('gen_public_types',
'${TYPED} --public -a ${ABI} -f ${FORMATTER} -d ${out}.d '
'-p ${in} -o ${out}', depfile='${out}.d')
graph.add_target(guestapi_interface_types, 'gen_public_types',
types_pickle, ABI=abi_arch)
graph.add_rule('gen_types_tmpl', '${TYPED} -a ${ABI} -f ${FORMATTER} '
'-d ${out}.d -t ${TEMPLATE} -p ${in} -o ${out}',
depfile='${out}.d')
graph.add_rule('gen_public_types_tmpl', '${TYPED} --public -a ${ABI} '
'-f ${FORMATTER} -d ${out}.d -t ${TEMPLATE} -p ${in} -o ${out}',
depfile='${out}.d')
typed_headers = []
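# Generate headers and sources from the collected typed templates.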
for module_dir, target, arch, src_requires, is_module, local_env in \
typed_templates:
ext = os.path.splitext(target)[1]
template = os.path.join(module_dir, arch, 'templates', target + '.tmpl')
if ext == '.h':
out = os.path.join(build_dir, 'include', target)
typed_headers.append(out)
graph.add_target([out], 'gen_types_tmpl', types_pickle,
depends=[template], TEMPLATE=relpath(template),
ABI=abi_arch)
elif ext == '.c':
out = os.path.join(build_dir, module_dir, target)
graph.add_target([out], 'gen_types_tmpl', types_pickle,
depends=[template], TEMPLATE=relpath(template),
ABI=abi_arch)
add_source_file(out, out + '.o', src_requires, local_env)
else:
logger.error('Unsupported typed_template target "%s" in %s',
target, module_dir)
sys.exit(1)
graph.add_alias(typed_headers_gen, typed_headers)
for module_dir, target, arch, src_requires, is_module, local_env in \
typed_guestapi_templates:
    assert is_module
ext = os.path.splitext(target)[1]
template = os.path.join(module_dir, arch, 'templates', target + '.tmpl')
if ext == '.h':
subdir = 'include'
elif ext == '.c':
subdir = 'src'
else:
        logger.error('Unsupported typed_guestapi target "%s" in %s',
                     target, module_dir)
        sys.exit(1)
out = os.path.join(build_dir, 'guestapi', subdir, 'guest_' + target)
graph.add_target([out], 'gen_public_types_tmpl', types_pickle,
depends=[template], TEMPLATE=relpath(template),
ABI=abi_arch)
guestapis.add(out)
guestapis.add(guestapi_interface_types)
guestapi_gen = os.path.join(build_dir, 'guestapi_gen')
graph.add_alias(guestapi_gen, sorted(guestapis))
graph.add_default_target(guestapi_gen, True)
#
# Setup the hypercalls generator
#
hypercalls_script = os.path.join('tools', 'hypercalls', 'hypercall_gen.py')
graph.add_env('HYPERCALLS', relpath(hypercalls_script))
hypercalls_template_path = os.path.join('tools', 'hypercalls', 'templates')
hypercalls_guest_templates = (('guest_interface.c', hypguest_interface_src),
('guest_interface.h', hypguest_interface_header))
# FIXME: upgrade Lark and remove the LANG env workaround.
graph.add_rule('hypercalls_gen', 'LANG=C.UTF-8'
' ${HYPERCALLS} -a ${ABI} -f ${FORMATTER}'
' -d ${out}.d -t ${TEMPLATE} -p ${TYPES_PICKLE} ${in}'
' -o ${out}', depfile='${out}.d')
hypercalls_headers = []
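# Generate headers and sources from the collected hypercalls templates.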
for module_dir, target, arch, src_requires, is_module, local_env in \
hypercalls_templates:
template = os.path.join(module_dir, arch, 'templates', target + '.tmpl')
out_ext = os.path.splitext(target)[1]
if out_ext == '.h':
out = os.path.join(build_dir, 'include', target)
elif out_ext in ('.c', '.S'):
out = os.path.join(build_dir, module_dir, 'src', target)
else:
logger.error("Unsupported template file: %s", target)
sys.exit(1)
graph.add_target([out], 'hypercalls_gen', sorted(hypercalls),
TEMPLATE=relpath(template), ABI=abi_arch,
TYPES_PICKLE=relpath(types_pickle),
depends=[types_pickle, template])
if out_ext == '.h':
hypercalls_headers.append(out)
elif out_ext in ('.c', '.S'):
oo = out + '.o'
requires = (
hyptypes_header,
hypercalls_headers_gen,
typed_headers_gen,
event_headers_gen
)
local_env = {}
add_source_file(out, oo, requires, local_env)
graph.add_alias(hypercalls_headers_gen, hypercalls_headers)
# FIXME: provide a better/standalone way to generate guest headers
for tmpl, out_name in hypercalls_guest_templates:
template = os.path.join(hypercalls_template_path, tmpl + '.tmpl')
graph.add_target(out_name, 'hypercalls_gen', sorted(hypercalls),
TEMPLATE=relpath(template), ABI=abi_arch,
TYPES_PICKLE=relpath(types_pickle),
depends=[types_pickle, template])
#
# Setup the events generators
#
def event_template(name):
return os.path.join('tools', 'events', 'templates', name + '.tmpl')
events_script = os.path.join('tools', 'events', 'event_gen.py')
graph.add_env('EVENTS', relpath(events_script))
event_handlers_tmpl = event_template('handlers.h')
event_triggers_tmpl = event_template('triggers.h')
events_pickle = os.path.join(build_dir, 'events.pickle')
event_src_tmpl = event_template('c')
graph.add_rule('event_parse',
'${EVENTS} ${INCLUDES} -d ${out}.d ${in} -P ${out}',
depfile='${out}.d', restat=True)
graph.add_target([events_pickle], 'event_parse', sorted(event_sources))
graph.add_rule('event_gen', '${EVENTS} -t ${TEMPLATE} -m ${MODULE} ${OPTIONS}'
'${INCLUDES} -d ${out}.d -p ${in} -o ${out}',
depfile='${out}.d', restat=True)
event_headers = []
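# Generate the event trigger headers and event source files for every module
# or interface that declares events.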
for module in sorted(interfaces_with_events | modules_with_events):
event_out = get_event_inc_file(module)
event_headers.append(event_out)
graph.add_target([event_out], 'event_gen', events_pickle,
MODULE=module, TEMPLATE=relpath(event_triggers_tmpl),
depends=[event_triggers_tmpl])
event_out = get_event_src_file(module)
graph.add_target([event_out], 'event_gen', events_pickle,
MODULE=module, TEMPLATE=relpath(event_src_tmpl),
depends=[event_src_tmpl])
# OPTIONS='-f ${FORMATTER}',
# An alias target is used to order header generation before source compilation
graph.add_alias(event_headers_gen, event_headers)
for module in sorted(modules_with_events):
# Gen handler headers
event_out = get_event_local_inc_file(module)
graph.add_target([event_out], 'event_gen', events_pickle,
MODULE=module, TEMPLATE=relpath(event_handlers_tmpl),
depends=[event_handlers_tmpl])
# Generate the static analysis definition map and ASTs
graph.add_target([defmap], 'cc-ctu-all', sorted(defmap_frags))
graph.add_alias(ast_gen, sorted(asts))
#
# Generate register accessors
#
registers_script = os.path.join('tools', 'registers', 'register_gen.py')
graph.add_env('REGISTERS', relpath(registers_script))
graph.add_rule('registers_gen', '${REGISTERS} -t ${TEMPLATE} -f ${FORMATTER} '
'-o ${out} ${in}')
registers_pp = list()
# Pre-process the register scripts
for f in registers:
f_pp = os.path.join(build_dir, f + '.pp')
graph.add_target([f_pp], 'cpp-dsl', [f])
registers_pp.append(f_pp)
for module_dir, target, arch, src_requires, is_module, local_env in \
registers_templates:
template = os.path.join(module_dir, arch, 'templates', target + '.tmpl')
header = os.path.join(build_includes, target)
graph.add_target([header], 'registers_gen', registers_pp,
TEMPLATE=relpath(template),
depends=[template, registers_script])
#
# Build version setup
#
version_file = os.path.join('hyp', 'core', 'boot', 'include', 'version.h')
if os.path.exists(version_file):
graph.add_rule('version_copy', 'cp ${in} ${out}')
graph.add_target([version_header], 'version_copy', [version_file])
else:
ver_script = os.path.join('tools', 'build', 'gen_ver.py')
graph.add_rule('version_gen', 'PYTHONPATH=' +
relpath(os.path.join('tools', 'utils')) + ' ' +
relpath(ver_script) + ' -C ' + relpath('.') +
' -o ${out}', restat=True)
import subprocess
gitdir = subprocess.check_output(['git', 'rev-parse', '--git-dir'])
gitdir = gitdir.decode('utf-8').strip()
graph.add_target([version_header], 'version_gen',
['{:s}/logs/HEAD'.format(gitdir)], always=True)
#
# Symbols version setup
#
sym_ver_script = os.path.join('tools', 'build', 'gen_sym_ver.py')
graph.add_rule('sym_version_gen', relpath(sym_ver_script) + ' > ${out}')
graph.add_target([sym_version_header], 'sym_version_gen', always=True)
#
# Includes setup
#
# Add module interfaces to the global CPPFLAGS
for interface in sorted(interfaces):
d = os.path.join(interface_base, interface, 'include')
graph.append_env('CPPFLAGS', '-I ' + relpath(d))
#
# Top-level targets
#
# Run the static analyser if 'enable_sa' is set in command line
if do_sa_html:
sa_alias = os.path.join(build_dir, 'sa-html')
graph.add_alias(sa_alias, sa_html)
graph.add_default_target(sa_alias)
# Pre-process the linker script
linker_script_in = os.path.join(arch_base, link_arch, 'link.lds')
linker_script = os.path.join(build_dir, 'link.lds.pp')
graph.add_target([linker_script], 'cpp-dsl',
[linker_script_in], requires=[hypconstants_header])
# Link the hypervisor ELF file
if do_partial_link:
hyp_elf = os.path.join(build_dir, 'hyp.o')
graph.append_env('TARGET_LDFLAGS', '-r -Wl,-x')
graph.add_default_target(linker_script)
else:
hyp_elf = os.path.join(build_dir, 'hyp.elf')
graph.append_env('TARGET_LDFLAGS',
'-Wl,-T,{:s}'.format(relpath(linker_script)))
graph.add_target([hyp_elf], 'ld', sorted(objects | external_objects),
depends=[linker_script])
graph.add_default_target(hyp_elf)
#
# Python dependencies
#
for m in list(sys.modules.values()) + [relpath]:
try:
f = inspect.getsourcefile(m)
except TypeError:
continue
if f is None:
continue
f = os.path.relpath(f)
if f.startswith('../'):
continue
graph.add_gen_source(f)
| 53,295 | 35.05954 | 79 | py |
gunyah-hypervisor-develop | gunyah-hypervisor-develop/tools/build/gen_sym_ver.py | #!/usr/bin/env python3
# coding: utf-8
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from random import SystemRandom
rng = SystemRandom()
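# Emit a randomly generated 64-bit symbol version as a preprocessor define.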
print("#define HYP_SYM_VERSION 0x{:x}".format(rng.getrandbits(64)))
| 268 | 23.454545 | 67 | py |