# File: wikitables-master/wikitables/readers.py
# pylint: disable=invalid-name,useless-object-inheritance
import logging
from collections import defaultdict
import gettext
import pycountry
from mwparserfromhell.nodes.tag import Tag
from mwparserfromhell.nodes.template import Template
from mwparserfromhell.nodes.wikilink import Wikilink
from wikitables.models import Field, Row
from wikitables.util import ftag, ustr, guess_type
from wikitables.templates import read_template
log = logging.getLogger('wikitables')
ignore_attrs = ['group="Note"']
class FieldReader(object):
""" Stateful Field value reader """
def __init__(self, lang='en'):
self.lang = lang
try:
language_translation = gettext.translation(
'iso3166', pycountry.LOCALES_DIR, languages=[lang]
)
language_translation.install()
except FileNotFoundError:
language_translation = gettext
self.translate_fn = language_translation.gettext
self._attrs = {} # node attribute state
def parse(self, node):
"""
Return generator yielding Field objects for a given node
"""
self._attrs = {}
vals = []
yielded = False
for x in self._read_parts(node):
if isinstance(x, Field):
yielded = True
x.attrs = self._attrs
yield x
else:
vals.append(ustr(x).strip(' \n\t'))
joined = ' '.join([x for x in vals if x])
if joined:
yielded = True
yield Field(node, guess_type(joined), self._attrs)
if not yielded:
yield Field(node, "", self._attrs)
def _read_parts(self, n):
for a in getattr(n, 'attributes', []):
self._attrs[ustr(a.name)] = ustr(a.value)
if hasattr(n, 'contents') and hasattr(n.contents, 'nodes'):
for subnode in n.contents.nodes:
for x in self._read_parts(subnode):
yield x
else:
for x in self._read_part(n):
yield x
def _read_part(self, node):
if isinstance(node, Template):
for x in read_template(node, self.translate_fn):
yield x
return
if isinstance(node, Tag):
if not self._exclude_tag(node):
yield node.contents.strip_code()
return
if isinstance(node, Wikilink):
if node.text:
yield node.text
else:
yield node.title
return
yield node
@staticmethod
def _exclude_tag(node):
# exclude tag nodes with attributes in ignore_attrs
n_attrs = [x.strip() for x in node.attributes]
for a in n_attrs:
if a in ignore_attrs:
return True
# exclude tag nodes without contents
if not node.contents:
return True
return False
class RowReader(object):
""" Stateful Row reader """
def __init__(self, tname, head, lang='en'):
self.head = head
self.lang = lang
self._idx = 0
self._tname = tname
# track spanned fields across rows
self._span = {}
self._nspan = defaultdict(int)
self._freader = FieldReader(lang)
def parse(self, *nodes):
"""
Parse one or more `tr` nodes, yielding wikitables.Row objects
"""
for n in nodes:
if not n.contents:
continue
row = self._parse(n)
if not row.is_null:
yield row
def _parse(self, node):
rname = '%s[%s]' % (self._tname, self._idx)
self._idx += 1
r = Row(rname, node)
cols = node.contents.ifilter_tags(matches=ftag('th', 'td'))
fields = [f for col in cols for f in self._freader.parse(col)]
for col_name in self.head:
if self._nspan[col_name]:
r[col_name] = self._span[col_name]
self._nspan[col_name] -= 1
continue
if not fields:
log.warning('%s: missing field for column [%s]', r.name, col_name)
continue
f = fields.pop(0)
if 'rowspan' in f.attrs:
self._span[col_name] = f
self._nspan[col_name] = int(f.attrs['rowspan'])-1
r[col_name] = f
for f in fields:
log.warning('%s: dropping field from unknown column: %s', r.name, f)
return r
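# Illustrative usage sketch (hedged): running FieldReader over a hand-written
# wikicode table; the snippet, the helper name and the expected values shown
# are assumptions for demonstration, and the exact nodes depend on
# mwparserfromhell's parser.
def _example_field_reader():
    import mwparserfromhell as mwp
    wikicode = mwp.parse('{|\n|-\n| [[Paris]]\n| 2,206,488\n|}')
    reader = FieldReader(lang='en')
    for cell in wikicode.filter_tags(matches=ftag('td')):
        for field in reader.parse(cell):
            print(repr(field.value), field.attrs)  # e.g. 'Paris' {}, then 2206488 {}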
# File: wikitables-master/wikitables/cli.py
import sys
import json
import logging
from argparse import ArgumentParser
from wikitables.version import version
from wikitables.util import TableJSONEncoder
from wikitables import import_tables
from wikitables.util import jprint
log = logging.getLogger('wikitables')
def main():
parser = ArgumentParser(description='wikitables v%s' % version)
parser.add_argument('-l', '--lang',
dest='lang',
help='article language (default: %(default)s)',
default='en')
parser.add_argument('-p', '--pretty',
action='store_true',
help='pretty-print json output')
parser.add_argument('-d', '--debug',
action='store_true',
help='enable debug output')
parser.add_argument('article', help='article title')
args = parser.parse_args()
if not args.article:
print('usage: wikitables <article title>')
sys.exit(1)
if args.debug:
logging.basicConfig(level=logging.DEBUG)
log.debug('debug logging enabled')
else:
logging.basicConfig(level=logging.WARN)
tables = import_tables(args.article, lang=args.lang)
tables_dict = {table.name: table.rows for table in tables}
if args.pretty:
jprint(tables_dict)
else:
print(json.dumps(tables_dict, cls=TableJSONEncoder))
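# Example invocation of the command-line entry point above (hedged: the real
# output depends on the live article). With pretty-printing enabled,
#
#     wikitables --pretty "List of sovereign states"
#
# prints each parsed table as indented JSON keyed by "<article title>[<index>]".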
# File: wikitables-master/wikitables/version.py
__version__ = (0, 5, 5)
version = '%d.%d.%d' % __version__
# File: wikitables-master/wikitables/client.py
import logging
import requests
log = logging.getLogger(__name__)
class ArticleNotFound(RuntimeError):
""" Article query returned no results """
class Client(requests.Session):
""" Mediawiki API client """
def __init__(self, lang="en"):
super(Client, self).__init__()
self.base_url = 'https://' + lang + '.wikipedia.org/w/api.php'
def fetch_page(self, title, method='GET'):
""" Query for page by title """
params = {
'prop': 'revisions',
'format': 'json',
'action': 'query',
'explaintext': '',
'titles': _parse_title(title),
'rvprop': 'content',
}
req = self.request(method, self.base_url, params=params)
req.raise_for_status()
pages = req.json()["query"]["pages"]
# use key from first result in 'pages' array
page_id = list(pages.keys())[0]
if page_id == '-1':
raise ArticleNotFound('no matching articles returned')
return pages[page_id]
def _parse_title(s):
# extract title from, potentially, a URL
return s.split('/')[-1].split('#')[0].split('?')[0]
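# Minimal usage sketch (hedged: requires network access to the Wikipedia API,
# the article URL is only an example, and the helper name is illustrative):
def _example_fetch_page():
    client = Client(lang='en')
    page = client.fetch_page(
        'https://en.wikipedia.org/wiki/Python_(programming_language)')
    print(page['title'])                   # resolved article title
    print(len(page['revisions'][0]['*']))  # size of the raw wikitext body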
# File: wikitables-master/wikitables/util.py
import sys
import json
import logging
log = logging.getLogger('wikitables')
def ftag(*args):
return lambda node: node.tag in args
def jprint(obj):
if isinstance(obj, str):
obj = json.loads(obj)
print(json.dumps(obj, indent=2, sort_keys=False, cls=TableJSONEncoder))
def guess_type(value):
""" attempt to convert string value into numeric type """
num_value = value.replace(',', '') # remove comma from potential numbers
try:
return int(num_value)
except ValueError:
pass
try:
return float(num_value)
except ValueError:
pass
return value
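# Worked examples of the coercion above:
#   guess_type('1,234')  -> 1234     (comma stripped, parsed as int)
#   guess_type('3.5')    -> 3.5      (parsed as float)
#   guess_type('12 km')  -> '12 km'  (left unchanged as a string)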
def ustr(value):
if sys.version_info < (3, 0):
#py2
try:
# pylint: disable=undefined-variable
return unicode(value).encode('utf-8')
except UnicodeDecodeError:
return str(value)
else:
return str(value)
class TableJSONEncoder(json.JSONEncoder):
def default(self, o):
if hasattr(o, '__json__'):
return o.__json__()
return json.JSONEncoder.default(self, o)
# File: wikitables-master/wikitables/templates.py
# Template readers
import logging
import pycountry
from wikitables.util import ustr
from wikitables.models import Field
log = logging.getLogger('wikitables')
def read_template(node, translate_fn):
if node.name == 'refn':
log.debug('omitting refn subtext from field')
return []
for read_function in _tmpl_readers:
value = read_function(node, translate_fn)
if value:
return value
return []
def _read_unknown_template(node, translate_fn):
del translate_fn
# for unknown templates, concatenate all arg values
_, args = _read_template_params(node)
concat = ' '.join([ustr(x) for x in args])
return [concat]
def _read_change_template(node, translate_fn):
del translate_fn
if node.name != 'change':
return None
params, args = _read_template_params(node)
args = [int(ustr(a)) for a in args]
if params.get('invert') == 'on':
change = ((args[0] / args[1]) - 1) * 100
else:
change = ((args[1] / args[0]) - 1) * 100
return [Field(node, args[0]), Field(node, args[1]), Field(node, change)]
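# Worked example for the formula above: a {{change|100|125}} template yields
# the fields 100, 125 and ((125 / 100) - 1) * 100 = 25.0; with invert=on the
# change would instead be ((100 / 125) - 1) * 100 = -20.0.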
def _read_flag_template(node, translate_fn):
# read flag shorthand templates
sname = ustr(node.name)
try:
country = pycountry.countries.lookup(sname)
return [translate_fn(country.name)]
except LookupError:
pass
def _read_template_params(node):
kvs, args = {}, []
for param in node.params:
if '=' in param:
parts = param.split('=')
kvs[parts[0]] = '='.join(parts[1:])
else:
args.append(param)
return kvs, args
_tmpl_readers = [
_read_change_template,
_read_flag_template,
_read_unknown_template
]
# File: wikitables-master/wikitables/models.py
# pylint: disable=useless-object-inheritance
import json
from wikitables.util import TableJSONEncoder
class Field(object):
"""
Field within a table row
attributes:
- raw(mwparserfromhell.nodes.Node) - Unparsed field Wikicode
- value(str) - Parsed field value as string
"""
def __init__(self, node, value, attrs=None):
if attrs is None:
attrs = {}
self.raw = node
self.value = value
self.attrs = attrs
def __str__(self):
return str(self.value)
def __repr__(self):
return str(self.value)
def __json__(self):
return self.value
class Row(dict):
"""
Single WikiTable row, mapping a field name(str) to wikitables.Field obj
"""
def __init__(self, name, node):
super(Row, self).__init__()
self.name = name
self.raw = node
def json(self):
return json.dumps(self, cls=TableJSONEncoder)
@property
def is_null(self):
for _, item in self.items():
if item.value != '':
return False
return True
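# Illustrative sketch (hedged) of the model objects above: a Row maps column
# names to Field objects and json() serializes the Field values through
# TableJSONEncoder. The raw nodes are set to None purely for demonstration.
def _example_row():
    row = Row('demo[0]', node=None)
    row['City'] = Field(None, 'Paris')
    row['Population'] = Field(None, 2206488)
    print(row.json())  # {"City": "Paris", "Population": 2206488}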
# File: wikitables-master/wikitables/__init__.py
import json
import logging
import mwparserfromhell as mwp
from wikitables.client import Client
from wikitables.readers import RowReader
from wikitables.util import TableJSONEncoder, ftag, ustr
log = logging.getLogger('wikitables')
def import_tables(article, lang='en'):
client = Client(lang)
page = client.fetch_page(article)
body = page['revisions'][0]['*']
## parse for tables
raw_tables = mwp.parse(body).filter_tags(matches=ftag('table'))
def _table_gen():
for idx, table in enumerate(raw_tables):
name = '%s[%s]' % (page['title'], idx)
yield WikiTable(name, table, lang)
return list(_table_gen())
class WikiTable():
"""
Parsed Wikipedia table
attributes:
- name(str): Table name in the format <article_name>[<table_index>]
- head(list): List of parsed column names as strings
- rows(list): List of <wikitables.Row> objects
"""
def __init__(self, name, raw_table, lang='en'):
self.name = ustr(name)
self.lang = lang
self.rows = []
self._head = []
self._node = raw_table
self._tr_nodes = raw_table.contents.filter_tags(matches=ftag('tr'))
self._read_header()
self._read_rows()
def json(self):
return json.dumps(self.rows, cls=TableJSONEncoder)
@property
def head(self):
return self._head
@head.setter
def head(self, val):
if not isinstance(val, list):
raise ValueError('table head must be provided as list')
self._head = val
self._read_rows()
def __repr__(self):
return "<WikiTable '%s'>" % self.name
def _log(self, value):
log.debug('%s: %s', self.name, value)
def _read_rows(self):
reader = RowReader(self.name, self._head, self.lang)
self.rows = list(reader.parse(*self._tr_nodes))
self._log('parsed %d rows %d cols' % (len(self.rows), len(self._head)))
def _read_header(self):
# read header
header_nodes = self._find_header_flat()
if not header_nodes:
header_nodes = self._find_header_row()
if not header_nodes:
header_nodes = self._make_default_header()
for header_node in header_nodes:
field_name = header_node.contents.strip_code().strip(' ')
self._head.append(ustr(field_name))
def _find_header_flat(self):
"""
Find header elements in a table, if possible. This case handles
situations where '<th>' elements are not within a row('<tr>')
"""
nodes = self._node.contents.filter_tags(matches=ftag('th'), recursive=False)
if not nodes:
return None
self._log('found header outside rows (%d <th> elements)' % len(nodes))
return nodes
def _find_header_row(self):
"""
Evaluate all rows and determine header position, based on
greatest number of 'th' tagged elements
"""
th_max = 0
header_idx = 0
for idx, tr_node in enumerate(self._tr_nodes):
th_count = len(tr_node.contents.filter_tags(matches=ftag('th')))
if th_count > th_max:
th_max = th_count
header_idx = idx
if not th_max:
return None
self._log('found header at row %d (%d <th> elements)' % \
(header_idx, th_max))
header_row = self._tr_nodes.pop(header_idx)
return header_row.contents.filter_tags(matches=ftag('th'))
def _make_default_header(self):
"""
Return a generic placeholder header based on the tables column count
"""
td_max = 0
for tr_node in self._tr_nodes:
td_count = len(tr_node.contents.filter_tags(matches=ftag('td')))
if td_count > td_max:
td_max = td_count
self._log('creating default header (%d columns)' % td_max)
return ['column%d' % n for n in range(0, td_max)]
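# End-to-end usage sketch (hedged: requires network access, the article title
# is only an example, and the parsed rows depend on the live page):
def _example_import_tables():
    tables = import_tables('List of sovereign states')
    table = tables[0]
    print(table.name)    # e.g. "List of sovereign states[0]"
    print(table.head)    # parsed column names
    print(table.json())  # rows serialized through TableJSONEncoder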
# File: models-master/official/__init__.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File: models-master/official/pip_package/setup.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sets up TensorFlow Official Models."""
import datetime
import os
import sys
from setuptools import find_packages
from setuptools import setup
version = '2.13.0'
tf_version = '2.13.0' # Major version.
project_name = 'tf-models-official'
long_description = """The TensorFlow official models are a collection of
models that use TensorFlow's high-level APIs.
They are intended to be well-maintained, tested, and kept up to date with the
latest TensorFlow API. They should also be reasonably optimized for fast
performance while still being easy to read."""
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
def _get_requirements():
"""Parses requirements.txt file."""
install_requires_tmp = []
dependency_links_tmp = []
with open(
os.path.join(os.path.dirname(__file__), '../requirements.txt'), 'r') as f:
for line in f:
package_name = line.strip()
# Skip empty line or comments starting with "#".
if not package_name or package_name[0] == '#':
continue
if package_name.startswith('-e '):
dependency_links_tmp.append(package_name[3:].strip())
else:
install_requires_tmp.append(package_name)
return install_requires_tmp, dependency_links_tmp
install_requires, dependency_links = _get_requirements()
if project_name == 'tf-models-nightly':
version += '.dev' + datetime.datetime.now().strftime('%Y%m%d')
install_requires.append('tf-nightly')
install_requires.append('tensorflow-text-nightly')
else:
install_requires.append(f'tensorflow~={tf_version}')
install_requires.append(f'tensorflow-text~={tf_version}')
print('install_requires: ', install_requires)
print('dependency_links: ', dependency_links)
setup(
name=project_name,
version=version,
description='TensorFlow Official Models',
long_description=long_description,
author='Google Inc.',
author_email='[email protected]',
url='https://github.com/tensorflow/models',
license='Apache 2.0',
packages=find_packages(exclude=[
'research*',
'official.pip_package*',
'official.benchmark*',
'official.colab*',
'official.recommendation.ranking.data.preprocessing*',
]),
exclude_package_data={
'': ['*_test.py',],
},
install_requires=install_requires,
dependency_links=dependency_links,
python_requires='>=3.7',
)
# File: models-master/official/core/base_task.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the base task abstraction."""
import abc
import functools
from typing import Optional
from absl import logging
import tensorflow as tf
from official.core import config_definitions
from official.modeling import optimization
from official.modeling import performance
from official.modeling.privacy import configs
from official.modeling.privacy import ops
OptimizationConfig = optimization.OptimizationConfig
RuntimeConfig = config_definitions.RuntimeConfig
DifferentialPrivacyConfig = configs.DifferentialPrivacyConfig
class Task(tf.Module, metaclass=abc.ABCMeta):
"""A single-replica view of training procedure.
Tasks provide artifacts for training/validation procedures, including
loading/iterating over Datasets, training/validation steps, calculating the
loss and customized metrics with reduction.
"""
# Special keys in train/validate step returned logs.
loss = "loss"
def __init__(self,
params,
logging_dir: Optional[str] = None,
name: Optional[str] = None):
"""Task initialization.
Args:
params: the task configuration instance, which can be any of dataclass,
ConfigDict, namedtuple, etc.
logging_dir: a string pointing to where the model, summaries etc. will be
saved. You can also write additional stuff in this directory.
name: the task name.
"""
super().__init__(name=name)
self._task_config = params
self._logging_dir = (
logging_dir or ""
) # Empty directory hints current working dir.
@property
def task_config(self):
return self._task_config
@property
def logging_dir(self) -> str:
return self._logging_dir
@classmethod
def create_optimizer(cls, optimizer_config: OptimizationConfig,
runtime_config: Optional[RuntimeConfig] = None,
dp_config: Optional[DifferentialPrivacyConfig] = None):
"""Creates an TF optimizer from configurations.
Args:
optimizer_config: the parameters of the Optimization settings.
runtime_config: the parameters of the runtime.
dp_config: the parameter of differential privacy.
Returns:
A tf.optimizers.Optimizer object.
"""
gradient_transformers = None
if dp_config is not None:
logging.info("Adding differential privacy transform with config %s.",
dp_config.as_dict())
noise_stddev = dp_config.clipping_norm * dp_config.noise_multiplier
gradient_transformers = [
functools.partial(
ops.clip_l2_norm, l2_norm_clip=dp_config.clipping_norm),
functools.partial(
ops.add_noise, noise_stddev=noise_stddev)
]
opt_factory = optimization.OptimizerFactory(optimizer_config)
optimizer = opt_factory.build_optimizer(
opt_factory.build_learning_rate(),
gradient_transformers=gradient_transformers
)
# Configuring optimizer when loss_scale is set in runtime config. This helps
# avoiding overflow/underflow for float16 computations.
if runtime_config:
optimizer = performance.configure_optimizer(
optimizer,
use_float16=runtime_config.mixed_precision_dtype == "float16",
loss_scale=runtime_config.loss_scale)
return optimizer
def initialize(self, model: tf.keras.Model):
"""[Optional] A callback function used as CheckpointManager's init_fn.
This function will be called when no checkpoint is found for the model.
If there is a checkpoint, the checkpoint will be loaded and this function
will not be called. You can use this callback function to load a pretrained
checkpoint, saved under a directory other than the model_dir.
Args:
model: The keras.Model built or used by this task.
"""
ckpt_dir_or_file = self.task_config.init_checkpoint
logging.info("Trying to load pretrained checkpoint from %s",
ckpt_dir_or_file)
if ckpt_dir_or_file and tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if not ckpt_dir_or_file:
logging.info("No checkpoint file found from %s. Will not load.",
ckpt_dir_or_file)
return
if hasattr(model, "checkpoint_items"):
checkpoint_items = model.checkpoint_items
else:
checkpoint_items = dict(model=model)
ckpt = tf.train.Checkpoint(**checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info("Finished loading pretrained checkpoint from %s",
ckpt_dir_or_file)
def build_model(self) -> tf.keras.Model:
"""[Optional] Creates model architecture.
Returns:
A model instance.
""" # pytype: disable=bad-return-type # typed-keras
@abc.abstractmethod
def build_inputs(self,
params,
input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a dataset or a nested structure of dataset functions.
Dataset functions define per-host datasets with the per-replica batch size.
With distributed training, this method runs on remote hosts.
Args:
params: hyperparams to create input pipelines, which can be any of
dataclass, ConfigDict, namedtuple, etc.
input_context: optional distribution input pipeline context.
Returns:
A nested structure of per-replica input functions.
"""
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
"""Standard interface to compute losses.
Args:
labels: optional label tensors.
model_outputs: a nested structure of output tensors.
aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
del model_outputs, labels
if aux_losses is None:
losses = [tf.constant(0.0, dtype=tf.float32)]
else:
losses = aux_losses
total_loss = tf.add_n(losses)
return total_loss
def build_metrics(self, training: bool = True):
"""Gets streaming metrics for training/validation."""
del training
return []
def process_metrics(self, metrics, labels, model_outputs, **kwargs):
"""Process and update metrics.
Called when using custom training loop API.
Args:
metrics: a nested structure of metrics objects. The return of function
self.build_metrics.
labels: a tensor or a nested structure of tensors.
model_outputs: a tensor or a nested structure of tensors. For example,
output of the keras model built by self.build_model.
**kwargs: other args.
"""
for metric in metrics:
metric.update_state(labels, model_outputs)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
"""Process and update compiled_metrics.
call when using compile/fit API.
Args:
compiled_metrics: the compiled metrics (model.compiled_metrics).
labels: a tensor or a nested structure of tensors.
model_outputs: a tensor or a nested structure of tensors. For example,
output of the keras model built by self.build_model.
"""
compiled_metrics.update_state(labels, model_outputs)
def train_step(self,
inputs,
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics=None):
"""Does forward and backward.
With distribution strategies, this method runs on devices.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
if isinstance(inputs, tuple) and len(inputs) == 2:
features, labels = inputs
else:
features, labels = inputs, inputs
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Computes per-replica loss.
if model.compiled_loss:
loss = model.compiled_loss(
labels, outputs, regularization_losses=model.losses)
loss += self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=None)
else:
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
# For mixed precision, when a LossScaleOptimizer is used, the loss is
# scaled to avoid numeric underflow.
if isinstance(optimizer,
tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
if isinstance(optimizer,
tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
if model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics or []})
logs.update({m.name: m.result() for m in model.metrics})
return logs
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
"""Validation step.
With distribution strategies, this method runs on devices.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
if isinstance(inputs, tuple) and len(inputs) == 2:
features, labels = inputs
else:
features, labels = inputs, inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses)
logs = {self.loss: loss}
if metrics:
self.process_metrics(metrics, labels, outputs)
if model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics or []})
logs.update({m.name: m.result() for m in model.metrics})
return logs
def inference_step(self, inputs, model: tf.keras.Model):
"""Performs the forward step.
With distribution strategies, this method runs on devices.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
Returns:
Model outputs.
"""
return model(inputs, training=False)
def aggregate_logs(self, state, step_logs):
"""Optional aggregation over logs returned from a validation step.
Given step_logs from a validation step, this function aggregates the logs
after each eval_step() (see eval_reduce() function in
official/core/base_trainer.py). It runs on CPU and can be used to aggregate
metrics during validation, when there are too many metrics that cannot fit
into TPU memory. Note that this may increase latency due to data transfer
between TPU and CPU. Also, the step output from a validation step may be a
tuple with elements from replicas, and a concatenation of the elements is
needed in such case.
Args:
state: The current state of training, for example, it can be a sequence of
metrics.
step_logs: Logs from a validation step. Can be a dictionary.
"""
pass
def reduce_aggregated_logs(self,
aggregated_logs,
global_step: Optional[tf.Tensor] = None):
"""Optional reduce of aggregated logs over validation steps.
This function reduces aggregated logs at the end of validation, and can be
used to compute the final metrics. It runs on CPU and in each eval_end() in
base trainer (see eval_end() function in official/core/base_trainer.py).
Args:
aggregated_logs: Aggregated logs over multiple validation steps.
global_step: An optional variable of global step.
Returns:
A dictionary of reduced results.
"""
return {}
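# A deliberately small, hedged sketch of a concrete Task built on the
# abstraction above; the class name, toy model, and in-memory dataset are
# illustrative assumptions rather than part of this module.
class _ToyClassificationTask(Task):
  """Toy task: trains a linear classifier on random features."""
  def build_model(self) -> tf.keras.Model:
    inputs = tf.keras.Input(shape=(8,), dtype=tf.float32)
    logits = tf.keras.layers.Dense(2)(inputs)
    return tf.keras.Model(inputs=inputs, outputs=logits)
  def build_inputs(self, params, input_context=None):
    # Random in-memory tensors stand in for a real file-based pipeline;
    # `params` is expected to carry a `global_batch_size` like cfg.DataConfig.
    features = tf.random.uniform([64, 8])
    labels = tf.random.uniform([64], maxval=2, dtype=tf.int32)
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    return dataset.repeat().batch(params.global_batch_size, drop_remainder=True)
  def build_losses(self, labels, model_outputs, aux_losses=None):
    loss = tf.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(
            labels, model_outputs, from_logits=True))
    if aux_losses:
      loss += tf.add_n(aux_losses)
    return loss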
# File: models-master/official/core/config_definitions.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common configuration settings."""
import dataclasses
from typing import Optional, Sequence, Union
from official.modeling.hyperparams import base_config
from official.modeling.optimization.configs import optimization_config
from official.modeling.privacy import configs as dp_configs
OptimizationConfig = optimization_config.OptimizationConfig
@dataclasses.dataclass
class DataConfig(base_config.Config):
"""The base configuration for building datasets.
Attributes:
input_path: The path to the input. It can be either (1) a str indicating a
file path/pattern, or (2) a str indicating multiple file paths/patterns
separated by comma (e.g "a, b, c" or no spaces "a,b,c"), or (3) a list of
str, each of which is a file path/pattern or multiple file paths/patterns
separated by comma, or (4) a dictionary of the previous three approaches
for more advanced data mixing using named access. It should not be
specified when the following `tfds_name` is specified.
tfds_name: The name of the tensorflow dataset (TFDS). It should not be
specified when the above `input_path` is specified.
tfds_split: A str indicating which split of the data to load from TFDS. It
is required when above `tfds_name` is specified.
global_batch_size: The global batch size across all replicas.
is_training: Whether this data is used for training or not. This flag is
useful for consumers of this object to determine whether the data should
be repeated or shuffled.
drop_remainder: Whether the last batch should be dropped in the case it has
fewer than `global_batch_size` elements.
shuffle_buffer_size: The buffer size used for shuffling training data.
cache: Whether to cache dataset examples. If `True`, we will cache the
dataset after applying the decode_fn and parse_fn. It can be used to avoid
re-reading from disk, re-decoding and re-parsing the example on the second
epoch, but it requires significant memory overhead.
cycle_length: The number of files that will be processed concurrently when
interleaving files.
block_length: The number of consecutive elements to produce from each input
element before cycling to another input element when interleaving files.
deterministic: A boolean controlling whether determinism should be enforced.
sharding: Whether sharding is used in the input pipeline.
enable_tf_data_service: A boolean indicating whether to enable tf.data
service for the input pipeline.
tf_data_service_address: The URI of a tf.data service to offload
preprocessing onto during training. The URI should be in the format
"protocol://address", e.g. "grpc://tf-data-service:5050". It can be
overridden by `FLAGS.tf_data_service` flag in the binary.
tf_data_service_job_name: The name of the tf.data service job. This argument
makes it possible for multiple datasets to share the same job. The default
behavior is that the dataset creates anonymous, exclusively owned jobs.
tfds_data_dir: A str specifying the directory to read/write TFDS data.
tfds_as_supervised: A bool. When loading dataset from TFDS, if True, the
returned tf.data.Dataset will have a 2-tuple structure (input, label)
according to builder.info.supervised_keys; if False, the default, the
returned tf.data.Dataset will have a dictionary with all the features.
tfds_skip_decoding_feature: A str to indicate which features are skipped for
decoding when loading dataset from TFDS. Use comma to separate multiple
features. The main use case is to skip the image/video decoding for better
performance.
enable_shared_tf_data_service_between_parallel_trainers: A bool. When set to
true, only a single tf.data service will be started, and it will be shared
between all the trainer run simultaneously, e.g. using vizier to tune
hyperparameters. This will save CPU and RAM resources compared to running
separate tf.data service for each trainer. Notice that if batch size is
different for different trainers, the field
apply_tf_data_service_before_batching also needs to be true so that only a
single tf.data service instance will be created. In this case, tf.data
service will be applied before batching operation. So make sure to not
apply any processing steps after batching (e.g. in postprocess_fn) since
they wouldn't be paralleled by tf.data service and may slow down your
tf.data pipeline. When using shared tf.data service, the tf.data dataset
must be infinite, and slow trainer may skip certain training examples.
More details about shared tf.data service can be found at:
https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers.
apply_tf_data_service_before_batching: A bool. If set to True, tf.data
service will be applied before batching operation. This is useful to make
sure only a single tf.data service instance is created when
enable_shared_tf_data_service_between_parallel_trainers is true and batch
size is changing between parallel trainers.
trainer_id: A string. The id of the trainer if there are multiple parallel
trainer running at the same time, e.g. in vizier tuning case. It will be
automatically set if this field is needed. Users does not need to set it
when creating experiment configs.
seed: An optional seed to use for deterministic shuffling/preprocessing.
prefetch_buffer_size: An int specifying the buffer size of prefetch
datasets. If None, the buffer size is autotuned. Specifying this is useful
in case autotuning uses up too much memory by making the buffer size too
high.
autotune_algorithm: If specified, use this algorithm for AUTOTUNE. See:
https://www.tensorflow.org/api_docs/python/tf/data/experimental/AutotuneAlgorithm
"""
input_path: Union[Sequence[str], str, base_config.Config] = ""
tfds_name: Union[str, base_config.Config] = ""
tfds_split: str = ""
global_batch_size: int = 0
is_training: Optional[bool] = None
drop_remainder: bool = True
shuffle_buffer_size: int = 100
cache: bool = False
cycle_length: Optional[int] = None
block_length: int = 1
deterministic: Optional[bool] = None
sharding: bool = True
enable_tf_data_service: bool = False
tf_data_service_address: Optional[str] = None
tf_data_service_job_name: Optional[str] = None
tfds_data_dir: str = ""
tfds_as_supervised: bool = False
tfds_skip_decoding_feature: str = ""
enable_shared_tf_data_service_between_parallel_trainers: bool = False
apply_tf_data_service_before_batching: bool = False
trainer_id: Optional[str] = None
seed: Optional[int] = None
prefetch_buffer_size: Optional[int] = None
autotune_algorithm: Optional[str] = None
@dataclasses.dataclass
class RuntimeConfig(base_config.Config):
"""High-level configurations for Runtime.
These include parameters that are not directly related to the experiment,
e.g. directories, accelerator type, etc.
Attributes:
distribution_strategy: e.g. 'mirrored', 'tpu', etc.
enable_xla: Whether or not to enable XLA.
per_gpu_thread_count: thread count per GPU.
gpu_thread_mode: Whether and how the GPU device uses its own threadpool.
dataset_num_private_threads: Number of threads for a private threadpool
created for all datasets computation.
tpu: The address of the TPU to use, if any.
num_gpus: The number of GPUs to use, if any.
worker_hosts: comma-separated list of worker ip:port pairs for running
multi-worker models with DistributionStrategy.
task_index: If multi-worker training, the task index of this worker.
all_reduce_alg: Defines the algorithm for performing all-reduce.
num_packs: Sets `num_packs` in the cross device ops used in
MirroredStrategy. For details, see tf.distribute.NcclAllReduce.
mixed_precision_dtype: dtype of mixed precision policy. It can be 'float32',
'float16', or 'bfloat16'.
loss_scale: The type of loss scale, or 'float' value. This is used when
setting the mixed precision policy.
run_eagerly: Whether or not to run the experiment eagerly.
batchnorm_spatial_persistent: Whether or not to enable the spatial
persistent mode for CuDNN batch norm kernel for improved GPU performance.
"""
distribution_strategy: str = "mirrored"
enable_xla: bool = False
gpu_thread_mode: Optional[str] = None
dataset_num_private_threads: Optional[int] = None
per_gpu_thread_count: int = 0
tpu: Optional[str] = None
num_gpus: int = 0
worker_hosts: Optional[str] = None
task_index: int = -1
all_reduce_alg: Optional[str] = None
num_packs: int = 1
mixed_precision_dtype: Optional[str] = None
loss_scale: Optional[Union[str, float]] = None
run_eagerly: bool = False
batchnorm_spatial_persistent: bool = False
# XLA runtime params.
# XLA params are only applied to the train_step.
# These augments can improve training speed. They can also improve eval, but
# may reduce usability and users would need to make changes to code.
# Whether to enable XLA dynamic padder
# infrastructure to handle dynamic shapes inputs inside XLA. True by
# default. Disabling this may cause correctness issues with dynamic shapes
# inputs, as XLA will just assume the inputs are with padded shapes. However
# users can optionally set it to False to improve device time if masking is
# already handled in the user side.
# If None, will respect XLA default.
tpu_enable_xla_dynamic_padder: Optional[bool] = None
# Global model parallelism configurations.
num_cores_per_replica: int = 1
default_shard_dim: int = -1
use_tpu_mp_strategy: bool = False
def model_parallelism(self):
return dict(
num_cores_per_replica=self.num_cores_per_replica,
default_shard_dim=self.default_shard_dim)
@dataclasses.dataclass
class TrainerConfig(base_config.Config):
"""Configuration for trainer.
Attributes:
optimizer_config: optimizer config, it includes optimizer, learning rate,
and warmup schedule configs.
train_tf_while_loop: whether or not to use tf while loop.
train_tf_function: whether or not to use tf_function for training loop.
eval_tf_function: whether or not to use tf_function for eval.
allow_tpu_summary: Whether to allow summary happen inside the XLA program
runs on TPU through automatic outside compilation.
steps_per_loop: number of steps per loop to report training metrics. This
can also be used to reduce host worker communication in a TPU setup.
summary_interval: number of steps between each summary.
checkpoint_interval: number of steps between checkpoints.
max_to_keep: max checkpoints to keep.
continuous_eval_timeout: maximum number of seconds to wait between
checkpoints, if set to None, continuous eval will wait indefinitely. This
is only used continuous_train_and_eval and continuous_eval modes. Default
value is 1 hrs.
train_steps: number of train steps.
validation_steps: number of eval steps. If -1, the entire eval dataset is
used.
validation_interval: number of training steps to run between evaluations.
best_checkpoint_export_subdir: if set, the trainer will keep track of the
best evaluation metric, and export the corresponding best checkpoint under
`model_dir/best_checkpoint_export_subdir`. Note that this only works if
mode contains eval (such as `train_and_eval`, `continuous_eval`, and
`continuous_train_and_eval`).
best_checkpoint_eval_metric: for exporting the best checkpoint, which
evaluation metric the trainer should monitor. This can be any evaluation
metric appears on tensorboard.
best_checkpoint_metric_comp: for exporting the best checkpoint, how the
trainer should compare the evaluation metrics. This can be either `higher`
(higher the better) or `lower` (lower the better).
validation_summary_subdir: A 'str', sub directory for saving eval summary.
preemption_on_demand_checkpoint: whether or not to save on-demand
checkpoints after a preemption.
"""
optimizer_config: OptimizationConfig = dataclasses.field(
default_factory=OptimizationConfig
)
# Orbit settings.
train_tf_while_loop: bool = True
train_tf_function: bool = True
eval_tf_function: bool = True
eval_tf_while_loop: bool = False
allow_tpu_summary: bool = False
# Trainer intervals.
steps_per_loop: int = 1000
summary_interval: int = 1000
checkpoint_interval: int = 1000
# Checkpoint manager.
max_to_keep: int = 5
continuous_eval_timeout: int = 60 * 60
# Train/Eval routines.
train_steps: int = 0
# Sets validation steps to be -1 to evaluate the entire dataset.
validation_steps: int = -1
validation_interval: int = 1000
# Best checkpoint export.
best_checkpoint_export_subdir: str = ""
best_checkpoint_eval_metric: str = ""
best_checkpoint_metric_comp: str = "higher"
# Blowup recovery.
loss_upper_bound: float = 1e6
recovery_begin_steps: int = 0 # Enforcing the loss bound after these steps.
# When max trials < 0, no recovery module; max trials = 0, we will check
# the condition and fail the job if the condition happens; max trials > 0,
# we will retore the model states.
recovery_max_trials: int = 0
validation_summary_subdir: str = "validation"
# Preemption on-demand checkpoint.
preemption_on_demand_checkpoint: bool = True # copybara-replace
@dataclasses.dataclass
class TaskConfig(base_config.Config):
"""Config passed to task."""
init_checkpoint: str = ""
model: Optional[base_config.Config] = None
train_data: DataConfig = dataclasses.field(default_factory=DataConfig)
validation_data: DataConfig = dataclasses.field(default_factory=DataConfig)
name: Optional[str] = None
# Configs for differential privacy
# These configs are only effective if you use create_optimizer in
# tensorflow_models/official/core/base_task.py
# DEPRECATED b/264611883
differential_privacy_config: Optional[
dp_configs.DifferentialPrivacyConfig] = None
# Whether to show image summary. Useful to visualize model predictions. Only
# work for vision tasks.
allow_image_summary: bool = False
@dataclasses.dataclass
class ExperimentConfig(base_config.Config):
"""Top-level configuration."""
task: TaskConfig = dataclasses.field(default_factory=TaskConfig)
trainer: TrainerConfig = dataclasses.field(default_factory=TrainerConfig)
runtime: RuntimeConfig = dataclasses.field(default_factory=RuntimeConfig)
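# Hedged sketch of assembling the dataclasses above into a top-level
# experiment config; the file patterns, batch sizes and step counts are
# placeholder values.
def _example_experiment_config() -> ExperimentConfig:
  return ExperimentConfig(
      task=TaskConfig(
          train_data=DataConfig(
              input_path='/tmp/train*.tfrecord',  # placeholder pattern
              global_batch_size=64,
              is_training=True),
          validation_data=DataConfig(
              input_path='/tmp/eval*.tfrecord',  # placeholder pattern
              global_batch_size=64,
              is_training=False)),
      trainer=TrainerConfig(
          train_steps=10000,
          validation_interval=1000,
          checkpoint_interval=1000),
      runtime=RuntimeConfig(distribution_strategy='mirrored', num_gpus=1))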
# File: models-master/official/core/savedmodel_checkpoint_manager_test.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from typing import Iterable
import tensorflow as tf
from official.core import savedmodel_checkpoint_manager
def _models_exist(checkpoint_path: str, models: Iterable[str]) -> bool:
for model_name in models:
if not tf.io.gfile.isdir(
os.path.join(
savedmodel_checkpoint_manager.make_saved_modules_directory_name(
checkpoint_path), model_name)):
return False
return True
class _ModelForTest(tf.keras.Model):
def __init__(self, hidden_size: int = 8):
super().__init__()
self.dense = tf.keras.layers.Dense(hidden_size)
@tf.function(input_signature=[tf.TensorSpec([None, 16])])
def call(self, inputs):
return self.dense(inputs)
@property
def saved_model_signatures(self):
# Build SavedModel signatures.
return dict(serving_default=self.call)
class CheckpointManagerTest(tf.test.TestCase):
def _create_manager(self, max_to_keep: int = 1) -> tf.train.CheckpointManager:
"""Sets up SavedModelCheckpointManager object.
Args:
max_to_keep: max number of savedmodels to keep.
Returns:
created savedmodel manager.
"""
models = {
'model_1': _ModelForTest(12),
'model_2': _ModelForTest(14),
}
checkpoint = tf.train.Checkpoint()
manager = savedmodel_checkpoint_manager.SavedModelCheckpointManager(
checkpoint=checkpoint,
directory=self.get_temp_dir(),
max_to_keep=max_to_keep,
modules_to_export=models)
return manager
def test_max_to_keep(self):
manager = self._create_manager()
models = manager.modules_to_export
first_path = manager.save()
second_path = manager.save()
savedmodel = savedmodel_checkpoint_manager.make_saved_modules_directory_name(
manager.latest_checkpoint)
self.assertEqual(savedmodel, manager.latest_savedmodel)
self.assertTrue(_models_exist(second_path, models.keys()))
self.assertFalse(_models_exist(first_path, models.keys()))
def test_returns_none_after_timeout(self):
manager = self._create_manager()
start = time.time()
ret = manager.wait_for_new_savedmodel(
None, timeout=1.0, seconds_to_sleep=0.5)
end = time.time()
self.assertIsNone(ret)
# We've waited 0.5 second.
self.assertGreater(end, start + 0.5)
# The timeout kicked in.
self.assertLess(end, start + 0.6)
def test_saved_model_iterator(self):
manager = self._create_manager(max_to_keep=2)
self.assertIsNotNone(manager.save(checkpoint_number=1))
self.assertIsNotNone(manager.save(checkpoint_number=2))
self.assertIsNotNone(manager.save(checkpoint_number=3))
# Savedmodels are in time order.
expected_savedmodels = manager.savedmodels
# Order not guaranteed.
existing_savedmodels = manager.get_existing_savedmodels()
savedmodels = list(manager.savedmodels_iterator(timeout=3.0))
self.assertEqual(savedmodels, expected_savedmodels)
self.assertEqual(set(savedmodels), set(existing_savedmodels))
def test_saved_model_iterator_timeout_fn(self):
manager = self._create_manager()
timeout_fn_calls = [0]
def timeout_fn():
timeout_fn_calls[0] += 1
return timeout_fn_calls[0] > 3
results = list(
manager.savedmodels_iterator(timeout=0.1, timeout_fn=timeout_fn))
self.assertEqual([], results)
self.assertEqual(4, timeout_fn_calls[0])
if __name__ == '__main__':
tf.test.main()
# File: models-master/official/core/input_reader.py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A common dataset reader."""
import dataclasses
import random
from typing import Any, Callable, Dict, List, Optional, Sequence, Text, Union
from absl import logging
import tensorflow as tf
import tensorflow_datasets as tfds
from official.core import config_definitions as cfg
def _get_random_integer():
return random.randint(0, (1 << 31) - 1)
def _maybe_map_fn(dataset: tf.data.Dataset,
fn: Optional[Callable[..., Any]] = None) -> tf.data.Dataset:
"""Calls dataset.map if a valid function is passed in."""
return dataset if fn is None else dataset.map(
fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def match_files(input_path: Union[Sequence[str], str]) -> List[str]:
"""Matches files from an input_path."""
matched_files = []
# Read dataset from files.
usage = ('`input_path` should be either (1) a str indicating a file '
'path/pattern, or (2) a str indicating multiple file '
'paths/patterns separated by comma (e.g "a, b, c" or no spaces '
'"a,b,c", or (3) a list of str, each of which is a file '
'path/pattern or multiple file paths/patterns separated by '
'comma, but got: %s')
if isinstance(input_path, str):
input_path_list = [input_path]
elif isinstance(input_path, (list, tuple)):
if any(not isinstance(x, str) for x in input_path):
raise ValueError(usage % input_path)
input_path_list = input_path
else:
raise ValueError(usage % input_path)
for input_path in input_path_list:
input_patterns = input_path.strip().split(',')
for input_pattern in input_patterns:
input_pattern = input_pattern.strip()
if not input_pattern:
continue
if '*' in input_pattern or '?' in input_pattern:
tmp_matched_files = tf.io.gfile.glob(input_pattern)
if not tmp_matched_files:
raise ValueError('%s does not match any files.' % input_pattern)
matched_files.extend(tmp_matched_files)
else:
matched_files.append(input_pattern)
if not matched_files:
raise ValueError('%s does not match any files.' % input_path)
return matched_files
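# Hedged examples of the accepted `input_path` forms (the paths themselves are
# placeholders and must match real files at runtime):
#   match_files('/data/train-*.tfrecord')                     # glob pattern
#   match_files('/data/a.tfrecord,/data/b.tfrecord')           # comma-separated
#   match_files(['/data/a-*.tfrecord', '/data/b-*.tfrecord'])  # list of patterns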
def _read_files_then_shard(matched_files: List[str],
dataset_fn,
input_context: Optional[
tf.distribute.InputContext] = None,
sharding: bool = False,
repeat: bool = False) -> tf.data.Dataset:
"""Sends all data files to every worker and then shard by data."""
dataset = dataset_fn(matched_files)
# When `input_file` is a path to a single file or the number of files is
# less than the number of input pipelines, disable auto sharding
# so that same input file is sent to all workers.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.OFF)
dataset = dataset.with_options(options)
# Do not enable sharding if tf.data service is enabled, as sharding will be
# handled inside tf.data service.
if sharding and input_context and (input_context.num_input_pipelines > 1):
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
if repeat:
dataset = dataset.repeat()
return dataset
def _shard_files_then_read(matched_files: List[str],
dataset_fn,
input_context: Optional[
tf.distribute.InputContext] = None,
seed: Optional[Union[int, tf.Tensor]] = None,
is_training: bool = False,
sharding: bool = False,
cache: bool = False,
cycle_length: Optional[int] = None,
block_length: Optional[int] = None,
deterministic: bool = False) -> tf.data.Dataset:
"""Shards the data files and then sent a split to every worker to read."""
dataset = tf.data.Dataset.from_tensor_slices(matched_files)
# Shuffle and repeat at file level.
# If cache is enabled, `reshuffle_each_iteration` is set to False,
# because we will read the same cached data in every iteration anyway.
if is_training:
# We need a seed to shuffle the files so that when each TPU workers gets
# its own shard the files do not overlap.
if sharding and seed is None:
seed = _get_random_integer()
dataset = dataset.shuffle(
len(matched_files),
seed=seed,
reshuffle_each_iteration=True if not cache else False)
# Do not enable sharding if tf.data service is enabled, as sharding will be
# handled inside tf.data service.
if sharding and input_context and (input_context.num_input_pipelines > 1):
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
# If cache is enabled, we will call `repeat()` later after `cache()`.
if is_training and not cache:
dataset = dataset.repeat()
dataset = dataset.interleave(
map_func=dataset_fn,
cycle_length=cycle_length,
block_length=block_length,
num_parallel_calls=(cycle_length
if cycle_length else tf.data.experimental.AUTOTUNE),
deterministic=deterministic)
return dataset
def _read_tfds(tfds_name: Text,
tfds_data_dir: Text,
tfds_split: Text,
tfds_skip_decoding_feature: Text,
tfds_as_supervised: bool,
input_context: Optional[tf.distribute.InputContext] = None,
seed: Optional[Union[int, tf.Tensor]] = None,
is_training: bool = False,
cache: bool = False,
cycle_length: Optional[int] = None,
block_length: Optional[int] = None) -> tf.data.Dataset:
"""Reads a dataset from tfds."""
repeat_filenames = is_training and not cache
read_config = tfds.ReadConfig(
interleave_cycle_length=cycle_length,
interleave_block_length=block_length,
input_context=input_context,
shuffle_seed=seed,
repeat_filenames=repeat_filenames,
# Only assert cardinality when we have a finite dataset.
assert_cardinality=not repeat_filenames,
skip_prefetch=True)
decoders = {}
if tfds_skip_decoding_feature:
for skip_feature in tfds_skip_decoding_feature.split(','):
decoders[skip_feature.strip()] = tfds.decode.SkipDecoding()
if tfds_name.startswith('mldataset.'):
dataset = tfds.load(name=tfds_name,
split=tfds_split,
as_supervised=tfds_as_supervised,
decoders=decoders if decoders else None,
read_config=read_config)
else:
builder = tfds.builder(tfds_name, data_dir=tfds_data_dir)
if builder.info.splits:
num_shards = len(builder.info.splits[tfds_split].file_instructions)
else:
# The tfds mock path often does not provide splits.
num_shards = 1
load_kwargs = dict(
name=tfds_name, download=True, split=tfds_split,
shuffle_files=is_training, as_supervised=tfds_as_supervised,
decoders=decoders if decoders else None)
if tfds_data_dir:
load_kwargs.update({'data_dir': tfds_data_dir})
if input_context and num_shards < input_context.num_input_pipelines:
# The number of files in the dataset split is smaller than the number of
# input pipelines. We read the entire dataset first and then shard in the
# host memory.
read_config = dataclasses.replace(read_config, input_context=None)
load_kwargs.update({'read_config': read_config})
dataset = tfds.load(**load_kwargs)
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
else:
load_kwargs.update({'read_config': read_config})
dataset = tfds.load(**load_kwargs)
return dataset
class InputReader:
"""Input reader that returns a tf.data.Dataset instance."""
# A static random number which is the same across different InputReader
# instances.
static_randnum = _get_random_integer()
def __init__(
self,
params: cfg.DataConfig,
dataset_fn=tf.data.TFRecordDataset,
decoder_fn: Optional[Callable[..., Any]] = None,
combine_fn: Optional[Callable[..., Any]] = None,
sample_fn: Optional[Callable[..., Any]] = None,
parser_fn: Optional[Callable[..., Any]] = None,
filter_fn: Optional[Callable[..., tf.Tensor]] = None,
transform_and_batch_fn: Optional[
Callable[
[tf.data.Dataset, Optional[tf.distribute.InputContext]],
tf.data.Dataset,
]
] = None,
postprocess_fn: Optional[Callable[..., Any]] = None,
):
"""Initializes an InputReader instance.
Args:
params: A config_definitions.DataConfig object.
dataset_fn: A `tf.data.Dataset` that consumes the input files. For
example, it can be `tf.data.TFRecordDataset`.
decoder_fn: An optional `callable` that takes the serialized data string
and decodes them into the raw tensor dictionary.
combine_fn: An optional `callable` that takes a dictionarty of
`tf.data.Dataset` objects as input and outputs a combined dataset. It
will be executed after the decoder_fn and before the sample_fn.
sample_fn: An optional `callable` that takes a `tf.data.Dataset` object as
input and outputs the transformed dataset. It performs sampling on the
decoded raw tensors dict before the parser_fn.
parser_fn: An optional `callable` that takes the decoded raw tensors dict
and parse them into a dictionary of tensors that can be consumed by the
model. It will be executed after decoder_fn.
filter_fn: An optional `callable` mapping a dataset element to a boolean.
It will be executed after parser_fn.
transform_and_batch_fn: An optional `callable` that takes a
`tf.data.Dataset` object and an optional `tf.distribute.InputContext` as
input, and returns a `tf.data.Dataset` object. It will be executed after
`parser_fn` to transform and batch the dataset; if None, after
`parser_fn` is executed, the dataset will be batched into per-replica
batch size.
postprocess_fn: A optional `callable` that processes batched tensors. It
will be executed after batching.
"""
if params.input_path and params.tfds_name:
raise ValueError('At most one of `input_path` and `tfds_name` can be '
'specified, but got %s and %s.' %
(params.input_path, params.tfds_name))
if (isinstance(params.input_path, cfg.base_config.Config) or
isinstance(params.tfds_name, cfg.base_config.Config)
) and combine_fn is None:
raise ValueError(
'A combine_fn is required if `input_path` or `tfds_name` is a dict.')
self._tfds_name = params.tfds_name
self._tfds_data_dir = params.tfds_data_dir
self._matched_files = None
if not params.input_path:
# Read dataset from TFDS.
if not params.tfds_split:
raise ValueError(
'`tfds_name` is %s, but `tfds_split` is not specified.' %
params.tfds_name)
else:
self._matched_files = self.get_files(params.input_path)
self._global_batch_size = params.global_batch_size
self._is_training = params.is_training
self._drop_remainder = params.drop_remainder
self._shuffle_buffer_size = params.shuffle_buffer_size
self._cache = params.cache
self._cycle_length = params.cycle_length
self._block_length = params.block_length
self._deterministic = params.deterministic
self._sharding = params.sharding
self._tfds_split = params.tfds_split
self._tfds_as_supervised = params.tfds_as_supervised
self._tfds_skip_decoding_feature = params.tfds_skip_decoding_feature
self._dataset_fn = dataset_fn
self._decoder_fn = decoder_fn
self._combine_fn = combine_fn
self._sample_fn = sample_fn
self._parser_fn = parser_fn
self._transform_and_batch_fn = transform_and_batch_fn
self._postprocess_fn = postprocess_fn
self._filter_fn = filter_fn
self._seed = params.seed
self._prefetch_buffer_size = (
params.prefetch_buffer_size or tf.data.experimental.AUTOTUNE)
self._autotune_algorithm = params.autotune_algorithm
# When tf.data service is enabled, each data service worker should get
# different random seeds. Thus, we set `seed` to None.
    # Sharding should also be disabled because tf.data service handles how
    # each worker shards data with `processing_mode` in the distribute method.
if params.enable_tf_data_service:
self._seed = None
self._sharding = False
self._enable_tf_data_service = (
params.enable_tf_data_service and params.tf_data_service_address)
self._tf_data_service_address = params.tf_data_service_address
self._enable_shared_tf_data_service_between_parallel_trainers = (
params.enable_shared_tf_data_service_between_parallel_trainers)
self._apply_tf_data_service_before_batching = (
params.apply_tf_data_service_before_batching)
self._trainer_id = params.trainer_id
if self._enable_tf_data_service:
      # Add a random seed as the tf.data service job name suffix, so tf.data
      # service doesn't reuse the previous state if a TPU worker gets preempted.
      # It's necessary to add the global batch size into the tf.data service
      # job name because when tuning the batch size with Vizier while tf.data
      # service is also enabled, the tf.data service job name should be
      # different for different Vizier trials: once the batch size changes, the
      # dataset is a different instance from the tf.data perspective, and a
      # different job name should be used for tf.data service. Otherwise, the
      # model would read tensors from the incorrect tf.data service job, which
      # would cause a dimension mismatch on the batch size dimension.
self._tf_data_service_job_name = (
f'{params.tf_data_service_job_name}_bs{params.global_batch_size}_'
f'{self.static_randnum}')
self._enable_round_robin_tf_data_service = params.get(
'enable_round_robin_tf_data_service', False)
if self._enable_shared_tf_data_service_between_parallel_trainers:
# When shared tf.data service is enabled, only a single tf.data service
# instance should be created and shared between parallel trainers. If
# the global batch size is different across trainers,
# params.apply_tf_data_service_before_batching should be set to true
# because tf.data service with different batch sizes will be considered
# separate tf.data service instances.
self._tf_data_service_job_name = (
f'{params.tf_data_service_job_name}_{self.static_randnum}')
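  # A minimal construction sketch (hypothetical; `my_decoder`, `my_parser` and
  # `data_config` are assumed to exist, with `data_config` being a
  # cfg.DataConfig that sets `input_path` and `global_batch_size`):
  #
  #   reader = InputReader(
  #       params=data_config,
  #       dataset_fn=tf.data.TFRecordDataset,
  #       decoder_fn=my_decoder,  # serialized bytes -> raw tensor dict
  #       parser_fn=my_parser)    # raw tensor dict -> model-ready tensors
  #   dataset = reader.read()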
def get_files(self, input_path):
"""Gets matched files. Can be overridden by subclasses."""
if not input_path:
return None
# we want to combine / mix datasets
if isinstance(input_path, cfg.base_config.Config):
matched_files = {}
for k, v in input_path.as_dict().items():
matched_files[k] = match_files(v)
# single dataset
else:
matched_files = match_files(input_path)
return matched_files
def _read_data_source(
self,
matched_files: Union[Dict[str, List[str]], List[str]],
dataset_fn,
input_context: Optional[tf.distribute.InputContext] = None,
):
"""Reads the data source (files/tfds) to a dataset."""
def _files_to_dataset(files: List[str]) -> tf.data.Dataset:
if len(files) > 1:
if input_context and (len(files) < input_context.num_input_pipelines):
          logging.warning(
(
'The number of files %d is less than the number of input '
'pipelines %d. We will send all input files to every worker. '
'Please consider sharding your data into more files.'
),
len(files),
input_context.num_input_pipelines,
)
return _read_files_then_shard(
files,
dataset_fn,
input_context,
sharding=self._sharding,
repeat=self._is_training and not self._cache)
else:
return _shard_files_then_read(
files,
dataset_fn,
input_context,
seed=self._seed,
is_training=self._is_training,
sharding=self._sharding,
cache=self._cache,
cycle_length=self._cycle_length,
block_length=self._block_length,
deterministic=self._deterministic)
elif len(files) == 1:
return _read_files_then_shard(
files,
dataset_fn,
input_context,
sharding=self._sharding,
repeat=self._is_training and not self._cache)
else:
        raise ValueError('It is unexpected that `tfds_name` is unset and '
                         'there are also no input `files` to read.')
if self._tfds_name:
if isinstance(self._tfds_name, cfg.base_config.Config):
dataset = {}
for k, tfds_name in self._tfds_name.as_dict().items():
dataset[k] = _read_tfds(
tfds_name=tfds_name,
tfds_data_dir=self._tfds_data_dir,
tfds_split=self._tfds_split,
tfds_skip_decoding_feature=self._tfds_skip_decoding_feature,
tfds_as_supervised=self._tfds_as_supervised,
input_context=input_context,
seed=self._seed,
is_training=self._is_training,
cache=self._cache,
cycle_length=self._cycle_length,
block_length=self._block_length)
else:
dataset = _read_tfds(
tfds_name=self._tfds_name,
tfds_data_dir=self._tfds_data_dir,
tfds_split=self._tfds_split,
tfds_skip_decoding_feature=self._tfds_skip_decoding_feature,
tfds_as_supervised=self._tfds_as_supervised,
input_context=input_context,
seed=self._seed,
is_training=self._is_training,
cache=self._cache,
cycle_length=self._cycle_length,
block_length=self._block_length)
elif isinstance(matched_files, (list, tuple)):
dataset = _files_to_dataset(matched_files)
elif isinstance(matched_files, dict):
dataset = {}
for k, fs in matched_files.items():
dataset[k] = _files_to_dataset(fs)
else:
raise ValueError('`matched_files` should be a list or dict.')
return dataset
def _decode_and_parse_dataset(
self,
dataset: Union[tf.data.Dataset, Dict[Text, tf.data.Dataset]],
batch_size: int,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Returns a tf.data.Dataset object after shuffling, decoding, and parsing."""
def _shuffle_and_decode(ds):
# If cache is enabled, we will call `shuffle()` later after `cache()`.
if self._is_training and not self._cache:
ds = ds.shuffle(self._shuffle_buffer_size, seed=self._seed)
# Decode
ds = _maybe_map_fn(ds, self._decoder_fn)
return ds
dataset = tf.nest.map_structure(_shuffle_and_decode, dataset)
if tf.nest.is_nested(dataset):
dataset = self._combine_fn(dataset)
if self._sample_fn is not None:
dataset = dataset.apply(self._sample_fn)
dataset = _maybe_map_fn(dataset, self._parser_fn)
if self._filter_fn is not None:
dataset = dataset.filter(self._filter_fn)
if self._cache:
dataset = dataset.cache()
if self._is_training:
dataset = dataset.repeat()
dataset = dataset.shuffle(self._shuffle_buffer_size, seed=self._seed)
    # Applies tf.data service before batching operations. This is useful when
    # tf.data service is shared between parallel trainers and the batch size
    # differs between them. When the batch size changes, tf.data services
    # applied after batching operations are considered different instances,
    # which makes it difficult to share them between parallel trainers.
    # However, if there are additional expensive operations in
    # self._transform_and_batch_fn and self._postprocess_fn, the entire tf.data
    # pipeline could be slowed down. In this case, try to move these dataset
    # operations into early stages if possible.
if (self._enable_shared_tf_data_service_between_parallel_trainers and
self._apply_tf_data_service_before_batching):
dataset = self._maybe_apply_data_service(dataset, input_context)
if self._transform_and_batch_fn is not None:
dataset = self._transform_and_batch_fn(dataset, input_context)
else:
per_replica_batch_size = input_context.get_per_replica_batch_size(
batch_size) if input_context else batch_size
dataset = dataset.batch(
per_replica_batch_size, drop_remainder=self._drop_remainder)
return dataset
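  # Summary of the pipeline order implemented above: shuffle + decode ->
  # combine -> sample -> parse -> filter -> cache -> repeat + shuffle ->
  # (optional tf.data service) -> transform_and_batch_fn or per-replica batch.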
def _maybe_apply_data_service(
self,
dataset: tf.data.Dataset,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Potentially distributes a dataset."""
if self._enable_tf_data_service and input_context:
if self._enable_round_robin_tf_data_service:
replicas_per_input_pipeline = input_context.num_replicas_in_sync // (
input_context.num_input_pipelines)
base_consumer_index = input_context.input_pipeline_id * (
replicas_per_input_pipeline)
num_consumers = input_context.num_input_pipelines * (
replicas_per_input_pipeline)
range_dataset = tf.data.Dataset.range(replicas_per_input_pipeline)
tfds_kwargs = {
'processing_mode': 'parallel_epochs',
'service': self._tf_data_service_address,
'job_name': self._tf_data_service_job_name,
'num_consumers': num_consumers
}
if self._enable_shared_tf_data_service_between_parallel_trainers:
raise ValueError('Shared tf.data service does not support round-robin'
' tf.data service.')
dataset = range_dataset.map(lambda i: dataset.apply( # pylint: disable=g-long-lambda
tf.data.experimental.service.distribute(
consumer_index=base_consumer_index + i, **tfds_kwargs)))
# Use parallel interleave to read multiple batches from a tf.data
# service worker in parallel.
dataset = dataset.interleave(
lambda x: x,
cycle_length=replicas_per_input_pipeline,
num_parallel_calls=replicas_per_input_pipeline,
deterministic=True)
else:
tfds_kwargs = {
'processing_mode': 'parallel_epochs',
'service': self._tf_data_service_address,
'job_name': self._tf_data_service_job_name,
}
if self._enable_shared_tf_data_service_between_parallel_trainers:
tfds_kwargs.update({
'processing_mode':
tf.data.experimental.service.ShardingPolicy.OFF,
'cross_trainer_cache':
tf.data.experimental.service.CrossTrainerCache(
trainer_id=self._trainer_id)
})
dataset = dataset.apply(
tf.data.experimental.service.distribute(**tfds_kwargs))
return dataset
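  # Worked example of the round-robin index math above (a sketch, not from the
  # source): with 8 replicas in sync and 2 input pipelines,
  # replicas_per_input_pipeline = 8 // 2 = 4, so pipeline 0 serves consumer
  # indices 0..3, pipeline 1 serves 4..7, and num_consumers = 2 * 4 = 8.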
def read(self,
input_context: Optional[tf.distribute.InputContext] = None,
dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""Generates a tf.data.Dataset object."""
if dataset is None:
dataset = self._read_data_source(self._matched_files, self._dataset_fn,
input_context)
dataset = self._decode_and_parse_dataset(dataset, self._global_batch_size,
input_context)
dataset = _maybe_map_fn(dataset, self._postprocess_fn)
if not (self._enable_shared_tf_data_service_between_parallel_trainers and
self._apply_tf_data_service_before_batching):
dataset = self._maybe_apply_data_service(dataset, input_context)
if self._deterministic is not None:
options = tf.data.Options()
options.deterministic = self._deterministic
dataset = dataset.with_options(options)
if self._autotune_algorithm:
options = tf.data.Options()
options.autotune.autotune_algorithm = (
tf.data.experimental.AutotuneAlgorithm[self._autotune_algorithm])
dataset = dataset.with_options(options)
return dataset.prefetch(self._prefetch_buffer_size)
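  # A hedged sketch of distributing the reader with a strategy (the `strategy`
  # and `reader` objects are assumed to already exist):
  #
  #   per_replica_dataset = strategy.distribute_datasets_from_function(
  #       lambda input_context: reader.read(input_context))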
| 25,588 | 42.224662 | 93 | py |
models | models-master/official/core/registry_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for registry."""
import tensorflow as tf
from official.core import registry
class RegistryTest(tf.test.TestCase):
def test_register(self):
collection = {}
@registry.register(collection, 'functions/func_0')
def func_test():
pass
self.assertEqual(registry.lookup(collection, 'functions/func_0'), func_test)
@registry.register(collection, 'classes/cls_0')
class ClassRegistryKey:
pass
self.assertEqual(
registry.lookup(collection, 'classes/cls_0'), ClassRegistryKey)
@registry.register(collection, ClassRegistryKey)
class ClassRegistryValue:
pass
self.assertEqual(
registry.lookup(collection, ClassRegistryKey), ClassRegistryValue)
def test_register_hierarchy(self):
collection = {}
@registry.register(collection, 'functions/func_0')
def func_test0():
pass
@registry.register(collection, 'func_1')
def func_test1():
pass
@registry.register(collection, func_test1)
def func_test2():
pass
expected_collection = {
'functions': {
'func_0': func_test0,
},
'func_1': func_test1,
func_test1: func_test2,
}
self.assertEqual(collection, expected_collection)
def test_register_error(self):
collection = {}
@registry.register(collection, 'functions/func_0')
def func_test0(): # pylint: disable=unused-variable
pass
with self.assertRaises(KeyError):
@registry.register(collection, 'functions/func_0/sub_func')
def func_test1(): # pylint: disable=unused-variable
pass
with self.assertRaises(LookupError):
registry.lookup(collection, 'non-exist')
if __name__ == '__main__':
tf.test.main()
| 2,350 | 25.41573 | 80 | py |
models | models-master/official/core/train_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.core.train_utils."""
import json
import os
import pprint
import numpy as np
import tensorflow as tf
from official.core import exp_factory
from official.core import test_utils
from official.core import train_utils
from official.modeling import hyperparams
@exp_factory.register_config_factory('foo')
def foo():
"""Multitask experiment for test."""
experiment_config = hyperparams.Config(
default_params={
'runtime': {
'tpu': 'fake',
},
'task': {
'model': {
'model_id': 'bar',
},
},
'trainer': {
'train_steps': -1,
'validation_steps': -1,
},
})
return experiment_config
class TrainUtilsTest(tf.test.TestCase):
def test_get_leaf_nested_dict(self):
d = {'a': {'i': {'x': 5}}}
self.assertEqual(train_utils.get_leaf_nested_dict(d, ['a', 'i', 'x']), 5)
def test_get_leaf_nested_dict_not_leaf(self):
with self.assertRaisesRegex(KeyError, 'The value extracted with keys.*'):
d = {'a': {'i': {'x': 5}}}
train_utils.get_leaf_nested_dict(d, ['a', 'i'])
def test_get_leaf_nested_dict_path_not_exist_missing_key(self):
with self.assertRaisesRegex(KeyError, 'Path not exist while traversing .*'):
d = {'a': {'i': {'x': 5}}}
train_utils.get_leaf_nested_dict(d, ['a', 'i', 'y'])
def test_get_leaf_nested_dict_path_not_exist_out_of_range(self):
with self.assertRaisesRegex(KeyError, 'Path not exist while traversing .*'):
d = {'a': {'i': {'x': 5}}}
train_utils.get_leaf_nested_dict(d, ['a', 'i', 'z'])
def test_get_leaf_nested_dict_path_not_exist_meets_leaf(self):
with self.assertRaisesRegex(KeyError, 'Path not exist while traversing .*'):
d = {'a': {'i': 5}}
train_utils.get_leaf_nested_dict(d, ['a', 'i', 'z'])
def test_cast_leaf_nested_dict(self):
d = {'a': {'i': {'x': '123'}}, 'b': 456.5}
d = train_utils.cast_leaf_nested_dict(d, int)
self.assertEqual(d['a']['i']['x'], 123)
self.assertEqual(d['b'], 456)
def test_write_model_params_keras_model(self):
inputs = np.zeros([2, 3])
model = test_utils.FakeKerasModel()
model(inputs) # Must do forward pass to build the model.
filepath = os.path.join(self.create_tempdir(), 'model_params.txt')
train_utils.write_model_params(model, filepath)
actual = tf.io.gfile.GFile(filepath, 'r').read().splitlines()
expected = [
'fake_keras_model/dense/kernel:0 [3, 4]',
'fake_keras_model/dense/bias:0 [4]',
'fake_keras_model/dense_1/kernel:0 [4, 4]',
'fake_keras_model/dense_1/bias:0 [4]',
'',
'Total params: 36',
]
self.assertEqual(actual, expected)
def test_write_model_params_module(self):
inputs = np.zeros([2, 3], dtype=np.float32)
model = test_utils.FakeModule(3, name='fake_module')
model(inputs) # Must do forward pass to build the model.
filepath = os.path.join(self.create_tempdir(), 'model_params.txt')
train_utils.write_model_params(model, filepath)
actual = tf.io.gfile.GFile(filepath, 'r').read().splitlines()
expected = [
'fake_module/dense/b:0 [4]',
'fake_module/dense/w:0 [3, 4]',
'fake_module/dense_1/b:0 [4]',
'fake_module/dense_1/w:0 [4, 4]',
'',
'Total params: 36',
]
self.assertEqual(actual, expected)
def test_construct_experiment_from_flags(self):
options = train_utils.ParseConfigOptions(
experiment='foo',
config_file=[],
tpu='bar',
tf_data_service='',
params_override='task.model.model_id=new,'
'trainer.train_steps=10,'
'trainer.validation_steps=11')
builder = train_utils.ExperimentParser(options)
params_from_obj = builder.parse()
params_from_func = train_utils.parse_configuration(options)
pp = pprint.PrettyPrinter()
self.assertEqual(
pp.pformat(params_from_obj.as_dict()),
pp.pformat(params_from_func.as_dict()))
self.assertEqual(params_from_obj.runtime.tpu, 'bar')
self.assertEqual(params_from_obj.task.model.model_id, 'new')
self.assertEqual(params_from_obj.trainer.train_steps, 10)
self.assertEqual(params_from_obj.trainer.validation_steps, 11)
class BestCheckpointExporterTest(tf.test.TestCase):
def test_maybe_export(self):
model_dir = self.create_tempdir().full_path
best_ckpt_path = os.path.join(model_dir, 'best_ckpt-1')
metric_name = 'test_metric|metric_1'
exporter = train_utils.BestCheckpointExporter(
model_dir, metric_name, 'higher')
v = tf.Variable(1.0)
checkpoint = tf.train.Checkpoint(v=v)
ret = exporter.maybe_export_checkpoint(
checkpoint, {'test_metric': {'metric_1': 5.0}}, 100)
with self.subTest(name='Successful first save.'):
self.assertEqual(ret, True)
v_2 = tf.Variable(2.0)
checkpoint_2 = tf.train.Checkpoint(v=v_2)
checkpoint_2.restore(best_ckpt_path)
self.assertEqual(v_2.numpy(), 1.0)
v = tf.Variable(3.0)
checkpoint = tf.train.Checkpoint(v=v)
ret = exporter.maybe_export_checkpoint(
checkpoint, {'test_metric': {'metric_1': 6.0}}, 200)
    with self.subTest(name='Successful better metric save.'):
self.assertEqual(ret, True)
v_2 = tf.Variable(2.0)
checkpoint_2 = tf.train.Checkpoint(v=v_2)
checkpoint_2.restore(best_ckpt_path)
self.assertEqual(v_2.numpy(), 3.0)
v = tf.Variable(5.0)
checkpoint = tf.train.Checkpoint(v=v)
ret = exporter.maybe_export_checkpoint(
checkpoint, {'test_metric': {'metric_1': 1.0}}, 300)
    with self.subTest(name='Worse metric no save.'):
self.assertEqual(ret, False)
v_2 = tf.Variable(2.0)
checkpoint_2 = tf.train.Checkpoint(v=v_2)
checkpoint_2.restore(best_ckpt_path)
self.assertEqual(v_2.numpy(), 3.0)
def test_export_best_eval_metric(self):
model_dir = self.create_tempdir().full_path
metric_name = 'test_metric|metric_1'
exporter = train_utils.BestCheckpointExporter(model_dir, metric_name,
'higher')
exporter.export_best_eval_metric({'test_metric': {'metric_1': 5.0}}, 100)
with tf.io.gfile.GFile(os.path.join(model_dir, 'info.json'),
'rb') as reader:
metric = json.loads(reader.read())
self.assertAllEqual(
metric,
{'test_metric': {'metric_1': 5.0}, 'best_ckpt_global_step': 100.0})
def test_export_best_eval_metric_skips_non_scalar_values(self):
model_dir = self.create_tempdir().full_path
metric_name = 'test_metric|metric_1'
exporter = train_utils.BestCheckpointExporter(model_dir, metric_name,
'higher')
image = tf.zeros(shape=[16, 8, 1])
eval_logs = {'test_metric': {'metric_1': 5.0, 'image': image}}
exporter.export_best_eval_metric(eval_logs, 100)
with tf.io.gfile.GFile(os.path.join(model_dir, 'info.json'),
'rb') as reader:
metric = json.loads(reader.read())
self.assertAllEqual(
metric,
{'test_metric': {'metric_1': 5.0}, 'best_ckpt_global_step': 100.0})
if __name__ == '__main__':
tf.test.main()
| 7,893 | 35.546296 | 80 | py |
models | models-master/official/core/tf_example_builder_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_example_builder.
See `test_add_image_matrix_feature_with_fake_image` for the typical structure of
a unit test.
"""
from absl.testing import parameterized
import tensorflow as tf
from official.core import tf_example_builder
class TfExampleBuilderTest(tf.test.TestCase, parameterized.TestCase):
def test_init_an_empty_example(self):
example_builder = tf_example_builder.TfExampleBuilder()
example = example_builder.example
self.assertProtoEquals('', example)
def test_init_an_empty_serialized_example(self):
example_builder = tf_example_builder.TfExampleBuilder()
example = example_builder.serialized_example
self.assertProtoEquals('', example)
def test_add_feature(self):
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_feature(
'foo',
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b'Hello World!'])))
example = example_builder.example
# Use proto text to show how the entire proto would look like.
self.assertProtoEquals(
"""
features: {
feature: {
key: "foo"
value: {
bytes_list: {
value: "Hello World!"
}
}
}
}""", example)
def test_add_feature_dict(self):
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_feature_dict({
'foo':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b'Hello World!'])),
'bar':
tf.train.Feature(
int64_list=tf.train.Int64List(value=[299, 792, 458]))
})
example = example_builder.example
# Use proto text to show how the entire proto would look like.
self.assertProtoEquals(
"""
features: {
feature: {
key: "foo"
value: {
bytes_list: {
value: "Hello World!"
}
}
}
feature: {
key: "bar"
value: {
int64_list: {
value: 299
value: 792
value: 458
}
}
}
}""", example)
@parameterized.named_parameters(
('single_bytes', b'Hello World!', b'Hello World!'),
('single_string', 'Hello World!', b'Hello World!'))
def test_add_single_byte_feature(self, value, expected_value):
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_bytes_feature('foo', value)
example = example_builder.example
# Use constructor to easily work with test parameters.
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'foo':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[expected_value]))
})), example)
@parameterized.named_parameters(
('multiple_bytes', [b'Hello World!', b'Good Morning!'
], [b'Hello World!', b'Good Morning!']),
      ('multiple_string', ['Hello World!', 'Good Morning!'
], [b'Hello World!', b'Good Morning!']))
def test_add_multiple_bytes_feature(self, values, expected_values):
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_bytes_feature('foo', values)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'foo':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=expected_values))
})), example)
@parameterized.named_parameters(
('single_integer', 123, [123]),
('multiple_integers', [123, 456, 789], [123, 456, 789]))
def test_add_ints_feature(self, value, expected_value):
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_ints_feature('bar', value)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'bar':
tf.train.Feature(
int64_list=tf.train.Int64List(value=expected_value))
})), example)
@parameterized.named_parameters(
('single_float', 3.14, [3.14]),
('multiple_floats', [3.14, 1.57, 6.28], [3.14, 1.57, 6.28]))
def test_add_floats_feature(self, value, expected_value):
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_floats_feature('baz', value)
example = example_builder.example
self.assertProtoEquals(
tf.train.Example(
features=tf.train.Features(
feature={
'baz':
tf.train.Feature(
float_list=tf.train.FloatList(value=expected_value))
})), example)
if __name__ == '__main__':
tf.test.main()
| 5,793 | 33.903614 | 80 | py |
models | models-master/official/core/base_trainer.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standard Trainer implementation.
The base trainer implements the Orbit `StandardTrainable` and
`StandardEvaluable` interfaces. Trainers inside this project should be
interchangable and independent on model architectures and tasks.
"""
import functools
from typing import Union, Optional
from absl import logging
import gin
import orbit
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions
from official.modeling import optimization
ExperimentConfig = config_definitions.ExperimentConfig
TrainerConfig = config_definitions.TrainerConfig
class _AsyncTrainer(orbit.StandardTrainer, orbit.StandardEvaluator):
"""Trainer class for both sync and async Strategy."""
def init_async(self):
"""Initializes the Async Trainer base class."""
assert isinstance(self._strategy, tf.distribute.Strategy)
self._is_async = isinstance(
self._strategy, tf.distribute.experimental.ParameterServerStrategy)
self._coordinator = None
if self._is_async:
self._coordinator = (
tf.distribute.experimental.coordinator.ClusterCoordinator(
self._strategy))
def coordinator_for_async(
self,
) -> tf.distribute.experimental.coordinator.ClusterCoordinator:
if not self._coordinator:
raise ValueError(
"Coordinator uninitialized for async run. Call init_async() first."
)
return self._coordinator
def join(self):
"""Join all async steps. Only useful in aysnc training."""
if getattr(self, "_is_async", False):
self.coordinator_for_async().join()
def create_train_loop_fn(self):
"""Creates a eval loop from the given step function and options."""
train_loop_fn = super().create_train_loop_fn()
if getattr(self, "_is_async", False):
def _async_loop_fn(iterator, num_steps):
self.coordinator_for_async().schedule(
train_loop_fn, args=(iterator, num_steps)
)
return _async_loop_fn
else:
return train_loop_fn
def create_eval_loop_fn(self, has_state: bool):
"""Creates a training loop from the given step function and options."""
eval_loop_fn = super().create_eval_loop_fn(has_state)
if getattr(self, "_is_async", False):
if has_state:
raise ValueError(
"Stateful eval loop is not supported in async training.")
def _async_loop_fn(iterator, num_steps, state=None, reduce_fn=None):
assert state is None
assert reduce_fn is None
self.coordinator_for_async().schedule(
eval_loop_fn, args=(iterator, num_steps)
)
return _async_loop_fn
else:
return eval_loop_fn
def distribute_dataset(self, dataset_or_fn, *args, **kwargs):
"""A utility function to help create a `tf.distribute.DistributedDataset`.
Args:
      dataset_or_fn: An instance of `tf.data.Dataset`, or a "dataset function"
returning a `tf.data.Dataset`. If it is a function, it may optionally
have an argument named `input_context` which will be passed a
`tf.distribute.InputContext` instance.
*args: Any positional arguments to pass through to `dataset_or_fn`.
**kwargs: Any keyword arguments to pass through to `dataset_or_fn`.
Returns:
A distributed Dataset.
"""
if getattr(self, "_is_async", False):
per_worker_dataset_fn = functools.partial(
orbit.utils.make_distributed_dataset, self._strategy, dataset_or_fn,
*args, **kwargs)
per_worker_dataset_fn = tf.function(per_worker_dataset_fn)
return self.coordinator_for_async().create_per_worker_dataset(
per_worker_dataset_fn
)
else:
return orbit.utils.make_distributed_dataset(self._strategy, dataset_or_fn,
*args, **kwargs)
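  # A hedged sketch of a "dataset function" accepted above (illustrative only;
  # the dataset contents and batch size are assumptions):
  #
  #   def dataset_fn(input_context: tf.distribute.InputContext):
  #     ds = tf.data.Dataset.range(1024)
  #     ds = ds.shard(input_context.num_input_pipelines,
  #                   input_context.input_pipeline_id)
  #     return ds.batch(input_context.get_per_replica_batch_size(64))
  #
  #   distributed_ds = trainer.distribute_dataset(dataset_fn)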
def get_runtime_options(config: ExperimentConfig):
"""Get tf.distribute.RunOptions from config."""
xla_options = {}
if config.runtime.tpu_enable_xla_dynamic_padder is not None:
xla_options["enable_xla_dynamic_padder"] = (
config.runtime.tpu_enable_xla_dynamic_padder)
return tf.distribute.RunOptions(
experimental_xla_options=tf.tpu.XLAOptions(**xla_options))
@gin.configurable
class Trainer(_AsyncTrainer):
"""Implements the common trainer shared for TensorFlow models."""
# pylint: disable=super-init-not-called
def __init__(
self,
config: ExperimentConfig,
task: base_task.Task,
model: tf.keras.Model,
optimizer: tf.optimizers.Optimizer,
train: bool = True,
evaluate: bool = True,
train_dataset: Optional[Union[tf.data.Dataset,
tf.distribute.DistributedDataset]] = None,
validation_dataset: Optional[Union[
tf.data.Dataset, tf.distribute.DistributedDataset]] = None,
checkpoint_exporter=None):
"""Initialize common trainer for TensorFlow models.
Args:
config: An `ExperimentConfig` instance specifying experiment config.
task: A base_task.Task instance.
model: The model instance, e.g. a tf.keras.Model instance.
optimizer: tf.optimizers.Optimizer instance.
      train: bool, whether or not this trainer will be used for training.
        Defaults to True.
      evaluate: bool, whether or not this trainer will be used for evaluation.
        Defaults to True.
train_dataset: a dataset object created for training. With tf.distribute,
it needs to be a `DistributedDataset`.
validation_dataset: a dataset object created for evaluation. With
tf.distribute, it needs to be a `DistributedDataset`. The evaluator will
create a dataset iterator for each eval round, so the dataset does not
need to repeat.
checkpoint_exporter: an object that has the `maybe_export_checkpoint`
interface.
"""
# Gets the current distribution strategy. If not inside any strategy scope,
# it gets a single-replica no-op strategy.
self._strategy = tf.distribute.get_strategy()
self._validate_params(
config,
check_train_data=train_dataset is None,
check_validation_data=validation_dataset is None)
self._config = config
self._task = task
self._model = model
self._optimizer = optimizer
self._checkpoint_exporter = checkpoint_exporter
self._recovery = None
# Runtime options are only applied to train_step.
# We use default for eval_step.
self._runtime_options = get_runtime_options(config)
# Creates a shadow copy of the weights to store weights moving average.
if isinstance(self._optimizer, optimization.ExponentialMovingAverage
) and not self._optimizer.has_shadow_copy:
self._optimizer.shadow_copy(self._model)
# global_step increases by 1 after each training iteration.
# We should have global_step.numpy() == self.optimizer.iterations.numpy()
# when there is only 1 optimizer.
self._global_step = orbit.utils.create_global_step()
if hasattr(self.model, "checkpoint_items"):
checkpoint_items = self.model.checkpoint_items
else:
checkpoint_items = {}
self._checkpoint = tf.train.Checkpoint(
global_step=self.global_step,
model=self.model,
optimizer=self.optimizer,
**checkpoint_items)
self._train_loss = tf.keras.metrics.Mean("training_loss", dtype=tf.float32)
self._validation_loss = tf.keras.metrics.Mean(
"validation_loss", dtype=tf.float32)
model_metrics = model.metrics if hasattr(model, "metrics") else []
self.init_async()
if train:
self._train_metrics = self.task.build_metrics(
training=True) + model_metrics
train_dataset = train_dataset or self.distribute_dataset(
self.task.build_inputs, self.config.task.train_data)
orbit.StandardTrainer.__init__(
self,
train_dataset,
options=orbit.StandardTrainerOptions(
use_tf_while_loop=config.trainer.train_tf_while_loop,
use_tf_function=config.trainer.train_tf_function,
use_tpu_summary_optimization=config.trainer.allow_tpu_summary))
if evaluate:
self._validation_metrics = self.task.build_metrics(
training=False) + model_metrics
validation_dataset = validation_dataset or self.distribute_dataset(
self.task.build_inputs, self.config.task.validation_data)
orbit.StandardEvaluator.__init__(
self,
validation_dataset,
options=orbit.StandardEvaluatorOptions(
use_tf_function=config.trainer.eval_tf_function,
use_tf_while_loop=config.trainer.eval_tf_while_loop))
def _validate_params(self,
config,
check_train_data=True,
check_validation_data=True):
r"""Validates if the configuration object passed to the Trainer.
The experiment configuration should be structured as:
\trainer
\task
\train_data
\validation_data
Args:
config: a namedtuple, dataclass, ConfigDict, etc.
check_train_data: whether to check task.train_data field.
check_validation_data: whether to check task.validation_data field.
"""
if not hasattr(config, "trainer"):
raise AttributeError("The trainer requires the configuration contains an"
" attribute `trainer`.")
if not hasattr(config, "task"):
raise AttributeError("The trainer requires the configuration contains an"
" attribute `task`.")
if check_train_data and not hasattr(config.task, "train_data"):
raise AttributeError("The trainer requires the configuration contains an"
" attribute `task.train_data`.")
if check_validation_data and not hasattr(config.task, "validation_data"):
raise AttributeError("The trainer requires the configuration contains an"
" attribute `task.validation_data`.")
@property
def strategy(self):
return self._strategy
@property
def config(self):
return self._config
@property
def task(self):
return self._task
@property
def model(self):
return self._model
@property
def optimizer(self):
if hasattr(self, "_optimizer"):
return self._optimizer
else:
return None
@property
def global_step(self):
return self._global_step
@property
def train_loss(self):
"""Accesses the training loss metric object."""
return self._train_loss
@property
def validation_loss(self):
"""Accesses the validation loss metric object."""
return self._validation_loss
@property
def train_metrics(self):
"""Accesses all training metric objects."""
return self._train_metrics
@property
def validation_metrics(self):
"""Accesses all validation metric metric objects."""
return self._validation_metrics
def initialize(self):
"""A callback function.
    This function will be called when no checkpoint is found for the model.
If there is a checkpoint, the checkpoint will be loaded and this function
will not be called. Tasks may use this callback function to load a
pretrained checkpoint, saved under a directory other than the model_dir.
"""
self.task.initialize(self.model)
@property
def checkpoint(self):
"""Accesses the training checkpoint."""
return self._checkpoint
@property
def checkpoint_exporter(self):
"""Accesses the checkpoint exporter."""
return self._checkpoint_exporter
def train_loop_end(self):
"""See base class."""
self.join()
logs = {}
for metric in self.train_metrics + [self.train_loss]:
logs[metric.name] = metric.result()
metric.reset_states()
if callable(self.optimizer.learning_rate):
# Maybe a self-implemented optimizer does not have `optimizer.iterations`.
# So just to be safe here.
if hasattr(self.optimizer, "iterations"):
logs["learning_rate"] = self.optimizer.learning_rate(
self.optimizer.iterations)
else:
logs["learning_rate"] = self.optimizer.learning_rate(self.global_step)
else:
logs["learning_rate"] = self.optimizer.learning_rate
return logs
def next_train_inputs(self, iterator):
"""Fetches the next inputs for the model during train.
This method consumes the input iterator and returns the next inputs for the
model.
This method provides a way to control how to fetch the next model input, and
what data to send to the model.
This function runs in eager mode.
Args:
iterator: Dataset iterator to generate the next inputs from.
Returns:
The inputs to the model.
"""
return next(iterator)
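  # A hedged sketch of overriding this hook in a subclass (illustrative; the
  # unpacking below assumes a (features, labels) element structure):
  #
  #   class MyTrainer(Trainer):
  #
  #     def next_train_inputs(self, iterator):
  #       features, labels = next(iterator)
  #       return features, labels  # e.g. drop or remap keys here.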
def train_step(self, iterator):
"""See base class."""
def step_fn(inputs):
if self.config.runtime.enable_xla and (self.config.runtime.num_gpus > 0):
task_train_step = tf.function(self.task.train_step, jit_compile=True)
else:
task_train_step = self.task.train_step
logs = task_train_step(
inputs,
model=self.model,
optimizer=self.optimizer,
metrics=self.train_metrics)
self._train_loss.update_state(logs[self.task.loss])
self.global_step.assign_add(1)
inputs = self.next_train_inputs(iterator)
self.strategy.run(step_fn, args=(inputs,), options=self._runtime_options)
def eval_begin(self):
"""Sets up metrics."""
for metric in self.validation_metrics + [self.validation_loss]:
metric.reset_states()
# Swaps weights to test on weights moving average.
if self.optimizer and isinstance(self.optimizer,
optimization.ExponentialMovingAverage):
self.optimizer.swap_weights()
def next_eval_inputs(self, iterator):
"""Fetches the next inputs for the model during eval.
This method consumes the input iterator and returns the next inputs for the
model and an additional logs dict. The output dict remains in the host (not
sent to GPUs/TPUs) and is merged with the model outputs which will be
processed later in `aggregate_logs`. This is useful for sending extra logs
downstream that are not compatible with the accelerators.
This function runs in eager mode.
Args:
iterator: Dataset iterator to generate the next inputs from.
Returns:
      The inputs to the model, and an additional logs dictionary. The logs
      are not passed to the model; instead, they are merged with the model
      output logs.
"""
passthrough_logs = dict()
return next(iterator), passthrough_logs
def eval_step(self, iterator):
"""See base class."""
def step_fn(inputs):
logs = self.task.validation_step(
inputs, model=self.model, metrics=self.validation_metrics)
if self.task.loss in logs:
self._validation_loss.update_state(logs[self.task.loss])
return logs
inputs, passthrough_logs = self.next_eval_inputs(iterator)
distributed_outputs = self.strategy.run(step_fn, args=(inputs,))
logs = tf.nest.map_structure(
self.strategy.experimental_local_results, distributed_outputs
)
if set(logs.keys()) & set(passthrough_logs.keys()):
logging.warning(
(
"Conflict between the pasthrough log keys and the returned model"
" log keys. Found %r keys in the passthrough logs and %r keys in"
" the model logs. Model log keys takes precedence."
),
logs.keys(),
passthrough_logs.keys(),
)
return passthrough_logs | logs
def eval_end(self, aggregated_logs=None):
"""Processes evaluation results."""
self.join()
logs = {}
for metric in self.validation_metrics:
logs[metric.name] = metric.result()
if self.validation_loss.count.numpy() != 0:
logs[self.validation_loss.name] = self.validation_loss.result()
else:
# `self.validation_loss` metric was not updated, because the validation
# loss was not returned from the task's `validation_step` method.
logging.info("The task did not report validation loss.")
if aggregated_logs:
metrics = self.task.reduce_aggregated_logs(
aggregated_logs, global_step=self.global_step)
logs.update(metrics)
if self._checkpoint_exporter:
self._checkpoint_exporter.maybe_export_checkpoint(
self.checkpoint, logs, self.global_step.numpy())
metric_name = self.config.trainer.best_checkpoint_eval_metric
logs["best_" +
metric_name] = self._checkpoint_exporter.best_ckpt_logs[metric_name]
# Swaps back weights after testing when EMA is used.
# This happens after best checkpoint export so that average weights used for
# eval are exported instead of regular weights.
if self.optimizer and isinstance(self.optimizer,
optimization.ExponentialMovingAverage):
self.optimizer.swap_weights()
return logs
def eval_reduce(self, state=None, step_outputs=None):
return self.task.aggregate_logs(state, step_outputs)
| 17,783 | 35.073022 | 80 | py |
models | models-master/official/core/train_lib_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for train_ctl_lib."""
import json
import os
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.utils.testing import mock_task
FLAGS = flags.FLAGS
tfm_flags.define_flags()
class TrainTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(TrainTest, self).setUp()
self._test_config = {
'trainer': {
'checkpoint_interval': 10,
'steps_per_loop': 10,
'summary_interval': 10,
'train_steps': 10,
'validation_steps': 5,
'validation_interval': 10,
'continuous_eval_timeout': 1,
'validation_summary_subdir': 'validation',
'optimizer_config': {
'optimizer': {
'type': 'sgd',
},
'learning_rate': {
'type': 'constant'
}
}
},
}
@combinations.generate(
combinations.combine(
distribution_strategy=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
flag_mode=['train', 'eval', 'train_and_eval'],
run_post_eval=[True, False]))
def test_end_to_end(self, distribution_strategy, flag_mode, run_post_eval):
model_dir = self.get_temp_dir()
flags_dict = dict(
experiment='mock',
mode=flag_mode,
model_dir=model_dir,
params_override=json.dumps(self._test_config))
with flagsaver.flagsaver(**flags_dict):
params = train_utils.parse_configuration(flags.FLAGS)
train_utils.serialize_config(params, model_dir)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
_, logs = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=flag_mode,
params=params,
model_dir=model_dir,
run_post_eval=run_post_eval)
if 'eval' in flag_mode:
self.assertTrue(
tf.io.gfile.exists(
os.path.join(model_dir,
params.trainer.validation_summary_subdir)))
if run_post_eval:
self.assertNotEmpty(logs)
else:
self.assertEmpty(logs)
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(model_dir, 'params.yaml')))
if flag_mode == 'eval':
return
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(model_dir, 'checkpoint')))
# Tests continuous evaluation.
_, logs = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode='continuous_eval',
params=params,
model_dir=model_dir,
run_post_eval=run_post_eval)
@combinations.generate(
combinations.combine(
distribution_strategy=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
flag_mode=['train', 'eval', 'train_and_eval'],
run_post_eval=[True, False]))
def test_end_to_end_class(self, distribution_strategy, flag_mode,
run_post_eval):
model_dir = self.get_temp_dir()
flags_dict = dict(
experiment='mock',
mode=flag_mode,
model_dir=model_dir,
params_override=json.dumps(self._test_config))
with flagsaver.flagsaver(**flags_dict):
params = train_utils.parse_configuration(flags.FLAGS)
train_utils.serialize_config(params, model_dir)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
_, logs = train_lib.OrbitExperimentRunner(
distribution_strategy=distribution_strategy,
task=task,
mode=flag_mode,
params=params,
model_dir=model_dir,
run_post_eval=run_post_eval).run()
if 'eval' in flag_mode:
self.assertTrue(
tf.io.gfile.exists(
os.path.join(model_dir,
params.trainer.validation_summary_subdir)))
if run_post_eval:
self.assertNotEmpty(logs)
else:
self.assertEmpty(logs)
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(model_dir, 'params.yaml')))
if flag_mode == 'eval':
return
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(model_dir, 'checkpoint')))
# Tests continuous evaluation.
_, logs = train_lib.OrbitExperimentRunner(
distribution_strategy=distribution_strategy,
task=task,
mode='continuous_eval',
params=params,
model_dir=model_dir,
run_post_eval=run_post_eval).run()
@combinations.generate(
combinations.combine(
distribution_strategy=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
flag_mode=['train', 'train_and_eval'],
))
def test_recovery_nan_error(self, distribution_strategy, flag_mode):
model_dir = self.get_temp_dir()
flags_dict = dict(
experiment='mock',
mode=flag_mode,
model_dir=model_dir,
params_override=json.dumps(self._test_config))
with flagsaver.flagsaver(**flags_dict):
params = train_utils.parse_configuration(flags.FLAGS)
train_utils.serialize_config(params, model_dir)
with distribution_strategy.scope():
# task = task_factory.get_task(params.task, logging_dir=model_dir)
task = mock_task.MockTask(params.task, logging_dir=model_dir)
# Set the loss to NaN to trigger RunTimeError.
def build_losses(labels, model_outputs, aux_losses=None):
del labels, model_outputs
return tf.constant([np.nan], tf.float32) + aux_losses
task.build_losses = build_losses
with self.assertRaises(RuntimeError):
train_lib.OrbitExperimentRunner(
distribution_strategy=distribution_strategy,
task=task,
mode=flag_mode,
params=params,
model_dir=model_dir).run()
@combinations.generate(
combinations.combine(
distribution_strategy=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
flag_mode=['train'],
))
def test_recovery(self, distribution_strategy, flag_mode):
loss_threshold = 1.0
model_dir = self.get_temp_dir()
flags_dict = dict(
experiment='mock',
mode=flag_mode,
model_dir=model_dir,
params_override=json.dumps(self._test_config))
with flagsaver.flagsaver(**flags_dict):
params = train_utils.parse_configuration(flags.FLAGS)
params.trainer.loss_upper_bound = loss_threshold
params.trainer.recovery_max_trials = 1
train_utils.serialize_config(params, model_dir)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
# Saves a checkpoint for reference.
model = task.build_model()
checkpoint = tf.train.Checkpoint(model=model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, self.get_temp_dir(), max_to_keep=2)
checkpoint_manager.save()
before_weights = model.get_weights()
def build_losses(labels, model_outputs, aux_losses=None):
del labels, model_outputs
return tf.constant([loss_threshold], tf.float32) + aux_losses
task.build_losses = build_losses
model, _ = train_lib.OrbitExperimentRunner(
distribution_strategy=distribution_strategy,
task=task,
mode=flag_mode,
params=params,
model_dir=model_dir).run()
after_weights = model.get_weights()
for left, right in zip(before_weights, after_weights):
self.assertAllEqual(left, right)
def test_parse_configuration(self):
model_dir = self.get_temp_dir()
flags_dict = dict(
experiment='mock',
mode='train',
model_dir=model_dir,
params_override=json.dumps(self._test_config))
with flagsaver.flagsaver(**flags_dict):
params = train_utils.parse_configuration(flags.FLAGS, lock_return=True)
with self.assertRaises(ValueError):
params.override({'task': {'init_checkpoint': 'Foo'}})
params = train_utils.parse_configuration(flags.FLAGS, lock_return=False)
params.override({'task': {'init_checkpoint': 'Bar'}})
self.assertEqual(params.task.init_checkpoint, 'Bar')
if __name__ == '__main__':
tf.test.main()
| 9,932 | 34.348754 | 78 | py |
models | models-master/official/core/file_writers.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File writer functions for dataset preparation, infra validation, and unit tests."""
import io
from typing import Optional, Sequence, Union
import tensorflow as tf
def write_small_dataset(examples: Sequence[Union[tf.train.Example,
tf.train.SequenceExample]],
output_path: str,
file_type: str = 'tfrecord') -> None:
"""Writes `examples` to a file at `output_path` with type `file_type`.
CAVEAT: This function is not recommended for writing large datasets, since it
  will loop through `examples` and perform write operations sequentially.
Args:
examples: List of tf.train.Example or tf.train.SequenceExample.
output_path: Output path for the dataset.
file_type: A string indicating the file format, could be: 'tfrecord',
'tfrecords', 'tfrecord_compressed', 'tfrecords_gzip', 'riegeli'. The
string is case insensitive.
"""
file_type = file_type.lower()
if file_type == 'tfrecord' or file_type == 'tfrecords':
_write_tfrecord(examples, output_path)
elif file_type == 'tfrecord_compressed' or file_type == 'tfrecords_gzip':
_write_tfrecord(examples, output_path,
tf.io.TFRecordOptions(compression_type='GZIP'))
elif file_type == 'riegeli':
_write_riegeli(examples, output_path)
else:
raise ValueError(f'Unknown file_type: {file_type}')
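# A minimal usage sketch (hypothetical output path; `write_small_dataset` only
# expects already-built tf.train.Example protos):
#
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))}))
#   write_small_dataset([example], '/tmp/small.tfrecord', 'tfrecord')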
def _write_tfrecord(examples: Sequence[Union[tf.train.Example,
tf.train.SequenceExample]],
output_path: str,
options: Optional[tf.io.TFRecordOptions] = None) -> None:
"""Writes `examples` to a TFRecord file at `output_path`.
Args:
examples: A list of tf.train.Example.
output_path: Output path for the dataset.
options: Options used for manipulating TFRecord files.
"""
with tf.io.TFRecordWriter(output_path, options) as writer:
for example in examples:
writer.write(example.SerializeToString())
def _write_riegeli(examples: Sequence[Union[tf.train.Example,
tf.train.SequenceExample]],
output_path: str) -> None:
"""Writes `examples` to a Riegeli file at `output_path`.
Args:
examples: A list of tf.train.Example.
output_path: Output path for the dataset.
"""
with io.FileIO(output_path, 'wb') as fileio:
import riegeli # pylint: disable=g-import-not-at-top
with riegeli.RecordWriter(fileio) as writer:
writer.write_messages(examples)
| 3,174 | 38.197531 | 86 | py |
models | models-master/official/core/registry.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry utility."""
def register(registered_collection, reg_key):
"""Register decorated function or class to collection.
Register decorated function or class into registered_collection, in a
hierarchical order. For example, when reg_key="my_model/my_exp/my_config_0"
the decorated function or class is stored under
registered_collection["my_model"]["my_exp"]["my_config_0"].
This decorator is supposed to be used together with the lookup() function in
this file.
Args:
registered_collection: a dictionary. The decorated function or class will be
put into this collection.
reg_key: The key for retrieving the registered function or class. If reg_key
is a string, it can be hierarchical like my_model/my_exp/my_config_0
Returns:
A decorator function
Raises:
KeyError: when function or class to register already exists.
"""
def decorator(fn_or_cls):
"""Put fn_or_cls in the dictionary."""
if isinstance(reg_key, str):
hierarchy = reg_key.split("/")
collection = registered_collection
for h_idx, entry_name in enumerate(hierarchy[:-1]):
if entry_name not in collection:
collection[entry_name] = {}
collection = collection[entry_name]
if not isinstance(collection, dict):
raise KeyError(
"Collection path {} at position {} already registered as "
"a function or class.".format(entry_name, h_idx))
leaf_reg_key = hierarchy[-1]
else:
collection = registered_collection
leaf_reg_key = reg_key
if leaf_reg_key in collection:
raise KeyError("Function or class {} registered multiple times.".format(
leaf_reg_key))
collection[leaf_reg_key] = fn_or_cls
return fn_or_cls
return decorator
def lookup(registered_collection, reg_key):
"""Lookup and return decorated function or class in the collection.
Lookup decorated function or class in registered_collection, in a
hierarchical order. For example, when
reg_key="my_model/my_exp/my_config_0",
this function will return
registered_collection["my_model"]["my_exp"]["my_config_0"].
Args:
registered_collection: a dictionary. The decorated function or class will be
retrieved from this collection.
reg_key: The key for retrieving the registered function or class. If reg_key
is a string, it can be hierarchical like my_model/my_exp/my_config_0
Returns:
The registered function or class.
Raises:
LookupError: when reg_key cannot be found.
"""
if isinstance(reg_key, str):
hierarchy = reg_key.split("/")
collection = registered_collection
for h_idx, entry_name in enumerate(hierarchy):
if entry_name not in collection:
raise LookupError(
f"collection path {entry_name} at position {h_idx} is never "
f"registered. Please make sure the {entry_name} and its library is "
"imported and linked to the trainer binary.")
collection = collection[entry_name]
return collection
else:
if reg_key not in registered_collection:
raise LookupError(
f"registration key {reg_key} is never "
f"registered. Please make sure the {reg_key} and its library is "
"imported and linked to the trainer binary.")
return registered_collection[reg_key]
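# A hedged end-to-end sketch of register() + lookup() (names are illustrative):
#
#   _REGISTRY = {}
#
#   @register(_REGISTRY, 'my_model/my_exp/my_config_0')
#   def my_config():
#     ...
#
#   fn = lookup(_REGISTRY, 'my_model/my_exp/my_config_0')  # returns my_config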
| 3,949 | 37.72549 | 80 | py |
models | models-master/official/core/train_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training utils."""
import dataclasses
import inspect
import json
import os
import pprint
from typing import Any, Callable, Dict, List, Optional, Union
from absl import logging
import gin
import numpy as np
import orbit
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2_as_graph
# pylint: enable=g-direct-tensorflow-import
from official.core import base_task
from official.core import base_trainer
from official.core import config_definitions
from official.core import exp_factory
from official.modeling import hyperparams
BEST_CHECKPOINT_NAME = 'best_ckpt'
def get_leaf_nested_dict(d: Dict[str, Any], keys: List[str]) -> Dict[str, Any]:
"""Get leaf from a dictionary with arbitrary depth with a list of keys.
Args:
d: The dictionary to extract value from.
keys: The list of keys to extract values recursively.
Returns:
The value of the leaf.
Raises:
KeyError: If the value of keys extracted is a dictionary.
"""
leaf = d
for k in keys:
if not isinstance(leaf, dict) or k not in leaf:
raise KeyError(
'Path not exist while traversing the dictionary: d with keys'
': %s.' % keys)
leaf = leaf[k]
if isinstance(leaf, dict):
raise KeyError('The value extracted with keys: %s is not a leaf of the '
'dictionary: %s.' % (keys, d))
return leaf
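# A worked example mirroring the behavior described above (a sketch):
#
#   get_leaf_nested_dict({'a': {'b': {'c': 3}}}, ['a', 'b', 'c'])  # -> 3
#   get_leaf_nested_dict({'a': {'b': {'c': 3}}}, ['a', 'b'])       # KeyError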
def cast_leaf_nested_dict(d: Dict[str, Any],
cast_fn: Callable[[Any], Any]) -> Dict[str, Any]:
"""Cast the leaves of a dictionary with arbitrary depth in place.
Args:
d: The dictionary to extract value from.
cast_fn: The casting function.
Returns:
    A dictionary with the same structure as d.
"""
for key, value in d.items():
if isinstance(value, dict):
d[key] = cast_leaf_nested_dict(value, cast_fn)
else:
d[key] = cast_fn(value)
return d
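# Illustrative sketch: casting tensor leaves to plain floats in place, e.g.
# before JSON-serializing eval logs. The values below are hypothetical.
def _cast_leaf_nested_dict_example():
  logs = {'eval': {'accuracy': tf.constant(0.5)}}
  # Returns (and mutates) {'eval': {'accuracy': 0.5}}.
  return cast_leaf_nested_dict(logs, float)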
def _filter_leaf_nested_dict(
d: Dict[str, Any], predicate: Callable[[Any], bool]
) -> Dict[str, Any]:
"""Filters the leaves of a dictionary with arbitrary depth in place.
Args:
d: The dictionary to extract value from.
    predicate: A function that will be called on every leaf item. When the
      function returns True the leaf will be kept. Otherwise the leaf will be
      dropped.
Returns:
    A new dictionary with the filtered result.
"""
result = {}
for key, value in d.items():
if isinstance(value, dict):
result[key] = _filter_leaf_nested_dict(value, predicate)
elif predicate(value):
result[key] = value
return result
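# Illustrative sketch: keeping only non-list leaves, similar to how
# export_best_eval_metric() below drops image summaries. The values are
# hypothetical.
def _filter_leaf_nested_dict_example():
  logs = {'loss': 0.25, 'images': {'panel': [[0, 1], [1, 0]]}}
  # Drops the list leaf, returning {'loss': 0.25, 'images': {}}.
  return _filter_leaf_nested_dict(logs, lambda x: not isinstance(x, list))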
def maybe_create_best_ckpt_exporter(params: config_definitions.ExperimentConfig,
data_dir: str) -> Any:
"""Maybe create a BestCheckpointExporter object, according to the config."""
export_subdir = params.trainer.best_checkpoint_export_subdir
metric_name = params.trainer.best_checkpoint_eval_metric
metric_comp = params.trainer.best_checkpoint_metric_comp
if data_dir and export_subdir and metric_name:
best_ckpt_dir = os.path.join(data_dir, export_subdir)
best_ckpt_exporter = BestCheckpointExporter(best_ckpt_dir, metric_name,
metric_comp)
logging.info(
'Created the best checkpoint exporter. '
'data_dir: %s, export_subdir: %s, metric_name: %s', data_dir,
export_subdir, metric_name)
else:
best_ckpt_exporter = None
return best_ckpt_exporter
class BestCheckpointExporter:
"""Keeps track of the best result, and saves its checkpoint.
Orbit will support an API for checkpoint exporter. This class will be used
together with orbit once this functionality is ready.
"""
def __init__(self, export_dir: str, metric_name: str, metric_comp: str):
"""Initialization.
Args:
export_dir: The directory that will contain exported checkpoints.
metric_name: Indicates which metric to look at, when determining which
result is better. If eval_logs being passed to maybe_export_checkpoint
        is a nested dictionary, use `|` as a separator for different layers.
metric_comp: Indicates how to compare results. Either `lower` or `higher`.
"""
self._export_dir = export_dir
self._metric_name = metric_name.split('|')
self._metric_comp = metric_comp
if self._metric_comp not in ('lower', 'higher'):
raise ValueError('best checkpoint metric comp must be one of '
'higher, lower. Got: {}'.format(self._metric_comp))
tf.io.gfile.makedirs(os.path.dirname(self.best_ckpt_logs_path))
self._best_ckpt_logs = self._maybe_load_best_eval_metric()
self._checkpoint_manager = None
def _get_checkpoint_manager(self, checkpoint):
"""Gets an existing checkpoint manager or creates a new one."""
if self._checkpoint_manager is None or (self._checkpoint_manager.checkpoint
!= checkpoint):
logging.info('Creates a new checkpoint manager.')
self._checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=self._export_dir,
max_to_keep=1,
checkpoint_name=BEST_CHECKPOINT_NAME)
return self._checkpoint_manager
def maybe_export_checkpoint(
self, checkpoint, eval_logs, global_step, write_logs=True) -> bool:
"""Compare eval_logs with past eval_logs and export checkpoint if better."""
logging.info('[BestCheckpointExporter] received eval_logs: %s, at step: %d',
eval_logs, global_step)
if self._best_ckpt_logs is None or self._new_metric_is_better(
self._best_ckpt_logs, eval_logs):
self._best_ckpt_logs = eval_logs
if write_logs:
self.export_best_eval_metric(self._best_ckpt_logs, global_step)
self._get_checkpoint_manager(checkpoint).save()
return True
return False
def _maybe_load_best_eval_metric(self):
if not tf.io.gfile.exists(self.best_ckpt_logs_path):
return None
with tf.io.gfile.GFile(self.best_ckpt_logs_path, 'r') as reader:
return json.loads(reader.read())
def _new_metric_is_better(self, old_logs, new_logs):
"""Check if the metric in new_logs is better than the metric in old_logs."""
old_value = float(
orbit.utils.get_value(
get_leaf_nested_dict(old_logs, self._metric_name)))
new_value = float(
orbit.utils.get_value(
get_leaf_nested_dict(new_logs, self._metric_name)))
logging.info('[BestCheckpointExporter] comparing results. old: %f, new: %f',
old_value, new_value)
if self._metric_comp == 'higher':
if new_value > old_value:
logging.info('[BestCheckpointExporter] '
'the new number is better since it is higher.')
return True
else: # self._metric_comp == 'lower':
if new_value < old_value:
logging.info('[BestCheckpointExporter] '
'the new number is better since it is lower.')
return True
return False
def export_best_eval_metric(self, eval_logs, global_step):
"""Export evaluation results of the best checkpoint into a json file."""
    # eval_logs_ext may contain non-scalar tensors, such as image data when
# `allow_image_summary` is True. Here we only keep scalar tensors.
eval_logs_ext = _filter_leaf_nested_dict(
eval_logs, lambda x: tf.rank(x) <= 1
)
eval_logs_ext['best_ckpt_global_step'] = global_step
eval_logs_ext = cast_leaf_nested_dict(
eval_logs_ext, lambda x: float(orbit.utils.get_value(x)))
# Saving json file is very fast.
with tf.io.gfile.GFile(self.best_ckpt_logs_path, 'w') as writer:
writer.write(json.dumps(eval_logs_ext, indent=4) + '\n')
@property
def best_ckpt_logs(self):
return self._best_ckpt_logs
@property
def best_ckpt_logs_path(self):
return os.path.join(self._export_dir, 'info.json')
@property
def best_ckpt_path(self):
"""Returns the best ckpt path or None if there is no ckpt yet."""
return tf.train.latest_checkpoint(self._export_dir)
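# Illustrative sketch: exporting the best checkpoint by eval accuracy. The
# checkpoint, eval_logs, global_step and export directory below are
# hypothetical stand-ins supplied by the caller.
def _best_checkpoint_exporter_example(checkpoint, eval_logs, global_step):
  exporter = BestCheckpointExporter(
      export_dir='/tmp/best_ckpt', metric_name='accuracy',
      metric_comp='higher')
  exporter.maybe_export_checkpoint(checkpoint, eval_logs, global_step)
  return exporter.best_ckpt_path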
def create_optimizer(task: base_task.Task,
params: config_definitions.ExperimentConfig
) -> tf.keras.optimizers.Optimizer:
"""A create optimizer util to be backward compatability with new args."""
if 'dp_config' in inspect.signature(task.create_optimizer).parameters:
dp_config = None
if hasattr(params.task, 'differential_privacy_config'):
dp_config = params.task.differential_privacy_config
optimizer = task.create_optimizer(
params.trainer.optimizer_config, params.runtime,
dp_config=dp_config)
else:
if hasattr(params.task, 'differential_privacy_config'
) and params.task.differential_privacy_config is not None:
raise ValueError('Differential privacy config is specified but '
'task.create_optimizer api does not accept it.')
optimizer = task.create_optimizer(
params.trainer.optimizer_config,
params.runtime)
return optimizer
@gin.configurable
def create_trainer(params: config_definitions.ExperimentConfig,
task: base_task.Task,
train: bool,
evaluate: bool,
checkpoint_exporter: Optional[BestCheckpointExporter] = None,
trainer_cls=base_trainer.Trainer) -> base_trainer.Trainer:
"""Create trainer."""
logging.info('Running default trainer.')
model = task.build_model()
optimizer = create_optimizer(task, params)
return trainer_cls(
params,
task,
model=model,
optimizer=optimizer,
train=train,
evaluate=evaluate,
checkpoint_exporter=checkpoint_exporter)
@dataclasses.dataclass
class ParseConfigOptions:
"""Use this dataclass instead of FLAGS to customize parse_configuration()."""
experiment: str
config_file: List[str]
tpu: str = ''
tf_data_service: str = ''
params_override: str = ''
def __contains__(self, name):
return name in dataclasses.asdict(self)
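# Illustrative sketch: driving parse_configuration() below without absl FLAGS.
# The experiment name, config file and override string are hypothetical.
def _parse_config_options_example():
  options = ParseConfigOptions(
      experiment='my_experiment',
      config_file=['path/to/overrides.yaml'],
      params_override='trainer.train_steps=1000')
  return parse_configuration(options)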
class ExperimentParser:
"""Constructs the Experiment config from Flags or equivalent object.
Most of the cases, users only need to call the `parse()` function:
```
builder = ExperimentParser(FLAGS)
params = builder.parse()
```
The advanced users can modify the flow by calling the parse_*() functions
separately.
"""
def __init__(self, flags_obj):
self._flags_obj = flags_obj
def parse(self):
"""Overrall process of constructing Experiment config."""
params = self.base_experiment()
params = self.parse_config_file(params)
params = self.parse_runtime(params)
params = self.parse_data_service(params)
params = self.parse_params_override(params)
return params
def base_experiment(self):
"""Get the base experiment config from --experiment field."""
if self._flags_obj.experiment is None:
raise ValueError('The flag --experiment must be specified.')
return exp_factory.get_exp_config(self._flags_obj.experiment)
def parse_config_file(self, params):
"""Override the configs of params from the config_file."""
for config_file in self._flags_obj.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
return params
def parse_runtime(self, params):
"""Override the runtime configs of params from flags."""
# Override the TPU address and tf.data service address.
params.override({
'runtime': {
'tpu': self._flags_obj.tpu,
},
})
return params
def parse_data_service(self, params):
"""Override the data service configs of params from flags."""
if ('tf_data_service' in self._flags_obj and
self._flags_obj.tf_data_service and
isinstance(params.task, config_definitions.TaskConfig)):
params.override({
'task': {
'train_data': {
'tf_data_service_address': self._flags_obj.tf_data_service,
},
'validation_data': {
'tf_data_service_address': self._flags_obj.tf_data_service,
}
}
})
return params
def parse_params_override(self, params):
# Get the second level of override from `--params_override`.
# `--params_override` is typically used as a further override over the
# template. For example, one may define a particular template for training
# ResNet50 on ImageNet in a config file and pass it via `--config_file`,
# then define different learning rates and pass it via `--params_override`.
if self._flags_obj.params_override:
params = hyperparams.override_params_dict(
params, self._flags_obj.params_override, is_strict=True)
return params
def parse_configuration(flags_obj, lock_return=True, print_return=True):
"""Parses ExperimentConfig from flags."""
params = ExperimentParser(flags_obj).parse()
params.validate()
if lock_return:
params.lock()
if print_return:
pp = pprint.PrettyPrinter()
logging.info('Final experiment parameters:\n%s',
pp.pformat(params.as_dict()))
return params
def serialize_config(params: config_definitions.ExperimentConfig,
model_dir: str):
"""Serializes and saves the experiment config."""
if model_dir is None:
raise ValueError('model_dir must be specified, but got None')
params_save_path = os.path.join(model_dir, 'params.yaml')
logging.info('Saving experiment configuration to %s', params_save_path)
tf.io.gfile.makedirs(model_dir)
hyperparams.save_params_dict_to_yaml(params, params_save_path)
def save_gin_config(filename_suffix: str, model_dir: str):
"""Serializes and saves the experiment config."""
gin_save_path = os.path.join(
model_dir, 'operative_config.{}.gin'.format(filename_suffix))
logging.info('Saving gin configurations to %s', gin_save_path)
tf.io.gfile.makedirs(model_dir)
with tf.io.gfile.GFile(gin_save_path, 'w') as f:
f.write(gin.operative_config_str())
def read_global_step_from_checkpoint(ckpt_file_path):
"""Read global step from checkpoint, or get global step from its filename."""
global_step = tf.Variable(-1, dtype=tf.int64)
ckpt = tf.train.Checkpoint(global_step=global_step)
try:
ckpt.restore(ckpt_file_path).expect_partial()
global_step_maybe_restored = global_step.numpy()
except tf.errors.InvalidArgumentError:
global_step_maybe_restored = -1
if global_step_maybe_restored == -1:
raise ValueError('global_step not found in checkpoint {}. '
'If you want to run finetune eval jobs, you need to '
'make sure that your pretrain model writes '
'global_step in its checkpoints.'.format(ckpt_file_path))
global_step_restored = global_step.numpy()
logging.info('get global_step %d from checkpoint %s', global_step_restored,
ckpt_file_path)
return global_step_restored
def write_json_summary(log_dir, global_step, eval_metrics):
"""Dump evaluation metrics to json file."""
serializable_dict = {}
for name, value in eval_metrics.items():
if hasattr(value, 'numpy'):
serializable_dict[name] = str(value.numpy())
else:
serializable_dict[name] = str(value)
output_json = os.path.join(log_dir, 'metrics-{}.json'.format(global_step))
logging.info('Evaluation results at pretrain step %d: %s', global_step,
serializable_dict)
with tf.io.gfile.GFile(output_json, 'w') as writer:
writer.write(json.dumps(serializable_dict, indent=4) + '\n')
def write_summary(summary_writer, global_step, eval_metrics):
"""Write evaluation metrics to TF summary."""
numeric_dict = {}
for name, value in eval_metrics.items():
numeric_dict[name] = float(orbit.utils.get_value(value))
with summary_writer.as_default():
for name, value in numeric_dict.items():
tf.summary.scalar(name, value, step=global_step)
summary_writer.flush()
def remove_ckpts(model_dir):
"""Remove model checkpoints, so we can restart."""
ckpts = os.path.join(model_dir, 'ckpt-*')
logging.info('removing checkpoint files %s', ckpts)
for file_to_remove in tf.io.gfile.glob(ckpts):
tf.io.gfile.rmtree(file_to_remove)
file_to_remove = os.path.join(model_dir, 'checkpoint')
if tf.io.gfile.exists(file_to_remove):
tf.io.gfile.remove(file_to_remove)
def write_model_params(model: Union[tf.Module, tf.keras.Model],
output_path: str) -> None:
"""Writes the model parameters and shapes to a file.
Args:
model: A model instance.
output_path: Output file path.
"""
with tf.io.gfile.GFile(output_path, 'w') as f:
total_params = 0
for var in model.variables:
shape = tf.shape(var)
total_params += tf.math.reduce_prod(shape).numpy()
f.write(f'{var.name} {shape.numpy().tolist()}\n')
f.write(f'\nTotal params: {total_params}\n')
def try_count_params(
model: Union[tf.Module, tf.keras.Model],
trainable_only: bool = False):
"""Count the number of parameters if model is possible.
Args:
model: Try to count the number of params in this model.
trainable_only: Whether to calculate trainable params only. This flag is
not used when the model has `count_params` attribute.
Returns:
The number of parameters or None.
"""
if hasattr(model, 'count_params'):
try:
return model.count_params()
except ValueError:
logging.info('Number of trainable params unknown, because the build() '
'methods in keras layers were not called. This is probably '
                   'because the model was not fed any input, e.g., the max '
'train step already reached before this run.')
return None
else:
total_params = 0
variables = model.trainable_variables if trainable_only else model.variables
for var in variables:
shape = tf.shape(var)
total_params += tf.math.reduce_prod(shape).numpy()
return total_params
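# Illustrative sketch: counting parameters of a small, already-built Keras
# model. The layer sizes below are hypothetical.
def _try_count_params_example():
  model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
  return try_count_params(model)  # 8 * 4 weights + 4 biases = 36.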
def try_count_flops(model: Union[tf.Module, tf.keras.Model],
inputs_kwargs: Optional[Dict[str, Any]] = None,
output_path: Optional[str] = None):
"""Counts and returns model FLOPs.
Args:
model: A model instance.
    inputs_kwargs: An optional dictionary of argument pairs specifying inputs'
      shape specifications, used to get the corresponding concrete function.
output_path: A file path to write the profiling results to.
Returns:
The model's FLOPs.
"""
if hasattr(model, 'inputs'):
try:
# Get input shape and set batch size to 1.
if model.inputs:
inputs = [
tf.TensorSpec([1] + input.shape[1:], input.dtype)
for input in model.inputs
]
concrete_func = tf.function(model).get_concrete_function(inputs)
# If model.inputs is invalid, try to use the input to get concrete
# function for model.call (subclass model).
else:
concrete_func = tf.function(model.call).get_concrete_function(
**inputs_kwargs)
frozen_func, _ = convert_variables_to_constants_v2_as_graph(concrete_func)
# Calculate FLOPs.
run_meta = tf.compat.v1.RunMetadata()
opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
if output_path is not None:
opts['output'] = f'file:outfile={output_path}'
else:
opts['output'] = 'none'
flops = tf.compat.v1.profiler.profile(
graph=frozen_func.graph, run_meta=run_meta, options=opts)
return flops.total_float_ops
except Exception as e: # pylint: disable=broad-except
logging.info(
'Failed to count model FLOPs with error %s, because the build() '
'methods in keras layers were not called. This is probably because '
          'the model was not fed any input, e.g., the max train step already '
'reached before this run.', e)
return None
return None
@ops.RegisterStatistics('Einsum', 'flops')
def _einsum_flops(graph, node):
"""Calculates the compute resources needed for Einsum."""
assert len(node.input) == 2
x_shape = tf.compat.v1.graph_util.tensor_shape_from_node_def_name(
graph, node.input[0])
y_shape = tf.compat.v1.graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
x_shape.assert_is_fully_defined()
y_shape.assert_is_fully_defined()
x_shape = x_shape.as_list()
y_shape = y_shape.as_list()
equation = str(node.attr['equation'])
equation = (
equation.replace('s:', '')
.replace('"', '')
.replace(' ', '')
.replace('\n', '')
)
x_str = equation.split(',')[0]
y_r_str = equation.split(',')[1]
y_str = y_r_str.split('->')[0]
r_str = y_r_str.split('->')[1]
shape_dic = {}
contracted = set()
for indice in x_str + y_str:
if indice in x_str:
indice_dim = x_shape[x_str.find(indice)]
elif indice in y_str:
indice_dim = y_shape[y_str.find(indice)]
else:
raise ValueError('indice {} not found in inputs'.format(indice))
shape_dic[indice] = indice_dim
if indice not in r_str:
contracted.add(indice)
madds = np.prod([shape_dic[indice] for indice in r_str]) * (
np.prod([shape_dic[indice] for indice in contracted]))
flops = 2 * madds
return ops.OpStats('flops', flops)
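# Worked example for the statistic above (illustrative only): for the einsum
# equation 'bij,bjk->bik' with x_shape [2, 3, 4] and y_shape [2, 4, 5], the
# output indices b, i, k contribute 2 * 3 * 5 = 30 and the contracted index j
# contributes 4, so madds = 30 * 4 = 120 and flops = 2 * 120 = 240.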
| 22,106 | 35.181669 | 103 | py |
models | models-master/official/core/actions.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides TFM orbit actions and associated helper functions/classes."""
import os
from typing import List
from absl import logging
import gin
import orbit
import tensorflow as tf
from official.core import base_trainer
from official.core import config_definitions
from official.modeling import optimization
class PruningAction:
"""Train action to updates pruning related information.
This action updates pruning steps at the end of trainig loop, and log
pruning metrics to tensorboard.
This action must be used when training a pruned model to avoid pruning error.
"""
def __init__(
self,
export_dir: str,
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
):
"""Initializes the instance.
Args:
export_dir: `str` for the export directory of the pruning summaries.
model: `tf.keras.Model` model instance used for training. This will be
used to assign a pruning step to each prunable weight.
optimizer: `tf.keras.optimizers.Optimizer` optimizer instance used for
training. This will be used to find the current training steps.
"""
# TODO(b/221490190): Avoid local import when the bug is fixed.
import tensorflow_model_optimization as tfmot # pylint: disable=g-import-not-at-top
self._optimizer = optimizer
self.update_pruning_step = tfmot.sparsity.keras.UpdatePruningStep()
self.update_pruning_step.set_model(model)
self.update_pruning_step.on_train_begin()
self.pruning_summaries = tfmot.sparsity.keras.PruningSummaries(
log_dir=export_dir)
model.optimizer = optimizer
self.pruning_summaries.set_model(model)
def __call__(self, output: orbit.runner.Output):
"""Update pruning step and log pruning summaries.
Args:
output: The train output.
"""
self.update_pruning_step.on_epoch_end(batch=None)
self.pruning_summaries.on_epoch_begin(epoch=None)
class EMACheckpointing:
"""Eval action to save checkpoint with average weights when EMA is used.
This action swaps the weights of the model with the average weights, then it
saves the checkpoint under export_dir/ema_checkpoints. Checkpointing is
  expensive for large models, so doing this action during eval is more
  efficient than doing it during training.
"""
def __init__(self,
export_dir: str,
optimizer: tf.keras.optimizers.Optimizer,
checkpoint: tf.train.Checkpoint,
max_to_keep: int = 1):
"""Initializes the instance.
Args:
export_dir: `str` for the export directory of the EMA average weights.
optimizer: `tf.keras.optimizers.Optimizer` optimizer instance used for
training. This will be used to swap the model weights with the average
        weights.
checkpoint: `tf.train.Checkpoint` instance.
max_to_keep: `int` for max checkpoints to keep in ema_checkpoints subdir.
"""
if not isinstance(optimizer, optimization.ExponentialMovingAverage):
      raise ValueError('Optimizer has to be an instance of '
                       'optimization.ExponentialMovingAverage for the '
                       'EMACheckpointing action')
export_dir = os.path.join(export_dir, 'ema_checkpoints')
tf.io.gfile.makedirs(os.path.dirname(export_dir))
self._optimizer = optimizer
self._checkpoint = checkpoint
self._checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=export_dir,
max_to_keep=max_to_keep,
checkpoint_name='average_weights')
def __call__(self, output: orbit.runner.Output):
"""Swaps model weights, and saves the checkpoint.
Args:
output: The train or eval output.
"""
self._optimizer.swap_weights()
self._checkpoint_manager.save(checkpoint_number=self._optimizer.iterations)
self._optimizer.swap_weights()
class RecoveryAction:
"""Train action to recover from loss blowup.
  Checks the loss value against the given threshold. If it is exceeded,
  recovers the model by reading the checkpoint on disk.
"""
def __init__(self, checkpoint_manager: tf.train.CheckpointManager):
self.checkpoint_manager = checkpoint_manager
def __call__(self, _):
"""Recovers the training by triggering checkpoint restoration."""
# Loads the previous good checkpoint.
checkpoint_path = self.checkpoint_manager.restore_or_initialize()
logging.warning('Recovering the model from checkpoint: %s.',
checkpoint_path)
class RecoveryCondition:
"""Recovery Condition."""
def __init__(self,
global_step: tf.Variable,
loss_upper_bound: float,
recovery_begin_steps: int = 0,
recovery_max_trials: int = 3):
self.recover_counter = 0
self.recovery_begin_steps = recovery_begin_steps
self.recovery_max_trials = recovery_max_trials
self.loss_upper_bound = loss_upper_bound
self.global_step = global_step
def __call__(self, outputs: orbit.runner.Output):
loss_value = outputs['training_loss']
if tf.math.is_nan(loss_value):
self.recover_counter += 1
if self.recover_counter > self.recovery_max_trials:
raise RuntimeError(
'The loss value is NaN after training loop and it happens %d times.'
% self.recover_counter)
return True
if (self.global_step >= self.recovery_begin_steps and
loss_value > self.loss_upper_bound):
self.recover_counter += 1
if self.recover_counter > self.recovery_max_trials:
raise RuntimeError(
f'The loss value is {loss_value}, which is larger than the bound {self.loss_upper_bound}, happens {self.recover_counter} times.'
)
return True
return False
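# Illustrative sketch: the condition fires once the loss exceeds the bound
# after `recovery_begin_steps`. The step and loss values below are
# hypothetical.
def _recovery_condition_example():
  global_step = tf.Variable(100, dtype=tf.int64)
  condition = RecoveryCondition(
      global_step, loss_upper_bound=10.0, recovery_begin_steps=50)
  return condition({'training_loss': tf.constant(42.0)})  # -> True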
@gin.configurable
def get_eval_actions(params: config_definitions.ExperimentConfig,
trainer: base_trainer.Trainer,
model_dir: str) -> List[orbit.Action]:
"""Gets eval actions for TFM trainer."""
eval_actions = []
# Adds ema checkpointing action to save the average weights under
# ema_checkpoints subdir.
if isinstance(trainer.optimizer, optimization.ExponentialMovingAverage):
eval_actions.append(
EMACheckpointing(
export_dir=model_dir,
optimizer=trainer.optimizer,
checkpoint=trainer.checkpoint,
max_to_keep=params.trainer.max_to_keep))
return eval_actions
@gin.configurable
def get_train_actions(
params: config_definitions.ExperimentConfig, trainer: base_trainer.Trainer,
model_dir: str,
checkpoint_manager: tf.train.CheckpointManager) -> List[orbit.Action]:
"""Gets train actions for TFM trainer."""
train_actions = []
# Adds pruning callback actions.
if hasattr(params.task, 'pruning') and params.task.pruning:
train_actions.append(
PruningAction(
export_dir=model_dir,
model=trainer.model,
optimizer=trainer.optimizer))
if params.trainer.recovery_max_trials >= 0:
recovery_condition = RecoveryCondition(
global_step=trainer.global_step,
loss_upper_bound=params.trainer.loss_upper_bound,
recovery_begin_steps=params.trainer.recovery_begin_steps,
recovery_max_trials=params.trainer.recovery_max_trials,
)
recover_action = orbit.actions.ConditionalAction(
condition=recovery_condition,
action=RecoveryAction(checkpoint_manager),
)
train_actions.append(recover_action)
if (
params.trainer.preemption_on_demand_checkpoint
and trainer.strategy.cluster_resolver
):
on_demand_checkpoint_action = orbit.actions.SaveCheckpointIfPreempted(
trainer.strategy.cluster_resolver,
checkpoint_manager,
trainer.global_step,
keep_running_after_save=True,
)
train_actions.append(on_demand_checkpoint_action)
return train_actions
| 8,466 | 34.725738 | 140 | py |
models | models-master/official/core/savedmodel_checkpoint_manager.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom checkpoint manager that also exports saved models."""
import os
import re
import time
from typing import Callable, List, Mapping, Optional, Union
from absl import logging
import tensorflow as tf
SAVED_MODULES_PATH_SUFFIX = 'saved_modules'
def make_saved_modules_directory_name(checkpoint_name: str) -> str:
return f'{checkpoint_name}_{SAVED_MODULES_PATH_SUFFIX}'
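# For example (illustrative): make_saved_modules_directory_name('ckpt-42')
# returns 'ckpt-42_saved_modules'.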
class SavedModelCheckpointManager(tf.train.CheckpointManager):
"""A CheckpointManager that also exports `SavedModel`s."""
def __init__(self,
checkpoint: tf.train.Checkpoint,
directory: str,
max_to_keep: int,
modules_to_export: Optional[Mapping[str, tf.Module]] = None,
keep_checkpoint_every_n_hours: Optional[int] = None,
checkpoint_name: str = 'ckpt',
step_counter: Optional[tf.Variable] = None,
checkpoint_interval: Optional[int] = None,
init_fn: Optional[Callable[[], None]] = None):
"""See base class."""
super().__init__(
checkpoint=checkpoint,
directory=directory,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
checkpoint_name=checkpoint_name,
step_counter=step_counter,
checkpoint_interval=checkpoint_interval,
init_fn=init_fn)
self._modules_to_export = modules_to_export
self._savedmodels = self.get_existing_savedmodels()
def save(self,
checkpoint_number: Optional[int] = None,
check_interval: bool = True,
options: Optional[tf.train.CheckpointOptions] = None):
"""See base class."""
checkpoint_path = super().save(
checkpoint_number=checkpoint_number,
check_interval=check_interval,
options=options)
if not checkpoint_path: # Nothing got written.
return
if not self._modules_to_export: # No modules to export.
logging.info('Skip saving SavedModel due to empty modules_to_export.')
return checkpoint_path
# Save the models for the checkpoint that just got written.
saved_modules_directory = make_saved_modules_directory_name(checkpoint_path)
    # Atomic export of SavedModel. Write into a temporary directory and then
    # rename it to the final directory after the write finishes.
# This can avoid trying to read an unfinished savedmodel.
saved_modules_directory_tmp = saved_modules_directory + '_temp'
for model_name, model in self._modules_to_export.items():
signatures = getattr(model, 'saved_model_signatures', None)
if signatures is not None:
tf.saved_model.save(
obj=model,
export_dir=os.path.join(saved_modules_directory_tmp, model_name),
signatures=signatures)
if tf.io.gfile.exists(saved_modules_directory_tmp):
tf.io.gfile.rename(saved_modules_directory_tmp, saved_modules_directory)
saved_modules_directories_to_keep = [
make_saved_modules_directory_name(ckpt) for ckpt in self.checkpoints
]
existing_saved_modules_dirs = self.get_existing_savedmodels()
self._savedmodels = []
# Keep savedmodels in the same order as checkpoints (from oldest to newest).
for saved_modules_dir_to_keep in saved_modules_directories_to_keep:
if saved_modules_dir_to_keep in existing_saved_modules_dirs:
self._savedmodels.append(saved_modules_dir_to_keep)
for existing_saved_modules_dir in existing_saved_modules_dirs:
if existing_saved_modules_dir not in self._savedmodels:
tf.io.gfile.rmtree(existing_saved_modules_dir)
return checkpoint_path
def get_existing_savedmodels(self) -> List[str]:
"""Gets a list of all existing SavedModel paths in `directory`.
Returns:
A list of all existing SavedModel paths.
"""
saved_modules_glob = make_saved_modules_directory_name(
self._checkpoint_prefix + '-*')
savedmodels = tf.io.gfile.glob(saved_modules_glob)
# Filter out temporary savedmodel.
savedmodels = [
savedmodel
for savedmodel in savedmodels
if savedmodel.endswith(SAVED_MODULES_PATH_SUFFIX)
]
return savedmodels
@property
def latest_savedmodel(self) -> Union[str, None]:
"""The path of the most recent SavedModel in `directory`.
Returns:
The latest SavedModel path. If there are no SavedModels, returns `None`.
"""
if self._savedmodels:
return self._savedmodels[-1]
return None
@property
def savedmodels(self) -> List[str]:
"""A list of managed SavedModels.
Returns:
A list of SavedModel paths, sorted from oldest to newest.
"""
return self._savedmodels
@property
def modules_to_export(self) -> Union[Mapping[str, tf.Module], None]:
return self._modules_to_export
def get_savedmodel_number_from_path(self,
savedmodel_path: str) -> Union[int, None]:
"""Gets the savedmodel_number/checkpoint_number from savedmodel filepath.
    The savedmodel_number is the global step when used with the orbit
    controller.
Args:
savedmodel_path: savedmodel directory path.
Returns:
Savedmodel number or None if no matched pattern found in savedmodel path.
"""
pattern = rf'\d+_{SAVED_MODULES_PATH_SUFFIX}$'
savedmodel_number = re.search(pattern, savedmodel_path)
if savedmodel_number:
savedmodel_number = savedmodel_number.group()
return int(savedmodel_number[:-len(SAVED_MODULES_PATH_SUFFIX) - 1])
return None
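  # Illustrative examples for the method above (paths are hypothetical):
  #   'gs://bucket/exp/ckpt-1000_saved_modules'      -> 1000
  #   'gs://bucket/exp/ckpt-1000_saved_modules_temp' -> None (temporary dir)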
def savedmodels_iterator(self,
min_interval_secs: float = 0,
timeout: Optional[float] = None,
timeout_fn: Optional[Callable[[], bool]] = None):
"""Continuously yield new SavedModel files as they appear.
The iterator only checks for new savedmodels when control flow has been
    reverted to it. The logic is the same as `tf.train.checkpoints_iterator`.
Args:
min_interval_secs: The minimum number of seconds between yielding
savedmodels.
timeout: The maximum number of seconds to wait between savedmodels. If
left as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new savedmodels will be generated
and the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest SavedModel files as they arrive.
"""
savedmodel_path = None
while True:
new_savedmodel_path = self.wait_for_new_savedmodel(
savedmodel_path, timeout=timeout)
if new_savedmodel_path is None:
if not timeout_fn:
# timed out
logging.info('Timed-out waiting for a savedmodel.')
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more savedmodels may come.
continue
start = time.time()
savedmodel_path = new_savedmodel_path
yield savedmodel_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
def wait_for_new_savedmodel(
self,
last_savedmodel: Optional[str] = None,
seconds_to_sleep: float = 1.0,
timeout: Optional[float] = None) -> Union[str, None]:
"""Waits until a new savedmodel file is found.
Args:
last_savedmodel: The last savedmodel path used or `None` if we're
expecting a savedmodel for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new savedmodel.
timeout: The maximum number of seconds to wait. If left as `None`, then
the process will wait indefinitely.
Returns:
A new savedmodel path, or None if the timeout was reached.
"""
logging.info('Waiting for new savedmodel at %s', self._directory)
stop_time = time.time() + timeout if timeout is not None else None
last_savedmodel_number = 0
if last_savedmodel:
last_savedmodel_number = self.get_savedmodel_number_from_path(
last_savedmodel)
while True:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
existing_savedmodels = {}
for savedmodel_path in self.get_existing_savedmodels():
savedmodel_number = self.get_savedmodel_number_from_path(
savedmodel_path)
if savedmodel_number is not None:
existing_savedmodels[savedmodel_number] = savedmodel_path
# Find the first savedmodel with larger step number as next savedmodel.
savedmodel_path = None
existing_savedmodels = dict(sorted(existing_savedmodels.items()))
for savedmodel_number in existing_savedmodels:
if savedmodel_number > last_savedmodel_number:
savedmodel_path = existing_savedmodels[savedmodel_number]
break
if savedmodel_path:
logging.info('Found new savedmodel at %s', savedmodel_path)
return savedmodel_path
else:
time.sleep(seconds_to_sleep)
| 9,848 | 37.027027 | 80 | py |
models | models-master/official/core/export_base_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.core.export_base."""
import os
from typing import Any, Dict, Mapping, Text
import tensorflow as tf
from official.core import export_base
class TestModule(export_base.ExportModule):
@tf.function
def serve(self, inputs: tf.Tensor) -> Mapping[Text, tf.Tensor]:
x = inputs if self.preprocessor is None else self.preprocessor(
inputs=inputs)
x = self.inference_step(x)
x = self.postprocessor(x) if self.postprocessor else x
return {'outputs': x}
def get_inference_signatures(
self, function_keys: Dict[Text, Text]) -> Mapping[Text, Any]:
input_signature = tf.TensorSpec(shape=[None, None], dtype=tf.float32)
return {'foo': self.serve.get_concrete_function(input_signature)}
class ExportBaseTest(tf.test.TestCase):
def test_export_module(self):
tmp_dir = self.get_temp_dir()
model = tf.keras.layers.Dense(2)
inputs = tf.ones([2, 4], tf.float32)
expected_output = model(inputs, training=False)
module = TestModule(params=None, model=model)
ckpt_path = tf.train.Checkpoint(model=model).save(
os.path.join(tmp_dir, 'ckpt'))
export_dir = export_base.export(
module, ['foo'],
export_savedmodel_dir=tmp_dir,
checkpoint_path=ckpt_path,
timestamped=True)
self.assertTrue(os.path.exists(os.path.join(export_dir, 'saved_model.pb')))
self.assertTrue(
os.path.exists(
os.path.join(export_dir, 'variables', 'variables.index')))
self.assertTrue(
os.path.exists(
os.path.join(export_dir, 'variables',
'variables.data-00000-of-00001')))
imported = tf.saved_model.load(export_dir)
output = imported.signatures['foo'](inputs)
self.assertAllClose(output['outputs'].numpy(), expected_output.numpy())
def test_custom_inference_step(self):
tmp_dir = self.get_temp_dir()
model = tf.keras.layers.Dense(2)
inputs = tf.ones([2, 4], tf.float32)
def _inference_step(inputs, model):
return tf.nn.softmax(model(inputs, training=False))
module = TestModule(
params=None, model=model, inference_step=_inference_step)
expected_output = _inference_step(inputs, model)
ckpt_path = tf.train.Checkpoint(model=model).save(
os.path.join(tmp_dir, 'ckpt'))
export_dir = export_base.export(
module, ['foo'],
export_savedmodel_dir=tmp_dir,
checkpoint_path=ckpt_path,
timestamped=False)
imported = tf.saved_model.load(export_dir)
output = imported.signatures['foo'](inputs)
self.assertAllClose(output['outputs'].numpy(), expected_output.numpy())
def test_processors(self):
model = tf.Module()
inputs = tf.zeros((), tf.float32)
def _inference_step(inputs, model):
del model
return inputs + 1.0
def _preprocessor(inputs):
print(inputs)
return inputs + 0.1
module = TestModule(
params=None,
model=model,
inference_step=_inference_step,
preprocessor=_preprocessor)
output = module.serve(inputs)
self.assertAllClose(output['outputs'].numpy(), 1.1)
class _PostProcessor(tf.Module):
def __call__(self, inputs):
return inputs + 0.01
module = TestModule(
params=None,
model=model,
inference_step=_inference_step,
preprocessor=_preprocessor,
postprocessor=_PostProcessor())
output = module.serve(inputs)
self.assertAllClose(output['outputs'].numpy(), 1.11)
def test_get_timestamped_export_dir(self):
export_dir = self.get_temp_dir()
timed_dir = export_base.get_timestamped_export_dir(
export_dir_base=export_dir)
self.assertFalse(tf.io.gfile.exists(timed_dir))
self.assertIn(export_dir, str(timed_dir))
if __name__ == '__main__':
tf.test.main()
| 4,426 | 32.037313 | 79 | py |
models | models-master/official/core/task_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A global factory to register and access all registered tasks."""
from official.core import registry
_REGISTERED_TASK_CLS = {}
# TODO(b/158741360): Add type annotations once pytype checks across modules.
def register_task_cls(task_config_cls):
"""Decorates a factory of Tasks for lookup by a subclass of TaskConfig.
This decorator supports registration of tasks as follows:
```
@dataclasses.dataclass
class MyTaskConfig(TaskConfig):
# Add fields here.
pass
@register_task_cls(MyTaskConfig)
class MyTask(Task):
# Inherits def __init__(self, task_config).
pass
my_task_config = MyTaskConfig()
my_task = get_task(my_task_config) # Returns MyTask(my_task_config).
```
  Besides a class itself, other callables that create a Task from a TaskConfig
can be decorated by the result of this function, as long as there is at most
one registration for each config class.
Args:
task_config_cls: a subclass of TaskConfig (*not* an instance of TaskConfig).
Each task_config_cls can only be used for a single registration.
Returns:
A callable for use as class decorator that registers the decorated class
for creation from an instance of task_config_cls.
"""
return registry.register(_REGISTERED_TASK_CLS, task_config_cls)
def get_task(task_config, **kwargs):
"""Creates a Task (of suitable subclass type) from task_config."""
# TODO(hongkuny): deprecate the task factory to use config.BUILDER.
if task_config.BUILDER is not None:
return task_config.BUILDER(task_config, **kwargs)
return get_task_cls(task_config.__class__)(task_config, **kwargs)
# The user-visible get_task() is defined after classes have been registered.
# TODO(b/158741360): Add type annotations once pytype checks across modules.
def get_task_cls(task_config_cls):
task_cls = registry.lookup(_REGISTERED_TASK_CLS, task_config_cls)
return task_cls
| 2,513 | 34.408451 | 80 | py |
models | models-master/official/core/tf_example_feature_key.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data classes for tf.Example proto feature keys.
Feature keys are grouped by feature types. Key names follow conventions in
go/tf-example.
"""
import dataclasses
import functools
from typing import Optional
# Disable init function to use the one defined in base class.
dataclass = functools.partial(dataclasses.dataclass(init=False))
@dataclass
class TfExampleFeatureKeyBase:
"""Base dataclass for defining tf.Example proto feature keys.
This class defines the logic of adding prefix to feature keys. Subclasses
will define feature keys for a specific feature type in data fields.
NOTE: Please follow subclass examples in this module to define feature keys
for a new feature type.
"""
def __init__(self, prefix: Optional[str] = None):
"""Instantiates the feature key class.
Adds a string prefix to all fields of a feature key instance if `prefix` is
not None nor empty.
Example usage:
>>> test_key = EncodedImageFeatureKey()
>>> test_key.encoded
image/encoded
>>> test_key = EncodedImageFeatureKey('prefix')
>>> test_key.encoded
prefix/image/encoded
Args:
prefix: A prefix string that will be added before the feature key string
with a trailing slash '/'.
"""
if prefix:
for field in dataclasses.fields(self): # pytype: disable=wrong-arg-types # re-none
key_name = field.name
key_value = getattr(self, key_name)
setattr(self, key_name, f'{prefix}/{key_value}')
| 2,095 | 32.269841 | 90 | py |
models | models-master/official/core/tf_example_builder.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builder class for preparing tf.train.Example."""
# https://www.python.org/dev/peps/pep-0563/#enabling-the-future-behavior-in-python-3-7
from __future__ import annotations
from typing import Mapping, Sequence, Union
import numpy as np
import tensorflow as tf
BytesValueType = Union[bytes, Sequence[bytes], str, Sequence[str]]
_to_array = lambda v: [v] if not isinstance(v, (list, np.ndarray)) else v
_to_bytes = lambda v: v.encode() if isinstance(v, str) else v
_to_bytes_array = lambda v: list(map(_to_bytes, _to_array(v)))
class TfExampleBuilder(object):
"""Builder class for preparing tf.train.Example.
Read API doc at https://www.tensorflow.org/api_docs/python/tf/train/Example.
Example usage:
>>> example_builder = TfExampleBuilder()
>>> example = (
example_builder.add_bytes_feature('feature_a', 'foobarbaz')
.add_ints_feature('feature_b', [1, 2, 3])
.example)
"""
def __init__(self) -> None:
self._example = tf.train.Example()
@property
def example(self) -> tf.train.Example:
"""Returns a copy of the generated tf.train.Example proto."""
return self._example
@property
def serialized_example(self) -> str:
"""Returns a serialized string of the generated tf.train.Example proto."""
return self._example.SerializeToString()
def set(self, example: tf.train.Example) -> TfExampleBuilder:
"""Sets the example."""
self._example = example
return self
def reset(self) -> TfExampleBuilder:
"""Resets the example to an empty proto."""
self._example = tf.train.Example()
return self
###### Basic APIs for primitive data types ######
def add_feature_dict(
self, feature_dict: Mapping[str, tf.train.Feature]) -> TfExampleBuilder:
"""Adds the predefined `feature_dict` to the example.
    Note: Please prefer using feature-type-specific methods.
Args:
feature_dict: A dictionary from tf.Example feature key to
tf.train.Feature.
Returns:
The builder object for subsequent method calls.
"""
for k, v in feature_dict.items():
self._example.features.feature[k].CopyFrom(v)
return self
def add_feature(self, key: str,
feature: tf.train.Feature) -> TfExampleBuilder:
"""Adds predefined `feature` with `key` to the example.
Args:
key: String key of the feature.
feature: The feature to be added to the example.
Returns:
The builder object for subsequent method calls.
"""
self._example.features.feature[key].CopyFrom(feature)
return self
def add_bytes_feature(self, key: str,
value: BytesValueType) -> TfExampleBuilder:
"""Adds byte(s) or string(s) with `key` to the example.
Args:
key: String key of the feature.
value: The byte(s) or string(s) to be added to the example.
Returns:
The builder object for subsequent method calls.
"""
return self.add_feature(
key,
tf.train.Feature(
bytes_list=tf.train.BytesList(value=_to_bytes_array(value))))
def add_ints_feature(self, key: str,
value: Union[int, Sequence[int]]) -> TfExampleBuilder:
"""Adds integer(s) with `key` to the example.
Args:
key: String key of the feature.
value: The integer(s) to be added to the example.
Returns:
The builder object for subsequent method calls.
"""
return self.add_feature(
key,
tf.train.Feature(int64_list=tf.train.Int64List(value=_to_array(value))))
def add_floats_feature(
self, key: str, value: Union[float, Sequence[float]]) -> TfExampleBuilder:
"""Adds float(s) with `key` to the example.
Args:
key: String key of the feature.
value: The float(s) to be added to the example.
Returns:
The builder object for subsequent method calls.
"""
return self.add_feature(
key,
tf.train.Feature(float_list=tf.train.FloatList(value=_to_array(value))))
| 4,623 | 30.889655 | 86 | py |
models | models-master/official/core/train_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM common training driver library."""
# pytype: disable=attribute-error
import os
import tempfile
from typing import Any, List, Mapping, Optional, Tuple
# Import libraries
from absl import logging
import orbit
import tensorflow as tf
from official.core import actions
from official.core import base_task
from official.core import base_trainer
from official.core import config_definitions
from official.core import train_utils
maybe_create_best_ckpt_exporter = train_utils.maybe_create_best_ckpt_exporter
class OrbitExperimentRunner:
"""Runs experiment with Orbit training loop.
  The default experiment runner for Model Garden experiments. Users can
  customize the experiment pipeline by subclassing this class and replacing
  components or functions.
For example, an experiment runner with customized checkpoint manager:
```python
class MyExpRunnerWithExporter(OrbitExperimentRunner):
    def _maybe_build_checkpoint_manager(self):
      # Replaces the default CheckpointManager with a customized one.
      return MyCheckpointManager(*args)
  # In user code, instead of the original
# `OrbitExperimentRunner(..).run(mode)`, now user can do:
MyExpRunnerWithExporter(**needed_kwargs).run(mode)
```
Similar override can be done to other components.
"""
def __init__(
self,
distribution_strategy: tf.distribute.Strategy,
task: base_task.Task,
mode: str,
params: config_definitions.ExperimentConfig,
model_dir: str,
run_post_eval: bool = False,
save_summary: bool = True,
train_actions: Optional[List[orbit.Action]] = None,
eval_actions: Optional[List[orbit.Action]] = None,
trainer: Optional[base_trainer.Trainer] = None,
controller_cls=orbit.Controller,
summary_manager: Optional[orbit.utils.SummaryManager] = None,
eval_summary_manager: Optional[orbit.utils.SummaryManager] = None,
enable_async_checkpointing: bool = False,
):
"""Constructor.
Args:
distribution_strategy: A distribution strategy.
task: A Task instance.
mode: A 'str', specifying the mode. Can be 'train', 'eval',
'train_and_eval' or 'continuous_eval'.
params: ExperimentConfig instance.
model_dir: A 'str', a path to store model checkpoints and summaries.
      run_post_eval: Whether to run a post eval once after training; metrics
        logs are returned.
save_summary: Whether to save train and validation summary.
train_actions: Optional list of Orbit train actions.
eval_actions: Optional list of Orbit eval actions.
trainer: the base_trainer.Trainer instance. It should be created within
the strategy.scope().
controller_cls: The controller class to manage the train and eval process.
        Must be an orbit.Controller subclass.
summary_manager: Instance of the summary manager to override default
summary manager.
eval_summary_manager: Instance of the eval summary manager to override
default eval summary manager.
enable_async_checkpointing: Optional boolean indicating whether to enable
async checkpoint saving.
"""
self.strategy = distribution_strategy or tf.distribute.get_strategy()
self._params = params
self._model_dir = model_dir
self._mode = mode
self._run_post_eval = run_post_eval
self._trainer = trainer or self._build_trainer(
task,
train='train' in mode,
evaluate=('eval' in mode) or run_post_eval)
assert self.trainer is not None
self._checkpoint_manager = self._maybe_build_checkpoint_manager()
self._summary_manager = summary_manager
self._eval_summary_manager = eval_summary_manager
self._controller = self._build_controller(
trainer=self.trainer if 'train' in mode else None,
evaluator=self.trainer,
save_summary=save_summary,
train_actions=train_actions,
eval_actions=eval_actions,
controller_cls=controller_cls,
enable_async_checkpointing=enable_async_checkpointing)
@property
def params(self) -> config_definitions.ExperimentConfig:
"""The whole experiment parameters object."""
return self._params
@property
def model_dir(self) -> str:
"""Path to the model folder, which stores checkpoints, params, log, etc."""
return self._model_dir
@property
def trainer(self) -> base_trainer.Trainer:
"""The underlying Orbit Trainer object."""
return self._trainer
@property
def checkpoint_manager(self) -> Optional[tf.train.CheckpointManager]:
"""The CheckpointManager that stores the checkpoints in a train job."""
return self._checkpoint_manager
@property
def controller(self) -> orbit.Controller:
"""The Orbit controller object."""
return self._controller
def _build_trainer(self, task: base_task.Task, train: bool,
evaluate: bool) -> base_trainer.Trainer:
"""Create trainer."""
with self.strategy.scope():
trainer = train_utils.create_trainer(
self.params,
task,
train=train,
evaluate=evaluate,
checkpoint_exporter=self._build_best_checkpoint_exporter())
return trainer
def _build_best_checkpoint_exporter(self):
return maybe_create_best_ckpt_exporter(self.params, self.model_dir)
def _maybe_build_checkpoint_manager(
self) -> Optional[tf.train.CheckpointManager]:
"""Maybe create a CheckpointManager."""
assert self.trainer is not None
if self.trainer.checkpoint:
if self.model_dir is None:
raise ValueError('model_dir must be specified, but got None')
if (not self.strategy) or self.strategy.extended.should_checkpoint:
ckpt_path = self.model_dir
max_to_keep = self.params.trainer.max_to_keep
else:
# In multi worker training we need every worker to save checkpoint,
# because variables can trigger synchronization on read and
# synchronization needs all workers to participate. To avoid workers
        # overwriting each other we save to a temporary directory on non-chief
# workers.
ckpt_path = tempfile.mkdtemp()
max_to_keep = 1
checkpoint_manager = tf.train.CheckpointManager(
self.trainer.checkpoint,
directory=ckpt_path,
max_to_keep=max_to_keep,
step_counter=self.trainer.global_step,
checkpoint_interval=self.params.trainer.checkpoint_interval,
init_fn=self.trainer.initialize)
else:
checkpoint_manager = None
return checkpoint_manager
def _build_controller(
self,
trainer,
evaluator,
save_summary: bool = True,
train_actions: Optional[List[orbit.Action]] = None,
eval_actions: Optional[List[orbit.Action]] = None,
controller_cls=orbit.Controller,
enable_async_checkpointing: bool = False,
) -> orbit.Controller:
"""Builds a Orbit controler."""
train_actions = [] if not train_actions else train_actions
if trainer:
checkpoint_manager = self.checkpoint_manager
assert checkpoint_manager, 'Checkpoint manager required but undefined.'
train_actions += actions.get_train_actions(
self.params,
trainer,
self.model_dir,
checkpoint_manager=checkpoint_manager,
)
eval_actions = [] if not eval_actions else eval_actions
if evaluator:
eval_actions += actions.get_eval_actions(self.params, evaluator,
self.model_dir)
if save_summary:
eval_summary_dir = os.path.join(
self.model_dir, self.params.trainer.validation_summary_subdir
)
else:
eval_summary_dir = None
controller = controller_cls(
strategy=self.strategy,
trainer=trainer,
evaluator=evaluator,
global_step=self.trainer.global_step,
steps_per_loop=self.params.trainer.steps_per_loop,
checkpoint_manager=self.checkpoint_manager,
enable_async_checkpointing=enable_async_checkpointing,
summary_dir=os.path.join(self.model_dir, 'train')
if (save_summary)
else None,
eval_summary_dir=eval_summary_dir,
summary_interval=self.params.trainer.summary_interval
if (save_summary)
else None,
train_actions=train_actions,
eval_actions=eval_actions,
summary_manager=self._summary_manager
if hasattr(self, '_summary_manager')
else None,
eval_summary_manager=self._eval_summary_manager
if hasattr(self, '_eval_summary_manager')
else None,
)
return controller
def run(self) -> Tuple[tf.keras.Model, Mapping[str, Any]]:
"""Run experiments by mode.
Returns:
A 2-tuple of (model, eval_logs).
model: `tf.keras.Model` instance.
eval_logs: returns eval metrics logs when run_post_eval is set to True,
otherwise, returns {}.
"""
mode = self._mode
params = self.params
logging.info('Starts to execute mode: %s', mode)
with self.strategy.scope():
if mode == 'train' or mode == 'train_and_post_eval':
self.controller.train(steps=params.trainer.train_steps)
elif mode == 'train_and_eval':
self.controller.train_and_evaluate(
train_steps=params.trainer.train_steps,
eval_steps=params.trainer.validation_steps,
eval_interval=params.trainer.validation_interval)
elif mode == 'eval':
self.controller.evaluate(steps=params.trainer.validation_steps)
elif mode == 'continuous_eval':
def timeout_fn():
if self.trainer.global_step.numpy() >= params.trainer.train_steps:
return True
return False
self.controller.evaluate_continuously(
steps=params.trainer.validation_steps,
timeout=params.trainer.continuous_eval_timeout,
timeout_fn=timeout_fn)
else:
raise NotImplementedError('The mode is not implemented: %s' % mode)
num_params = train_utils.try_count_params(self.trainer.model)
if num_params is not None:
logging.info('Number of trainable params in model: %f Millions.',
num_params / 10.**6)
flops = train_utils.try_count_flops(self.trainer.model)
if flops is not None:
logging.info('FLOPs (multi-adds) in model: %f Billions.',
flops / 10.**9 / 2)
if self._run_post_eval or mode == 'train_and_post_eval':
with self.strategy.scope():
return self.trainer.model, self.controller.evaluate(
steps=params.trainer.validation_steps)
else:
return self.trainer.model, {}
def run_experiment(
distribution_strategy: tf.distribute.Strategy,
task: base_task.Task,
mode: str,
params: config_definitions.ExperimentConfig,
model_dir: str,
run_post_eval: bool = False,
save_summary: bool = True,
train_actions: Optional[List[orbit.Action]] = None,
eval_actions: Optional[List[orbit.Action]] = None,
trainer: Optional[base_trainer.Trainer] = None,
controller_cls=orbit.Controller,
summary_manager: Optional[orbit.utils.SummaryManager] = None,
eval_summary_manager: Optional[orbit.utils.SummaryManager] = None,
enable_async_checkpointing: bool = False,
) -> Tuple[tf.keras.Model, Mapping[str, Any]]:
"""Runs train/eval configured by the experiment params.
Args:
    distribution_strategy: A distribution strategy.
task: A Task instance.
mode: A 'str', specifying the mode. Can be 'train', 'eval', 'train_and_eval'
or 'continuous_eval'.
params: ExperimentConfig instance.
model_dir: A 'str', a path to store model checkpoints and summaries.
    run_post_eval: Whether to run a post eval once after training; metrics
      logs are returned.
save_summary: Whether to save train and validation summary.
train_actions: Optional list of Orbit train actions.
eval_actions: Optional list of Orbit eval actions.
trainer: the base_trainer.Trainer instance. It should be created within the
strategy.scope().
controller_cls: The controller class to manage the train and eval process.
Must be a orbit.Controller subclass.
summary_manager: Instance of the summary manager to override default summary
manager.
eval_summary_manager: Instance of the eval summary manager to override
default eval summary manager.
enable_async_checkpointing: Optional boolean indicating whether to enable
async checkpoint saving.
Returns:
A 2-tuple of (model, eval_logs).
model: `tf.keras.Model` instance.
eval_logs: returns eval metrics logs when run_post_eval is set to True,
otherwise, returns {}.
"""
runner = OrbitExperimentRunner(
distribution_strategy=distribution_strategy,
task=task,
mode=mode,
params=params,
model_dir=model_dir,
run_post_eval=run_post_eval,
save_summary=save_summary,
train_actions=train_actions,
eval_actions=eval_actions,
trainer=trainer,
controller_cls=controller_cls,
summary_manager=summary_manager,
eval_summary_manager=eval_summary_manager,
enable_async_checkpointing=enable_async_checkpointing,
)
return runner.run()
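# --- Editor's sketch (added for illustration; not part of the original file).
# A minimal way this entry point is typically wired up. The experiment name,
# model directory and GPU count below are hypothetical placeholders, and the
# helper is never called by the library itself.
def _example_run_experiment():  # pragma: no cover
  from official.common import distribute_utils
  from official.core import exp_factory
  from official.core import task_factory
  strategy = distribute_utils.get_distribution_strategy(
      distribution_strategy='mirrored', num_gpus=1)
  params = exp_factory.get_exp_config('my_experiment')  # hypothetical name
  with strategy.scope():
    task = task_factory.get_task(params.task, logging_dir='/tmp/my_model_dir')
  return run_experiment(
      distribution_strategy=strategy,
      task=task,
      mode='train_and_eval',
      params=params,
      model_dir='/tmp/my_model_dir')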
| 13,911 | 36.297587 | 80 | py |
models | models-master/official/core/actions_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TFM actions."""
import os
from absl.testing import parameterized
import numpy as np
import orbit
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.core import actions
from official.modeling import optimization
class TestModel(tf.keras.Model):
def __init__(self):
super().__init__()
self.value = tf.Variable(0.0)
self.dense = tf.keras.layers.Dense(2)
_ = self.dense(tf.zeros((2, 2), tf.float32))
def call(self, x, training=None):
return self.value + x
class ActionsTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy,
],))
def test_ema_checkpointing(self, distribution):
with distribution.scope():
directory = self.create_tempdir()
model = TestModel()
optimizer = tf.keras.optimizers.SGD()
optimizer = optimization.ExponentialMovingAverage(
optimizer, trainable_weights_only=False)
# Creates average weights for the model variables. Average weights are
# initialized to zero.
optimizer.shadow_copy(model)
checkpoint = tf.train.Checkpoint(model=model)
# Changes model.value to 3, average value is still 0.
model.value.assign(3)
# Checks model.value is 3
self.assertEqual(model(0.), 3)
ema_action = actions.EMACheckpointing(directory, optimizer, checkpoint)
ema_action({})
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(directory, 'ema_checkpoints')))
checkpoint.read(
tf.train.latest_checkpoint(
os.path.join(directory, 'ema_checkpoints')))
# Checks model.value is 0 after swapping.
self.assertEqual(model(0.), 0)
# Raises an error for a normal optimizer.
with self.assertRaisesRegex(ValueError,
'Optimizer has to be instance of.*'):
_ = actions.EMACheckpointing(directory, tf.keras.optimizers.SGD(),
checkpoint)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],))
def test_recovery_condition(self, distribution):
with distribution.scope():
global_step = orbit.utils.create_global_step()
recover_condition = actions.RecoveryCondition(
global_step, loss_upper_bound=0.5, recovery_max_trials=2)
outputs = {'training_loss': 0.6}
self.assertTrue(recover_condition(outputs))
self.assertTrue(recover_condition(outputs))
with self.assertRaises(RuntimeError):
recover_condition(outputs)
global_step = orbit.utils.create_global_step()
recover_condition = actions.RecoveryCondition(
global_step, loss_upper_bound=0.5, recovery_max_trials=2)
outputs = {'training_loss': tf.constant([np.nan], tf.float32)}
self.assertTrue(recover_condition(outputs))
self.assertTrue(recover_condition(outputs))
with self.assertRaises(RuntimeError):
recover_condition(outputs)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.one_device_strategy,
],))
def test_pruning(self, distribution):
with distribution.scope():
directory = self.get_temp_dir()
model = TestModel()
optimizer = tf.keras.optimizers.SGD()
pruning = actions.PruningAction(directory, model, optimizer)
pruning({})
if __name__ == '__main__':
tf.test.main()
| 4,515 | 33.212121 | 77 | py |
models | models-master/official/core/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core is shared by both `nlp` and `vision`."""
from official.core import actions
from official.core import base_task
from official.core import base_trainer
from official.core import config_definitions
from official.core import exp_factory
from official.core import export_base
from official.core import file_writers
from official.core import input_reader
from official.core import registry
from official.core import savedmodel_checkpoint_manager
from official.core import task_factory
from official.core import tf_example_builder
from official.core import tf_example_feature_key
from official.core import train_lib
from official.core import train_utils
| 1,265 | 38.5625 | 74 | py |
models | models-master/official/core/file_writers_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for file_writers."""
import os
from absl.testing import parameterized
import tensorflow as tf
from official.core import file_writers
from official.core import tf_example_builder
class FileWritersTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
example_builder = tf_example_builder.TfExampleBuilder()
example_builder.add_bytes_feature('foo', 'Hello World!')
self._example = example_builder.example
@parameterized.parameters('tfrecord', 'TFRecord', 'tfrecords',
'tfrecord_compressed', 'TFRecord_Compressed',
'tfrecords_gzip')
def test_write_small_dataset_success(self, file_type):
temp_dir = self.create_tempdir()
temp_dataset_file = os.path.join(temp_dir.full_path, 'train')
file_writers.write_small_dataset([self._example], temp_dataset_file,
file_type)
self.assertTrue(os.path.exists(temp_dataset_file))
def test_write_small_dataset_unrecognized_format(self):
file_type = 'bar'
temp_dir = self.create_tempdir()
temp_dataset_file = os.path.join(temp_dir.full_path, 'train')
with self.assertRaises(ValueError):
file_writers.write_small_dataset([self._example], temp_dataset_file,
file_type)
if __name__ == '__main__':
tf.test.main()
| 1,987 | 35.814815 | 74 | py |
models | models-master/official/core/exp_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiment factory methods."""
from official.core import config_definitions as cfg
from official.core import registry
_REGISTERED_CONFIGS = {}
def register_config_factory(name):
"""Register ExperimentConfig factory method."""
return registry.register(_REGISTERED_CONFIGS, name)
def get_exp_config(exp_name: str) -> cfg.ExperimentConfig:
"""Looks up the `ExperimentConfig` according to the `exp_name`."""
exp_creater = registry.lookup(_REGISTERED_CONFIGS, exp_name)
return exp_creater()
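# --- Editor's sketch (added for illustration; not part of the original file).
# How a config factory is typically registered and later looked up; the
# experiment name and its contents are hypothetical, so the example is kept in
# comments to avoid registering a dummy entry at import time.
#
# @register_config_factory('my_experiment')
# def my_experiment_config() -> cfg.ExperimentConfig:
#   return cfg.ExperimentConfig(
#       task=cfg.TaskConfig(),
#       trainer=cfg.TrainerConfig(train_steps=100))
#
# config = get_exp_config('my_experiment')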
| 1,115 | 32.818182 | 74 | py |
models | models-master/official/core/tf_example_feature_key_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_example_feature_key."""
import dataclasses
import inspect
from absl.testing import absltest
from absl.testing import parameterized
from official.core import tf_example_feature_key
@tf_example_feature_key.dataclass
class TestFeatureKey(tf_example_feature_key.TfExampleFeatureKeyBase):
test: str = 'foo/bar'
class TfExampleFeatureKeyTest(parameterized.TestCase):
def test_add_prefix_success(self):
test_key = TestFeatureKey('prefix')
self.assertEqual(test_key.test, 'prefix/foo/bar')
@parameterized.parameters(None, '')
def test_add_prefix_skip_success(self, prefix):
test_key = TestFeatureKey(prefix)
self.assertEqual(test_key.test, 'foo/bar')
def test_all_feature_key_classes_are_valid(self):
for _, obj in inspect.getmembers(tf_example_feature_key):
if inspect.isclass(obj):
self.assertTrue(dataclasses.is_dataclass(obj))
self.assertTrue(
issubclass(obj, tf_example_feature_key.TfExampleFeatureKeyBase))
if __name__ == '__main__':
absltest.main()
| 1,649 | 32 | 76 | py |
models | models-master/official/core/export_base.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for model export."""
import abc
import functools
import time
from typing import Any, Callable, Dict, Mapping, List, Optional, Text, Union
from absl import logging
import tensorflow as tf
MAX_DIRECTORY_CREATION_ATTEMPTS = 10
class ExportModule(tf.Module, metaclass=abc.ABCMeta):
"""Base Export Module."""
def __init__(self,
params,
model: Union[tf.Module, tf.keras.Model],
inference_step: Optional[Callable[..., Any]] = None,
*,
preprocessor: Optional[Callable[..., Any]] = None,
postprocessor: Optional[Callable[..., Any]] = None):
"""Instantiates an ExportModel.
Examples:
`inference_step` must be a function that has `model` as a kwarg or the
second positional argument.
```
def _inference_step(inputs, model=None):
return model(inputs, training=False)
module = ExportModule(params, model, inference_step=_inference_step)
```
`preprocessor` and `postprocessor` could be either functions or `tf.Module`.
The usages of preprocessor and postprocessor are managed by the
implementation of `serve()` method.
Args:
params: A dataclass for parameters to the module.
model: A model instance which contains weights and forward computation.
inference_step: An optional callable to forward-pass the model. If not
specified, it creates a partial function with `model` as a required
kwarg.
preprocessor: An optional callable to preprocess the inputs.
postprocessor: An optional callable to postprocess the model outputs.
"""
super().__init__(name=None)
self.model = model
self.params = params
if inference_step is not None:
self.inference_step = functools.partial(inference_step, model=self.model)
else:
if issubclass(type(model), tf.keras.Model):
# Default to self.model.call instead of self.model.__call__ to avoid
# keras tracing logic designed for training.
# Since most of Model Garden's call methods do not have training kwargs
# or the default is False, we don't pass anything here.
# Please pass custom inference step if your model has training=True as
# default.
self.inference_step = self.model.call
else:
self.inference_step = functools.partial(
self.model.__call__, training=False)
self.preprocessor = preprocessor
self.postprocessor = postprocessor
@abc.abstractmethod
def serve(self) -> Mapping[Text, tf.Tensor]:
"""The bare inference function which should run on all devices.
Tensors are expected to be passed in through keyword arguments. Returns a
dictionary of tensors, whose keys will be used inside the SignatureDef.
"""
@abc.abstractmethod
def get_inference_signatures(
self, function_keys: Dict[Text, Text]) -> Mapping[Text, Any]:
"""Get defined function signatures."""
def export(export_module: ExportModule,
function_keys: Union[List[Text], Dict[Text, Text]],
export_savedmodel_dir: Text,
checkpoint_path: Optional[Text] = None,
timestamped: bool = True,
save_options: Optional[tf.saved_model.SaveOptions] = None,
checkpoint: Optional[tf.train.Checkpoint] = None) -> Text:
"""Exports to SavedModel format.
Args:
export_module: an ExportModule with the keras Model and serving tf.functions.
function_keys: a list of string keys to retrieve pre-defined serving
signatures. The signature keys will be set with defaults. If a dictionary
is provided, the values will be used as signature keys.
export_savedmodel_dir: Output saved model directory.
checkpoint_path: Object-based checkpoint path or directory.
timestamped: Whether to export the savedmodel to a timestamped directory.
save_options: `SaveOptions` for `tf.saved_model.save`.
checkpoint: An optional tf.train.Checkpoint. If provided, the export module
will use it to read the weights.
Returns:
The savedmodel directory path.
"""
ckpt_dir_or_file = checkpoint_path
if ckpt_dir_or_file is not None and tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
if ckpt_dir_or_file:
if checkpoint is None:
checkpoint = tf.train.Checkpoint(model=export_module.model)
checkpoint.read(
ckpt_dir_or_file).assert_existing_objects_matched().expect_partial()
if isinstance(function_keys, list):
if len(function_keys) == 1:
function_keys = {
function_keys[0]: tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
}
else:
raise ValueError(
'If the function_keys is a list, it must contain a single element. %s'
% function_keys)
signatures = export_module.get_inference_signatures(function_keys)
if timestamped:
export_dir = get_timestamped_export_dir(export_savedmodel_dir).decode(
'utf-8')
else:
export_dir = export_savedmodel_dir
tf.saved_model.save(
export_module, export_dir, signatures=signatures, options=save_options)
return export_dir
def get_timestamped_export_dir(export_dir_base):
"""Builds a path to a new subdirectory within the base directory.
Args:
export_dir_base: A string containing a directory to write the exported graph
and checkpoints.
Returns:
The full path of the new subdirectory (which is not actually created yet).
Raises:
RuntimeError: if repeated attempts fail to obtain a unique timestamped
directory name.
"""
attempts = 0
while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
timestamp = int(time.time())
result_dir = tf.io.gfile.join(
tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(str(timestamp)))
if not tf.io.gfile.exists(result_dir):
# Collisions are still possible (though extremely unlikely): this
# directory is not actually created yet, but it will be almost
# instantly on return from this function.
return result_dir
time.sleep(1)
attempts += 1
logging.warning('Directory %s already exists; retrying (attempt %s/%s)',
str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS)
raise RuntimeError('Failed to obtain a unique export directory name after '
f'{MAX_DIRECTORY_CREATION_ATTEMPTS} attempts.')
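# --- Editor's sketch (added for illustration; not part of the original file).
# Exporting a concrete ExportModule subclass; `MyExportModule` and the paths
# below are hypothetical, and the subclass must implement `serve()` and
# `get_inference_signatures()` as documented above.
#
# module = MyExportModule(params, model=my_model)
# saved_dir = export(
#     module,
#     function_keys=['serve'],
#     export_savedmodel_dir='/tmp/saved_models',
#     checkpoint_path='/tmp/checkpoints',
#     timestamped=True)
# # `saved_dir` is a timestamped subdirectory, e.g. '/tmp/saved_models/1690000000'.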
| 7,025 | 37.393443 | 80 | py |
models | models-master/official/core/base_trainer_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_models.core.trainers.trainer."""
# pylint: disable=g-direct-tensorflow-import
import gc
import multiprocessing
import os
import sys
from absl.testing import parameterized
import orbit
import portpicker
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.core import base_trainer as trainer_lib
from official.core import config_definitions as cfg
from official.core import train_lib
from official.utils.testing import mock_task
TPU_TEST = 'test_tpu' in sys.argv[0]
GPU_TEST = 'test_gpu' in sys.argv[0]
def all_strategy_combinations():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],)
def create_in_process_cluster(num_workers, num_ps):
"""Creates and starts local servers and returns the cluster_resolver."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {}
cluster_dict['worker'] = ['localhost:%s' % port for port in worker_ports]
if num_ps > 0:
cluster_dict['ps'] = ['localhost:%s' % port for port in ps_ports]
cluster_spec = tf.train.ClusterSpec(cluster_dict)
# Workers need some inter-op parallelism threads to work properly.
worker_config = tf.compat.v1.ConfigProto()
if multiprocessing.cpu_count() < num_workers + 1:
worker_config.inter_op_parallelism_threads = num_workers + 1
for i in range(num_workers):
tf.distribute.Server(
cluster_spec,
job_name='worker',
task_index=i,
config=worker_config,
protocol='grpc')
for i in range(num_ps):
tf.distribute.Server(
cluster_spec, job_name='ps', task_index=i, protocol='grpc')
cluster_resolver = tf.distribute.cluster_resolver.SimpleClusterResolver(
cluster_spec, rpc_layer='grpc')
return cluster_resolver
def dataset_fn(input_context=None):
del input_context
def dummy_data(_):
return tf.zeros((1, 1), dtype=tf.float32)
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
class MockAsyncTrainer(trainer_lib._AsyncTrainer):
"""Mock AsyncTrainer to test the _AsyncTrainer class."""
def __init__(self):
self._strategy = tf.distribute.get_strategy()
self.init_async()
self.global_step = tf.Variable(
0,
dtype=tf.int64,
name='global_step',
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
self.eval_global_step = tf.Variable(
0,
dtype=tf.int64,
name='eval_global_step',
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
train_dataset = self.distribute_dataset(dataset_fn)
orbit.StandardTrainer.__init__(
self, train_dataset, options=orbit.StandardTrainerOptions())
validation_dataset = self.distribute_dataset(dataset_fn)
orbit.StandardEvaluator.__init__(
self,
validation_dataset,
options=orbit.StandardEvaluatorOptions(use_tf_while_loop=True))
def train_loop_begin(self):
self.global_step.assign(0)
def train_step(self, iterator):
def replica_step(_):
self.global_step.assign_add(1)
self._strategy.run(replica_step, args=(next(iterator),))
def train_loop_end(self):
self.join()
return self.global_step.numpy()
def eval_begin(self):
self.eval_global_step.assign(0)
def eval_step(self, iterator):
def replica_step(_):
self.eval_global_step.assign_add(1)
self._strategy.run(replica_step, args=(next(iterator),))
def eval_end(self):
self.join()
return self.eval_global_step.numpy()
class TrainerTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self._config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEmpty(gc.garbage)
super().tearDown()
def create_test_trainer(self, config, model_dir=None, task=None):
task = task or mock_task.MockTask(config.task, logging_dir=model_dir)
ckpt_exporter = train_lib.maybe_create_best_ckpt_exporter(config, model_dir)
trainer = trainer_lib.Trainer(
config,
task,
model=task.build_model(),
optimizer=task.create_optimizer(config.trainer.optimizer_config,
config.runtime),
checkpoint_exporter=ckpt_exporter)
return trainer
@combinations.generate(all_strategy_combinations())
def test_trainer_train(self, distribution):
with distribution.scope():
trainer = self.create_test_trainer(self._config)
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
self.assertIn('learning_rate', logs)
@combinations.generate(all_strategy_combinations())
def test_trainer_passing_datasets(self, distribution):
with distribution.scope():
task = mock_task.MockTask(self._config)
train_dataset = orbit.utils.make_distributed_dataset(
distribution, task.build_inputs, self._config.task.train_data)
validation_dataset = orbit.utils.make_distributed_dataset(
distribution, task.build_inputs, self._config.task.validation_data)
self._config.task.train_data = None
self._config.task.validation_data = None
trainer = trainer_lib.Trainer(
self._config,
task,
model=task.build_model(),
optimizer=task.create_optimizer(self._config.trainer.optimizer_config,
self._config.runtime),
train_dataset=train_dataset,
validation_dataset=validation_dataset)
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
self.assertIn('learning_rate', logs)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('validation_loss', logs)
def test_base_async_trainer(self):
if TPU_TEST or GPU_TEST:
self.skipTest('Async training is not available on GPU/TPU.')
num_workers = 3
num_ps = 2
cluster_resolver = create_in_process_cluster(num_workers, num_ps)
distribution = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with distribution.scope():
trainer = MockAsyncTrainer()
trainer.init_async()
self.assertIsInstance(
trainer._coordinator,
tf.distribute.experimental.coordinator.ClusterCoordinator)
self.assertEqual(trainer.train(tf.constant(10)), 10)
self.assertEqual(trainer.evaluate(tf.constant(11)), 11)
def test_async_trainer_train(self):
if TPU_TEST or GPU_TEST:
self.skipTest('Async training is not available on GPU/TPU.')
num_workers = 3
num_ps = 2
cluster_resolver = create_in_process_cluster(num_workers, num_ps)
distribution = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with distribution.scope():
config = cfg.ExperimentConfig(**self._config.as_dict())
config.trainer.eval_tf_while_loop = True
trainer = self.create_test_trainer(config)
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
self.assertIn('learning_rate', logs)
def test_async_trainer_validate(self):
if TPU_TEST or GPU_TEST:
self.skipTest('Async training is not available on GPU/TPU.')
num_workers = 3
num_ps = 2
cluster_resolver = create_in_process_cluster(num_workers, num_ps)
distribution = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with distribution.scope():
config = cfg.ExperimentConfig(**self._config.as_dict())
config.trainer.eval_tf_while_loop = True
trainer = self.create_test_trainer(config)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('acc', logs)
self.assertIn('validation_loss', logs)
@combinations.generate(all_strategy_combinations())
def test_trainer_validate(self, distribution):
with distribution.scope():
trainer = self.create_test_trainer(self._config)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync)
self.assertIn('validation_loss', logs)
@combinations.generate(all_strategy_combinations())
def test_trainer_validate_without_loss(self, distribution):
class MockTaskWithoutValidationLoss(mock_task.MockTask):
def validation_step(self, inputs, model, metrics=None):
# Disable validation loss.
logs = super().validation_step(inputs, model)
del logs[self.loss]
return logs
with distribution.scope():
task = MockTaskWithoutValidationLoss()
trainer = self.create_test_trainer(self._config, task=task)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync)
self.assertNotIn('validation_loss', logs)
@combinations.generate(
combinations.combine(
mixed_precision_dtype=['float32', 'bfloat16', 'float16'],
loss_scale=[None, 'dynamic', 128, 256],
))
def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(
mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),
trainer=cfg.TrainerConfig(
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
},
})))
trainer = self.create_test_trainer(config)
if mixed_precision_dtype == 'float16':
self.assertIsInstance(trainer.optimizer,
tf.keras.mixed_precision.LossScaleOptimizer)
if loss_scale in (None, 'dynamic'):
self.assertTrue(trainer.optimizer.dynamic)
else:
self.assertFalse(trainer.optimizer.dynamic)
self.assertEqual(trainer.optimizer.initial_scale, loss_scale)
else:
self.assertIsInstance(
trainer.optimizer,
(tf.keras.optimizers.SGD, tf.keras.optimizers.legacy.SGD))
metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', metrics)
def test_export_best_ckpt(self):
config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_eval_metric='acc',
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
model_dir = self.get_temp_dir()
trainer = self.create_test_trainer(config, model_dir=model_dir)
trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))
trainer.evaluate(tf.convert_to_tensor(1, dtype=tf.int32))
self.assertTrue(
tf.io.gfile.exists(os.path.join(model_dir, 'best_ckpt', 'info.json')))
def test_model_with_compiled_loss(self):
task = mock_task.MockTask()
model = task.build_model()
model.compile(loss=tf.keras.losses.CategoricalCrossentropy())
trainer = trainer_lib.Trainer(
self._config,
task,
model=model,
optimizer=task.create_optimizer(self._config.trainer.optimizer_config))
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
if __name__ == '__main__':
tf.test.main()
| 13,011 | 34.747253 | 80 | py |
models | models-master/official/core/test_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for testing."""
import tensorflow as tf
class FakeKerasModel(tf.keras.Model):
"""Fake keras model for testing."""
def __init__(self):
super().__init__()
self.dense = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
def call(self, inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
return self.dense2(self.dense(inputs))
class _Dense(tf.Module):
"""A dense layer."""
def __init__(self, input_dim, output_size, name=None):
super().__init__(name=name)
with self.name_scope:
self.w = tf.Variable(
tf.random.normal([input_dim, output_size]), name='w')
self.b = tf.Variable(tf.zeros([output_size]), name='b')
@tf.Module.with_name_scope
def __call__(self, x):
y = tf.matmul(x, self.w) + self.b
return tf.nn.relu(y)
class FakeModule(tf.Module):
"""Fake model using tf.Module for testing."""
def __init__(self, input_size, name=None):
super().__init__(name=name)
with self.name_scope:
self.dense = _Dense(input_size, 4, name='dense')
self.dense2 = _Dense(4, 4, name='dense_1')
@tf.Module.with_name_scope
def __call__(self, x):
return self.dense2(self.dense(x))
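# --- Editor's note (added for illustration; not part of the original file).
# Both fakes map a [batch, input_dim] tensor to a [batch, 4] tensor, e.g.:
#
# model = FakeKerasModel()
# out = model(tf.ones([2, 3]))       # shape (2, 4)
# module = FakeModule(input_size=3)
# out = module(tf.ones([2, 3]))      # shape (2, 4)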
| 1,877 | 30.3 | 100 | py |
models | models-master/official/common/distribute_utils.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for running models in a distributed setting."""
import json
import os
import tensorflow as tf
def _collective_communication(all_reduce_alg):
"""Return a CollectiveCommunication based on all_reduce_alg.
Args:
all_reduce_alg: a string specifying which collective communication to pick,
or None.
Returns:
tf.distribute.experimental.CollectiveCommunication object
Raises:
ValueError: if `all_reduce_alg` not in [None, "ring", "nccl"]
"""
collective_communication_options = {
None: tf.distribute.experimental.CollectiveCommunication.AUTO,
"ring": tf.distribute.experimental.CollectiveCommunication.RING,
"nccl": tf.distribute.experimental.CollectiveCommunication.NCCL
}
if all_reduce_alg not in collective_communication_options:
raise ValueError(
"When used with `multi_worker_mirrored`, valid values for "
"all_reduce_alg are [`ring`, `nccl`]. Supplied value: {}".format(
all_reduce_alg))
return collective_communication_options[all_reduce_alg]
def _mirrored_cross_device_ops(all_reduce_alg, num_packs):
"""Return a CrossDeviceOps based on all_reduce_alg and num_packs.
Args:
all_reduce_alg: a string specifying which cross device op to pick, or None.
num_packs: an integer specifying number of packs for the cross device op.
Returns:
tf.distribute.CrossDeviceOps object or None.
Raises:
ValueError: if `all_reduce_alg` not in [None, "nccl", "hierarchical_copy"].
"""
if all_reduce_alg is None:
return None
mirrored_all_reduce_options = {
"nccl": tf.distribute.NcclAllReduce,
"hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce
}
if all_reduce_alg not in mirrored_all_reduce_options:
raise ValueError(
"When used with `mirrored`, valid values for all_reduce_alg are "
"[`nccl`, `hierarchical_copy`]. Supplied value: {}".format(
all_reduce_alg))
cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg]
return cross_device_ops_class(num_packs=num_packs)
def tpu_initialize(tpu_address):
"""Initializes TPU for TF 2.x training.
Args:
tpu_address: string, bns address of master TPU worker.
Returns:
A TPUClusterResolver.
"""
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=tpu_address)
if tpu_address not in ("", "local"):
tf.config.experimental_connect_to_cluster(cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
return cluster_resolver
def get_distribution_strategy(distribution_strategy="mirrored",
num_gpus=0,
all_reduce_alg=None,
num_packs=1,
tpu_address=None,
**kwargs):
"""Return a Strategy for running the model.
Args:
distribution_strategy: a string specifying which distribution strategy to
use. Accepted values are "off", "one_device", "mirrored",
"parameter_server", "multi_worker_mirrored", and "tpu" -- case
insensitive. "tpu" means to use TPUStrategy using `tpu_address`.
"off" means to use the default strategy which is obtained from
tf.distribute.get_strategy (for details on the default strategy, see
https://www.tensorflow.org/guide/distributed_training#default_strategy).
num_gpus: Number of GPUs to run this model.
all_reduce_alg: Optional. Specifies which algorithm to use when performing
all-reduce. For `MirroredStrategy`, valid values are "nccl" and
"hierarchical_copy". For `MultiWorkerMirroredStrategy`, valid values are
"ring" and "nccl". If None, DistributionStrategy will choose based on
device topology.
num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce`
or `tf.distribute.HierarchicalCopyAllReduce` for `MirroredStrategy`.
tpu_address: Optional. String that represents TPU to connect to. Must not be
None if `distribution_strategy` is set to `tpu`.
**kwargs: Additional kwargs for internal usages.
Returns:
tf.distribute.Strategy object.
Raises:
ValueError: if `distribution_strategy` is "off" or "one_device" and
`num_gpus` is larger than 1; or `num_gpus` is negative or if
`distribution_strategy` is `tpu` but `tpu_address` is not specified.
"""
del kwargs
if num_gpus < 0:
raise ValueError("`num_gpus` can not be negative.")
if not isinstance(distribution_strategy, str):
msg = ("distribution_strategy must be a string but got: %s." %
(distribution_strategy,))
if distribution_strategy == False: # pylint: disable=singleton-comparison,g-explicit-bool-comparison
msg += (" If you meant to pass the string 'off', make sure you add "
"quotes around 'off' so that yaml interprets it as a string "
"instead of a bool.")
raise ValueError(msg)
distribution_strategy = distribution_strategy.lower()
if distribution_strategy == "off":
if num_gpus > 1:
raise ValueError(f"When {num_gpus} GPUs are specified, "
"distribution_strategy flag cannot be set to `off`.")
# Return the default distribution strategy.
return tf.distribute.get_strategy()
if distribution_strategy == "tpu":
# When tpu_address is an empty string, we communicate with local TPUs.
cluster_resolver = tpu_initialize(tpu_address)
return tf.distribute.TPUStrategy(cluster_resolver)
if distribution_strategy == "multi_worker_mirrored":
return tf.distribute.experimental.MultiWorkerMirroredStrategy(
communication=_collective_communication(all_reduce_alg))
if distribution_strategy == "one_device":
if num_gpus == 0:
return tf.distribute.OneDeviceStrategy("device:CPU:0")
if num_gpus > 1:
raise ValueError("`OneDeviceStrategy` can not be used for more than "
"one device.")
return tf.distribute.OneDeviceStrategy("device:GPU:0")
if distribution_strategy == "mirrored":
if num_gpus == 0:
devices = ["device:CPU:0"]
else:
devices = ["device:GPU:%d" % i for i in range(num_gpus)]
return tf.distribute.MirroredStrategy(
devices=devices,
cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg, num_packs))
if distribution_strategy == "parameter_server":
cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
return tf.distribute.experimental.ParameterServerStrategy(cluster_resolver)
raise ValueError("Unrecognized Distribution Strategy: %r" %
distribution_strategy)
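# --- Editor's sketch (added for illustration; not part of the original file).
# Typical single-host invocations of `get_distribution_strategy`; the GPU count
# below is hypothetical.
#
# strategy = get_distribution_strategy(
#     distribution_strategy='mirrored', num_gpus=2, all_reduce_alg='nccl')
# with strategy.scope():
#   model = ...  # build model and optimizer under the strategy scope
#
# For TPUs, pass the resolver address ('' or 'local' for a locally attached TPU):
# strategy = get_distribution_strategy(distribution_strategy='tpu',
#                                      tpu_address='local')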
def configure_cluster(worker_hosts=None, task_index=-1):
"""Set multi-worker cluster spec in TF_CONFIG environment variable.
Args:
worker_hosts: comma-separated list of worker ip:port pairs.
task_index: index of the worker.
Returns:
Number of workers in the cluster.
"""
tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
if tf_config:
num_workers = (
len(tf_config["cluster"].get("chief", [])) +
len(tf_config["cluster"].get("worker", [])))
elif worker_hosts:
workers = worker_hosts.split(",")
num_workers = len(workers)
if num_workers > 1 and task_index < 0:
raise ValueError("Must specify task_index when number of workers > 1")
task_index = 0 if num_workers == 1 else task_index
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": workers
},
"task": {
"type": "worker",
"index": task_index
}
})
else:
num_workers = 1
return num_workers
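# --- Editor's note (added for illustration; not part of the original file).
# For a hypothetical two-worker job where this process is worker 1:
#
# configure_cluster(worker_hosts='10.0.0.1:2222,10.0.0.2:2222', task_index=1)
#
# sets os.environ['TF_CONFIG'] to
# {"cluster": {"worker": ["10.0.0.1:2222", "10.0.0.2:2222"]},
#  "task": {"type": "worker", "index": 1}}
# and returns 2.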
def get_strategy_scope(strategy):
if strategy:
strategy_scope = strategy.scope()
else:
strategy_scope = DummyContextManager()
return strategy_scope
class DummyContextManager(object):
def __enter__(self):
pass
def __exit__(self, *args):
pass
| 8,562 | 35.594017 | 105 | py |
models | models-master/official/common/distribute_utils_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for distribution util functions."""
import sys
import tensorflow as tf
from official.common import distribute_utils
TPU_TEST = 'test_tpu' in sys.argv[0]
class DistributeUtilsTest(tf.test.TestCase):
"""Tests for distribute util functions."""
def test_invalid_args(self):
with self.assertRaisesRegex(ValueError, '`num_gpus` can not be negative.'):
_ = distribute_utils.get_distribution_strategy(num_gpus=-1)
with self.assertRaisesRegex(ValueError,
'.*If you meant to pass the string .*'):
_ = distribute_utils.get_distribution_strategy(
distribution_strategy=False, num_gpus=0)
with self.assertRaisesRegex(ValueError, 'When 2 GPUs are specified.*'):
_ = distribute_utils.get_distribution_strategy(
distribution_strategy='off', num_gpus=2)
with self.assertRaisesRegex(ValueError,
'`OneDeviceStrategy` can not be used.*'):
_ = distribute_utils.get_distribution_strategy(
distribution_strategy='one_device', num_gpus=2)
def test_one_device_strategy_cpu(self):
ds = distribute_utils.get_distribution_strategy('one_device', num_gpus=0)
self.assertEquals(ds.num_replicas_in_sync, 1)
self.assertEquals(len(ds.extended.worker_devices), 1)
self.assertIn('CPU', ds.extended.worker_devices[0])
def test_one_device_strategy_gpu(self):
ds = distribute_utils.get_distribution_strategy('one_device', num_gpus=1)
self.assertEquals(ds.num_replicas_in_sync, 1)
self.assertEquals(len(ds.extended.worker_devices), 1)
self.assertIn('GPU', ds.extended.worker_devices[0])
def test_mirrored_strategy(self):
# CPU only.
_ = distribute_utils.get_distribution_strategy(num_gpus=0)
# 5 GPUs.
ds = distribute_utils.get_distribution_strategy(num_gpus=5)
self.assertEquals(ds.num_replicas_in_sync, 5)
self.assertEquals(len(ds.extended.worker_devices), 5)
for device in ds.extended.worker_devices:
self.assertIn('GPU', device)
_ = distribute_utils.get_distribution_strategy(
distribution_strategy='mirrored',
num_gpus=2,
all_reduce_alg='nccl',
num_packs=2)
with self.assertRaisesRegex(
ValueError,
'When used with `mirrored`, valid values for all_reduce_alg are.*'):
_ = distribute_utils.get_distribution_strategy(
distribution_strategy='mirrored',
num_gpus=2,
all_reduce_alg='dummy',
num_packs=2)
def test_mwms(self):
distribute_utils.configure_cluster(worker_hosts=None, task_index=-1)
ds = distribute_utils.get_distribution_strategy(
'multi_worker_mirrored', all_reduce_alg='nccl')
self.assertIsInstance(
ds, tf.distribute.experimental.MultiWorkerMirroredStrategy)
with self.assertRaisesRegex(
ValueError,
'When used with `multi_worker_mirrored`, valid values.*'):
_ = distribute_utils.get_distribution_strategy(
'multi_worker_mirrored', all_reduce_alg='dummy')
def test_no_strategy(self):
ds = distribute_utils.get_distribution_strategy('off')
self.assertIs(ds, tf.distribute.get_strategy())
def test_tpu_strategy(self):
if not TPU_TEST:
self.skipTest('Only Cloud TPU VM instances can have local TPUs.')
with self.assertRaises(ValueError):
_ = distribute_utils.get_distribution_strategy('tpu')
ds = distribute_utils.get_distribution_strategy('tpu', tpu_address='local')
self.assertIsInstance(
ds, tf.distribute.TPUStrategy)
def test_invalid_strategy(self):
with self.assertRaisesRegexp(
ValueError,
'distribution_strategy must be a string but got: False. If'):
distribute_utils.get_distribution_strategy(False)
with self.assertRaisesRegexp(
ValueError, 'distribution_strategy must be a string but got: 1'):
distribute_utils.get_distribution_strategy(1)
def test_get_strategy_scope(self):
ds = distribute_utils.get_distribution_strategy('one_device', num_gpus=0)
with distribute_utils.get_strategy_scope(ds):
self.assertIs(tf.distribute.get_strategy(), ds)
with distribute_utils.get_strategy_scope(None):
self.assertIsNot(tf.distribute.get_strategy(), ds)
if __name__ == '__main__':
tf.test.main()
| 4,904 | 38.24 | 79 | py |
models | models-master/official/common/streamz_counters.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Global streamz counters."""
from tensorflow.python.eager import monitoring
progressive_policy_creation_counter = monitoring.Counter(
"/tensorflow/training/fast_training/progressive_policy_creation",
"Counter for the number of ProgressivePolicy creations.")
stack_vars_to_vars_call_counter = monitoring.Counter(
"/tensorflow/training/fast_training/tf_vars_to_vars",
"Counter for the number of low-level stacking API calls.")
| 1,057 | 36.785714 | 74 | py |
models | models-master/official/common/flags.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The central place to define flags."""
from absl import flags
def define_flags():
"""Defines flags.
All flags are defined as optional, but in practice most models use some of
these flags and so mark_flags_as_required() should be called after calling
this function. Typically, 'experiment', 'mode', and 'model_dir' are required.
For example:
```
from absl import flags
from official.common import flags as tfm_flags # pylint: disable=line-too-long
...
tfm_flags.define_flags()
flags.mark_flags_as_required(['experiment', 'mode', 'model_dir'])
```
The reason all flags are optional is that unit tests often do not set or
use any of the flags.
"""
flags.DEFINE_string(
'experiment', default=None, help=
'The experiment type registered, specifying an ExperimentConfig.')
flags.DEFINE_enum(
'mode',
default=None,
enum_values=[
'train', 'eval', 'train_and_eval', 'continuous_eval',
'continuous_train_and_eval', 'train_and_validate',
'train_and_post_eval'
],
help='Mode to run: `train`, `eval`, `train_and_eval`, '
'`continuous_eval`, `continuous_train_and_eval` and '
'`train_and_validate` (which is not implemented in '
'the open source version).')
flags.DEFINE_string(
'model_dir',
default=None,
help='The directory where the model and training/evaluation summaries '
'are stored.')
flags.DEFINE_multi_string(
'config_file',
default=None,
help='YAML/JSON files which specifies overrides. The override order '
'follows the order of args. Note that each file '
'can be used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, `config_file` will be used '
'first, followed by params_override.')
flags.DEFINE_string(
'params_override',
default=None,
help='a YAML/JSON string or a YAML file which specifies additional '
'overrides over the default parameters and those specified in '
'`--config_file`. Note that this is supposed to be used only to override '
'the model parameters, but not the parameters like TPU specific flags. '
'One canonical use case of `--config_file` and `--params_override` is '
'users first define a template config file using `--config_file`, then '
'use `--params_override` to adjust the minimal set of tuning parameters, '
'for example setting up different `train_batch_size`. The final override '
'order of parameters: default_model_params --> params from config_file '
'--> params in params_override. See also the help message of '
'`--config_file`.')
# The libraries rely on gin often make mistakes that include flags inside
# the library files which causes conflicts.
try:
flags.DEFINE_multi_string(
'gin_file', default=None, help='List of paths to the config files.')
except flags.DuplicateFlagError:
pass
try:
flags.DEFINE_multi_string(
'gin_params',
default=None,
help='Newline separated list of Gin parameter bindings.')
except flags.DuplicateFlagError:
pass
flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'tf_data_service', default=None, help='The tf.data service address')
flags.DEFINE_string(
'tpu_platform', default=None, help='TPU platform type.')
| 4,286 | 36.278261 | 81 | py |
models | models-master/official/common/registry_imports.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All necessary imports for registration."""
# pylint: disable=unused-import
from official import vision
from official.nlp import tasks
from official.nlp.configs import experiment_configs
from official.utils.testing import mock_task
| 843 | 39.190476 | 74 | py |
models | models-master/official/common/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 610 | 37.1875 | 74 | py |
models | models-master/official/common/dataset_fn.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility library for picking an appropriate dataset function."""
import functools
from typing import Any, Callable, Type, Union
import tensorflow as tf
PossibleDatasetType = Union[Type[tf.data.Dataset], Callable[[tf.Tensor], Any]]
def pick_dataset_fn(file_type: str) -> PossibleDatasetType:
if file_type == 'tfrecord':
return tf.data.TFRecordDataset
if file_type == 'tfrecord_compressed':
return functools.partial(tf.data.TFRecordDataset, compression_type='GZIP')
raise ValueError('Unrecognized file_type: {}'.format(file_type))
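# --- Editor's sketch (added for illustration; not part of the original file).
# Picking a reader and building a dataset from it; the file pattern below is
# hypothetical.
#
# dataset_cls = pick_dataset_fn('tfrecord_compressed')
# files = tf.data.Dataset.list_files('/tmp/data/train-*.tfrecord.gz')
# dataset = files.interleave(dataset_cls, num_parallel_calls=tf.data.AUTOTUNE)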
| 1,848 | 40.088889 | 80 | py |
models | models-master/official/nlp/optimization.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Legacy functions and classes related to optimization."""
from absl import logging
import gin
import tensorflow as tf
from official.modeling.optimization import lamb
from official.modeling.optimization import legacy_adamw
AdamWeightDecay = legacy_adamw.AdamWeightDecay
LAMB = lamb.LAMB
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Applies a warmup schedule on a given learning rate decay schedule."""
def __init__(self,
initial_learning_rate,
decay_schedule_fn,
warmup_steps,
power=1.0,
name=None):
super(WarmUp, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.warmup_steps = warmup_steps
self.power = power
self.decay_schedule_fn = decay_schedule_fn
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or 'WarmUp') as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step / warmup_steps * init_lr`.
global_step_float = tf.cast(step, tf.float32)
warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
warmup_percent_done = global_step_float / warmup_steps_float
warmup_learning_rate = (
self.initial_learning_rate *
tf.math.pow(warmup_percent_done, self.power))
return tf.cond(
global_step_float < warmup_steps_float,
lambda: warmup_learning_rate,
lambda: self.decay_schedule_fn(step),
name=name)
def get_config(self):
return {
'initial_learning_rate': self.initial_learning_rate,
'decay_schedule_fn': self.decay_schedule_fn,
'warmup_steps': self.warmup_steps,
'power': self.power,
'name': self.name
}
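# --- Editor's note (added for illustration; not part of the original file).
# Worked example of the schedule above, assuming initial_learning_rate=1e-4,
# warmup_steps=1000 and power=1.0: at step 100 the warmup branch yields
# 1e-4 * (100 / 1000) ** 1.0 = 1e-5; from step 1000 onward the wrapped
# `decay_schedule_fn` takes over.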
@gin.configurable
def create_optimizer(init_lr,
num_train_steps,
num_warmup_steps,
end_lr=0.0,
optimizer_type='adamw',
beta_1=0.9,
poly_power=1.0):
"""Creates an optimizer with learning rate schedule."""
# Implements polynomial decay (linear when poly_power=1.0) of the learning rate.
lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=init_lr,
decay_steps=num_train_steps,
end_learning_rate=end_lr,
power=poly_power)
if num_warmup_steps:
lr_schedule = WarmUp(
initial_learning_rate=init_lr,
decay_schedule_fn=lr_schedule,
warmup_steps=num_warmup_steps)
if optimizer_type == 'adamw':
logging.info('using Adamw optimizer')
optimizer = AdamWeightDecay(
learning_rate=lr_schedule,
weight_decay_rate=0.01,
beta_1=beta_1,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
elif optimizer_type == 'lamb':
logging.info('using Lamb optimizer')
optimizer = LAMB(
learning_rate=lr_schedule,
weight_decay_rate=0.01,
beta_1=beta_1,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'],
)
else:
raise ValueError('Unsupported optimizer type: ', optimizer_type)
return optimizer
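# --- Editor's sketch (added for illustration; not part of the original file).
# Creating the AdamW optimizer for a BERT-style fine-tuning run; the step
# counts and learning rate below are hypothetical.
#
# optimizer = create_optimizer(
#     init_lr=3e-5,
#     num_train_steps=10000,
#     num_warmup_steps=1000,
#     optimizer_type='adamw')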
| 3,878 | 33.026316 | 78 | py |
models | models-master/official/nlp/continuous_finetune_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM continuous finetuning+eval training driver library."""
import gc
import os
import time
from typing import Any, Mapping, Optional
from absl import logging
import tensorflow as tf
from official.common import distribute_utils
from official.core import config_definitions
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.modeling.multitask import configs
from official.modeling.multitask import train_lib as multitask_train_lib
def _flatten_dict(xs):
"""Flatten a nested dictionary.
The nested keys are flattened to a tuple.
Example::
xs = {'foo': 1, 'bar': {'a': 2, 'b': {}}}
flat_xs = _flatten_dict(xs)
print(flat_xs)
# {
# ('foo',): 1,
# ('bar', 'a'): 2,
# }
Note that empty dictionaries are ignored and
will not be restored by `unflatten_dict`.
Args:
xs: a nested dictionary
Returns:
The flattened dictionary.
"""
assert isinstance(xs, dict), 'input is not a dict'
def _flatten(xs, prefix):
if not isinstance(xs, dict):
return {prefix: xs}
result = {}
for key, value in xs.items():
path = prefix + (key,)
result.update(_flatten(value, path))
return result
return _flatten(xs, ())
def run_continuous_finetune(
mode: str,
params: config_definitions.ExperimentConfig,
model_dir: str,
run_post_eval: bool = False,
pretrain_steps: Optional[int] = None,
) -> Mapping[str, Any]:
"""Run modes with continuous training.
Currently only supports continuous_train_and_eval.
Args:
mode: A 'str', specifying the mode. continuous_train_and_eval - monitors a
checkpoint directory. Once a new checkpoint is discovered, loads the
      checkpoint, finetunes the model by training it (probably on another
      dataset or with another task), then evaluates the finetuned model.
params: ExperimentConfig instance.
model_dir: A 'str', a path to store model checkpoints and summaries.
run_post_eval: Whether to run post eval once after training, metrics logs
are returned.
pretrain_steps: Optional, the number of total training steps for the
pretraining job.
Returns:
eval logs: returns eval metrics logs when run_post_eval is set to True,
      otherwise, returns {}.
"""
assert mode == 'continuous_train_and_eval', (
'Only continuous_train_and_eval is supported by continuous_finetune. '
'Got mode: {}'.format(mode))
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case of
# GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
# dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu)
retry_times = 0
while not tf.io.gfile.isdir(params.task.init_checkpoint):
# Wait for the init_checkpoint directory to be created.
if retry_times >= 60:
raise ValueError(
'ExperimentConfig.task.init_checkpoint must be a directory for '
'continuous_train_and_eval mode.')
retry_times += 1
time.sleep(60)
summary_writer = tf.summary.create_file_writer(
os.path.join(model_dir, 'eval'))
global_step = 0
def timeout_fn():
if pretrain_steps and global_step < pretrain_steps:
# Keeps waiting for another timeout period.
logging.info(
'Continue waiting for new checkpoint as current pretrain '
'global_step=%d and target is %d.', global_step, pretrain_steps)
return False
# Quits the loop.
return True
for pretrain_ckpt in tf.train.checkpoints_iterator(
checkpoint_dir=params.task.init_checkpoint,
min_interval_secs=10,
timeout=params.trainer.continuous_eval_timeout,
timeout_fn=timeout_fn):
# If there are checkpoints, they might be the finetune checkpoint of a
# different pretrained checkpoint. So we just remove all checkpoints.
train_utils.remove_ckpts(model_dir)
with distribution_strategy.scope():
global_step = train_utils.read_global_step_from_checkpoint(pretrain_ckpt)
# Replaces params.task.init_checkpoint to make sure that we load
# exactly this pretrain checkpoint.
if params.trainer.best_checkpoint_export_subdir:
best_ckpt_subdir = '{}_{}'.format(
params.trainer.best_checkpoint_export_subdir, global_step)
params_replaced = params.replace(
task={'init_checkpoint': pretrain_ckpt},
trainer={'best_checkpoint_export_subdir': best_ckpt_subdir})
else:
params_replaced = params.replace(task={'init_checkpoint': pretrain_ckpt})
params_replaced.lock()
logging.info('Running finetuning with params: %s', params_replaced)
with distribution_strategy.scope():
if isinstance(params, configs.MultiEvalExperimentConfig):
task = task_factory.get_task(params_replaced.task)
eval_tasks = [
task_factory.get_task(config.task_config, name=config.task_name)
for config in params.eval_tasks
]
(_,
eval_metrics) = multitask_train_lib.run_experiment_with_multitask_eval(
distribution_strategy=distribution_strategy,
train_task=task,
eval_tasks=eval_tasks,
mode='train_and_eval',
params=params_replaced,
model_dir=model_dir,
run_post_eval=True,
save_summary=False)
else:
task = task_factory.get_task(
params_replaced.task, logging_dir=model_dir)
_, eval_metrics = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode='train_and_eval',
params=params_replaced,
model_dir=model_dir,
run_post_eval=True,
save_summary=False)
logging.info('Evaluation finished. Pretrain global_step: %d', global_step)
train_utils.write_json_summary(model_dir, global_step, eval_metrics)
if not os.path.basename(model_dir): # if model_dir.endswith('/')
summary_grp = os.path.dirname(model_dir) + '_' + task.name
else:
summary_grp = os.path.basename(model_dir) + '_' + task.name
summaries = {}
for name, value in _flatten_dict(eval_metrics).items():
summaries[summary_grp + '/' + '-'.join(name)] = value
train_utils.write_summary(summary_writer, global_step, summaries)
train_utils.remove_ckpts(model_dir)
# In TF2, the resource life cycle is bound with the python object life
# cycle. Force trigger python garbage collection here so those resources
# can be deallocated in time, so it doesn't cause OOM when allocating new
# objects.
# TODO(b/169178664): Fix cycle reference in Keras model and revisit to see
# if we need gc here.
gc.collect()
if run_post_eval:
return eval_metrics
return {}
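# --- Editor's note: illustrative usage sketch (not part of the original
# module). `run_continuous_finetune` is normally invoked from the training
# driver in official/nlp/train.py; a direct call looks roughly like the
# following, where `params` is an ExperimentConfig whose task.init_checkpoint
# points at the directory the pretraining job writes checkpoints to:
#
#   eval_metrics = run_continuous_finetune(
#       mode='continuous_train_and_eval',
#       params=params,
#       model_dir='/tmp/finetune_model_dir',
#       run_post_eval=True,
#       pretrain_steps=100000)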
| 7,884 | 35.169725 | 80 | py |
models | models-master/official/nlp/continuous_finetune_lib_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
import tensorflow as tf
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.nlp import continuous_finetune_lib
FLAGS = flags.FLAGS
tfm_flags.define_flags()
class ContinuousFinetuneTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
def testContinuousFinetune(self):
pretrain_steps = 1
src_model_dir = self.get_temp_dir()
flags_dict = dict(
experiment='mock',
mode='continuous_train_and_eval',
model_dir=self._model_dir,
params_override={
'task': {
'init_checkpoint': src_model_dir,
},
'trainer': {
'continuous_eval_timeout': 1,
'steps_per_loop': 1,
'train_steps': 1,
'validation_steps': 1,
'best_checkpoint_export_subdir': 'best_ckpt',
'best_checkpoint_eval_metric': 'acc',
'optimizer_config': {
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
}
}
})
with flagsaver.flagsaver(**flags_dict):
# Train and save some checkpoints.
params = train_utils.parse_configuration(flags.FLAGS)
distribution_strategy = tf.distribute.get_strategy()
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=src_model_dir)
_ = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode='train',
params=params,
model_dir=src_model_dir)
params = train_utils.parse_configuration(FLAGS)
eval_metrics = continuous_finetune_lib.run_continuous_finetune(
FLAGS.mode,
params,
FLAGS.model_dir,
run_post_eval=True,
pretrain_steps=pretrain_steps)
self.assertIn('best_acc', eval_metrics)
self.assertFalse(
tf.io.gfile.exists(os.path.join(FLAGS.model_dir, 'checkpoint')))
if __name__ == '__main__':
tf.test.main()
| 3,182 | 31.151515 | 76 | py |
models | models-master/official/nlp/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/nlp/train.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFM common training driver."""
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.nlp import continuous_finetune_lib
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'pretrain_steps',
default=None,
help='The number of total training steps for the pretraining job.')
flags.DEFINE_bool(
'enable_async_checkpointing',
default=True,
help='A boolean indicating whether to enable async checkpoint saving')
def _run_experiment_with_preemption_recovery(params, model_dir):
"""Runs experiment and tries to reconnect when encounting a preemption."""
keep_training = True
while keep_training:
preemption_watcher = None
try:
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=params.runtime.distribution_strategy,
all_reduce_alg=params.runtime.all_reduce_alg,
num_gpus=params.runtime.num_gpus,
tpu_address=params.runtime.tpu,
**params.runtime.model_parallelism())
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
# pylint: disable=line-too-long
preemption_watcher = None # copybara-replace
# pylint: enable=line-too-long
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=FLAGS.mode,
params=params,
model_dir=model_dir,
enable_async_checkpointing=FLAGS.enable_async_checkpointing)
keep_training = False
except tf.errors.OpError as e:
if preemption_watcher and preemption_watcher.preemption_message:
preemption_watcher.block_until_worker_exit()
logging.info(
'Some TPU workers had been preempted (message: %s), '
            'restarting training from the last checkpoint...',
preemption_watcher.preemption_message)
keep_training = True
else:
raise e from None
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
params = train_utils.parse_configuration(FLAGS)
model_dir = FLAGS.model_dir
if 'train' in FLAGS.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
if FLAGS.mode == 'continuous_train_and_eval':
continuous_finetune_lib.run_continuous_finetune(
FLAGS.mode, params, model_dir, pretrain_steps=FLAGS.pretrain_steps)
else:
# Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
# can have significant impact on model speeds by utilizing float16 in case
# of GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only
# when dtype is float16
if params.runtime.mixed_precision_dtype:
performance.set_mixed_precision_policy(
params.runtime.mixed_precision_dtype)
_run_experiment_with_preemption_recovery(params, model_dir)
train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
flags.mark_flags_as_required(['experiment', 'mode', 'model_dir'])
app.run(main)
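# --- Editor's note: illustrative invocation sketch (not part of the original
# module). The experiment name, path and override string are placeholders;
# only --experiment, --mode and --model_dir are marked as required above:
#
#   python3 -m official.nlp.train \
#     --experiment=<registered_experiment_name> \
#     --mode=train_and_eval \
#     --model_dir=/tmp/model_dir \
#     --params_override='<optional yaml/json or k=v overrides>'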
| 4,214 | 35.652174 | 78 | py |
models | models-master/official/nlp/tools/export_tfhub.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Exports a BERT-like encoder and its preprocessing as SavedModels for TF Hub.
This tool creates preprocessor and encoder SavedModels suitable for uploading
to https://tfhub.dev that implement the preprocessor and encoder APIs defined
at https://www.tensorflow.org/hub/common_saved_model_apis/text.
For a full usage guide, see
https://github.com/tensorflow/models/blob/master/official/nlp/docs/tfhub.md
Minimal usage examples:
1) Exporting an Encoder from checkpoint and config.
```
export_tfhub \
--encoder_config_file=${BERT_DIR:?}/bert_encoder.yaml \
--model_checkpoint_path=${BERT_DIR:?}/bert_model.ckpt \
--vocab_file=${BERT_DIR:?}/vocab.txt \
--export_type=model \
--export_path=/tmp/bert_model
```
An --encoder_config_file can specify encoder types other than BERT.
For BERT, a --bert_config_file in the legacy JSON format can be passed instead.
Flag --vocab_file (and flag --do_lower_case, whose default value is guessed
from the vocab_file path) capture how BertTokenizer was used in pre-training.
Use flag --sp_model_file instead if SentencepieceTokenizer was used.
Changing --export_type to model_with_mlm additionally creates an `.mlm`
subobject on the exported SavedModel that can be called to produce
the logits of the Masked Language Model task from pretraining.
The help string for flag --model_checkpoint_path explains the checkpoint
formats required for each --export_type.
2) Exporting a preprocessor SavedModel
```
export_tfhub \
--vocab_file ${BERT_DIR:?}/vocab.txt \
--export_type preprocessing --export_path /tmp/bert_preprocessing
```
Be sure to use flag values that match the encoder and how it has been
pre-trained (see above for --vocab_file vs --sp_model_file).
If your encoder has been trained with text preprocessing for which tfhub.dev
already has a SavedModel, you could guide your users to reuse that one instead
of exporting and publishing your own.
TODO(b/175369555): When exporting to users of TensorFlow 2.4, add flag
`--experimental_disable_assert_in_preprocessing`.
"""
from absl import app
from absl import flags
import gin
from official.legacy.bert import configs
from official.modeling import hyperparams
from official.nlp.configs import encoders
from official.nlp.tools import export_tfhub_lib
FLAGS = flags.FLAGS
flags.DEFINE_enum(
"export_type", "model",
["model", "model_with_mlm", "preprocessing"],
"The overall type of SavedModel to export. Flags "
"--bert_config_file/--encoder_config_file and --vocab_file/--sp_model_file "
"control which particular encoder model and preprocessing are exported.")
flags.DEFINE_string(
"export_path", None,
"Directory to which the SavedModel is written.")
flags.DEFINE_string(
"encoder_config_file", None,
"A yaml file representing `encoders.EncoderConfig` to define the encoder "
"(BERT or other). "
"Exactly one of --bert_config_file and --encoder_config_file can be set. "
"Needed for --export_type model and model_with_mlm.")
flags.DEFINE_string(
"bert_config_file", None,
"A JSON file with a legacy BERT configuration to define the BERT encoder. "
"Exactly one of --bert_config_file and --encoder_config_file can be set. "
"Needed for --export_type model and model_with_mlm.")
flags.DEFINE_bool(
"copy_pooler_dense_to_encoder", False,
"When the model is trained using `BertPretrainerV2`, the pool layer "
"of next sentence prediction task exists in `ClassificationHead` passed "
"to `BertPretrainerV2`. If True, we will copy this pooler's dense layer "
"to the encoder that is exported by this tool (as in classic BERT). "
"Using `BertPretrainerV2` and leaving this False exports an untrained "
"(randomly initialized) pooling layer, which some authors recommend for "
"subsequent fine-tuning,")
flags.DEFINE_string(
"model_checkpoint_path", None,
"File path to a pre-trained model checkpoint. "
"For --export_type model, this has to be an object-based (TF2) checkpoint "
"that can be restored to `tf.train.Checkpoint(encoder=encoder)` "
"for the `encoder` defined by the config file."
"(Legacy checkpoints with `model=` instead of `encoder=` are also "
"supported for now.) "
"For --export_type model_with_mlm, it must be restorable to "
"`tf.train.Checkpoint(**BertPretrainerV2(...).checkpoint_items)`. "
"(For now, `tf.train.Checkpoint(pretrainer=BertPretrainerV2(...))` is also "
"accepted.)")
flags.DEFINE_string(
"vocab_file", None,
"For encoders trained on BertTokenzier input: "
"the vocabulary file that the encoder model was trained with. "
"Exactly one of --vocab_file and --sp_model_file can be set. "
"Needed for --export_type model, model_with_mlm and preprocessing.")
flags.DEFINE_string(
"sp_model_file", None,
"For encoders trained on SentencepieceTokenzier input: "
"the SentencePiece .model file that the encoder model was trained with. "
"Exactly one of --vocab_file and --sp_model_file can be set. "
"Needed for --export_type model, model_with_mlm and preprocessing.")
flags.DEFINE_bool(
"do_lower_case", None,
"Whether to lowercase before tokenization. "
"If left as None, and --vocab_file is set, do_lower_case will be enabled "
"if 'uncased' appears in the name of --vocab_file. "
"If left as None, and --sp_model_file set, do_lower_case defaults to true. "
"Needed for --export_type model, model_with_mlm and preprocessing.")
flags.DEFINE_integer(
"default_seq_length", 128,
"The sequence length of preprocessing results from "
"top-level preprocess method. This is also the default "
"sequence length for the bert_pack_inputs subobject."
"Needed for --export_type preprocessing.")
flags.DEFINE_bool(
"tokenize_with_offsets", False, # TODO(b/181866850)
"Whether to export a .tokenize_with_offsets subobject for "
"--export_type preprocessing.")
flags.DEFINE_multi_string(
"gin_file", default=None,
help="List of paths to the config files.")
flags.DEFINE_multi_string(
"gin_params", default=None,
help="List of Gin bindings.")
flags.DEFINE_bool( # TODO(b/175369555): Remove this flag and its use.
"experimental_disable_assert_in_preprocessing", False,
"Export a preprocessing model without tf.Assert ops. "
"Usually, that would be a bad idea, except TF2.4 has an issue with "
"Assert ops in tf.functions used in Dataset.map() on a TPU worker, "
"and omitting the Assert ops lets SavedModels avoid the issue.")
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
if bool(FLAGS.vocab_file) == bool(FLAGS.sp_model_file):
raise ValueError("Exactly one of `vocab_file` and `sp_model_file` "
"can be specified, but got %s and %s." %
(FLAGS.vocab_file, FLAGS.sp_model_file))
do_lower_case = export_tfhub_lib.get_do_lower_case(
FLAGS.do_lower_case, FLAGS.vocab_file, FLAGS.sp_model_file)
if FLAGS.export_type in ("model", "model_with_mlm"):
if bool(FLAGS.bert_config_file) == bool(FLAGS.encoder_config_file):
raise ValueError("Exactly one of `bert_config_file` and "
"`encoder_config_file` can be specified, but got "
"%s and %s." %
(FLAGS.bert_config_file, FLAGS.encoder_config_file))
if FLAGS.bert_config_file:
bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
encoder_config = None
else:
bert_config = None
encoder_config = encoders.EncoderConfig()
encoder_config = hyperparams.override_params_dict(
encoder_config, FLAGS.encoder_config_file, is_strict=True)
export_tfhub_lib.export_model(
FLAGS.export_path,
bert_config=bert_config,
encoder_config=encoder_config,
model_checkpoint_path=FLAGS.model_checkpoint_path,
vocab_file=FLAGS.vocab_file,
sp_model_file=FLAGS.sp_model_file,
do_lower_case=do_lower_case,
with_mlm=FLAGS.export_type == "model_with_mlm",
copy_pooler_dense_to_encoder=FLAGS.copy_pooler_dense_to_encoder)
elif FLAGS.export_type == "preprocessing":
export_tfhub_lib.export_preprocessing(
FLAGS.export_path,
vocab_file=FLAGS.vocab_file,
sp_model_file=FLAGS.sp_model_file,
do_lower_case=do_lower_case,
default_seq_length=FLAGS.default_seq_length,
tokenize_with_offsets=FLAGS.tokenize_with_offsets,
experimental_disable_assert=
FLAGS.experimental_disable_assert_in_preprocessing)
else:
raise app.UsageError(
"Unknown value '%s' for flag --export_type" % FLAGS.export_type)
if __name__ == "__main__":
app.run(main)
| 9,443 | 41.927273 | 80 | py |
models | models-master/official/nlp/tools/export_tfhub_lib_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests export_tfhub_lib."""
import os
import tempfile
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow import estimator as tf_estimator
import tensorflow_hub as hub
import tensorflow_text as text
from sentencepiece import SentencePieceTrainer
from official.legacy.bert import configs
from official.modeling import tf_utils
from official.nlp.configs import encoders
from official.nlp.modeling import layers
from official.nlp.modeling import models
from official.nlp.tools import export_tfhub_lib
def _get_bert_config_or_encoder_config(use_bert_config,
hidden_size,
num_hidden_layers,
encoder_type="albert",
vocab_size=100):
"""Generates config args for export_tfhub_lib._create_model().
Args:
use_bert_config: bool. If True, returns legacy BertConfig.
hidden_size: int.
num_hidden_layers: int.
encoder_type: str. Can be ['albert', 'bert', 'bert_v2']. If use_bert_config
== True, then model_type is not used.
vocab_size: int.
Returns:
bert_config, encoder_config. Only one is not None. If
`use_bert_config` == True, the first config is valid. Otherwise
`bert_config` == None.
"""
if use_bert_config:
bert_config = configs.BertConfig(
vocab_size=vocab_size,
hidden_size=hidden_size,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_hidden_layers=num_hidden_layers)
encoder_config = None
else:
bert_config = None
if encoder_type == "albert":
encoder_config = encoders.EncoderConfig(
type="albert",
albert=encoders.AlbertEncoderConfig(
vocab_size=vocab_size,
embedding_width=16,
hidden_size=hidden_size,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_layers=num_hidden_layers,
dropout_rate=0.1))
else:
# encoder_type can be 'bert' or 'bert_v2'.
model_config = encoders.BertEncoderConfig(
vocab_size=vocab_size,
embedding_size=16,
hidden_size=hidden_size,
intermediate_size=32,
max_position_embeddings=128,
num_attention_heads=2,
num_layers=num_hidden_layers,
dropout_rate=0.1)
kwargs = {"type": encoder_type, encoder_type: model_config}
encoder_config = encoders.EncoderConfig(**kwargs)
return bert_config, encoder_config
def _get_vocab_or_sp_model_dummy(temp_dir, use_sp_model):
"""Returns tokenizer asset args for export_tfhub_lib.export_model()."""
dummy_file = os.path.join(temp_dir, "dummy_file.txt")
with tf.io.gfile.GFile(dummy_file, "w") as f:
f.write("dummy content")
if use_sp_model:
vocab_file, sp_model_file = None, dummy_file
else:
vocab_file, sp_model_file = dummy_file, None
return vocab_file, sp_model_file
def _read_asset(asset: tf.saved_model.Asset):
return tf.io.gfile.GFile(asset.asset_path.numpy()).read()
def _find_lambda_layers(layer):
"""Returns list of all Lambda layers in a Keras model."""
if isinstance(layer, tf.keras.layers.Lambda):
return [layer]
elif hasattr(layer, "layers"): # It's nested, like a Model.
result = []
for l in layer.layers:
result += _find_lambda_layers(l)
return result
else:
return []
class ExportModelTest(tf.test.TestCase, parameterized.TestCase):
"""Tests exporting a Transformer Encoder model as a SavedModel.
This covers export from an Encoder checkpoint to a SavedModel without
the .mlm subobject. This is no longer preferred, but still useful
for models like Electra that are trained without the MLM task.
The export code is generic. This test focuses on two main cases
(the most important ones in practice when this was written in 2020):
- BERT built from a legacy BertConfig, for use with BertTokenizer.
- ALBERT built from an EncoderConfig (as a representative of all other
    choices beyond BERT), for use with SentencepieceTokenizer (the one
alternative to BertTokenizer).
"""
@parameterized.named_parameters(
("Bert_Legacy", True, None), ("Albert", False, "albert"),
("BertEncoder", False, "bert"), ("BertEncoderV2", False, "bert_v2"))
def test_export_model(self, use_bert, encoder_type):
# Create the encoder and export it.
hidden_size = 16
num_hidden_layers = 1
bert_config, encoder_config = _get_bert_config_or_encoder_config(
use_bert,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
encoder_type=encoder_type)
bert_model, encoder = export_tfhub_lib._create_model(
bert_config=bert_config, encoder_config=encoder_config, with_mlm=False)
self.assertEmpty(
_find_lambda_layers(bert_model),
"Lambda layers are non-portable since they serialize Python bytecode.")
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = tf.train.Checkpoint(encoder=encoder)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)
vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(
self.get_temp_dir(), use_sp_model=not use_bert)
export_path = os.path.join(self.get_temp_dir(), "hub")
export_tfhub_lib.export_model(
export_path=export_path,
bert_config=bert_config,
encoder_config=encoder_config,
model_checkpoint_path=model_checkpoint_path,
with_mlm=False,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=True)
# Restore the exported model.
hub_layer = hub.KerasLayer(export_path, trainable=True)
# Check legacy tokenization data.
if use_bert:
self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())
self.assertEqual("dummy content",
_read_asset(hub_layer.resolved_object.vocab_file))
self.assertFalse(hasattr(hub_layer.resolved_object, "sp_model_file"))
else:
self.assertFalse(hasattr(hub_layer.resolved_object, "do_lower_case"))
self.assertFalse(hasattr(hub_layer.resolved_object, "vocab_file"))
self.assertEqual("dummy content",
_read_asset(hub_layer.resolved_object.sp_model_file))
# Check restored weights.
self.assertEqual(
len(bert_model.trainable_weights), len(hub_layer.trainable_weights))
for source_weight, hub_weight in zip(bert_model.trainable_weights,
hub_layer.trainable_weights):
self.assertAllClose(source_weight.numpy(), hub_weight.numpy())
# Check computation.
seq_length = 10
dummy_ids = np.zeros((2, seq_length), dtype=np.int32)
input_dict = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
hub_output = hub_layer(input_dict)
source_output = bert_model(input_dict)
encoder_output = encoder(input_dict)
self.assertEqual(hub_output["pooled_output"].shape, (2, hidden_size))
self.assertEqual(hub_output["sequence_output"].shape,
(2, seq_length, hidden_size))
self.assertLen(hub_output["encoder_outputs"], num_hidden_layers)
for key in ("pooled_output", "sequence_output", "encoder_outputs"):
self.assertAllClose(source_output[key], hub_output[key])
self.assertAllClose(source_output[key], encoder_output[key])
# The "default" output of BERT as a text representation is pooled_output.
self.assertAllClose(hub_output["pooled_output"], hub_output["default"])
# Test that training=True makes a difference (activates dropout).
def _dropout_mean_stddev(training, num_runs=20):
input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)
input_dict = dict(
input_word_ids=input_ids,
input_mask=np.ones_like(input_ids),
input_type_ids=np.zeros_like(input_ids))
outputs = np.concatenate([
hub_layer(input_dict, training=training)["pooled_output"]
for _ in range(num_runs)
])
return np.mean(np.std(outputs, axis=0))
self.assertLess(_dropout_mean_stddev(training=False), 1e-6)
self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)
# Test propagation of seq_length in shape inference.
input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_dict = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
output_dict = hub_layer(input_dict)
pooled_output = output_dict["pooled_output"]
sequence_output = output_dict["sequence_output"]
encoder_outputs = output_dict["encoder_outputs"]
self.assertEqual(pooled_output.shape.as_list(), [None, hidden_size])
self.assertEqual(sequence_output.shape.as_list(),
[None, seq_length, hidden_size])
self.assertLen(encoder_outputs, num_hidden_layers)
class ExportModelWithMLMTest(tf.test.TestCase, parameterized.TestCase):
"""Tests exporting a Transformer Encoder model as a SavedModel.
This covers export from a Pretrainer checkpoint to a SavedModel including
the .mlm subobject, which is the preferred way since 2020.
The export code is generic. This test focuses on two main cases
(the most important ones in practice when this was written in 2020):
- BERT built from a legacy BertConfig, for use with BertTokenizer.
- ALBERT built from an EncoderConfig (as a representative of all other
    choices beyond BERT), for use with SentencepieceTokenizer (the one
alternative to BertTokenizer).
"""
def test_copy_pooler_dense_to_encoder(self):
encoder_config = encoders.EncoderConfig(
type="bert",
bert=encoders.BertEncoderConfig(
hidden_size=24, intermediate_size=48, num_layers=2))
cls_heads = [
layers.ClassificationHead(
inner_dim=24, num_classes=2, name="next_sentence")
]
encoder = encoders.build_encoder(encoder_config)
pretrainer = models.BertPretrainerV2(
encoder_network=encoder,
classification_heads=cls_heads,
mlm_activation=tf_utils.get_activation(
encoder_config.get().hidden_activation))
# Makes sure the pretrainer variables are created.
_ = pretrainer(pretrainer.inputs)
checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(
self.get_temp_dir(), use_sp_model=True)
export_path = os.path.join(self.get_temp_dir(), "hub")
export_tfhub_lib.export_model(
export_path=export_path,
encoder_config=encoder_config,
model_checkpoint_path=tf.train.latest_checkpoint(model_checkpoint_dir),
with_mlm=True,
copy_pooler_dense_to_encoder=True,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=True)
# Restores a hub KerasLayer.
hub_layer = hub.KerasLayer(export_path, trainable=True)
dummy_ids = np.zeros((2, 10), dtype=np.int32)
input_dict = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
hub_pooled_output = hub_layer(input_dict)["pooled_output"]
encoder_outputs = encoder(input_dict)
# Verify that hub_layer's pooled_output is the same as the output of next
# sentence prediction's dense layer.
pretrained_pooled_output = cls_heads[0].dense(
(encoder_outputs["sequence_output"][:, 0, :]))
self.assertAllClose(hub_pooled_output, pretrained_pooled_output)
# But the pooled_output between encoder and hub_layer are not the same.
encoder_pooled_output = encoder_outputs["pooled_output"]
self.assertNotAllClose(hub_pooled_output, encoder_pooled_output)
@parameterized.named_parameters(
("Bert", True),
("Albert", False),
)
def test_export_model_with_mlm(self, use_bert):
# Create the encoder and export it.
hidden_size = 16
num_hidden_layers = 2
bert_config, encoder_config = _get_bert_config_or_encoder_config(
use_bert, hidden_size, num_hidden_layers)
bert_model, pretrainer = export_tfhub_lib._create_model(
bert_config=bert_config, encoder_config=encoder_config, with_mlm=True)
self.assertEmpty(
_find_lambda_layers(bert_model),
"Lambda layers are non-portable since they serialize Python bytecode.")
bert_model_with_mlm = bert_model.mlm
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)
vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(
self.get_temp_dir(), use_sp_model=not use_bert)
export_path = os.path.join(self.get_temp_dir(), "hub")
export_tfhub_lib.export_model(
export_path=export_path,
bert_config=bert_config,
encoder_config=encoder_config,
model_checkpoint_path=model_checkpoint_path,
with_mlm=True,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=True)
# Restore the exported model.
hub_layer = hub.KerasLayer(export_path, trainable=True)
# Check legacy tokenization data.
if use_bert:
self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())
self.assertEqual("dummy content",
_read_asset(hub_layer.resolved_object.vocab_file))
self.assertFalse(hasattr(hub_layer.resolved_object, "sp_model_file"))
else:
self.assertFalse(hasattr(hub_layer.resolved_object, "do_lower_case"))
self.assertFalse(hasattr(hub_layer.resolved_object, "vocab_file"))
self.assertEqual("dummy content",
_read_asset(hub_layer.resolved_object.sp_model_file))
# Check restored weights.
# Note that we set `_auto_track_sub_layers` to False when exporting the
# SavedModel, so hub_layer has the same number of weights as bert_model;
# otherwise, hub_layer will have extra weights from its `mlm` subobject.
self.assertEqual(
len(bert_model.trainable_weights), len(hub_layer.trainable_weights))
for source_weight, hub_weight in zip(bert_model.trainable_weights,
hub_layer.trainable_weights):
self.assertAllClose(source_weight, hub_weight)
# Check computation.
seq_length = 10
dummy_ids = np.zeros((2, seq_length), dtype=np.int32)
input_dict = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
hub_outputs_dict = hub_layer(input_dict)
source_outputs_dict = bert_model(input_dict)
encoder_outputs_dict = pretrainer.encoder_network(
[dummy_ids, dummy_ids, dummy_ids])
self.assertEqual(hub_outputs_dict["pooled_output"].shape, (2, hidden_size))
self.assertEqual(hub_outputs_dict["sequence_output"].shape,
(2, seq_length, hidden_size))
for output_key in ("pooled_output", "sequence_output", "encoder_outputs"):
self.assertAllClose(source_outputs_dict[output_key],
hub_outputs_dict[output_key])
self.assertAllClose(source_outputs_dict[output_key],
encoder_outputs_dict[output_key])
# The "default" output of BERT as a text representation is pooled_output.
self.assertAllClose(hub_outputs_dict["pooled_output"],
hub_outputs_dict["default"])
# Test that training=True makes a difference (activates dropout).
def _dropout_mean_stddev(training, num_runs=20):
input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)
input_dict = dict(
input_word_ids=input_ids,
input_mask=np.ones_like(input_ids),
input_type_ids=np.zeros_like(input_ids))
outputs = np.concatenate([
hub_layer(input_dict, training=training)["pooled_output"]
for _ in range(num_runs)
])
return np.mean(np.std(outputs, axis=0))
self.assertLess(_dropout_mean_stddev(training=False), 1e-6)
self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)
# Checks sub-object `mlm`.
self.assertTrue(hasattr(hub_layer.resolved_object, "mlm"))
self.assertLen(hub_layer.resolved_object.mlm.trainable_variables,
len(bert_model_with_mlm.trainable_weights))
self.assertLen(hub_layer.resolved_object.mlm.trainable_variables,
len(pretrainer.trainable_weights))
for source_weight, hub_weight, pretrainer_weight in zip(
bert_model_with_mlm.trainable_weights,
hub_layer.resolved_object.mlm.trainable_variables,
pretrainer.trainable_weights):
self.assertAllClose(source_weight, hub_weight)
self.assertAllClose(source_weight, pretrainer_weight)
max_predictions_per_seq = 4
mlm_positions = np.zeros((2, max_predictions_per_seq), dtype=np.int32)
input_dict = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=mlm_positions)
hub_mlm_outputs_dict = hub_layer.resolved_object.mlm(input_dict)
source_mlm_outputs_dict = bert_model_with_mlm(input_dict)
for output_key in ("pooled_output", "sequence_output", "mlm_logits",
"encoder_outputs"):
self.assertAllClose(hub_mlm_outputs_dict[output_key],
source_mlm_outputs_dict[output_key])
pretrainer_mlm_logits_output = pretrainer(input_dict)["mlm_logits"]
self.assertAllClose(hub_mlm_outputs_dict["mlm_logits"],
pretrainer_mlm_logits_output)
# Test that training=True makes a difference (activates dropout).
def _dropout_mean_stddev_mlm(training, num_runs=20):
input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)
mlm_position_ids = np.array([[1, 2, 3, 4]], np.int32)
input_dict = dict(
input_word_ids=input_ids,
input_mask=np.ones_like(input_ids),
input_type_ids=np.zeros_like(input_ids),
masked_lm_positions=mlm_position_ids)
outputs = np.concatenate([
hub_layer.resolved_object.mlm(input_dict,
training=training)["pooled_output"]
for _ in range(num_runs)
])
return np.mean(np.std(outputs, axis=0))
self.assertLess(_dropout_mean_stddev_mlm(training=False), 1e-6)
self.assertGreater(_dropout_mean_stddev_mlm(training=True), 1e-3)
# Test propagation of seq_length in shape inference.
input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)
input_dict = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
hub_outputs_dict = hub_layer(input_dict)
self.assertEqual(hub_outputs_dict["pooled_output"].shape.as_list(),
[None, hidden_size])
self.assertEqual(hub_outputs_dict["sequence_output"].shape.as_list(),
[None, seq_length, hidden_size])
_STRING_NOT_TO_LEAK = "private_path_component_"
class ExportPreprocessingTest(tf.test.TestCase, parameterized.TestCase):
def _make_vocab_file(self, vocab, filename="vocab.txt", add_mask_token=False):
"""Creates wordpiece vocab file with given words plus special tokens.
The tokens of the resulting model are, in this order:
[PAD], [UNK], [CLS], [SEP], [MASK]*, ...vocab...
*=if requested by args.
This function also accepts wordpieces that start with the ## continuation
marker, but avoiding those makes this function interchangeable with
_make_sp_model_file(), up to the extra dimension returned by BertTokenizer.
Args:
vocab: a list of strings with the words or wordpieces to put into the
model's vocabulary. Do not include special tokens here.
filename: Optionally, a filename (relative to the temporary directory
created by this function).
add_mask_token: an optional bool, whether to include a [MASK] token.
Returns:
The absolute filename of the created vocab file.
"""
full_vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]"
] + ["[MASK]"] * add_mask_token + vocab
path = os.path.join(
tempfile.mkdtemp(
dir=self.get_temp_dir(), # New subdir each time.
prefix=_STRING_NOT_TO_LEAK),
filename)
with tf.io.gfile.GFile(path, "w") as f:
f.write("\n".join(full_vocab + [""]))
return path
def _make_sp_model_file(self, vocab, prefix="spm", add_mask_token=False):
"""Creates Sentencepiece word model with given words plus special tokens.
The tokens of the resulting model are, in this order:
<pad>, <unk>, [CLS], [SEP], [MASK]*, ...vocab..., <s>, </s>
*=if requested by args.
The words in the input vocab are plain text, without the whitespace marker.
That makes this function interchangeable with _make_vocab_file().
Args:
vocab: a list of strings with the words to put into the model's
vocabulary. Do not include special tokens here.
prefix: an optional string, to change the filename prefix for the model
(relative to the temporary directory created by this function).
add_mask_token: an optional bool, whether to include a [MASK] token.
Returns:
The absolute filename of the created Sentencepiece model file.
"""
model_prefix = os.path.join(
tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time.
prefix)
input_file = model_prefix + "_train_input.txt"
# Create input text for training the sp model from the tokens provided.
    # Repeat earlier tokens more often, because the vocab is sorted by frequency.
input_text = []
for i, token in enumerate(vocab):
input_text.append(" ".join([token] * (len(vocab) - i)))
with tf.io.gfile.GFile(input_file, "w") as f:
f.write("\n".join(input_text + [""]))
control_symbols = "[CLS],[SEP]"
full_vocab_size = len(vocab) + 6 # <pad>, <unk>, [CLS], [SEP], <s>, </s>.
if add_mask_token:
control_symbols += ",[MASK]"
full_vocab_size += 1
flags = dict(
model_prefix=model_prefix,
model_type="word",
input=input_file,
pad_id=0,
unk_id=1,
control_symbols=control_symbols,
vocab_size=full_vocab_size,
bos_id=full_vocab_size - 2,
eos_id=full_vocab_size - 1)
SentencePieceTrainer.Train(" ".join(
["--{}={}".format(k, v) for k, v in flags.items()]))
return model_prefix + ".model"
def _do_export(self,
vocab,
do_lower_case,
default_seq_length=128,
tokenize_with_offsets=True,
use_sp_model=False,
experimental_disable_assert=False,
add_mask_token=False):
"""Runs SavedModel export and returns the export_path."""
export_path = tempfile.mkdtemp(dir=self.get_temp_dir())
vocab_file = sp_model_file = None
if use_sp_model:
sp_model_file = self._make_sp_model_file(
vocab, add_mask_token=add_mask_token)
else:
vocab_file = self._make_vocab_file(vocab, add_mask_token=add_mask_token)
export_tfhub_lib.export_preprocessing(
export_path,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=do_lower_case,
tokenize_with_offsets=tokenize_with_offsets,
default_seq_length=default_seq_length,
experimental_disable_assert=experimental_disable_assert)
# Invalidate the original filename to verify loading from the SavedModel.
tf.io.gfile.remove(sp_model_file or vocab_file)
return export_path
def test_no_leaks(self):
"""Tests not leaking the path to the original vocab file."""
path = self._do_export(["d", "ef", "abc", "xy"],
do_lower_case=True,
use_sp_model=False)
with tf.io.gfile.GFile(os.path.join(path, "saved_model.pb"), "rb") as f:
self.assertFalse( # pylint: disable=g-generic-assert
_STRING_NOT_TO_LEAK.encode("ascii") in f.read())
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_exported_callables(self, use_sp_model):
preprocess = tf.saved_model.load(
self._do_export(
["d", "ef", "abc", "xy"],
do_lower_case=True,
# TODO(b/181866850): drop this.
tokenize_with_offsets=not use_sp_model,
# TODO(b/175369555): drop this.
experimental_disable_assert=True,
use_sp_model=use_sp_model))
def fold_dim(rt):
"""Removes the word/subword distinction of BertTokenizer."""
return rt if use_sp_model else rt.merge_dims(1, 2)
# .tokenize()
inputs = tf.constant(["abc d ef", "ABC D EF d"])
token_ids = preprocess.tokenize(inputs)
self.assertAllEqual(
fold_dim(token_ids), tf.ragged.constant([[6, 4, 5], [6, 4, 5, 4]]))
special_tokens_dict = {
k: v.numpy().item() # Expecting eager Tensor, converting to Python.
for k, v in preprocess.tokenize.get_special_tokens_dict().items()
}
self.assertDictEqual(
special_tokens_dict,
dict(
padding_id=0,
start_of_sequence_id=2,
end_of_segment_id=3,
vocab_size=4 + 6 if use_sp_model else 4 + 4))
# .tokenize_with_offsets()
if use_sp_model:
# TODO(b/181866850): Enable tokenize_with_offsets when it works and test.
self.assertFalse(hasattr(preprocess, "tokenize_with_offsets"))
else:
token_ids, start_offsets, limit_offsets = (
preprocess.tokenize_with_offsets(inputs))
self.assertAllEqual(
fold_dim(token_ids), tf.ragged.constant([[6, 4, 5], [6, 4, 5, 4]]))
self.assertAllEqual(
fold_dim(start_offsets), tf.ragged.constant([[0, 4, 6], [0, 4, 6,
9]]))
self.assertAllEqual(
fold_dim(limit_offsets), tf.ragged.constant([[3, 5, 8], [3, 5, 8,
10]]))
self.assertIs(preprocess.tokenize.get_special_tokens_dict,
preprocess.tokenize_with_offsets.get_special_tokens_dict)
# Root callable.
bert_inputs = preprocess(inputs)
self.assertAllEqual(bert_inputs["input_word_ids"].shape.as_list(), [2, 128])
self.assertAllEqual(
bert_inputs["input_word_ids"][:, :10],
tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],
[2, 6, 4, 5, 4, 3, 0, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_mask"].shape.as_list(), [2, 128])
self.assertAllEqual(
bert_inputs["input_mask"][:, :10],
tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]))
self.assertAllEqual(bert_inputs["input_type_ids"].shape.as_list(), [2, 128])
self.assertAllEqual(
bert_inputs["input_type_ids"][:, :10],
tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
# .bert_pack_inputs()
inputs_2 = tf.constant(["d xy", "xy abc"])
token_ids_2 = preprocess.tokenize(inputs_2)
bert_inputs = preprocess.bert_pack_inputs([token_ids, token_ids_2],
seq_length=256)
self.assertAllEqual(bert_inputs["input_word_ids"].shape.as_list(), [2, 256])
self.assertAllEqual(
bert_inputs["input_word_ids"][:, :10],
tf.constant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0],
[2, 6, 4, 5, 4, 3, 7, 6, 3, 0]]))
self.assertAllEqual(bert_inputs["input_mask"].shape.as_list(), [2, 256])
self.assertAllEqual(
bert_inputs["input_mask"][:, :10],
tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0]]))
self.assertAllEqual(bert_inputs["input_type_ids"].shape.as_list(), [2, 256])
self.assertAllEqual(
bert_inputs["input_type_ids"][:, :10],
tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0]]))
# For BertTokenizer only: repeat relevant parts for do_lower_case=False,
# default_seq_length=10, experimental_disable_assert=False,
# tokenize_with_offsets=False, and without folding the word/subword dimension.
def test_cased_length10(self):
preprocess = tf.saved_model.load(
self._do_export(["d", "##ef", "abc", "ABC"],
do_lower_case=False,
default_seq_length=10,
tokenize_with_offsets=False,
use_sp_model=False,
experimental_disable_assert=False))
inputs = tf.constant(["abc def", "ABC DEF"])
token_ids = preprocess.tokenize(inputs)
self.assertAllEqual(token_ids,
tf.ragged.constant([[[6], [4, 5]], [[7], [1]]]))
self.assertFalse(hasattr(preprocess, "tokenize_with_offsets"))
bert_inputs = preprocess(inputs)
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],
[2, 7, 1, 3, 0, 0, 0, 0, 0, 0]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 0]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
inputs_2 = tf.constant(["d ABC", "ABC abc"])
token_ids_2 = preprocess.tokenize(inputs_2)
bert_inputs = preprocess.bert_pack_inputs([token_ids, token_ids_2])
# Test default seq_length=10.
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0],
[2, 7, 1, 3, 7, 6, 3, 0, 0, 0]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0]]))
# XLA requires fixed shapes for tensors found in graph mode.
# Statically known shapes in Python are a particularly firm way to
# guarantee that, and they are generally more convenient to work with.
# We test that the exported SavedModel plays well with TF's shape
# inference when applied to fully or partially known input shapes.
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_shapes(self, use_sp_model):
preprocess = tf.saved_model.load(
self._do_export(
["abc", "def"],
do_lower_case=True,
# TODO(b/181866850): drop this.
tokenize_with_offsets=not use_sp_model,
# TODO(b/175369555): drop this.
experimental_disable_assert=True,
use_sp_model=use_sp_model))
def expected_bert_input_shapes(batch_size, seq_length):
return dict(
input_word_ids=[batch_size, seq_length],
input_mask=[batch_size, seq_length],
input_type_ids=[batch_size, seq_length])
for batch_size in [7, None]:
if use_sp_model:
token_out_shape = [batch_size, None] # No word/subword distinction.
else:
token_out_shape = [batch_size, None, None]
self.assertEqual(
_result_shapes_in_tf_function(preprocess.tokenize,
tf.TensorSpec([batch_size], tf.string)),
token_out_shape, "with batch_size=%s" % batch_size)
# TODO(b/181866850): Enable tokenize_with_offsets when it works and test.
if use_sp_model:
self.assertFalse(hasattr(preprocess, "tokenize_with_offsets"))
else:
self.assertEqual(
_result_shapes_in_tf_function(
preprocess.tokenize_with_offsets,
tf.TensorSpec([batch_size], tf.string)), [token_out_shape] * 3,
"with batch_size=%s" % batch_size)
self.assertEqual(
_result_shapes_in_tf_function(
preprocess.bert_pack_inputs,
[tf.RaggedTensorSpec([batch_size, None, None], tf.int32)] * 2,
seq_length=256), expected_bert_input_shapes(batch_size, 256),
"with batch_size=%s" % batch_size)
self.assertEqual(
_result_shapes_in_tf_function(preprocess,
tf.TensorSpec([batch_size], tf.string)),
expected_bert_input_shapes(batch_size, 128),
"with batch_size=%s" % batch_size)
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_reexport(self, use_sp_model):
"""Test that preprocess keeps working after another save/load cycle."""
path1 = self._do_export(
["d", "ef", "abc", "xy"],
do_lower_case=True,
default_seq_length=10,
tokenize_with_offsets=False,
experimental_disable_assert=True, # TODO(b/175369555): drop this.
use_sp_model=use_sp_model)
path2 = path1.rstrip("/") + ".2"
model1 = tf.saved_model.load(path1)
tf.saved_model.save(model1, path2)
    # Delete the first SavedModel to test that the second one loads by itself.
# https://github.com/tensorflow/tensorflow/issues/46456 reports such a
# failure case for BertTokenizer.
tf.io.gfile.rmtree(path1)
model2 = tf.saved_model.load(path2)
inputs = tf.constant(["abc d ef", "ABC D EF d"])
bert_inputs = model2(inputs)
self.assertAllEqual(
bert_inputs["input_word_ids"],
tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],
[2, 6, 4, 5, 4, 3, 0, 0, 0, 0]]))
self.assertAllEqual(
bert_inputs["input_mask"],
tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]))
self.assertAllEqual(
bert_inputs["input_type_ids"],
tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
@parameterized.named_parameters(("Bert", True), ("Albert", False))
def test_preprocessing_for_mlm(self, use_bert):
"""Combines both SavedModel types and TF.text helpers for MLM."""
# Create the preprocessing SavedModel with a [MASK] token.
non_special_tokens = [
"hello", "world", "nice", "movie", "great", "actors", "quick", "fox",
"lazy", "dog"
]
preprocess = tf.saved_model.load(
self._do_export(
non_special_tokens,
do_lower_case=True,
tokenize_with_offsets=use_bert, # TODO(b/181866850): drop this.
experimental_disable_assert=True, # TODO(b/175369555): drop this.
add_mask_token=True,
use_sp_model=not use_bert))
vocab_size = len(non_special_tokens) + (5 if use_bert else 7)
# Create the encoder SavedModel with an .mlm subobject.
hidden_size = 16
num_hidden_layers = 2
bert_config, encoder_config = _get_bert_config_or_encoder_config(
use_bert_config=use_bert,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
vocab_size=vocab_size)
_, pretrainer = export_tfhub_lib._create_model(
bert_config=bert_config, encoder_config=encoder_config, with_mlm=True)
model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)
checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)
vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy( # Not used below.
self.get_temp_dir(), use_sp_model=not use_bert)
encoder_export_path = os.path.join(self.get_temp_dir(), "encoder_export")
export_tfhub_lib.export_model(
export_path=encoder_export_path,
bert_config=bert_config,
encoder_config=encoder_config,
model_checkpoint_path=model_checkpoint_path,
with_mlm=True,
vocab_file=vocab_file,
sp_model_file=sp_model_file,
do_lower_case=True)
encoder = tf.saved_model.load(encoder_export_path)
# Get special tokens from the vocab (and vocab size).
special_tokens_dict = preprocess.tokenize.get_special_tokens_dict()
self.assertEqual(int(special_tokens_dict["vocab_size"]), vocab_size)
padding_id = int(special_tokens_dict["padding_id"])
self.assertEqual(padding_id, 0)
start_of_sequence_id = int(special_tokens_dict["start_of_sequence_id"])
self.assertEqual(start_of_sequence_id, 2)
end_of_segment_id = int(special_tokens_dict["end_of_segment_id"])
self.assertEqual(end_of_segment_id, 3)
mask_id = int(special_tokens_dict["mask_id"])
self.assertEqual(mask_id, 4)
# A batch of 3 segment pairs.
raw_segments = [
tf.constant(["hello", "nice movie", "quick fox"]),
tf.constant(["world", "great actors", "lazy dog"])
]
batch_size = 3
# Misc hyperparameters.
seq_length = 10
max_selections_per_seq = 2
# Tokenize inputs.
tokenized_segments = [preprocess.tokenize(s) for s in raw_segments]
    # Trim inputs to eventually fit seq_length.
num_special_tokens = len(raw_segments) + 1
trimmed_segments = text.WaterfallTrimmer(
seq_length - num_special_tokens).trim(tokenized_segments)
# Combine input segments into one input sequence.
input_ids, segment_ids = text.combine_segments(
trimmed_segments,
start_of_sequence_id=start_of_sequence_id,
end_of_segment_id=end_of_segment_id)
# Apply random masking controlled by policy objects.
(masked_input_ids, masked_lm_positions,
masked_ids) = text.mask_language_model(
input_ids=input_ids,
item_selector=text.RandomItemSelector(
max_selections_per_seq,
selection_rate=0.5, # Adjusted for the short test examples.
unselectable_ids=[start_of_sequence_id, end_of_segment_id]),
mask_values_chooser=text.MaskValuesChooser(
vocab_size=vocab_size,
mask_token=mask_id,
# Always put [MASK] to have a predictable result.
mask_token_rate=1.0,
random_token_rate=0.0))
# Pad to fixed-length Transformer encoder inputs.
input_word_ids, _ = text.pad_model_inputs(
masked_input_ids, seq_length, pad_value=padding_id)
input_type_ids, input_mask = text.pad_model_inputs(
segment_ids, seq_length, pad_value=0)
masked_lm_positions, _ = text.pad_model_inputs(
masked_lm_positions, max_selections_per_seq, pad_value=0)
masked_lm_positions = tf.cast(masked_lm_positions, tf.int32)
num_predictions = int(tf.shape(masked_lm_positions)[1])
# Test transformer inputs.
self.assertEqual(num_predictions, max_selections_per_seq)
expected_word_ids = np.array([
# [CLS] hello [SEP] world [SEP]
[2, 5, 3, 6, 3, 0, 0, 0, 0, 0],
# [CLS] nice movie [SEP] great actors [SEP]
[2, 7, 8, 3, 9, 10, 3, 0, 0, 0],
        # [CLS] quick fox [SEP] lazy dog [SEP]
[2, 11, 12, 3, 13, 14, 3, 0, 0, 0]
])
for i in range(batch_size):
for j in range(num_predictions):
k = int(masked_lm_positions[i, j])
if k != 0:
expected_word_ids[i, k] = 4 # [MASK]
self.assertAllEqual(input_word_ids, expected_word_ids)
# Call the MLM head of the Transformer encoder.
mlm_inputs = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids,
masked_lm_positions=masked_lm_positions,
)
mlm_outputs = encoder.mlm(mlm_inputs)
self.assertEqual(mlm_outputs["pooled_output"].shape,
(batch_size, hidden_size))
self.assertEqual(mlm_outputs["sequence_output"].shape,
(batch_size, seq_length, hidden_size))
self.assertEqual(mlm_outputs["mlm_logits"].shape,
(batch_size, num_predictions, vocab_size))
self.assertLen(mlm_outputs["encoder_outputs"], num_hidden_layers)
# A real trainer would now compute the loss of mlm_logits
# trying to predict the masked_ids.
del masked_ids # Unused.
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_special_tokens_in_estimator(self, use_sp_model):
"""Tests getting special tokens without an Eager init context."""
preprocess_export_path = self._do_export(["d", "ef", "abc", "xy"],
do_lower_case=True,
use_sp_model=use_sp_model,
tokenize_with_offsets=False)
def _get_special_tokens_dict(obj):
"""Returns special tokens of restored tokenizer as Python values."""
if tf.executing_eagerly():
special_tokens_numpy = {
          k: v.numpy() for k, v in obj.get_special_tokens_dict().items()
}
else:
with tf.Graph().as_default():
# This code expects `get_special_tokens_dict()` to be a tf.function
# with no dependencies (bound args) from the context it was loaded in,
          # and boldly assumes that it can just be called in a different context.
special_tokens_tensors = obj.get_special_tokens_dict()
with tf.compat.v1.Session() as sess:
special_tokens_numpy = sess.run(special_tokens_tensors)
return {
k: v.item() # Numpy to Python.
for k, v in special_tokens_numpy.items()
}
def input_fn():
self.assertFalse(tf.executing_eagerly())
# Build a preprocessing Model.
sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)
preprocess = tf.saved_model.load(preprocess_export_path)
tokenize = hub.KerasLayer(preprocess.tokenize)
special_tokens_dict = _get_special_tokens_dict(tokenize.resolved_object)
for k, v in special_tokens_dict.items():
self.assertIsInstance(v, int, "Unexpected type for {}".format(k))
tokens = tokenize(sentences)
packed_inputs = layers.BertPackInputs(
4, special_tokens_dict=special_tokens_dict)(
tokens)
preprocessing = tf.keras.Model(sentences, packed_inputs)
# Map the dataset.
ds = tf.data.Dataset.from_tensors(
(tf.constant(["abc", "D EF"]), tf.constant([0, 1])))
ds = ds.map(lambda features, labels: (preprocessing(features), labels))
return ds
def model_fn(features, labels, mode):
del labels # Unused.
return tf_estimator.EstimatorSpec(
mode=mode, predictions=features["input_word_ids"])
estimator = tf_estimator.Estimator(model_fn=model_fn)
outputs = list(estimator.predict(input_fn))
self.assertAllEqual(outputs, np.array([[2, 6, 3, 0], [2, 4, 5, 3]]))
# TODO(b/175369555): Remove that code and its test.
@parameterized.named_parameters(("Bert", False), ("Sentencepiece", True))
def test_check_no_assert(self, use_sp_model):
"""Tests the self-check during export without assertions."""
preprocess_export_path = self._do_export(["d", "ef", "abc", "xy"],
do_lower_case=True,
use_sp_model=use_sp_model,
tokenize_with_offsets=False,
experimental_disable_assert=False)
with self.assertRaisesRegex(AssertionError,
r"failed to suppress \d+ Assert ops"):
export_tfhub_lib._check_no_assert(preprocess_export_path)
def _result_shapes_in_tf_function(fn, *args, **kwargs):
"""Returns shapes (as lists) observed on the result of `fn`.
Args:
fn: A callable.
*args: TensorSpecs for Tensor-valued arguments and actual values for
Python-valued arguments to fn.
**kwargs: Same for keyword arguments.
Returns:
The nest of partial tensor shapes (as lists) that is statically known inside
tf.function(fn)(*args, **kwargs) for the nest of its results.
"""
  # Use a captured mutable container for a side output from the wrapper.
uninitialized = "uninitialized!"
result_shapes_container = [uninitialized]
assert result_shapes_container[0] is uninitialized
@tf.function
def shape_reporting_wrapper(*args, **kwargs):
result = fn(*args, **kwargs)
result_shapes_container[0] = tf.nest.map_structure(
lambda x: x.shape.as_list(), result)
return result
shape_reporting_wrapper.get_concrete_function(*args, **kwargs)
assert result_shapes_container[0] is not uninitialized
return result_shapes_container[0]
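# Illustrative usage sketch (not part of the original test file): checking the
# statically known output shape of a hypothetical Keras layer with the helper
# defined above.
def _example_result_shapes_usage():
  dense = tf.keras.layers.Dense(4)
  shapes = _result_shapes_in_tf_function(
      dense, tf.TensorSpec([None, 8], tf.float32))
  assert shapes == [None, 4]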
if __name__ == "__main__":
tf.test.main()
| 46,347 | 41.875116 | 80 | py |
models | models-master/official/nlp/tools/export_tfhub_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of components of export_tfhub.py. See docstring there for more."""
import contextlib
import hashlib
import os
import tempfile
from typing import Optional, Text, Tuple
# Import libraries
from absl import logging
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import TODO(b/175369555): Remove these.
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.ops import control_flow_assert
# pylint: enable=g-direct-tensorflow-import
from official.legacy.bert import configs
from official.modeling import tf_utils
from official.nlp.configs import encoders
from official.nlp.modeling import layers
from official.nlp.modeling import models
from official.nlp.modeling import networks
def get_bert_encoder(bert_config):
"""Returns a BertEncoder with dict outputs."""
bert_encoder = networks.BertEncoder(
vocab_size=bert_config.vocab_size,
hidden_size=bert_config.hidden_size,
num_layers=bert_config.num_hidden_layers,
num_attention_heads=bert_config.num_attention_heads,
intermediate_size=bert_config.intermediate_size,
activation=tf_utils.get_activation(bert_config.hidden_act),
dropout_rate=bert_config.hidden_dropout_prob,
attention_dropout_rate=bert_config.attention_probs_dropout_prob,
max_sequence_length=bert_config.max_position_embeddings,
type_vocab_size=bert_config.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range),
embedding_width=bert_config.embedding_size,
dict_outputs=True)
return bert_encoder
def get_do_lower_case(do_lower_case, vocab_file=None, sp_model_file=None):
"""Returns do_lower_case, replacing None by a guess from vocab file name."""
if do_lower_case is not None:
return do_lower_case
elif vocab_file:
do_lower_case = "uncased" in vocab_file
logging.info("Using do_lower_case=%s based on name of vocab_file=%s",
do_lower_case, vocab_file)
return do_lower_case
elif sp_model_file:
do_lower_case = True # All public ALBERTs (as of Oct 2020) do it.
logging.info("Defaulting to do_lower_case=%s for Sentencepiece tokenizer",
do_lower_case)
return do_lower_case
else:
raise ValueError("Must set vocab_file or sp_model_file.")
def _create_model(
*,
bert_config: Optional[configs.BertConfig] = None,
encoder_config: Optional[encoders.EncoderConfig] = None,
with_mlm: bool,
) -> Tuple[tf.keras.Model, tf.keras.Model]:
"""Creates the model to export and the model to restore the checkpoint.
Args:
bert_config: A legacy `BertConfig` to create a `BertEncoder` object. Exactly
one of encoder_config and bert_config must be set.
encoder_config: An `EncoderConfig` to create an encoder of the configured
type (`BertEncoder` or other).
with_mlm: A bool to control the second component of the result. If True,
will create a `BertPretrainerV2` object; otherwise, will create a
`BertEncoder` object.
Returns:
A Tuple of (1) a Keras model that will be exported, (2) a `BertPretrainerV2`
object or `BertEncoder` object depending on the value of `with_mlm`
argument, which contains the first model and will be used for restoring
weights from the checkpoint.
"""
if (bert_config is not None) == (encoder_config is not None):
raise ValueError("Exactly one of `bert_config` and `encoder_config` "
"can be specified, but got %s and %s" %
(bert_config, encoder_config))
if bert_config is not None:
encoder = get_bert_encoder(bert_config)
else:
encoder = encoders.build_encoder(encoder_config)
# Convert from list of named inputs to dict of inputs keyed by name.
# Only the latter accepts a dict of inputs after restoring from SavedModel.
if isinstance(encoder.inputs, list) or isinstance(encoder.inputs, tuple):
encoder_inputs_dict = {x.name: x for x in encoder.inputs}
else:
# encoder.inputs by default is dict for BertEncoderV2.
encoder_inputs_dict = encoder.inputs
encoder_output_dict = encoder(encoder_inputs_dict)
# For interchangeability with other text representations,
# add "default" as an alias for BERT's whole-input reptesentations.
encoder_output_dict["default"] = encoder_output_dict["pooled_output"]
core_model = tf.keras.Model(
inputs=encoder_inputs_dict, outputs=encoder_output_dict)
if with_mlm:
if bert_config is not None:
hidden_act = bert_config.hidden_act
else:
assert encoder_config is not None
hidden_act = encoder_config.get().hidden_activation
pretrainer = models.BertPretrainerV2(
encoder_network=encoder,
mlm_activation=tf_utils.get_activation(hidden_act))
if isinstance(pretrainer.inputs, dict):
pretrainer_inputs_dict = pretrainer.inputs
else:
pretrainer_inputs_dict = {x.name: x for x in pretrainer.inputs}
pretrainer_output_dict = pretrainer(pretrainer_inputs_dict)
mlm_model = tf.keras.Model(
inputs=pretrainer_inputs_dict, outputs=pretrainer_output_dict)
# Set `_auto_track_sub_layers` to False, so that the additional weights
# from `mlm` sub-object will not be included in the core model.
# TODO(b/169210253): Use a public API when available.
core_model._auto_track_sub_layers = False # pylint: disable=protected-access
core_model.mlm = mlm_model
return core_model, pretrainer
else:
return core_model, encoder
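# Illustrative sketch (not part of the original module): building the export
# and restore models from a small, hypothetical legacy BertConfig.
def _example_create_model():
  bert_config = configs.BertConfig(
      vocab_size=100,
      hidden_size=16,
      num_hidden_layers=2,
      num_attention_heads=2,
      intermediate_size=32)
  core_model, pretrainer = _create_model(
      bert_config=bert_config, encoder_config=None, with_mlm=True)
  # `core_model` is what gets exported; `pretrainer` restores the weights.
  return core_model, pretrainer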
def export_model(export_path: Text,
*,
bert_config: Optional[configs.BertConfig] = None,
encoder_config: Optional[encoders.EncoderConfig] = None,
model_checkpoint_path: Text,
with_mlm: bool,
copy_pooler_dense_to_encoder: bool = False,
vocab_file: Optional[Text] = None,
sp_model_file: Optional[Text] = None,
do_lower_case: Optional[bool] = None) -> None:
"""Exports an Encoder as SavedModel after restoring pre-trained weights.
The exported SavedModel implements a superset of the Encoder API for
Text embeddings with Transformer Encoders described at
https://www.tensorflow.org/hub/common_saved_model_apis/text.
In particular, the exported SavedModel can be used in the following way:
```
# Calls default interface (encoder only).
encoder = hub.load(...)
encoder_inputs = dict(
input_word_ids=..., # Shape [batch, seq_length], dtype=int32
input_mask=..., # Shape [batch, seq_length], dtype=int32
input_type_ids=..., # Shape [batch, seq_length], dtype=int32
)
encoder_outputs = encoder(encoder_inputs)
assert encoder_outputs.keys() == {
"pooled_output", # Shape [batch_size, width], dtype=float32
"default", # Alias for "pooled_output" (aligns with other models).
"sequence_output" # Shape [batch_size, seq_length, width], dtype=float32
"encoder_outputs", # List of Tensors with outputs of all transformer layers.
}
```
If `with_mlm` is True, the exported SavedModel can also be called in the
following way:
```
# Calls expanded interface that includes logits of the Masked Language Model.
mlm_inputs = dict(
input_word_ids=..., # Shape [batch, seq_length], dtype=int32
input_mask=..., # Shape [batch, seq_length], dtype=int32
input_type_ids=..., # Shape [batch, seq_length], dtype=int32
masked_lm_positions=..., # Shape [batch, num_predictions], dtype=int32
)
mlm_outputs = encoder.mlm(mlm_inputs)
assert mlm_outputs.keys() == {
"pooled_output", # Shape [batch, width], dtype=float32
"sequence_output", # Shape [batch, seq_length, width], dtype=float32
"encoder_outputs", # List of Tensors with outputs of all transformer layers.
"mlm_logits" # Shape [batch, num_predictions, vocab_size], dtype=float32
}
```
Args:
export_path: The SavedModel output directory.
bert_config: An optional `configs.BertConfig` object. Note: exactly one of
`bert_config` and following `encoder_config` must be specified.
encoder_config: An optional `encoders.EncoderConfig` object.
model_checkpoint_path: The path to the checkpoint.
with_mlm: Whether to export the additional mlm sub-object.
copy_pooler_dense_to_encoder: Whether to copy the pooler's dense layer used
in the next sentence prediction task to the encoder.
vocab_file: The path to the wordpiece vocab file, or None.
sp_model_file: The path to the sentencepiece model file, or None. Exactly
one of vocab_file and sp_model_file must be set.
do_lower_case: Whether to lower-case text before tokenization.
"""
if with_mlm:
core_model, pretrainer = _create_model(
bert_config=bert_config,
encoder_config=encoder_config,
with_mlm=with_mlm)
encoder = pretrainer.encoder_network
# It supports both the new pretrainer checkpoint produced by TF-NLP and
# the checkpoint converted from TF1 (original BERT, SmallBERTs).
checkpoint_items = pretrainer.checkpoint_items
checkpoint = tf.train.Checkpoint(**checkpoint_items)
else:
core_model, encoder = _create_model(
bert_config=bert_config,
encoder_config=encoder_config,
with_mlm=with_mlm)
checkpoint = tf.train.Checkpoint(
model=encoder, # Legacy checkpoints.
encoder=encoder)
checkpoint.restore(model_checkpoint_path).assert_existing_objects_matched()
if copy_pooler_dense_to_encoder:
logging.info("Copy pooler's dense layer to the encoder.")
pooler_checkpoint = tf.train.Checkpoint(
**{"next_sentence.pooler_dense": encoder.pooler_layer})
pooler_checkpoint.restore(
model_checkpoint_path).assert_existing_objects_matched()
# Before SavedModels for preprocessing appeared in Oct 2020, the encoders
# provided this information to let users do preprocessing themselves.
# We keep doing that for now. It helps users to upgrade incrementally.
# Moreover, it offers an escape hatch for advanced users who want the
# full vocab, not the high-level operations from the preprocessing model.
if vocab_file:
core_model.vocab_file = tf.saved_model.Asset(vocab_file)
if do_lower_case is None:
raise ValueError("Must pass do_lower_case if passing vocab_file.")
core_model.do_lower_case = tf.Variable(do_lower_case, trainable=False)
elif sp_model_file:
# This was used by ALBERT, with implied values of do_lower_case=True
# and strip_diacritics=True.
core_model.sp_model_file = tf.saved_model.Asset(sp_model_file)
else:
raise ValueError("Must set vocab_file or sp_model_file")
core_model.save(export_path, include_optimizer=False, save_format="tf")
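# Illustrative sketch (not part of the original module): a typical call to
# `export_model` for a wordpiece BERT with an MLM head. All paths are
# hypothetical placeholders.
def _example_export_model():
  bert_config = configs.BertConfig.from_json_file(
      "/tmp/bert/bert_config.json")
  export_model(
      "/tmp/exported_bert",
      bert_config=bert_config,
      model_checkpoint_path="/tmp/bert/bert_model.ckpt",
      with_mlm=True,
      vocab_file="/tmp/bert/vocab.txt",
      do_lower_case=True)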
class BertPackInputsSavedModelWrapper(tf.train.Checkpoint):
"""Wraps a BertPackInputs layer for export to SavedModel.
The wrapper object is suitable for use with `tf.saved_model.save()` and
`.load()`. The wrapper object is callable with inputs and outputs like the
BertPackInputs layer, but differs from saving an unwrapped Keras object:
- The inputs can be a list of 1 or 2 RaggedTensors of dtype int32 and
ragged rank 1 or 2. (In Keras, saving to a tf.function in a SavedModel
would fix the number of RaggedTensors and their ragged rank.)
- The call accepts an optional keyword argument `seq_length=` to override
the layer's .seq_length hyperparameter. (In Keras, a hyperparameter
could not be changed after saving to a tf.function in a SavedModel.)
"""
def __init__(self, bert_pack_inputs: layers.BertPackInputs):
super().__init__()
# Preserve the layer's configured seq_length as a default but make it
# overridable. Having this dynamically determined default argument
# requires self.__call__ to be defined in this indirect way.
default_seq_length = bert_pack_inputs.seq_length
@tf.function(autograph=False)
def call(inputs, seq_length=default_seq_length):
return layers.BertPackInputs.bert_pack_inputs(
inputs,
seq_length=seq_length,
start_of_sequence_id=bert_pack_inputs.start_of_sequence_id,
end_of_segment_id=bert_pack_inputs.end_of_segment_id,
padding_id=bert_pack_inputs.padding_id)
self.__call__ = call
for ragged_rank in range(1, 3):
for num_segments in range(1, 3):
_ = self.__call__.get_concrete_function([
tf.RaggedTensorSpec([None] * (ragged_rank + 1), dtype=tf.int32)
for _ in range(num_segments)
],
seq_length=tf.TensorSpec(
[], tf.int32))
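# Illustrative sketch (not part of the original module): saving the wrapper and
# overriding seq_length on the reloaded object. Paths and inputs are
# hypothetical.
def _example_bert_pack_inputs_wrapper(special_tokens_dict):
  pack = layers.BertPackInputs(
      seq_length=128, special_tokens_dict=special_tokens_dict)
  wrapper = BertPackInputsSavedModelWrapper(pack)
  tf.saved_model.save(wrapper, "/tmp/bert_pack_inputs")
  restored = tf.saved_model.load("/tmp/bert_pack_inputs")
  tokens = tf.ragged.constant([[5, 6, 7], [8, 9]], dtype=tf.int32)
  # seq_length can be overridden at call time on the restored object.
  return restored([tokens], seq_length=64)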
def create_preprocessing(*,
vocab_file: Optional[str] = None,
sp_model_file: Optional[str] = None,
do_lower_case: bool,
tokenize_with_offsets: bool,
default_seq_length: int) -> tf.keras.Model:
"""Returns a preprocessing Model for given tokenization parameters.
This function builds a Keras Model with attached subobjects suitable for
saving to a SavedModel. The resulting SavedModel implements the Preprocessor
API for Text embeddings with Transformer Encoders described at
https://www.tensorflow.org/hub/common_saved_model_apis/text.
Args:
vocab_file: The path to the wordpiece vocab file, or None.
sp_model_file: The path to the sentencepiece model file, or None. Exactly
one of vocab_file and sp_model_file must be set. This determines the type
      of tokenizer that is used.
do_lower_case: Whether to do lower case.
tokenize_with_offsets: Whether to include the .tokenize_with_offsets
subobject.
default_seq_length: The sequence length of preprocessing results from root
callable. This is also the default sequence length for the
bert_pack_inputs subobject.
Returns:
A tf.keras.Model object with several attached subobjects, suitable for
saving as a preprocessing SavedModel.
"""
# Select tokenizer.
if bool(vocab_file) == bool(sp_model_file):
raise ValueError("Must set exactly one of vocab_file, sp_model_file")
if vocab_file:
tokenize = layers.BertTokenizer(
vocab_file=vocab_file,
lower_case=do_lower_case,
tokenize_with_offsets=tokenize_with_offsets)
else:
tokenize = layers.SentencepieceTokenizer(
model_file_path=sp_model_file,
lower_case=do_lower_case,
strip_diacritics=True, # Strip diacritics to follow ALBERT model.
tokenize_with_offsets=tokenize_with_offsets)
# The root object of the preprocessing model can be called to do
# one-shot preprocessing for users with single-sentence inputs.
sentences = tf.keras.layers.Input(shape=(), dtype=tf.string, name="sentences")
if tokenize_with_offsets:
tokens, start_offsets, limit_offsets = tokenize(sentences)
else:
tokens = tokenize(sentences)
pack = layers.BertPackInputs(
seq_length=default_seq_length,
special_tokens_dict=tokenize.get_special_tokens_dict())
model_inputs = pack(tokens)
preprocessing = tf.keras.Model(sentences, model_inputs)
# Individual steps of preprocessing are made available as named subobjects
# to enable more general preprocessing. For saving, they need to be Models
# in their own right.
preprocessing.tokenize = tf.keras.Model(sentences, tokens)
# Provide an equivalent to tokenize.get_special_tokens_dict().
preprocessing.tokenize.get_special_tokens_dict = tf.train.Checkpoint()
preprocessing.tokenize.get_special_tokens_dict.__call__ = tf.function(
lambda: tokenize.get_special_tokens_dict(), # pylint: disable=[unnecessary-lambda]
input_signature=[])
if tokenize_with_offsets:
preprocessing.tokenize_with_offsets = tf.keras.Model(
sentences, [tokens, start_offsets, limit_offsets])
preprocessing.tokenize_with_offsets.get_special_tokens_dict = (
preprocessing.tokenize.get_special_tokens_dict)
# Conceptually, this should be
# preprocessing.bert_pack_inputs = tf.keras.Model(tokens, model_inputs)
# but technicalities require us to use a wrapper (see comments there).
# In particular, seq_length can be overridden when calling this.
preprocessing.bert_pack_inputs = BertPackInputsSavedModelWrapper(pack)
return preprocessing
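# Illustrative sketch (not part of the original module): building a
# preprocessing model from a hypothetical wordpiece vocab and applying it to
# raw strings.
def _example_create_preprocessing():
  preprocessing = create_preprocessing(
      vocab_file="/tmp/bert/vocab.txt",
      do_lower_case=True,
      tokenize_with_offsets=False,
      default_seq_length=128)
  encoder_inputs = preprocessing(tf.constant(["hello world", "nice movie"]))
  # encoder_inputs holds "input_word_ids", "input_mask" and "input_type_ids".
  return encoder_inputs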
def _move_to_tmpdir(file_path: Optional[Text], tmpdir: Text) -> Optional[Text]:
"""Returns new path with same basename and hash of original path."""
if file_path is None:
return None
olddir, filename = os.path.split(file_path)
hasher = hashlib.sha1()
hasher.update(olddir.encode("utf-8"))
target_dir = os.path.join(tmpdir, hasher.hexdigest())
target_file = os.path.join(target_dir, filename)
tf.io.gfile.mkdir(target_dir)
tf.io.gfile.copy(file_path, target_file)
return target_file
def export_preprocessing(export_path: Text,
*,
vocab_file: Optional[Text] = None,
sp_model_file: Optional[Text] = None,
do_lower_case: bool,
tokenize_with_offsets: bool,
default_seq_length: int,
experimental_disable_assert: bool = False) -> None:
"""Exports preprocessing to a SavedModel for TF Hub."""
with tempfile.TemporaryDirectory() as tmpdir:
# TODO(b/175369555): Remove experimental_disable_assert and its use.
with _maybe_disable_assert(experimental_disable_assert):
preprocessing = create_preprocessing(
vocab_file=_move_to_tmpdir(vocab_file, tmpdir),
sp_model_file=_move_to_tmpdir(sp_model_file, tmpdir),
do_lower_case=do_lower_case,
tokenize_with_offsets=tokenize_with_offsets,
default_seq_length=default_seq_length)
preprocessing.save(export_path, include_optimizer=False, save_format="tf")
if experimental_disable_assert:
_check_no_assert(export_path)
# It helps the unit test to prevent stray copies of the vocab file.
if tf.io.gfile.exists(tmpdir):
raise IOError("Failed to clean up TemporaryDirectory")
# TODO(b/175369555): Remove all workarounds for this bug of TensorFlow 2.4
# when this bug is no longer a concern for publishing new models.
# TensorFlow 2.4 has a placement issue with Assert ops in tf.functions called
# from Dataset.map() on a TPU worker. They end up on the TPU coordinator,
# and invoking them from the TPU worker is either inefficient (when possible)
# or impossible (notably when using "headless" TPU workers on Cloud that do not
# have a channel to the coordinator). The bug has been fixed in time for TF 2.5.
# To work around this, the following code avoids Assert ops in the exported
# SavedModels. It monkey-patches calls to tf.Assert from inside TensorFlow and
# replaces them by a no-op while building the exported model. This is fragile,
# so _check_no_assert() validates the result. The resulting model should be fine
# to read on future versions of TF, even if this workaround at export time
# may break eventually. (Failing unit tests will tell.)
def _dont_assert(condition, data, summarize=None, name="Assert"):
"""The no-op version of tf.Assert installed by _maybe_disable_assert."""
del condition, data, summarize # Unused.
if tf.executing_eagerly():
return
with tf.name_scope(name):
return tf.no_op(name="dont_assert")
@contextlib.contextmanager
def _maybe_disable_assert(disable_assert):
"""Scoped monkey patch of control_flow_assert.Assert to a no-op."""
if not disable_assert:
yield
return
original_assert = control_flow_assert.Assert
control_flow_assert.Assert = _dont_assert
yield
control_flow_assert.Assert = original_assert
def _check_no_assert(saved_model_path):
"""Raises AssertionError if SavedModel contains Assert ops."""
saved_model_filename = os.path.join(saved_model_path, "saved_model.pb")
with tf.io.gfile.GFile(saved_model_filename, "rb") as f:
saved_model = saved_model_pb2.SavedModel.FromString(f.read())
assert_nodes = []
graph_def = saved_model.meta_graphs[0].graph_def
assert_nodes += [
"node '{}' in global graph".format(n.name)
for n in graph_def.node
if n.op == "Assert"
]
for fdef in graph_def.library.function:
assert_nodes += [
"node '{}' in function '{}'".format(n.name, fdef.signature.name)
for n in fdef.node_def
if n.op == "Assert"
]
if assert_nodes:
raise AssertionError(
"Internal tool error: "
"failed to suppress {} Assert ops in SavedModel:\n{}".format(
len(assert_nodes), "\n".join(assert_nodes[:10])))
| 21,391 | 42.303644 | 89 | py |
models | models-master/official/nlp/tools/squad_evaluate_v2_0.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation script for SQuAD version 2.0.
The functions are copied and modified from
https://raw.githubusercontent.com/white127/SQUAD-2.0-bidaf/master/evaluate-v2.0.py
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import collections
import re
import string
from absl import logging
def _make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def _normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def _get_tokens(s):
if not s: return []
return _normalize_answer(s).split()
def _compute_exact(a_gold, a_pred):
return int(_normalize_answer(a_gold) == _normalize_answer(a_pred))
def _compute_f1(a_gold, a_pred):
"""Compute F1-score."""
gold_toks = _get_tokens(a_gold)
pred_toks = _get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if not gold_toks or not pred_toks:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
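# Illustrative worked example (not part of the original script): articles such
# as "the" are dropped by _normalize_answer, so the gold tokens below are
# ["cat", "sat"], the prediction tokens are ["cat", "sat", "down"], and
# precision = 2/3, recall = 2/2, giving F1 = 0.8.
def _example_compute_f1():
  assert abs(_compute_f1("the cat sat", "cat sat down") - 0.8) < 1e-6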
def _get_raw_scores(dataset, predictions):
"""Compute raw scores."""
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if _normalize_answer(a['text'])]
if not gold_answers:
          # For unanswerable questions, the only correct answer is the empty string.
gold_answers = ['']
if qid not in predictions:
logging.error('Missing prediction for %s', qid)
continue
a_pred = predictions[qid]
# Take max over all gold answers
exact_scores[qid] = max(_compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(_compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def _apply_no_ans_threshold(
scores, na_probs, qid_to_has_ans, na_prob_thresh=1.0):
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def _make_eval_dict(exact_scores, f1_scores, qid_list=None):
"""Make evaluation result dictionary."""
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('total', total),
])
def _merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
def _make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans):
"""Make evaluation dictionary containing average recision recall."""
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for i, qid in enumerate(qid_list):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i+1)
cur_r = true_pos / float(num_true_pos)
if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p)
recalls.append(cur_r)
return {'ap': 100.0 * avg_prec}
def _run_precision_recall_analysis(
main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans):
"""Run precision recall analysis and return result dictionary."""
num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
pr_exact = _make_precision_recall_eval(
exact_raw, na_probs, num_true_pos, qid_to_has_ans)
pr_f1 = _make_precision_recall_eval(
f1_raw, na_probs, num_true_pos, qid_to_has_ans)
oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
pr_oracle = _make_precision_recall_eval(
oracle_scores, na_probs, num_true_pos, qid_to_has_ans)
_merge_eval(main_eval, pr_exact, 'pr_exact')
_merge_eval(main_eval, pr_f1, 'pr_f1')
_merge_eval(main_eval, pr_oracle, 'pr_oracle')
def _find_best_thresh(predictions, scores, na_probs, qid_to_has_ans):
"""Find the best threshold for no answer probability."""
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for qid in qid_list:
if qid not in scores: continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if predictions[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
def _find_all_best_thresh(
main_eval, predictions, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh = _find_best_thresh(
predictions, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = _find_best_thresh(
predictions, f1_raw, na_probs, qid_to_has_ans)
main_eval['final_exact'] = best_exact
main_eval['final_exact_thresh'] = exact_thresh
main_eval['final_f1'] = best_f1
main_eval['final_f1_thresh'] = f1_thresh
def evaluate(dataset, predictions, na_probs=None):
"""Evaluate prediction results."""
new_orig_data = []
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
if qa['id'] in predictions:
new_para = {'qas': [qa]}
new_article = {'paragraphs': [new_para]}
new_orig_data.append(new_article)
dataset = new_orig_data
if na_probs is None:
na_probs = {k: 0.0 for k in predictions}
qid_to_has_ans = _make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = _get_raw_scores(dataset, predictions)
exact_thresh = _apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans)
f1_thresh = _apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans)
out_eval = _make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = _make_eval_dict(
exact_thresh, f1_thresh, qid_list=has_ans_qids)
_merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = _make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
_merge_eval(out_eval, no_ans_eval, 'NoAns')
_find_all_best_thresh(
out_eval, predictions, exact_raw, f1_raw, na_probs, qid_to_has_ans)
_run_precision_recall_analysis(
out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans)
return out_eval
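# Illustrative sketch (not part of the original script): `dataset` is the list
# of articles from a SQuAD-style JSON file (the value of its "data" field) and
# `predictions` maps question ids to predicted answer strings. Values are
# hypothetical.
def _example_evaluate():
  dataset = [{
      'paragraphs': [{
          'qas': [{'id': 'q1', 'answers': [{'text': 'Denver Broncos'}]}]
      }]
  }]
  predictions = {'q1': 'Denver Broncos'}
  return evaluate(dataset, predictions)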
| 8,625 | 33.504 | 82 | py |
models | models-master/official/nlp/tools/tf1_bert_checkpoint_converter_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Convert checkpoints created by Estimator (tf1) to be Keras compatible."""
import numpy as np
import tensorflow.compat.v1 as tf # TF 1.x
# Mapping between old <=> new names. The source pattern in original variable
# name will be replaced by destination pattern.
BERT_NAME_REPLACEMENTS = (
("bert", "bert_model"),
("embeddings/word_embeddings", "word_embeddings/embeddings"),
("embeddings/token_type_embeddings",
"embedding_postprocessor/type_embeddings"),
("embeddings/position_embeddings",
"embedding_postprocessor/position_embeddings"),
("embeddings/LayerNorm", "embedding_postprocessor/layer_norm"),
("attention/self", "self_attention"),
("attention/output/dense", "self_attention_output"),
("attention/output/LayerNorm", "self_attention_layer_norm"),
("intermediate/dense", "intermediate"),
("output/dense", "output"),
("output/LayerNorm", "output_layer_norm"),
("pooler/dense", "pooler_transform"),
)
BERT_V2_NAME_REPLACEMENTS = (
("bert/", ""),
("encoder", "transformer"),
("embeddings/word_embeddings", "word_embeddings/embeddings"),
("embeddings/token_type_embeddings", "type_embeddings/embeddings"),
("embeddings/position_embeddings", "position_embedding/embeddings"),
("embeddings/LayerNorm", "embeddings/layer_norm"),
("attention/self", "self_attention"),
("attention/output/dense", "self_attention/attention_output"),
("attention/output/LayerNorm", "self_attention_layer_norm"),
("intermediate/dense", "intermediate"),
("output/dense", "output"),
("output/LayerNorm", "output_layer_norm"),
("pooler/dense", "pooler_transform"),
("cls/predictions", "bert/cls/predictions"),
("cls/predictions/output_bias", "cls/predictions/output_bias/bias"),
("cls/seq_relationship/output_bias", "predictions/transform/logits/bias"),
("cls/seq_relationship/output_weights",
"predictions/transform/logits/kernel"),
)
BERT_PERMUTATIONS = ()
BERT_V2_PERMUTATIONS = (("cls/seq_relationship/output_weights", (1, 0)),)
def _bert_name_replacement(var_name, name_replacements):
"""Gets the variable name replacement."""
for src_pattern, tgt_pattern in name_replacements:
if src_pattern in var_name:
old_var_name = var_name
var_name = var_name.replace(src_pattern, tgt_pattern)
tf.logging.info("Converted: %s --> %s", old_var_name, var_name)
return var_name
def _has_exclude_patterns(name, exclude_patterns):
"""Checks if a string contains substrings that match patterns to exclude."""
for p in exclude_patterns:
if p in name:
return True
return False
def _get_permutation(name, permutations):
"""Checks whether a variable requires transposition by pattern matching."""
for src_pattern, permutation in permutations:
if src_pattern in name:
tf.logging.info("Permuted: %s --> %s", name, permutation)
return permutation
return None
def _get_new_shape(name, shape, num_heads):
"""Checks whether a variable requires reshape by pattern matching."""
if "self_attention/attention_output/kernel" in name:
return tuple([num_heads, shape[0] // num_heads, shape[1]])
if "self_attention/attention_output/bias" in name:
return shape
patterns = [
"self_attention/query", "self_attention/value", "self_attention/key"
]
for pattern in patterns:
if pattern in name:
if "kernel" in name:
return tuple([shape[0], num_heads, shape[1] // num_heads])
if "bias" in name:
return tuple([num_heads, shape[0] // num_heads])
return None
def create_v2_checkpoint(model,
src_checkpoint,
output_path,
checkpoint_model_name="model"):
"""Converts a name-based matched TF V1 checkpoint to TF V2 checkpoint."""
# Uses streaming-restore in eager model to read V1 name-based checkpoints.
model.load_weights(src_checkpoint).assert_existing_objects_matched()
if hasattr(model, "checkpoint_items"):
checkpoint_items = model.checkpoint_items
else:
checkpoint_items = {}
checkpoint_items[checkpoint_model_name] = model
checkpoint = tf.train.Checkpoint(**checkpoint_items)
checkpoint.save(output_path)
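# Illustrative sketch (not part of the original library): converting a
# name-based V1 checkpoint into an object-based V2 checkpoint for an
# already-built Keras `model`. The paths are hypothetical placeholders.
def _example_create_v2_checkpoint(model):
  create_v2_checkpoint(
      model=model,
      src_checkpoint="/tmp/tf1_bert/bert_model.ckpt",
      output_path="/tmp/tf2_bert/bert_v2",
      checkpoint_model_name="encoder")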
def convert(checkpoint_from_path,
checkpoint_to_path,
num_heads,
name_replacements,
permutations,
exclude_patterns=None):
"""Migrates the names of variables within a checkpoint.
Args:
checkpoint_from_path: Path to source checkpoint to be read in.
checkpoint_to_path: Path to checkpoint to be written out.
num_heads: The number of heads of the model.
name_replacements: A list of tuples of the form (match_str, replace_str)
describing variable names to adjust.
permutations: A list of tuples of the form (match_str, permutation)
describing permutations to apply to given variables. Note that match_str
should match the original variable name, not the replaced one.
exclude_patterns: A list of string patterns to exclude variables from
checkpoint conversion.
Returns:
A dictionary that maps the new variable names to the Variable objects.
A dictionary that maps the old variable names to the new variable names.
"""
with tf.Graph().as_default():
tf.logging.info("Reading checkpoint_from_path %s", checkpoint_from_path)
reader = tf.train.NewCheckpointReader(checkpoint_from_path)
name_shape_map = reader.get_variable_to_shape_map()
new_variable_map = {}
conversion_map = {}
for var_name in name_shape_map:
if exclude_patterns and _has_exclude_patterns(var_name, exclude_patterns):
continue
# Get the original tensor data.
tensor = reader.get_tensor(var_name)
# Look up the new variable name, if any.
new_var_name = _bert_name_replacement(var_name, name_replacements)
# See if we need to reshape the underlying tensor.
new_shape = None
if num_heads > 0:
new_shape = _get_new_shape(new_var_name, tensor.shape, num_heads)
if new_shape:
tf.logging.info("Veriable %s has a shape change from %s to %s",
var_name, tensor.shape, new_shape)
tensor = np.reshape(tensor, new_shape)
# See if we need to permute the underlying tensor.
permutation = _get_permutation(var_name, permutations)
if permutation:
tensor = np.transpose(tensor, permutation)
# Create a new variable with the possibly-reshaped or transposed tensor.
var = tf.Variable(tensor, name=var_name)
# Save the variable into the new variable map.
new_variable_map[new_var_name] = var
# Keep a list of converter variables for sanity checking.
if new_var_name != var_name:
conversion_map[var_name] = new_var_name
saver = tf.train.Saver(new_variable_map)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf.logging.info("Writing checkpoint_to_path %s", checkpoint_to_path)
saver.save(sess, checkpoint_to_path, write_meta_graph=False)
tf.logging.info("Summary:")
tf.logging.info(" Converted %d variable name(s).", len(new_variable_map))
tf.logging.info(" Converted: %s", str(conversion_map))
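# Illustrative sketch (not part of the original library): migrating a BERT-base
# (12 attention heads) V1 checkpoint to the V2 naming scheme while skipping
# optimizer slot variables. Paths and exclude patterns are hypothetical.
def _example_convert():
  convert(
      checkpoint_from_path="/tmp/tf1_bert/bert_model.ckpt",
      checkpoint_to_path="/tmp/tf1_bert_renamed/bert_model.ckpt",
      num_heads=12,
      name_replacements=BERT_V2_NAME_REPLACEMENTS,
      permutations=BERT_V2_PERMUTATIONS,
      exclude_patterns=["adam", "Adam"])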
| 7,865 | 37.940594 | 80 | py |
models | models-master/official/nlp/tools/tokenization.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
"""Tokenization classes implementation.
The file is forked from:
https://github.com/google-research/bert/blob/master/tokenization.py.
"""
import collections
import re
import unicodedata
import six
import tensorflow as tf
import sentencepiece as spm
SPIECE_UNDERLINE = "▁"
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." %
(actual_flag, init_checkpoint, model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.io.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True, split_on_punc=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case, split_on_punc=split_on_punc)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
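# Illustrative usage (not part of the original library): end-to-end wordpiece
# tokenization with a hypothetical vocab file; the pieces depend on the vocab.
def _example_full_tokenizer():
  tokenizer = FullTokenizer(
      vocab_file="/tmp/bert/vocab.txt", do_lower_case=True)
  tokens = tokenizer.tokenize("UNwanted running")
  ids = tokenizer.convert_tokens_to_ids(tokens)
  return tokens, ids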
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, split_on_punc=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
split_on_punc: Whether to apply split on punctuations. By default BERT
starts a new token for punctuations. This makes detokenization difficult
for tasks like seq2seq decoding.
"""
self.do_lower_case = do_lower_case
self.split_on_punc = split_on_punc
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
if self.split_on_punc:
split_tokens.extend(self._run_split_on_punc(token))
else:
split_tokens.append(token)
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=400):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def preprocess_text(inputs, remove_space=True, lower=False):
"""Preprocesses data by removing extra space and normalize data.
This method is used together with sentence piece tokenizer and is forked from:
https://github.com/google-research/google-research/blob/e1f6fa00/albert/tokenization.py
Args:
inputs: The input text.
remove_space: Whether to remove the extra space.
lower: Whether to lowercase the text.
Returns:
The preprocessed text.
"""
outputs = inputs
if remove_space:
outputs = " ".join(inputs.strip().split())
if six.PY2 and isinstance(outputs, str):
try:
outputs = six.ensure_text(outputs, "utf-8")
except UnicodeDecodeError:
outputs = six.ensure_text(outputs, "latin-1")
outputs = unicodedata.normalize("NFKD", outputs)
outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
if lower:
outputs = outputs.lower()
return outputs
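# Illustrative example (not part of the original library): extra whitespace is
# collapsed and accents are stripped via NFKD normalization.
def _example_preprocess_text():
  assert preprocess_text("  H\u00e9llo   world ", lower=True) == "hello world"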
def encode_pieces(sp_model, text, sample=False):
"""Segements text into pieces.
This method is used together with sentence piece tokenizer and is forked from:
https://github.com/google-research/google-research/blob/e1f6fa00/albert/tokenization.py
Args:
sp_model: A spm.SentencePieceProcessor object.
    text: The input text to be segmented.
sample: Whether to randomly sample a segmentation output or return a
deterministic one.
Returns:
A list of token pieces.
"""
if six.PY2 and isinstance(text, six.text_type):
text = six.ensure_binary(text, "utf-8")
if not sample:
pieces = sp_model.EncodeAsPieces(text)
else:
pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)
new_pieces = []
for piece in pieces:
piece = printable_text(piece)
if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
cur_pieces = sp_model.EncodeAsPieces(piece[:-1].replace(
SPIECE_UNDERLINE, ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces
def encode_ids(sp_model, text, sample=False):
"""Segments text and return token ids.
This method is used together with sentence piece tokenizer and is forked from:
https://github.com/google-research/google-research/blob/e1f6fa00/albert/tokenization.py
Args:
sp_model: A spm.SentencePieceProcessor object.
    text: The input text to be segmented.
sample: Whether to randomly sample a segmentation output or return a
deterministic one.
Returns:
A list of token ids.
"""
pieces = encode_pieces(sp_model, text, sample=sample)
ids = [sp_model.PieceToId(piece) for piece in pieces]
return ids
class FullSentencePieceTokenizer(object):
"""Runs end-to-end sentence piece tokenization.
The interface of this class is intended to keep the same as above
`FullTokenizer` class for easier usage.
"""
def __init__(self, sp_model_file):
"""Inits FullSentencePieceTokenizer.
Args:
sp_model_file: The path to the sentence piece model file.
"""
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(sp_model_file)
self.vocab = {
self.sp_model.IdToPiece(i): i
for i in six.moves.range(self.sp_model.GetPieceSize())
}
def tokenize(self, text):
"""Tokenizes text into pieces."""
return encode_pieces(self.sp_model, text)
def convert_tokens_to_ids(self, tokens):
"""Converts a list of tokens to a list of ids."""
return [self.sp_model.PieceToId(printable_text(token)) for token in tokens]
def convert_ids_to_tokens(self, ids):
"""Converts a list of ids ot a list of tokens."""
return [self.sp_model.IdToPiece(id_) for id_ in ids]
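# Illustrative usage (not part of the original library): sentencepiece
# tokenization with a hypothetical model file; pieces depend on the model.
def _example_full_sentence_piece_tokenizer():
  tokenizer = FullSentencePieceTokenizer("/tmp/albert/30k-clean.model")
  pieces = tokenizer.tokenize("Hello world")
  ids = tokenizer.convert_tokens_to_ids(pieces)
  return pieces, ids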
| 16,589 | 29.608856 | 89 | py |
models | models-master/official/nlp/tools/tokenization_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import six
import tensorflow as tf
from official.nlp.tools import tokenization
class TokenizationTest(tf.test.TestCase):
"""Tokenization test.
The implementation is forked from
  https://github.com/google-research/bert/blob/master/tokenization_test.py.
"""
def test_full_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing", ","
]
with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
if six.PY2:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
else:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens
]).encode("utf-8"))
vocab_file = vocab_writer.name
tokenizer = tokenization.FullTokenizer(vocab_file)
os.unlink(vocab_file)
tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertAllEqual(
tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def test_chinese(self):
tokenizer = tokenization.BasicTokenizer()
self.assertAllEqual(
tokenizer.tokenize(u"ah\u535A\u63A8zz"),
[u"ah", u"\u535A", u"\u63A8", u"zz"])
def test_basic_tokenizer_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["hello", "!", "how", "are", "you", "?"])
self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["HeLLo", "!", "how", "Are", "yoU", "?"])
def test_basic_tokenizer_no_split_on_punc(self):
tokenizer = tokenization.BasicTokenizer(
do_lower_case=True, split_on_punc=False)
self.assertAllEqual(
tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
["hello!how", "are", "you?"])
def test_wordpiece_tokenizer(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing", "##!", "!"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
self.assertAllEqual(tokenizer.tokenize(""), [])
self.assertAllEqual(
tokenizer.tokenize("unwanted running"),
["un", "##want", "##ed", "runn", "##ing"])
self.assertAllEqual(
tokenizer.tokenize("unwanted running !"),
["un", "##want", "##ed", "runn", "##ing", "!"])
self.assertAllEqual(
tokenizer.tokenize("unwanted running!"),
["un", "##want", "##ed", "runn", "##ing", "##!"])
self.assertAllEqual(
tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_convert_tokens_to_ids(self):
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing"
]
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
self.assertAllEqual(
tokenization.convert_tokens_to_ids(
vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
def test_is_whitespace(self):
self.assertTrue(tokenization._is_whitespace(u" "))
self.assertTrue(tokenization._is_whitespace(u"\t"))
self.assertTrue(tokenization._is_whitespace(u"\r"))
self.assertTrue(tokenization._is_whitespace(u"\n"))
self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
self.assertFalse(tokenization._is_whitespace(u"A"))
self.assertFalse(tokenization._is_whitespace(u"-"))
def test_is_control(self):
self.assertTrue(tokenization._is_control(u"\u0005"))
self.assertFalse(tokenization._is_control(u"A"))
self.assertFalse(tokenization._is_control(u" "))
self.assertFalse(tokenization._is_control(u"\t"))
self.assertFalse(tokenization._is_control(u"\r"))
self.assertFalse(tokenization._is_control(u"\U0001F4A9"))
def test_is_punctuation(self):
self.assertTrue(tokenization._is_punctuation(u"-"))
self.assertTrue(tokenization._is_punctuation(u"$"))
self.assertTrue(tokenization._is_punctuation(u"`"))
self.assertTrue(tokenization._is_punctuation(u"."))
self.assertFalse(tokenization._is_punctuation(u"A"))
self.assertFalse(tokenization._is_punctuation(u" "))
if __name__ == "__main__":
tf.test.main()
| 5,217 | 32.235669 | 80 | py |
models | models-master/official/nlp/tools/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 610 | 37.1875 | 74 | py |
models | models-master/official/nlp/tools/tf2_bert_encoder_checkpoint_converter.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A converter from a V1 BERT encoder checkpoint to a V2 encoder checkpoint.
The conversion will yield an object-oriented checkpoint that can be used
to restore a BertEncoder or BertPretrainerV2 object (see the `converted_model`
FLAG below).
"""
import os
from absl import app
from absl import flags
import tensorflow as tf
from official.legacy.bert import configs
from official.modeling import tf_utils
from official.nlp.modeling import models
from official.nlp.modeling import networks
from official.nlp.tools import tf1_bert_checkpoint_converter_lib
FLAGS = flags.FLAGS
flags.DEFINE_string("bert_config_file", None,
"Bert configuration file to define core bert layers.")
flags.DEFINE_string(
"checkpoint_to_convert", None,
"Initial checkpoint from a pretrained BERT model core (that is, only the "
"BertModel, with no task heads.)")
flags.DEFINE_string("converted_checkpoint_path", None,
"Name for the created object-based V2 checkpoint.")
flags.DEFINE_string("checkpoint_model_name", "encoder",
"The name of the model when saving the checkpoint, i.e., "
"the checkpoint will be saved using: "
"tf.train.Checkpoint(FLAGS.checkpoint_model_name=model).")
flags.DEFINE_enum(
"converted_model", "encoder", ["encoder", "pretrainer"],
"Whether to convert the checkpoint to a `BertEncoder` model or a "
"`BertPretrainerV2` model (with mlm but without classification heads).")
def _create_bert_model(cfg):
"""Creates a BERT keras core model from BERT configuration.
Args:
cfg: A `BertConfig` to create the core model.
Returns:
A BertEncoder network.
"""
bert_encoder = networks.BertEncoder(
vocab_size=cfg.vocab_size,
hidden_size=cfg.hidden_size,
num_layers=cfg.num_hidden_layers,
num_attention_heads=cfg.num_attention_heads,
intermediate_size=cfg.intermediate_size,
activation=tf_utils.get_activation(cfg.hidden_act),
dropout_rate=cfg.hidden_dropout_prob,
attention_dropout_rate=cfg.attention_probs_dropout_prob,
max_sequence_length=cfg.max_position_embeddings,
type_vocab_size=cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=cfg.initializer_range),
embedding_width=cfg.embedding_size)
return bert_encoder
def _create_bert_pretrainer_model(cfg):
"""Creates a BERT keras core model from BERT configuration.
Args:
cfg: A `BertConfig` to create the core model.
Returns:
A BertPretrainerV2 model.
"""
bert_encoder = _create_bert_model(cfg)
pretrainer = models.BertPretrainerV2(
encoder_network=bert_encoder,
mlm_activation=tf_utils.get_activation(cfg.hidden_act),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=cfg.initializer_range))
# Makes sure the pretrainer variables are created.
_ = pretrainer(pretrainer.inputs)
return pretrainer
def convert_checkpoint(bert_config,
output_path,
v1_checkpoint,
checkpoint_model_name="model",
converted_model="encoder"):
"""Converts a V1 checkpoint into an OO V2 checkpoint."""
output_dir, _ = os.path.split(output_path)
tf.io.gfile.makedirs(output_dir)
# Create a temporary V1 name-converted checkpoint in the output directory.
temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1")
temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt")
tf1_bert_checkpoint_converter_lib.convert(
checkpoint_from_path=v1_checkpoint,
checkpoint_to_path=temporary_checkpoint,
num_heads=bert_config.num_attention_heads,
name_replacements=(
tf1_bert_checkpoint_converter_lib.BERT_V2_NAME_REPLACEMENTS),
permutations=tf1_bert_checkpoint_converter_lib.BERT_V2_PERMUTATIONS,
exclude_patterns=["adam", "Adam"])
if converted_model == "encoder":
model = _create_bert_model(bert_config)
elif converted_model == "pretrainer":
model = _create_bert_pretrainer_model(bert_config)
else:
raise ValueError("Unsupported converted_model: %s" % converted_model)
# Create a V2 checkpoint from the temporary checkpoint.
tf1_bert_checkpoint_converter_lib.create_v2_checkpoint(
model, temporary_checkpoint, output_path, checkpoint_model_name)
# Clean up the temporary checkpoint, if it exists.
try:
tf.io.gfile.rmtree(temporary_checkpoint_dir)
except tf.errors.OpError:
# If it doesn't exist, we don't need to clean it up; continue.
pass
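# Illustrative sketch, not part of the original converter: a programmatic call
# to convert_checkpoint(). The BertConfig values and every path below are
# assumed placeholders chosen for demonstration only.
def _example_convert_bert_checkpoint():
  bert_config = configs.BertConfig(
      vocab_size=30522,
      hidden_size=768,
      num_hidden_layers=12,
      num_attention_heads=12,
      intermediate_size=3072)
  convert_checkpoint(
      bert_config=bert_config,
      output_path="/tmp/bert_v2/converted.ckpt",
      v1_checkpoint="/tmp/bert_v1/bert_model.ckpt",
      checkpoint_model_name="encoder",
      converted_model="encoder")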
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
output_path = FLAGS.converted_checkpoint_path
v1_checkpoint = FLAGS.checkpoint_to_convert
checkpoint_model_name = FLAGS.checkpoint_model_name
converted_model = FLAGS.converted_model
bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)
convert_checkpoint(
bert_config=bert_config,
output_path=output_path,
v1_checkpoint=v1_checkpoint,
checkpoint_model_name=checkpoint_model_name,
converted_model=converted_model)
if __name__ == "__main__":
app.run(main)
| 5,826 | 35.192547 | 78 | py |
models | models-master/official/nlp/tools/tf2_albert_encoder_checkpoint_converter.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A converter from a tf1 ALBERT encoder checkpoint to a tf2 encoder checkpoint.
The conversion will yield an object-oriented checkpoint that can be used
to restore an AlbertEncoder object.
"""
import os
from absl import app
from absl import flags
import tensorflow as tf
from official.legacy.albert import configs
from official.modeling import tf_utils
from official.nlp.modeling import models
from official.nlp.modeling import networks
from official.nlp.tools import tf1_bert_checkpoint_converter_lib
FLAGS = flags.FLAGS
flags.DEFINE_string("albert_config_file", None,
"Albert configuration file to define core bert layers.")
flags.DEFINE_string(
"checkpoint_to_convert", None,
"Initial checkpoint from a pretrained BERT model core (that is, only the "
"BertModel, with no task heads.)")
flags.DEFINE_string("converted_checkpoint_path", None,
"Name for the created object-based V2 checkpoint.")
flags.DEFINE_string("checkpoint_model_name", "encoder",
"The name of the model when saving the checkpoint, i.e., "
"the checkpoint will be saved using: "
"tf.train.Checkpoint(FLAGS.checkpoint_model_name=model).")
flags.DEFINE_enum(
"converted_model", "encoder", ["encoder", "pretrainer"],
"Whether to convert the checkpoint to a `AlbertEncoder` model or a "
"`BertPretrainerV2` model (with mlm but without classification heads).")
ALBERT_NAME_REPLACEMENTS = (
("bert/encoder/", ""),
("bert/", ""),
("embeddings/word_embeddings", "word_embeddings/embeddings"),
("embeddings/position_embeddings", "position_embedding/embeddings"),
("embeddings/token_type_embeddings", "type_embeddings/embeddings"),
("embeddings/LayerNorm", "embeddings/layer_norm"),
("embedding_hidden_mapping_in", "embedding_projection"),
("group_0/inner_group_0/", ""),
("attention_1/self", "self_attention"),
("attention_1/output/dense", "self_attention/attention_output"),
("transformer/LayerNorm/", "transformer/self_attention_layer_norm/"),
("ffn_1/intermediate/dense", "intermediate"),
("ffn_1/intermediate/output/dense", "output"),
("transformer/LayerNorm_1/", "transformer/output_layer_norm/"),
("pooler/dense", "pooler_transform"),
("cls/predictions", "bert/cls/predictions"),
("cls/predictions/output_bias", "cls/predictions/output_bias/bias"),
("cls/seq_relationship/output_bias", "predictions/transform/logits/bias"),
("cls/seq_relationship/output_weights",
"predictions/transform/logits/kernel"),
)
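# Illustrative note, not part of the original converter: assuming the
# replacements above are applied in order as plain substring substitutions, a
# tf1 variable name such as "bert/encoder/embeddings/word_embeddings" first
# loses the "bert/encoder/" prefix and then maps to
# "word_embeddings/embeddings" in the tf2 object-based checkpoint.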
def _create_albert_model(cfg):
"""Creates an ALBERT keras core model from BERT configuration.
Args:
cfg: A `AlbertConfig` to create the core model.
Returns:
A keras model.
"""
albert_encoder = networks.AlbertEncoder(
vocab_size=cfg.vocab_size,
hidden_size=cfg.hidden_size,
embedding_width=cfg.embedding_size,
num_layers=cfg.num_hidden_layers,
num_attention_heads=cfg.num_attention_heads,
intermediate_size=cfg.intermediate_size,
activation=tf_utils.get_activation(cfg.hidden_act),
dropout_rate=cfg.hidden_dropout_prob,
attention_dropout_rate=cfg.attention_probs_dropout_prob,
max_sequence_length=cfg.max_position_embeddings,
type_vocab_size=cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=cfg.initializer_range))
return albert_encoder
def _create_pretrainer_model(cfg):
"""Creates a pretrainer with AlbertEncoder from ALBERT configuration.
Args:
    cfg: An `AlbertConfig` to create the core model.
Returns:
A BertPretrainerV2 model.
"""
albert_encoder = _create_albert_model(cfg)
pretrainer = models.BertPretrainerV2(
encoder_network=albert_encoder,
mlm_activation=tf_utils.get_activation(cfg.hidden_act),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=cfg.initializer_range))
# Makes sure masked_lm layer's variables in pretrainer are created.
_ = pretrainer(pretrainer.inputs)
return pretrainer
def convert_checkpoint(bert_config, output_path, v1_checkpoint,
checkpoint_model_name,
converted_model="encoder"):
"""Converts a V1 checkpoint into an OO V2 checkpoint."""
output_dir, _ = os.path.split(output_path)
# Create a temporary V1 name-converted checkpoint in the output directory.
temporary_checkpoint_dir = os.path.join(output_dir, "temp_v1")
temporary_checkpoint = os.path.join(temporary_checkpoint_dir, "ckpt")
tf1_bert_checkpoint_converter_lib.convert(
checkpoint_from_path=v1_checkpoint,
checkpoint_to_path=temporary_checkpoint,
num_heads=bert_config.num_attention_heads,
name_replacements=ALBERT_NAME_REPLACEMENTS,
permutations=tf1_bert_checkpoint_converter_lib.BERT_V2_PERMUTATIONS,
exclude_patterns=["adam", "Adam"])
# Create a V2 checkpoint from the temporary checkpoint.
if converted_model == "encoder":
model = _create_albert_model(bert_config)
elif converted_model == "pretrainer":
model = _create_pretrainer_model(bert_config)
else:
raise ValueError("Unsupported converted_model: %s" % converted_model)
tf1_bert_checkpoint_converter_lib.create_v2_checkpoint(
model, temporary_checkpoint, output_path, checkpoint_model_name)
# Clean up the temporary checkpoint, if it exists.
try:
tf.io.gfile.rmtree(temporary_checkpoint_dir)
except tf.errors.OpError:
# If it doesn't exist, we don't need to clean it up; continue.
pass
def main(_):
output_path = FLAGS.converted_checkpoint_path
v1_checkpoint = FLAGS.checkpoint_to_convert
checkpoint_model_name = FLAGS.checkpoint_model_name
converted_model = FLAGS.converted_model
albert_config = configs.AlbertConfig.from_json_file(FLAGS.albert_config_file)
convert_checkpoint(albert_config, output_path, v1_checkpoint,
checkpoint_model_name,
converted_model=converted_model)
if __name__ == "__main__":
app.run(main)
| 6,676 | 38.046784 | 80 | py |
models | models-master/official/nlp/tools/squad_evaluate_v1_1.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation of SQuAD predictions (version 1.1).
The functions are copied from
https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/.
The SQuAD dataset is described in this paper:
SQuAD: 100,000+ Questions for Machine Comprehension of Text
Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, Percy Liang
https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf
"""
import collections
import re
import string
# pylint: disable=g-bad-import-order
from absl import logging
# pylint: enable=g-bad-import-order
def _normalize_answer(s):
"""Lowers text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
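# Illustrative note, not part of the original script: for the made-up string
# "The  Denver Broncos!", _normalize_answer lowercases, strips punctuation,
# drops the article and collapses whitespace, returning "denver broncos".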
def _f1_score(prediction, ground_truth):
"""Computes F1 score by comparing prediction to ground truth."""
prediction_tokens = _normalize_answer(prediction).split()
ground_truth_tokens = _normalize_answer(ground_truth).split()
prediction_counter = collections.Counter(prediction_tokens)
ground_truth_counter = collections.Counter(ground_truth_tokens)
common = prediction_counter & ground_truth_counter
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
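# Illustrative note, not part of the original script: with the made-up pair
# prediction "denver broncos defense" vs. ground truth "denver broncos",
# two tokens overlap, so precision = 2/3, recall = 1.0 and F1 = 0.8.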
def _exact_match_score(prediction, ground_truth):
"""Checks if predicted answer exactly matches ground truth answer."""
return _normalize_answer(prediction) == _normalize_answer(ground_truth)
def _metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
"""Computes the max over all metric scores."""
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
"""Evaluates predictions for a dataset."""
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
logging.error(message)
continue
ground_truths = [entry["text"] for entry in qa["answers"]]
prediction = predictions[qa["id"]]
exact_match += _metric_max_over_ground_truths(_exact_match_score,
prediction, ground_truths)
f1 += _metric_max_over_ground_truths(_f1_score, prediction,
ground_truths)
exact_match = exact_match / total
f1 = f1 / total
return {"exact_match": exact_match, "final_f1": f1}
| 3,724 | 33.813084 | 94 | py |
models | models-master/official/nlp/finetuning/binary_helper.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The helper for finetuning binaries."""
import json
import math
import sys
from typing import Any, Dict, List, Optional
from absl import logging
import tensorflow as tf
from official.core import config_definitions as cfg
from official.modeling import hyperparams
from official.nlp.configs import encoders
from official.nlp.data import question_answering_dataloader
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.data import tagging_dataloader
from official.nlp.tasks import question_answering
from official.nlp.tasks import sentence_prediction
from official.nlp.tasks import tagging
def override_trainer_cfg(trainer_cfg: cfg.TrainerConfig, learning_rate: float,
num_epoch: int, global_batch_size: int,
warmup_ratio: float, training_data_size: int,
eval_data_size: int, num_eval_per_epoch: int,
best_checkpoint_export_subdir: str,
best_checkpoint_eval_metric: str,
best_checkpoint_metric_comp: str):
"""Overrides a `cfg.TrainerConfig` object."""
steps_per_epoch = training_data_size // global_batch_size
train_steps = steps_per_epoch * num_epoch
# TODO(b/165081095): always set to -1 after the bug is resolved.
if eval_data_size:
eval_steps = int(math.ceil(eval_data_size / global_batch_size))
else:
eval_steps = -1 # exhaust the validation data.
  warmup_steps = int(train_steps * warmup_ratio)
validation_interval = steps_per_epoch // num_eval_per_epoch
trainer_cfg.override({
'optimizer_config': {
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'decay_steps': train_steps,
'initial_learning_rate': learning_rate,
'end_learning_rate': 0,
}
},
'optimizer': {
'type': 'adamw',
},
'warmup': {
'polynomial': {
                  'warmup_steps': warmup_steps,
},
'type': 'polynomial',
},
},
'train_steps': train_steps,
'validation_interval': validation_interval,
'validation_steps': eval_steps,
'best_checkpoint_export_subdir': best_checkpoint_export_subdir,
'best_checkpoint_eval_metric': best_checkpoint_eval_metric,
'best_checkpoint_metric_comp': best_checkpoint_metric_comp,
})
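# Illustrative sketch, not part of the original helper: how the schedule
# arithmetic above plays out for assumed values (the data size and
# hyperparameters below are placeholders, not defaults of this binary).
def _example_schedule_arithmetic():
  training_data_size, global_batch_size, num_epoch = 100000, 32, 3
  warmup_ratio, num_eval_per_epoch = 0.1, 2
  steps_per_epoch = training_data_size // global_batch_size    # 3125
  train_steps = steps_per_epoch * num_epoch                     # 9375
  warmup_steps = int(train_steps * warmup_ratio)                # 937
  validation_interval = steps_per_epoch // num_eval_per_epoch   # 1562
  return steps_per_epoch, train_steps, warmup_steps, validation_interval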
def load_model_config_file(model_config_file: str) -> Dict[str, Any]:
"""Loads bert config json file or `encoders.EncoderConfig` in yaml file."""
if not model_config_file:
# model_config_file may be empty when using tf.hub.
return {}
try:
encoder_config = encoders.EncoderConfig()
encoder_config = hyperparams.override_params_dict(
encoder_config, model_config_file, is_strict=True)
logging.info('Load encoder_config yaml file from %s.', model_config_file)
return encoder_config.as_dict()
except KeyError:
pass
logging.info('Load bert config json file from %s', model_config_file)
with tf.io.gfile.GFile(model_config_file, 'r') as reader:
text = reader.read()
config = json.loads(text)
def get_value(key1, key2):
if key1 in config and key2 in config:
raise ValueError('Unexpected that both %s and %s are in config.' %
(key1, key2))
return config[key1] if key1 in config else config[key2]
def get_value_or_none(key):
return config[key] if key in config else None
# Support both legacy bert_config attributes and the new config attributes.
return {
'bert': {
'attention_dropout_rate':
get_value('attention_dropout_rate',
'attention_probs_dropout_prob'),
'dropout_rate':
get_value('dropout_rate', 'hidden_dropout_prob'),
'hidden_activation':
get_value('hidden_activation', 'hidden_act'),
'hidden_size':
config['hidden_size'],
'embedding_size':
get_value_or_none('embedding_size'),
'initializer_range':
config['initializer_range'],
'intermediate_size':
config['intermediate_size'],
'max_position_embeddings':
config['max_position_embeddings'],
'num_attention_heads':
config['num_attention_heads'],
'num_layers':
get_value('num_layers', 'num_hidden_layers'),
'type_vocab_size':
config['type_vocab_size'],
'vocab_size':
config['vocab_size'],
}
}
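# Illustrative note, not part of the original helper: with the mapping above,
# a legacy bert_config.json field such as "hidden_dropout_prob" surfaces as
# {"bert": {"dropout_rate": ...}}, while "num_hidden_layers" becomes
# {"bert": {"num_layers": ...}}; a new-style yaml file is instead parsed
# directly into an `encoders.EncoderConfig` dict.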
def override_sentence_prediction_task_config(
task_cfg: sentence_prediction.SentencePredictionConfig,
model_config_file: str,
init_checkpoint: str,
hub_module_url: str,
global_batch_size: int,
train_input_path: str,
validation_input_path: str,
seq_length: int,
num_classes: int,
metric_type: Optional[str] = 'accuracy',
label_type: Optional[str] = 'int'):
"""Overrides a `SentencePredictionConfig` object."""
task_cfg.override({
'init_checkpoint': init_checkpoint,
'metric_type': metric_type,
'model': {
'num_classes': num_classes,
'encoder': load_model_config_file(model_config_file),
},
'hub_module_url': hub_module_url,
'train_data': {
'drop_remainder': True,
'global_batch_size': global_batch_size,
'input_path': train_input_path,
'is_training': True,
'seq_length': seq_length,
'label_type': label_type,
},
'validation_data': {
'drop_remainder': False,
'global_batch_size': global_batch_size,
'input_path': validation_input_path,
'is_training': False,
'seq_length': seq_length,
'label_type': label_type,
}
})
def override_qa_task_config(
task_cfg: question_answering.QuestionAnsweringConfig,
model_config_file: str, init_checkpoint: str, hub_module_url: str,
global_batch_size: int, train_input_path: str, validation_input_path: str,
seq_length: int, tokenization: str, vocab_file: str, do_lower_case: bool,
version_2_with_negative: bool):
"""Overrides a `QuestionAnsweringConfig` object."""
task_cfg.override({
'init_checkpoint': init_checkpoint,
'model': {
'encoder': load_model_config_file(model_config_file),
},
'hub_module_url': hub_module_url,
'train_data': {
'drop_remainder': True,
'global_batch_size': global_batch_size,
'input_path': train_input_path,
'is_training': True,
'seq_length': seq_length,
},
'validation_data': {
'do_lower_case': do_lower_case,
'drop_remainder': False,
'global_batch_size': global_batch_size,
'input_path': validation_input_path,
'is_training': False,
'seq_length': seq_length,
'tokenization': tokenization,
'version_2_with_negative': version_2_with_negative,
'vocab_file': vocab_file,
}
})
def override_tagging_task_config(task_cfg: tagging.TaggingConfig,
model_config_file: str, init_checkpoint: str,
hub_module_url: str, global_batch_size: int,
train_input_path: str,
validation_input_path: str, seq_length: int,
class_names: List[str]):
"""Overrides a `TaggingConfig` object."""
task_cfg.override({
'init_checkpoint': init_checkpoint,
'model': {
'encoder': load_model_config_file(model_config_file),
},
'hub_module_url': hub_module_url,
'train_data': {
'drop_remainder': True,
'global_batch_size': global_batch_size,
'input_path': train_input_path,
'is_training': True,
'seq_length': seq_length,
},
'validation_data': {
'drop_remainder': False,
'global_batch_size': global_batch_size,
'input_path': validation_input_path,
'is_training': False,
'seq_length': seq_length,
},
'class_names': class_names,
})
def write_glue_classification(task,
model,
input_file,
output_file,
predict_batch_size,
seq_length,
class_names,
label_type='int',
min_float_value=None,
max_float_value=None):
"""Makes classification predictions for glue and writes to output file.
Args:
task: `Task` instance.
model: `keras.Model` instance.
input_file: Input test data file path.
output_file: Output test data file path.
predict_batch_size: Batch size for prediction.
seq_length: Input sequence length.
class_names: List of string class names.
label_type: String denoting label type ('int', 'float'), defaults to 'int'.
min_float_value: If set, predictions will be min-clipped to this value (only
for regression when `label_type` is set to 'float'). Defaults to `None`
(no clipping).
max_float_value: If set, predictions will be max-clipped to this value (only
for regression when `label_type` is set to 'float'). Defaults to `None`
(no clipping).
"""
if label_type not in ('int', 'float'):
raise ValueError('Unsupported `label_type`. Given: %s, expected `int` or '
'`float`.' % label_type)
data_config = sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=input_file,
global_batch_size=predict_batch_size,
is_training=False,
seq_length=seq_length,
label_type=label_type,
drop_remainder=False,
include_example_id=True)
predictions = sentence_prediction.predict(task, data_config, model)
if label_type == 'float':
min_float_value = (-sys.float_info.max
if min_float_value is None else min_float_value)
max_float_value = (
sys.float_info.max if max_float_value is None else max_float_value)
# Clip predictions to range [min_float_value, max_float_value].
predictions = [
min(max(prediction, min_float_value), max_float_value)
for prediction in predictions
]
with tf.io.gfile.GFile(output_file, 'w') as writer:
writer.write('index\tprediction\n')
for index, prediction in enumerate(predictions):
if label_type == 'float':
# Regression.
writer.write('%d\t%.3f\n' % (index, prediction))
else:
# Classification.
writer.write('%d\t%s\n' % (index, class_names[prediction]))
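# Illustrative note, not part of the original helper: the TSV written above
# starts with a header line "index\tprediction" followed by one
# "<index>\t<class name>" line per example (values made up), or
# "<index>\t<float>" for regression tasks such as STS-B.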
def write_superglue_classification(task,
model,
input_file,
output_file,
predict_batch_size,
seq_length,
class_names,
label_type='int'):
"""Makes classification predictions for superglue and writes to output file.
Args:
task: `Task` instance.
model: `keras.Model` instance.
input_file: Input test data file path.
output_file: Output test data file path.
predict_batch_size: Batch size for prediction.
seq_length: Input sequence length.
class_names: List of string class names.
label_type: String denoting label type ('int', 'float'), defaults to 'int'.
"""
  if label_type != 'int':
    raise ValueError(
        'Unsupported `label_type`. Given: %s, expected `int`.' % label_type)
data_config = sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=input_file,
global_batch_size=predict_batch_size,
is_training=False,
seq_length=seq_length,
label_type=label_type,
drop_remainder=False,
include_example_id=True)
predictions = sentence_prediction.predict(task, data_config, model)
with tf.io.gfile.GFile(output_file, 'w') as writer:
for index, prediction in enumerate(predictions):
if label_type == 'int':
# Classification.
writer.write('{"idx": %d, "label": %s}\n' %
(index, class_names[prediction]))
def write_xtreme_classification(task,
model,
input_file,
output_file,
predict_batch_size,
seq_length,
class_names,
translated_input_file=None,
test_time_aug_wgt=0.3):
"""Makes classification predictions for xtreme and writes to output file."""
data_config = sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=input_file,
seq_length=seq_length,
is_training=False,
label_type='int',
global_batch_size=predict_batch_size,
drop_remainder=False,
include_example_id=True)
if translated_input_file is not None:
data_config_aug = (
sentence_prediction_dataloader.SentencePredictionDataConfig(
input_path=translated_input_file,
seq_length=seq_length,
is_training=False,
label_type='int',
global_batch_size=predict_batch_size,
drop_remainder=False,
include_example_id=True))
else:
data_config_aug = None
predictions = sentence_prediction.predict(task, data_config, model,
data_config_aug, test_time_aug_wgt)
with tf.io.gfile.GFile(output_file, 'w') as writer:
for prediction in predictions:
writer.write('%s\n' % class_names[prediction])
def write_question_answering(task,
model,
input_file,
output_file,
predict_batch_size,
seq_length,
tokenization,
vocab_file,
do_lower_case,
version_2_with_negative=False):
"""Makes question answering predictions and writes to output file."""
data_config = question_answering_dataloader.QADataConfig(
do_lower_case=do_lower_case,
doc_stride=128,
drop_remainder=False,
global_batch_size=predict_batch_size,
input_path=input_file,
is_training=False,
query_length=64,
seq_length=seq_length,
tokenization=tokenization,
version_2_with_negative=version_2_with_negative,
vocab_file=vocab_file)
all_predictions, _, _ = question_answering.predict(task, data_config, model)
with tf.io.gfile.GFile(output_file, 'w') as writer:
writer.write(json.dumps(all_predictions, indent=4) + '\n')
def write_tagging(task, model, input_file, output_file, predict_batch_size,
seq_length):
"""Makes tagging predictions and writes to output file."""
data_config = tagging_dataloader.TaggingDataConfig(
input_path=input_file,
is_training=False,
seq_length=seq_length,
global_batch_size=predict_batch_size,
drop_remainder=False,
include_sentence_id=True)
results = tagging.predict(task, data_config, model)
class_names = task.task_config.class_names
last_sentence_id = -1
with tf.io.gfile.GFile(output_file, 'w') as writer:
for sentence_id, _, predict_ids in results:
token_labels = [class_names[x] for x in predict_ids]
assert sentence_id == last_sentence_id or (
sentence_id == last_sentence_id + 1)
if sentence_id != last_sentence_id and last_sentence_id != -1:
writer.write('\n')
writer.write('\n'.join(token_labels))
writer.write('\n')
last_sentence_id = sentence_id
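# Illustrative note, not part of the original helper: the file written above
# contains one predicted tag per token line, with a blank line separating
# consecutive sentences, e.g. (labels made up): "B-PER", "O", "", "B-LOC".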
| 16,761 | 36.58296 | 80 | py |
models | models-master/official/nlp/finetuning/superglue/run_superglue.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs prediction to generate submission files for SuperGLUE tasks."""
import functools
import json
import os
import pprint
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
# Imports registered experiment configs.
from official.common import registry_imports # pylint: disable=unused-import
from official.core import exp_factory
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling.hyperparams import params_dict
from official.nlp.finetuning import binary_helper
from official.nlp.finetuning.superglue import flags as superglue_flags
# Device configs.
flags.DEFINE_string('distribution_strategy', 'tpu',
'The Distribution Strategy to use for training.')
flags.DEFINE_string(
'tpu', '',
'The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_integer('num_gpus', 1, 'The number of GPUs to use at each worker.')
FLAGS = flags.FLAGS
EXPERIMENT_TYPE = 'bert/sentence_prediction'
BEST_CHECKPOINT_EXPORT_SUBDIR = 'best_ckpt'
EVAL_METRIC_MAP = {
'AX-b': 'matthews_corrcoef',
'CB': 'cls_accuracy',
'COPA': 'cls_accuracy',
'MULTIRC': 'exact_match',
'RTE': 'cls_accuracy',
'WiC': 'cls_accuracy',
'WSC': 'cls_accuracy',
'BoolQ': 'cls_accuracy',
'ReCoRD': 'cls_accuracy',
'AX-g': 'cls_accuracy',
}
AXG_CLASS_NAMES = ['entailment', 'not_entailment']
RTE_CLASS_NAMES = ['entailment', 'not_entailment']
CB_CLASS_NAMES = ['entailment', 'neutral', 'contradiction']
BOOLQ_CLASS_NAMES = ['True', 'False']
def _override_exp_config_by_file(exp_config, exp_config_files):
"""Overrides an `ExperimentConfig` object by files."""
for exp_config_file in exp_config_files:
if not tf.io.gfile.exists(exp_config_file):
raise ValueError('%s does not exist.' % exp_config_file)
params_dict.override_params_dict(
exp_config, exp_config_file, is_strict=True)
return exp_config
def _override_exp_config_by_flags(exp_config, input_meta_data):
"""Overrides an `ExperimentConfig` object by flags."""
  if FLAGS.task_name == 'AX-b':
override_task_cfg_fn = functools.partial(
binary_helper.override_sentence_prediction_task_config,
num_classes=input_meta_data['num_labels'],
metric_type='matthews_corrcoef')
elif FLAGS.task_name in ('CB', 'COPA', 'RTE', 'WiC', 'WSC', 'BoolQ', 'ReCoRD',
'AX-g'):
override_task_cfg_fn = functools.partial(
binary_helper.override_sentence_prediction_task_config,
num_classes=input_meta_data['num_labels'])
else:
raise ValueError('Task %s not supported.' % FLAGS.task_name)
binary_helper.override_trainer_cfg(
exp_config.trainer,
learning_rate=FLAGS.learning_rate,
num_epoch=FLAGS.num_epoch,
global_batch_size=FLAGS.global_batch_size,
warmup_ratio=FLAGS.warmup_ratio,
training_data_size=input_meta_data['train_data_size'],
eval_data_size=input_meta_data['eval_data_size'],
num_eval_per_epoch=FLAGS.num_eval_per_epoch,
best_checkpoint_export_subdir=BEST_CHECKPOINT_EXPORT_SUBDIR,
best_checkpoint_eval_metric=EVAL_METRIC_MAP[FLAGS.task_name],
best_checkpoint_metric_comp='higher')
override_task_cfg_fn(
exp_config.task,
model_config_file=FLAGS.model_config_file,
init_checkpoint=FLAGS.init_checkpoint,
hub_module_url=FLAGS.hub_module_url,
global_batch_size=FLAGS.global_batch_size,
train_input_path=FLAGS.train_input_path,
validation_input_path=FLAGS.validation_input_path,
seq_length=input_meta_data['max_seq_length'])
return exp_config
def _get_exp_config(input_meta_data, exp_config_files):
"""Gets an `ExperimentConfig` object."""
exp_config = exp_factory.get_exp_config(EXPERIMENT_TYPE)
if exp_config_files:
logging.info(
'Loading `ExperimentConfig` from file, and flags will be ignored.')
exp_config = _override_exp_config_by_file(exp_config, exp_config_files)
else:
logging.info('Loading `ExperimentConfig` from flags.')
exp_config = _override_exp_config_by_flags(exp_config, input_meta_data)
exp_config.validate()
exp_config.lock()
pp = pprint.PrettyPrinter()
logging.info('Final experiment parameters: %s',
pp.pformat(exp_config.as_dict()))
return exp_config
def _write_submission_file(task, seq_length):
"""Writes submission files that can be uploaded to the leaderboard."""
tf.io.gfile.makedirs(os.path.dirname(FLAGS.test_output_path))
model = task.build_model()
ckpt_file = tf.train.latest_checkpoint(
os.path.join(FLAGS.model_dir, BEST_CHECKPOINT_EXPORT_SUBDIR))
logging.info('Restoring checkpoints from %s', ckpt_file)
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.read(ckpt_file).expect_partial()
write_fn = binary_helper.write_superglue_classification
write_fn_map = {
'RTE': functools.partial(write_fn, class_names=RTE_CLASS_NAMES),
'AX-g': functools.partial(write_fn, class_names=AXG_CLASS_NAMES),
'CB': functools.partial(write_fn, class_names=CB_CLASS_NAMES),
'BoolQ': functools.partial(write_fn, class_names=BOOLQ_CLASS_NAMES)
}
logging.info('Predicting %s', FLAGS.test_input_path)
write_fn_map[FLAGS.task_name](
task=task,
model=model,
input_file=FLAGS.test_input_path,
output_file=FLAGS.test_output_path,
predict_batch_size=(task.task_config.train_data.global_batch_size),
seq_length=seq_length)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
superglue_flags.validate_flags(FLAGS, file_exists_fn=tf.io.gfile.exists)
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
input_meta_data = json.loads(reader.read().decode('utf-8'))
with distribution_strategy.scope():
task = None
if 'train_eval' in FLAGS.mode:
logging.info('Starting training and eval...')
logging.info('Model dir: %s', FLAGS.model_dir)
exp_config = _get_exp_config(
input_meta_data=input_meta_data, exp_config_files=FLAGS.config_file)
train_utils.serialize_config(exp_config, FLAGS.model_dir)
task = task_factory.get_task(exp_config.task, logging_dir=FLAGS.model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode='train_and_eval',
params=exp_config,
model_dir=FLAGS.model_dir)
if 'predict' in FLAGS.mode:
logging.info('Starting predict...')
# When mode is `predict`, `task` will be None.
if task is None:
exp_config = _get_exp_config(
input_meta_data=input_meta_data,
exp_config_files=[os.path.join(FLAGS.model_dir, 'params.yaml')])
task = task_factory.get_task(
exp_config.task, logging_dir=FLAGS.model_dir)
_write_submission_file(task, input_meta_data['max_seq_length'])
if __name__ == '__main__':
superglue_flags.define_flags()
flags.mark_flag_as_required('mode')
flags.mark_flag_as_required('task_name')
app.run(main)
| 8,161 | 35.765766 | 80 | py |
models | models-master/official/nlp/finetuning/superglue/flags.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common flags for SuperGLUE finetuning binary."""
from typing import Callable
from absl import flags
from absl import logging
def define_flags():
"""Defines flags."""
# ===========================================================================
# SuperGlue binary flags.
# ===========================================================================
flags.DEFINE_enum(
'mode', 'train_eval_and_predict',
['train_eval_and_predict', 'train_eval', 'predict'],
'The mode to run the binary. If `train_eval_and_predict` '
'it will (1) train on the training data and (2) evaluate on '
'the validation data and (3) finally generate predictions '
'on the prediction data; if `train_eval`, it will only '
'run training and evaluation; if `predict`, it will only '
'run prediction using the model in `model_dir`.')
flags.DEFINE_enum('task_name', None, [
'AX-b',
'CB',
'COPA',
'MULTIRC',
'RTE',
'WiC',
'WSC',
'BoolQ',
'ReCoRD',
'AX-g',
], 'The type of SuperGLUE task.')
flags.DEFINE_string('train_input_path', None,
'The file path to the training data.')
flags.DEFINE_string('validation_input_path', None,
'The file path to the evaluation data.')
flags.DEFINE_string('test_input_path', None,
'The file path to the test input data.')
flags.DEFINE_string('test_output_path', None,
'The file path to the test output data.')
flags.DEFINE_string(
'model_dir', '', 'The model directory containing '
'subdirectories for each task. Only needed for "predict" '
'mode. For all other modes, if not provided, a unique '
'directory will be created automatically for each run.')
flags.DEFINE_string(
'input_meta_data_path', None, 'Path to file that contains '
'metadata about input file. It is output by the `create_finetuning_data` '
'binary. Required for all modes except "predict".')
flags.DEFINE_string('init_checkpoint', '',
'Initial checkpoint from a pre-trained BERT model.')
flags.DEFINE_string(
'model_config_file', '', 'The config file specifying the architecture '
'of the pre-trained model. The file can be either a bert_config.json '
'file or `encoders.EncoderConfig` in yaml file.')
flags.DEFINE_string(
'hub_module_url', '', 'TF-Hub path/url to a pretrained model. If '
'specified, `init_checkpoint` and `model_config_file` flag should not be '
'used.')
flags.DEFINE_multi_string('gin_file', None,
'List of paths to the gin config files.')
flags.DEFINE_multi_string(
'gin_params', None, 'Newline separated list of gin parameter bindings.')
flags.DEFINE_multi_string(
'config_file', None, 'This is the advanced usage to specify the '
'`ExperimentConfig` directly. When specified, '
'we will ignore FLAGS related to `ExperimentConfig` such as '
'`train_input_path`, `validation_input_path` and following hparams.')
# ===========================================================================
# Tuning hparams.
# ===========================================================================
flags.DEFINE_integer('global_batch_size', 32,
'Global batch size for train/eval/predict.')
flags.DEFINE_float('learning_rate', 3e-5, 'Initial learning rate.')
flags.DEFINE_integer('num_epoch', 3, 'Number of training epochs.')
flags.DEFINE_float('warmup_ratio', 0.1,
'Proportion of learning rate warmup steps.')
flags.DEFINE_integer('num_eval_per_epoch', 2,
'Number of evaluations to run per epoch.')
def validate_flags(flags_obj: flags.FlagValues, file_exists_fn: Callable[[str],
bool]):
"""Raises ValueError if any flags are misconfigured.
Args:
flags_obj: A `flags.FlagValues` object, usually from `flags.FLAG`.
file_exists_fn: A callable to decide if a file path exists or not.
"""
def _check_path_exists(flag_path, flag_name):
if not file_exists_fn(flag_path):
raise ValueError('Flag `%s` at %s does not exist.' %
(flag_name, flag_path))
def _validate_path(flag_path, flag_name):
if not flag_path:
raise ValueError('Flag `%s` must be provided in mode %s.' %
(flag_name, flags_obj.mode))
_check_path_exists(flag_path, flag_name)
if 'train' in flags_obj.mode:
_validate_path(flags_obj.train_input_path, 'train_input_path')
_validate_path(flags_obj.input_meta_data_path, 'input_meta_data_path')
if flags_obj.gin_file:
for gin_file in flags_obj.gin_file:
_check_path_exists(gin_file, 'gin_file')
if flags_obj.config_file:
for config_file in flags_obj.config_file:
_check_path_exists(config_file, 'config_file')
if 'eval' in flags_obj.mode:
_validate_path(flags_obj.validation_input_path, 'validation_input_path')
if flags_obj.mode == 'predict':
# model_dir is only needed strictly in 'predict' mode.
_validate_path(flags_obj.model_dir, 'model_dir')
if 'predict' in flags_obj.mode:
_validate_path(flags_obj.test_input_path, 'test_input_path')
if not flags_obj.config_file and flags_obj.mode != 'predict':
if flags_obj.hub_module_url:
if flags_obj.init_checkpoint or flags_obj.model_config_file:
raise ValueError(
'When `hub_module_url` is specified, `init_checkpoint` and '
'`model_config_file` should be empty.')
logging.info('Using the pretrained tf.hub from %s',
flags_obj.hub_module_url)
else:
if not (flags_obj.init_checkpoint and flags_obj.model_config_file):
raise ValueError('Both `init_checkpoint` and `model_config_file` '
'should be specified if `config_file` is not '
'specified.')
_validate_path(flags_obj.model_config_file, 'model_config_file')
logging.info(
'Using the pretrained checkpoint from %s and model_config_file from '
'%s.', flags_obj.init_checkpoint, flags_obj.model_config_file)
| 6,896 | 38.637931 | 80 | py |
models | models-master/official/nlp/finetuning/glue/run_glue.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs prediction to generate submission files for GLUE tasks."""
import functools
import json
import os
import pprint
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
# Imports registered experiment configs.
from official.common import registry_imports # pylint: disable=unused-import
from official.core import exp_factory
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling.hyperparams import params_dict
from official.nlp.finetuning import binary_helper
from official.nlp.finetuning.glue import flags as glue_flags
# Device configs.
flags.DEFINE_string('distribution_strategy', 'tpu',
'The Distribution Strategy to use for training.')
flags.DEFINE_string(
'tpu', '',
'The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_integer('num_gpus', 1, 'The number of GPUs to use at each worker.')
_MODE = flags.DEFINE_enum(
'mode', 'train_eval_and_predict',
['train_eval_and_predict', 'train_eval', 'predict'],
'The mode to run the binary. If `train_eval_and_predict` '
'it will (1) train on the training data and (2) evaluate on '
'the validation data and (3) finally generate predictions '
'on the prediction data; if `train_eval`, it will only '
'run training and evaluation; if `predict`, it will only '
'run prediction using the model in `model_dir`.')
# TODO(kitsing) The `params_override` flag is currently not being used.
# Only declared to make xm_job_3p.XMTPUJob happy.
_PARAMS_OVERRIDE = flags.DEFINE_string(
'params_override', '', 'Overridden parameters.'
)
FLAGS = flags.FLAGS
EXPERIMENT_TYPE = 'bert/sentence_prediction'
BEST_CHECKPOINT_EXPORT_SUBDIR = 'best_ckpt'
EVAL_METRIC_MAP = {
'AX': 'matthews_corrcoef',
'COLA': 'matthews_corrcoef',
'MNLI': 'cls_accuracy',
'MRPC': 'f1',
'QNLI': 'cls_accuracy',
'QQP': 'f1',
'RTE': 'cls_accuracy',
'SST-2': 'cls_accuracy',
'STS-B': 'pearson_spearman_corr',
'WNLI': 'cls_accuracy',
}
AX_CLASS_NAMES = ['contradiction', 'entailment', 'neutral']
COLA_CLASS_NAMES = ['0', '1']
MNLI_CLASS_NAMES = ['contradiction', 'entailment', 'neutral']
MRPC_CLASS_NAMES = ['0', '1']
QNLI_CLASS_NAMES = ['entailment', 'not_entailment']
QQP_CLASS_NAMES = ['0', '1']
RTE_CLASS_NAMES = ['entailment', 'not_entailment']
SST_2_CLASS_NAMES = ['0', '1']
WNLI_CLASS_NAMES = ['0', '1']
def _override_exp_config_by_file(exp_config, exp_config_files):
"""Overrides an `ExperimentConfig` object by files."""
for exp_config_file in exp_config_files:
if not tf.io.gfile.exists(exp_config_file):
raise ValueError('%s does not exist.' % exp_config_file)
params_dict.override_params_dict(
exp_config, exp_config_file, is_strict=True)
return exp_config
def _override_exp_config_by_flags(exp_config, input_meta_data):
"""Overrides an `ExperimentConfig` object by flags."""
if FLAGS.task_name in ('AX', 'COLA',):
override_task_cfg_fn = functools.partial(
binary_helper.override_sentence_prediction_task_config,
num_classes=input_meta_data['num_labels'],
metric_type='matthews_corrcoef')
elif FLAGS.task_name in ('MNLI', 'QNLI', 'RTE', 'SST-2',
'WNLI'):
override_task_cfg_fn = functools.partial(
binary_helper.override_sentence_prediction_task_config,
num_classes=input_meta_data['num_labels'])
elif FLAGS.task_name in ('QQP', 'MRPC'):
override_task_cfg_fn = functools.partial(
binary_helper.override_sentence_prediction_task_config,
metric_type='f1',
num_classes=input_meta_data['num_labels'])
elif FLAGS.task_name in ('STS-B',):
override_task_cfg_fn = functools.partial(
binary_helper.override_sentence_prediction_task_config,
num_classes=1,
metric_type='pearson_spearman_corr',
label_type='float')
else:
raise ValueError('Task %s not supported.' % FLAGS.task_name)
binary_helper.override_trainer_cfg(
exp_config.trainer,
learning_rate=FLAGS.learning_rate,
num_epoch=FLAGS.num_epoch,
global_batch_size=FLAGS.global_batch_size,
warmup_ratio=FLAGS.warmup_ratio,
training_data_size=input_meta_data['train_data_size'],
eval_data_size=input_meta_data['eval_data_size'],
num_eval_per_epoch=FLAGS.num_eval_per_epoch,
best_checkpoint_export_subdir=BEST_CHECKPOINT_EXPORT_SUBDIR,
best_checkpoint_eval_metric=EVAL_METRIC_MAP[FLAGS.task_name],
best_checkpoint_metric_comp='higher')
override_task_cfg_fn(
exp_config.task,
model_config_file=FLAGS.model_config_file,
init_checkpoint=FLAGS.init_checkpoint,
hub_module_url=FLAGS.hub_module_url,
global_batch_size=FLAGS.global_batch_size,
train_input_path=FLAGS.train_input_path,
validation_input_path=FLAGS.validation_input_path,
seq_length=input_meta_data['max_seq_length'])
return exp_config
def _get_exp_config(input_meta_data, exp_config_files):
"""Gets an `ExperimentConfig` object."""
exp_config = exp_factory.get_exp_config(EXPERIMENT_TYPE)
if exp_config_files:
logging.info(
'Loading `ExperimentConfig` from file, and flags will be ignored.')
exp_config = _override_exp_config_by_file(exp_config, exp_config_files)
else:
logging.info('Loading `ExperimentConfig` from flags.')
exp_config = _override_exp_config_by_flags(exp_config, input_meta_data)
exp_config.validate()
exp_config.lock()
pp = pprint.PrettyPrinter()
logging.info('Final experiment parameters: %s',
pp.pformat(exp_config.as_dict()))
return exp_config
def _write_submission_file(task, seq_length):
"""Writes submission files that can be uploaded to the leaderboard."""
tf.io.gfile.makedirs(os.path.dirname(FLAGS.test_output_path))
model = task.build_model()
ckpt_file = tf.train.latest_checkpoint(
os.path.join(FLAGS.model_dir, BEST_CHECKPOINT_EXPORT_SUBDIR))
logging.info('Restoring checkpoints from %s', ckpt_file)
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.read(ckpt_file).expect_partial()
write_fn = binary_helper.write_glue_classification
write_fn_map = {
'AX':
functools.partial(
write_fn, class_names=AX_CLASS_NAMES),
'COLA':
functools.partial(
write_fn, class_names=COLA_CLASS_NAMES),
'MNLI':
functools.partial(
write_fn, class_names=MNLI_CLASS_NAMES),
'MRPC':
functools.partial(
write_fn, class_names=MRPC_CLASS_NAMES),
'QNLI':
functools.partial(
write_fn, class_names=QNLI_CLASS_NAMES),
'QQP':
functools.partial(
write_fn, class_names=QQP_CLASS_NAMES),
'RTE':
functools.partial(
write_fn, class_names=RTE_CLASS_NAMES),
'SST-2':
functools.partial(
write_fn, class_names=SST_2_CLASS_NAMES),
'STS-B':
# No class_names (regression), clip predictions to [0.0, 5.0] per glue
# benchmark grader.
functools.partial(
write_fn, class_names=None, label_type='float',
min_float_value=0.0, max_float_value=5.0),
'WNLI':
functools.partial(
write_fn, class_names=WNLI_CLASS_NAMES),
}
logging.info('Predicting %s', FLAGS.test_input_path)
write_fn_map[FLAGS.task_name](
task=task,
model=model,
input_file=FLAGS.test_input_path,
output_file=FLAGS.test_output_path,
predict_batch_size=(
task.task_config.train_data.global_batch_size),
seq_length=seq_length)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
glue_flags.validate_flags(FLAGS, file_exists_fn=tf.io.gfile.exists)
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
distribution_strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
input_meta_data = json.loads(reader.read().decode('utf-8'))
with distribution_strategy.scope():
task = None
if 'train_eval' in _MODE.value:
logging.info('Starting training and eval...')
logging.info('Model dir: %s', FLAGS.model_dir)
exp_config = _get_exp_config(
input_meta_data=input_meta_data,
exp_config_files=FLAGS.config_file)
train_utils.serialize_config(exp_config, FLAGS.model_dir)
task = task_factory.get_task(exp_config.task, logging_dir=FLAGS.model_dir)
train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode='train_and_eval',
params=exp_config,
model_dir=FLAGS.model_dir)
if 'predict' in _MODE.value:
logging.info('Starting predict...')
# When mode is `predict`, `task` will be None.
if task is None:
exp_config = _get_exp_config(
input_meta_data=input_meta_data,
exp_config_files=[os.path.join(FLAGS.model_dir, 'params.yaml')])
task = task_factory.get_task(
exp_config.task, logging_dir=FLAGS.model_dir)
_write_submission_file(task, input_meta_data['max_seq_length'])
if __name__ == '__main__':
glue_flags.define_flags()
flags.mark_flag_as_required('mode')
flags.mark_flag_as_required('task_name')
app.run(main)
| 10,358 | 35.22028 | 80 | py |
models | models-master/official/nlp/finetuning/glue/flags.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common flags for GLUE finetuning binary."""
from typing import Callable
from absl import flags
from absl import logging
def define_flags():
"""Defines flags."""
# ===========================================================================
# Glue binary flags.
# ===========================================================================
flags.DEFINE_enum('task_name', None, [
'AX', 'COLA', 'MNLI', 'MRPC', 'QNLI', 'QQP', 'RTE', 'SST-2', 'STS-B',
'WNLI'
], 'The type of GLUE task.')
flags.DEFINE_string('train_input_path', None,
'The file path to the training data.')
flags.DEFINE_string('validation_input_path', None,
'The file path to the evaluation data.')
flags.DEFINE_string('test_input_path', None,
'The file path to the test input data.')
flags.DEFINE_string('test_output_path', None,
'The file path to the test output data.')
flags.DEFINE_string('model_dir', '', 'The model directory containing '
'subdirectories for each task. Only needed for "predict" '
'mode. For all other modes, if not provided, a unique '
'directory will be created automatically for each run.')
flags.DEFINE_string(
'input_meta_data_path', None, 'Path to file that contains '
'metadata about input file. It is output by the `create_finetuning_data` '
'binary. Required for all modes except "predict".')
flags.DEFINE_string('init_checkpoint', '',
'Initial checkpoint from a pre-trained BERT model.')
flags.DEFINE_string(
'model_config_file', '', 'The config file specifying the architecture '
'of the pre-trained model. The file can be either a bert_config.json '
'file or `encoders.EncoderConfig` in yaml file.')
flags.DEFINE_string(
'hub_module_url', '', 'TF-Hub path/url to a pretrained model. If '
'specified, `init_checkpoint` and `model_config_file` flag should not be '
'used.')
flags.DEFINE_multi_string('gin_file', None,
'List of paths to the gin config files.')
flags.DEFINE_multi_string('gin_params', None,
'Newline separated list of gin parameter bindings.')
flags.DEFINE_multi_string(
'config_file', None, 'This is the advanced usage to specify the '
'`ExperimentConfig` directly. When specified, '
'we will ignore FLAGS related to `ExperimentConfig` such as '
'`train_input_path`, `validation_input_path` and following hparams.')
# ===========================================================================
# Tuning hparams.
# ===========================================================================
flags.DEFINE_integer('global_batch_size', 32,
'Global batch size for train/eval/predict.')
flags.DEFINE_float('learning_rate', 3e-5, 'Initial learning rate.')
flags.DEFINE_integer('num_epoch', 3, 'Number of training epochs.')
flags.DEFINE_float('warmup_ratio', 0.1,
'Proportion of learning rate warmup steps.')
flags.DEFINE_integer('num_eval_per_epoch', 2,
'Number of evaluations to run per epoch.')
def validate_flags(flags_obj: flags.FlagValues,
file_exists_fn: Callable[[str], bool]):
"""Raises ValueError if any flags are misconfigured.
Args:
flags_obj: A `flags.FlagValues` object, usually from `flags.FLAG`.
file_exists_fn: A callable to decide if a file path exists or not.
"""
def _check_path_exists(flag_path, flag_name):
if not file_exists_fn(flag_path):
raise ValueError('Flag `%s` at %s does not exist.' %
(flag_name, flag_path))
def _validate_path(flag_path, flag_name):
if not flag_path:
raise ValueError('Flag `%s` must be provided in mode %s.' %
(flag_name, flags_obj.mode))
_check_path_exists(flag_path, flag_name)
if 'train' in flags_obj.mode:
_validate_path(flags_obj.train_input_path, 'train_input_path')
_validate_path(flags_obj.input_meta_data_path, 'input_meta_data_path')
if flags_obj.gin_file:
for gin_file in flags_obj.gin_file:
_check_path_exists(gin_file, 'gin_file')
if flags_obj.config_file:
for config_file in flags_obj.config_file:
_check_path_exists(config_file, 'config_file')
if 'eval' in flags_obj.mode:
_validate_path(flags_obj.validation_input_path, 'validation_input_path')
if flags_obj.mode == 'predict':
# model_dir is only needed strictly in 'predict' mode.
_validate_path(flags_obj.model_dir, 'model_dir')
if 'predict' in flags_obj.mode:
_validate_path(flags_obj.test_input_path, 'test_input_path')
if not flags_obj.config_file and flags_obj.mode != 'predict':
if flags_obj.hub_module_url:
if flags_obj.init_checkpoint or flags_obj.model_config_file:
raise ValueError(
'When `hub_module_url` is specified, `init_checkpoint` and '
'`model_config_file` should be empty.')
logging.info(
'Using the pretrained tf.hub from %s', flags_obj.hub_module_url)
else:
if not (flags_obj.init_checkpoint and flags_obj.model_config_file):
raise ValueError('Both `init_checkpoint` and `model_config_file` '
'should be specified if `config_file` is not '
'specified.')
_validate_path(flags_obj.model_config_file, 'model_config_file')
logging.info(
'Using the pretrained checkpoint from %s and model_config_file from '
'%s.', flags_obj.init_checkpoint, flags_obj.model_config_file)
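# Illustrative usage sketch (not part of the original binary; assumes the
# caller also has TensorFlow imported for `tf.io.gfile.exists`): the flags are
# defined once, parsed by absl.app, and then validated against the real
# filesystem. Any Callable[[str], bool] can serve as `file_exists_fn`.
#
#   define_flags()
#   ...
#   validate_flags(flags.FLAGS, file_exists_fn=tf.io.gfile.exists)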
| 6,325 | 39.812903 | 80 | py |
models | models-master/official/nlp/serving/export_savedmodel_util.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common library to export a SavedModel from the export module."""
from typing import Dict, List, Optional, Union, Any
import tensorflow as tf
from official.core import export_base
get_timestamped_export_dir = export_base.get_timestamped_export_dir
def export(export_module: export_base.ExportModule,
function_keys: Union[List[str], Dict[str, str]],
export_savedmodel_dir: str,
checkpoint_path: Optional[str] = None,
timestamped: bool = True,
module_key: Optional[str] = None,
checkpoint_kwargs: Optional[Dict[str, Any]] = None) -> str:
"""Exports to SavedModel format.
Args:
    export_module: an ExportModule with the keras Model and serving tf.functions.
function_keys: a list of string keys to retrieve pre-defined serving
      signatures. The signature keys will be set with defaults. If a dictionary
is provided, the values will be used as signature keys.
export_savedmodel_dir: Output saved model directory.
checkpoint_path: Object-based checkpoint path or directory.
timestamped: Whether to export the savedmodel to a timestamped directory.
module_key: Optional string to identify a checkpoint object to load for the
model in the export module.
checkpoint_kwargs: Optional dict used as keyword args to create the
checkpoint object. Not used if module_key is present.
Returns:
The savedmodel directory path.
"""
save_options = tf.saved_model.SaveOptions(function_aliases={
'tpu_candidate': export_module.serve,
})
if module_key:
kwargs = {module_key: export_module.model}
checkpoint = tf.train.Checkpoint(**kwargs)
elif checkpoint_kwargs:
checkpoint = tf.train.Checkpoint(**checkpoint_kwargs)
else:
checkpoint = None
return export_base.export(
export_module,
function_keys,
export_savedmodel_dir,
checkpoint_path,
timestamped,
save_options,
checkpoint=checkpoint)
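# Illustrative usage sketch (placeholder paths and a hypothetical
# `my_export_module`; not part of the original library): export the default
# "serve" signature to a timestamped directory under the given base path.
#
#   export_dir = export(
#       my_export_module,
#       function_keys=["serve"],
#       export_savedmodel_dir="/tmp/exported_model",
#       checkpoint_path="/tmp/ckpt/model.ckpt-1")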
| 2,583 | 37 | 80 | py |
models | models-master/official/nlp/serving/export_savedmodel.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A binary/library to export TF-NLP serving `SavedModel`."""
import dataclasses
import os
from typing import Any, Dict, Text
from absl import app
from absl import flags
import yaml
from official.core import base_task
from official.core import task_factory
from official.modeling import hyperparams
from official.modeling.hyperparams import base_config
from official.nlp.serving import export_savedmodel_util
from official.nlp.serving import serving_modules
from official.nlp.tasks import masked_lm
from official.nlp.tasks import question_answering
from official.nlp.tasks import sentence_prediction
from official.nlp.tasks import tagging
from official.nlp.tasks import translation
FLAGS = flags.FLAGS
SERVING_MODULES = {
sentence_prediction.SentencePredictionTask:
serving_modules.SentencePrediction,
masked_lm.MaskedLMTask:
serving_modules.MaskedLM,
question_answering.QuestionAnsweringTask:
serving_modules.QuestionAnswering,
tagging.TaggingTask:
serving_modules.Tagging,
translation.TranslationTask:
serving_modules.Translation
}
def define_flags():
"""Defines flags."""
flags.DEFINE_string("task_name", "SentencePrediction", "The task to export.")
flags.DEFINE_string("config_file", None,
"The path to task/experiment yaml config file.")
flags.DEFINE_string(
"checkpoint_path", None,
"Object-based checkpoint path, from the training model directory.")
flags.DEFINE_string("export_savedmodel_dir", None,
"Output saved model directory.")
flags.DEFINE_string(
"serving_params", None,
"a YAML/JSON string or csv string for the serving parameters.")
flags.DEFINE_string(
"function_keys", None,
"A string key to retrieve pre-defined serving signatures.")
flags.DEFINE_string(
"module_key", None,
"For multi-task case, load the export module weights from a specific "
"checkpoint item.")
  flags.DEFINE_bool("convert_tpu", False,
                    "Whether to also convert the exported SavedModel for "
                    "TPU inference with the inference converter.")
flags.DEFINE_multi_integer("allowed_batch_size", None,
"Allowed batch sizes for batching ops.")
flags.DEFINE_integer("num_batch_threads", 4,
"Number of threads to do TPU batching.")
flags.DEFINE_integer("batch_timeout_micros", 100000,
"TPU batch function timeout in microseconds.")
flags.DEFINE_integer("max_enqueued_batches", 1000,
"Max number of batches in queue for TPU batching.")
def lookup_export_module(task: base_task.Task):
export_module_cls = SERVING_MODULES.get(task.__class__, None)
if export_module_cls is None:
    raise ValueError("No registered export module for the task: %s" %
                     task.__class__)
return export_module_cls
def create_export_module(*, task_name: Text, config_file: Text,
serving_params: Dict[Text, Any]):
"""Creates a ExportModule."""
task_config_cls = None
task_cls = None
# pylint: disable=protected-access
for key, value in task_factory._REGISTERED_TASK_CLS.items():
print(key.__name__)
if task_name in key.__name__:
task_config_cls, task_cls = key, value
break
if task_cls is None:
raise ValueError("Failed to identify the task class. The provided task "
f"name is {task_name}")
# pylint: enable=protected-access
# TODO(hongkuny): Figure out how to separate the task config from experiments.
@dataclasses.dataclass
class Dummy(base_config.Config):
task: task_config_cls = dataclasses.field(default_factory=task_config_cls)
dummy_exp = Dummy()
dummy_exp = hyperparams.override_params_dict(
dummy_exp, config_file, is_strict=False)
dummy_exp.task.validation_data = None
task = task_cls(dummy_exp.task)
model = task.build_model()
export_module_cls = lookup_export_module(task)
params = export_module_cls.Params(**serving_params)
return export_module_cls(params=params, model=model)
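# Illustrative usage sketch (the config path and serving params below are
# placeholders): build an export module for a sentence prediction task, the
# same way the accompanying unit tests do.
#
#   export_module = create_export_module(
#       task_name="SentencePrediction",
#       config_file="/path/to/experiment.yaml",
#       serving_params={"inputs_only": False, "parse_sequence_length": 128})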
def main(_):
serving_params = yaml.load(
hyperparams.nested_csv_str_to_json_str(FLAGS.serving_params),
Loader=yaml.FullLoader)
export_module = create_export_module(
task_name=FLAGS.task_name,
config_file=FLAGS.config_file,
serving_params=serving_params)
export_dir = export_savedmodel_util.export(
export_module,
function_keys=[FLAGS.function_keys],
checkpoint_path=FLAGS.checkpoint_path,
export_savedmodel_dir=FLAGS.export_savedmodel_dir,
module_key=FLAGS.module_key)
if FLAGS.convert_tpu:
# pylint: disable=g-import-not-at-top
from cloud_tpu.inference_converter_v2 import converter_options_v2_pb2
from cloud_tpu.inference_converter_v2.python import converter
tpu_dir = os.path.join(export_dir, "tpu")
batch_options = []
if FLAGS.allowed_batch_size is not None:
allowed_batch_sizes = sorted(FLAGS.allowed_batch_size)
batch_option = converter_options_v2_pb2.BatchOptionsV2(
num_batch_threads=FLAGS.num_batch_threads,
max_batch_size=allowed_batch_sizes[-1],
batch_timeout_micros=FLAGS.batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
max_enqueued_batches=FLAGS.max_enqueued_batches
)
batch_options.append(batch_option)
converter_options = converter_options_v2_pb2.ConverterOptionsV2(
tpu_functions=[
converter_options_v2_pb2.TpuFunction(function_alias="tpu_candidate")
],
batch_options=batch_options,
)
converter.ConvertSavedModel(export_dir, tpu_dir, converter_options)
if __name__ == "__main__":
define_flags()
app.run(main)
| 6,225 | 36.281437 | 80 | py |
models | models-master/official/nlp/serving/serving_modules.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Serving export modules for TF Model Garden NLP models."""
# pylint:disable=missing-class-docstring
import dataclasses
from typing import Dict, List, Optional, Text
import tensorflow as tf
import tensorflow_text as tf_text
from official.core import export_base
from official.modeling.hyperparams import base_config
from official.nlp.data import sentence_prediction_dataloader
def features_to_int32(features: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
  """Converts tf.int64 features to tf.int32, keeping other features the same.
tf.Example only supports tf.int64, but the TPU only supports tf.int32.
Args:
features: Input tensor dictionary.
Returns:
Features with tf.int64 converted to tf.int32.
"""
converted_features = {}
for name, tensor in features.items():
if tensor.dtype == tf.int64:
converted_features[name] = tf.cast(tensor, tf.int32)
else:
converted_features[name] = tensor
return converted_features
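# Worked example (illustrative): int64 features are cast to int32, all other
# dtypes pass through unchanged.
#
#   features = {"input_word_ids": tf.constant([1, 2], dtype=tf.int64),
#               "text": tf.constant(["hello"])}
#   converted = features_to_int32(features)
#   # converted["input_word_ids"].dtype == tf.int32
#   # converted["text"].dtype == tf.string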
class SentencePrediction(export_base.ExportModule):
"""The export module for the sentence prediction task."""
@dataclasses.dataclass
class Params(base_config.Config):
inputs_only: bool = True
parse_sequence_length: Optional[int] = None
use_v2_feature_names: bool = True
# For text input processing.
text_fields: Optional[List[str]] = None
# Either specify these values for preprocessing by Python code...
tokenization: str = "WordPiece" # WordPiece or SentencePiece
# Text vocab file if tokenization is WordPiece, or sentencepiece.ModelProto
# file if tokenization is SentencePiece.
vocab_file: str = ""
lower_case: bool = True
# ...or load preprocessing from a SavedModel at this location.
preprocessing_hub_module_url: str = ""
def __init__(self, params, model: tf.keras.Model, inference_step=None):
super().__init__(params, model, inference_step)
if params.use_v2_feature_names:
self.input_word_ids_field = "input_word_ids"
self.input_type_ids_field = "input_type_ids"
else:
self.input_word_ids_field = "input_ids"
self.input_type_ids_field = "segment_ids"
if params.text_fields:
self._text_processor = sentence_prediction_dataloader.TextProcessor(
seq_length=params.parse_sequence_length,
vocab_file=params.vocab_file,
tokenization=params.tokenization,
lower_case=params.lower_case,
preprocessing_hub_module_url=params.preprocessing_hub_module_url)
def _serve_tokenized_input(self,
input_word_ids,
input_mask=None,
input_type_ids=None) -> tf.Tensor:
if input_type_ids is None:
# Requires CLS token is the first token of inputs.
input_type_ids = tf.zeros_like(input_word_ids)
if input_mask is None:
# The mask has 1 for real tokens and 0 for padding tokens.
input_mask = tf.where(
tf.equal(input_word_ids, 0), tf.zeros_like(input_word_ids),
tf.ones_like(input_word_ids))
inputs = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
return self.inference_step(inputs)
@tf.function
def serve(self,
input_word_ids,
input_mask=None,
input_type_ids=None) -> Dict[str, tf.Tensor]:
return dict(
outputs=self._serve_tokenized_input(input_word_ids, input_mask,
input_type_ids))
@tf.function
def serve_probability(self,
input_word_ids,
input_mask=None,
input_type_ids=None) -> Dict[str, tf.Tensor]:
return dict(
outputs=tf.nn.softmax(
self._serve_tokenized_input(input_word_ids, input_mask,
input_type_ids)))
@tf.function
def serve_examples(self, inputs) -> Dict[str, tf.Tensor]:
sequence_length = self.params.parse_sequence_length
inputs_only = self.params.inputs_only
name_to_features = {
self.input_word_ids_field:
tf.io.FixedLenFeature([sequence_length], tf.int64),
}
if not inputs_only:
name_to_features.update({
"input_mask":
tf.io.FixedLenFeature([sequence_length], tf.int64),
self.input_type_ids_field:
tf.io.FixedLenFeature([sequence_length], tf.int64)
})
features = tf.io.parse_example(inputs, name_to_features)
features = features_to_int32(features)
return self.serve(
features[self.input_word_ids_field],
input_mask=None if inputs_only else features["input_mask"],
input_type_ids=None
if inputs_only else features[self.input_type_ids_field])
@tf.function
def serve_text_examples(self, inputs) -> Dict[str, tf.Tensor]:
name_to_features = {}
for text_field in self.params.text_fields:
name_to_features[text_field] = tf.io.FixedLenFeature([], tf.string)
features = tf.io.parse_example(inputs, name_to_features)
segments = [features[x] for x in self.params.text_fields]
model_inputs = self._text_processor(segments)
if self.params.inputs_only:
return self.serve(input_word_ids=model_inputs["input_word_ids"])
return self.serve(**model_inputs)
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
signatures = {}
valid_keys = ("serve", "serve_examples", "serve_text_examples")
for func_key, signature_key in function_keys.items():
if func_key not in valid_keys:
raise ValueError("Invalid function key for the module: %s with key %s. "
"Valid keys are: %s" %
(self.__class__, func_key, valid_keys))
if func_key == "serve":
if self.params.inputs_only:
signatures[signature_key] = self.serve.get_concrete_function(
input_word_ids=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_word_ids"))
else:
signatures[signature_key] = self.serve.get_concrete_function(
input_word_ids=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_word_ids"),
input_mask=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_mask"),
input_type_ids=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_type_ids"))
if func_key == "serve_examples":
signatures[signature_key] = self.serve_examples.get_concrete_function(
tf.TensorSpec(shape=[None], dtype=tf.string, name="examples"))
if func_key == "serve_text_examples":
signatures[
signature_key] = self.serve_text_examples.get_concrete_function(
tf.TensorSpec(shape=[None], dtype=tf.string, name="examples"))
return signatures
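# Illustrative usage sketch (mirrors the accompanying unit tests): request
# both the tensor-input and tf.Example signatures from this module.
#
#   signatures = export_module.get_inference_signatures(
#       {"serve": "serving_default", "serve_examples": "serving_examples"})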
class MaskedLM(export_base.ExportModule):
"""The export module for the Bert Pretrain (MaskedLM) task."""
def __init__(self, params, model: tf.keras.Model, inference_step=None):
super().__init__(params, model, inference_step)
if params.use_v2_feature_names:
self.input_word_ids_field = "input_word_ids"
self.input_type_ids_field = "input_type_ids"
else:
self.input_word_ids_field = "input_ids"
self.input_type_ids_field = "segment_ids"
@dataclasses.dataclass
class Params(base_config.Config):
cls_head_name: str = "next_sentence"
use_v2_feature_names: bool = True
parse_sequence_length: Optional[int] = None
max_predictions_per_seq: Optional[int] = None
@tf.function
def serve(self, input_word_ids, input_mask, input_type_ids,
masked_lm_positions) -> Dict[str, tf.Tensor]:
inputs = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids,
masked_lm_positions=masked_lm_positions)
outputs = self.inference_step(inputs)
return dict(classification=outputs[self.params.cls_head_name])
@tf.function
def serve_examples(self, inputs) -> Dict[str, tf.Tensor]:
sequence_length = self.params.parse_sequence_length
max_predictions_per_seq = self.params.max_predictions_per_seq
name_to_features = {
self.input_word_ids_field:
tf.io.FixedLenFeature([sequence_length], tf.int64),
"input_mask":
tf.io.FixedLenFeature([sequence_length], tf.int64),
self.input_type_ids_field:
tf.io.FixedLenFeature([sequence_length], tf.int64),
"masked_lm_positions":
tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64)
}
features = tf.io.parse_example(inputs, name_to_features)
features = features_to_int32(features)
return self.serve(
input_word_ids=features[self.input_word_ids_field],
input_mask=features["input_mask"],
        input_type_ids=features[self.input_type_ids_field],
masked_lm_positions=features["masked_lm_positions"])
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
signatures = {}
valid_keys = ("serve", "serve_examples")
for func_key, signature_key in function_keys.items():
if func_key not in valid_keys:
raise ValueError("Invalid function key for the module: %s with key %s. "
"Valid keys are: %s" %
(self.__class__, func_key, valid_keys))
if func_key == "serve":
signatures[signature_key] = self.serve.get_concrete_function(
input_word_ids=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_word_ids"),
input_mask=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_mask"),
input_type_ids=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_type_ids"),
masked_lm_positions=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="masked_lm_positions"))
if func_key == "serve_examples":
signatures[signature_key] = self.serve_examples.get_concrete_function(
tf.TensorSpec(shape=[None], dtype=tf.string, name="examples"))
return signatures
class QuestionAnswering(export_base.ExportModule):
"""The export module for the question answering task."""
@dataclasses.dataclass
class Params(base_config.Config):
parse_sequence_length: Optional[int] = None
use_v2_feature_names: bool = True
def __init__(self, params, model: tf.keras.Model, inference_step=None):
super().__init__(params, model, inference_step)
if params.use_v2_feature_names:
self.input_word_ids_field = "input_word_ids"
self.input_type_ids_field = "input_type_ids"
else:
self.input_word_ids_field = "input_ids"
self.input_type_ids_field = "segment_ids"
@tf.function
def serve(self,
input_word_ids,
input_mask=None,
input_type_ids=None) -> Dict[str, tf.Tensor]:
if input_mask is None:
# The mask has 1 for real tokens and 0 for padding tokens.
input_mask = tf.where(
tf.equal(input_word_ids, 0), tf.zeros_like(input_word_ids),
tf.ones_like(input_word_ids))
inputs = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
outputs = self.inference_step(inputs)
return dict(start_logits=outputs[0], end_logits=outputs[1])
@tf.function
def serve_examples(self, inputs) -> Dict[str, tf.Tensor]:
sequence_length = self.params.parse_sequence_length
name_to_features = {
self.input_word_ids_field:
tf.io.FixedLenFeature([sequence_length], tf.int64),
"input_mask":
tf.io.FixedLenFeature([sequence_length], tf.int64),
self.input_type_ids_field:
tf.io.FixedLenFeature([sequence_length], tf.int64)
}
features = tf.io.parse_example(inputs, name_to_features)
features = features_to_int32(features)
return self.serve(
input_word_ids=features[self.input_word_ids_field],
input_mask=features["input_mask"],
input_type_ids=features[self.input_type_ids_field])
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
signatures = {}
valid_keys = ("serve", "serve_examples")
for func_key, signature_key in function_keys.items():
if func_key not in valid_keys:
raise ValueError("Invalid function key for the module: %s with key %s. "
"Valid keys are: %s" %
(self.__class__, func_key, valid_keys))
if func_key == "serve":
signatures[signature_key] = self.serve.get_concrete_function(
input_word_ids=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_word_ids"),
input_mask=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_mask"),
input_type_ids=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_type_ids"))
if func_key == "serve_examples":
signatures[signature_key] = self.serve_examples.get_concrete_function(
tf.TensorSpec(shape=[None], dtype=tf.string, name="examples"))
return signatures
class Tagging(export_base.ExportModule):
"""The export module for the tagging task."""
@dataclasses.dataclass
class Params(base_config.Config):
parse_sequence_length: Optional[int] = None
use_v2_feature_names: bool = True
output_encoder_outputs: bool = False
def __init__(self, params, model: tf.keras.Model, inference_step=None):
super().__init__(params, model, inference_step)
if params.use_v2_feature_names:
self.input_word_ids_field = "input_word_ids"
self.input_type_ids_field = "input_type_ids"
else:
self.input_word_ids_field = "input_ids"
self.input_type_ids_field = "segment_ids"
@tf.function
def serve(self, input_word_ids, input_mask,
input_type_ids) -> Dict[str, tf.Tensor]:
inputs = dict(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
outputs = self.inference_step(inputs)
if self.params.output_encoder_outputs:
return dict(
logits=outputs["logits"], encoder_outputs=outputs["encoder_outputs"])
else:
return dict(logits=outputs["logits"])
@tf.function
def serve_examples(self, inputs) -> Dict[str, tf.Tensor]:
sequence_length = self.params.parse_sequence_length
name_to_features = {
self.input_word_ids_field:
tf.io.FixedLenFeature([sequence_length], tf.int64),
"input_mask":
tf.io.FixedLenFeature([sequence_length], tf.int64),
self.input_type_ids_field:
tf.io.FixedLenFeature([sequence_length], tf.int64)
}
features = tf.io.parse_example(inputs, name_to_features)
features = features_to_int32(features)
return self.serve(
input_word_ids=features[self.input_word_ids_field],
input_mask=features["input_mask"],
input_type_ids=features[self.input_type_ids_field])
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
signatures = {}
valid_keys = ("serve", "serve_examples")
for func_key, signature_key in function_keys.items():
if func_key not in valid_keys:
raise ValueError("Invalid function key for the module: %s with key %s. "
"Valid keys are: %s" %
(self.__class__, func_key, valid_keys))
if func_key == "serve":
signatures[signature_key] = self.serve.get_concrete_function(
input_word_ids=tf.TensorSpec(
shape=[None, None],
dtype=tf.int32,
name=self.input_word_ids_field),
input_mask=tf.TensorSpec(
shape=[None, None], dtype=tf.int32, name="input_mask"),
input_type_ids=tf.TensorSpec(
shape=[None, None],
dtype=tf.int32,
name=self.input_type_ids_field))
if func_key == "serve_examples":
signatures[signature_key] = self.serve_examples.get_concrete_function(
tf.TensorSpec(shape=[None], dtype=tf.string, name="examples"))
return signatures
class Translation(export_base.ExportModule):
"""The export module for the translation task."""
@dataclasses.dataclass
class Params(base_config.Config):
sentencepiece_model_path: str = ""
# Needs to be specified if padded_decode is True/on TPUs.
batch_size: Optional[int] = None
def __init__(self, params, model: tf.keras.Model, inference_step=None):
super().__init__(params, model, inference_step)
self._sp_tokenizer = tf_text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(params.sentencepiece_model_path, "rb").read(),
add_eos=True)
try:
empty_str_tokenized = self._sp_tokenizer.tokenize("").numpy()
except tf.errors.InternalError:
      raise ValueError(
          "EOS token not in tokenizer vocab. "
"Please make sure the tokenizer generates a single token for an "
"empty string.")
self._eos_id = empty_str_tokenized.item()
self._batch_size = params.batch_size
@tf.function
def serve(self, inputs) -> Dict[str, tf.Tensor]:
return self.inference_step(inputs)
@tf.function
  def serve_text(self, text: tf.Tensor) -> tf.Tensor:
tokenized = self._sp_tokenizer.tokenize(text).to_tensor(0)
return self._sp_tokenizer.detokenize(
self.serve({"inputs": tokenized})["outputs"])
def get_inference_signatures(self, function_keys: Dict[Text, Text]):
signatures = {}
    valid_keys = ("serve_text",)
for func_key, signature_key in function_keys.items():
if func_key not in valid_keys:
raise ValueError("Invalid function key for the module: %s with key %s. "
"Valid keys are: %s" %
(self.__class__, func_key, valid_keys))
if func_key == "serve_text":
signatures[signature_key] = self.serve_text.get_concrete_function(
tf.TensorSpec(shape=[self._batch_size],
dtype=tf.string, name="text"))
return signatures
| 18,889 | 39.976139 | 80 | py |
models | models-master/official/nlp/serving/export_savedmodel_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nlp.serving.export_saved_model."""
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.serving import export_savedmodel
from official.nlp.serving import export_savedmodel_util
from official.nlp.tasks import masked_lm
from official.nlp.tasks import sentence_prediction
from official.nlp.tasks import tagging
class ExportSavedModelTest(tf.test.TestCase, parameterized.TestCase):
def test_create_export_module(self):
export_module = export_savedmodel.create_export_module(
task_name="SentencePrediction",
config_file=None,
serving_params={
"inputs_only": False,
"parse_sequence_length": 10
})
self.assertEqual(export_module.name, "sentence_prediction")
self.assertFalse(export_module.params.inputs_only)
self.assertEqual(export_module.params.parse_sequence_length, 10)
def test_sentence_prediction(self):
config = sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
num_classes=2))
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
ckpt = tf.train.Checkpoint(model=model)
ckpt_path = ckpt.save(self.get_temp_dir())
export_module_cls = export_savedmodel.lookup_export_module(task)
serving_params = {"inputs_only": False}
params = export_module_cls.Params(**serving_params)
export_module = export_module_cls(params=params, model=model)
export_dir = export_savedmodel_util.export(
export_module,
function_keys=["serve"],
checkpoint_path=ckpt_path,
export_savedmodel_dir=self.get_temp_dir())
imported = tf.saved_model.load(export_dir)
serving_fn = imported.signatures["serving_default"]
dummy_ids = tf.ones((1, 5), dtype=tf.int32)
inputs = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
ref_outputs = model(inputs)
outputs = serving_fn(**inputs)
self.assertAllClose(ref_outputs, outputs["outputs"])
self.assertEqual(outputs["outputs"].shape, (1, 2))
def test_masked_lm(self):
config = masked_lm.MaskedLMConfig(
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
cls_heads=[
bert.ClsHeadConfig(inner_dim=10, num_classes=2, name="foo")
]))
task = masked_lm.MaskedLMTask(config)
model = task.build_model()
ckpt = tf.train.Checkpoint(model=model)
ckpt_path = ckpt.save(self.get_temp_dir())
export_module_cls = export_savedmodel.lookup_export_module(task)
serving_params = {
"cls_head_name": "foo",
"parse_sequence_length": 10,
"max_predictions_per_seq": 5
}
params = export_module_cls.Params(**serving_params)
export_module = export_module_cls(params=params, model=model)
export_dir = export_savedmodel_util.export(
export_module,
function_keys={
"serve": "serving_default",
"serve_examples": "serving_examples"
},
checkpoint_path=ckpt_path,
export_savedmodel_dir=self.get_temp_dir())
imported = tf.saved_model.load(export_dir)
self.assertSameElements(imported.signatures.keys(),
["serving_default", "serving_examples"])
serving_fn = imported.signatures["serving_default"]
dummy_ids = tf.ones((1, 10), dtype=tf.int32)
dummy_pos = tf.ones((1, 5), dtype=tf.int32)
outputs = serving_fn(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_pos)
self.assertEqual(outputs["classification"].shape, (1, 2))
@parameterized.parameters(True, False)
def test_tagging(self, output_encoder_outputs):
hidden_size = 768
num_classes = 3
config = tagging.TaggingConfig(
model=tagging.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(
hidden_size=hidden_size, num_layers=1))),
class_names=["class_0", "class_1", "class_2"])
task = tagging.TaggingTask(config)
model = task.build_model()
ckpt = tf.train.Checkpoint(model=model)
ckpt_path = ckpt.save(self.get_temp_dir())
export_module_cls = export_savedmodel.lookup_export_module(task)
serving_params = {
"parse_sequence_length": 10,
}
params = export_module_cls.Params(
**serving_params, output_encoder_outputs=output_encoder_outputs)
export_module = export_module_cls(params=params, model=model)
export_dir = export_savedmodel_util.export(
export_module,
function_keys={
"serve": "serving_default",
"serve_examples": "serving_examples"
},
checkpoint_path=ckpt_path,
export_savedmodel_dir=self.get_temp_dir())
imported = tf.saved_model.load(export_dir)
self.assertCountEqual(imported.signatures.keys(),
["serving_default", "serving_examples"])
serving_fn = imported.signatures["serving_default"]
dummy_ids = tf.ones((1, 5), dtype=tf.int32)
inputs = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
outputs = serving_fn(**inputs)
self.assertEqual(outputs["logits"].shape, (1, 5, num_classes))
if output_encoder_outputs:
self.assertEqual(outputs["encoder_outputs"].shape, (1, 5, hidden_size))
if __name__ == "__main__":
tf.test.main()
| 6,513 | 38.478788 | 77 | py |
models | models-master/official/nlp/serving/serving_modules_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nlp.serving.serving_modules."""
import os
from absl.testing import parameterized
import tensorflow as tf
from sentencepiece import SentencePieceTrainer
from official.core import export_base
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.serving import serving_modules
from official.nlp.tasks import masked_lm
from official.nlp.tasks import question_answering
from official.nlp.tasks import sentence_prediction
from official.nlp.tasks import tagging
from official.nlp.tasks import translation
def _create_fake_serialized_examples(features_dict):
"""Creates a fake dataset."""
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_str_feature(value):
f = tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
return f
examples = []
for _ in range(10):
features = {}
for key, values in features_dict.items():
if isinstance(values, bytes):
features[key] = create_str_feature(values)
else:
features[key] = create_int_feature(values)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
examples.append(tf_example.SerializeToString())
return tf.constant(examples)
def _create_fake_vocab_file(vocab_file_path):
tokens = ["[PAD]"]
for i in range(1, 100):
tokens.append("[unused%d]" % i)
tokens.extend(["[UNK]", "[CLS]", "[SEP]", "[MASK]", "hello", "world"])
with tf.io.gfile.GFile(vocab_file_path, "w") as outfile:
outfile.write("\n".join(tokens))
def _train_sentencepiece(input_path, vocab_size, model_path, eos_id=1):
argstr = " ".join([
f"--input={input_path}", f"--vocab_size={vocab_size}",
"--character_coverage=0.995",
f"--model_prefix={model_path}", "--model_type=bpe",
"--bos_id=-1", "--pad_id=0", f"--eos_id={eos_id}", "--unk_id=2"
])
SentencePieceTrainer.Train(argstr)
def _generate_line_file(filepath, lines):
with tf.io.gfile.GFile(filepath, "w") as f:
for l in lines:
f.write("{}\n".format(l))
def _make_sentencepeice(output_dir):
src_lines = ["abc ede fg", "bbcd ef a g", "de f a a g"]
tgt_lines = ["dd cc a ef g", "bcd ef a g", "gef cd ba"]
sentencepeice_input_path = os.path.join(output_dir, "inputs.txt")
_generate_line_file(sentencepeice_input_path, src_lines + tgt_lines)
sentencepeice_model_prefix = os.path.join(output_dir, "sp")
_train_sentencepiece(sentencepeice_input_path, 11, sentencepeice_model_prefix)
sentencepeice_model_path = "{}.model".format(sentencepeice_model_prefix)
return sentencepeice_model_path
class ServingModulesTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
# use_v2_feature_names
True,
False)
def test_sentence_prediction(self, use_v2_feature_names):
if use_v2_feature_names:
input_word_ids_field = "input_word_ids"
input_type_ids_field = "input_type_ids"
else:
input_word_ids_field = "input_ids"
input_type_ids_field = "segment_ids"
config = sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
num_classes=2))
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
params = serving_modules.SentencePrediction.Params(
inputs_only=True,
parse_sequence_length=10,
use_v2_feature_names=use_v2_feature_names)
export_module = serving_modules.SentencePrediction(
params=params, model=model)
functions = export_module.get_inference_signatures({
"serve": "serving_default",
"serve_examples": "serving_examples"
})
self.assertSameElements(functions.keys(),
["serving_default", "serving_examples"])
dummy_ids = tf.ones((10, 10), dtype=tf.int32)
outputs = functions["serving_default"](dummy_ids)
self.assertEqual(outputs["outputs"].shape, (10, 2))
params = serving_modules.SentencePrediction.Params(
inputs_only=False,
parse_sequence_length=10,
use_v2_feature_names=use_v2_feature_names)
export_module = serving_modules.SentencePrediction(
params=params, model=model)
functions = export_module.get_inference_signatures({
"serve": "serving_default",
"serve_examples": "serving_examples"
})
outputs = functions["serving_default"](
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
self.assertEqual(outputs["outputs"].shape, (10, 2))
dummy_ids = tf.ones((10,), dtype=tf.int32)
examples = _create_fake_serialized_examples({
input_word_ids_field: dummy_ids,
"input_mask": dummy_ids,
input_type_ids_field: dummy_ids
})
outputs = functions["serving_examples"](examples)
self.assertEqual(outputs["outputs"].shape, (10, 2))
with self.assertRaises(ValueError):
_ = export_module.get_inference_signatures({"foo": None})
@parameterized.parameters(
# inputs_only
True,
False)
def test_sentence_prediction_text(self, inputs_only):
vocab_file_path = os.path.join(self.get_temp_dir(), "vocab.txt")
_create_fake_vocab_file(vocab_file_path)
config = sentence_prediction.SentencePredictionConfig(
model=sentence_prediction.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
num_classes=2))
task = sentence_prediction.SentencePredictionTask(config)
model = task.build_model()
params = serving_modules.SentencePrediction.Params(
inputs_only=inputs_only,
parse_sequence_length=10,
text_fields=["foo", "bar"],
vocab_file=vocab_file_path)
export_module = serving_modules.SentencePrediction(
params=params, model=model)
examples = _create_fake_serialized_examples({
"foo": b"hello world",
"bar": b"hello world"
})
functions = export_module.get_inference_signatures({
"serve_text_examples": "serving_default",
})
outputs = functions["serving_default"](examples)
self.assertEqual(outputs["outputs"].shape, (10, 2))
@parameterized.parameters(
# use_v2_feature_names
True,
False)
def test_masked_lm(self, use_v2_feature_names):
if use_v2_feature_names:
input_word_ids_field = "input_word_ids"
input_type_ids_field = "input_type_ids"
else:
input_word_ids_field = "input_ids"
input_type_ids_field = "segment_ids"
config = masked_lm.MaskedLMConfig(
model=bert.PretrainerConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name="next_sentence")
]))
task = masked_lm.MaskedLMTask(config)
model = task.build_model()
params = serving_modules.MaskedLM.Params(
parse_sequence_length=10,
max_predictions_per_seq=5,
use_v2_feature_names=use_v2_feature_names)
export_module = serving_modules.MaskedLM(params=params, model=model)
functions = export_module.get_inference_signatures({
"serve": "serving_default",
"serve_examples": "serving_examples"
})
self.assertSameElements(functions.keys(),
["serving_default", "serving_examples"])
dummy_ids = tf.ones((10, 10), dtype=tf.int32)
dummy_pos = tf.ones((10, 5), dtype=tf.int32)
outputs = functions["serving_default"](
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_pos)
self.assertEqual(outputs["classification"].shape, (10, 2))
dummy_ids = tf.ones((10,), dtype=tf.int32)
dummy_pos = tf.ones((5,), dtype=tf.int32)
examples = _create_fake_serialized_examples({
input_word_ids_field: dummy_ids,
"input_mask": dummy_ids,
input_type_ids_field: dummy_ids,
"masked_lm_positions": dummy_pos
})
outputs = functions["serving_examples"](examples)
self.assertEqual(outputs["classification"].shape, (10, 2))
@parameterized.parameters(
# use_v2_feature_names
True,
False)
def test_question_answering(self, use_v2_feature_names):
if use_v2_feature_names:
input_word_ids_field = "input_word_ids"
input_type_ids_field = "input_type_ids"
else:
input_word_ids_field = "input_ids"
input_type_ids_field = "segment_ids"
config = question_answering.QuestionAnsweringConfig(
model=question_answering.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(vocab_size=30522,
num_layers=1))),
validation_data=None)
task = question_answering.QuestionAnsweringTask(config)
model = task.build_model()
params = serving_modules.QuestionAnswering.Params(
parse_sequence_length=10, use_v2_feature_names=use_v2_feature_names)
export_module = serving_modules.QuestionAnswering(
params=params, model=model)
functions = export_module.get_inference_signatures({
"serve": "serving_default",
"serve_examples": "serving_examples"
})
self.assertSameElements(functions.keys(),
["serving_default", "serving_examples"])
dummy_ids = tf.ones((10, 10), dtype=tf.int32)
outputs = functions["serving_default"](
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
self.assertEqual(outputs["start_logits"].shape, (10, 10))
self.assertEqual(outputs["end_logits"].shape, (10, 10))
dummy_ids = tf.ones((10,), dtype=tf.int32)
examples = _create_fake_serialized_examples({
input_word_ids_field: dummy_ids,
"input_mask": dummy_ids,
input_type_ids_field: dummy_ids
})
outputs = functions["serving_examples"](examples)
self.assertEqual(outputs["start_logits"].shape, (10, 10))
self.assertEqual(outputs["end_logits"].shape, (10, 10))
@parameterized.parameters(
# (use_v2_feature_names, output_encoder_outputs)
(True, True),
(False, False))
def test_tagging(self, use_v2_feature_names, output_encoder_outputs):
if use_v2_feature_names:
input_word_ids_field = "input_word_ids"
input_type_ids_field = "input_type_ids"
else:
input_word_ids_field = "input_ids"
input_type_ids_field = "segment_ids"
hidden_size = 768
num_classes = 3
config = tagging.TaggingConfig(
model=tagging.ModelConfig(
encoder=encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(
hidden_size=hidden_size, num_layers=1))),
class_names=["class_0", "class_1", "class_2"])
task = tagging.TaggingTask(config)
model = task.build_model()
params = serving_modules.Tagging.Params(
parse_sequence_length=10,
use_v2_feature_names=use_v2_feature_names,
output_encoder_outputs=output_encoder_outputs)
export_module = serving_modules.Tagging(params=params, model=model)
functions = export_module.get_inference_signatures({
"serve": "serving_default",
"serve_examples": "serving_examples"
})
dummy_ids = tf.ones((10, 10), dtype=tf.int32)
outputs = functions["serving_default"](
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
self.assertEqual(outputs["logits"].shape, (10, 10, num_classes))
if output_encoder_outputs:
self.assertEqual(outputs["encoder_outputs"].shape, (10, 10, hidden_size))
dummy_ids = tf.ones((10,), dtype=tf.int32)
examples = _create_fake_serialized_examples({
input_word_ids_field: dummy_ids,
"input_mask": dummy_ids,
input_type_ids_field: dummy_ids
})
outputs = functions["serving_examples"](examples)
self.assertEqual(outputs["logits"].shape, (10, 10, num_classes))
if output_encoder_outputs:
self.assertEqual(outputs["encoder_outputs"].shape, (10, 10, hidden_size))
with self.assertRaises(ValueError):
_ = export_module.get_inference_signatures({"foo": None})
@parameterized.parameters(
(False, None),
(True, 2))
def test_translation(self, padded_decode, batch_size):
sp_path = _make_sentencepeice(self.get_temp_dir())
encdecoder = translation.EncDecoder(
num_attention_heads=4, intermediate_size=256)
config = translation.TranslationConfig(
model=translation.ModelConfig(
encoder=encdecoder,
decoder=encdecoder,
embedding_width=256,
padded_decode=padded_decode,
decode_max_length=100),
sentencepiece_model_path=sp_path,
)
task = translation.TranslationTask(config)
model = task.build_model()
params = serving_modules.Translation.Params(
sentencepiece_model_path=sp_path, batch_size=batch_size)
export_module = serving_modules.Translation(params=params, model=model)
functions = export_module.get_inference_signatures({
"serve_text": "serving_default"
})
outputs = functions["serving_default"](tf.constant(["abcd", "ef gh"]))
self.assertEqual(outputs.shape, (2,))
self.assertEqual(outputs.dtype, tf.string)
tmp_dir = self.get_temp_dir()
tmp_dir = os.path.join(tmp_dir, "padded_decode", str(padded_decode))
export_base_dir = os.path.join(tmp_dir, "export")
ckpt_dir = os.path.join(tmp_dir, "ckpt")
ckpt_path = tf.train.Checkpoint(model=model).save(ckpt_dir)
export_dir = export_base.export(export_module,
{"serve_text": "serving_default"},
export_base_dir, ckpt_path)
loaded = tf.saved_model.load(export_dir)
infer = loaded.signatures["serving_default"]
out = infer(text=tf.constant(["abcd", "ef gh"]))
self.assertLen(out["output_0"], 2)
if __name__ == "__main__":
tf.test.main()
| 15,068 | 37.441327 | 80 | py |
models | models-master/official/nlp/serving/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 610 | 37.1875 | 74 | py |
models | models-master/official/nlp/metrics/bleu_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test functions in compute_blue.py."""
import tempfile
import tensorflow as tf
from official.nlp.metrics import bleu
class ComputeBleuTest(tf.test.TestCase):
def _create_temp_file(self, text):
temp_file = tempfile.NamedTemporaryFile(delete=False)
with tf.io.gfile.GFile(temp_file.name, "w") as w:
w.write(text)
return temp_file.name
def test_bleu_same(self):
ref = self._create_temp_file("test 1 two 3\nmore tests!")
hyp = self._create_temp_file("test 1 two 3\nmore tests!")
uncased_score = bleu.bleu_wrapper(ref, hyp, False)
cased_score = bleu.bleu_wrapper(ref, hyp, True)
self.assertEqual(100, uncased_score)
self.assertEqual(100, cased_score)
def test_bleu_same_different_case(self):
ref = self._create_temp_file("Test 1 two 3\nmore tests!")
hyp = self._create_temp_file("test 1 two 3\nMore tests!")
uncased_score = bleu.bleu_wrapper(ref, hyp, False)
cased_score = bleu.bleu_wrapper(ref, hyp, True)
self.assertEqual(100, uncased_score)
self.assertLess(cased_score, 100)
def test_bleu_different(self):
ref = self._create_temp_file("Testing\nmore tests!")
hyp = self._create_temp_file("Dog\nCat")
uncased_score = bleu.bleu_wrapper(ref, hyp, False)
cased_score = bleu.bleu_wrapper(ref, hyp, True)
self.assertLess(uncased_score, 100)
self.assertLess(cased_score, 100)
def test_bleu_tokenize(self):
s = "Test0, 1 two, 3"
tokenized = bleu.bleu_tokenize(s)
self.assertEqual(["Test0", ",", "1", "two", ",", "3"], tokenized)
def test_bleu_list(self):
ref = ["test 1 two 3", "more tests!"]
hyp = ["test 1 two 3", "More tests!"]
uncased_score = bleu.bleu_on_list(ref, hyp, False)
cased_score = bleu.bleu_on_list(ref, hyp, True)
self.assertEqual(uncased_score, 100)
self.assertLess(cased_score, 100)
if __name__ == "__main__":
tf.test.main()
| 2,498 | 33.232877 | 74 | py |
models | models-master/official/nlp/metrics/bleu.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to compute official BLEU score.
Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
"""
import collections
import math
import re
import sys
import unicodedata
import numpy as np
import tensorflow as tf
class UnicodeRegex(object):
"""Ad-hoc hack to recognize all punctuation and symbols."""
def __init__(self):
punctuation = self.property_chars("P")
self.nondigit_punct_re = re.compile(r"([^\d])([" + punctuation + r"])")
self.punct_nondigit_re = re.compile(r"([" + punctuation + r"])([^\d])")
self.symbol_re = re.compile("([" + self.property_chars("S") + "])")
def property_chars(self, prefix):
return "".join(
chr(x)
for x in range(sys.maxunicode)
if unicodedata.category(chr(x)).startswith(prefix))
uregex = UnicodeRegex()
def bleu_tokenize(string):
r"""Tokenize a string following the official BLEU implementation.
  See https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983
In our case, the input string is expected to be just one line
and no HTML entities de-escaping is needed.
So we just tokenize on punctuation and symbols,
except when a punctuation is preceded and followed by a digit
(e.g. a comma/dot as a thousand/decimal separator).
  Note that a number (e.g. a year) followed by a dot at the end of a sentence
is NOT tokenized,
i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g`
does not match this case (unless we add a space after each sentence).
However, this error is already in the original mteval-v14.pl
and we want to be consistent with it.
Args:
string: the input string
Returns:
a list of tokens
"""
string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string)
string = uregex.punct_nondigit_re.sub(r" \1 \2", string)
string = uregex.symbol_re.sub(r" \1 ", string)
return string.split()
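# Worked example (taken from the accompanying unit test): punctuation and
# symbols are split out, except when a separator sits between two digits.
#
#   bleu_tokenize("Test0, 1 two, 3")
#   # -> ["Test0", ",", "1", "two", ",", "3"]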
def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False):
"""Compute BLEU for two files (reference and hypothesis translation)."""
ref_lines = tf.io.gfile.GFile(ref_filename).read().strip().splitlines()
hyp_lines = tf.io.gfile.GFile(hyp_filename).read().strip().splitlines()
return bleu_on_list(ref_lines, hyp_lines, case_sensitive)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
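# Worked example (illustrative):
#
#   _get_ngrams_with_counter(["a", "b", "a"], max_order=2)
#   # -> Counter({("a",): 2, ("b",): 1, ("a", "b"): 1, ("b", "a"): 1})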
def compute_bleu(reference_corpus,
translation_corpus,
max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each reference
should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation should
be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram, min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) -
1] += translation_ngram_counts[ngram]
precisions = [0] * max_order
smooth = 1.0
for i in range(0, max_order):
if possible_matches_by_order[i] > 0:
if matches_by_order[i] > 0:
precisions[i] = float(
matches_by_order[i]) / possible_matches_by_order[i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = 0. if ratio < 1e-6 else math.exp(1 -
1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
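# Worked example (illustrative): a translation identical to its single
# reference scores 1.0, since every n-gram order with a possible match has
# precision 1 and the brevity penalty is 1.
#
#   compute_bleu([["the", "cat", "sat"]], [["the", "cat", "sat"]])
#   # -> 1.0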
def bleu_on_list(ref_lines, hyp_lines, case_sensitive=False):
"""Compute BLEU for two list of strings (reference and hypothesis)."""
if len(ref_lines) != len(hyp_lines):
raise ValueError(
"Reference and translation files have different number of "
"lines (%d VS %d). If training only a few steps (100-200), the "
"translation may be empty." % (len(ref_lines), len(hyp_lines)))
if not case_sensitive:
ref_lines = [x.lower() for x in ref_lines]
hyp_lines = [x.lower() for x in hyp_lines]
ref_tokens = [bleu_tokenize(x) for x in ref_lines]
hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]
return compute_bleu(ref_tokens, hyp_tokens) * 100
| 6,577 | 34.176471 | 88 | py |
models | models-master/official/nlp/metrics/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/nlp/configs/bert.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-head BERT encoder network with classification heads.
Includes configurations and instantiation methods.
"""
from typing import List, Optional, Text
import dataclasses
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
@dataclasses.dataclass
class ClsHeadConfig(base_config.Config):
inner_dim: int = 0
num_classes: int = 2
activation: Optional[Text] = "tanh"
dropout_rate: float = 0.0
cls_token_idx: int = 0
name: Optional[Text] = None
@dataclasses.dataclass
class PretrainerConfig(base_config.Config):
"""Pretrainer configuration."""
encoder: encoders.EncoderConfig = dataclasses.field(
default_factory=encoders.EncoderConfig
)
cls_heads: List[ClsHeadConfig] = dataclasses.field(default_factory=list)
mlm_activation: str = "gelu"
mlm_initializer_range: float = 0.02
# Currently only used for mobile bert.
mlm_output_weights_use_proj: bool = False
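# Illustrative example (values are placeholders): a pretrainer with a single
# next-sentence classification head, matching how the serving and export
# unit tests construct this config.
#
#   config = PretrainerConfig(
#       encoder=encoders.EncoderConfig(
#           bert=encoders.BertEncoderConfig(vocab_size=30522, num_layers=1)),
#       cls_heads=[
#           ClsHeadConfig(inner_dim=10, num_classes=2, name="next_sentence")
#       ])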
| 1,551 | 31.333333 | 74 | py |
models | models-master/official/nlp/configs/wmt_transformer_experiments.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-doc-return-or-yield,line-too-long
"""WMT translation configurations."""
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.data import wmt_dataloader
from official.nlp.tasks import translation
@exp_factory.register_config_factory('wmt_transformer/large')
def wmt_transformer_large() -> cfg.ExperimentConfig:
"""WMT Transformer Large.
Please refer to
tensorflow_models/official/nlp/data/train_sentencepiece.py
  to generate the sentencepiece model, and pass
  --params_override=task.sentencepiece_model_path='YOUR_PATH'
  to the train script.
"""
learning_rate = 2.0
hidden_size = 1024
learning_rate *= (hidden_size**-0.5)
warmup_steps = 16000
train_steps = 300000
token_batch_size = 24576
encdecoder = translation.EncDecoder(
num_attention_heads=16, intermediate_size=hidden_size * 4)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=translation.TranslationConfig(
model=translation.ModelConfig(
encoder=encdecoder,
decoder=encdecoder,
embedding_width=hidden_size,
padded_decode=True,
decode_max_length=100),
train_data=wmt_dataloader.WMTDataConfig(
tfds_name='wmt14_translate/de-en',
tfds_split='train',
src_lang='en',
tgt_lang='de',
is_training=True,
global_batch_size=token_batch_size,
static_batch=True,
max_seq_length=64
),
validation_data=wmt_dataloader.WMTDataConfig(
tfds_name='wmt14_translate/de-en',
tfds_split='test',
src_lang='en',
tgt_lang='de',
is_training=False,
global_batch_size=32,
static_batch=True,
max_seq_length=100,
),
sentencepiece_model_path=None,
),
trainer=cfg.TrainerConfig(
train_steps=train_steps,
validation_steps=-1,
steps_per_loop=1000,
summary_interval=1000,
checkpoint_interval=5000,
validation_interval=5000,
max_to_keep=1,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adam',
'adam': {
'beta_2': 0.997,
'epsilon': 1e-9,
},
},
'learning_rate': {
'type': 'power',
'power': {
'initial_learning_rate': learning_rate,
'power': -0.5,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': warmup_steps,
'warmup_learning_rate': 0.0
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.sentencepiece_model_path != None',
])
return config
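# Illustrative usage sketch (not part of the original module): retrieving the
# registered experiment through the factory and filling in the sentencepiece
# model path that the restrictions above require. The path string is a
# placeholder, not a real file.
if __name__ == '__main__':
  _experiment = exp_factory.get_exp_config('wmt_transformer/large')
  _experiment.task.sentencepiece_model_path = '/path/to/sentencepiece.model'
  print(_experiment.task.train_data.tfds_name)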
| 3,791 | 33.162162 | 74 | py |
models | models-master/official/nlp/configs/pretraining_experiments.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretraining experiment configurations."""
# pylint: disable=g-doc-return-or-yield,line-too-long
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.data import pretrain_dataloader
from official.nlp.data import pretrain_dynamic_dataloader
from official.nlp.data import pretrain_text_dataloader
from official.nlp.tasks import masked_lm
_TRAINER = cfg.TrainerConfig(
train_steps=1000000,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay': [
'LayerNorm', 'layer_norm', 'bias'
],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 1e-4,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
}))
@exp_factory.register_config_factory('bert/pretraining')
def bert_pretraining() -> cfg.ExperimentConfig:
"""BERT pretraining experiment."""
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=masked_lm.MaskedLMConfig(
train_data=pretrain_dataloader.BertPretrainDataConfig(),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
is_training=False)),
trainer=_TRAINER,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('bert/pretraining_dynamic')
def bert_dynamic() -> cfg.ExperimentConfig:
"""BERT base with dynamic input sequences.
  On TPU, this needs to run with the tf.data service using round-robin
  behavior.
"""
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(enable_xla=True),
task=masked_lm.MaskedLMConfig(
train_data=pretrain_dynamic_dataloader.BertPretrainDataConfig(),
validation_data=pretrain_dataloader.BertPretrainDataConfig(
is_training=False)),
trainer=_TRAINER,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('bert/text_wiki_pretraining')
def bert_text_wiki_pretraining() -> cfg.ExperimentConfig:
r"""BERT with wiki text tfds.
  Note that only the Wikipedia English corpus is used. This setup cannot
  exactly reproduce the original BERT pretraining because the next-sentence
  sampling is hard to match with tf ops.
"""
config = cfg.ExperimentConfig(
task=masked_lm.MaskedLMConfig(
train_data=pretrain_text_dataloader.BertPretrainTextDataConfig(
tfds_name='wikipedia/20201201.en',
tfds_split='train',
vocab_file_path='TODO for users',
),
validation_data=pretrain_text_dataloader.BertPretrainTextDataConfig(
tfds_name='wikipedia/20201201.en',
tfds_split='train',
vocab_file_path='TODO for users',
is_training=False)),
trainer=_TRAINER,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
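# Illustrative usage sketch (not part of the original module): fetching the
# registered pretraining experiment and overriding a couple of trainer fields.
# The override values are arbitrary and only demonstrate the override API.
if __name__ == '__main__':
  _experiment = exp_factory.get_exp_config('bert/pretraining')
  _experiment.override(
      {'trainer': {'train_steps': 100000, 'checkpoint_interval': 10000}})
  print(_experiment.trainer.train_steps)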
| 4,082 | 34.815789 | 79 | py |
models | models-master/official/nlp/configs/experiment_configs.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiments definition."""
# pylint: disable=unused-import
from official.nlp.configs import finetuning_experiments
from official.nlp.configs import pretraining_experiments
from official.nlp.configs import wmt_transformer_experiments
| 845 | 41.3 | 74 | py |
models | models-master/official/nlp/configs/encoders_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.configs.encoders."""
import os
import tensorflow as tf
from official.modeling import hyperparams
from official.nlp.configs import encoders
from official.nlp.modeling import networks
from official.projects.teams import teams
class EncodersTest(tf.test.TestCase):
def test_encoder_from_yaml(self):
config = encoders.EncoderConfig(
type="bert", bert=encoders.BertEncoderConfig(num_layers=1))
encoder = encoders.build_encoder(config)
ckpt = tf.train.Checkpoint(encoder=encoder)
ckpt_path = ckpt.save(self.get_temp_dir() + "/ckpt")
params_save_path = os.path.join(self.get_temp_dir(), "params.yaml")
hyperparams.save_params_dict_to_yaml(config, params_save_path)
    restored_cfg = encoders.EncoderConfig.from_yaml(params_save_path)
    restored_encoder = encoders.build_encoder(restored_cfg)
    status = tf.train.Checkpoint(encoder=restored_encoder).restore(ckpt_path)
status.assert_consumed()
def test_build_teams(self):
config = encoders.EncoderConfig(
type="any", any=teams.TeamsEncoderConfig(num_layers=1))
encoder = encoders.build_encoder(config)
self.assertIsInstance(encoder, networks.EncoderScaffold)
self.assertIsInstance(encoder.embedding_network,
networks.PackedSequenceEmbedding)
if __name__ == "__main__":
tf.test.main()
| 1,963 | 36.056604 | 76 | py |
models | models-master/official/nlp/configs/finetuning_experiments.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning experiment configurations."""
# pylint: disable=g-doc-return-or-yield,line-too-long
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.nlp.data import question_answering_dataloader
from official.nlp.data import sentence_prediction_dataloader
from official.nlp.data import tagging_dataloader
from official.nlp.tasks import question_answering
from official.nlp.tasks import sentence_prediction
from official.nlp.tasks import tagging
@exp_factory.register_config_factory('bert/sentence_prediction')
def bert_sentence_prediction() -> cfg.ExperimentConfig:
r"""BERT GLUE."""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
train_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 3e-5,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('bert/sentence_prediction_text')
def bert_sentence_prediction_text() -> cfg.ExperimentConfig:
r"""BERT sentence prediction with raw text data.
Example: use tf.text and tfds as input with glue_mnli_text.yaml
"""
config = cfg.ExperimentConfig(
task=sentence_prediction.SentencePredictionConfig(
train_data=sentence_prediction_dataloader
.SentencePredictionTextDataConfig(),
validation_data=sentence_prediction_dataloader
.SentencePredictionTextDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 3e-5,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('bert/squad')
def bert_squad() -> cfg.ExperimentConfig:
"""BERT Squad V1/V2."""
config = cfg.ExperimentConfig(
task=question_answering.QuestionAnsweringConfig(
train_data=question_answering_dataloader.QADataConfig(),
validation_data=question_answering_dataloader.QADataConfig()),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 8e-5,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('bert/tagging')
def bert_tagging() -> cfg.ExperimentConfig:
"""BERT tagging task."""
config = cfg.ExperimentConfig(
task=tagging.TaggingConfig(
train_data=tagging_dataloader.TaggingDataConfig(),
validation_data=tagging_dataloader.TaggingDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 8e-5,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
])
return config
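# Illustrative usage sketch (not part of the original module): pulling one of
# the registered finetuning experiments from the factory and adjusting a data
# field. In practice the GLUE/SQuAD specifics come from a config file such as
# glue_mnli_text.yaml passed to the train script; the value below is only a
# demonstration assumption.
if __name__ == '__main__':
  _experiment = exp_factory.get_exp_config('bert/sentence_prediction_text')
  _experiment.task.train_data.global_batch_size = 32
  print(_experiment.task.train_data.global_batch_size)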
| 6,571 | 35.511111 | 74 | py |
models | models-master/official/nlp/configs/electra.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ELECTRA model configurations and instantiation methods."""
from typing import List
import dataclasses
from official.modeling.hyperparams import base_config
from official.nlp.configs import bert
from official.nlp.configs import encoders
@dataclasses.dataclass
class ElectraPretrainerConfig(base_config.Config):
"""ELECTRA pretrainer configuration."""
num_masked_tokens: int = 76
sequence_length: int = 512
num_classes: int = 2
discriminator_loss_weight: float = 50.0
tie_embeddings: bool = True
disallow_correct: bool = False
generator_encoder: encoders.EncoderConfig = dataclasses.field(
default_factory=encoders.EncoderConfig
)
discriminator_encoder: encoders.EncoderConfig = dataclasses.field(
default_factory=encoders.EncoderConfig
)
cls_heads: List[bert.ClsHeadConfig] = dataclasses.field(default_factory=list)
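# Illustrative usage sketch (not part of the original module): an ELECTRA-style
# configuration with a generator that is smaller than the discriminator. The
# hidden sizes and attention-head counts are assumptions chosen only for
# demonstration.
if __name__ == "__main__":
  _example_electra = ElectraPretrainerConfig(
      discriminator_encoder=encoders.EncoderConfig(
          type="bert",
          bert=encoders.BertEncoderConfig(
              hidden_size=256, num_attention_heads=4)),
      generator_encoder=encoders.EncoderConfig(
          type="bert",
          bert=encoders.BertEncoderConfig(
              hidden_size=64, num_attention_heads=1)),
      cls_heads=[bert.ClsHeadConfig(inner_dim=256, num_classes=2)])
  print(_example_electra.discriminator_loss_weight)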
| 1,467 | 34.804878 | 79 | py |
models | models-master/official/nlp/configs/__init__.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 609 | 39.666667 | 74 | py |
models | models-master/official/nlp/configs/encoders.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer Encoders.
Includes configurations and factory methods.
"""
import dataclasses
from typing import Optional, Sequence
import gin
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import networks
from official.projects.bigbird import encoder as bigbird_encoder
@dataclasses.dataclass
class BertEncoderConfig(hyperparams.Config):
"""BERT encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
embedding_size: Optional[int] = None
output_range: Optional[int] = None
return_all_encoder_outputs: bool = False
return_attention_scores: bool = False
# Pre/Post-LN Transformer
norm_first: bool = False
@dataclasses.dataclass
class MobileBertEncoderConfig(hyperparams.Config):
"""MobileBERT encoder configuration.
Attributes:
word_vocab_size: number of words in the vocabulary.
word_embed_size: word embedding size.
type_vocab_size: number of word types.
max_sequence_length: maximum length of input sequence.
num_blocks: number of transformer block in the encoder model.
hidden_size: the hidden size for the transformer block.
num_attention_heads: number of attention heads in the transformer block.
intermediate_size: the size of the "intermediate" (a.k.a., feed forward)
layer.
hidden_activation: the non-linear activation function to apply to the
output of the intermediate/feed-forward layer.
hidden_dropout_prob: dropout probability for the hidden layers.
attention_probs_dropout_prob: dropout probability of the attention
probabilities.
intra_bottleneck_size: the size of bottleneck.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
use_bottleneck_attention: Use attention inputs from the bottleneck
transformation. If true, the following `key_query_shared_bottleneck`
will be ignored.
key_query_shared_bottleneck: whether to share linear transformation for keys
and queries.
num_feedforward_networks: number of stacked feed-forward networks.
    normalization_type: the type of normalization; only 'no_norm' and
      'layer_norm' are supported. 'no_norm' represents the element-wise linear
      transformation for the student model, as suggested by the original
      MobileBERT paper. 'layer_norm' is used for the teacher model.
    classifier_activation: whether to use the tanh activation for the final
      representation of the [CLS] token in fine-tuning.
"""
word_vocab_size: int = 30522
word_embed_size: int = 128
type_vocab_size: int = 2
max_sequence_length: int = 512
num_blocks: int = 24
hidden_size: int = 512
num_attention_heads: int = 4
intermediate_size: int = 4096
hidden_activation: str = "gelu"
hidden_dropout_prob: float = 0.1
attention_probs_dropout_prob: float = 0.1
intra_bottleneck_size: int = 1024
initializer_range: float = 0.02
use_bottleneck_attention: bool = False
key_query_shared_bottleneck: bool = False
num_feedforward_networks: int = 1
normalization_type: str = "layer_norm"
classifier_activation: bool = True
input_mask_dtype: str = "int32"
@dataclasses.dataclass
class AlbertEncoderConfig(hyperparams.Config):
"""ALBERT encoder configuration."""
vocab_size: int = 30000
embedding_width: int = 128
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.0
attention_dropout_rate: float = 0.0
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
@dataclasses.dataclass
class BigBirdEncoderConfig(hyperparams.Config):
"""BigBird encoder configuration."""
vocab_size: int = 50358
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
# Pre/Post-LN Transformer
norm_first: bool = False
max_position_embeddings: int = 4096
num_rand_blocks: int = 3
block_size: int = 64
type_vocab_size: int = 16
initializer_range: float = 0.02
embedding_width: Optional[int] = None
use_gradient_checkpointing: bool = False
@dataclasses.dataclass
class KernelEncoderConfig(hyperparams.Config):
"""Linear encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
# Pre/Post-LN Transformer
norm_first: bool = False
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
embedding_size: Optional[int] = None
feature_transform: str = "exp"
num_random_features: int = 256
redraw: bool = False
is_short_seq: bool = False
begin_kernel: int = 0
scale: Optional[float] = None
@dataclasses.dataclass
class ReuseEncoderConfig(hyperparams.Config):
"""Reuse encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
embedding_size: Optional[int] = None
output_range: Optional[int] = None
return_all_encoder_outputs: bool = False
# Pre/Post-LN Transformer
norm_first: bool = False
# Reuse transformer
reuse_attention: int = -1
use_relative_pe: bool = False
pe_max_seq_length: int = 512
max_reuse_layer_idx: int = 6
@dataclasses.dataclass
class XLNetEncoderConfig(hyperparams.Config):
"""XLNet encoder configuration."""
vocab_size: int = 32000
num_layers: int = 24
hidden_size: int = 1024
num_attention_heads: int = 16
head_size: int = 64
inner_size: int = 4096
inner_activation: str = "gelu"
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
attention_type: str = "bi"
bi_data: bool = False
tie_attention_biases: bool = False
memory_length: int = 0
same_length: bool = False
clamp_length: int = -1
reuse_length: int = 0
use_cls_mask: bool = False
embedding_width: int = 1024
initializer_range: float = 0.02
two_stream: bool = False
@dataclasses.dataclass
class QueryBertConfig(hyperparams.Config):
"""Query BERT encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
embedding_size: Optional[int] = None
output_range: Optional[int] = None
return_all_encoder_outputs: bool = False
return_attention_scores: bool = False
# Pre/Post-LN Transformer
norm_first: bool = False
@dataclasses.dataclass
class FNetEncoderConfig(hyperparams.Config):
"""FNet encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
inner_activation: str = "gelu"
inner_dim: int = 3072
output_dropout: float = 0.1
attention_dropout: float = 0.1
max_sequence_length: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
embedding_width: Optional[int] = None
output_range: Optional[int] = None
norm_first: bool = False
use_fft: bool = False
attention_layers: Sequence[int] = ()
@dataclasses.dataclass
class SparseMixerEncoderConfig(hyperparams.Config):
"""SparseMixer encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 14
moe_layers: Sequence[int] = (5, 6, 7, 8)
attention_layers: Sequence[int] = (10, 11, 12, 13)
num_experts: int = 16
train_capacity_factor: float = 1.
eval_capacity_factor: float = 1.
examples_per_group: float = 1.
use_fft: bool = False
num_attention_heads: int = 8
max_sequence_length: int = 512
type_vocab_size: int = 2
inner_dim: int = 3072
inner_activation: str = "gelu"
output_dropout: float = 0.1
attention_dropout: float = 0.1
initializer_range: float = 0.02
output_range: Optional[int] = None
embedding_width: Optional[int] = None
norm_first: bool = False
@dataclasses.dataclass
class EncoderConfig(hyperparams.OneOfConfig):
"""Encoder configuration."""
type: Optional[str] = "bert"
albert: AlbertEncoderConfig = dataclasses.field(
default_factory=AlbertEncoderConfig
)
bert: BertEncoderConfig = dataclasses.field(default_factory=BertEncoderConfig)
bert_v2: BertEncoderConfig = dataclasses.field(
default_factory=BertEncoderConfig
)
bigbird: BigBirdEncoderConfig = dataclasses.field(
default_factory=BigBirdEncoderConfig
)
kernel: KernelEncoderConfig = dataclasses.field(
default_factory=KernelEncoderConfig
)
mobilebert: MobileBertEncoderConfig = dataclasses.field(
default_factory=MobileBertEncoderConfig
)
reuse: ReuseEncoderConfig = dataclasses.field(
default_factory=ReuseEncoderConfig
)
xlnet: XLNetEncoderConfig = dataclasses.field(
default_factory=XLNetEncoderConfig
)
query_bert: QueryBertConfig = dataclasses.field(
default_factory=QueryBertConfig
)
fnet: FNetEncoderConfig = dataclasses.field(default_factory=FNetEncoderConfig)
sparse_mixer: SparseMixerEncoderConfig = dataclasses.field(
default_factory=SparseMixerEncoderConfig
)
# If `any` is used, the encoder building relies on any.BUILDER.
any: hyperparams.Config = dataclasses.field(
default_factory=hyperparams.Config
)
@gin.configurable
def build_encoder(config: EncoderConfig,
embedding_layer: Optional[tf.keras.layers.Layer] = None,
encoder_cls=None,
bypass_config: bool = False):
"""Instantiate a Transformer encoder network from EncoderConfig.
Args:
config: the one-of encoder config, which provides encoder parameters of a
chosen encoder.
embedding_layer: an external embedding layer passed to the encoder.
    encoder_cls: an external encoder class not included in the supported
      encoders, usually used by gin.configurable.
    bypass_config: whether to ignore the config instance and create the object
      directly with `encoder_cls`.
Returns:
An encoder instance.
"""
if bypass_config:
return encoder_cls()
encoder_type = config.type
encoder_cfg = config.get()
if encoder_cls and encoder_cls.__name__ == "EncoderScaffold":
embedding_cfg = dict(
vocab_size=encoder_cfg.vocab_size,
type_vocab_size=encoder_cfg.type_vocab_size,
hidden_size=encoder_cfg.hidden_size,
max_seq_length=encoder_cfg.max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
dropout_rate=encoder_cfg.dropout_rate,
)
hidden_cfg = dict(
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_activation=tf_utils.get_activation(
encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
)
kwargs = dict(
embedding_cfg=embedding_cfg,
hidden_cfg=hidden_cfg,
num_hidden_instances=encoder_cfg.num_layers,
pooled_output_dim=encoder_cfg.hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
return_all_layer_outputs=encoder_cfg.return_all_encoder_outputs,
dict_outputs=True)
return encoder_cls(**kwargs)
if encoder_type == "any":
encoder = encoder_cfg.BUILDER(encoder_cfg)
if not isinstance(encoder,
(tf.Module, tf.keras.Model, tf.keras.layers.Layer)):
      raise ValueError("The BUILDER returns an unexpected instance. "
                       "`build_encoder` should return a tf.Module, "
                       "tf.keras.Model or tf.keras.layers.Layer. However, "
                       f"we got {encoder.__class__}")
return encoder
if encoder_type == "mobilebert":
return networks.MobileBERTEncoder(
word_vocab_size=encoder_cfg.word_vocab_size,
word_embed_size=encoder_cfg.word_embed_size,
type_vocab_size=encoder_cfg.type_vocab_size,
max_sequence_length=encoder_cfg.max_sequence_length,
num_blocks=encoder_cfg.num_blocks,
hidden_size=encoder_cfg.hidden_size,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_act_fn=encoder_cfg.hidden_activation,
hidden_dropout_prob=encoder_cfg.hidden_dropout_prob,
attention_probs_dropout_prob=encoder_cfg.attention_probs_dropout_prob,
intra_bottleneck_size=encoder_cfg.intra_bottleneck_size,
initializer_range=encoder_cfg.initializer_range,
use_bottleneck_attention=encoder_cfg.use_bottleneck_attention,
key_query_shared_bottleneck=encoder_cfg.key_query_shared_bottleneck,
num_feedforward_networks=encoder_cfg.num_feedforward_networks,
normalization_type=encoder_cfg.normalization_type,
classifier_activation=encoder_cfg.classifier_activation,
input_mask_dtype=encoder_cfg.input_mask_dtype)
if encoder_type == "albert":
return networks.AlbertEncoder(
vocab_size=encoder_cfg.vocab_size,
embedding_width=encoder_cfg.embedding_width,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
dict_outputs=True)
if encoder_type == "bigbird":
# TODO(frederickliu): Support use_gradient_checkpointing and update
# experiments to use the EncoderScaffold only.
if encoder_cfg.use_gradient_checkpointing:
return bigbird_encoder.BigBirdEncoder(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
num_rand_blocks=encoder_cfg.num_rand_blocks,
block_size=encoder_cfg.block_size,
max_position_embeddings=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
embedding_width=encoder_cfg.embedding_width,
use_gradient_checkpointing=encoder_cfg.use_gradient_checkpointing)
embedding_cfg = dict(
vocab_size=encoder_cfg.vocab_size,
type_vocab_size=encoder_cfg.type_vocab_size,
hidden_size=encoder_cfg.hidden_size,
max_seq_length=encoder_cfg.max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
dropout_rate=encoder_cfg.dropout_rate)
attention_cfg = dict(
num_heads=encoder_cfg.num_attention_heads,
key_dim=int(encoder_cfg.hidden_size // encoder_cfg.num_attention_heads),
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
max_rand_mask_length=encoder_cfg.max_position_embeddings,
num_rand_blocks=encoder_cfg.num_rand_blocks,
from_block_size=encoder_cfg.block_size,
to_block_size=encoder_cfg.block_size,
)
hidden_cfg = dict(
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_activation=tf_utils.get_activation(
encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
norm_first=encoder_cfg.norm_first,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
attention_cls=layers.BigBirdAttention,
attention_cfg=attention_cfg)
kwargs = dict(
embedding_cfg=embedding_cfg,
hidden_cls=layers.TransformerScaffold,
hidden_cfg=hidden_cfg,
num_hidden_instances=encoder_cfg.num_layers,
mask_cls=layers.BigBirdMasks,
mask_cfg=dict(block_size=encoder_cfg.block_size),
pooled_output_dim=encoder_cfg.hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
return_all_layer_outputs=False,
dict_outputs=True,
layer_idx_as_attention_seed=True)
return networks.EncoderScaffold(**kwargs)
if encoder_type == "kernel":
embedding_cfg = dict(
vocab_size=encoder_cfg.vocab_size,
type_vocab_size=encoder_cfg.type_vocab_size,
hidden_size=encoder_cfg.hidden_size,
max_seq_length=encoder_cfg.max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
dropout_rate=encoder_cfg.dropout_rate)
attention_cfg = dict(
num_heads=encoder_cfg.num_attention_heads,
key_dim=int(encoder_cfg.hidden_size // encoder_cfg.num_attention_heads),
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
feature_transform=encoder_cfg.feature_transform,
num_random_features=encoder_cfg.num_random_features,
redraw=encoder_cfg.redraw,
is_short_seq=encoder_cfg.is_short_seq,
begin_kernel=encoder_cfg.begin_kernel,
scale=encoder_cfg.scale,
)
hidden_cfg = dict(
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_activation=tf_utils.get_activation(
encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
norm_first=encoder_cfg.norm_first,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
attention_cls=layers.KernelAttention,
attention_cfg=attention_cfg)
kwargs = dict(
embedding_cfg=embedding_cfg,
hidden_cls=layers.TransformerScaffold,
hidden_cfg=hidden_cfg,
num_hidden_instances=encoder_cfg.num_layers,
mask_cls=layers.KernelMask,
pooled_output_dim=encoder_cfg.hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
return_all_layer_outputs=False,
dict_outputs=True,
layer_idx_as_attention_seed=True)
return networks.EncoderScaffold(**kwargs)
if encoder_type == "xlnet":
return networks.XLNetBase(
vocab_size=encoder_cfg.vocab_size,
num_layers=encoder_cfg.num_layers,
hidden_size=encoder_cfg.hidden_size,
num_attention_heads=encoder_cfg.num_attention_heads,
head_size=encoder_cfg.head_size,
inner_size=encoder_cfg.inner_size,
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
attention_type=encoder_cfg.attention_type,
bi_data=encoder_cfg.bi_data,
two_stream=encoder_cfg.two_stream,
tie_attention_biases=encoder_cfg.tie_attention_biases,
memory_length=encoder_cfg.memory_length,
clamp_length=encoder_cfg.clamp_length,
reuse_length=encoder_cfg.reuse_length,
inner_activation=encoder_cfg.inner_activation,
use_cls_mask=encoder_cfg.use_cls_mask,
embedding_width=encoder_cfg.embedding_width,
initializer=tf.keras.initializers.RandomNormal(
stddev=encoder_cfg.initializer_range))
if encoder_type == "reuse":
embedding_cfg = dict(
vocab_size=encoder_cfg.vocab_size,
type_vocab_size=encoder_cfg.type_vocab_size,
hidden_size=encoder_cfg.hidden_size,
max_seq_length=encoder_cfg.max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
dropout_rate=encoder_cfg.dropout_rate)
hidden_cfg = dict(
num_attention_heads=encoder_cfg.num_attention_heads,
inner_dim=encoder_cfg.intermediate_size,
inner_activation=tf_utils.get_activation(
encoder_cfg.hidden_activation),
output_dropout=encoder_cfg.dropout_rate,
attention_dropout=encoder_cfg.attention_dropout_rate,
norm_first=encoder_cfg.norm_first,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
reuse_attention=encoder_cfg.reuse_attention,
use_relative_pe=encoder_cfg.use_relative_pe,
pe_max_seq_length=encoder_cfg.pe_max_seq_length,
max_reuse_layer_idx=encoder_cfg.max_reuse_layer_idx)
kwargs = dict(
embedding_cfg=embedding_cfg,
hidden_cls=layers.ReuseTransformer,
hidden_cfg=hidden_cfg,
num_hidden_instances=encoder_cfg.num_layers,
pooled_output_dim=encoder_cfg.hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
return_all_layer_outputs=False,
dict_outputs=True,
feed_layer_idx=True,
recursive=True)
return networks.EncoderScaffold(**kwargs)
if encoder_type == "query_bert":
embedding_layer = layers.FactorizedEmbedding(
vocab_size=encoder_cfg.vocab_size,
embedding_width=encoder_cfg.embedding_size,
output_dim=encoder_cfg.hidden_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
name="word_embeddings")
return networks.BertEncoderV2(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_layer=embedding_layer,
return_all_encoder_outputs=encoder_cfg.return_all_encoder_outputs,
return_attention_scores=encoder_cfg.return_attention_scores,
dict_outputs=True,
norm_first=encoder_cfg.norm_first)
if encoder_type == "fnet":
return networks.FNet(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
inner_dim=encoder_cfg.inner_dim,
inner_activation=tf_utils.get_activation(encoder_cfg.inner_activation),
output_dropout=encoder_cfg.output_dropout,
attention_dropout=encoder_cfg.attention_dropout,
max_sequence_length=encoder_cfg.max_sequence_length,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_width,
embedding_layer=embedding_layer,
norm_first=encoder_cfg.norm_first,
use_fft=encoder_cfg.use_fft,
attention_layers=encoder_cfg.attention_layers)
if encoder_type == "sparse_mixer":
return networks.SparseMixer(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
moe_layers=encoder_cfg.moe_layers,
attention_layers=encoder_cfg.attention_layers,
num_experts=encoder_cfg.num_experts,
train_capacity_factor=encoder_cfg.train_capacity_factor,
eval_capacity_factor=encoder_cfg.eval_capacity_factor,
examples_per_group=encoder_cfg.examples_per_group,
use_fft=encoder_cfg.use_fft,
num_attention_heads=encoder_cfg.num_attention_heads,
max_sequence_length=encoder_cfg.max_sequence_length,
type_vocab_size=encoder_cfg.type_vocab_size,
inner_dim=encoder_cfg.inner_dim,
inner_activation=tf_utils.get_activation(encoder_cfg.inner_activation),
output_dropout=encoder_cfg.output_dropout,
attention_dropout=encoder_cfg.attention_dropout,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_width,
norm_first=encoder_cfg.norm_first,
embedding_layer=embedding_layer)
bert_encoder_cls = networks.BertEncoder
if encoder_type == "bert_v2":
bert_encoder_cls = networks.BertEncoderV2
# Uses the default BERTEncoder configuration schema to create the encoder.
  # If it does not match, please add a dedicated branch for the encoder type.
return bert_encoder_cls(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
output_range=encoder_cfg.output_range,
embedding_width=encoder_cfg.embedding_size,
embedding_layer=embedding_layer,
return_all_encoder_outputs=encoder_cfg.return_all_encoder_outputs,
return_attention_scores=encoder_cfg.return_attention_scores,
dict_outputs=True,
norm_first=encoder_cfg.norm_first)
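# Illustrative usage sketch (not part of the original module): the one-of
# EncoderConfig is keyed by `type`, and build_encoder only consumes the
# matching sub-config. The ALBERT sizes below are assumptions chosen for
# demonstration.
if __name__ == "__main__":
  _example_cfg = EncoderConfig(
      type="albert",
      albert=AlbertEncoderConfig(num_layers=6, hidden_size=384))
  _example_encoder = build_encoder(_example_cfg)
  print(_example_encoder.__class__.__name__)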
| 28,534 | 39.190141 | 80 | py |
models | models-master/official/nlp/data/classifier_data_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT library to process data for classification task."""
import collections
import csv
import importlib
import json
import os
from absl import logging
import tensorflow as tf
import tensorflow_datasets as tfds
from official.nlp.tools import tokenization
class InputExample(object):
"""A single training/test example for simple seq regression/classification."""
def __init__(self,
guid,
text_a,
text_b=None,
label=None,
weight=None,
example_id=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string for classification, float for regression. The
label of the example. This should be specified for train and dev
examples, but not for test examples.
weight: (Optional) float. The weight of the example to be used during
training.
example_id: (Optional) int. The int identification number of example in
the corpus.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.weight = weight
self.example_id = example_id
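# Illustrative sketch (not part of the original module): a minimal
# sentence-pair InputExample, e.g. for an NLI-style task. The guid, texts and
# label are made up purely for demonstration.
if __name__ == "__main__":
  _example = InputExample(
      guid="train-0",
      text_a="The cat sat on the mat.",
      text_b="A cat is sitting on a mat.",
      label="entailment")
  print(_example.guid, _example.label)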
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True,
weight=None,
example_id=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
self.weight = weight
self.example_id = example_id
class DataProcessor(object):
"""Base class for converters for seq regression/classification datasets."""
def __init__(self, process_text_fn=tokenization.convert_to_unicode):
self.process_text_fn = process_text_fn
self.is_regression = False
self.label_type = None
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@staticmethod
def get_processor_name():
"""Gets the string identifier of the processor."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.io.gfile.GFile(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
@classmethod
def _read_jsonl(cls, input_file):
"""Reads a json line file."""
with tf.io.gfile.GFile(input_file, "r") as f:
lines = []
for json_str in f:
lines.append(json.loads(json_str))
return lines
def featurize_example(self, *kargs, **kwargs):
"""Converts a single `InputExample` into a single `InputFeatures`."""
return convert_single_example(*kargs, **kwargs)
class DefaultGLUEDataProcessor(DataProcessor):
"""Processor for the SuperGLUE dataset."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples_tfds("train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples_tfds("validation")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples_tfds("test")
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
raise NotImplementedError()
class AxProcessor(DataProcessor):
"""Processor for the AX dataset (GLUE diagnostics dataset)."""
def get_train_examples(self, data_dir):
"""See base class."""
train_mnli_dataset = tfds.load(
"glue/mnli", split="train", try_gcs=True).as_numpy_iterator()
return self._create_examples_tfds(train_mnli_dataset, "train")
def get_dev_examples(self, data_dir):
"""See base class."""
val_mnli_dataset = tfds.load(
"glue/mnli", split="validation_matched",
try_gcs=True).as_numpy_iterator()
return self._create_examples_tfds(val_mnli_dataset, "validation")
def get_test_examples(self, data_dir):
"""See base class."""
test_ax_dataset = tfds.load(
"glue/ax", split="test", try_gcs=True).as_numpy_iterator()
return self._create_examples_tfds(test_ax_dataset, "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
@staticmethod
def get_processor_name():
"""See base class."""
return "AX"
def _create_examples_tfds(self, dataset, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "contradiction"
text_a = self.process_text_fn(example["hypothesis"])
text_b = self.process_text_fn(example["premise"])
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class ColaProcessor(DefaultGLUEDataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "COLA"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/cola", split=set_type, try_gcs=True).as_numpy_iterator()
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["sentence"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=None, label=label, weight=None))
return examples
class ImdbProcessor(DataProcessor):
"""Processor for the IMDb dataset."""
def get_labels(self):
return ["neg", "pos"]
def get_train_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "train"))
def get_dev_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "test"))
@staticmethod
def get_processor_name():
"""See base class."""
return "IMDB"
def _create_examples(self, data_dir):
"""Creates examples."""
examples = []
for label in ["neg", "pos"]:
cur_dir = os.path.join(data_dir, label)
for filename in tf.io.gfile.listdir(cur_dir):
if not filename.endswith("txt"):
continue
if len(examples) % 1000 == 0:
logging.info("Loading dev example %d", len(examples))
path = os.path.join(cur_dir, filename)
with tf.io.gfile.GFile(path, "r") as f:
text = f.read().strip().replace("<br />", " ")
examples.append(
InputExample(
guid="unused_id", text_a=text, text_b=None, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def __init__(self,
mnli_type="matched",
process_text_fn=tokenization.convert_to_unicode):
super(MnliProcessor, self).__init__(process_text_fn)
self.dataset = tfds.load("glue/mnli", try_gcs=True)
if mnli_type not in ("matched", "mismatched"):
raise ValueError("Invalid `mnli_type`: %s" % mnli_type)
self.mnli_type = mnli_type
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples_tfds("train")
def get_dev_examples(self, data_dir):
"""See base class."""
if self.mnli_type == "matched":
return self._create_examples_tfds("validation_matched")
else:
return self._create_examples_tfds("validation_mismatched")
def get_test_examples(self, data_dir):
"""See base class."""
if self.mnli_type == "matched":
return self._create_examples_tfds("test_matched")
else:
return self._create_examples_tfds("test_mismatched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
@staticmethod
def get_processor_name():
"""See base class."""
return "MNLI"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/mnli", split=set_type, try_gcs=True).as_numpy_iterator()
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "contradiction"
text_a = self.process_text_fn(example["hypothesis"])
text_b = self.process_text_fn(example["premise"])
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class MrpcProcessor(DefaultGLUEDataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "MRPC"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/mrpc", split=set_type, try_gcs=True).as_numpy_iterator()
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class PawsxProcessor(DataProcessor):
"""Processor for the PAWS-X data set."""
supported_languages = ["de", "en", "es", "fr", "ja", "ko", "zh"]
def __init__(self,
language="en",
process_text_fn=tokenization.convert_to_unicode):
super(PawsxProcessor, self).__init__(process_text_fn)
if language == "all":
self.languages = PawsxProcessor.supported_languages
elif language not in PawsxProcessor.supported_languages:
raise ValueError("language %s is not supported for PAWS-X task." %
language)
else:
self.languages = [language]
def get_train_examples(self, data_dir):
"""See base class."""
lines = []
for language in self.languages:
if language == "en":
train_tsv = "train.tsv"
else:
train_tsv = "translated_train.tsv"
# Skips the header.
lines.extend(
self._read_tsv(os.path.join(data_dir, language, train_tsv))[1:])
examples = []
for i, line in enumerate(lines):
guid = "train-%d" % i
text_a = self.process_text_fn(line[1])
text_b = self.process_text_fn(line[2])
label = self.process_text_fn(line[3])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = []
for lang in PawsxProcessor.supported_languages:
lines.extend(
self._read_tsv(os.path.join(data_dir, lang, "dev_2k.tsv"))[1:])
examples = []
for i, line in enumerate(lines):
guid = "dev-%d" % i
text_a = self.process_text_fn(line[1])
text_b = self.process_text_fn(line[2])
label = self.process_text_fn(line[3])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples_by_lang = {k: [] for k in self.supported_languages}
for lang in self.supported_languages:
lines = self._read_tsv(os.path.join(data_dir, lang, "test_2k.tsv"))[1:]
for i, line in enumerate(lines):
guid = "test-%d" % i
text_a = self.process_text_fn(line[1])
text_b = self.process_text_fn(line[2])
label = self.process_text_fn(line[3])
examples_by_lang[lang].append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples_by_lang
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "XTREME-PAWS-X"
class QnliProcessor(DefaultGLUEDataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
@staticmethod
def get_processor_name():
"""See base class."""
return "QNLI"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/qnli", split=set_type, try_gcs=True).as_numpy_iterator()
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "entailment"
text_a = self.process_text_fn(example["question"])
text_b = self.process_text_fn(example["sentence"])
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class QqpProcessor(DefaultGLUEDataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "QQP"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/qqp", split=set_type, try_gcs=True).as_numpy_iterator()
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["question1"])
text_b = self.process_text_fn(example["question2"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class RteProcessor(DefaultGLUEDataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_labels(self):
"""See base class."""
# All datasets are converted to 2-class split, where for 3-class datasets we
# collapse neutral and contradiction into not_entailment.
return ["entailment", "not_entailment"]
@staticmethod
def get_processor_name():
"""See base class."""
return "RTE"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/rte", split=set_type, try_gcs=True).as_numpy_iterator()
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "entailment"
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class SstProcessor(DefaultGLUEDataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "SST-2"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/sst2", split=set_type, try_gcs=True).as_numpy_iterator()
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["sentence"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=None, label=label, weight=None))
return examples
class StsBProcessor(DefaultGLUEDataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def __init__(self, process_text_fn=tokenization.convert_to_unicode):
super(StsBProcessor, self).__init__(process_text_fn=process_text_fn)
self.is_regression = True
self.label_type = float
self._labels = None
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/stsb", split=set_type, try_gcs=True).as_numpy_iterator()
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = 0.0
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
if set_type != "test":
label = self.label_type(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
def get_labels(self):
"""See base class."""
return self._labels
@staticmethod
def get_processor_name():
"""See base class."""
return "STS-B"
class TfdsProcessor(DataProcessor):
"""Processor for generic text classification and regression TFDS data set.
The TFDS parameters are expected to be provided in the tfds_params string, in
a comma-separated list of parameter assignments.
Examples:
tfds_params="dataset=scicite,text_key=string"
tfds_params="dataset=imdb_reviews,test_split=,dev_split=test"
tfds_params="dataset=glue/cola,text_key=sentence"
tfds_params="dataset=glue/sst2,text_key=sentence"
tfds_params="dataset=glue/qnli,text_key=question,text_b_key=sentence"
tfds_params="dataset=glue/mrpc,text_key=sentence1,text_b_key=sentence2"
tfds_params="dataset=glue/stsb,text_key=sentence1,text_b_key=sentence2,"
"is_regression=true,label_type=float"
tfds_params="dataset=snli,text_key=premise,text_b_key=hypothesis,"
"skip_label=-1"
Possible parameters (please refer to the documentation of Tensorflow Datasets
(TFDS) for the meaning of individual parameters):
dataset: Required dataset name (potentially with subset and version number).
data_dir: Optional TFDS source root directory.
module_import: Optional Dataset module to import.
train_split: Name of the train split (defaults to `train`).
dev_split: Name of the dev split (defaults to `validation`).
test_split: Name of the test split (defaults to `test`).
text_key: Key of the text_a feature (defaults to `text`).
text_b_key: Key of the second text feature if available.
label_key: Key of the label feature (defaults to `label`).
test_text_key: Key of the text feature to use in test set.
test_text_b_key: Key of the second text feature to use in test set.
test_label: String to be used as the label for all test examples.
label_type: Type of the label key (defaults to `int`).
weight_key: Key of the float sample weight (is not used if not provided).
is_regression: Whether the task is a regression problem (defaults to False).
skip_label: Skip examples with given label (defaults to None).
"""
def __init__(self,
tfds_params,
process_text_fn=tokenization.convert_to_unicode):
super(TfdsProcessor, self).__init__(process_text_fn)
self._process_tfds_params_str(tfds_params)
if self.module_import:
importlib.import_module(self.module_import)
self.dataset, info = tfds.load(
self.dataset_name, data_dir=self.data_dir, with_info=True)
if self.is_regression:
self._labels = None
else:
self._labels = list(range(info.features[self.label_key].num_classes))
def _process_tfds_params_str(self, params_str):
"""Extracts TFDS parameters from a comma-separated assignements string."""
dtype_map = {"int": int, "float": float}
cast_str_to_bool = lambda s: s.lower() not in ["false", "0"]
tuples = [x.split("=") for x in params_str.split(",")]
d = {k.strip(): v.strip() for k, v in tuples}
self.dataset_name = d["dataset"] # Required.
self.data_dir = d.get("data_dir", None)
self.module_import = d.get("module_import", None)
self.train_split = d.get("train_split", "train")
self.dev_split = d.get("dev_split", "validation")
self.test_split = d.get("test_split", "test")
self.text_key = d.get("text_key", "text")
self.text_b_key = d.get("text_b_key", None)
self.label_key = d.get("label_key", "label")
self.test_text_key = d.get("test_text_key", self.text_key)
self.test_text_b_key = d.get("test_text_b_key", self.text_b_key)
self.test_label = d.get("test_label", "test_example")
self.label_type = dtype_map[d.get("label_type", "int")]
self.is_regression = cast_str_to_bool(d.get("is_regression", "False"))
self.weight_key = d.get("weight_key", None)
self.skip_label = d.get("skip_label", None)
if self.skip_label is not None:
self.skip_label = self.label_type(self.skip_label)
def get_train_examples(self, data_dir):
assert data_dir is None
return self._create_examples(self.train_split, "train")
def get_dev_examples(self, data_dir):
assert data_dir is None
return self._create_examples(self.dev_split, "dev")
def get_test_examples(self, data_dir):
assert data_dir is None
return self._create_examples(self.test_split, "test")
def get_labels(self):
return self._labels
def get_processor_name(self):
return "TFDS_" + self.dataset_name
def _create_examples(self, split_name, set_type):
"""Creates examples for the training/dev/test sets."""
if split_name not in self.dataset:
raise ValueError("Split {} not available.".format(split_name))
dataset = self.dataset[split_name].as_numpy_iterator()
examples = []
text_b, weight = None, None
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = self.process_text_fn(example[self.test_text_key])
if self.test_text_b_key:
text_b = self.process_text_fn(example[self.test_text_b_key])
label = self.test_label
else:
text_a = self.process_text_fn(example[self.text_key])
if self.text_b_key:
text_b = self.process_text_fn(example[self.text_b_key])
label = self.label_type(example[self.label_key])
if self.skip_label is not None and label == self.skip_label:
continue
if self.weight_key:
weight = float(example[self.weight_key])
examples.append(
InputExample(
guid=guid,
text_a=text_a,
text_b=text_b,
label=label,
weight=weight))
return examples
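# Illustrative sketch (an unused helper added for exposition): it shows how a
# tfds_params string of comma-separated assignments, as documented on
# TfdsProcessor, breaks down into a plain dict before the defaults above are
# applied. The sample string is an arbitrary example, not a required value.
def _example_parse_tfds_params(
    params_str="dataset=glue/mrpc,text_key=sentence1,text_b_key=sentence2"):
  """Illustrative only: mirrors the split/strip logic of _process_tfds_params_str."""
  tuples = [assignment.split("=") for assignment in params_str.split(",")]
  params = {key.strip(): value.strip() for key, value in tuples}
  # params == {'dataset': 'glue/mrpc', 'text_key': 'sentence1',
  #            'text_b_key': 'sentence2'}; every other option falls back to
  # the defaults documented in the class docstring (e.g. label_key='label').
  return params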
class WnliProcessor(DefaultGLUEDataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "WNLI"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"glue/wnli", split=set_type, try_gcs=True).as_numpy_iterator()
dataset = list(dataset)
dataset.sort(key=lambda x: x["idx"])
examples = []
for i, example in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
label = "0"
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
if set_type != "test":
label = str(example["label"])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label,
weight=None))
return examples
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
supported_languages = [
"ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr",
"ur", "vi", "zh"
]
def __init__(self,
language="en",
process_text_fn=tokenization.convert_to_unicode):
super(XnliProcessor, self).__init__(process_text_fn)
if language == "all":
self.languages = XnliProcessor.supported_languages
elif language not in XnliProcessor.supported_languages:
raise ValueError("language %s is not supported for XNLI task." % language)
else:
self.languages = [language]
def get_train_examples(self, data_dir):
"""See base class."""
lines = []
for language in self.languages:
# Skips the header.
lines.extend(
self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % language))[1:])
examples = []
for i, line in enumerate(lines):
guid = "train-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
if label == self.process_text_fn("contradictory"):
label = self.process_text_fn("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for i, line in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % i
text_a = self.process_text_fn(line[6])
text_b = self.process_text_fn(line[7])
label = self.process_text_fn(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.test.tsv"))
examples_by_lang = {k: [] for k in XnliProcessor.supported_languages}
for i, line in enumerate(lines):
if i == 0:
continue
guid = "test-%d" % i
language = self.process_text_fn(line[0])
text_a = self.process_text_fn(line[6])
text_b = self.process_text_fn(line[7])
label = self.process_text_fn(line[1])
examples_by_lang[language].append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples_by_lang
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
@staticmethod
def get_processor_name():
"""See base class."""
return "XNLI"
class XtremePawsxProcessor(DataProcessor):
"""Processor for the XTREME PAWS-X data set."""
supported_languages = ["de", "en", "es", "fr", "ja", "ko", "zh"]
def __init__(self,
process_text_fn=tokenization.convert_to_unicode,
translated_data_dir=None,
only_use_en_dev=True):
"""See base class.
Args:
process_text_fn: See base class.
translated_data_dir: If specified, will also include translated data in
the training and testing data.
only_use_en_dev: If True, only use english dev data. Otherwise, use dev
data from all languages.
"""
super(XtremePawsxProcessor, self).__init__(process_text_fn)
self.translated_data_dir = translated_data_dir
self.only_use_en_dev = only_use_en_dev
def get_train_examples(self, data_dir):
"""See base class."""
examples = []
if self.translated_data_dir is None:
lines = self._read_tsv(os.path.join(data_dir, "train-en.tsv"))
for i, line in enumerate(lines):
guid = "train-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
else:
for lang in self.supported_languages:
lines = self._read_tsv(
os.path.join(self.translated_data_dir, "translate-train",
f"en-{lang}-translated.tsv"))
for i, line in enumerate(lines):
guid = f"train-{lang}-{i}"
text_a = self.process_text_fn(line[2])
text_b = self.process_text_fn(line[3])
label = self.process_text_fn(line[4])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
examples = []
if self.only_use_en_dev:
lines = self._read_tsv(os.path.join(data_dir, "dev-en.tsv"))
for i, line in enumerate(lines):
guid = "dev-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
else:
for lang in self.supported_languages:
lines = self._read_tsv(os.path.join(data_dir, f"dev-{lang}.tsv"))
for i, line in enumerate(lines):
guid = f"dev-{lang}-{i}"
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples_by_lang = {}
for lang in self.supported_languages:
examples_by_lang[lang] = []
lines = self._read_tsv(os.path.join(data_dir, f"test-{lang}.tsv"))
for i, line in enumerate(lines):
guid = f"test-{lang}-{i}"
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = "0"
examples_by_lang[lang].append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
if self.translated_data_dir is not None:
for lang in self.supported_languages:
if lang == "en":
continue
examples_by_lang[f"{lang}-en"] = []
lines = self._read_tsv(
os.path.join(self.translated_data_dir, "translate-test",
f"test-{lang}-en-translated.tsv"))
for i, line in enumerate(lines):
guid = f"test-{lang}-en-{i}"
text_a = self.process_text_fn(line[2])
text_b = self.process_text_fn(line[3])
label = "0"
examples_by_lang[f"{lang}-en"].append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples_by_lang
def get_labels(self):
"""See base class."""
return ["0", "1"]
@staticmethod
def get_processor_name():
"""See base class."""
return "XTREME-PAWS-X"
class XtremeXnliProcessor(DataProcessor):
"""Processor for the XTREME XNLI data set."""
supported_languages = [
"ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr",
"ur", "vi", "zh"
]
def __init__(self,
process_text_fn=tokenization.convert_to_unicode,
translated_data_dir=None,
only_use_en_dev=True):
"""See base class.
Args:
process_text_fn: See base class.
translated_data_dir: If specified, will also include translated data in
the training data.
only_use_en_dev: If True, only use english dev data. Otherwise, use dev
data from all languages.
"""
super(XtremeXnliProcessor, self).__init__(process_text_fn)
self.translated_data_dir = translated_data_dir
self.only_use_en_dev = only_use_en_dev
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "train-en.tsv"))
examples = []
if self.translated_data_dir is None:
for i, line in enumerate(lines):
guid = "train-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
if label == self.process_text_fn("contradictory"):
label = self.process_text_fn("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
else:
for lang in self.supported_languages:
lines = self._read_tsv(
os.path.join(self.translated_data_dir, "translate-train",
f"en-{lang}-translated.tsv"))
for i, line in enumerate(lines):
guid = f"train-{lang}-{i}"
text_a = self.process_text_fn(line[2])
text_b = self.process_text_fn(line[3])
label = self.process_text_fn(line[4])
if label == self.process_text_fn("contradictory"):
label = self.process_text_fn("contradiction")
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
examples = []
if self.only_use_en_dev:
lines = self._read_tsv(os.path.join(data_dir, "dev-en.tsv"))
for i, line in enumerate(lines):
guid = "dev-%d" % i
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
else:
for lang in self.supported_languages:
lines = self._read_tsv(os.path.join(data_dir, f"dev-{lang}.tsv"))
for i, line in enumerate(lines):
guid = f"dev-{lang}-{i}"
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = self.process_text_fn(line[2])
if label == self.process_text_fn("contradictory"):
label = self.process_text_fn("contradiction")
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
examples_by_lang = {}
for lang in self.supported_languages:
examples_by_lang[lang] = []
lines = self._read_tsv(os.path.join(data_dir, f"test-{lang}.tsv"))
for i, line in enumerate(lines):
guid = f"test-{lang}-{i}"
text_a = self.process_text_fn(line[0])
text_b = self.process_text_fn(line[1])
label = "contradiction"
examples_by_lang[lang].append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
if self.translated_data_dir is not None:
for lang in self.supported_languages:
if lang == "en":
continue
examples_by_lang[f"{lang}-en"] = []
lines = self._read_tsv(
os.path.join(self.translated_data_dir, "translate-test",
f"test-{lang}-en-translated.tsv"))
for i, line in enumerate(lines):
guid = f"test-{lang}-en-{i}"
text_a = self.process_text_fn(line[2])
text_b = self.process_text_fn(line[3])
label = "contradiction"
examples_by_lang[f"{lang}-en"].append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples_by_lang
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
@staticmethod
def get_processor_name():
"""See base class."""
return "XTREME-XNLI"
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
label_map = {}
if label_list:
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
seg_id_a = 0
seg_id_b = 1
seg_id_cls = 0
seg_id_pad = 0
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(seg_id_cls)
for token in tokens_a:
tokens.append(token)
segment_ids.append(seg_id_a)
tokens.append("[SEP]")
segment_ids.append(seg_id_a)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(seg_id_b)
tokens.append("[SEP]")
segment_ids.append(seg_id_b)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(seg_id_pad)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label] if label_map else example.label
if ex_index < 5:
logging.info("*** Example ***")
logging.info("guid: %s", (example.guid))
logging.info("tokens: %s",
" ".join([tokenization.printable_text(x) for x in tokens]))
logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logging.info("label: %s (id = %s)", example.label, str(label_id))
logging.info("weight: %s", example.weight)
logging.info("example_id: %s", example.example_id)
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True,
weight=example.weight,
example_id=example.example_id)
return feature
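# Illustrative sketch (an unused helper added for exposition): a worked example
# of the [CLS]/[SEP] packing convention described in the long comment inside
# convert_single_example above. The token strings are made up; only the
# structure of the segment ids, mask and padding matters.
def _example_pair_packing(max_seq_length=16):
  """Illustrative only: [CLS] a ... [SEP] b ... [SEP] packing with padding."""
  tokens_a = ["is", "this", "jack", "##son", "##ville", "?"]
  tokens_b = ["no", "it", "is", "not", "."]
  tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
  # Segment 0 covers [CLS], sentence A and its trailing [SEP]; segment 1
  # covers sentence B and its trailing [SEP].
  segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
  input_mask = [1] * len(tokens)
  # Zero-pad the mask and segment ids out to max_seq_length, as above.
  pad = max_seq_length - len(tokens)
  input_mask += [0] * pad
  segment_ids += [0] * pad
  assert len(input_mask) == len(segment_ids) == max_seq_length
  return tokens, segment_ids, input_mask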
class AXgProcessor(DataProcessor):
"""Processor for the AXg dataset (SuperGLUE diagnostics dataset)."""
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "AX-g.jsonl")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
@staticmethod
def get_processor_name():
"""See base class."""
return "AXg"
def _create_examples(self, lines, set_type):
"""Creates examples for the training/dev/test sets."""
examples = []
for line in lines:
guid = "%s-%s" % (set_type, self.process_text_fn(str(line["idx"])))
text_a = self.process_text_fn(line["premise"])
text_b = self.process_text_fn(line["hypothesis"])
label = self.process_text_fn(line["label"])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class BoolQProcessor(DefaultGLUEDataProcessor):
"""Processor for the BoolQ dataset (SuperGLUE diagnostics dataset)."""
def get_labels(self):
"""See base class."""
return ["True", "False"]
@staticmethod
def get_processor_name():
"""See base class."""
return "BoolQ"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"super_glue/boolq", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for example in dataset:
guid = "%s-%s" % (set_type, self.process_text_fn(str(example["idx"])))
text_a = self.process_text_fn(example["question"])
text_b = self.process_text_fn(example["passage"])
label = "False"
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class CBProcessor(DefaultGLUEDataProcessor):
"""Processor for the CB dataset (SuperGLUE diagnostics dataset)."""
def get_labels(self):
"""See base class."""
return ["entailment", "neutral", "contradiction"]
@staticmethod
def get_processor_name():
"""See base class."""
return "CB"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
dataset = tfds.load(
"super_glue/cb", split=set_type, try_gcs=True).as_numpy_iterator()
examples = []
for example in dataset:
guid = "%s-%s" % (set_type, self.process_text_fn(str(example["idx"])))
text_a = self.process_text_fn(example["premise"])
text_b = self.process_text_fn(example["hypothesis"])
label = "entailment"
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class SuperGLUERTEProcessor(DefaultGLUEDataProcessor):
"""Processor for the RTE dataset (SuperGLUE version)."""
def get_labels(self):
"""See base class."""
# All datasets are converted to 2-class split, where for 3-class datasets we
# collapse neutral and contradiction into not_entailment.
return ["entailment", "not_entailment"]
@staticmethod
def get_processor_name():
"""See base class."""
return "RTESuperGLUE"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
examples = []
dataset = tfds.load(
"super_glue/rte", split=set_type, try_gcs=True).as_numpy_iterator()
for example in dataset:
guid = "%s-%s" % (set_type, self.process_text_fn(str(example["idx"])))
text_a = self.process_text_fn(example["premise"])
text_b = self.process_text_fn(example["hypothesis"])
label = "entailment"
if set_type != "test":
label = self.get_labels()[example["label"]]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WiCInputExample(InputExample):
"""Processor for the WiC dataset (SuperGLUE version)."""
def __init__(self,
guid,
text_a,
text_b=None,
label=None,
word=None,
weight=None,
example_id=None):
"""A single training/test example for simple seq regression/classification."""
super(WiCInputExample, self).__init__(guid, text_a, text_b, label, weight,
example_id)
self.word = word
class WiCProcessor(DefaultGLUEDataProcessor):
"""Processor for the RTE dataset (SuperGLUE version)."""
def get_labels(self):
"""Not used."""
return []
@staticmethod
def get_processor_name():
"""See base class."""
return "RTESuperGLUE"
def _create_examples_tfds(self, set_type):
"""Creates examples for the training/dev/test sets."""
examples = []
dataset = tfds.load(
"super_glue/wic", split=set_type, try_gcs=True).as_numpy_iterator()
for example in dataset:
guid = "%s-%s" % (set_type, self.process_text_fn(str(example["idx"])))
text_a = self.process_text_fn(example["sentence1"])
text_b = self.process_text_fn(example["sentence2"])
word = self.process_text_fn(example["word"])
label = 0
if set_type != "test":
label = example["label"]
examples.append(
WiCInputExample(
guid=guid, text_a=text_a, text_b=text_b, word=word, label=label))
return examples
def featurize_example(self, ex_index, example, label_list, max_seq_length,
tokenizer):
"""Here we concate sentence1, sentence2, word together with [SEP] tokens."""
del label_list
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = tokenizer.tokenize(example.text_b)
tokens_word = tokenizer.tokenize(example.word)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP], [SEP] with "- 4"
    # Only the two sentence token lists are truncated; the word tokens are kept.
_truncate_seq_pair(tokens_a, tokens_b,
max_seq_length - 4 - len(tokens_word))
seg_id_a = 0
seg_id_b = 1
seg_id_c = 2
seg_id_cls = 0
seg_id_pad = 0
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(seg_id_cls)
for token in tokens_a:
tokens.append(token)
segment_ids.append(seg_id_a)
tokens.append("[SEP]")
segment_ids.append(seg_id_a)
for token in tokens_b:
tokens.append(token)
segment_ids.append(seg_id_b)
tokens.append("[SEP]")
segment_ids.append(seg_id_b)
for token in tokens_word:
tokens.append(token)
segment_ids.append(seg_id_c)
tokens.append("[SEP]")
segment_ids.append(seg_id_c)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(seg_id_pad)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = example.label
if ex_index < 5:
logging.info("*** Example ***")
logging.info("guid: %s", (example.guid))
logging.info("tokens: %s",
" ".join([tokenization.printable_text(x) for x in tokens]))
logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logging.info("label: %s (id = %s)", example.label, str(label_id))
logging.info("weight: %s", example.weight)
logging.info("example_id: %s", example.example_id)
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True,
weight=example.weight,
example_id=example.example_id)
return feature
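# Illustrative sketch (an unused helper added for exposition): unlike the
# standard sentence-pair packing, WiCProcessor.featurize_example above emits
# three segments -- sentence1 (type id 0), sentence2 (type id 1) and the target
# word (type id 2), each followed by [SEP]. The toy tokens below are made up.
def _example_wic_segment_layout():
  """Illustrative only: token/type-id layout for a toy WiC example."""
  tokens_a, tokens_b, tokens_word = ["river", "bank"], ["bank", "deposit"], ["bank"]
  tokens = (["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"] +
            tokens_word + ["[SEP]"])
  segment_ids = ([0] * (len(tokens_a) + 2)        # [CLS], sentence1, [SEP]
                 + [1] * (len(tokens_b) + 1)      # sentence2, [SEP]
                 + [2] * (len(tokens_word) + 1))  # target word, [SEP]
  assert len(tokens) == len(segment_ids)
  return tokens, segment_ids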
def file_based_convert_examples_to_features(examples,
label_list,
max_seq_length,
tokenizer,
output_file,
label_type=None,
featurize_fn=None):
"""Convert a set of `InputExample`s to a TFRecord file."""
tf.io.gfile.makedirs(os.path.dirname(output_file))
writer = tf.io.TFRecordWriter(output_file)
for ex_index, example in enumerate(examples):
if ex_index % 10000 == 0:
logging.info("Writing example %d of %d", ex_index, len(examples))
if featurize_fn:
feature = featurize_fn(ex_index, example, label_list, max_seq_length,
tokenizer)
else:
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if label_type is not None and label_type == float:
features["label_ids"] = create_float_feature([feature.label_id])
elif feature.label_id is not None:
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
if feature.weight is not None:
features["weight"] = create_float_feature([feature.weight])
if feature.example_id is not None:
features["example_id"] = create_int_feature([feature.example_id])
else:
features["example_id"] = create_int_feature([ex_index])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
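# Illustrative sketch (an unused helper added for exposition): the feature spec
# needed to parse records written by file_based_convert_examples_to_features
# back out of the TFRecord file; the unit tests for this module build an
# equivalent spec. `output_file` and `max_seq_length` must match the values
# used when writing, and `regression` mirrors the label_type branch above.
def _example_read_classifier_tfrecord(output_file, max_seq_length,
                                      regression=False):
  """Illustrative only: parses records produced by the writer above."""
  import tensorflow as tf  # Local import keeps the sketch self-contained.
  name_to_features = {
      "input_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
      "input_mask": tf.io.FixedLenFeature([max_seq_length], tf.int64),
      "segment_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64),
      # Labels are written as a float feature for regression tasks and as an
      # int64 feature otherwise.
      "label_ids": tf.io.FixedLenFeature(
          [], tf.float32 if regression else tf.int64),
  }
  dataset = tf.data.TFRecordDataset(output_file)
  return dataset.map(
      lambda record: tf.io.parse_single_example(record, name_to_features))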
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
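# Illustrative sketch (an unused helper added for exposition): a tiny worked
# example of the heuristic above -- tokens are popped one at a time, always
# from the currently longer sequence, until the combined length fits.
def _example_truncate_seq_pair():
  """Illustrative only: shows which side _truncate_seq_pair shortens."""
  tokens_a = list("abcdef")  # length 6
  tokens_b = list("xyz")     # length 3
  _truncate_seq_pair(tokens_a, tokens_b, max_length=7)
  # Two tokens were popped, both from the longer sequence:
  # tokens_a -> ['a', 'b', 'c', 'd'], tokens_b -> ['x', 'y', 'z'].
  return tokens_a, tokens_b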
def generate_tf_record_from_data_file(processor,
data_dir,
tokenizer,
train_data_output_path=None,
eval_data_output_path=None,
test_data_output_path=None,
max_seq_length=128):
"""Generates and saves training data into a tf record file.
Args:
processor: Input processor object to be used for generating data. Subclass
of `DataProcessor`.
data_dir: Directory that contains train/eval/test data to process.
tokenizer: The tokenizer to be applied on the data.
train_data_output_path: Output to which processed tf record for training
will be saved.
eval_data_output_path: Output to which processed tf record for evaluation
will be saved.
test_data_output_path: Output to which processed tf record for testing
will be saved. Must be a pattern template with {} if processor has
language specific test data.
max_seq_length: Maximum sequence length of the to be generated
training/eval data.
Returns:
A dictionary containing input meta data.
"""
assert train_data_output_path or eval_data_output_path
label_list = processor.get_labels()
label_type = getattr(processor, "label_type", None)
is_regression = getattr(processor, "is_regression", False)
has_sample_weights = getattr(processor, "weight_key", False)
num_training_data = 0
if train_data_output_path:
train_input_data_examples = processor.get_train_examples(data_dir)
file_based_convert_examples_to_features(train_input_data_examples,
label_list, max_seq_length,
tokenizer, train_data_output_path,
label_type,
processor.featurize_example)
num_training_data = len(train_input_data_examples)
if eval_data_output_path:
eval_input_data_examples = processor.get_dev_examples(data_dir)
file_based_convert_examples_to_features(eval_input_data_examples,
label_list, max_seq_length,
tokenizer, eval_data_output_path,
label_type,
processor.featurize_example)
meta_data = {
"processor_type": processor.get_processor_name(),
"train_data_size": num_training_data,
"max_seq_length": max_seq_length,
}
if test_data_output_path:
test_input_data_examples = processor.get_test_examples(data_dir)
if isinstance(test_input_data_examples, dict):
for language, examples in test_input_data_examples.items():
file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer,
test_data_output_path.format(language), label_type,
processor.featurize_example)
meta_data["test_{}_data_size".format(language)] = len(examples)
else:
file_based_convert_examples_to_features(test_input_data_examples,
label_list, max_seq_length,
tokenizer, test_data_output_path,
label_type,
processor.featurize_example)
meta_data["test_data_size"] = len(test_input_data_examples)
if is_regression:
meta_data["task_type"] = "bert_regression"
meta_data["label_type"] = {int: "int", float: "float"}[label_type]
else:
meta_data["task_type"] = "bert_classification"
meta_data["num_labels"] = len(processor.get_labels())
if has_sample_weights:
meta_data["has_sample_weights"] = True
if eval_data_output_path:
meta_data["eval_data_size"] = len(eval_input_data_examples)
return meta_data
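# Illustrative sketch (an unused helper added for exposition): it wires a
# TFDS-backed processor, a FullTokenizer and output paths into
# generate_tf_record_from_data_file. The vocabulary file and output directory
# are placeholders to be supplied by the caller, and any other processor
# defined above could stand in for SstProcessor.
def _example_generate_glue_records(output_dir, vocab_file):
  """Illustrative only: end-to-end TFRecord generation for a GLUE task."""
  tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case=True)
  processor = SstProcessor()  # TFDS-backed, so data_dir is passed as None.
  return generate_tf_record_from_data_file(
      processor,
      data_dir=None,
      tokenizer=tokenizer,
      train_data_output_path=os.path.join(output_dir, "sst2_train.tf_record"),
      eval_data_output_path=os.path.join(output_dir, "sst2_eval.tf_record"),
      max_seq_length=128)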
| 57,071 | 34.382517 | 82 | py |
models | models-master/official/nlp/data/sentence_prediction_dataloader_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.sentence_prediction_dataloader."""
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from sentencepiece import SentencePieceTrainer
from official.nlp.data import sentence_prediction_dataloader as loader
def _create_fake_preprocessed_dataset(output_path, seq_length, label_type):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
for _ in range(100):
features = {}
input_ids = np.random.randint(100, size=(seq_length))
features['input_ids'] = create_int_feature(input_ids)
features['input_mask'] = create_int_feature(np.ones_like(input_ids))
features['segment_ids'] = create_int_feature(np.ones_like(input_ids))
if label_type == 'int':
features['label_ids'] = create_int_feature([1])
elif label_type == 'float':
features['label_ids'] = create_float_feature([0.5])
else:
raise ValueError('Unsupported label_type: %s' % label_type)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def _create_fake_raw_dataset(output_path, text_fields, label_type):
"""Creates a fake tf record file."""
writer = tf.io.TFRecordWriter(output_path)
def create_str_feature(value):
f = tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
return f
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
for _ in range(100):
features = {}
for text_field in text_fields:
features[text_field] = create_str_feature([b'hello world'])
if label_type == 'int':
features['label'] = create_int_feature([0])
elif label_type == 'float':
features['label'] = create_float_feature([0.5])
else:
raise ValueError('Unexpected label_type: %s' % label_type)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def _create_fake_sentencepiece_model(output_dir):
vocab = ['a', 'b', 'c', 'd', 'e', 'abc', 'def', 'ABC', 'DEF']
model_prefix = os.path.join(output_dir, 'spm_model')
input_text_file_path = os.path.join(output_dir, 'train_input.txt')
with tf.io.gfile.GFile(input_text_file_path, 'w') as f:
f.write(' '.join(vocab + ['\n']))
# Add 7 more tokens: <pad>, <unk>, [CLS], [SEP], [MASK], <s>, </s>.
full_vocab_size = len(vocab) + 7
flags = dict(
model_prefix=model_prefix,
model_type='word',
input=input_text_file_path,
pad_id=0,
unk_id=1,
control_symbols='[CLS],[SEP],[MASK]',
vocab_size=full_vocab_size,
bos_id=full_vocab_size - 2,
eos_id=full_vocab_size - 1)
SentencePieceTrainer.Train(' '.join(
['--{}={}'.format(k, v) for k, v in flags.items()]))
return model_prefix + '.model'
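# Illustrative sketch (an unused helper added for exposition): it loads the toy
# model trained above with the sentencepiece Python API to sanity-check its
# vocabulary size (9 words plus the 7 special tokens added during training).
def _example_load_fake_sentencepiece_model(model_path):
  """Illustrative only: inspects the toy SentencePiece model produced above."""
  import sentencepiece as spm  # Local import keeps the sketch self-contained.
  sp = spm.SentencePieceProcessor()
  sp.Load(model_path)
  return sp.GetPieceSize()  # Expected to equal 16 for the model above.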
def _create_fake_vocab_file(vocab_file_path):
tokens = ['[PAD]']
for i in range(1, 100):
tokens.append('[unused%d]' % i)
tokens.extend(['[UNK]', '[CLS]', '[SEP]', '[MASK]', 'hello', 'world'])
with tf.io.gfile.GFile(vocab_file_path, 'w') as outfile:
outfile.write('\n'.join(tokens))
class SentencePredictionDataTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(('int', tf.int32), ('float', tf.float32))
def test_load_dataset(self, label_type, expected_label_type):
input_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
batch_size = 10
seq_length = 128
_create_fake_preprocessed_dataset(input_path, seq_length, label_type)
data_config = loader.SentencePredictionDataConfig(
input_path=input_path,
seq_length=seq_length,
global_batch_size=batch_size,
label_type=label_type)
dataset = loader.SentencePredictionDataLoader(data_config).load()
features = next(iter(dataset))
self.assertCountEqual(
['input_word_ids', 'input_type_ids', 'input_mask', 'label_ids'],
features.keys())
self.assertEqual(features['input_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['input_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['input_type_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['label_ids'].shape, (batch_size,))
self.assertEqual(features['label_ids'].dtype, expected_label_type)
def test_load_dataset_with_label_mapping(self):
input_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
batch_size = 10
seq_length = 128
_create_fake_preprocessed_dataset(input_path, seq_length, 'int')
data_config = loader.SentencePredictionDataConfig(
input_path=input_path,
seq_length=seq_length,
global_batch_size=batch_size,
label_type='int',
label_name=('label_ids', 'next_sentence_labels'))
dataset = loader.SentencePredictionDataLoader(data_config).load()
features = next(iter(dataset))
self.assertCountEqual([
'input_word_ids', 'input_mask', 'input_type_ids',
'next_sentence_labels', 'label_ids'
], features.keys())
self.assertEqual(features['input_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['input_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['input_type_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['label_ids'].shape, (batch_size,))
self.assertEqual(features['label_ids'].dtype, tf.int32)
self.assertEqual(features['next_sentence_labels'].shape, (batch_size,))
self.assertEqual(features['next_sentence_labels'].dtype, tf.int32)
class SentencePredictionTfdsDataLoaderTest(tf.test.TestCase,
parameterized.TestCase):
@parameterized.parameters(True, False)
def test_python_wordpiece_preprocessing(self, use_tfds):
batch_size = 10
seq_length = 256 # Non-default value.
lower_case = True
tf_record_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
text_fields = ['sentence1', 'sentence2']
if not use_tfds:
_create_fake_raw_dataset(tf_record_path, text_fields, label_type='int')
vocab_file_path = os.path.join(self.get_temp_dir(), 'vocab.txt')
_create_fake_vocab_file(vocab_file_path)
data_config = loader.SentencePredictionTextDataConfig(
input_path='' if use_tfds else tf_record_path,
tfds_name='glue/mrpc' if use_tfds else '',
tfds_split='train' if use_tfds else '',
text_fields=text_fields,
global_batch_size=batch_size,
seq_length=seq_length,
is_training=True,
lower_case=lower_case,
vocab_file=vocab_file_path)
dataset = loader.SentencePredictionTextDataLoader(data_config).load()
features = next(iter(dataset))
label_field = data_config.label_field
expected_keys = [
'input_word_ids', 'input_type_ids', 'input_mask', label_field
]
if use_tfds:
expected_keys += ['idx']
self.assertCountEqual(expected_keys, features.keys())
self.assertEqual(features['input_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['input_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['input_type_ids'].shape, (batch_size, seq_length))
self.assertEqual(features[label_field].shape, (batch_size,))
@parameterized.parameters(True, False)
def test_python_sentencepiece_preprocessing(self, use_tfds):
batch_size = 10
seq_length = 256 # Non-default value.
lower_case = True
tf_record_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
text_fields = ['sentence1', 'sentence2']
if not use_tfds:
_create_fake_raw_dataset(tf_record_path, text_fields, label_type='int')
sp_model_file_path = _create_fake_sentencepiece_model(self.get_temp_dir())
data_config = loader.SentencePredictionTextDataConfig(
input_path='' if use_tfds else tf_record_path,
tfds_name='glue/mrpc' if use_tfds else '',
tfds_split='train' if use_tfds else '',
text_fields=text_fields,
global_batch_size=batch_size,
seq_length=seq_length,
is_training=True,
lower_case=lower_case,
tokenization='SentencePiece',
vocab_file=sp_model_file_path,
)
dataset = loader.SentencePredictionTextDataLoader(data_config).load()
features = next(iter(dataset))
label_field = data_config.label_field
expected_keys = [
'input_word_ids', 'input_type_ids', 'input_mask', label_field
]
if use_tfds:
expected_keys += ['idx']
self.assertCountEqual(expected_keys, features.keys())
self.assertEqual(features['input_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['input_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['input_type_ids'].shape, (batch_size, seq_length))
self.assertEqual(features[label_field].shape, (batch_size,))
@parameterized.parameters(True, False)
def test_saved_model_preprocessing(self, use_tfds):
batch_size = 10
seq_length = 256 # Non-default value.
tf_record_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
text_fields = ['sentence1', 'sentence2']
if not use_tfds:
_create_fake_raw_dataset(tf_record_path, text_fields, label_type='float')
vocab_file_path = os.path.join(self.get_temp_dir(), 'vocab.txt')
_create_fake_vocab_file(vocab_file_path)
data_config = loader.SentencePredictionTextDataConfig(
input_path='' if use_tfds else tf_record_path,
tfds_name='glue/mrpc' if use_tfds else '',
tfds_split='train' if use_tfds else '',
text_fields=text_fields,
global_batch_size=batch_size,
seq_length=seq_length,
is_training=True,
preprocessing_hub_module_url=(
'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'),
label_type='int' if use_tfds else 'float',
)
dataset = loader.SentencePredictionTextDataLoader(data_config).load()
features = next(iter(dataset))
label_field = data_config.label_field
expected_keys = [
'input_word_ids', 'input_type_ids', 'input_mask', label_field
]
if use_tfds:
expected_keys += ['idx']
self.assertCountEqual(expected_keys, features.keys())
self.assertEqual(features['input_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['input_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['input_type_ids'].shape, (batch_size, seq_length))
self.assertEqual(features[label_field].shape, (batch_size,))
if __name__ == '__main__':
tf.test.main()
| 11,662 | 39.079038 | 80 | py |
models | models-master/official/nlp/data/sentence_retrieval_lib.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT library to process data for cross lingual sentence retrieval task."""
import os
from absl import logging
from official.nlp.data import classifier_data_lib
from official.nlp.tools import tokenization
class BuccProcessor(classifier_data_lib.DataProcessor):
"""Procssor for Xtreme BUCC data set."""
supported_languages = ["de", "fr", "ru", "zh"]
def __init__(self, process_text_fn=tokenization.convert_to_unicode):
super(BuccProcessor, self).__init__(process_text_fn)
self.languages = BuccProcessor.supported_languages
def get_dev_examples(self, data_dir, file_pattern):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, file_pattern.format("dev"))),
"sample")
def get_test_examples(self, data_dir, file_pattern):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, file_pattern.format("test"))),
"test")
@staticmethod
def get_processor_name():
"""See base class."""
return "BUCC"
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
example_id = int(line[0].split("-")[1])
text_a = self.process_text_fn(line[1])
examples.append(
classifier_data_lib.InputExample(
guid=guid, text_a=text_a, example_id=example_id))
return examples
class TatoebaProcessor(classifier_data_lib.DataProcessor):
"""Procssor for Xtreme Tatoeba data set."""
supported_languages = [
"af", "ar", "bg", "bn", "de", "el", "es", "et", "eu", "fa", "fi", "fr",
"he", "hi", "hu", "id", "it", "ja", "jv", "ka", "kk", "ko", "ml", "mr",
"nl", "pt", "ru", "sw", "ta", "te", "th", "tl", "tr", "ur", "vi", "zh"
]
def __init__(self, process_text_fn=tokenization.convert_to_unicode):
super(TatoebaProcessor, self).__init__(process_text_fn)
self.languages = TatoebaProcessor.supported_languages
def get_test_examples(self, data_dir, file_path):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, file_path)), "test")
@staticmethod
def get_processor_name():
"""See base class."""
return "TATOEBA"
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = self.process_text_fn(line[0])
examples.append(
classifier_data_lib.InputExample(
guid=guid, text_a=text_a, example_id=i))
return examples
def generate_sentence_retrevial_tf_record(processor,
data_dir,
tokenizer,
eval_data_output_path=None,
test_data_output_path=None,
max_seq_length=128):
"""Generates the tf records for retrieval tasks.
Args:
processor: Input processor object to be used for generating data. Subclass
of `DataProcessor`.
    data_dir: Directory that contains the eval/test data to process. Data files
      should be in the TSV form expected by the processor.
tokenizer: The tokenizer to be applied on the data.
eval_data_output_path: Output to which processed tf record for evaluation
will be saved.
test_data_output_path: Output to which processed tf record for testing
will be saved. Must be a pattern template with {} if processor has
language specific test data.
max_seq_length: Maximum sequence length of the to be generated
training/eval data.
Returns:
A dictionary containing input meta data.
"""
assert eval_data_output_path or test_data_output_path
if processor.get_processor_name() == "BUCC":
path_pattern = "{}-en.{{}}.{}"
if processor.get_processor_name() == "TATOEBA":
path_pattern = "{}-en.{}"
meta_data = {
"processor_type": processor.get_processor_name(),
"max_seq_length": max_seq_length,
"number_eval_data": {},
"number_test_data": {},
}
logging.info("Start to process %s task data", processor.get_processor_name())
for lang_a in processor.languages:
for lang_b in [lang_a, "en"]:
if eval_data_output_path:
eval_input_data_examples = processor.get_dev_examples(
data_dir, os.path.join(path_pattern.format(lang_a, lang_b)))
num_eval_data = len(eval_input_data_examples)
logging.info("Processing %d dev examples of %s-en.%s", num_eval_data,
lang_a, lang_b)
output_file = os.path.join(
eval_data_output_path,
"{}-en-{}.{}.tfrecords".format(lang_a, lang_b, "dev"))
classifier_data_lib.file_based_convert_examples_to_features(
eval_input_data_examples, None, max_seq_length, tokenizer,
output_file, None)
meta_data["number_eval_data"][f"{lang_a}-en.{lang_b}"] = num_eval_data
if test_data_output_path:
test_input_data_examples = processor.get_test_examples(
data_dir, os.path.join(path_pattern.format(lang_a, lang_b)))
num_test_data = len(test_input_data_examples)
logging.info("Processing %d test examples of %s-en.%s", num_test_data,
lang_a, lang_b)
output_file = os.path.join(
test_data_output_path,
"{}-en-{}.{}.tfrecords".format(lang_a, lang_b, "test"))
classifier_data_lib.file_based_convert_examples_to_features(
test_input_data_examples, None, max_seq_length, tokenizer,
output_file, None)
meta_data["number_test_data"][f"{lang_a}-en.{lang_b}"] = num_test_data
return meta_data
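# Illustrative sketch (an unused helper added for exposition): it shows how the
# two path_pattern templates above expand. The BUCC template keeps a literal
# '{}' so BuccProcessor can substitute the split name later, while a Tatoeba
# file name needs only a single format() call. The language codes are examples.
def _example_retrieval_path_patterns():
  """Illustrative only: expansion of the BUCC and Tatoeba path templates."""
  bucc_pattern = "{}-en.{{}}.{}"
  tatoeba_pattern = "{}-en.{}"
  bucc_path = bucc_pattern.format("de", "en")        # -> 'de-en.{}.en'
  bucc_dev_file = bucc_path.format("dev")            # -> 'de-en.dev.en'
  tatoeba_file = tatoeba_pattern.format("fr", "en")  # -> 'fr-en.en'
  return bucc_dev_file, tatoeba_file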
| 6,413 | 37.407186 | 79 | py |
models | models-master/official/nlp/data/classifier_data_lib_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for third_party.tensorflow_models.official.nlp.data.classifier_data_lib."""
import os
import tempfile
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_datasets as tfds
from official.nlp.data import classifier_data_lib
from official.nlp.tools import tokenization
def decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
return tf.io.parse_single_example(record, name_to_features)
class BertClassifierLibTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(BertClassifierLibTest, self).setUp()
self.model_dir = self.get_temp_dir()
self.processors = {
"CB": classifier_data_lib.CBProcessor,
"SUPERGLUE-RTE": classifier_data_lib.SuperGLUERTEProcessor,
"BOOLQ": classifier_data_lib.BoolQProcessor,
"WIC": classifier_data_lib.WiCProcessor,
}
vocab_tokens = [
"[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
"##ing", ","
]
with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens
]).encode("utf-8"))
vocab_file = vocab_writer.name
self.tokenizer = tokenization.FullTokenizer(vocab_file)
@parameterized.parameters(
{"task_type": "CB"},
{"task_type": "BOOLQ"},
{"task_type": "SUPERGLUE-RTE"},
{"task_type": "WIC"},
)
def test_generate_dataset_from_tfds_processor(self, task_type):
with tfds.testing.mock_data(num_examples=5):
output_path = os.path.join(self.model_dir, task_type)
processor = self.processors[task_type]()
classifier_data_lib.generate_tf_record_from_data_file(
processor,
None,
self.tokenizer,
train_data_output_path=output_path,
eval_data_output_path=output_path,
test_data_output_path=output_path)
files = tf.io.gfile.glob(output_path)
self.assertNotEmpty(files)
train_dataset = tf.data.TFRecordDataset(output_path)
seq_length = 128
label_type = tf.int64
name_to_features = {
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], label_type),
}
train_dataset = train_dataset.map(
lambda record: decode_record(record, name_to_features))
# If data is retrieved without error, then all requirements
# including data type/shapes are met.
_ = next(iter(train_dataset))
if __name__ == "__main__":
tf.test.main()
| 3,352 | 33.927083 | 84 | py |
models | models-master/official/nlp/data/pretrain_dataloader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads dataset for the BERT pretraining task."""
import dataclasses
from typing import Mapping, Optional
from absl import logging
import numpy as np
import tensorflow as tf
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
@dataclasses.dataclass
class BertPretrainDataConfig(cfg.DataConfig):
"""Data config for BERT pretraining task (tasks/masked_lm)."""
input_path: str = ''
global_batch_size: int = 512
is_training: bool = True
seq_length: int = 512
max_predictions_per_seq: int = 76
use_next_sentence_label: bool = True
use_position_id: bool = False
# Historically, BERT implementations take `input_ids` and `segment_ids` as
# feature names. Inside the TF Model Garden implementation, the Keras model
# inputs are set as `input_word_ids` and `input_type_ids`. When
# v2_feature_names is True, the data loader assumes the tf.Examples use
# `input_word_ids` and `input_type_ids` as keys.
use_v2_feature_names: bool = False
file_type: str = 'tfrecord'
@data_loader_factory.register_data_loader_cls(BertPretrainDataConfig)
class BertPretrainDataLoader(data_loader.DataLoader):
"""A class to load dataset for bert pretraining task."""
def __init__(self, params):
"""Inits `BertPretrainDataLoader` class.
Args:
params: A `BertPretrainDataConfig` object.
"""
self._params = params
self._seq_length = params.seq_length
self._max_predictions_per_seq = params.max_predictions_per_seq
self._use_next_sentence_label = params.use_next_sentence_label
self._use_position_id = params.use_position_id
def _name_to_features(self):
name_to_features = {
'input_mask':
tf.io.FixedLenFeature([self._seq_length], tf.int64),
'masked_lm_positions':
tf.io.FixedLenFeature([self._max_predictions_per_seq], tf.int64),
'masked_lm_ids':
tf.io.FixedLenFeature([self._max_predictions_per_seq], tf.int64),
'masked_lm_weights':
tf.io.FixedLenFeature([self._max_predictions_per_seq], tf.float32),
}
if self._params.use_v2_feature_names:
name_to_features.update({
'input_word_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'input_type_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
})
else:
name_to_features.update({
'input_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'segment_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
})
if self._use_next_sentence_label:
name_to_features['next_sentence_labels'] = tf.io.FixedLenFeature([1],
tf.int64)
if self._use_position_id:
name_to_features['position_ids'] = tf.io.FixedLenFeature(
[self._seq_length], tf.int64)
return name_to_features
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
name_to_features = self._name_to_features()
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _parse(self, record: Mapping[str, tf.Tensor]):
"""Parses raw tensors into a dict of tensors to be consumed by the model."""
x = {
'input_mask': record['input_mask'],
'masked_lm_positions': record['masked_lm_positions'],
'masked_lm_ids': record['masked_lm_ids'],
'masked_lm_weights': record['masked_lm_weights'],
}
if self._params.use_v2_feature_names:
x['input_word_ids'] = record['input_word_ids']
x['input_type_ids'] = record['input_type_ids']
else:
x['input_word_ids'] = record['input_ids']
x['input_type_ids'] = record['segment_ids']
if self._use_next_sentence_label:
x['next_sentence_labels'] = record['next_sentence_labels']
if self._use_position_id:
x['position_ids'] = record['position_ids']
return x
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
params=self._params,
dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
decoder_fn=self._decode,
parser_fn=self._parse)
return reader.read(input_context)
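# Illustrative sketch (an unused helper added for exposition): it constructs
# the config and loader defined above for a directory of pre-generated
# pretraining TFRecords. The input glob is a placeholder and the field values
# mirror common settings, not required ones.
def _example_build_bert_pretrain_dataset(input_path='/tmp/pretrain/*.tf_record'):
  """Illustrative only: builds a tf.data.Dataset of masked-LM features."""
  config = BertPretrainDataConfig(
      input_path=input_path,
      global_batch_size=32,
      seq_length=128,
      max_predictions_per_seq=20,
      use_next_sentence_label=True)
  # Each batch is a dict with input_word_ids, input_type_ids, input_mask,
  # masked_lm_{positions,ids,weights} and next_sentence_labels.
  return BertPretrainDataLoader(config).load()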
@dataclasses.dataclass
class XLNetPretrainDataConfig(cfg.DataConfig):
"""Data config for XLNet pretraining task.
Attributes:
input_path: See base class.
    global_batch_size: See base class.
is_training: See base class.
seq_length: The length of each sequence.
max_predictions_per_seq: The number of predictions per sequence.
reuse_length: The number of tokens in a previous segment to reuse. This
should be the same value used during pretrain data creation.
sample_strategy: The strategy used to sample factorization permutations.
Possible values: 'single_token', 'whole_word', 'token_span', 'word_span'.
min_num_tokens: The minimum number of tokens to sample in a span. This is
used when `sample_strategy` is 'token_span'.
max_num_tokens: The maximum number of tokens to sample in a span. This is
used when `sample_strategy` is 'token_span'.
min_num_words: The minimum number of words to sample in a span. This is used
when `sample_strategy` is 'word_span'.
max_num_words: The maximum number of words to sample in a span. This is used
when `sample_strategy` is 'word_span'.
permutation_size: The length of the longest permutation. This can be set to
`reuse_length`. This should NOT be greater than `reuse_length`, otherwise
this may introduce data leaks.
leak_ratio: The percentage of masked tokens that are leaked.
segment_sep_id: The ID of the SEP token used when preprocessing the dataset.
segment_cls_id: The ID of the CLS token used when preprocessing the dataset.
"""
input_path: str = ''
global_batch_size: int = 512
is_training: bool = True
seq_length: int = 512
max_predictions_per_seq: int = 76
reuse_length: int = 256
sample_strategy: str = 'word_span'
min_num_tokens: int = 1
max_num_tokens: int = 5
min_num_words: int = 1
max_num_words: int = 5
permutation_size: int = 256
leak_ratio: float = 0.1
segment_sep_id: int = 4
segment_cls_id: int = 3
@data_loader_factory.register_data_loader_cls(XLNetPretrainDataConfig)
class XLNetPretrainDataLoader(data_loader.DataLoader):
"""A class to load dataset for xlnet pretraining task."""
def __init__(self, params: XLNetPretrainDataConfig):
"""Inits `XLNetPretrainDataLoader` class.
Args:
params: A `XLNetPretrainDataConfig` object.
"""
self._params = params
self._seq_length = params.seq_length
self._max_predictions_per_seq = params.max_predictions_per_seq
self._reuse_length = params.reuse_length
self._num_replicas_in_sync = None
self._permutation_size = params.permutation_size
self._sep_id = params.segment_sep_id
self._cls_id = params.segment_cls_id
self._sample_strategy = params.sample_strategy
self._leak_ratio = params.leak_ratio
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
name_to_features = {
'input_word_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'input_type_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),
'boundary_indices': tf.io.VarLenFeature(tf.int64),
}
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _parse(self, record: Mapping[str, tf.Tensor]):
"""Parses raw tensors into a dict of tensors to be consumed by the model."""
x = {}
inputs = record['input_word_ids']
x['input_type_ids'] = record['input_type_ids']
if self._sample_strategy in ['whole_word', 'word_span']:
boundary = tf.sparse.to_dense(record['boundary_indices'])
else:
boundary = None
input_mask = self._online_sample_mask(inputs=inputs, boundary=boundary)
if self._reuse_length > 0:
if self._permutation_size > self._reuse_length:
logging.warning(
            '`permutation_size` is greater than `reuse_length` (%d > %d). '
            'This may introduce data leakage.', self._permutation_size,
self._reuse_length)
# Enable the memory mechanism.
# Permute the reuse and non-reuse segments separately.
non_reuse_len = self._seq_length - self._reuse_length
if not (self._reuse_length % self._permutation_size == 0 and
non_reuse_len % self._permutation_size == 0):
raise ValueError('`reuse_length` and `seq_length` should both be '
'a multiple of `permutation_size`.')
# Creates permutation mask and target mask for the first reuse_len tokens.
# The tokens in this part are reused from the last sequence.
perm_mask_0, target_mask_0, tokens_0, masked_0 = self._get_factorization(
inputs=inputs[:self._reuse_length],
input_mask=input_mask[:self._reuse_length])
# Creates permutation mask and target mask for the rest of tokens in
      # current example, which are the concatenation of two new segments.
perm_mask_1, target_mask_1, tokens_1, masked_1 = self._get_factorization(
inputs[self._reuse_length:], input_mask[self._reuse_length:])
perm_mask_0 = tf.concat([
perm_mask_0,
tf.zeros([self._reuse_length, non_reuse_len], dtype=tf.int32)
],
axis=1)
perm_mask_1 = tf.concat([
tf.ones([non_reuse_len, self._reuse_length], dtype=tf.int32),
perm_mask_1
],
axis=1)
perm_mask = tf.concat([perm_mask_0, perm_mask_1], axis=0)
target_mask = tf.concat([target_mask_0, target_mask_1], axis=0)
tokens = tf.concat([tokens_0, tokens_1], axis=0)
masked_tokens = tf.concat([masked_0, masked_1], axis=0)
else:
# Disable the memory mechanism.
if self._seq_length % self._permutation_size != 0:
raise ValueError('`seq_length` should be a multiple of '
'`permutation_size`.')
# Permute the entire sequence together
perm_mask, target_mask, tokens, masked_tokens = self._get_factorization(
inputs=inputs, input_mask=input_mask)
x['permutation_mask'] = tf.reshape(perm_mask,
[self._seq_length, self._seq_length])
x['input_word_ids'] = tokens
x['masked_tokens'] = masked_tokens
target = tokens
if self._max_predictions_per_seq is not None:
indices = tf.range(self._seq_length, dtype=tf.int32)
bool_target_mask = tf.cast(target_mask, tf.bool)
indices = tf.boolean_mask(indices, bool_target_mask)
# account for extra padding due to CLS/SEP.
actual_num_predict = tf.shape(indices)[0]
pad_len = self._max_predictions_per_seq - actual_num_predict
target_mapping = tf.one_hot(indices, self._seq_length, dtype=tf.int32)
paddings = tf.zeros([pad_len, self._seq_length],
dtype=target_mapping.dtype)
target_mapping = tf.concat([target_mapping, paddings], axis=0)
x['target_mapping'] = tf.reshape(
target_mapping, [self._max_predictions_per_seq, self._seq_length])
target = tf.boolean_mask(target, bool_target_mask)
paddings = tf.zeros([pad_len], dtype=target.dtype)
target = tf.concat([target, paddings], axis=0)
x['target'] = tf.reshape(target, [self._max_predictions_per_seq])
target_mask = tf.concat([
tf.ones([actual_num_predict], dtype=tf.int32),
tf.zeros([pad_len], dtype=tf.int32)
],
axis=0)
x['target_mask'] = tf.reshape(target_mask,
[self._max_predictions_per_seq])
else:
x['target'] = tf.reshape(target, [self._seq_length])
x['target_mask'] = tf.reshape(target_mask, [self._seq_length])
return x
def _index_pair_to_mask(self, begin_indices: tf.Tensor,
end_indices: tf.Tensor,
inputs: tf.Tensor) -> tf.Tensor:
"""Converts beginning and end indices into an actual mask."""
non_func_mask = tf.logical_and(
tf.not_equal(inputs, self._sep_id), tf.not_equal(inputs, self._cls_id))
all_indices = tf.where(
non_func_mask, tf.range(self._seq_length, dtype=tf.int32),
tf.constant(-1, shape=[self._seq_length], dtype=tf.int32))
candidate_matrix = tf.cast(
tf.logical_and(all_indices[None, :] >= begin_indices[:, None],
all_indices[None, :] < end_indices[:, None]), tf.float32)
cumsum_matrix = tf.reshape(
tf.cumsum(tf.reshape(candidate_matrix, [-1])), [-1, self._seq_length])
masked_matrix = tf.cast(cumsum_matrix <= self._max_predictions_per_seq,
tf.float32)
target_mask = tf.reduce_sum(candidate_matrix * masked_matrix, axis=0)
return tf.cast(target_mask, tf.bool)
def _single_token_mask(self, inputs: tf.Tensor) -> tf.Tensor:
"""Samples individual tokens as prediction targets."""
all_indices = tf.range(self._seq_length, dtype=tf.int32)
non_func_mask = tf.logical_and(
tf.not_equal(inputs, self._sep_id), tf.not_equal(inputs, self._cls_id))
non_func_indices = tf.boolean_mask(all_indices, non_func_mask)
masked_pos = tf.random.shuffle(non_func_indices)
masked_pos = tf.sort(masked_pos[:self._max_predictions_per_seq])
sparse_indices = tf.stack([tf.zeros_like(masked_pos), masked_pos], axis=-1)
sparse_indices = tf.cast(sparse_indices, tf.int64)
sparse_indices = tf.sparse.SparseTensor(
sparse_indices,
values=tf.ones_like(masked_pos),
dense_shape=(1, self._seq_length))
target_mask = tf.sparse.to_dense(sp_input=sparse_indices, default_value=0)
return tf.squeeze(tf.cast(target_mask, tf.bool))
def _whole_word_mask(self, inputs: tf.Tensor,
boundary: tf.Tensor) -> tf.Tensor:
"""Samples whole words as prediction targets."""
pair_indices = tf.concat([boundary[:-1, None], boundary[1:, None]], axis=1)
cand_pair_indices = tf.random.shuffle(
pair_indices)[:self._max_predictions_per_seq]
begin_indices = cand_pair_indices[:, 0]
end_indices = cand_pair_indices[:, 1]
return self._index_pair_to_mask(
begin_indices=begin_indices, end_indices=end_indices, inputs=inputs)
def _token_span_mask(self, inputs: tf.Tensor) -> tf.Tensor:
"""Samples token spans as prediction targets."""
min_num_tokens = self._params.min_num_tokens
max_num_tokens = self._params.max_num_tokens
mask_alpha = self._seq_length / self._max_predictions_per_seq
round_to_int = lambda x: tf.cast(tf.round(x), tf.int32)
# Sample span lengths from a zipf distribution
span_len_seq = np.arange(min_num_tokens, max_num_tokens + 1)
probs = np.array([1.0 / (i + 1) for i in span_len_seq])
probs /= np.sum(probs)
logits = tf.constant(np.log(probs), dtype=tf.float32)
span_lens = tf.random.categorical(
logits=logits[None],
num_samples=self._max_predictions_per_seq,
dtype=tf.int32,
)[0] + min_num_tokens
# Sample the ratio [0.0, 1.0) of left context lengths
span_lens_float = tf.cast(span_lens, tf.float32)
left_ratio = tf.random.uniform(
shape=[self._max_predictions_per_seq], minval=0.0, maxval=1.0)
left_ctx_len = left_ratio * span_lens_float * (mask_alpha - 1)
left_ctx_len = round_to_int(left_ctx_len)
# Compute the offset from left start to the right end
right_offset = round_to_int(span_lens_float * mask_alpha) - left_ctx_len
# Get the actual begin and end indices
begin_indices = (
tf.cumsum(left_ctx_len) + tf.cumsum(right_offset, exclusive=True))
end_indices = begin_indices + span_lens
# Remove out of range indices
valid_idx_mask = end_indices < self._seq_length
begin_indices = tf.boolean_mask(begin_indices, valid_idx_mask)
end_indices = tf.boolean_mask(end_indices, valid_idx_mask)
# Shuffle valid indices
num_valid = tf.cast(tf.shape(begin_indices)[0], tf.int32)
order = tf.random.shuffle(tf.range(num_valid, dtype=tf.int32))
begin_indices = tf.gather(begin_indices, order)
end_indices = tf.gather(end_indices, order)
return self._index_pair_to_mask(
begin_indices=begin_indices, end_indices=end_indices, inputs=inputs)
def _word_span_mask(self, inputs: tf.Tensor, boundary: tf.Tensor):
"""Sample whole word spans as prediction targets."""
min_num_words = self._params.min_num_words
max_num_words = self._params.max_num_words
# Note: 1.2 is the token-to-word ratio
mask_alpha = self._seq_length / self._max_predictions_per_seq / 1.2
round_to_int = lambda x: tf.cast(tf.round(x), tf.int32)
# Sample span lengths from a zipf distribution
span_len_seq = np.arange(min_num_words, max_num_words + 1)
probs = np.array([1.0 / (i + 1) for i in span_len_seq])
probs /= np.sum(probs)
logits = tf.constant(np.log(probs), dtype=tf.float32)
# Sample `num_predict` words here: note that this is over sampling
span_lens = tf.random.categorical(
logits=logits[None],
num_samples=self._max_predictions_per_seq,
dtype=tf.int32,
)[0] + min_num_words
# Sample the ratio [0.0, 1.0) of left context lengths
span_lens_float = tf.cast(span_lens, tf.float32)
left_ratio = tf.random.uniform(
shape=[self._max_predictions_per_seq], minval=0.0, maxval=1.0)
left_ctx_len = left_ratio * span_lens_float * (mask_alpha - 1)
left_ctx_len = round_to_int(left_ctx_len)
right_offset = round_to_int(span_lens_float * mask_alpha) - left_ctx_len
begin_indices = (
tf.cumsum(left_ctx_len) + tf.cumsum(right_offset, exclusive=True))
end_indices = begin_indices + span_lens
# Remove out of range indices
max_boundary_index = tf.cast(tf.shape(boundary)[0] - 1, tf.int32)
valid_idx_mask = end_indices < max_boundary_index
begin_indices = tf.boolean_mask(begin_indices, valid_idx_mask)
end_indices = tf.boolean_mask(end_indices, valid_idx_mask)
begin_indices = tf.gather(boundary, begin_indices)
end_indices = tf.gather(boundary, end_indices)
# Shuffle valid indices
num_valid = tf.cast(tf.shape(begin_indices)[0], tf.int32)
order = tf.random.shuffle(tf.range(num_valid, dtype=tf.int32))
begin_indices = tf.gather(begin_indices, order)
end_indices = tf.gather(end_indices, order)
return self._index_pair_to_mask(
begin_indices=begin_indices, end_indices=end_indices, inputs=inputs)
def _online_sample_mask(self, inputs: tf.Tensor,
boundary: tf.Tensor) -> tf.Tensor:
"""Samples target positions for predictions.
Descriptions of each strategy:
- 'single_token': Samples individual tokens as prediction targets.
- 'token_span': Samples spans of tokens as prediction targets.
- 'whole_word': Samples individual words as prediction targets.
- 'word_span': Samples spans of words as prediction targets.
Args:
inputs: The input tokens.
boundary: The `int` Tensor of indices indicating whole word boundaries.
This is used in 'whole_word' and 'word_span'
Returns:
The sampled `bool` input mask.
Raises:
`ValueError`: if `max_predictions_per_seq` is not set or if boundary is
not provided for 'whole_word' and 'word_span' sample strategies.
"""
if self._max_predictions_per_seq is None:
raise ValueError('`max_predictions_per_seq` must be set.')
if boundary is None and 'word' in self._sample_strategy:
raise ValueError('`boundary` must be provided for {} strategy'.format(
self._sample_strategy))
if self._sample_strategy == 'single_token':
return self._single_token_mask(inputs)
elif self._sample_strategy == 'token_span':
return self._token_span_mask(inputs)
elif self._sample_strategy == 'whole_word':
return self._whole_word_mask(inputs, boundary)
elif self._sample_strategy == 'word_span':
return self._word_span_mask(inputs, boundary)
else:
raise NotImplementedError('Invalid sample strategy.')
def _get_factorization(self, inputs: tf.Tensor, input_mask: tf.Tensor):
"""Samples a permutation of the factorization order.
Args:
inputs: the input tokens.
      input_mask: the `bool` Tensor of the same shape as `inputs`. If `True`,
        the corresponding position is selected for partial prediction.
Returns:
perm_mask: An `int32` Tensor of shape [seq_length, seq_length] consisting
of 0s and 1s. If perm_mask[i][j] == 0, then this means that the i-th
        token (in original order) cannot attend to the j-th token.
target_mask: An `int32` Tensor of shape [seq_len] consisting of 0s and 1s.
If target_mask[i] == 1, then the i-th token needs to be predicted and
the mask will be used as input. This token will be included in the loss.
If target_mask[i] == 0, then the token (or [SEP], [CLS]) will be used as
input. This token will not be included in the loss.
tokens: int32 Tensor of shape [seq_length].
masked_tokens: int32 Tensor of shape [seq_length].
"""
factorization_length = tf.shape(inputs)[0]
# Generate permutation indices
index = tf.range(factorization_length, dtype=tf.int32)
index = tf.transpose(tf.reshape(index, [-1, self._permutation_size]))
index = tf.random.shuffle(index)
index = tf.reshape(tf.transpose(index), [-1])
input_mask = tf.cast(input_mask, tf.bool)
# non-functional tokens
non_func_tokens = tf.logical_not(
tf.logical_or(
tf.equal(inputs, self._sep_id), tf.equal(inputs, self._cls_id)))
masked_tokens = tf.logical_and(input_mask, non_func_tokens)
non_masked_or_func_tokens = tf.logical_not(masked_tokens)
smallest_index = -2 * tf.ones([factorization_length], dtype=tf.int32)
# Similar to BERT, randomly leak some masked tokens
if self._leak_ratio > 0:
leak_tokens = tf.logical_and(
masked_tokens,
tf.random.uniform([factorization_length], maxval=1.0) <
self._leak_ratio)
can_attend_self = tf.logical_or(non_masked_or_func_tokens, leak_tokens)
else:
can_attend_self = non_masked_or_func_tokens
to_index = tf.where(can_attend_self, smallest_index, index)
from_index = tf.where(can_attend_self, to_index + 1, to_index)
# For masked tokens, can attend if i > j
# For context tokens, always can attend each other
can_attend = from_index[:, None] > to_index[None, :]
perm_mask = tf.cast(can_attend, tf.int32)
# Only masked tokens are included in the loss
target_mask = tf.cast(masked_tokens, tf.int32)
return perm_mask, target_mask, inputs, masked_tokens
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
if input_context:
self._num_replicas_in_sync = input_context.num_replicas_in_sync
reader = input_reader.InputReader(
params=self._params, decoder_fn=self._decode, parser_fn=self._parse)
return reader.read(input_context)
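# Editor's note: a minimal usage sketch (not part of the original module). The
# TFRecord path is a placeholder; it assumes records produced by
# create_xlnet_pretraining_data.py with a matching seq_length/reuse_length.
def _example_build_xlnet_pretrain_dataset():
  """Builds an XLNet pretraining dataset from a placeholder TFRecord path."""
  config = XLNetPretrainDataConfig(
      input_path='/tmp/xlnet_pretrain.tfrecord',  # Placeholder path.
      global_batch_size=16,
      seq_length=512,
      reuse_length=256,
      max_predictions_per_seq=76,
      sample_strategy='token_span')
  return XLNetPretrainDataLoader(config).load()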
| 24,593 | 40.684746 | 80 | py |
models | models-master/official/nlp/data/create_xlnet_pretraining_data_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.create_xlnet_pretraining_data."""
import os
import tempfile
from typing import List
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.data import create_xlnet_pretraining_data as cpd
_VOCAB_WORDS = ["vocab_1", "vocab_2"]
# pylint: disable=invalid-name
def _create_files(
temp_dir: str, file_contents: List[List[str]]) -> List[str]:
"""Writes arbitrary documents into files."""
root_dir = tempfile.mkdtemp(dir=temp_dir)
files = []
for i, file_content in enumerate(file_contents):
destination = os.path.join(root_dir, "%d.txt" % i)
with open(destination, "wb") as f:
for line in file_content:
f.write(line.encode("utf-8"))
files.append(destination)
return files
def _get_mock_tokenizer():
"""Creates a mock tokenizer."""
class MockSpieceModel:
"""Mock Spiece model for testing."""
def __init__(self):
self._special_piece_to_id = {
"<unk>": 0,
}
for piece in set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~')):
self._special_piece_to_id[piece] = 1
def EncodeAsPieces(self, inputs: str) -> List[str]:
return inputs
def SampleEncodeAsPieces(self,
inputs: str,
nbest_size: int,
theta: float) -> List[str]:
del nbest_size, theta
return inputs
def PieceToId(self, piece: str) -> int:
return ord(piece[0])
def IdToPiece(self, id_: int) -> str:
return chr(id_) * 3
class Tokenizer:
"""Mock Tokenizer for testing."""
def __init__(self):
self.sp_model = MockSpieceModel()
def convert_ids_to_tokens(self, ids: List[int]) -> List[str]:
return [self.sp_model.IdToPiece(id_) for id_ in ids]
return Tokenizer()
class PreprocessDataTest(tf.test.TestCase):
def test_remove_extraneous_space(self):
line = " abc "
output = cpd._preprocess_line(line)
self.assertEqual(output, "abc")
def test_symbol_replacements(self):
self.assertEqual(cpd._preprocess_line("``abc``"), "\"abc\"")
self.assertEqual(cpd._preprocess_line("''abc''"), "\"abc\"")
def test_accent_replacements(self):
self.assertEqual(cpd._preprocess_line("åbc"), "abc")
def test_lower_case(self):
self.assertEqual(cpd._preprocess_line("ABC", do_lower_case=True), "abc")
def test_end_to_end(self):
self.assertEqual(
cpd._preprocess_line("HelLo ``wórLd``", do_lower_case=True),
"hello \"world\"")
class PreprocessAndTokenizeFilesTest(tf.test.TestCase):
def test_basic_end_to_end(self):
documents = [
[
"This is sentence 1.\n",
"This is sentence 2.\n",
"Sentence 3 is what this is.\n",
],
[
"This is the second document.\n",
"This is the second line of the second document.\n"
],
]
input_files = _create_files(temp_dir=self.get_temp_dir(),
file_contents=documents)
all_data = cpd.preprocess_and_tokenize_input_files(
input_files=input_files,
tokenizer=_get_mock_tokenizer(),
log_example_freq=1)
self.assertEqual(len(all_data), len(documents))
for token_ids, sentence_ids in all_data:
self.assertEqual(len(token_ids), len(sentence_ids))
def test_basic_correctness(self):
documents = [["a\n", "b\n", "c\n"]]
input_files = _create_files(temp_dir=self.get_temp_dir(),
file_contents=documents)
all_data = cpd.preprocess_and_tokenize_input_files(
input_files=input_files,
tokenizer=_get_mock_tokenizer(),
log_example_freq=1)
token_ids, sentence_ids = all_data[0]
self.assertAllClose(token_ids, [97, 98, 99])
self.assertAllClose(sentence_ids, [True, False, True])
def test_correctness_with_spaces_and_accents(self):
documents = [[
" å \n",
"b \n",
" c \n",
]]
input_files = _create_files(temp_dir=self.get_temp_dir(),
file_contents=documents)
all_data = cpd.preprocess_and_tokenize_input_files(
input_files=input_files,
tokenizer=_get_mock_tokenizer(),
log_example_freq=1)
token_ids, sentence_ids = all_data[0]
self.assertAllClose(token_ids, [97, 98, 99])
self.assertAllClose(sentence_ids, [True, False, True])
class BatchReshapeTests(tf.test.TestCase):
def test_basic_functionality(self):
per_host_batch_size = 3
mock_shape = (20,)
# Should truncate and reshape.
expected_result_shape = (3, 6)
tokens = np.zeros(mock_shape)
sentence_ids = np.zeros(mock_shape)
reshaped_data = cpd._reshape_to_batch_dimensions(
tokens=tokens,
sentence_ids=sentence_ids,
per_host_batch_size=per_host_batch_size)
for values in reshaped_data:
self.assertEqual(len(values.flatten()) % per_host_batch_size, 0)
self.assertAllClose(values.shape, expected_result_shape)
class CreateSegmentsTest(tf.test.TestCase):
def test_basic_functionality(self):
data_length = 10
tokens = np.arange(data_length)
sentence_ids = np.concatenate([np.zeros(data_length // 2),
np.ones(data_length // 2)])
begin_index = 0
total_length = 8
a_data, b_data, label = cpd._create_a_and_b_segments(
tokens=tokens,
sentence_ids=sentence_ids,
begin_index=begin_index,
total_length=total_length,
no_cut_probability=0.)
self.assertAllClose(a_data, [0, 1, 2, 3])
self.assertAllClose(b_data, [5, 6, 7, 8])
self.assertEqual(label, 1)
def test_no_cut(self):
data_length = 10
tokens = np.arange(data_length)
sentence_ids = np.zeros(data_length)
begin_index = 0
total_length = 8
a_data, b_data, label = cpd._create_a_and_b_segments(
tokens=tokens,
sentence_ids=sentence_ids,
begin_index=begin_index,
total_length=total_length,
no_cut_probability=0.)
self.assertGreater(len(a_data), 0)
self.assertGreater(len(b_data), 0)
self.assertEqual(label, 0)
def test_no_cut_with_probability(self):
data_length = 10
tokens = np.arange(data_length)
sentence_ids = np.concatenate([np.zeros(data_length // 2),
np.ones(data_length // 2)])
begin_index = 0
total_length = 8
a_data, b_data, label = cpd._create_a_and_b_segments(
tokens=tokens,
sentence_ids=sentence_ids,
begin_index=begin_index,
total_length=total_length,
no_cut_probability=1.)
self.assertGreater(len(a_data), 0)
self.assertGreater(len(b_data), 0)
self.assertEqual(label, 0)
class CreateInstancesTest(tf.test.TestCase):
"""Tests conversions of Token/Sentence IDs to training instances."""
def test_basic(self):
data_length = 12
tokens = np.arange(data_length)
sentence_ids = np.zeros(data_length)
seq_length = 8
instances = cpd._convert_tokens_to_instances(
tokens=tokens,
sentence_ids=sentence_ids,
per_host_batch_size=2,
seq_length=seq_length,
reuse_length=4,
tokenizer=_get_mock_tokenizer(),
bi_data=False,
num_cores_per_host=1,
logging_frequency=1)
for instance in instances:
self.assertEqual(len(instance.data), seq_length)
self.assertEqual(len(instance.segment_ids), seq_length)
self.assertIsInstance(instance.label, int)
self.assertIsInstance(instance.boundary_indices, list)
class TFRecordPathTests(tf.test.TestCase):
def test_basic(self):
base_kwargs = dict(
per_host_batch_size=1,
num_cores_per_host=1,
seq_length=2,
reuse_length=1)
config1 = dict(
prefix="test",
suffix="",
bi_data=True,
use_eod_token=False,
do_lower_case=True)
config1.update(base_kwargs)
expectation1 = "test_seqlen-2_reuse-1_bs-1_cores-1_uncased_bi.tfrecord"
self.assertEqual(cpd.get_tfrecord_name(**config1), expectation1)
config2 = dict(
prefix="",
suffix="test",
bi_data=False,
use_eod_token=False,
do_lower_case=False)
config2.update(base_kwargs)
expectation2 = "seqlen-2_reuse-1_bs-1_cores-1_cased_uni_test.tfrecord"
self.assertEqual(cpd.get_tfrecord_name(**config2), expectation2)
config3 = dict(
prefix="",
suffix="",
use_eod_token=True,
bi_data=False,
do_lower_case=True)
config3.update(base_kwargs)
expectation3 = "seqlen-2_reuse-1_bs-1_cores-1_uncased_eod_uni.tfrecord"
self.assertEqual(cpd.get_tfrecord_name(**config3), expectation3)
class TestCreateTFRecords(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
("bi_data_only", True, False, False),
("eod_token_only", False, True, True),
("lower_case_only", False, False, True),
("all_enabled", True, True, True),
)
def test_end_to_end(self,
bi_data: bool,
use_eod_token: bool,
do_lower_case: bool):
tokenizer = _get_mock_tokenizer()
num_documents = 5
sentences_per_document = 10
document_length = 50
documents = [
["a " * document_length for _ in range(sentences_per_document)]
for _ in range(num_documents)]
save_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
files = _create_files(temp_dir=self.get_temp_dir(), file_contents=documents)
cpd.create_tfrecords(
tokenizer=tokenizer,
input_file_or_files=",".join(files),
use_eod_token=use_eod_token,
do_lower_case=do_lower_case,
per_host_batch_size=8,
seq_length=8,
reuse_length=4,
bi_data=bi_data,
num_cores_per_host=2,
save_dir=save_dir)
self.assertTrue(any(filter(lambda x: x.endswith(".json"),
os.listdir(save_dir))))
self.assertTrue(any(filter(lambda x: x.endswith(".tfrecord"),
os.listdir(save_dir))))
if __name__ == "__main__":
np.random.seed(0)
logging.set_verbosity(logging.INFO)
tf.test.main()
| 10,927 | 29.696629 | 80 | py |
models | models-master/official/nlp/data/data_loader_factory.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A global factory to access NLP registered data loaders."""
from official.core import registry
_REGISTERED_DATA_LOADER_CLS = {}
def register_data_loader_cls(data_config_cls):
"""Decorates a factory of DataLoader for lookup by a subclass of DataConfig.
This decorator supports registration of data loaders as follows:
```
@dataclasses.dataclass
class MyDataConfig(DataConfig):
# Add fields here.
pass
@register_data_loader_cls(MyDataConfig)
class MyDataLoader:
# Inherits def __init__(self, data_config).
pass
my_data_config = MyDataConfig()
# Returns MyDataLoader(my_data_config).
my_loader = get_data_loader(my_data_config)
```
Args:
data_config_cls: a subclass of DataConfig (*not* an instance
of DataConfig).
Returns:
A callable for use as class decorator that registers the decorated class
for creation from an instance of data_config_cls.
"""
return registry.register(_REGISTERED_DATA_LOADER_CLS, data_config_cls)
def get_data_loader(data_config):
"""Creates a data_loader from data_config."""
return registry.lookup(_REGISTERED_DATA_LOADER_CLS, data_config.__class__)(
data_config)
| 1,788 | 29.322034 | 78 | py |
models | models-master/official/nlp/data/create_xlnet_pretraining_data.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create LM TF examples for XLNet."""
import dataclasses
import json
import math
import os
import random
from typing import Iterable, Mapping, List, Optional, Tuple
import unicodedata
# Import libraries
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from official.nlp.tools import tokenization
special_symbols = {
"<unk>": 0,
"<s>": 1,
"</s>": 2,
"<cls>": 3,
"<sep>": 4,
"<pad>": 5,
"<mask>": 6,
"<eod>": 7,
"<eop>": 8,
}
FLAGS = flags.FLAGS
flags.DEFINE_integer("seq_length", 512,
help="Sequence length.")
flags.DEFINE_integer("reuse_length", 256,
help="Number of token that can be reused as memory. "
"Could be half of `seq_len`.")
flags.DEFINE_string("input_file", None,
"Input raw text file (or comma-separated list of files).")
flags.DEFINE_string(
"save_dir", None,
"Directory for saving processed data.")
flags.DEFINE_string("sp_model_file", "",
"The path to the model used by sentence piece tokenizer.")
flags.DEFINE_bool("use_eod_token", True,
"Whether or not to include EOD tokens.")
flags.DEFINE_bool("bi_data", True, "Whether or not to use bi-directional data.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer("per_host_batch_size", 32, "Batch size per host.")
flags.DEFINE_integer("num_cores_per_host", 16,
"The number of (TPU) cores per host.")
flags.DEFINE_string("prefix", "", "Filename prefix.")
flags.DEFINE_string("suffix", "", "Filename suffix.")
flags.DEFINE_integer("task_id", None,
"The id of the current task.")
flags.DEFINE_integer("num_tasks", None,
"The total number of tasks.")
flags.DEFINE_integer("num_passes", 1, "The number of times to run the script.")
@dataclasses.dataclass
class TrainingInstance:
"""Representation of a single XLNet Pretraining instance."""
data: Iterable[int]
segment_ids: Iterable[int]
boundary_indices: Iterable[int]
label: int
def to_feature(self) -> Mapping[str, tf.train.Feature]:
feat = lambda x: tf.train.Feature(int64_list=tf.train.Int64List(value=x))
return dict(
input_word_ids=feat(self.data),
input_type_ids=feat(self.segment_ids),
boundary_indices=feat(self.boundary_indices),
label=feat([self.label]))
def to_example(self) -> tf.train.Example:
return tf.train.Example(
features=tf.train.Features(feature=self.to_feature()))
def __str__(self):
def seq_to_str(seq):
return " ".join([str(x) for x in seq])
s = ""
s += "tokens: %s\n" % seq_to_str(self.data)
s += "segment_ids: %s\n" % seq_to_str(self.segment_ids)
s += "boundary_indices: %s\n" % seq_to_str(self.boundary_indices)
s += "label: %s\n" % self.label
s += "\n"
return s
def __repr__(self):
return self.__str__()
def _preprocess_line(line: str, do_lower_case: bool = False) -> str:
"""Preprocesses an individual raw text line.
This function will:
- Remove extraneous spaces.
- Replace `` with ", and '' with ".
- Replaces accents.
- Applies lower casing.
Args:
line: The input line to preprocess.
do_lower_case: Whether or not to lower case the text.
Returns:
The preprocessed line.
"""
line = " ".join(line.split())
line = line.replace("``", "\"").replace("''", "\"")
# Replace accents.
line = unicodedata.normalize("NFKD", line)
line = "".join([c for c in line if not unicodedata.combining(c)])
if do_lower_case:
line = line.lower()
return line
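# Editor's note: an illustrative check (not part of the original script) of
# `_preprocess_line`; the expected outputs mirror this module's unit tests.
def _example_preprocess_line():
  assert _preprocess_line('  abc  ') == 'abc'
  assert _preprocess_line("``abc``") == '"abc"'
  assert _preprocess_line(
      'HelLo ``wórLd``', do_lower_case=True) == 'hello "world"'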
def preprocess_and_tokenize_input_files(
input_files: Iterable[str],
tokenizer: tokenization.FullSentencePieceTokenizer,
use_eod: bool = True,
do_lower_case: bool = False,
log_example_freq: int = 100000) -> List[Tuple[np.array, np.array]]:
"""Preprocesses and encodes raw text from input files.
This function preprocesses raw text and encodes them into tokens using a
`SentencePieceModel` tokenization method. This also provides the sentence
indicator for each token.
Args:
input_files: The list of input file names.
tokenizer: The SentencePiece tokenizer that has the attribute `sp_model`.
use_eod: Whether or not to use an EOD indicator. If `False`, then EOD is
not included.
do_lower_case: Whether or not to apply lower casing during raw text
preprocessing.
log_example_freq: The optional field for how many lines to process before
emitting an info log.
Returns:
The preprocessed list. Each entry in the list is a tuple consisting of
the token IDs and the sentence IDs.
"""
all_data = []
eod_symbol = special_symbols["<eod>"]
total_number_of_lines = 0
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
line_count = 0
logging.info("Preprocessing %s", input_file)
all_tokens = []
all_sentence_ids = []
sentence_id = True
with tf.io.gfile.GFile(input_file, "rb") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line_count += 1
if line_count % log_example_freq == 0:
logging.info("Loading line %d", line_count)
line = line.strip()
if not line:
if use_eod:
token_ids = [eod_symbol]
sentence_id = not sentence_id
else:
continue
else:
preprocessed_line = _preprocess_line(
line=line, do_lower_case=do_lower_case)
token_ids = tokenization.encode_ids(
sp_model=tokenizer.sp_model, text=preprocessed_line)
all_tokens.extend(token_ids)
all_sentence_ids.extend([sentence_id] * len(token_ids))
sentence_id = not sentence_id
logging.info("Finished processing %s. Number of lines: %d",
input_file, line_count)
if line_count == 0:
continue
total_number_of_lines += line_count
all_tokens = np.array(all_tokens, dtype=np.int64)
all_sentence_ids = np.array(all_sentence_ids, dtype=bool)
all_data.append((all_tokens, all_sentence_ids))
logging.info("Completed text preprocessing. Total number of lines: %d",
total_number_of_lines)
return all_data
def _reshape_to_batch_dimensions(
tokens: np.array,
sentence_ids: np.array,
per_host_batch_size: int) -> Tuple[np.array, np.array]:
"""Truncates and reshapes input data with a batch major dimension.
Args:
tokens: The input token ids. This should have the same shape as
`sentence_ids`.
sentence_ids: The input sentence ids. This should have the same shape as
`token_ids`.
per_host_batch_size: The target per-host batch size.
Returns:
The tuple of reshaped tokens and sentence_ids.
"""
num_steps = len(tokens) // per_host_batch_size
truncated_data_length = num_steps * per_host_batch_size
logging.info("per_host_batch_size: %d", per_host_batch_size)
logging.info("num_steps: %d", num_steps)
def truncate_and_reshape(a):
return a[:truncated_data_length].reshape((per_host_batch_size, num_steps))
return (truncate_and_reshape(tokens), truncate_and_reshape(sentence_ids))
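# Editor's note: an illustrative sketch (not part of the original script) of
# `_reshape_to_batch_dimensions`: 20 tokens with per_host_batch_size=3 are
# truncated to 18 elements and reshaped to (3, 6), mirroring the unit test.
def _example_reshape_to_batch_dimensions():
  tokens, sentence_ids = np.zeros(20), np.zeros(20)
  reshaped_tokens, _ = _reshape_to_batch_dimensions(
      tokens=tokens, sentence_ids=sentence_ids, per_host_batch_size=3)
  assert reshaped_tokens.shape == (3, 6)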
def _create_a_and_b_segments(
tokens: np.array,
sentence_ids: np.array,
begin_index: int,
total_length: int,
no_cut_probability: float = 0.5):
"""Splits segments A and B from a single instance of tokens and sentence ids.
Args:
tokens: The 1D input token ids. This represents an individual entry within a
batch.
    sentence_ids: The 1D input sentence ids. This represents an individual
      entry within a batch. This should be the same length as `tokens`.
begin_index: The reference beginning index to split data.
total_length: The target combined length of segments A and B.
no_cut_probability: The probability of not cutting a segment despite
a cut possibly existing.
Returns:
A tuple consisting of A data, B data, and label.
"""
data_length = tokens.shape[0]
if begin_index + total_length >= data_length:
logging.info("[_create_segments]: begin_index %d + total_length %d >= "
"data_length %d", begin_index, total_length, data_length)
return None
end_index = begin_index + 1
cut_indices = []
# Identify all indices where sentence IDs change from one to the next.
while end_index < data_length:
if sentence_ids[end_index] != sentence_ids[end_index - 1]:
if end_index - begin_index >= total_length:
break
cut_indices.append(end_index)
end_index += 1
a_begin = begin_index
if not cut_indices or random.random() < no_cut_probability:
# Segments A and B are contained within the same sentence.
label = 0
if not cut_indices:
a_end = end_index
else:
a_end = random.choice(cut_indices)
b_length = max(1, total_length - (a_end - a_begin))
b_begin = random.randint(0, data_length - 1 - b_length)
b_end = b_begin + b_length
while b_begin > 0 and sentence_ids[b_begin - 1] == sentence_ids[b_begin]:
b_begin -= 1
while (b_end < data_length - 1 and
sentence_ids[b_end - 1] == sentence_ids[b_end]):
b_end += 1
else:
# Segments A and B are different sentences.
label = 1
a_end = random.choice(cut_indices)
b_begin = a_end
b_end = end_index
while a_end - a_begin + b_end - b_begin > total_length:
if a_end - a_begin > b_end - b_begin:
# Delete only the right side for the LM objective.
a_end -= 1
else:
b_end -= 1
if a_end >= data_length or b_end >= data_length:
logging.info("[_create_segments]: a_end %d or b_end %d >= data_length %d",
a_end, b_end, data_length)
return None
a_data = tokens[a_begin: a_end]
b_data = tokens[b_begin: b_end]
return a_data, b_data, label
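# Editor's note: an illustrative sketch (not part of the original script) of
# `_create_a_and_b_segments`, mirroring the unit test: with a single sentence
# boundary at index 5 and no_cut_probability=0., the cut yields label=1 with
# a_data == [0, 1, 2, 3] and b_data == [5, 6, 7, 8].
def _example_create_a_and_b_segments():
  tokens = np.arange(10)
  sentence_ids = np.concatenate([np.zeros(5), np.ones(5)])
  return _create_a_and_b_segments(
      tokens=tokens, sentence_ids=sentence_ids, begin_index=0,
      total_length=8, no_cut_probability=0.)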
def _is_functional_piece(piece: str) -> bool:
return piece != "<unk>" and piece.startswith("<") and piece.endswith(">")
def _is_start_piece(piece: str) -> bool:
special_pieces = set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~'))
if (piece.startswith("▁") or piece in special_pieces):
return True
else:
return False
def _get_boundary_indices(
data: np.array,
tokenizer: tokenization.FullSentencePieceTokenizer) -> np.array:
"""Gets the boundary indices of whole words."""
seq_length = len(data)
boundary_indices = []
for index, piece in enumerate(tokenizer.convert_ids_to_tokens(data.tolist())):
if _is_start_piece(piece) and not _is_functional_piece(piece):
boundary_indices.append(index)
boundary_indices.append(seq_length)
return boundary_indices
def _convert_tokens_to_instances(
tokens: np.array,
sentence_ids: np.array,
per_host_batch_size: int,
seq_length: int,
reuse_length: int,
bi_data: bool,
tokenizer: tokenization.FullSentencePieceTokenizer,
num_cores_per_host: int = 0,
logging_frequency: int = 500) -> List[TrainingInstance]:
"""Converts tokens and sentence IDs into individual training instances.
The format of data in the XLNet pretraining task is very similar to the
BERT pretraining task. Two segments A and B are randomly sampled, and the
  concatenation of A and B into a single sequence is used to perform
language modeling.
To create an XLNet Pretraining instance from a single long sequence, S:
- Create a segment of length `reuse_length`. This first segment represents
past tokens. During modeling, this segment is used to cache obtained
content representations for the segment recurrence mechanism.
- Similar to BERT, create a segment of length `seq_length` - `reuse_length`
composed of A and B segments.
For XLNet, the order is "A", "SEP", "B", "SEP", "CLS".
Args:
tokens: All tokens concatenated into a single list.
sentence_ids: All sentence IDs concatenated into a single list.
per_host_batch_size: The target batch size per host.
seq_length: The max sequence length.
reuse_length: The number of tokens to use from the previous segment.
bi_data: Whether or not to use bidirectional data.
tokenizer: The SentencePiece tokenizer that has the attribute `sp_model`.
num_cores_per_host: The number of cores per host. This is required if
`bi_data` = `True`.
logging_frequency: The frequency at which to log status updates.
Returns:
A list of `TrainingInstance` objects.
"""
instances = []
per_core_batch_size = (per_host_batch_size // num_cores_per_host
if bi_data else None)
if bi_data:
logging.info("Bi-directional data enabled.")
assert per_host_batch_size % (2 * num_cores_per_host) == 0
forward_tokens, forward_sentence_ids = _reshape_to_batch_dimensions(
tokens=tokens,
sentence_ids=sentence_ids,
per_host_batch_size=per_host_batch_size // 2)
forward_data_shape = (num_cores_per_host, 1, per_core_batch_size // 2, -1)
forward_tokens = forward_tokens.reshape(forward_data_shape)
forward_sentence_ids = forward_sentence_ids.reshape(forward_data_shape)
backwards_tokens = forward_tokens[:, :, :, ::-1]
backwards_sentence_ids = forward_sentence_ids[:, :, :, ::-1]
tokens = np.concatenate([forward_tokens, backwards_tokens], 1).reshape(
per_host_batch_size, -1)
    sentence_ids = np.concatenate(
        [forward_sentence_ids, backwards_sentence_ids], 1).reshape(
            per_host_batch_size, -1)
else:
logging.info("Bi-directional data disabled.")
tokens, sentence_ids = _reshape_to_batch_dimensions(
tokens=tokens,
sentence_ids=sentence_ids,
per_host_batch_size=per_host_batch_size)
logging.info("Tokens shape: %s", tokens.shape)
data_length = tokens.shape[1]
sep = np.array([special_symbols["<sep>"]], dtype=np.int64)
cls = np.array([special_symbols["<cls>"]], dtype=np.int64)
# 2 sep, 1 cls
num_special_tokens = 3
data_index = 0
batch_number = 0
step_size = reuse_length if reuse_length else seq_length
num_batches = math.ceil(data_length / step_size)
while data_index + seq_length <= data_length:
if batch_number % logging_frequency == 0:
logging.info("Processing batch %d of %d", batch_number, num_batches)
for batch_index in range(per_host_batch_size):
previous_segment_tokens = tokens[
batch_index, data_index: data_index + reuse_length]
results = _create_a_and_b_segments(
tokens=tokens[batch_index],
sentence_ids=sentence_ids[batch_index],
begin_index=data_index + reuse_length,
total_length=seq_length - reuse_length - num_special_tokens)
if results is None:
logging.info("Stopping at data index: %d", data_index)
break
a_data, b_data, label = results
data = np.concatenate(
[previous_segment_tokens, a_data, sep, b_data, sep, cls])
a_length = a_data.shape[0]
b_length = b_data.shape[0]
segment_ids = ([0] * (reuse_length + a_length) + [0]
+ [1] * b_length + [1] + [2])
boundary_indices = _get_boundary_indices(tokenizer=tokenizer,
data=data)
assert len(data) == seq_length
assert len(segment_ids) == seq_length
assert len(boundary_indices) > 0 # pylint: disable=g-explicit-length-test
instances.append(TrainingInstance(
data=data,
segment_ids=segment_ids,
boundary_indices=boundary_indices,
label=label))
batch_number += 1
data_index += step_size
return instances
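# Editor's note: an illustrative sketch (not part of the original script) of
# the segment-id layout built above for one instance; the lengths are
# placeholders. Ids are 0 for reused/previous tokens, segment A and its SEP,
# 1 for segment B and its SEP, and 2 for the trailing CLS token.
def _example_segment_id_layout(reuse_length, a_length, b_length):
  return ([0] * (reuse_length + a_length) + [0]  # Reused tokens + A + SEP.
          + [1] * b_length + [1]                 # B + SEP.
          + [2])                                 # CLS.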
def write_instances_to_tfrecord(
instances: Iterable[TrainingInstance],
save_path: str):
"""Writes instances to TFRecord."""
record_writer = tf.io.TFRecordWriter(save_path)
logging.info("Start writing to %s.", save_path)
for i, instance in enumerate(instances):
if i < 5:
logging.info("Instance %d: %s", i, str(instance))
record_writer.write(instance.to_example().SerializeToString())
record_writer.close()
logging.info("Done writing %s.", save_path)
def shuffle_and_combine_preprocessed_data(
all_data: List[Tuple[np.array, np.array]]) -> Tuple[np.array, np.array]:
"""Shuffles and combines preprocessed token/sentence IDs from documents."""
document_permutation = np.random.permutation(len(all_data))
previous_sentence_id = None
all_tokens, all_sentence_ids = [], []
for document_index in document_permutation:
tokens, sentence_ids = all_data[document_index]
# pylint: disable=g-explicit-length-test
if len(tokens) == 0:
continue
if (previous_sentence_id is not None and
sentence_ids[0] == previous_sentence_id):
sentence_ids = np.logical_not(sentence_ids)
all_tokens.append(tokens)
all_sentence_ids.append(sentence_ids)
previous_sentence_id = sentence_ids[-1]
return np.concatenate(all_tokens), np.concatenate(all_sentence_ids)
def get_tfrecord_name(
per_host_batch_size: int,
num_cores_per_host: int,
seq_length: int,
bi_data: bool,
reuse_length: int,
do_lower_case: bool,
use_eod_token: bool,
prefix: str = "",
suffix: str = "",
pass_id: int = 0,
num_passes: int = 1,
task_id: int = None,
num_tasks: int = None) -> str:
"""Formats the resulting TFRecord name based on provided inputs."""
components = []
if prefix:
components.append(prefix)
components.append("seqlen-{}".format(seq_length))
if reuse_length == 0:
components.append("memless")
else:
components.append("reuse-{}".format(reuse_length))
components.append("bs-{}".format(per_host_batch_size))
components.append("cores-{}".format(num_cores_per_host))
if do_lower_case:
components.append("uncased")
else:
components.append("cased")
if use_eod_token:
components.append("eod")
if bi_data:
components.append("bi")
else:
components.append("uni")
if suffix:
components.append(suffix)
s = "_".join(components) + ".tfrecord"
if num_passes == 1 and task_id is None:
return s
if task_id is None:
num_tasks = 1
task_id = 0
current_shard = task_id * num_passes + pass_id
total_shards = num_tasks * num_passes
return s + "-{}-of-{}".format(current_shard, total_shards)
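# Editor's note: an illustrative check (not part of the original script) of
# `get_tfrecord_name`, mirroring the unit test expectation for these arguments.
def _example_get_tfrecord_name():
  name = get_tfrecord_name(
      per_host_batch_size=1, num_cores_per_host=1, seq_length=2,
      reuse_length=1, prefix='test', suffix='', bi_data=True,
      use_eod_token=False, do_lower_case=True)
  assert name == 'test_seqlen-2_reuse-1_bs-1_cores-1_uncased_bi.tfrecord'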
def create_tfrecords(
tokenizer: tokenization.FullSentencePieceTokenizer,
input_file_or_files: str,
use_eod_token: bool,
do_lower_case: bool,
per_host_batch_size: int,
seq_length: int,
reuse_length: int,
bi_data: bool,
num_cores_per_host: int,
save_dir: str,
prefix: str = "",
suffix: str = "",
num_tasks: Optional[int] = None,
task_id: Optional[int] = None,
num_passes: int = 1):
"""Runs the end-to-end preprocessing pipeline."""
logging.info("Input configuration:")
logging.info("input file(s): %s", input_file_or_files)
logging.info("use_eod_token: %s", use_eod_token)
logging.info("do_lower_case: %s", do_lower_case)
logging.info("per_host_batch_size: %d", per_host_batch_size)
logging.info("seq_length: %d", seq_length)
logging.info("reuse_length: %d", reuse_length)
logging.info("bi_data: %s", bi_data)
logging.info("num_cores_per_host: %d", num_cores_per_host)
logging.info("save_dir: %s", save_dir)
if task_id is not None and num_tasks is not None:
logging.info("task_id: %d", task_id)
logging.info("num_tasks: %d", num_tasks)
input_files = []
for input_pattern in input_file_or_files.split(","):
input_files.extend(tf.io.gfile.glob(input_pattern))
logging.info("*** Reading from input files ***")
for input_file in input_files:
logging.info(" %s", input_file)
logging.info("Shuffling the files with a fixed random seed.")
np.random.shuffle(input_files)
if num_tasks is not None:
assert task_id is not None
logging.info("Total number of input files: %d", len(input_files))
logging.info("Splitting into %d shards of %d files each.",
num_tasks, len(input_files) // num_tasks)
input_files = input_files[task_id::num_tasks]
all_data = preprocess_and_tokenize_input_files(
input_files=input_files,
tokenizer=tokenizer,
use_eod=use_eod_token,
do_lower_case=do_lower_case)
for pass_id in range(num_passes):
logging.info("Beginning pass %d of %d", pass_id, num_passes)
tokens, sentence_ids = shuffle_and_combine_preprocessed_data(all_data)
assert len(tokens) == len(sentence_ids)
filename = get_tfrecord_name(
per_host_batch_size=per_host_batch_size,
num_cores_per_host=num_cores_per_host,
seq_length=seq_length,
bi_data=bi_data,
use_eod_token=use_eod_token,
reuse_length=reuse_length,
do_lower_case=do_lower_case,
prefix=prefix,
suffix=suffix,
pass_id=pass_id,
num_passes=num_passes,
num_tasks=num_tasks,
task_id=task_id)
save_path = os.path.join(save_dir, filename)
if os.path.exists(save_path):
# If the path already exists, then we were probably preempted but
# previously wrote this file.
logging.info("%s already exists, skipping this batch.", save_path)
else:
instances = _convert_tokens_to_instances(
tokenizer=tokenizer,
tokens=tokens,
sentence_ids=sentence_ids,
per_host_batch_size=per_host_batch_size,
seq_length=seq_length,
reuse_length=reuse_length,
bi_data=bi_data,
num_cores_per_host=num_cores_per_host)
write_instances_to_tfrecord(instances=instances, save_path=save_path)
if task_id is None or task_id == 0:
corpus_info = {
"vocab_size": 32000,
"per_host_batch_size": per_host_batch_size,
"num_cores_per_host": num_cores_per_host,
"seq_length": seq_length,
"reuse_length": reuse_length,
"do_lower_case": do_lower_case,
"bi_data": bi_data,
"use_eod_token": use_eod_token,
}
corpus_fname = os.path.basename(filename) + ".json"
corpus_destination = os.path.join(save_dir, corpus_fname)
logging.info("Saving corpus info to %s", corpus_destination)
with tf.io.gfile.GFile(corpus_destination, "w") as fp:
json.dump(corpus_info, fp)
def main(_):
tokenizer = tokenization.FullSentencePieceTokenizer(FLAGS.sp_model_file)
create_tfrecords(
tokenizer=tokenizer,
input_file_or_files=FLAGS.input_file,
use_eod_token=FLAGS.use_eod_token,
do_lower_case=FLAGS.do_lower_case,
per_host_batch_size=FLAGS.per_host_batch_size,
seq_length=FLAGS.seq_length,
reuse_length=FLAGS.reuse_length,
bi_data=FLAGS.bi_data,
num_cores_per_host=FLAGS.num_cores_per_host,
save_dir=FLAGS.save_dir,
prefix=FLAGS.prefix,
suffix=FLAGS.suffix,
num_tasks=FLAGS.num_tasks,
task_id=FLAGS.task_id,
num_passes=FLAGS.num_passes)
if __name__ == "__main__":
np.random.seed(0)
logging.set_verbosity(logging.INFO)
app.run(main)
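# Editor's note: a hypothetical invocation sketch (not from the original file);
# every path below is a placeholder, while the flags themselves are defined
# above.
#
#   python create_xlnet_pretraining_data.py \
#     --input_file=/tmp/corpus/*.txt \
#     --sp_model_file=/tmp/spiece.model \
#     --save_dir=/tmp/xlnet_tfrecords \
#     --seq_length=512 --reuse_length=256 --bi_data=True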
| 24,187 | 32.501385 | 80 | py |
models | models-master/official/nlp/data/pretrain_dynamic_dataloader_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nlp.data.pretrain_dynamic_dataloader."""
import os
from absl import logging
from absl.testing import parameterized
import numpy as np
import orbit
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.data import pretrain_dynamic_dataloader
from official.nlp.tasks import masked_lm
def _create_fake_dataset(output_path, seq_length, num_masked_tokens,
max_seq_length, num_examples):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
rng = np.random.default_rng(37)
for _ in range(num_examples):
features = {}
padding = np.zeros(shape=(max_seq_length - seq_length), dtype=np.int32)
input_ids = rng.integers(low=1, high=100, size=(seq_length))
features['input_ids'] = create_int_feature(
np.concatenate((input_ids, padding)))
features['input_mask'] = create_int_feature(
np.concatenate((np.ones_like(input_ids), padding)))
features['segment_ids'] = create_int_feature(
np.concatenate((np.ones_like(input_ids), padding)))
features['position_ids'] = create_int_feature(
np.concatenate((np.ones_like(input_ids), padding)))
features['masked_lm_positions'] = create_int_feature(
rng.integers(60, size=(num_masked_tokens), dtype=np.int64))
features['masked_lm_ids'] = create_int_feature(
rng.integers(100, size=(num_masked_tokens), dtype=np.int64))
features['masked_lm_weights'] = create_float_feature(
np.ones((num_masked_tokens,), dtype=np.float32))
features['next_sentence_labels'] = create_int_feature(np.array([0]))
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
class PretrainDynamicDataLoaderTest(tf.test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution_strategy=[
strategy_combinations.cloud_tpu_strategy,
],
mode='eager'))
def test_distribution_strategy(self, distribution_strategy):
max_seq_length = 128
batch_size = 8
input_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
_create_fake_dataset(
input_path,
seq_length=60,
num_masked_tokens=20,
max_seq_length=max_seq_length,
num_examples=batch_size)
data_config = pretrain_dynamic_dataloader.BertPretrainDataConfig(
is_training=False,
input_path=input_path,
seq_bucket_lengths=[64, 128],
global_batch_size=batch_size)
dataloader = pretrain_dynamic_dataloader.PretrainingDynamicDataLoader(
data_config)
distributed_ds = orbit.utils.make_distributed_dataset(
distribution_strategy, dataloader.load)
train_iter = iter(distributed_ds)
with distribution_strategy.scope():
config = masked_lm.MaskedLMConfig(
init_checkpoint=self.get_temp_dir(),
model=bert.PretrainerConfig(
encoders.EncoderConfig(
bert=encoders.BertEncoderConfig(
vocab_size=30522, num_layers=1)),
cls_heads=[
bert.ClsHeadConfig(
inner_dim=10, num_classes=2, name='next_sentence')
]),
train_data=data_config)
task = masked_lm.MaskedLMTask(config)
model = task.build_model()
metrics = task.build_metrics()
@tf.function
def step_fn(features):
return task.validation_step(features, model, metrics=metrics)
distributed_outputs = distribution_strategy.run(
step_fn, args=(next(train_iter),))
local_results = tf.nest.map_structure(
distribution_strategy.experimental_local_results, distributed_outputs)
logging.info('Dynamic padding: local_results= %s', str(local_results))
dynamic_metrics = {}
for metric in metrics:
dynamic_metrics[metric.name] = metric.result()
data_config = pretrain_dataloader.BertPretrainDataConfig(
is_training=False,
input_path=input_path,
seq_length=max_seq_length,
max_predictions_per_seq=20,
global_batch_size=batch_size)
dataloader = pretrain_dataloader.BertPretrainDataLoader(data_config)
distributed_ds = orbit.utils.make_distributed_dataset(
distribution_strategy, dataloader.load)
train_iter = iter(distributed_ds)
with distribution_strategy.scope():
metrics = task.build_metrics()
@tf.function
def step_fn_b(features):
return task.validation_step(features, model, metrics=metrics)
distributed_outputs = distribution_strategy.run(
step_fn_b, args=(next(train_iter),))
local_results = tf.nest.map_structure(
distribution_strategy.experimental_local_results, distributed_outputs)
logging.info('Static padding: local_results= %s', str(local_results))
static_metrics = {}
for metric in metrics:
static_metrics[metric.name] = metric.result()
for key in static_metrics:
# We need to investigate the differences on losses.
if key != 'next_sentence_loss':
self.assertEqual(dynamic_metrics[key], static_metrics[key])
def test_load_dataset(self):
tf.random.set_seed(0)
max_seq_length = 128
batch_size = 2
input_path_1 = os.path.join(self.get_temp_dir(), 'train_1.tf_record')
_create_fake_dataset(
input_path_1,
seq_length=60,
num_masked_tokens=20,
max_seq_length=max_seq_length,
num_examples=batch_size)
input_path_2 = os.path.join(self.get_temp_dir(), 'train_2.tf_record')
_create_fake_dataset(
input_path_2,
seq_length=100,
num_masked_tokens=70,
max_seq_length=max_seq_length,
num_examples=batch_size)
input_paths = ','.join([input_path_1, input_path_2])
data_config = pretrain_dynamic_dataloader.BertPretrainDataConfig(
is_training=False,
input_path=input_paths,
seq_bucket_lengths=[64, 128],
use_position_id=True,
global_batch_size=batch_size,
deterministic=True)
dataset = pretrain_dynamic_dataloader.PretrainingDynamicDataLoader(
data_config).load()
dataset_it = iter(dataset)
features = next(dataset_it)
self.assertCountEqual([
'input_word_ids',
'input_mask',
'input_type_ids',
'next_sentence_labels',
'masked_lm_positions',
'masked_lm_ids',
'masked_lm_weights',
'position_ids',
], features.keys())
# Sequence length dimension should be bucketized and pad to 64.
self.assertEqual(features['input_word_ids'].shape, (batch_size, 64))
self.assertEqual(features['input_mask'].shape, (batch_size, 64))
self.assertEqual(features['input_type_ids'].shape, (batch_size, 64))
self.assertEqual(features['position_ids'].shape, (batch_size, 64))
self.assertEqual(features['masked_lm_positions'].shape, (batch_size, 20))
features = next(dataset_it)
self.assertEqual(features['input_word_ids'].shape, (batch_size, 128))
self.assertEqual(features['input_mask'].shape, (batch_size, 128))
self.assertEqual(features['input_type_ids'].shape, (batch_size, 128))
self.assertEqual(features['position_ids'].shape, (batch_size, 128))
self.assertEqual(features['masked_lm_positions'].shape, (batch_size, 70))
def test_load_dataset_not_same_masks(self):
max_seq_length = 128
batch_size = 2
input_path_1 = os.path.join(self.get_temp_dir(), 'train_3.tf_record')
_create_fake_dataset(
input_path_1,
seq_length=60,
num_masked_tokens=20,
max_seq_length=max_seq_length,
num_examples=batch_size)
input_path_2 = os.path.join(self.get_temp_dir(), 'train_4.tf_record')
_create_fake_dataset(
input_path_2,
seq_length=60,
num_masked_tokens=15,
max_seq_length=max_seq_length,
num_examples=batch_size)
input_paths = ','.join([input_path_1, input_path_2])
data_config = pretrain_dynamic_dataloader.BertPretrainDataConfig(
is_training=False,
input_path=input_paths,
seq_bucket_lengths=[64, 128],
use_position_id=True,
global_batch_size=batch_size * 2)
dataset = pretrain_dynamic_dataloader.PretrainingDynamicDataLoader(
data_config).load()
dataset_it = iter(dataset)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, '.*Number of non padded mask tokens.*'):
next(dataset_it)
if __name__ == '__main__':
tf.test.main()
| 9,620 | 38.109756 | 80 | py |
models | models-master/official/nlp/data/wmt_dataloader_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.wmt_dataloader."""
import os
from absl.testing import parameterized
import tensorflow as tf
from sentencepiece import SentencePieceTrainer
from official.nlp.data import wmt_dataloader
def _generate_line_file(filepath, lines):
with tf.io.gfile.GFile(filepath, 'w') as f:
for l in lines:
f.write('{}\n'.format(l))
def _generate_record_file(filepath, src_lines, tgt_lines, unique_id=False):
writer = tf.io.TFRecordWriter(filepath)
for i, (src, tgt) in enumerate(zip(src_lines, tgt_lines)):
features = {
'en': tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[src.encode()])),
'reverse_en': tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tgt.encode()])),
}
    if unique_id:
      features['unique_id'] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[i]))
example = tf.train.Example(
features=tf.train.Features(
feature=features))
writer.write(example.SerializeToString())
writer.close()
def _train_sentencepiece(input_path, vocab_size, model_path, eos_id=1):
argstr = ' '.join([
f'--input={input_path}', f'--vocab_size={vocab_size}',
'--character_coverage=0.995',
f'--model_prefix={model_path}', '--model_type=bpe',
'--bos_id=-1', '--pad_id=0', f'--eos_id={eos_id}', '--unk_id=2'
])
SentencePieceTrainer.Train(argstr)
class WMTDataLoaderTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(WMTDataLoaderTest, self).setUp()
self._temp_dir = self.get_temp_dir()
src_lines = [
'abc ede fg',
'bbcd ef a g',
'de f a a g'
]
tgt_lines = [
'dd cc a ef g',
'bcd ef a g',
'gef cd ba'
]
self._record_train_input_path = os.path.join(self._temp_dir, 'train.record')
_generate_record_file(self._record_train_input_path, src_lines, tgt_lines)
self._record_test_input_path = os.path.join(self._temp_dir, 'test.record')
_generate_record_file(self._record_test_input_path, src_lines, tgt_lines,
unique_id=True)
self._sentencepeice_input_path = os.path.join(self._temp_dir, 'inputs.txt')
_generate_line_file(self._sentencepeice_input_path, src_lines + tgt_lines)
sentencepeice_model_prefix = os.path.join(self._temp_dir, 'sp')
_train_sentencepiece(self._sentencepeice_input_path, 20,
sentencepeice_model_prefix)
self._sentencepeice_model_path = '{}.model'.format(
sentencepeice_model_prefix)
@parameterized.named_parameters(
('train_static', True, True, 100, (2, 35)),
('train_non_static', True, False, 100, (12, 7)),
('non_train_static', False, True, 3, (3, 35)),
('non_train_non_static', False, False, 50, (2, 7)),)
def test_load_dataset(
self, is_training, static_batch, batch_size, expected_shape):
data_config = wmt_dataloader.WMTDataConfig(
input_path=self._record_train_input_path
if is_training else self._record_test_input_path,
max_seq_length=35,
global_batch_size=batch_size,
is_training=is_training,
static_batch=static_batch,
src_lang='en',
tgt_lang='reverse_en',
sentencepiece_model_path=self._sentencepeice_model_path)
dataset = wmt_dataloader.WMTDataLoader(data_config).load()
examples = next(iter(dataset))
inputs, targets = examples['inputs'], examples['targets']
self.assertEqual(inputs.shape, expected_shape)
self.assertEqual(targets.shape, expected_shape)
def test_load_dataset_raise_invalid_window(self):
    batch_tokens_size = 10  # This is too small to form buckets.
data_config = wmt_dataloader.WMTDataConfig(
input_path=self._record_train_input_path,
max_seq_length=100,
global_batch_size=batch_tokens_size,
is_training=True,
static_batch=False,
src_lang='en',
tgt_lang='reverse_en',
sentencepiece_model_path=self._sentencepeice_model_path)
with self.assertRaisesRegex(
ValueError, 'The token budget, global batch size, is too small.*'):
_ = wmt_dataloader.WMTDataLoader(data_config).load()
if __name__ == '__main__':
tf.test.main()
| 4,892 | 36.351145 | 80 | py |
models | models-master/official/nlp/data/data_loader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An abstraction that NLP models define input pipelines."""
import abc
from typing import Optional
import tensorflow as tf
class DataLoader(metaclass=abc.ABCMeta):
"""An abstract class defining the APIs for tf.data input pipeline."""
@abc.abstractmethod
def load(
self,
input_context: Optional[tf.distribute.InputContext] = None
) -> tf.data.Dataset:
"""Implements DataLoader load method.
Builds the entire input pipeline inside the load method. Users can define
states inside the DataLoader class and returns a tf.data dataset
object.
Args:
input_context: This is a context class that is passed to the user's input
function and contains information about the compute replicas and input
pipelines. This object is used for multi-host inputs and passed by the
distribution strategy.
Returns:
      A per-host tf.data dataset. Note that the distribution strategy usually
      creates the distributed dataset from this load method, so this method
      should not directly return a distributed dataset.
"""
pass
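# The class below is an illustrative sketch and not part of the original
# module: a hypothetical `_ExampleRangeDataLoader` showing how a concrete
# subclass typically builds a per-host pipeline and applies `input_context`
# sharding before batching. The toy data source and all names are assumptions.
class _ExampleRangeDataLoader(DataLoader):
  """Toy DataLoader over a `tf.data.Dataset.range` source (illustration)."""
  def __init__(self, num_examples: int = 8, batch_size: int = 2):
    self._num_examples = num_examples
    self._batch_size = batch_size
  def load(
      self,
      input_context: Optional[tf.distribute.InputContext] = None
  ) -> tf.data.Dataset:
    # Build the per-host pipeline; the distribution strategy calls this once
    # per input pipeline when creating the distributed dataset.
    dataset = tf.data.Dataset.range(self._num_examples)
    if input_context:
      # Shard so that each host reads a distinct slice of the data.
      dataset = dataset.shard(input_context.num_input_pipelines,
                              input_context.input_pipeline_id)
    return dataset.batch(self._batch_size)
# A strategy would then wrap the per-host `load`, e.g. via
# orbit.utils.make_distributed_dataset(strategy, _ExampleRangeDataLoader().load).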
| 1,688 | 33.469388 | 79 | py |
models | models-master/official/nlp/data/dual_encoder_dataloader.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads dataset for the dual encoder (retrieval) task."""
import dataclasses
import functools
import itertools
from typing import Iterable, Mapping, Optional, Tuple
import tensorflow as tf
import tensorflow_hub as hub
from official.common import dataset_fn
from official.core import config_definitions as cfg
from official.core import input_reader
from official.nlp.data import data_loader
from official.nlp.data import data_loader_factory
from official.nlp.modeling import layers
@dataclasses.dataclass
class DualEncoderDataConfig(cfg.DataConfig):
"""Data config for dual encoder task (tasks/dual_encoder)."""
# Either set `input_path`...
input_path: str = ''
# ...or `tfds_name` and `tfds_split` to specify input.
tfds_name: str = ''
tfds_split: str = ''
global_batch_size: int = 32
# Either build preprocessing with Python code by specifying these values...
vocab_file: str = ''
lower_case: bool = True
# ...or load preprocessing from a SavedModel at this location.
preprocessing_hub_module_url: str = ''
  left_text_fields: Tuple[str, ...] = ('left_input',)
  right_text_fields: Tuple[str, ...] = ('right_input',)
is_training: bool = True
seq_length: int = 128
file_type: str = 'tfrecord'
@data_loader_factory.register_data_loader_cls(DualEncoderDataConfig)
class DualEncoderDataLoader(data_loader.DataLoader):
"""A class to load dataset for dual encoder task (tasks/dual_encoder)."""
def __init__(self, params):
if bool(params.tfds_name) == bool(params.input_path):
raise ValueError('Must specify either `tfds_name` and `tfds_split` '
'or `input_path`.')
if bool(params.vocab_file) == bool(params.preprocessing_hub_module_url):
raise ValueError('Must specify exactly one of vocab_file (with matching '
'lower_case flag) or preprocessing_hub_module_url.')
self._params = params
self._seq_length = params.seq_length
self._left_text_fields = params.left_text_fields
self._right_text_fields = params.right_text_fields
if params.preprocessing_hub_module_url:
preprocessing_hub_module = hub.load(params.preprocessing_hub_module_url)
self._tokenizer = preprocessing_hub_module.tokenize
self._pack_inputs = functools.partial(
preprocessing_hub_module.bert_pack_inputs,
seq_length=params.seq_length)
else:
self._tokenizer = layers.BertTokenizer(
vocab_file=params.vocab_file, lower_case=params.lower_case)
self._pack_inputs = layers.BertPackInputs(
seq_length=params.seq_length,
special_tokens_dict=self._tokenizer.get_special_tokens_dict())
def _decode(self, record: tf.Tensor):
"""Decodes a serialized tf.Example."""
name_to_features = {
x: tf.io.FixedLenFeature([], tf.string)
for x in itertools.chain(
*[self._left_text_fields, self._right_text_fields])
}
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in example:
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def _bert_tokenize(
self, record: Mapping[str, tf.Tensor],
text_fields: Iterable[str]) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Tokenize the input in text_fields using BERT tokenizer.
Args:
record: A tfexample record contains the features.
text_fields: A list of fields to be tokenzied.
Returns:
The tokenized features in a tuple of (input_word_ids, input_mask,
input_type_ids).
"""
segments_text = [record[x] for x in text_fields]
segments_tokens = [self._tokenizer(s) for s in segments_text]
segments = [tf.cast(x.merge_dims(1, 2), tf.int32) for x in segments_tokens]
return self._pack_inputs(segments)
def _bert_preprocess(
self, record: Mapping[str, tf.Tensor]) -> Mapping[str, tf.Tensor]:
"""Perform the bert word piece tokenization for left and right inputs."""
def _switch_prefix(string, old, new):
if string.startswith(old): return new + string[len(old):]
raise ValueError('Expected {} to start with {}'.format(string, old))
def _switch_key_prefix(d, old, new):
return {_switch_prefix(key, old, new): value for key, value in d.items()} # pytype: disable=attribute-error # trace-all-classes
model_inputs = _switch_key_prefix(
self._bert_tokenize(record, self._left_text_fields),
'input_', 'left_')
model_inputs.update(_switch_key_prefix(
self._bert_tokenize(record, self._right_text_fields),
'input_', 'right_'))
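    # The packed features end up keyed as, e.g., 'left_word_ids', 'left_mask',
    # 'left_type_ids', plus the corresponding 'right_*' entries.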
return model_inputs
def load(self, input_context: Optional[tf.distribute.InputContext] = None):
"""Returns a tf.dataset.Dataset."""
reader = input_reader.InputReader(
params=self._params,
# Skip `decoder_fn` for tfds input.
decoder_fn=self._decode if self._params.input_path else None,
dataset_fn=dataset_fn.pick_dataset_fn(self._params.file_type),
postprocess_fn=self._bert_preprocess)
return reader.read(input_context)
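# Illustrative usage sketch, not part of the original module. The file paths
# below are placeholders (assumptions); this helper is never invoked and only
# documents how the config and loader are typically wired together.
def _example_build_dual_encoder_dataset() -> tf.data.Dataset:
  """Builds a dual-encoder dataset from placeholder paths (illustration)."""
  config = DualEncoderDataConfig(
      input_path='/tmp/dual_encoder_train.tf_record',  # placeholder path
      vocab_file='/tmp/vocab.txt',  # placeholder path
      lower_case=True,
      left_text_fields=('left_input',),
      right_text_fields=('right_input',),
      seq_length=128,
      global_batch_size=32,
      is_training=True)
  return DualEncoderDataLoader(config).load()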
| 5,809 | 38.256757 | 135 | py |
models | models-master/official/nlp/data/pretrain_dataloader_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.pretrain_dataloader."""
import itertools
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.data import pretrain_dataloader
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def _create_fake_bert_dataset(
output_path,
seq_length,
max_predictions_per_seq,
use_position_id,
use_next_sentence_label,
use_v2_feature_names=False):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
def create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
for _ in range(100):
features = {}
input_ids = np.random.randint(100, size=(seq_length))
features["input_mask"] = create_int_feature(np.ones_like(input_ids))
if use_v2_feature_names:
features["input_word_ids"] = create_int_feature(input_ids)
features["input_type_ids"] = create_int_feature(np.ones_like(input_ids))
else:
features["input_ids"] = create_int_feature(input_ids)
features["segment_ids"] = create_int_feature(np.ones_like(input_ids))
features["masked_lm_positions"] = create_int_feature(
np.random.randint(100, size=(max_predictions_per_seq)))
features["masked_lm_ids"] = create_int_feature(
np.random.randint(100, size=(max_predictions_per_seq)))
features["masked_lm_weights"] = create_float_feature(
[1.0] * max_predictions_per_seq)
if use_next_sentence_label:
features["next_sentence_labels"] = create_int_feature([1])
if use_position_id:
features["position_ids"] = create_int_feature(range(0, seq_length))
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def _create_fake_xlnet_dataset(
output_path, seq_length, max_predictions_per_seq):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
for _ in range(100):
features = {}
input_ids = np.random.randint(100, size=(seq_length))
num_boundary_indices = np.random.randint(1, seq_length)
if max_predictions_per_seq is not None:
input_mask = np.zeros_like(input_ids)
input_mask[:max_predictions_per_seq] = 1
np.random.shuffle(input_mask)
else:
input_mask = np.ones_like(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["input_word_ids"] = create_int_feature(input_ids)
features["input_type_ids"] = create_int_feature(np.ones_like(input_ids))
features["boundary_indices"] = create_int_feature(
sorted(np.random.randint(seq_length, size=(num_boundary_indices))))
features["target"] = create_int_feature(input_ids + 1)
features["label"] = create_int_feature([1])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
class BertPretrainDataTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(itertools.product(
(False, True),
(False, True),
))
def test_load_data(self, use_next_sentence_label, use_position_id):
train_data_path = os.path.join(self.get_temp_dir(), "train.tf_record")
seq_length = 128
max_predictions_per_seq = 20
_create_fake_bert_dataset(
train_data_path,
seq_length,
max_predictions_per_seq,
use_next_sentence_label=use_next_sentence_label,
use_position_id=use_position_id)
data_config = pretrain_dataloader.BertPretrainDataConfig(
input_path=train_data_path,
max_predictions_per_seq=max_predictions_per_seq,
seq_length=seq_length,
global_batch_size=10,
is_training=True,
use_next_sentence_label=use_next_sentence_label,
use_position_id=use_position_id)
dataset = pretrain_dataloader.BertPretrainDataLoader(data_config).load()
features = next(iter(dataset))
self.assertLen(features,
6 + int(use_next_sentence_label) + int(use_position_id))
self.assertIn("input_word_ids", features)
self.assertIn("input_mask", features)
self.assertIn("input_type_ids", features)
self.assertIn("masked_lm_positions", features)
self.assertIn("masked_lm_ids", features)
self.assertIn("masked_lm_weights", features)
self.assertEqual("next_sentence_labels" in features,
use_next_sentence_label)
self.assertEqual("position_ids" in features, use_position_id)
def test_v2_feature_names(self):
train_data_path = os.path.join(self.get_temp_dir(), "train.tf_record")
seq_length = 128
max_predictions_per_seq = 20
_create_fake_bert_dataset(
train_data_path,
seq_length,
max_predictions_per_seq,
use_next_sentence_label=True,
use_position_id=False,
use_v2_feature_names=True)
data_config = pretrain_dataloader.BertPretrainDataConfig(
input_path=train_data_path,
max_predictions_per_seq=max_predictions_per_seq,
seq_length=seq_length,
global_batch_size=10,
is_training=True,
use_next_sentence_label=True,
use_position_id=False,
use_v2_feature_names=True)
dataset = pretrain_dataloader.BertPretrainDataLoader(data_config).load()
features = next(iter(dataset))
self.assertIn("input_word_ids", features)
self.assertIn("input_mask", features)
self.assertIn("input_type_ids", features)
self.assertIn("masked_lm_positions", features)
self.assertIn("masked_lm_ids", features)
self.assertIn("masked_lm_weights", features)
class XLNetPretrainDataTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(itertools.product(
("single_token", "whole_word", "token_span"),
(0, 64),
(20, None),
))
def test_load_data(
self, sample_strategy, reuse_length, max_predictions_per_seq):
train_data_path = os.path.join(self.get_temp_dir(), "train.tf_record")
seq_length = 128
batch_size = 5
_create_fake_xlnet_dataset(
train_data_path, seq_length, max_predictions_per_seq)
data_config = pretrain_dataloader.XLNetPretrainDataConfig(
input_path=train_data_path,
max_predictions_per_seq=max_predictions_per_seq,
seq_length=seq_length,
global_batch_size=batch_size,
is_training=True,
reuse_length=reuse_length,
sample_strategy=sample_strategy,
min_num_tokens=1,
max_num_tokens=2,
permutation_size=seq_length // 2,
leak_ratio=0.1)
if max_predictions_per_seq is None:
with self.assertRaises(ValueError):
dataset = pretrain_dataloader.XLNetPretrainDataLoader(
data_config).load()
features = next(iter(dataset))
else:
dataset = pretrain_dataloader.XLNetPretrainDataLoader(data_config).load()
features = next(iter(dataset))
self.assertIn("input_word_ids", features)
self.assertIn("input_type_ids", features)
self.assertIn("permutation_mask", features)
self.assertIn("masked_tokens", features)
self.assertIn("target", features)
self.assertIn("target_mask", features)
self.assertAllClose(features["input_word_ids"].shape,
(batch_size, seq_length))
self.assertAllClose(features["input_type_ids"].shape,
(batch_size, seq_length))
self.assertAllClose(features["permutation_mask"].shape,
(batch_size, seq_length, seq_length))
self.assertAllClose(features["masked_tokens"].shape,
(batch_size, seq_length,))
if max_predictions_per_seq is not None:
self.assertIn("target_mapping", features)
self.assertAllClose(features["target_mapping"].shape,
(batch_size, max_predictions_per_seq, seq_length))
self.assertAllClose(features["target_mask"].shape,
(batch_size, max_predictions_per_seq))
self.assertAllClose(features["target"].shape,
(batch_size, max_predictions_per_seq))
else:
self.assertAllClose(features["target_mask"].shape,
(batch_size, seq_length))
self.assertAllClose(features["target"].shape,
(batch_size, seq_length))
if __name__ == "__main__":
tf.test.main()
| 9,141 | 36.621399 | 79 | py |
models | models-master/official/nlp/data/question_answering_dataloader_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.question_answering_dataloader."""
import os
import numpy as np
import tensorflow as tf
from official.nlp.data import question_answering_dataloader
def _create_fake_dataset(output_path, seq_length):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
for _ in range(100):
features = {}
input_ids = np.random.randint(100, size=(seq_length))
features['input_ids'] = create_int_feature(input_ids)
features['input_mask'] = create_int_feature(np.ones_like(input_ids))
features['segment_ids'] = create_int_feature(np.ones_like(input_ids))
features['start_positions'] = create_int_feature(np.array([0]))
features['end_positions'] = create_int_feature(np.array([10]))
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
class QuestionAnsweringDataTest(tf.test.TestCase):
def test_load_dataset(self):
seq_length = 128
batch_size = 10
input_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
_create_fake_dataset(input_path, seq_length)
data_config = question_answering_dataloader.QADataConfig(
is_training=True,
input_path=input_path,
seq_length=seq_length,
global_batch_size=batch_size)
dataset = question_answering_dataloader.QuestionAnsweringDataLoader(
data_config).load()
features, labels = next(iter(dataset))
self.assertCountEqual(['input_word_ids', 'input_mask', 'input_type_ids'],
features.keys())
self.assertEqual(features['input_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['input_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['input_type_ids'].shape, (batch_size, seq_length))
self.assertCountEqual(['start_positions', 'end_positions'], labels.keys())
self.assertEqual(labels['start_positions'].shape, (batch_size,))
self.assertEqual(labels['end_positions'].shape, (batch_size,))
if __name__ == '__main__':
tf.test.main()
| 2,830 | 36.746667 | 80 | py |
models | models-master/official/nlp/data/create_pretraining_data_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.create_pretraining_data."""
import random
import tensorflow as tf
from official.nlp.data import create_pretraining_data as cpd
_VOCAB_WORDS = ["vocab_1", "vocab_2"]
class CreatePretrainingDataTest(tf.test.TestCase):
def assertTokens(self, input_tokens, output_tokens, masked_positions,
masked_labels):
# Ensure the masked positions are unique.
self.assertCountEqual(masked_positions, set(masked_positions))
# Ensure we can reconstruct the input from the output.
reconstructed_tokens = output_tokens
for pos, label in zip(masked_positions, masked_labels):
reconstructed_tokens[pos] = label
self.assertEqual(input_tokens, reconstructed_tokens)
# Ensure each label is valid.
for pos, label in zip(masked_positions, masked_labels):
output_token = output_tokens[pos]
if (output_token == "[MASK]" or output_token in _VOCAB_WORDS or
output_token == input_tokens[pos]):
continue
self.fail("invalid mask value: {}".format(output_token))
def test_wordpieces_to_grams(self):
tests = [
(["That", "cone"], [(0, 1), (1, 2)]),
(["That", "cone", "##s"], [(0, 1), (1, 3)]),
(["Swit", "##zer", "##land"], [(0, 3)]),
(["[CLS]", "Up", "##dog"], [(1, 3)]),
(["[CLS]", "Up", "##dog", "[SEP]", "Down"], [(1, 3), (4, 5)]),
]
for inp, expected in tests:
output = cpd._wordpieces_to_grams(inp)
self.assertEqual(expected, output)
def test_window(self):
input_list = [1, 2, 3, 4]
window_outputs = [
(1, [[1], [2], [3], [4]]),
(2, [[1, 2], [2, 3], [3, 4]]),
(3, [[1, 2, 3], [2, 3, 4]]),
(4, [[1, 2, 3, 4]]),
(5, []),
]
for window, expected in window_outputs:
output = cpd._window(input_list, window)
self.assertEqual(expected, list(output))
def test_create_masked_lm_predictions(self):
tokens = ["[CLS]", "a", "##a", "b", "##b", "c", "##c", "[SEP]"]
rng = random.Random(123)
for _ in range(0, 5):
output_tokens, masked_positions, masked_labels = (
cpd.create_masked_lm_predictions(
tokens=tokens,
masked_lm_prob=1.0,
max_predictions_per_seq=3,
vocab_words=_VOCAB_WORDS,
rng=rng,
do_whole_word_mask=False,
max_ngram_size=None))
self.assertEqual(len(masked_positions), 3)
self.assertEqual(len(masked_labels), 3)
self.assertTokens(tokens, output_tokens, masked_positions, masked_labels)
def test_create_masked_lm_predictions_whole_word(self):
tokens = ["[CLS]", "a", "##a", "b", "##b", "c", "##c", "[SEP]"]
rng = random.Random(345)
for _ in range(0, 5):
output_tokens, masked_positions, masked_labels = (
cpd.create_masked_lm_predictions(
tokens=tokens,
masked_lm_prob=1.0,
max_predictions_per_seq=3,
vocab_words=_VOCAB_WORDS,
rng=rng,
do_whole_word_mask=True,
max_ngram_size=None))
      # Since we can't get exactly three tokens without breaking a word, we
      # only take two.
self.assertEqual(len(masked_positions), 2)
self.assertEqual(len(masked_labels), 2)
self.assertTokens(tokens, output_tokens, masked_positions, masked_labels)
      # Ensure that we took an entire word.
self.assertIn(masked_labels, [["a", "##a"], ["b", "##b"], ["c", "##c"]])
def test_create_masked_lm_predictions_ngram(self):
tokens = ["[CLS]"] + ["tok{}".format(i) for i in range(0, 512)] + ["[SEP]"]
rng = random.Random(345)
for _ in range(0, 5):
output_tokens, masked_positions, masked_labels = (
cpd.create_masked_lm_predictions(
tokens=tokens,
masked_lm_prob=1.0,
max_predictions_per_seq=76,
vocab_words=_VOCAB_WORDS,
rng=rng,
do_whole_word_mask=True,
max_ngram_size=3))
self.assertEqual(len(masked_positions), 76)
self.assertEqual(len(masked_labels), 76)
self.assertTokens(tokens, output_tokens, masked_positions, masked_labels)
if __name__ == "__main__":
tf.test.main()
| 4,857 | 36.658915 | 79 | py |
models | models-master/official/nlp/data/tagging_dataloader_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.tagging_data_loader."""
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.data import tagging_dataloader
def _create_fake_dataset(output_path, seq_length, include_sentence_id):
"""Creates a fake dataset."""
writer = tf.io.TFRecordWriter(output_path)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
for i in range(100):
features = {}
input_ids = np.random.randint(100, size=(seq_length))
features['input_ids'] = create_int_feature(input_ids)
features['input_mask'] = create_int_feature(np.ones_like(input_ids))
features['segment_ids'] = create_int_feature(np.ones_like(input_ids))
features['label_ids'] = create_int_feature(
np.random.randint(10, size=(seq_length)))
if include_sentence_id:
features['sentence_id'] = create_int_feature([i])
features['sub_sentence_id'] = create_int_feature([0])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
class TaggingDataLoaderTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(True, False)
def test_load_dataset(self, include_sentence_id):
seq_length = 16
batch_size = 10
train_data_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
_create_fake_dataset(train_data_path, seq_length, include_sentence_id)
data_config = tagging_dataloader.TaggingDataConfig(
input_path=train_data_path,
seq_length=seq_length,
global_batch_size=batch_size,
include_sentence_id=include_sentence_id)
dataset = tagging_dataloader.TaggingDataLoader(data_config).load()
features, labels = next(iter(dataset))
expected_keys = ['input_word_ids', 'input_mask', 'input_type_ids']
if include_sentence_id:
expected_keys.extend(['sentence_id', 'sub_sentence_id'])
self.assertCountEqual(expected_keys, features.keys())
self.assertEqual(features['input_word_ids'].shape, (batch_size, seq_length))
self.assertEqual(features['input_mask'].shape, (batch_size, seq_length))
self.assertEqual(features['input_type_ids'].shape, (batch_size, seq_length))
self.assertEqual(labels.shape, (batch_size, seq_length))
if include_sentence_id:
self.assertEqual(features['sentence_id'].shape, (batch_size,))
self.assertEqual(features['sub_sentence_id'].shape, (batch_size,))
if __name__ == '__main__':
tf.test.main()
| 3,196 | 37.518072 | 80 | py |
models | models-master/official/nlp/data/data_loader_factory_test.py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.data_loader_factory."""
import dataclasses
import tensorflow as tf
from official.core import config_definitions as cfg
from official.nlp.data import data_loader_factory
@dataclasses.dataclass
class MyDataConfig(cfg.DataConfig):
is_training: bool = True
@data_loader_factory.register_data_loader_cls(MyDataConfig)
class MyDataLoader:
def __init__(self, params):
self.params = params
class DataLoaderFactoryTest(tf.test.TestCase):
def test_register_and_load(self):
train_config = MyDataConfig()
train_loader = data_loader_factory.get_data_loader(train_config)
self.assertTrue(train_loader.params.is_training)
if __name__ == "__main__":
tf.test.main()
| 1,325 | 27.826087 | 74 | py |