repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
qemu | qemu-master/tests/avocado/avocado_qemu/__init__.py | # Test class and utilities for functional tests
#
# Copyright (c) 2018 Red Hat, Inc.
#
# Author:
# Cleber Rosa <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import time
import uuid
import avocado
from avocado.utils import cloudinit, datadrainer, process, ssh, vmimage
from avocado.utils.path import find_command
from qemu.machine import QEMUMachine
from qemu.utils import (get_info_usernet_hostfwd_port, kvm_available,
tcg_available)
#: The QEMU build root directory.  It may also be the source directory
#: if building from the source dir, but it's safer to use BUILD_DIR for
#: that purpose.  Be aware that if this code is moved outside of a source
#: and build tree, it will not be accurate.
BUILD_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

if os.path.islink(os.path.dirname(os.path.dirname(__file__))):
    # The link to the avocado tests dir in the source code directory
    lnk = os.path.dirname(os.path.dirname(__file__))
    #: The QEMU root source directory
    SOURCE_DIR = os.path.dirname(os.path.dirname(os.readlink(lnk)))
else:
    # In-tree build: the source tree and the build tree are the same.
    SOURCE_DIR = BUILD_DIR
def has_cmd(name, args=None):
    """
    This function is for use in a @avocado.skipUnless decorator, e.g.:

        @skipUnless(*has_cmd('sudo -n', ('sudo', '-n', 'true')))
        def test_something_that_needs_sudo(self):
            ...
    """
    if args is None:
        args = ('which', name)

    try:
        _, stderr, exitcode = run_cmd(args)
    except Exception as details:
        exitcode = -1
        stderr = str(details)

    if exitcode == 0:
        return (True, '')

    cmd_line = ' '.join(args)
    err = f'{name} required, but "{cmd_line}" failed: {stderr.strip()}'
    return (False, err)
def has_cmds(*cmds):
    """
    This function is for use in a @avocado.skipUnless decorator and
    allows checking for the availability of multiple commands, e.g.:

        @skipUnless(*has_cmds(('cmd1', ('cmd1', '--some-parameter')),
                              'cmd2', 'cmd3'))
        def test_something_that_needs_cmd1_and_cmd2(self):
            ...
    """
    for entry in cmds:
        # A plain string means "check with the default probe"; a tuple
        # carries an explicit probe command line as second element.
        spec = (entry,) if isinstance(entry, str) else entry
        ok, errstr = has_cmd(*spec)
        if not ok:
            return (False, errstr)
    return (True, '')
def run_cmd(args):
    """Run a command and capture its output.

    :param args: the command line, as a sequence of program + arguments
    :returns: a (stdout, stderr, returncode) tuple; both output streams
              are decoded to text
    """
    # subprocess.run() supersedes the manual Popen()/communicate() dance:
    # it waits for the child and collects both streams in one call.
    result = subprocess.run(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True,
                            check=False)
    return (result.stdout, result.stderr, result.returncode)
def is_readable_executable_file(path):
    """Return True when *path* is a regular file we can read and execute."""
    required_access = os.R_OK | os.X_OK
    return os.path.isfile(path) and os.access(path, required_access)
def pick_default_qemu_bin(bin_prefix='qemu-system-', arch=None):
    """
    Picks the path of a QEMU binary, starting either in the current working
    directory or in the source tree root directory.

    :param arch: the arch to use when looking for a QEMU binary (the target
                 will match the arch given).  If None (the default), arch
                 will be the current host system arch (as given by
                 :func:`os.uname`).
    :type arch: str
    :returns: the path to the default QEMU binary or None if one could not
              be found
    :rtype: str or None
    """
    if arch is None:
        arch = os.uname()[4]
    # qemu binary path does not match arch for powerpc, handle it
    if 'ppc64le' in arch:
        arch = 'ppc64'
    binary_name = bin_prefix + arch
    candidates = (
        os.path.join(".", binary_name),
        os.path.join(BUILD_DIR, binary_name),
        os.path.join(BUILD_DIR, "build", binary_name),
    )
    for candidate in candidates:
        if is_readable_executable_file(candidate):
            return candidate
    return None
def _console_interaction(test, success_message, failure_message,
                         send_string, keep_sending=False, vm=None):
    # Core helper behind the console convenience functions: optionally
    # send a string to the guest console, then read the console line by
    # line until success_message (break) or failure_message (test.fail)
    # shows up.  A success_message of None breaks on the first non-empty
    # line read.
    assert not keep_sending or send_string
    if vm is None:
        vm = test.vm
    # NOTE(review): 'encoding' is ignored by socket.makefile() in binary
    # mode; lines are decoded manually below.
    console = vm.console_socket.makefile(mode='rb', encoding='utf-8')
    console_logger = logging.getLogger('console')
    while True:
        if send_string:
            vm.console_socket.sendall(send_string.encode())
            if not keep_sending:
                send_string = None # send only once
        try:
            msg = console.readline().decode().strip()
        except UnicodeDecodeError:
            # Binary noise on the console (e.g. boot garbage) is skipped.
            msg = None
        if not msg:
            continue
        console_logger.debug(msg)
        if success_message is None or success_message in msg:
            break
        if failure_message and failure_message in msg:
            console.close()
            fail = 'Failure message found in console: "%s". Expected: "%s"' % \
                (failure_message, success_message)
            test.fail(fail)
def interrupt_interactive_console_until_pattern(test, success_message,
                                                failure_message=None,
                                                interrupt_string='\r'):
    """
    Keep sending a string to interrupt a console prompt, while logging the
    console output. Typical use case is to break a boot loader prompt, such:

        Press a key within 5 seconds to interrupt boot process.
        5
        4
        3
        2
        1
        Booting default image...

    :param test: an Avocado test containing a VM that will have its console
                 read and probed for a success or failure message
    :type test: :class:`avocado_qemu.QemuSystemTest`
    :param success_message: if this message appears, test succeeds
    :param failure_message: if this message appears, test fails
    :param interrupt_string: a string to send to the console before trying
                             to read a new line
    """
    _console_interaction(test, success_message, failure_message,
                         interrupt_string, keep_sending=True)
def wait_for_console_pattern(test, success_message, failure_message=None,
                             vm=None):
    """
    Waits for messages to appear on the console, while logging the content

    :param test: an Avocado test containing a VM that will have its console
                 read and probed for a success or failure message
    :type test: :class:`avocado_qemu.QemuSystemTest`
    :param success_message: if this message appears, test succeeds
    :param failure_message: if this message appears, test fails
    """
    # Pure read: nothing is sent to the console (send_string=None).
    _console_interaction(test, success_message, failure_message,
                         None, vm=vm)
def exec_command(test, command):
    """
    Send a command to a console (appending CRLF characters), while logging
    the content.

    :param test: an Avocado test containing a VM.
    :type test: :class:`avocado_qemu.QemuSystemTest`
    :param command: the command to send
    :type command: str
    """
    # Fire and forget: no success/failure pattern is waited for.
    _console_interaction(test, None, None, command + '\r')
def exec_command_and_wait_for_pattern(test, command,
                                      success_message, failure_message=None):
    """
    Send a command to a console (appending CRLF characters), then wait
    for success_message to appear on the console, while logging the
    content. Mark the test as failed if failure_message is found instead.

    :param test: an Avocado test containing a VM that will have its console
                 read and probed for a success or failure message
    :type test: :class:`avocado_qemu.QemuSystemTest`
    :param command: the command to send
    :param success_message: if this message appears, test succeeds
    :param failure_message: if this message appears, test fails
    """
    full_command = command + '\r'
    _console_interaction(test, success_message, failure_message, full_command)
class QemuBaseTest(avocado.Test):
    """Base class shared by the system and user mode emulation tests.

    Handles the choice of architecture/CPU/QEMU binary from test
    parameters and avocado tags.
    """

    # default timeout for all tests, can be overridden
    timeout = 120

    def _get_unique_tag_val(self, tag_name):
        """
        Gets a tag value, if unique for a key
        """
        vals = self.tags.get(tag_name, [])
        if len(vals) == 1:
            return vals.pop()
        return None

    def setUp(self, bin_prefix):
        # Explicit test parameters win; an avocado tag is used as the
        # default only when it is unambiguous (exactly one value).
        self.arch = self.params.get('arch',
                                    default=self._get_unique_tag_val('arch'))

        self.cpu = self.params.get('cpu',
                                   default=self._get_unique_tag_val('cpu'))

        default_qemu_bin = pick_default_qemu_bin(bin_prefix, arch=self.arch)
        self.qemu_bin = self.params.get('qemu_bin',
                                        default=default_qemu_bin)
        if self.qemu_bin is None:
            # Without a binary to test there is nothing meaningful to run.
            self.cancel("No QEMU binary defined or found in the build tree")

    def fetch_asset(self, name,
                    asset_hash=None, algorithm=None,
                    locations=None, expire=None,
                    find_only=False, cancel_on_missing=True):
        # Same as avocado.Test.fetch_asset(), but defaulting to canceling
        # the test (instead of erroring) when the asset is unavailable.
        return super().fetch_asset(name,
                                   asset_hash=asset_hash,
                                   algorithm=algorithm,
                                   locations=locations,
                                   expire=expire,
                                   find_only=find_only,
                                   cancel_on_missing=cancel_on_missing)
class QemuSystemTest(QemuBaseTest):
    """Facilitates system emulation tests."""

    def setUp(self):
        # Maps a name to a lazily-created QEMUMachine, see get_vm().
        self._vms = {}

        super().setUp('qemu-system-')

        accel_required = self._get_unique_tag_val('accel')
        if accel_required:
            self.require_accelerator(accel_required)

        self.machine = self.params.get('machine',
                                       default=self._get_unique_tag_val('machine'))

    def require_accelerator(self, accelerator):
        """
        Requires an accelerator to be available for the test to continue

        It takes into account the currently set qemu binary.

        If the check fails, the test is canceled.  If the check itself
        for the given accelerator is not available, the test is also
        canceled.

        :param accelerator: name of the accelerator, such as "kvm" or "tcg"
        :type accelerator: str
        """
        checker = {'tcg': tcg_available,
                   'kvm': kvm_available}.get(accelerator)
        if checker is None:
            self.cancel("Don't know how to check for the presence "
                        "of accelerator %s" % accelerator)
        if not checker(qemu_bin=self.qemu_bin):
            self.cancel("%s accelerator does not seem to be "
                        "available" % accelerator)

    def require_netdev(self, netdevname):
        # Cancel the test unless the binary's '-netdev help' output lists
        # the requested backend.
        # NOTE(review): the cancel message hard-codes 'user' although
        # netdevname is a parameter — consider mentioning netdevname.
        netdevhelp = run_cmd([self.qemu_bin,
                              '-M', 'none', '-netdev', 'help'])[0];
        if netdevhelp.find('\n' + netdevname + '\n') < 0:
            self.cancel('no support for user networking')

    def require_multiprocess(self):
        """
        Test for the presence of the x-pci-proxy-dev which is required
        to support multiprocess.
        """
        devhelp = run_cmd([self.qemu_bin,
                           '-M', 'none', '-device', 'help'])[0];
        if devhelp.find('x-pci-proxy-dev') < 0:
            self.cancel('no support for multiprocess device emulation')

    def _new_vm(self, name, *args):
        # Sockets live in a short-lived private tmp dir; the test workdir
        # is reused as the machine's base temp dir.
        self._sd = tempfile.TemporaryDirectory(prefix="qemu_")
        vm = QEMUMachine(self.qemu_bin, base_temp_dir=self.workdir,
                         sock_dir=self._sd.name, log_dir=self.logdir)
        self.log.debug('QEMUMachine "%s" created', name)
        self.log.debug('QEMUMachine "%s" temp_dir: %s', name, vm.temp_dir)
        self.log.debug('QEMUMachine "%s" log_dir: %s', name, vm.log_dir)
        if args:
            vm.add_args(*args)
        return vm

    @property
    def vm(self):
        # Convenience accessor for the per-test default VM.
        return self.get_vm(name='default')

    def get_vm(self, *args, name=None):
        # Returns (creating on first use) the VM registered under *name*;
        # extra args are only applied when the VM is first created.
        if not name:
            name = str(uuid.uuid4())
        if self._vms.get(name) is None:
            self._vms[name] = self._new_vm(name, *args)
            if self.cpu is not None:
                self._vms[name].add_args('-cpu', self.cpu)
            if self.machine is not None:
                self._vms[name].set_machine(self.machine)
        return self._vms[name]

    def set_vm_arg(self, arg, value):
        """
        Set an argument to list of extra arguments to be given to the QEMU
        binary. If the argument already exists then its value is replaced.

        :param arg: the QEMU argument, such as "-cpu" in "-cpu host"
        :type arg: str
        :param value: the argument value, such as "host" in "-cpu host"
        :type value: str
        """
        if not arg or not value:
            return
        if arg not in self.vm.args:
            self.vm.args.extend([arg, value])
        else:
            idx = self.vm.args.index(arg) + 1
            if idx < len(self.vm.args):
                self.vm.args[idx] = value
            else:
                self.vm.args.append(value)

    def tearDown(self):
        for vm in self._vms.values():
            vm.shutdown()
        # Drop the reference so the TemporaryDirectory can be cleaned up.
        self._sd = None
        super().tearDown()
class QemuUserTest(QemuBaseTest):
    """Facilitates user-mode emulation tests."""

    def setUp(self):
        # Directories passed to the emulated loader via -L, see run().
        self._ldpath = []
        super().setUp('qemu-')

    def add_ldpath(self, ldpath):
        """Add a directory to the emulated loader's search path (-L)."""
        self._ldpath.append(os.path.abspath(ldpath))

    def run(self, bin_path, args=None):
        """Run *bin_path* under the QEMU user-mode emulator.

        :param bin_path: path to the guest binary to execute
        :param args: optional sequence of arguments for the guest binary
        :returns: the result of :func:`avocado.utils.process.run`
        """
        # BUG FIX: the default used to be the mutable literal [], which
        # is shared across calls; use None as the sentinel instead.
        if args is None:
            args = []
        qemu_args = " ".join(["-L %s" % ldpath for ldpath in self._ldpath])
        bin_args = " ".join(args)
        return process.run("%s %s %s %s" % (self.qemu_bin, qemu_args,
                                            bin_path, bin_args))
class LinuxSSHMixIn:
    """Contains utility methods for interacting with a guest via SSH."""

    def ssh_connect(self, username, credential, credential_is_key=True):
        """Open an SSH session to the guest through the usernet hostfwd port.

        :param username: guest account to log in as
        :param credential: private key path (credential_is_key=True) or
                           password (credential_is_key=False)
        """
        self.ssh_logger = logging.getLogger('ssh')
        # Ask the monitor which host port forwards to the guest's sshd.
        res = self.vm.command('human-monitor-command',
                              command_line='info usernet')
        port = get_info_usernet_hostfwd_port(res)
        self.assertIsNotNone(port)
        self.assertGreater(port, 0)
        self.log.debug('sshd listening on port: %d', port)
        if credential_is_key:
            self.ssh_session = ssh.Session('127.0.0.1', port=port,
                                           user=username, key=credential)
        else:
            self.ssh_session = ssh.Session('127.0.0.1', port=port,
                                           user=username, password=credential)
        # Retry with a growing back-off while the guest's sshd comes up.
        for i in range(10):
            try:
                self.ssh_session.connect()
                return
            # BUG FIX: this was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; narrow it to Exception.
            except Exception:
                time.sleep(i)
        self.fail('ssh connection timeout')

    def ssh_command(self, command):
        """Run *command* in the guest, log its output, assert exit 0.

        :returns: (stdout_lines, stderr_lines) with trailing whitespace
                  stripped from each line
        """
        self.ssh_logger.info(command)
        result = self.ssh_session.cmd(command)
        stdout_lines = [line.rstrip() for line
                        in result.stdout_text.splitlines()]
        for line in stdout_lines:
            self.ssh_logger.info(line)
        stderr_lines = [line.rstrip() for line
                        in result.stderr_text.splitlines()]
        for line in stderr_lines:
            self.ssh_logger.warning(line)
        self.assertEqual(result.exit_status, 0,
                         f'Guest command failed: {command}')
        return stdout_lines, stderr_lines
class LinuxDistro:
    """Represents a Linux distribution

    Holds information of known distros.
    """
    #: A collection of known distros and their respective image checksum,
    #: keyed by distro name -> version -> architecture.  Entries may also
    #: carry a pxeboot URL and default kernel parameters.
    KNOWN_DISTROS = {
        'fedora': {
            '31': {
                'x86_64':
                {'checksum': ('e3c1b309d9203604922d6e255c2c5d09'
                              '8a309c2d46215d8fc026954f3c5c27a0'),
                 'pxeboot_url': ('https://archives.fedoraproject.org/'
                                 'pub/archive/fedora/linux/releases/31/'
                                 'Everything/x86_64/os/images/pxeboot/'),
                 'kernel_params': ('root=UUID=b1438b9b-2cab-4065-a99a-'
                                   '08a96687f73c ro no_timer_check '
                                   'net.ifnames=0 console=tty1 '
                                   'console=ttyS0,115200n8'),
                },
                'aarch64':
                {'checksum': ('1e18d9c0cf734940c4b5d5ec592facae'
                              'd2af0ad0329383d5639c997fdf16fe49'),
                 'pxeboot_url': 'https://archives.fedoraproject.org/'
                                'pub/archive/fedora/linux/releases/31/'
                                'Everything/aarch64/os/images/pxeboot/',
                 'kernel_params': ('root=UUID=b6950a44-9f3c-4076-a9c2-'
                                   '355e8475b0a7 ro earlyprintk=pl011,0x9000000'
                                   ' ignore_loglevel no_timer_check'
                                   ' printk.time=1 rd_NO_PLYMOUTH'
                                   ' console=ttyAMA0'),
                },
                'ppc64':
                {'checksum': ('7c3528b85a3df4b2306e892199a9e1e4'
                              '3f991c506f2cc390dc4efa2026ad2f58')},
                's390x':
                {'checksum': ('4caaab5a434fd4d1079149a072fdc789'
                              '1e354f834d355069ca982fdcaf5a122d')},
            },
            '32': {
                'aarch64':
                {'checksum': ('b367755c664a2d7a26955bbfff985855'
                              'adfa2ca15e908baf15b4b176d68d3967'),
                 'pxeboot_url': ('http://dl.fedoraproject.org/pub/fedora/linux/'
                                 'releases/32/Server/aarch64/os/images/'
                                 'pxeboot/'),
                 'kernel_params': ('root=UUID=3df75b65-be8d-4db4-8655-'
                                   '14d95c0e90c5 ro no_timer_check net.ifnames=0'
                                   ' console=tty1 console=ttyS0,115200n8'),
                },
            },
            '33': {
                'aarch64':
                {'checksum': ('e7f75cdfd523fe5ac2ca9eeece68edc1'
                              'a81f386a17f969c1d1c7c87031008a6b'),
                 'pxeboot_url': ('http://dl.fedoraproject.org/pub/fedora/linux/'
                                 'releases/33/Server/aarch64/os/images/'
                                 'pxeboot/'),
                 'kernel_params': ('root=UUID=d20b3ffa-6397-4a63-a734-'
                                   '1126a0208f8a ro no_timer_check net.ifnames=0'
                                   ' console=tty1 console=ttyS0,115200n8'
                                   ' console=tty0'),
                },
            },
        }
    }

    def __init__(self, name, version, arch):
        self.name = name
        self.version = version
        self.arch = arch
        try:
            info = self.KNOWN_DISTROS.get(name).get(version).get(arch)
        except AttributeError:
            # Unknown distro
            info = None
        # Fall back to an empty dict so the property getters below can
        # safely use .get() on unknown distros.
        self._info = info or {}

    @property
    def checksum(self):
        """Gets the cloud-image file checksum"""
        return self._info.get('checksum', None)

    @checksum.setter
    def checksum(self, value):
        self._info['checksum'] = value

    @property
    def pxeboot_url(self):
        """Gets the repository url where pxeboot files can be found"""
        return self._info.get('pxeboot_url', None)

    @property
    def default_kernel_params(self):
        """Gets the default kernel parameters"""
        return self._info.get('kernel_params', None)
class LinuxTest(LinuxSSHMixIn, QemuSystemTest):
    """Facilitates having a cloud-image Linux based available.

    For tests that intend to interact with guests, this is a better choice
    to start with than the more vanilla `QemuSystemTest` class.
    """

    # Defaults used to bring the guest up; subclasses may override.
    distro = None
    username = 'root'
    password = 'password'
    smp = '2'
    memory = '1024'

    def _set_distro(self):
        # Distro name/version come from parameters, unique tags, or the
        # fedora 31 fallback, in that order.
        distro_name = self.params.get(
            'distro',
            default=self._get_unique_tag_val('distro'))
        if not distro_name:
            distro_name = 'fedora'

        distro_version = self.params.get(
            'distro_version',
            default=self._get_unique_tag_val('distro_version'))
        if not distro_version:
            distro_version = '31'

        self.distro = LinuxDistro(distro_name, distro_version, self.arch)

        # The distro checksum behaves differently than distro name and
        # version. First, it does not respect a tag with the same
        # name, given that it's not expected to be used for filtering
        # (distro name versions are the natural choice).  Second, the
        # order of precedence is: parameter, attribute and then value
        # from KNOWN_DISTROS.
        distro_checksum = self.params.get('distro_checksum',
                                          default=None)
        if distro_checksum:
            self.distro.checksum = distro_checksum

    def setUp(self, ssh_pubkey=None, network_device_type='virtio-net'):
        super().setUp()
        self.require_netdev('user')
        self._set_distro()
        self.vm.add_args('-smp', self.smp)
        self.vm.add_args('-m', self.memory)
        # The following network device allows for SSH connections
        self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
                         '-device', '%s,netdev=vnet' % network_device_type)
        self.set_up_boot()
        if ssh_pubkey is None:
            ssh_pubkey, self.ssh_key = self.set_up_existing_ssh_keys()
        self.set_up_cloudinit(ssh_pubkey)

    def set_up_existing_ssh_keys(self):
        # Copy the repository's test key pair; the private key gets the
        # 0600 mode ssh requires.
        ssh_public_key = os.path.join(SOURCE_DIR, 'tests', 'keys', 'id_rsa.pub')
        source_private_key = os.path.join(SOURCE_DIR, 'tests', 'keys', 'id_rsa')
        ssh_dir = os.path.join(self.workdir, '.ssh')
        os.mkdir(ssh_dir, mode=0o700)
        ssh_private_key = os.path.join(ssh_dir,
                                       os.path.basename(source_private_key))
        shutil.copyfile(source_private_key, ssh_private_key)
        os.chmod(ssh_private_key, 0o600)
        return (ssh_public_key, ssh_private_key)

    def download_boot(self):
        # Returns the path to a bootable snapshot image of the selected
        # distro, downloading/preparing it if necessary.
        self.log.debug('Looking for and selecting a qemu-img binary to be '
                       'used to create the bootable snapshot image')
        # If qemu-img has been built, use it, otherwise the system wide one
        # will be used.  If none is available, the test will cancel.
        qemu_img = os.path.join(BUILD_DIR, 'qemu-img')
        if not os.path.exists(qemu_img):
            qemu_img = find_command('qemu-img', False)
        if qemu_img is False:
            self.cancel('Could not find "qemu-img", which is required to '
                        'create the bootable image')
        vmimage.QEMU_IMG = qemu_img

        self.log.info('Downloading/preparing boot image')
        # Fedora 31 only provides ppc64le images
        image_arch = self.arch
        if self.distro.name == 'fedora':
            if image_arch == 'ppc64':
                image_arch = 'ppc64le'

        try:
            boot = vmimage.get(
                self.distro.name, arch=image_arch, version=self.distro.version,
                checksum=self.distro.checksum,
                algorithm='sha256',
                cache_dir=self.cache_dirs[0],
                snapshot_dir=self.workdir)
        except:
            # Download failures (network, checksum) cancel rather than fail.
            self.cancel('Failed to download/prepare boot image')
        return boot.path

    def prepare_cloudinit(self, ssh_pubkey=None):
        # Builds a cloud-init ISO that sets credentials, authorizes the
        # given public key and phones home to self.phone_server.
        self.log.info('Preparing cloudinit image')
        try:
            cloudinit_iso = os.path.join(self.workdir, 'cloudinit.iso')
            pubkey_content = None
            if ssh_pubkey:
                with open(ssh_pubkey) as pubkey:
                    pubkey_content = pubkey.read()
            cloudinit.iso(cloudinit_iso, self.name,
                          username=self.username,
                          password=self.password,
                          # QEMU's hard coded usermode router address
                          phone_home_host='10.0.2.2',
                          phone_home_port=self.phone_server.server_port,
                          authorized_key=pubkey_content)
        except Exception:
            self.cancel('Failed to prepare the cloudinit image')
        return cloudinit_iso

    def set_up_boot(self):
        path = self.download_boot()
        self.vm.add_args('-drive', 'file=%s' % path)

    def set_up_cloudinit(self, ssh_pubkey=None):
        # The phone-home server is how we learn the guest finished booting.
        self.phone_server = cloudinit.PhoneHomeServer(('0.0.0.0', 0),
                                                      self.name)
        cloudinit_iso = self.prepare_cloudinit(ssh_pubkey)
        self.vm.add_args('-drive', 'file=%s,format=raw' % cloudinit_iso)

    def launch_and_wait(self, set_up_ssh_connection=True):
        # Boots the VM, drains the console to the test log, blocks until
        # cloud-init phones home, then (optionally) opens an SSH session.
        self.vm.set_console()
        self.vm.launch()
        console_drainer = datadrainer.LineLogger(self.vm.console_socket.fileno(),
                                                 logger=self.log.getChild('console'))
        console_drainer.start()
        self.log.info('VM launched, waiting for boot confirmation from guest')
        while not self.phone_server.instance_phoned_back:
            self.phone_server.handle_request()

        if set_up_ssh_connection:
            self.log.info('Setting up the SSH connection')
            self.ssh_connect(self.username, self.ssh_key)
| 25,619 | 37.295964 | 88 | py |
qemu | qemu-master/roms/edk2-build.py | #!/usr/bin/python3
"""
build helper script for edk2, see
https://gitlab.com/kraxel/edk2-build-config
"""
import os
import sys
import shutil
import argparse
import subprocess
import configparser
# Module-level state shared between option parsing and the build helpers.
rebase_prefix = ""        # progress marker used for 'git rebase -x' test builds
version_override = None   # firmware version string override (--version-override)
release_date = None       # firmware release date override (--release-date)

# pylint: disable=unused-variable
def check_rebase():
    """ detect 'git rebase -x edk2-build.py master' testbuilds """
    global rebase_prefix
    global version_override
    gitdir = '.git'

    # A worktree's .git is a file pointing at the real git dir.
    if os.path.isfile(gitdir):
        with open(gitdir, 'r', encoding = 'utf-8') as f:
            (unused, gitdir) = f.read().split()

    if not os.path.exists(f'{gitdir}/rebase-merge/msgnum'):
        return

    # Rebase in progress: read current/total step and the branch name to
    # build a "[ n / m - branch ] " progress prefix for build banners.
    with open(f'{gitdir}/rebase-merge/msgnum', 'r', encoding = 'utf-8') as f:
        msgnum = int(f.read())
    with open(f'{gitdir}/rebase-merge/end', 'r', encoding = 'utf-8') as f:
        end = int(f.read())
    with open(f'{gitdir}/rebase-merge/head-name', 'r', encoding = 'utf-8') as f:
        head = f.read().strip().split('/')

    rebase_prefix = f'[ {int(msgnum/2)} / {int(end/2)} - {head[-1]} ] '
    if msgnum != end and not version_override:
        # fixed version speeds up builds
        version_override = "test-build-patch-series"
def get_coredir(cfg):
    """Return the edk2 core directory: 'global.core' option, or cwd."""
    if not cfg.has_option('global', 'core'):
        return os.getcwd()
    return os.path.abspath(cfg['global']['core'])
def get_version(cfg):
    """Figure out the firmware version string.

    Precedence: explicit override, rpmbuild environment, 'git describe'
    on the core directory.  Returns None when none of those apply.
    """
    coredir = get_coredir(cfg)

    if version_override:
        version = version_override
        print('')
        print(f'### version [override]: {version}')
        return version

    if os.environ.get('RPM_PACKAGE_NAME'):
        version = os.environ.get('RPM_PACKAGE_NAME')
        version += '-' + os.environ.get('RPM_PACKAGE_VERSION')
        version += '-' + os.environ.get('RPM_PACKAGE_RELEASE')
        print('')
        print(f'### version [rpmbuild]: {version}')
        return version

    if os.path.exists(coredir + '/.git'):
        describe = [ 'git', 'describe', '--tags', '--abbrev=8',
                     '--match=edk2-stable*' ]
        result = subprocess.run(describe, cwd = coredir,
                                stdout = subprocess.PIPE,
                                check = True)
        version = result.stdout.decode().strip()
        print('')
        print(f'### version [git]: {version}')
        return version

    return None
def pcd_string(name, value):
    """Format a wide-string (L"...") PCD assignment for the build tool."""
    return '%s=L%s\\0' % (name, value)
def pcd_version(cfg):
    """Build args setting PcdFirmwareVersionString, or [] if unknown."""
    version = get_version(cfg)
    if version is not None:
        return [ '--pcd', pcd_string('PcdFirmwareVersionString', version) ]
    return []
def pcd_release_date():
    """Build args setting PcdFirmwareReleaseDateString, or [] if unset."""
    if release_date is not None:
        return [ '--pcd', pcd_string('PcdFirmwareReleaseDateString',
                                     release_date) ]
    return []
def build_message(line, line2 = None):
    """Print a prominent '###' progress banner (and the xterm title)."""
    if os.environ.get('TERM') in ('xterm', 'xterm-256color'):
        # set the xterm window title via the OSC 2 escape sequence
        print(f'\x1b]2;{rebase_prefix}{line}\x07', end = '')

    print('')
    print('###')
    print(f'### {rebase_prefix}{line}')
    if line2:
        print(f'### {line2}')
    print('###', flush = True)
def build_run(cmdline, name, section, silent = False):
    """Run one build command.

    In silent mode the output is captured into '<section>.log' and only
    echoed to the console on failure.  A non-zero exit code terminates
    the whole script via sys.exit().
    """
    print(cmdline, flush = True)
    if not silent:
        result = subprocess.run(cmdline, check = False)
    else:
        print('### building in silent mode ...', flush = True)
        result = subprocess.run(cmdline, check = False,
                                stdout = subprocess.PIPE,
                                stderr = subprocess.STDOUT)

        logfile = f'{section}.log'
        print(f'### writing log to {logfile} ...')
        with open(logfile, 'wb') as log:
            log.write(result.stdout)

        if result.returncode:
            print('### BUILD FAILURE')
            print('### output')
            print(result.stdout.decode())
            print(f'### exit code: {result.returncode}')
        else:
            print('### OK')

    if result.returncode:
        print(f'ERROR: {cmdline[0]} exited with {result.returncode}'
              f' while building {name}')
        sys.exit(result.returncode)
def build_copy(plat, tgt, dstdir, copy):
    """Copy one build artifact out of the edk2 Build tree into *dstdir*.

    *copy* is "<srcfile> [<dstfile>]"; the destination file name defaults
    to the source file's basename.
    """
    srcdir = f'Build/{plat}/{tgt}_GCC5'
    names = copy.split()
    srcfile = names[0]
    dstfile = names[1] if len(names) > 1 else os.path.basename(srcfile)
    print(f'# copy: {srcdir} / {srcfile} => {dstdir} / {dstfile}')

    src = srcdir + '/' + srcfile
    dst = dstdir + '/' + dstfile
    os.makedirs(os.path.dirname(dst), exist_ok = True)
    shutil.copy(src, dst)
def pad_file(dstdir, pad):
    """Pad one file in *dstdir* to a given size via 'truncate'.

    *pad* is "<name> <size>"; raises RuntimeError when the size is missing.
    """
    args = pad.split()
    if len(args) < 2:
        raise RuntimeError(f'missing arg for pad ({args})')
    name, size = args[0], args[1]

    print(f'# padding: {dstdir} / {name} => {size}')
    subprocess.run([ 'truncate', '--size', size, dstdir + '/' + name ],
                   check = True)
# pylint: disable=too-many-branches
def build_one(cfg, build, jobs = None, silent = False):
    """Run one [build.*] config section: assemble the 'build' command
    line from the section's options, run it once per target, then apply
    the section's copy ('cpy*') and pad ('pad*') post-processing steps.
    """
    cmdline = [ 'build' ]
    cmdline += [ '-t', 'GCC5' ]
    cmdline += [ '-p', cfg[build]['conf'] ]

    # Only the VM firmware platforms get the version/date PCDs.
    if (cfg[build]['conf'].startswith('OvmfPkg/') or
        cfg[build]['conf'].startswith('ArmVirtPkg/')):
        cmdline += pcd_version(cfg)
        cmdline += pcd_release_date()

    if jobs:
        cmdline += [ '-n', jobs ]
    for arch in cfg[build]['arch'].split():
        cmdline += [ '-a', arch ]
    # 'opts' names [opts.*] sections providing -D defines.
    if 'opts' in cfg[build]:
        for name in cfg[build]['opts'].split():
            section = 'opts.' + name
            for opt in cfg[section]:
                cmdline += [ '-D', opt + '=' + cfg[section][opt] ]
    # 'pcds' names [pcds.*] sections providing --pcd overrides.
    if 'pcds' in cfg[build]:
        for name in cfg[build]['pcds'].split():
            section = 'pcds.' + name
            for pcd in cfg[section]:
                cmdline += [ '--pcd', pcd + '=' + cfg[section][pcd] ]
    if 'tgts' in cfg[build]:
        tgts = cfg[build]['tgts'].split()
    else:
        tgts = [ 'DEBUG' ]
    for tgt in tgts:
        desc = None
        if 'desc' in cfg[build]:
            desc = cfg[build]['desc']
        build_message(f'building: {cfg[build]["conf"]} ({cfg[build]["arch"]}, {tgt})',
                      f'description: {desc}')
        build_run(cmdline + [ '-b', tgt ],
                  cfg[build]['conf'],
                  build + '.' + tgt,
                  silent)

        if 'plat' in cfg[build]:
            # copy files
            for cpy in cfg[build]:
                if not cpy.startswith('cpy'):
                    continue
                build_copy(cfg[build]['plat'],
                           tgt,
                           cfg[build]['dest'],
                           cfg[build][cpy])
            # pad builds
            for pad in cfg[build]:
                if not pad.startswith('pad'):
                    continue
                pad_file(cfg[build]['dest'],
                         cfg[build][pad])
def build_basetools(silent = False):
    """Compile the edk2 BaseTools (make -C $EDK_TOOLS_PATH)."""
    build_message('building: BaseTools')
    basedir = os.environ['EDK_TOOLS_PATH']
    build_run([ 'make', '-C', basedir ],
              'BaseTools', 'build.basetools', silent)
def binary_exists(name):
    """Return True when *name* exists in one of the $PATH directories."""
    return any(os.path.exists(pdir + '/' + name)
               for pdir in os.environ['PATH'].split(':'))
def prepare_env(cfg):
    """ mimic Conf/BuildEnv.sh """
    workspace = os.getcwd()
    packages = [ workspace, ]
    path = os.environ['PATH'].split(':')
    dirs = [
        'BaseTools/Bin/Linux-x86_64',
        'BaseTools/BinWrappers/PosixLike'
    ]

    # Additional package dirs come from the [global] pkgs option.
    if cfg.has_option('global', 'pkgs'):
        for pkgdir in cfg['global']['pkgs'].split():
            packages.append(os.path.abspath(pkgdir))
    coredir = get_coredir(cfg)
    if coredir != workspace:
        packages.append(coredir)

    # add basetools to path
    for pdir in dirs:
        p = coredir + '/' + pdir
        if not os.path.exists(p):
            continue
        if p in path:
            continue
        path.insert(0, p)

    # run edksetup if needed
    toolsdef = coredir + '/Conf/tools_def.txt'
    if not os.path.exists(toolsdef):
        os.makedirs(os.path.dirname(toolsdef), exist_ok = True)
        build_message('running BaseTools/BuildEnv')
        cmdline = [ 'bash', 'BaseTools/BuildEnv' ]
        subprocess.run(cmdline, cwd = coredir, check = True)

    # set variables
    os.environ['PATH'] = ':'.join(path)
    os.environ['PACKAGES_PATH'] = ':'.join(packages)
    os.environ['WORKSPACE'] = workspace
    os.environ['EDK_TOOLS_PATH'] = coredir + '/BaseTools'
    os.environ['CONF_PATH'] = coredir + '/Conf'
    os.environ['PYTHON_COMMAND'] = '/usr/bin/python3'
    # fixed hash seed for reproducible builds
    os.environ['PYTHONHASHSEED'] = '1'

    # for cross builds: point the GCC5 toolchain prefixes at whatever
    # cross compilers are installed, skipping the host architecture.
    if binary_exists('arm-linux-gnu-gcc'):
        os.environ['GCC5_ARM_PREFIX'] = 'arm-linux-gnu-'
    if binary_exists('loongarch64-linux-gnu-gcc'):
        os.environ['GCC5_LOONGARCH64_PREFIX'] = 'loongarch64-linux-gnu-'
    hostarch = os.uname().machine
    if binary_exists('aarch64-linux-gnu-gcc') and hostarch != 'aarch64':
        os.environ['GCC5_AARCH64_PREFIX'] = 'aarch64-linux-gnu-'
    if binary_exists('riscv64-linux-gnu-gcc') and hostarch != 'riscv64':
        os.environ['GCC5_RISCV64_PREFIX'] = 'riscv64-linux-gnu-'
    if binary_exists('x86_64-linux-gnu-gcc') and hostarch != 'x86_64':
        os.environ['GCC5_IA32_PREFIX'] = 'x86_64-linux-gnu-'
        os.environ['GCC5_X64_PREFIX'] = 'x86_64-linux-gnu-'
        os.environ['GCC5_BIN'] = 'x86_64-linux-gnu-'
def build_list(cfg):
    """Print the available build configs ([build.*] sections) with their
    descriptions."""
    for build in cfg.sections():
        if not build.startswith('build.'):
            continue
        # BUG FIX: str.lstrip() strips *characters*, not a prefix --
        # 'build.debug'.lstrip('build.') gives 'ebug'.  Slice instead.
        name = build[len('build.'):]
        desc = 'no description'
        if 'desc' in cfg[build]:
            desc = cfg[build]['desc']
        print(f'# {name:20s} - {desc}')
def main():
    """Parse the command line, read the build config, run the builds.

    Returns a process exit code (0 on success).
    """
    parser = argparse.ArgumentParser(prog = 'edk2-build',
                                     description = 'edk2 build helper script')
    parser.add_argument('-c', '--config', dest = 'configfile',
                        type = str, default = '.edk2.builds', metavar = 'FILE',
                        help = 'read configuration from FILE (default: .edk2.builds)')
    parser.add_argument('-C', '--directory', dest = 'directory', type = str,
                        help = 'change to DIR before building', metavar = 'DIR')
    parser.add_argument('-j', '--jobs', dest = 'jobs', type = str,
                        help = 'allow up to JOBS parallel build jobs',
                        metavar = 'JOBS')
    parser.add_argument('-m', '--match', dest = 'match', type = str,
                        help = 'only run builds matching INCLUDE (substring)',
                        metavar = 'INCLUDE')
    parser.add_argument('-x', '--exclude', dest = 'exclude', type = str,
                        help = 'skip builds matching EXCLUDE (substring)',
                        metavar = 'EXCLUDE')
    parser.add_argument('-l', '--list', dest = 'list',
                        action = 'store_true', default = False,
                        help = 'list build configs available')
    parser.add_argument('--silent', dest = 'silent',
                        action = 'store_true', default = False,
                        help = 'write build output to logfiles, '
                        'write to console only on errors')
    parser.add_argument('--core', dest = 'core', type = str, metavar = 'DIR',
                        help = 'location of the core edk2 repository '
                        '(i.e. where BuildTools are located)')
    parser.add_argument('--pkg', '--package', dest = 'pkgs',
                        type = str, action = 'append', metavar = 'DIR',
                        help = 'location(s) of additional packages '
                        '(can be specified multiple times)')
    parser.add_argument('--version-override', dest = 'version_override',
                        type = str, metavar = 'VERSION',
                        help = 'set firmware build version')
    parser.add_argument('--release-date', dest = 'release_date',
                        type = str, metavar = 'DATE',
                        help = 'set firmware build release date (in MM/DD/YYYY format)')
    options = parser.parse_args()

    if options.directory:
        os.chdir(options.directory)

    if not os.path.exists(options.configfile):
        # BUG FIX: the string was missing its 'f' prefix, so the literal
        # text '{options.configfile}' was printed instead of the name.
        print(f'config file "{options.configfile}" not found')
        return 1

    cfg = configparser.ConfigParser()
    # keep option names case-sensitive (PCD/define names matter)
    cfg.optionxform = str
    cfg.read(options.configfile)

    if options.list:
        build_list(cfg)
        return

    if not cfg.has_section('global'):
        cfg.add_section('global')
    if options.core:
        cfg.set('global', 'core', options.core)
    if options.pkgs:
        cfg.set('global', 'pkgs', ' '.join(options.pkgs))

    global version_override
    global release_date
    check_rebase()
    # Explicit command line overrides win over check_rebase()'s choice.
    if options.version_override:
        version_override = options.version_override
    if options.release_date:
        release_date = options.release_date

    prepare_env(cfg)
    build_basetools(options.silent)
    for build in cfg.sections():
        if not build.startswith('build.'):
            continue
        if options.match and options.match not in build:
            print(f'# skipping "{build}" (not matching "{options.match}")')
            continue
        if options.exclude and options.exclude in build:
            print(f'# skipping "{build}" (matching "{options.exclude}")')
            continue
        build_one(cfg, build, options.jobs, options.silent)

    return 0
# Script entry point: propagate main()'s return value as the exit code.
if __name__ == '__main__':
    sys.exit(main())
| 13,632 | 34.782152 | 88 | py |
qemu | qemu-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# QEMU documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 31 16:40:14 2019.
#
# This config file can be used in one of two ways:
# (1) as a common config file which is included by the conf.py
# for each of QEMU's manuals: in this case sphinx-build is run multiple
# times, once per subdirectory.
# (2) as a top level conf file which will result in building all
# the manuals into a single document: in this case sphinx-build is
# run once, on the top-level docs directory.
#
# QEMU's makefiles take option (1), which allows us to install
# only the ones the user cares about (in particular we don't want
# to ship the 'devel' manual to end-users).
# Third-party sites such as readthedocs.org will take option (2).
#
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import sphinx
from distutils.version import LooseVersion
from sphinx.errors import ConfigError
# Make Sphinx fail cleanly if using an old Python, rather than obscurely
# failing because some code in one of our extensions doesn't work there.
# In newer versions of Sphinx this will display nicely; in older versions
# Sphinx will also produce a Python backtrace but at least the information
# gets printed...
if sys.version_info < (3,6):
raise ConfigError(
"QEMU requires a Sphinx that uses Python 3.6 or better\n")
# The per-manual conf.py will set qemu_docdir for a single-manual build;
# otherwise set it here if this is an entire-manual-set build.
# This is always the absolute path of the docs/ directory in the source tree.
try:
qemu_docdir
except NameError:
qemu_docdir = os.path.abspath(".")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use an absolute path starting from qemu_docdir.
#
# Our extensions are in docs/sphinx; the qapidoc extension requires
# the QAPI modules from scripts/.
sys.path.insert(0, os.path.join(qemu_docdir, "sphinx"))
sys.path.insert(0, os.path.join(qemu_docdir, "../scripts"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# Sphinx 1.5 and earlier can't build our docs because they are too
# picky about the syntax of the argument to the option:: directive
# (see Sphinx bugs #646, #3366).
needs_sphinx = '1.6'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['kerneldoc', 'qmp_lexer', 'hxtool', 'depfile', 'qapidoc']
if sphinx.version_info[:3] > (4, 0, 0):
tags.add('sphinx4')
extensions += ['dbusdoc']
else:
extensions += ['fakedbusdoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = [os.path.join(qemu_docdir, '_templates')]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# Interpret `single-backticks` to be a cross-reference to any kind of
# referenceable object. Unresolvable or ambiguous references will emit a
# warning at build time.
default_role = 'any'
# General information about the project.
project = u'QEMU'
copyright = u'2022, The QEMU Project Developers'
author = u'The QEMU Project Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Extract this information from the VERSION file, for the benefit of
# standalone Sphinx runs as used by readthedocs.org. Builds run from
# the Makefile will pass version and release on the sphinx-build
# command line, which override this.
# Read the VERSION file best-effort; standalone Sphinx runs (e.g. on
# readthedocs) may build from a tree where it is absent or unreadable,
# in which case we fall back to a placeholder version string.
extracted_version = None
try:
    with open(os.path.join(qemu_docdir, '../VERSION')) as f:
        extracted_version = f.readline().strip()
except OSError:
    # Narrow except: the old bare "except:" also swallowed
    # SystemExit/KeyboardInterrupt, which must propagate.
    pass
if extracted_version:
    version = release = extracted_version
else:
    version = release = "unknown version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Sphinx defaults to warning about use of :option: for options not defined
# with "option::" in the document being processed. Turn that off.
suppress_warnings = ["ref.option"]
# The rst_epilog fragment is effectively included in every rST file.
# We use it to define substitutions based on build config that
# can then be used in the documentation. The fallback if the
# environment variable is not set is for the benefit of readthedocs
# style document building; our Makefile always sets the variable.
confdir = os.getenv('CONFDIR', "/etc/qemu")
rst_epilog = ".. |CONFDIR| replace:: ``" + confdir + "``\n"
# We slurp in the defs.rst.inc and literally include it into rst_epilog,
# because Sphinx's include:: directive doesn't work with absolute paths
# and there isn't any one single relative path that will work for all
# documents and for both via-make and direct sphinx-build invocation.
with open(os.path.join(qemu_docdir, 'defs.rst.inc')) as f:
rst_epilog += f.read()
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
try:
import sphinx_rtd_theme
except ImportError:
raise ConfigError(
'The Sphinx \'sphinx_rtd_theme\' HTML theme was not found.\n'
)
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# distutils is deprecated (PEP 632) and removed in Python 3.12, so parse
# the leading numeric components of the theme version ourselves rather
# than relying on distutils.version.LooseVersion. Non-numeric suffixes
# (e.g. ".dev0") terminate the parse, matching LooseVersion's ordering
# for the release versions we care about.
_rtd_theme_version = []
for _part in sphinx_rtd_theme.__version__.split('.'):
    if not _part.isdigit():
        break
    _rtd_theme_version.append(int(_part))
if tuple(_rtd_theme_version) >= (0, 4, 3):
    html_theme_options = {
        "style_nav_header_background": "#802400",
        "navigation_with_keys": True,
    }
html_logo = os.path.join(qemu_docdir, "../ui/icons/qemu_128x128.png")
html_favicon = os.path.join(qemu_docdir, "../ui/icons/qemu_32x32.png")
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [os.path.join(qemu_docdir, "sphinx-static")]
html_css_files = [
'theme_overrides.css',
]
html_js_files = [
'custom.js',
]
html_context = {
"display_gitlab": True,
"gitlab_user": "qemu-project",
"gitlab_repo": "qemu",
"gitlab_version": "master",
"conf_py_path": "/docs/", # Path in the checkout to the docs root
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#html_sidebars = {}
# Don't copy the rST source files to the HTML output directory,
# and don't put links to the sources into the output HTML.
html_copy_source = False
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'QEMUdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'QEMU.tex', u'QEMU Documentation',
u'The QEMU Project Developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# Individual manual/conf.py can override this to create man pages
man_pages = [
('interop/qemu-ga', 'qemu-ga',
'QEMU Guest Agent',
['Michael Roth <[email protected]>'], 8),
('interop/qemu-ga-ref', 'qemu-ga-ref',
'QEMU Guest Agent Protocol Reference',
[], 7),
('interop/qemu-qmp-ref', 'qemu-qmp-ref',
'QEMU QMP Reference Manual',
[], 7),
('interop/qemu-storage-daemon-qmp-ref', 'qemu-storage-daemon-qmp-ref',
'QEMU Storage Daemon QMP Reference Manual',
[], 7),
('system/qemu-manpage', 'qemu',
'QEMU User Documentation',
['Fabrice Bellard'], 1),
('system/qemu-block-drivers', 'qemu-block-drivers',
'QEMU block drivers reference',
['Fabrice Bellard and the QEMU Project developers'], 7),
('system/qemu-cpu-models', 'qemu-cpu-models',
'QEMU CPU Models',
['The QEMU Project developers'], 7),
('tools/qemu-img', 'qemu-img',
'QEMU disk image utility',
['Fabrice Bellard'], 1),
('tools/qemu-nbd', 'qemu-nbd',
'QEMU Disk Network Block Device Server',
['Anthony Liguori <[email protected]>'], 8),
('tools/qemu-pr-helper', 'qemu-pr-helper',
'QEMU persistent reservation helper',
[], 8),
('tools/qemu-storage-daemon', 'qemu-storage-daemon',
'QEMU storage daemon',
[], 1),
('tools/qemu-trace-stap', 'qemu-trace-stap',
'QEMU SystemTap trace tool',
[], 1),
('tools/virtfs-proxy-helper', 'virtfs-proxy-helper',
'QEMU 9p virtfs proxy filesystem helper',
['M. Mohan Kumar'], 1),
]
man_make_section_directory = False
# We use paths starting from qemu_docdir here so that you can run
# sphinx-build from anywhere and the kerneldoc extension can still
# find everything.
kerneldoc_bin = ['perl', os.path.join(qemu_docdir, '../scripts/kernel-doc')]
kerneldoc_srctree = os.path.join(qemu_docdir, '..')
hxtool_srctree = os.path.join(qemu_docdir, '..')
qapidoc_srctree = os.path.join(qemu_docdir, '..')
dbusdoc_srctree = os.path.join(qemu_docdir, '..')
dbus_index_common_prefix = ["org.qemu."]
| 10,864 | 34.622951 | 79 | py |
qemu | qemu-master/docs/sphinx/fakedbusdoc.py | # D-Bus XML documentation extension, compatibility gunk for <sphinx4
#
# Copyright (C) 2021, Red Hat Inc.
#
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Author: Marc-André Lureau <[email protected]>
"""dbus-doc is a Sphinx extension that provides documentation from D-Bus XML."""
from docutils.parsers.rst import Directive
from sphinx.application import Sphinx
from typing import Any, Dict
class FakeDBusDocDirective(Directive):
    """Stand-in for the real ``dbus-doc`` directive on Sphinx < 4.

    Accepts the same single argument and body content as the real
    directive, but renders nothing at all.
    """
    has_content = True
    required_arguments = 1
    def run(self):
        # Swallow the directive entirely: produce no output nodes.
        return []
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register a fake dbus-doc directive with Sphinx.

    Returns the (empty) extension metadata dictionary. Previously the
    function implicitly returned None despite being annotated as
    returning Dict[str, Any]; an empty dict satisfies the annotation
    and is treated identically by Sphinx.
    """
    app.add_directive("dbus-doc", FakeDBusDocDirective)
    return {}
| 691 | 25.615385 | 80 | py |
qemu | qemu-master/docs/sphinx/qapidoc.py | # coding=utf-8
#
# QEMU qapidoc QAPI file parsing extension
#
# Copyright (c) 2020 Linaro
#
# This work is licensed under the terms of the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
"""
qapidoc is a Sphinx extension that implements the qapi-doc directive
The purpose of this extension is to read the documentation comments
in QAPI schema files, and insert them all into the current document.
It implements one new rST directive, "qapi-doc::".
Each qapi-doc:: directive takes one argument, which is the
pathname of the schema file to process, relative to the source tree.
The docs/conf.py file must set the qapidoc_srctree config value to
the root of the QEMU source tree.
The Sphinx documentation on writing extensions is at:
https://www.sphinx-doc.org/en/master/development/index.html
"""
import os
import re
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from sphinx.errors import ExtensionError
from sphinx.util.nodes import nested_parse_with_titles
import sphinx
from qapi.gen import QAPISchemaVisitor
from qapi.error import QAPIError, QAPISemError
from qapi.schema import QAPISchema
# Sphinx up to 1.6 uses AutodocReporter; 1.7 and later
# use switch_source_input. Check borrowed from kerneldoc.py.
Use_SSI = sphinx.__version__[:3] >= '1.7'
if Use_SSI:
from sphinx.util.docutils import switch_source_input
else:
from sphinx.ext.autodoc import AutodocReporter
__version__ = '1.0'
# Function borrowed from pydash, which is under the MIT license
# Function borrowed from pydash, which is under the MIT license
def intersperse(iterable, separator):
    """Yield the members of *iterable* interspersed with *separator*.

    An empty *iterable* yields nothing. (The previous implementation
    called next() on an empty iterator inside a generator, which under
    PEP 479 turns the StopIteration into a RuntimeError.)
    """
    iterator = iter(iterable)
    try:
        yield next(iterator)
    except StopIteration:
        return
    for item in iterator:
        yield separator
        yield item
class QAPISchemaGenRSTVisitor(QAPISchemaVisitor):
    """A QAPI schema visitor which generates docutils/Sphinx nodes
    This class builds up a tree of docutils/Sphinx nodes corresponding
    to documentation for the various QAPI objects. To use it, first
    create a QAPISchemaGenRSTVisitor object, and call its
    visit_begin() method. Then you can call one of the two methods
    'freeform' (to add documentation for a freeform documentation
    chunk) or 'symbol' (to add documentation for a QAPI symbol). These
    will cause the visitor to build up the tree of document
    nodes. Once you've added all the documentation via 'freeform' and
    'symbol' method calls, you can call 'get_document_nodes' to get
    the final list of document nodes (in a form suitable for returning
    from a Sphinx directive's 'run' method).
    """
    def __init__(self, sphinx_directive):
        # Doc block currently being rendered; set by symbol()/freeform()
        # and cleared again when they finish.
        self._cur_doc = None
        self._sphinx_directive = sphinx_directive
        self._top_node = nodes.section()
        # Stack of open section nodes, one entry per heading level;
        # new content is attached to the innermost (last) entry.
        self._active_headings = [self._top_node]
    def _make_dlitem(self, term, defn):
        """Return a dlitem node with the specified term and definition.
        term should be a list of Text and literal nodes.
        defn should be one of:
        - a string, which will be handed to _parse_text_into_node
        - a list of Text and literal nodes, which will be put into
          a paragraph node
        """
        dlitem = nodes.definition_list_item()
        dlterm = nodes.term('', '', *term)
        dlitem += dlterm
        if defn:
            dldef = nodes.definition()
            if isinstance(defn, list):
                dldef += nodes.paragraph('', '', *defn)
            else:
                self._parse_text_into_node(defn, dldef)
            dlitem += dldef
        return dlitem
    def _make_section(self, title):
        """Return a section node with optional title"""
        section = nodes.section(ids=[self._sphinx_directive.new_serialno()])
        if title:
            section += nodes.title(title, title)
        return section
    def _nodes_for_ifcond(self, ifcond, with_if=True):
        """Return list of Text, literal nodes for the ifcond
        Return a list which gives text like ' (If: condition)'.
        If with_if is False, we don't return the "(If: " and ")".
        """
        doc = ifcond.docgen()
        if not doc:
            return []
        doc = nodes.literal('', doc)
        if not with_if:
            return [doc]
        nodelist = [nodes.Text(' ('), nodes.strong('', 'If: ')]
        nodelist.append(doc)
        nodelist.append(nodes.Text(')'))
        return nodelist
    def _nodes_for_one_member(self, member):
        """Return list of Text, literal nodes for this member
        Return a list of doctree nodes which give text like
        'name: type (optional) (If: ...)' suitable for use as the
        'term' part of a definition list item.
        """
        term = [nodes.literal('', member.name)]
        if member.type.doc_type():
            term.append(nodes.Text(': '))
            term.append(nodes.literal('', member.type.doc_type()))
        if member.optional:
            term.append(nodes.Text(' (optional)'))
        if member.ifcond.is_present():
            term.extend(self._nodes_for_ifcond(member.ifcond))
        return term
    def _nodes_for_variant_when(self, variants, variant):
        """Return list of Text, literal nodes for variant 'when' clause
        Return a list of doctree nodes which give text like
        'when tagname is variant (If: ...)' suitable for use in
        the 'variants' part of a definition list.
        """
        term = [nodes.Text(' when '),
                nodes.literal('', variants.tag_member.name),
                nodes.Text(' is '),
                nodes.literal('', '"%s"' % variant.name)]
        if variant.ifcond.is_present():
            term.extend(self._nodes_for_ifcond(variant.ifcond))
        return term
    def _nodes_for_members(self, doc, what, base=None, variants=None):
        """Return list of doctree nodes for the table of members"""
        dlnode = nodes.definition_list()
        for section in doc.args.values():
            term = self._nodes_for_one_member(section.member)
            # TODO drop fallbacks when undocumented members are outlawed
            if section.text:
                defn = section.text
            elif (variants and variants.tag_member == section.member
                  and not section.member.type.doc_type()):
                values = section.member.type.member_names()
                defn = [nodes.Text('One of ')]
                defn.extend(intersperse([nodes.literal('', v) for v in values],
                                        nodes.Text(', ')))
            else:
                defn = [nodes.Text('Not documented')]
            dlnode += self._make_dlitem(term, defn)
        if base:
            dlnode += self._make_dlitem([nodes.Text('The members of '),
                                         nodes.literal('', base.doc_type())],
                                        None)
        if variants:
            for v in variants.variants:
                if v.type.is_implicit():
                    assert not v.type.base and not v.type.variants
                    for m in v.type.local_members:
                        term = self._nodes_for_one_member(m)
                        term.extend(self._nodes_for_variant_when(variants, v))
                        dlnode += self._make_dlitem(term, None)
                else:
                    term = [nodes.Text('The members of '),
                            nodes.literal('', v.type.doc_type())]
                    term.extend(self._nodes_for_variant_when(variants, v))
                    dlnode += self._make_dlitem(term, None)
        if not dlnode.children:
            return []
        section = self._make_section(what)
        section += dlnode
        return [section]
    def _nodes_for_enum_values(self, doc):
        """Return list of doctree nodes for the table of enum values"""
        seen_item = False
        dlnode = nodes.definition_list()
        for section in doc.args.values():
            termtext = [nodes.literal('', section.member.name)]
            if section.member.ifcond.is_present():
                termtext.extend(self._nodes_for_ifcond(section.member.ifcond))
            # TODO drop fallbacks when undocumented members are outlawed
            if section.text:
                defn = section.text
            else:
                defn = [nodes.Text('Not documented')]
            dlnode += self._make_dlitem(termtext, defn)
            seen_item = True
        if not seen_item:
            return []
        section = self._make_section('Values')
        section += dlnode
        return [section]
    def _nodes_for_arguments(self, doc, boxed_arg_type):
        """Return list of doctree nodes for the arguments section"""
        if boxed_arg_type:
            # Boxed arguments are documented on the type, not inline.
            assert not doc.args
            section = self._make_section('Arguments')
            dlnode = nodes.definition_list()
            dlnode += self._make_dlitem(
                [nodes.Text('The members of '),
                 nodes.literal('', boxed_arg_type.name)],
                None)
            section += dlnode
            return [section]
        return self._nodes_for_members(doc, 'Arguments')
    def _nodes_for_features(self, doc):
        """Return list of doctree nodes for the table of features"""
        seen_item = False
        dlnode = nodes.definition_list()
        for section in doc.features.values():
            dlnode += self._make_dlitem([nodes.literal('', section.name)],
                                        section.text)
            seen_item = True
        if not seen_item:
            return []
        section = self._make_section('Features')
        section += dlnode
        return [section]
    def _nodes_for_example(self, exampletext):
        """Return list of doctree nodes for a code example snippet"""
        return [nodes.literal_block(exampletext, exampletext)]
    def _nodes_for_sections(self, doc):
        """Return list of doctree nodes for additional sections"""
        nodelist = []
        for section in doc.sections:
            snode = self._make_section(section.name)
            if section.name and section.name.startswith('Example'):
                snode += self._nodes_for_example(section.text)
            else:
                self._parse_text_into_node(section.text, snode)
            nodelist.append(snode)
        return nodelist
    def _nodes_for_if_section(self, ifcond):
        """Return list of doctree nodes for the "If" section"""
        nodelist = []
        if ifcond.is_present():
            snode = self._make_section('If')
            snode += nodes.paragraph(
                '', '', *self._nodes_for_ifcond(ifcond, with_if=False)
            )
            nodelist.append(snode)
        return nodelist
    def _add_doc(self, typ, sections):
        """Add documentation for a command/object/enum...
        We assume we're documenting the thing defined in self._cur_doc.
        typ is the type of thing being added ("Command", "Object", etc)
        sections is a list of nodes for sections to add to the definition.
        """
        doc = self._cur_doc
        snode = nodes.section(ids=[self._sphinx_directive.new_serialno()])
        snode += nodes.title('', '', *[nodes.literal(doc.symbol, doc.symbol),
                                       nodes.Text(' (' + typ + ')')])
        self._parse_text_into_node(doc.body.text, snode)
        for s in sections:
            if s is not None:
                snode += s
        self._add_node_to_current_heading(snode)
    def visit_enum_type(self, name, info, ifcond, features, members, prefix):
        """Add documentation nodes for an enum type definition."""
        doc = self._cur_doc
        self._add_doc('Enum',
                      self._nodes_for_enum_values(doc)
                      + self._nodes_for_features(doc)
                      + self._nodes_for_sections(doc)
                      + self._nodes_for_if_section(ifcond))
    def visit_object_type(self, name, info, ifcond, features,
                          base, members, variants):
        """Add documentation nodes for a struct/union object type."""
        doc = self._cur_doc
        if base and base.is_implicit():
            base = None
        self._add_doc('Object',
                      self._nodes_for_members(doc, 'Members', base, variants)
                      + self._nodes_for_features(doc)
                      + self._nodes_for_sections(doc)
                      + self._nodes_for_if_section(ifcond))
    def visit_alternate_type(self, name, info, ifcond, features, variants):
        """Add documentation nodes for an alternate type definition."""
        doc = self._cur_doc
        self._add_doc('Alternate',
                      self._nodes_for_members(doc, 'Members')
                      + self._nodes_for_features(doc)
                      + self._nodes_for_sections(doc)
                      + self._nodes_for_if_section(ifcond))
    def visit_command(self, name, info, ifcond, features, arg_type,
                      ret_type, gen, success_response, boxed, allow_oob,
                      allow_preconfig, coroutine):
        """Add documentation nodes for a command definition."""
        doc = self._cur_doc
        self._add_doc('Command',
                      self._nodes_for_arguments(doc,
                                                arg_type if boxed else None)
                      + self._nodes_for_features(doc)
                      + self._nodes_for_sections(doc)
                      + self._nodes_for_if_section(ifcond))
    def visit_event(self, name, info, ifcond, features, arg_type, boxed):
        """Add documentation nodes for an event definition."""
        doc = self._cur_doc
        self._add_doc('Event',
                      self._nodes_for_arguments(doc,
                                                arg_type if boxed else None)
                      + self._nodes_for_features(doc)
                      + self._nodes_for_sections(doc)
                      + self._nodes_for_if_section(ifcond))
    def symbol(self, doc, entity):
        """Add documentation for one symbol to the document tree
        This is the main entry point which causes us to add documentation
        nodes for a symbol (which could be a 'command', 'object', 'event',
        etc). We do this by calling 'visit' on the schema entity, which
        will then call back into one of our visit_* methods, depending
        on what kind of thing this symbol is.
        """
        self._cur_doc = doc
        entity.visit(self)
        self._cur_doc = None
    def _start_new_heading(self, heading, level):
        """Start a new heading at the specified heading level
        Create a new section whose title is 'heading' and which is placed
        in the docutils node tree as a child of the most recent level-1
        heading. Subsequent document sections (commands, freeform doc chunks,
        etc) will be placed as children of this new heading section.
        """
        if len(self._active_headings) < level:
            raise QAPISemError(self._cur_doc.info,
                               'Level %d subheading found outside a '
                               'level %d heading'
                               % (level, level - 1))
        snode = self._make_section(heading)
        self._active_headings[level - 1] += snode
        self._active_headings = self._active_headings[:level]
        self._active_headings.append(snode)
    def _add_node_to_current_heading(self, node):
        """Add the node to whatever the current active heading is"""
        self._active_headings[-1] += node
    def freeform(self, doc):
        """Add a piece of 'freeform' documentation to the document tree
        A 'freeform' document chunk doesn't relate to any particular
        symbol (for instance, it could be an introduction).
        If the freeform document starts with a line of the form
        '= Heading text', this is a section or subsection heading, with
        the heading level indicated by the number of '=' signs.
        """
        # QAPIDoc documentation says free-form documentation blocks
        # must have only a body section, nothing else.
        assert not doc.sections
        assert not doc.args
        assert not doc.features
        self._cur_doc = doc
        text = doc.body.text
        if re.match(r'=+ ', text):
            # Section/subsection heading (if present, will always be
            # the first line of the block)
            (heading, _, text) = text.partition('\n')
            (leader, _, heading) = heading.partition(' ')
            self._start_new_heading(heading, len(leader))
            if text == '':
                return
        node = self._make_section(None)
        self._parse_text_into_node(text, node)
        self._add_node_to_current_heading(node)
        self._cur_doc = None
    def _parse_text_into_node(self, doctext, node):
        """Parse a chunk of QAPI-doc-format text into the node
        The doc comment can contain most inline rST markup, including
        bulleted and enumerated lists.
        As an extra permitted piece of markup, @var will be turned
        into ``var``.
        """
        # Handle the "@var means ``var`` case
        doctext = re.sub(r'@([\w-]+)', r'``\1``', doctext)
        rstlist = ViewList()
        for line in doctext.splitlines():
            # The reported line number will always be that of the start line
            # of the doc comment, rather than the actual location of the error.
            # Being more precise would require overhaul of the QAPIDoc class
            # to track lines more exactly within all the sub-parts of the doc
            # comment, as well as counting lines here.
            rstlist.append(line, self._cur_doc.info.fname,
                           self._cur_doc.info.line)
        # Append a blank line -- in some cases rST syntax errors get
        # attributed to the line after one with actual text, and if there
        # isn't anything in the ViewList corresponding to that then Sphinx
        # 1.6's AutodocReporter will then misidentify the source/line location
        # in the error message (usually attributing it to the top-level
        # .rst file rather than the offending .json file). The extra blank
        # line won't affect the rendered output.
        rstlist.append("", self._cur_doc.info.fname, self._cur_doc.info.line)
        self._sphinx_directive.do_parse(rstlist, node)
    def get_document_nodes(self):
        """Return the list of docutils nodes which make up the document"""
        return self._top_node.children
class QAPISchemaGenDepVisitor(QAPISchemaVisitor):
    """QAPI schema visitor that registers each module as a Sphinx dependency.

    For every (non-builtin) module visited, Sphinx's note_dependency()
    is called so that the generated documentation is rebuilt whenever
    the corresponding schema input file changes.
    """
    def __init__(self, env, qapidir):
        self._env = env
        self._qapidir = qapidir
    def visit_module(self, name):
        # The builtin pseudo-module has no backing file to depend on.
        if name != "./builtin":
            schema_path = os.path.abspath(self._qapidir + '/' + name)
            self._env.note_dependency(schema_path)
        super().visit_module(name)
class QAPIDocDirective(Directive):
    """Extract documentation from the specified QAPI .json file"""
    # The schema pathname argument is mandatory (see the module
    # docstring).  This was previously spelled "required_argument",
    # which docutils silently ignores, so the argument was only
    # accepted by accident via optional_arguments.
    required_arguments = 1
    optional_arguments = 1
    option_spec = {
        'qapifile': directives.unchanged_required
    }
    has_content = False
    def new_serialno(self):
        """Return a unique new ID string suitable for use as a node's ID"""
        env = self.state.document.settings.env
        return 'qapidoc-%d' % env.new_serialno('qapidoc')
    def run(self):
        """Parse the schema file and return the generated document nodes."""
        env = self.state.document.settings.env
        qapifile = env.config.qapidoc_srctree + '/' + self.arguments[0]
        qapidir = os.path.dirname(qapifile)
        try:
            schema = QAPISchema(qapifile)
            # First tell Sphinx about all the schema files that the
            # output documentation depends on (including 'qapifile' itself)
            schema.visit(QAPISchemaGenDepVisitor(env, qapidir))
            vis = QAPISchemaGenRSTVisitor(self)
            vis.visit_begin(schema)
            for doc in schema.docs:
                if doc.symbol:
                    vis.symbol(doc, schema.lookup_entity(doc.symbol))
                else:
                    vis.freeform(doc)
            return vis.get_document_nodes()
        except QAPIError as err:
            # Launder QAPI parse errors into Sphinx extension errors
            # so they are displayed nicely to the user
            raise ExtensionError(str(err))
    def do_parse(self, rstlist, node):
        """Parse rST source lines and add them to the specified node
        Take the list of rST source lines rstlist, parse them as
        rST, and add the resulting docutils nodes as children of node.
        The nodes are parsed in a way that allows them to include
        subheadings (titles) without confusing the rendering of
        anything else.
        """
        # This is from kerneldoc.py -- it works around an API change in
        # Sphinx between 1.6 and 1.7. Unlike kerneldoc.py, we use
        # sphinx.util.nodes.nested_parse_with_titles() rather than the
        # plain self.state.nested_parse(), and so we can drop the saving
        # of title_styles and section_level that kerneldoc.py does,
        # because nested_parse_with_titles() does that for us.
        if Use_SSI:
            with switch_source_input(self.state, rstlist):
                nested_parse_with_titles(self.state, rstlist, node)
        else:
            save = self.state.memo.reporter
            self.state.memo.reporter = AutodocReporter(
                rstlist, self.state.memo.reporter)
            try:
                nested_parse_with_titles(self.state, rstlist, node)
            finally:
                self.state.memo.reporter = save
def setup(app):
    """Register the qapi-doc directive with Sphinx."""
    app.add_config_value('qapidoc_srctree', None, 'env')
    app.add_directive('qapi-doc', QAPIDocDirective)
    # Extension metadata: safe for parallel reads and writes.
    return {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
| 22,053 | 38.736937 | 79 | py |
qemu | qemu-master/docs/sphinx/qmp_lexer.py | # QEMU Monitor Protocol Lexer Extension
#
# Copyright (C) 2019, Red Hat Inc.
#
# Authors:
# Eduardo Habkost <[email protected]>
# John Snow <[email protected]>
#
# This work is licensed under the terms of the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
"""qmp_lexer is a Sphinx extension that provides a QMP lexer for code blocks."""
from pygments.lexer import RegexLexer, DelegatingLexer
from pygments.lexers.data import JsonLexer
from pygments import token
from sphinx import errors
class QMPExampleMarkersLexer(RegexLexer):
    """
    QMPExampleMarkersLexer lexes QMP example annotations.
    This lexer adds support for directionality flow and elision indicators.
    """
    tokens = {
        'root': [
            # "-> " marks a message sent from client to server
            (r'-> ', token.Generic.Prompt),
            # "<- " marks a message sent from server to client
            (r'<- ', token.Generic.Prompt),
            # "..." (optionally space-padded) marks elided content
            (r' ?\.{3} ?', token.Generic.Prompt),
        ]
    }
class QMPExampleLexer(DelegatingLexer):
    """QMPExampleLexer lexes annotated QMP examples.

    JSON content is delegated to JsonLexer, while the directionality
    and elision markers are picked out by QMPExampleMarkersLexer.
    """
    def __init__(self, **options):
        # Zero-argument super() -- the explicit two-argument form used
        # previously is a Python 2 leftover.
        super().__init__(JsonLexer, QMPExampleMarkersLexer,
                         token.Error, **options)
def setup(sphinx):
    """For use by the Sphinx extensions API."""
    try:
        # Sphinx >= 2.1 takes the lexer class itself...
        sphinx.require_sphinx('2.1')
        sphinx.add_lexer('QMP', QMPExampleLexer)
    except errors.VersionRequirementError:
        # ...while older versions want an instantiated lexer.
        sphinx.add_lexer('QMP', QMPExampleLexer())
| 1,436 | 31.659091 | 80 | py |
qemu | qemu-master/docs/sphinx/dbusdomain.py | # D-Bus sphinx domain extension
#
# Copyright (C) 2021, Red Hat Inc.
#
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Author: Marc-André Lureau <[email protected]>
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
cast,
)
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.addnodes import desc_signature, pending_xref
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index, IndexEntry, ObjType
from sphinx.locale import _
from sphinx.roles import XRefRole
from sphinx.util import nodes as node_utils
from sphinx.util.docfields import Field, TypedField
from sphinx.util.typing import OptionSpec
class DBusDescription(ObjectDescription[str]):
    """Base class for DBus objects"""
    option_spec: OptionSpec = ObjectDescription.option_spec.copy()
    option_spec.update(
        {
            "deprecated": directives.flag,
        }
    )
    def get_index_text(self, modname: str, name: str) -> str:
        """Return the text for the index entry of the object."""
        raise NotImplementedError("must be implemented in subclasses")
    def add_target_and_index(
        self, name: str, sig: str, signode: desc_signature
    ) -> None:
        # Qualify the node id with the enclosing interface, when there
        # is one (members documented inside a dbus:interface block).
        ifacename = self.env.ref_context.get("dbus:interface")
        node_id = name
        if ifacename:
            node_id = f"{ifacename}.{node_id}"
        signode["names"].append(name)
        signode["ids"].append(node_id)
        if "noindexentry" not in self.options:
            indextext = self.get_index_text(ifacename, name)
            if indextext:
                self.indexnode["entries"].append(
                    ("single", indextext, node_id, "", None)
                )
        # Record the object in the dbus domain so cross-references can
        # resolve to it.
        domain = cast(DBusDomain, self.env.get_domain("dbus"))
        domain.note_object(name, self.objtype, node_id, location=signode)
class DBusInterface(DBusDescription):
    """
    Implementation of ``dbus:interface``.
    """

    def get_index_text(self, ifacename: str, name: str) -> str:
        # Interfaces are indexed under their own name.
        return ifacename

    def before_content(self) -> None:
        # Expose the interface name to nested member directives.
        self.env.ref_context["dbus:interface"] = self.arguments[0]

    def after_content(self) -> None:
        self.env.ref_context.pop("dbus:interface")

    def handle_signature(self, sig: str, signode: desc_signature) -> str:
        ann = "interface "
        signode += addnodes.desc_annotation(ann, ann)
        signode += addnodes.desc_name(sig, sig)
        return sig

    def run(self) -> List[Node]:
        # Wrap the generated description in a titled section of its own.
        _, desc_node = super().run()
        ifname = self.arguments[0]
        section = nodes.section(ids=[ifname + "-section"])
        section += nodes.title(ifname, "%s interface" % ifname)
        section += desc_node
        return [self.indexnode, section]
class DBusMember(DBusDescription):
    """Common base for interface members (methods, signals, properties)."""
    # Overridden to True by DBusSignal; DBusMethod.handle_signature uses
    # it to pick the "signal"/"method" annotation and skip return values.
    signal = False
class DBusMethod(DBusMember):
    """
    Implementation of ``dbus:method``.
    """
    option_spec: OptionSpec = DBusMember.option_spec.copy()
    option_spec.update(
        {
            "noreply": directives.flag,
        }
    )
    doc_field_types: List[Field] = [
        TypedField(
            "arg",
            label=_("Arguments"),
            names=("arg",),
            rolename="arg",
            typerolename=None,
            typenames=("argtype", "type"),
        ),
        TypedField(
            "ret",
            label=_("Returns"),
            names=("ret",),
            rolename="ret",
            typerolename=None,
            typenames=("rettype", "type"),
        ),
    ]
    def get_index_text(self, ifacename: str, name: str) -> str:
        return _("%s() (%s method)") % (name, ifacename)
    def handle_signature(self, sig: str, signode: desc_signature) -> str:
        params = addnodes.desc_parameterlist()
        returns = addnodes.desc_parameterlist()
        # Pre-parse the directive body so the ":arg ..."/":ret ..." field
        # lists can be mined for the parameter and return signatures.
        contentnode = addnodes.desc_content()
        self.state.nested_parse(self.content, self.content_offset, contentnode)
        for child in contentnode:
            if isinstance(child, nodes.field_list):
                for field in child:
                    # Field name is "<kind> <signature> <argname>",
                    # e.g. "arg s name".
                    ty, sg, name = field[0].astext().split(None, 2)
                    param = addnodes.desc_parameter()
                    param += addnodes.desc_sig_keyword_type(sg, sg)
                    param += addnodes.desc_sig_space()
                    param += addnodes.desc_sig_name(name, name)
                    if ty == "arg":
                        params += param
                    elif ty == "ret":
                        returns += param
        anno = "signal " if self.signal else "method "
        signode += addnodes.desc_annotation(anno, anno)
        signode += addnodes.desc_name(sig, sig)
        signode += params
        # Signals and :noreply: methods render no return arrow.
        if not self.signal and "noreply" not in self.options:
            ret = addnodes.desc_returns()
            ret += returns
            signode += ret
        return sig
class DBusSignal(DBusMethod):
    """
    Implementation of ``dbus:signal``.

    Reuses DBusMethod's signature rendering with ``signal = True``;
    signals have arguments but no return list.
    """
    doc_field_types: List[Field] = [
        TypedField(
            "arg",
            label=_("Arguments"),
            names=("arg",),
            rolename="arg",
            typerolename=None,
            typenames=("argtype", "type"),
        ),
    ]
    signal = True
    def get_index_text(self, ifacename: str, name: str) -> str:
        return _("%s() (%s signal)") % (name, ifacename)
class DBusProperty(DBusMember):
    """
    Implementation of ``dbus:property``.
    """
    option_spec: OptionSpec = DBusMember.option_spec.copy()
    option_spec.update(
        {
            "type": directives.unchanged,
            "readonly": directives.flag,
            "writeonly": directives.flag,
            "readwrite": directives.flag,
            "emits-changed": directives.unchanged,
        }
    )
    doc_field_types: List[Field] = []
    def get_index_text(self, ifacename: str, name: str) -> str:
        return _("%s (%s property)") % (name, ifacename)
    def transform_content(self, contentnode: addnodes.desc_content) -> None:
        """Prepend a field list describing access mode and change signalling,
        derived from the directive options (last access option wins)."""
        fieldlist = nodes.field_list()
        access = None
        if "readonly" in self.options:
            access = _("read-only")
        if "writeonly" in self.options:
            access = _("write-only")
        if "readwrite" in self.options:
            access = _("read & write")
        if access:
            content = nodes.Text(access)
            fieldname = nodes.field_name("", _("Access"))
            fieldbody = nodes.field_body("", nodes.paragraph("", "", content))
            field = nodes.field("", fieldname, fieldbody)
            fieldlist += field
        emits = self.options.get("emits-changed", None)
        if emits:
            content = nodes.Text(emits)
            fieldname = nodes.field_name("", _("Emits Changed"))
            fieldbody = nodes.field_body("", nodes.paragraph("", "", content))
            field = nodes.field("", fieldname, fieldbody)
            fieldlist += field
        if len(fieldlist) > 0:
            contentnode.insert(0, fieldlist)
    def handle_signature(self, sig: str, signode: desc_signature) -> str:
        """Render "property NAME: TYPE" using the :type: option."""
        contentnode = addnodes.desc_content()
        self.state.nested_parse(self.content, self.content_offset, contentnode)
        ty = self.options.get("type")
        signode += addnodes.desc_annotation("property ", "property ")
        signode += addnodes.desc_name(sig, sig)
        signode += addnodes.desc_sig_punctuation("", ":")
        signode += addnodes.desc_sig_keyword_type(ty, ty)
        return sig
    def run(self) -> List[Node]:
        # Register under the generic member directive name.
        self.name = "dbus:member"
        return super().run()
class DBusXRef(XRefRole):
    """Cross-reference role for D-Bus objects.

    Mirrors the Python domain's xref handling: a leading ``~`` hides the
    interface prefix in the displayed title, and a leading ``.`` makes
    the lookup prefer more specific namespaces.
    """
    def process_link(self, env, refnode, has_explicit_title, title, target):
        refnode["dbus:interface"] = env.ref_context.get("dbus:interface")
        if not has_explicit_title:
            title = title.lstrip(".") # only has a meaning for the target
            target = target.lstrip("~") # only has a meaning for the title
            # if the first character is a tilde, don't display the module/class
            # parts of the contents
            if title[0:1] == "~":
                title = title[1:]
                dot = title.rfind(".")
                if dot != -1:
                    title = title[dot + 1 :]
        # if the first character is a dot, search more specific namespaces first
        # else search builtins first
        if target[0:1] == ".":
            target = target[1:]
            refnode["refspecific"] = True
        return title, target
class DBusIndex(Index):
    """
    Index subclass to provide a D-Bus interfaces index.
    """
    name = "dbusindex"
    localname = _("D-Bus Interfaces Index")
    shortname = _("dbus")
    def generate(
        self, docnames: Iterable[str] = None
    ) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]:
        """Return (sorted list of (first-letter, entries), collapse flag)."""
        content: Dict[str, List[IndexEntry]] = {}
        # list of prefixes to ignore
        ignores: List[str] = self.domain.env.config["dbus_index_common_prefix"]
        ignores = sorted(ignores, key=len, reverse=True)
        ifaces = sorted(
            [
                x
                for x in self.domain.data["objects"].items()
                if x[1].objtype == "interface"
            ],
            key=lambda x: x[0].lower(),
        )
        # NOTE: the `_` in this unpack shadows gettext's `_` inside the loop.
        for name, (docname, node_id, _) in ifaces:
            if docnames and docname not in docnames:
                continue
            # strip the longest matching common prefix, if any
            for ignore in ignores:
                if name.startswith(ignore):
                    name = name[len(ignore) :]
                    stripped = ignore
                    break
            else:
                stripped = ""
            entries = content.setdefault(name[0].lower(), [])
            entries.append(IndexEntry(stripped + name, 0, docname, node_id, "", "", ""))
        # sort by first letter
        sorted_content = sorted(content.items())
        return sorted_content, False
class ObjectEntry(NamedTuple):
    """Entry in DBusDomain's object table: where an object is documented."""
    docname: str   # document containing the description
    node_id: str   # anchor id of the description node in that document
    objtype: str   # directive objtype (e.g. "interface")
class DBusDomain(Domain):
    """
    Implementation of the D-Bus domain.
    """
    name = "dbus"
    label = "D-Bus"
    # Maps each object type to its label and the xref role names (from
    # ``roles`` below) that may refer to objects of that type.
    object_types: Dict[str, ObjType] = {
        "interface": ObjType(_("interface"), "iface", "obj"),
        "method": ObjType(_("method"), "meth", "obj"),
        "signal": ObjType(_("signal"), "sig", "obj"),
        # Fixed: the role for properties is registered as "prop" below;
        # the previous "attr"/"_prop" names did not match any role.
        "property": ObjType(_("property"), "prop", "obj"),
    }
    directives = {
        "interface": DBusInterface,
        "method": DBusMethod,
        "signal": DBusSignal,
        "property": DBusProperty,
    }
    roles = {
        "iface": DBusXRef(),
        "meth": DBusXRef(),
        "sig": DBusXRef(),
        "prop": DBusXRef(),
    }
    initial_data: Dict[str, Dict[str, Tuple[Any]]] = {
        "objects": {},  # fullname -> ObjectEntry
    }
    indices = [
        DBusIndex,
    ]

    @property
    def objects(self) -> Dict[str, ObjectEntry]:
        return self.data.setdefault("objects", {})  # fullname -> ObjectEntry

    def note_object(
        self, name: str, objtype: str, node_id: str, location: Any = None
    ) -> None:
        """Record that *name* (an object of *objtype*) is described in the
        document currently being read."""
        self.objects[name] = ObjectEntry(self.env.docname, node_id, objtype)

    def clear_doc(self, docname: str) -> None:
        """Drop all objects recorded for *docname* (called on re-read)."""
        for fullname, obj in list(self.objects.items()):
            if obj.docname == docname:
                del self.objects[fullname]

    def find_obj(self, typ: str, name: str) -> Optional[ObjectEntry]:
        """Look up *name*, tolerating a trailing "()" and, for member
        roles, an interface-qualified name.

        Fixed: the return annotation previously claimed
        ``Optional[Tuple[str, ObjectEntry]]``, but the method returns the
        ObjectEntry (or None) directly.
        """
        # skip parens
        if name[-2:] == "()":
            name = name[:-2]
        if typ in ("meth", "sig", "prop"):
            # Members are stored under their bare name; drop any
            # "Interface.Member" qualification.
            try:
                ifacename, name = name.rsplit(".", 1)
            except ValueError:
                pass
        return self.objects.get(name)

    def resolve_xref(
        self,
        env: "BuildEnvironment",
        fromdocname: str,
        builder: "Builder",
        typ: str,
        target: str,
        node: pending_xref,
        contnode: Element,
    ) -> Optional[Element]:
        """Resolve the pending_xref *node* with the given *typ* and *target*."""
        objdef = self.find_obj(typ, target)
        if objdef:
            return node_utils.make_refnode(
                builder, fromdocname, objdef.docname, objdef.node_id, contnode
            )
        # Implicit None: Sphinx treats an unresolved reference as such.

    def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
        """Yield (name, dispname, type, docname, anchor, priority) tuples."""
        for refname, obj in self.objects.items():
            yield (refname, refname, obj.objtype, obj.docname, obj.node_id, 1)
def setup(app):
    """Sphinx extension entry point: register the D-Bus domain."""
    app.add_domain(DBusDomain)
    # Prefixes stripped from interface names in the generated index.
    app.add_config_value("dbus_index_common_prefix", [], "env")
| 12,736 | 30.29484 | 88 | py |
qemu | qemu-master/docs/sphinx/kerneldoc.py | # coding=utf-8
#
# Copyright © 2016 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Jani Nikula <[email protected]>
#
# Please make sure this works on both python2 and python3.
#
import codecs
import os
import subprocess
import sys
import re
import glob
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
#
# AutodocReporter is only good up to Sphinx 1.7
#
import sphinx
Use_SSI = sphinx.__version__[:3] >= '1.7'
if Use_SSI:
from sphinx.util.docutils import switch_source_input
else:
from sphinx.ext.autodoc import AutodocReporter
import kernellog
__version__ = '1.0'
class KernelDocDirective(Directive):
    """Extract kernel-doc comments from the specified file"""
    # Fixed: docutils spells this "required_arguments" (plural); the
    # previous "required_argument" spelling was silently ignored, so the
    # mandatory filename argument was never actually enforced.
    required_arguments = 1
    optional_arguments = 4
    option_spec = {
        'doc': directives.unchanged_required,
        'functions': directives.unchanged,
        'export': directives.unchanged,
        'internal': directives.unchanged,
    }
    has_content = False

    def run(self):
        """Run the kernel-doc script and parse its rST output into nodes."""
        env = self.state.document.settings.env
        cmd = env.config.kerneldoc_bin + ['-rst', '-enable-lineno']

        # Pass the version string to kernel-doc, as it needs to use a different
        # dialect, depending what the C domain supports for each specific
        # Sphinx versions
        cmd += ['-sphinx-version', sphinx.__version__]

        # Pass through the warnings-as-errors flag
        if env.config.kerneldoc_werror:
            cmd += ['-Werror']

        filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
        export_file_patterns = []

        # Tell sphinx of the dependency
        env.note_dependency(os.path.abspath(filename))

        tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)

        # FIXME: make this nicer and more robust against errors
        if 'export' in self.options:
            cmd += ['-export']
            export_file_patterns = str(self.options.get('export')).split()
        elif 'internal' in self.options:
            cmd += ['-internal']
            export_file_patterns = str(self.options.get('internal')).split()
        elif 'doc' in self.options:
            cmd += ['-function', str(self.options.get('doc'))]
        elif 'functions' in self.options:
            functions = self.options.get('functions').split()
            if functions:
                for f in functions:
                    cmd += ['-function', f]
            else:
                # ":functions:" with no names: emit only the kernel-doc
                # bodies, without the per-file DOC: sections.
                cmd += ['-no-doc-sections']

        for pattern in export_file_patterns:
            for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
                env.note_dependency(os.path.abspath(f))
                cmd += ['-export-file', f]

        cmd += [filename]

        try:
            kernellog.verbose(env.app,
                              'calling kernel-doc \'%s\'' % (" ".join(cmd)))

            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()

            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')

            if p.returncode != 0:
                sys.stderr.write(err)

                kernellog.warn(env.app,
                               'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
                return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
            elif env.config.kerneldoc_verbosity > 0:
                sys.stderr.write(err)

            lines = statemachine.string2lines(out, tab_width, convert_whitespace=True)
            result = ViewList()

            lineoffset = 0  # fixed: dropped stray trailing semicolon
            line_regex = re.compile("^#define LINENO ([0-9]+)$")
            for line in lines:
                match = line_regex.search(line)
                if match:
                    # sphinx counts lines from 0
                    lineoffset = int(match.group(1)) - 1
                    # we must eat our comments since they upset the markup
                else:
                    result.append(line, filename, lineoffset)
                    lineoffset += 1

            node = nodes.section()
            self.do_parse(result, node)

            return node.children

        except Exception as e:  # pylint: disable=W0703
            kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' %
                           (" ".join(cmd), str(e)))
            return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]

    def do_parse(self, result, node):
        # Sphinx >= 1.7 provides switch_source_input(); older versions
        # need the AutodocReporter dance to get source/line info right.
        if Use_SSI:
            with switch_source_input(self.state, result):
                self.state.nested_parse(result, 0, node, match_titles=1)
        else:
            save = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
            self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter)
            self.state.memo.title_styles, self.state.memo.section_level = [], 0
            try:
                self.state.nested_parse(result, 0, node, match_titles=1)
            finally:
                self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = save
def setup(app):
    """Sphinx extension entry point: config values + kernel-doc directive."""
    app.add_config_value('kerneldoc_bin', None, 'env')
    app.add_config_value('kerneldoc_srctree', None, 'env')
    app.add_config_value('kerneldoc_verbosity', 1, 'env')
    app.add_config_value('kerneldoc_werror', 0, 'env')
    app.add_directive('kernel-doc', KernelDocDirective)
    return dict(
        version = __version__,
        parallel_read_safe = True,
        parallel_write_safe = True
    )
| 6,773 | 36.016393 | 110 | py |
qemu | qemu-master/docs/sphinx/hxtool.py | # coding=utf-8
#
# QEMU hxtool .hx file parsing extension
#
# Copyright (c) 2020 Linaro
#
# This work is licensed under the terms of the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
"""hxtool is a Sphinx extension that implements the hxtool-doc directive"""
# The purpose of this extension is to read fragments of rST
# from .hx files, and insert them all into the current document.
# The rST fragments are delimited by SRST/ERST lines.
# The conf.py file must set the hxtool_srctree config value to
# the root of the QEMU source tree.
# Each hxtool-doc:: directive takes one argument which is the
# path of the .hx file to process, relative to the source tree.
import os
import re
from enum import Enum
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from sphinx.errors import ExtensionError
from sphinx.util.nodes import nested_parse_with_titles
import sphinx
# Sphinx up to 1.6 uses AutodocReporter; 1.7 and later
# use switch_source_input. Check borrowed from kerneldoc.py.
Use_SSI = sphinx.__version__[:3] >= '1.7'
if Use_SSI:
from sphinx.util.docutils import switch_source_input
else:
from sphinx.ext.autodoc import AutodocReporter
__version__ = '1.0'
# We parse hx files with a state machine which may be in one of two
# states: reading the C code fragment, or inside a rST fragment.
class HxState(Enum):
    """Parser state: inside the C text or inside an rST fragment."""
    CTEXT = 1
    RST = 2
def serror(file, lnum, errtext):
    """Raise ExtensionError reporting a syntax error at *file*:*lnum*."""
    raise ExtensionError('%s line %d: syntax error: %s' % (file, lnum, errtext))
def parse_directive(line):
    """Return the first word of *line* (the hx directive name), or '' if none.

    Fixed: the pattern is now a raw string -- ``'\\W'`` in a plain string
    is an invalid escape sequence and warns on modern Python.  A maxsplit
    of 1 avoids splitting the rest of the line we never look at.
    """
    return re.split(r'\W', line, maxsplit=1)[0]
def parse_defheading(file, lnum, line):
    """Handle a DEFHEADING directive"""
    # Input looks like "DEFHEADING(some string)".  The string may be
    # empty: such directives exist only to add blank lines to the
    # plain-text --help output and are skipped by our caller.  Any
    # trailing ':' is stripped so the heading matches the style of the
    # other headings in the rST documentation.
    m = re.match(r'DEFHEADING\((.*?):?\)', line)
    if m is None:
        serror(file, lnum, "Invalid DEFHEADING line")
    return m.group(1)
def parse_archheading(file, lnum, line):
    """Handle an ARCHHEADING directive"""
    # Input looks like "ARCHHEADING(some string, other arg)"; as with
    # DEFHEADING the string may be empty, and empty headings are ignored
    # by our caller.  A trailing ':' is stripped for consistency with
    # other rST headings.
    m = re.match(r'ARCHHEADING\((.*?):?,.*\)', line)
    if m is None:
        serror(file, lnum, "Invalid ARCHHEADING line")
    return m.group(1)
class HxtoolDocDirective(Directive):
    """Extract rST fragments from the specified .hx file"""
    # Fixed: docutils spells this "required_arguments" (plural); the
    # previous "required_argument" spelling was silently ignored, so the
    # mandatory .hx filename argument was never actually enforced.
    required_arguments = 1
    optional_arguments = 1
    option_spec = {
        'hxfile': directives.unchanged_required
    }
    has_content = False

    def run(self):
        """Parse the .hx file and return the generated section node(s)."""
        env = self.state.document.settings.env
        hxfile = env.config.hxtool_srctree + '/' + self.arguments[0]

        # Tell sphinx of the dependency
        env.note_dependency(os.path.abspath(hxfile))

        state = HxState.CTEXT

        # We build up lines of rST in this ViewList, which we will
        # later put into a 'section' node.
        rstlist = ViewList()

        current_node = None
        node_list = []

        with open(hxfile) as f:
            lines = (l.rstrip() for l in f)
            for lnum, line in enumerate(lines, 1):
                directive = parse_directive(line)

                if directive == 'HXCOMM':
                    pass
                elif directive == 'SRST':
                    if state == HxState.RST:
                        serror(hxfile, lnum, 'expected ERST, found SRST')
                    else:
                        state = HxState.RST
                elif directive == 'ERST':
                    if state == HxState.CTEXT:
                        serror(hxfile, lnum, 'expected SRST, found ERST')
                    else:
                        state = HxState.CTEXT
                elif directive == 'DEFHEADING' or directive == 'ARCHHEADING':
                    if directive == 'DEFHEADING':
                        heading = parse_defheading(hxfile, lnum, line)
                    else:
                        heading = parse_archheading(hxfile, lnum, line)
                    if heading == "":
                        continue
                    # Put the accumulated rST into the previous node,
                    # and then start a fresh section with this heading.
                    if len(rstlist) > 0:
                        if current_node is None:
                            # We had some rST fragments before the first
                            # DEFHEADING. We don't have a section to put
                            # these in, so rather than magicing up a section,
                            # make it a syntax error.
                            serror(hxfile, lnum,
                                   'first DEFHEADING must precede all rST text')
                        self.do_parse(rstlist, current_node)
                        rstlist = ViewList()
                    if current_node is not None:
                        node_list.append(current_node)
                    section_id = 'hxtool-%d' % env.new_serialno('hxtool')
                    current_node = nodes.section(ids=[section_id])
                    current_node += nodes.title(heading, heading)
                else:
                    # Not a directive: put in output if we are in rST fragment
                    if state == HxState.RST:
                        # Sphinx counts its lines from 0
                        rstlist.append(line, hxfile, lnum - 1)

        if current_node is None:
            # We don't have multiple sections, so just parse the rst
            # fragments into a dummy node so we can return the children.
            current_node = nodes.section()
            self.do_parse(rstlist, current_node)
            return current_node.children
        else:
            # Put the remaining accumulated rST into the last section, and
            # return all the sections.
            if len(rstlist) > 0:
                self.do_parse(rstlist, current_node)
            node_list.append(current_node)
            return node_list

    # This is from kerneldoc.py -- it works around an API change in
    # Sphinx between 1.6 and 1.7. Unlike kerneldoc.py, we use
    # sphinx.util.nodes.nested_parse_with_titles() rather than the
    # plain self.state.nested_parse(), and so we can drop the saving
    # of title_styles and section_level that kerneldoc.py does,
    # because nested_parse_with_titles() does that for us.
    def do_parse(self, result, node):
        if Use_SSI:
            with switch_source_input(self.state, result):
                nested_parse_with_titles(self.state, result, node)
        else:
            save = self.state.memo.reporter
            self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter)
            try:
                nested_parse_with_titles(self.state, result, node)
            finally:
                self.state.memo.reporter = save
def setup(app):
    """Register the hxtool-doc directive and its config value with Sphinx."""
    app.add_config_value('hxtool_srctree', None, 'env')
    app.add_directive('hxtool-doc', HxtoolDocDirective)
    return dict(
        version = __version__,
        parallel_read_safe = True,
        parallel_write_safe = True
    )
| 7,850 | 39.678756 | 88 | py |
qemu | qemu-master/docs/sphinx/dbusparser.py | # Based from "GDBus - GLib D-Bus Library":
#
# Copyright (C) 2008-2011 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library; if not, see <http://www.gnu.org/licenses/>.
#
# Author: David Zeuthen <[email protected]>
import xml.parsers.expat
class Annotation:
    """A D-Bus annotation: a key/value pair that may itself be annotated."""

    def __init__(self, key, value):
        self.key, self.value = key, value
        # Filled in later by the XML parser / doc-comment handling.
        self.annotations = []
        self.since = ""
class Arg:
    """A method/signal argument: a name plus a D-Bus type signature."""

    def __init__(self, name, signature):
        self.name, self.signature = name, signature
        # Filled in later by the XML parser / doc-comment handling.
        self.annotations = []
        self.doc_string, self.since = "", ""
class Method:
    """A parsed D-Bus method declaration and its metadata."""

    def __init__(self, name, h_type_implies_unix_fd=True):
        self.name = name
        self.h_type_implies_unix_fd = h_type_implies_unix_fd
        # Populated by the XML parser while walking <arg> children.
        self.in_args, self.out_args = [], []
        self.annotations = []
        # Doc-comment metadata.
        self.doc_string, self.since = "", ""
        self.deprecated = False
        self.unix_fd = False
class Signal:
    """A parsed D-Bus signal declaration and its metadata."""

    def __init__(self, name):
        self.name = name
        # Populated by the XML parser while walking <arg> children.
        self.args, self.annotations = [], []
        # Doc-comment metadata.
        self.doc_string, self.since = "", ""
        self.deprecated = False
class Property:
    """A parsed D-Bus property: name, type signature and access mode."""

    # access attribute value -> (readable, writable)
    _ACCESS_FLAGS = {
        "readwrite": (True, True),
        "read": (True, False),
        "write": (False, True),
    }

    def __init__(self, name, signature, access):
        self.name = name
        self.signature = signature
        self.access = access
        self.annotations = []
        # The value is modelled as a pseudo-argument that shares this
        # property's annotation list.
        self.arg = Arg("value", self.signature)
        self.arg.annotations = self.annotations
        self.readable = False
        self.writable = False
        if access not in self._ACCESS_FLAGS:
            raise ValueError('Invalid access type "{}"'.format(self.access))
        self.readable, self.writable = self._ACCESS_FLAGS[access]
        # Doc-comment metadata.
        self.doc_string, self.since = "", ""
        self.deprecated = False
        self.emits_changed_signal = True
class Interface:
    """A parsed D-Bus interface: its members plus doc-comment metadata."""

    def __init__(self, name):
        self.name = name
        # Populated by the XML parser as member elements are seen.
        self.methods, self.signals, self.properties = [], [], []
        self.annotations = []
        # Doc-comment metadata.
        self.doc_string, self.doc_string_brief = "", ""
        self.since = ""
        self.deprecated = False
class DBusXMLParser:
    """Expat-based parser for D-Bus introspection XML.

    Builds Interface/Method/Signal/Property/Arg/Annotation objects from
    the XML elements, and attaches documentation extracted from GTK-Doc
    style XML comments ("Symbol:" header, "@param: ..." lines, body).
    Parsing happens in __init__; results are in ``parsed_interfaces``.
    """
    # Element-handling states, named after the XML element being read.
    STATE_TOP = "top"
    STATE_NODE = "node"
    STATE_INTERFACE = "interface"
    STATE_METHOD = "method"
    STATE_SIGNAL = "signal"
    STATE_PROPERTY = "property"
    STATE_ARG = "arg"
    STATE_ANNOTATION = "annotation"
    STATE_IGNORED = "ignored"
    def __init__(self, xml_data, h_type_implies_unix_fd=True):
        """Parse *xml_data* immediately; see ``parsed_interfaces``."""
        self._parser = xml.parsers.expat.ParserCreate()
        self._parser.CommentHandler = self.handle_comment
        self._parser.CharacterDataHandler = self.handle_char_data
        self._parser.StartElementHandler = self.handle_start_element
        self._parser.EndElementHandler = self.handle_end_element
        self.parsed_interfaces = []
        self._cur_object = None
        self.state = DBusXMLParser.STATE_TOP
        self.state_stack = []
        self._cur_object = None
        self._cur_object_stack = []
        self.doc_comment_last_symbol = ""
        self._h_type_implies_unix_fd = h_type_implies_unix_fd
        self._parser.Parse(xml_data)
    # Doc-comment scanner states: expecting the symbol header line,
    # reading "@param:" lines, reading the free-form body, or skipping
    # a comment that is not a doc comment.
    COMMENT_STATE_BEGIN = "begin"
    COMMENT_STATE_PARAMS = "params"
    COMMENT_STATE_BODY = "body"
    COMMENT_STATE_SKIP = "skip"
    def handle_comment(self, data):
        """Scan an XML comment for GTK-Doc style documentation.

        On success, stores the documented symbol name, its parameter
        docs and body text for the next matching element to pick up.
        """
        comment_state = DBusXMLParser.COMMENT_STATE_BEGIN
        lines = data.split("\n")
        symbol = ""
        body = ""
        in_para = False
        params = {}
        for line in lines:
            orig_line = line
            line = line.lstrip()
            if comment_state == DBusXMLParser.COMMENT_STATE_BEGIN:
                # First non-empty line: either "Symbol:" or "Symbol: text".
                if len(line) > 0:
                    colon_index = line.find(": ")
                    if colon_index == -1:
                        if line.endswith(":"):
                            symbol = line[0 : len(line) - 1]
                            comment_state = DBusXMLParser.COMMENT_STATE_PARAMS
                        else:
                            comment_state = DBusXMLParser.COMMENT_STATE_SKIP
                    else:
                        symbol = line[0:colon_index]
                        rest_of_line = line[colon_index + 2 :].strip()
                        if len(rest_of_line) > 0:
                            body += rest_of_line + "\n"
                        comment_state = DBusXMLParser.COMMENT_STATE_PARAMS
            elif comment_state == DBusXMLParser.COMMENT_STATE_PARAMS:
                # "@name: docs" lines; anything else starts the body.
                if line.startswith("@"):
                    colon_index = line.find(": ")
                    if colon_index == -1:
                        comment_state = DBusXMLParser.COMMENT_STATE_BODY
                        if not in_para:
                            in_para = True
                        body += orig_line + "\n"
                    else:
                        param = line[1:colon_index]
                        docs = line[colon_index + 2 :]
                        params[param] = docs
                else:
                    comment_state = DBusXMLParser.COMMENT_STATE_BODY
                    if len(line) > 0:
                        if not in_para:
                            in_para = True
                        body += orig_line + "\n"
            elif comment_state == DBusXMLParser.COMMENT_STATE_BODY:
                # Blank lines separate paragraphs in the body.
                if len(line) > 0:
                    if not in_para:
                        in_para = True
                    body += orig_line + "\n"
                else:
                    if in_para:
                        body += "\n"
                    in_para = False
        if in_para:
            body += "\n"
        if symbol != "":
            self.doc_comment_last_symbol = symbol
            self.doc_comment_params = params
            self.doc_comment_body = body
    def handle_char_data(self, data):
        # Character data is not used by this parser.
        # print 'char_data=%s'%data
        pass
    def handle_start_element(self, name, attrs):
        """Dispatch on (current state, element name), creating the model
        object for recognized elements and attaching any pending doc
        comment whose symbol matches; unknown elements are ignored."""
        old_state = self.state
        old_cur_object = self._cur_object
        if self.state == DBusXMLParser.STATE_IGNORED:
            # Everything nested under an ignored element stays ignored.
            self.state = DBusXMLParser.STATE_IGNORED
        elif self.state == DBusXMLParser.STATE_TOP:
            if name == DBusXMLParser.STATE_NODE:
                self.state = DBusXMLParser.STATE_NODE
            else:
                self.state = DBusXMLParser.STATE_IGNORED
        elif self.state == DBusXMLParser.STATE_NODE:
            if name == DBusXMLParser.STATE_INTERFACE:
                self.state = DBusXMLParser.STATE_INTERFACE
                iface = Interface(attrs["name"])
                self._cur_object = iface
                self.parsed_interfaces.append(iface)
            elif name == DBusXMLParser.STATE_ANNOTATION:
                self.state = DBusXMLParser.STATE_ANNOTATION
                anno = Annotation(attrs["name"], attrs["value"])
                self._cur_object.annotations.append(anno)
                self._cur_object = anno
            else:
                self.state = DBusXMLParser.STATE_IGNORED
            # assign docs, if any
            if "name" in attrs and self.doc_comment_last_symbol == attrs["name"]:
                self._cur_object.doc_string = self.doc_comment_body
                if "short_description" in self.doc_comment_params:
                    short_description = self.doc_comment_params["short_description"]
                    self._cur_object.doc_string_brief = short_description
                if "since" in self.doc_comment_params:
                    self._cur_object.since = self.doc_comment_params["since"].strip()
        elif self.state == DBusXMLParser.STATE_INTERFACE:
            if name == DBusXMLParser.STATE_METHOD:
                self.state = DBusXMLParser.STATE_METHOD
                method = Method(
                    attrs["name"], h_type_implies_unix_fd=self._h_type_implies_unix_fd
                )
                self._cur_object.methods.append(method)
                self._cur_object = method
            elif name == DBusXMLParser.STATE_SIGNAL:
                self.state = DBusXMLParser.STATE_SIGNAL
                signal = Signal(attrs["name"])
                self._cur_object.signals.append(signal)
                self._cur_object = signal
            elif name == DBusXMLParser.STATE_PROPERTY:
                self.state = DBusXMLParser.STATE_PROPERTY
                prop = Property(attrs["name"], attrs["type"], attrs["access"])
                self._cur_object.properties.append(prop)
                self._cur_object = prop
            elif name == DBusXMLParser.STATE_ANNOTATION:
                self.state = DBusXMLParser.STATE_ANNOTATION
                anno = Annotation(attrs["name"], attrs["value"])
                self._cur_object.annotations.append(anno)
                self._cur_object = anno
            else:
                self.state = DBusXMLParser.STATE_IGNORED
            # assign docs, if any
            if "name" in attrs and self.doc_comment_last_symbol == attrs["name"]:
                self._cur_object.doc_string = self.doc_comment_body
                if "since" in self.doc_comment_params:
                    self._cur_object.since = self.doc_comment_params["since"].strip()
        elif self.state == DBusXMLParser.STATE_METHOD:
            if name == DBusXMLParser.STATE_ARG:
                self.state = DBusXMLParser.STATE_ARG
                arg_name = None
                if "name" in attrs:
                    arg_name = attrs["name"]
                arg = Arg(arg_name, attrs["type"])
                # Method args default to direction "in".
                direction = attrs.get("direction", "in")
                if direction == "in":
                    self._cur_object.in_args.append(arg)
                elif direction == "out":
                    self._cur_object.out_args.append(arg)
                else:
                    raise ValueError('Invalid direction "{}"'.format(direction))
                self._cur_object = arg
            elif name == DBusXMLParser.STATE_ANNOTATION:
                self.state = DBusXMLParser.STATE_ANNOTATION
                anno = Annotation(attrs["name"], attrs["value"])
                self._cur_object.annotations.append(anno)
                self._cur_object = anno
            else:
                self.state = DBusXMLParser.STATE_IGNORED
            # assign docs, if any
            if self.doc_comment_last_symbol == old_cur_object.name:
                if "name" in attrs and attrs["name"] in self.doc_comment_params:
                    doc_string = self.doc_comment_params[attrs["name"]]
                    if doc_string is not None:
                        self._cur_object.doc_string = doc_string
                    if "since" in self.doc_comment_params:
                        self._cur_object.since = self.doc_comment_params[
                            "since"
                        ].strip()
        elif self.state == DBusXMLParser.STATE_SIGNAL:
            if name == DBusXMLParser.STATE_ARG:
                self.state = DBusXMLParser.STATE_ARG
                arg_name = None
                if "name" in attrs:
                    arg_name = attrs["name"]
                arg = Arg(arg_name, attrs["type"])
                self._cur_object.args.append(arg)
                self._cur_object = arg
            elif name == DBusXMLParser.STATE_ANNOTATION:
                self.state = DBusXMLParser.STATE_ANNOTATION
                anno = Annotation(attrs["name"], attrs["value"])
                self._cur_object.annotations.append(anno)
                self._cur_object = anno
            else:
                self.state = DBusXMLParser.STATE_IGNORED
            # assign docs, if any
            if self.doc_comment_last_symbol == old_cur_object.name:
                if "name" in attrs and attrs["name"] in self.doc_comment_params:
                    doc_string = self.doc_comment_params[attrs["name"]]
                    if doc_string is not None:
                        self._cur_object.doc_string = doc_string
                    if "since" in self.doc_comment_params:
                        self._cur_object.since = self.doc_comment_params[
                            "since"
                        ].strip()
        elif self.state == DBusXMLParser.STATE_PROPERTY:
            if name == DBusXMLParser.STATE_ANNOTATION:
                self.state = DBusXMLParser.STATE_ANNOTATION
                anno = Annotation(attrs["name"], attrs["value"])
                self._cur_object.annotations.append(anno)
                self._cur_object = anno
            else:
                self.state = DBusXMLParser.STATE_IGNORED
        elif self.state == DBusXMLParser.STATE_ARG:
            if name == DBusXMLParser.STATE_ANNOTATION:
                self.state = DBusXMLParser.STATE_ANNOTATION
                anno = Annotation(attrs["name"], attrs["value"])
                self._cur_object.annotations.append(anno)
                self._cur_object = anno
            else:
                self.state = DBusXMLParser.STATE_IGNORED
        elif self.state == DBusXMLParser.STATE_ANNOTATION:
            # Annotations may themselves be annotated, recursively.
            if name == DBusXMLParser.STATE_ANNOTATION:
                self.state = DBusXMLParser.STATE_ANNOTATION
                anno = Annotation(attrs["name"], attrs["value"])
                self._cur_object.annotations.append(anno)
                self._cur_object = anno
            else:
                self.state = DBusXMLParser.STATE_IGNORED
        else:
            raise ValueError(
                'Unhandled state "{}" while entering element with name "{}"'.format(
                    self.state, name
                )
            )
        # Push the previous state/object so handle_end_element can restore.
        self.state_stack.append(old_state)
        self._cur_object_stack.append(old_cur_object)
    def handle_end_element(self, name):
        # Restore the state/object active before the matching start tag.
        self.state = self.state_stack.pop()
        self._cur_object = self._cur_object_stack.pop()
def parse_dbus_xml(xml_data):
    """Parse D-Bus introspection XML, returning a list of Interface objects."""
    return DBusXMLParser(xml_data, True).parsed_interfaces
| 14,665 | 38.213904 | 86 | py |
qemu | qemu-master/docs/sphinx/kernellog.py | # SPDX-License-Identifier: GPL-2.0
#
# Sphinx has deprecated its older logging interface, but the replacement
# only goes back to 1.6. So here's a wrapper layer to keep around for
# as long as we support 1.4.
#
import sphinx
if sphinx.__version__[:3] >= '1.6':
UseLogging = True
from sphinx.util import logging
logger = logging.getLogger('kerneldoc')
else:
UseLogging = False
def warn(app, message):
    """Emit a warning via the Sphinx >= 1.6 logging API when available,
    otherwise via the old application-object method."""
    if UseLogging:
        logger.warning(message)
    else:
        app.warn(message)
def verbose(app, message):
    """Emit a verbose-level message via the Sphinx >= 1.6 logging API when
    available, otherwise via the old application-object method."""
    if UseLogging:
        logger.verbose(message)
    else:
        app.verbose(message)
| 626 | 20.62069 | 72 | py |
qemu | qemu-master/docs/sphinx/depfile.py | # coding=utf-8
#
# QEMU depfile generation extension
#
# Copyright (c) 2020 Red Hat, Inc.
#
# This work is licensed under the terms of the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
"""depfile is a Sphinx extension that writes a dependency file for
an external build system"""
import os
import sphinx
import sys
from pathlib import Path
__version__ = '1.0'
def get_infiles(env):
    """Yield every input file the documentation build depends on:
    the documents themselves, their recorded dependencies, all loaded
    Python modules (so extension changes trigger rebuilds), and files
    under the static/template paths."""
    for x in env.found_docs:
        yield env.doc2path(x)
        yield from ((os.path.join(env.srcdir, dep)
                    for dep in env.dependencies[x]))
    for mod in sys.modules.values():
        if hasattr(mod, '__file__'):
            if mod.__file__:
                yield mod.__file__
    # this is perhaps going to include unused files:
    for static_path in env.config.html_static_path + env.config.templates_path:
        for path in Path(static_path).rglob('*'):
            yield str(path)
def write_depfile(app, exception):
    """'build-finished' hook: write a make-style dependency file listing
    every input of the documentation build (no-op on failed builds or
    when the 'depfile' config value is unset)."""
    if exception:
        return
    env = app.env
    if not env.config.depfile:
        return
    # Using a directory as the output file does not work great because
    # its timestamp does not necessarily change when the contents change.
    # So create a timestamp file.
    if env.config.depfile_stamp:
        with open(env.config.depfile_stamp, 'w') as f:
            pass
    with open(env.config.depfile, 'w') as f:
        print((env.config.depfile_stamp or app.outdir) + ": \\", file=f)
        print(*get_infiles(env), file=f)
        # Emit an empty rule per input so the build does not break when
        # an input file disappears.
        for x in get_infiles(env):
            print(x + ":", file=f)
def setup(app):
    """Sphinx extension entry point: register config values and the
    build-finished hook that writes the depfile."""
    app.add_config_value('depfile', None, 'env')
    app.add_config_value('depfile_stamp', None, 'env')
    app.connect('build-finished', write_depfile)
    return dict(
        version = __version__,
        parallel_read_safe = True,
        parallel_write_safe = True
    )
| 1,872 | 26.955224 | 79 | py |
qemu | qemu-master/docs/sphinx/dbusdoc.py | # D-Bus XML documentation extension
#
# Copyright (C) 2021, Red Hat Inc.
#
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Author: Marc-André Lureau <[email protected]>
"""dbus-doc is a Sphinx extension that provides documentation from D-Bus XML."""
import os
import re
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import sphinx
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.states import RSTState
from docutils.statemachine import StringList, ViewList
from sphinx.application import Sphinx
from sphinx.errors import ExtensionError
from sphinx.util import logging
from sphinx.util.docstrings import prepare_docstring
from sphinx.util.docutils import SphinxDirective, switch_source_input
from sphinx.util.nodes import nested_parse_with_titles
import dbusdomain
from dbusparser import parse_dbus_xml
logger = logging.getLogger(__name__)
__version__ = "1.0"
class DBusDoc:
    """Accumulates generated reST lines describing a D-Bus XML file.

    The reST is collected into ``self.result`` (a ``StringList``) so the
    calling directive can feed it back to the Sphinx parser.
    """

    def __init__(self, sphinx_directive, dbusfile):
        self._cur_doc = None
        self._sphinx_directive = sphinx_directive
        self._dbusfile = dbusfile
        self._top_node = nodes.section()
        self.result = StringList()
        # Current indentation prefix; grown/shrunk in 3-space steps while
        # emitting the body of a directive.
        self.indent = ""

    def add_line(self, line: str, *lineno: int) -> None:
        """Append one line of generated reST to the output."""
        if line.strip():  # not a blank line
            self.result.append(self.indent + line, self._dbusfile, *lineno)
        else:
            self.result.append("", self._dbusfile, *lineno)

    def add_method(self, method):
        """Emit a ``dbus:method`` directive for *method*."""
        self.add_line(f".. dbus:method:: {method.name}")
        self.add_line("")
        self.indent += "   "
        for arg in method.in_args:
            self.add_line(f":arg {arg.signature} {arg.name}: {arg.doc_string}")
        for arg in method.out_args:
            self.add_line(f":ret {arg.signature} {arg.name}: {arg.doc_string}")
        self.add_line("")
        for line in prepare_docstring("\n" + method.doc_string):
            self.add_line(line)
        self.indent = self.indent[:-3]

    def add_signal(self, signal):
        """Emit a ``dbus:signal`` directive for *signal*."""
        self.add_line(f".. dbus:signal:: {signal.name}")
        self.add_line("")
        self.indent += "   "
        for arg in signal.args:
            self.add_line(f":arg {arg.signature} {arg.name}: {arg.doc_string}")
        self.add_line("")
        for line in prepare_docstring("\n" + signal.doc_string):
            self.add_line(line)
        self.indent = self.indent[:-3]

    def add_property(self, prop):
        """Emit a ``dbus:property`` directive for *prop*."""
        self.add_line(f".. dbus:property:: {prop.name}")
        self.indent += "   "
        self.add_line(f":type: {prop.signature}")
        access = {"read": "readonly", "write": "writeonly", "readwrite": "readwrite"}[
            prop.access
        ]
        self.add_line(f":{access}:")
        if prop.emits_changed_signal:
            # Plain string: there is no placeholder, so no f-prefix (F541).
            self.add_line(":emits-changed: yes")
        self.add_line("")
        for line in prepare_docstring("\n" + prop.doc_string):
            self.add_line(line)
        self.indent = self.indent[:-3]

    def add_interface(self, iface):
        """Emit a ``dbus:interface`` directive with all of its members."""
        self.add_line(f".. dbus:interface:: {iface.name}")
        self.add_line("")
        self.indent += "   "
        for line in prepare_docstring("\n" + iface.doc_string):
            self.add_line(line)
        for method in iface.methods:
            self.add_method(method)
        for sig in iface.signals:
            self.add_signal(sig)
        for prop in iface.properties:
            self.add_property(prop)
        self.indent = self.indent[:-3]
def parse_generated_content(state: RSTState, content: StringList) -> List[Node]:
    """Parse generated reST *content* and return the resulting doctree nodes."""
    with switch_source_input(state, content):
        container = nodes.paragraph()
        container.document = state.document
        state.nested_parse(content, 0, container)
        return container.children
class DBusDocDirective(SphinxDirective):
    """Extract documentation from the specified D-Bus XML file"""

    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True

    def run(self):
        """Generate reST for the named D-Bus XML file and parse it."""
        reporter = self.state.document.reporter
        try:
            source, lineno = reporter.get_source_and_line(self.lineno)  # type: ignore
        except AttributeError:
            source, lineno = (None, None)

        logger.debug("[dbusdoc] %s:%s: input:\n%s", source, lineno, self.block_text)

        env = self.state.document.settings.env
        # NOTE(review): this reads qapidoc_srctree although setup() registers
        # dbusdoc_srctree -- it presumably relies on the qapidoc extension
        # being loaded as well; confirm which option is intended.
        dbusfile = env.config.qapidoc_srctree + "/" + self.arguments[0]
        with open(dbusfile, "rb") as f:
            xml_data = f.read()

        doc = DBusDoc(self, dbusfile)
        for iface in parse_dbus_xml(xml_data):
            doc.add_interface(iface)
        return parse_generated_content(self.state, doc.result)
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the dbus-doc directive and the D-Bus domain with Sphinx."""
    app.add_config_value("dbusdoc_srctree", None, "env")
    app.add_directive("dbus-doc", DBusDocDirective)
    dbusdomain.setup(app)
    metadata = {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
    return metadata
| 5,364 | 31.125749 | 87 | py |
qemu | qemu-master/.gitlab-ci.d/check-patch.py | #!/usr/bin/env python3
#
# check-patch.py: run checkpatch.pl across all commits in a branch
#
# Copyright (C) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import os.path
import sys
import subprocess

namespace = "qemu-project"
if len(sys.argv) >= 2:
    namespace = sys.argv[1]

cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)

# GitLab CI environment does not give us any direct info about the
# base for the user's branch. We thus need to figure out a common
# ancestor between the user's branch and current git master.
subprocess.check_call(["git", "remote", "add", "check-patch", repourl])
try:
    subprocess.check_call(["git", "fetch", "check-patch", "master"],
                          stdout=subprocess.DEVNULL,
                          stderr=subprocess.DEVNULL)

    ancestor = subprocess.check_output(["git", "merge-base",
                                        "check-patch/master", "HEAD"],
                                       universal_newlines=True)
    ancestor = ancestor.strip()

    log = subprocess.check_output(["git", "log", "--format=%H %s",
                                   ancestor + "..."],
                                  universal_newlines=True)
finally:
    # Always drop the temporary remote, even when fetch/merge-base fails,
    # so a re-run does not die with "remote check-patch already exists".
    subprocess.check_call(["git", "remote", "rm", "check-patch"])

if log == "":
    print("\nNo commits since %s, skipping checks\n" % ancestor)
    sys.exit(0)

print("\nChecking all commits since %s...\n" % ancestor, flush=True)
ret = subprocess.run(["scripts/checkpatch.pl", "--terse", ancestor + "..."])

if ret.returncode != 0:
    print("  ❌ FAIL one or more commits failed scripts/checkpatch.pl")
    sys.exit(1)

sys.exit(0)
| 1,689 | 28.649123 | 76 | py |
qemu | qemu-master/.gitlab-ci.d/check-dco.py | #!/usr/bin/env python3
#
#
# check-dco.py: validate all commits are signed off
#
# Copyright (C) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import os.path
import sys
import subprocess

namespace = "qemu-project"
if len(sys.argv) >= 2:
    namespace = sys.argv[1]

cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)

# Find a common ancestor between the user's branch and current git
# master, using a temporary remote that is always cleaned up.
subprocess.check_call(["git", "remote", "add", "check-dco", repourl])
try:
    subprocess.check_call(["git", "fetch", "check-dco", "master"],
                          stdout=subprocess.DEVNULL,
                          stderr=subprocess.DEVNULL)

    ancestor = subprocess.check_output(["git", "merge-base",
                                        "check-dco/master", "HEAD"],
                                       universal_newlines=True)
    ancestor = ancestor.strip()
finally:
    # Always drop the temporary remote, even when fetch/merge-base fails,
    # so a re-run does not die with "remote check-dco already exists".
    subprocess.check_call(["git", "remote", "rm", "check-dco"])

errors = False

print("\nChecking for 'Signed-off-by: NAME <EMAIL>' " +
      "on all commits since %s...\n" % ancestor)

log = subprocess.check_output(["git", "log", "--format=%H %s",
                               ancestor + "..."],
                              universal_newlines=True)

if log == "":
    commits = []
else:
    commits = [[c[0:40], c[41:]] for c in log.strip().split("\n")]

for sha, subject in commits:
    msg = subprocess.check_output(["git", "show", "-s", sha],
                                  universal_newlines=True)
    lines = msg.strip().split("\n")

    print("🔍 %s %s" % (sha, subject))
    sob = False
    for line in lines:
        if "Signed-off-by:" in line:
            sob = True
            if "localhost" in line:
                print("    ❌ FAIL: bad email in %s" % line)
                errors = True

    if not sob:
        print("    ❌ FAIL missing Signed-off-by tag")
        errors = True

if errors:
    print("""
❌ ERROR: One or more commits are missing a valid Signed-off-By tag.
This project requires all contributors to assert that their contributions
are provided in compliance with the terms of the Developer's Certificate
of Origin 1.1 (DCO):
https://developercertificate.org/
To indicate acceptance of the DCO every commit must have a tag
Signed-off-by: REAL NAME <EMAIL>
This can be achieved by passing the "-s" flag to the "git commit" command.
To bulk update all commits on current branch "git rebase" can be used:
git rebase -i master -x 'git commit --amend --no-edit -s'
""")
    sys.exit(1)

sys.exit(0)
sys.exit(0)
| 2,504 | 25.368421 | 74 | py |
adanet | adanet-master/research/improve_nas/setup.py | """Setup file.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import find_packages
from setuptools import setup
# Packages required at install time.
REQUIRED_PACKAGES = [
    'tensorflow>=1.12',
    'adanet==0.5.0',
]

setup(
    name='trainer',
    version='0.1',
    description='improve nas model',
    packages=find_packages(),
    include_package_data=True,
    install_requires=REQUIRED_PACKAGES,
)
| 934 | 29.16129 | 72 | py |
adanet | adanet-master/research/improve_nas/trainer/cifar100.py | # Lint as: python3
"""CIFAR-100 data and convenience functions.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.keras.datasets import cifar100
# pylint: disable=g-import-not-at-top
try:
from adanet.research.improve_nas.trainer import image_processing
except ImportError as e:
from trainer import image_processing
# pylint: enable=g-import-not-at-top
FEATURES = 'x'
PreprocessingType = image_processing.PreprocessingType
class Provider(object):
  """A CIFAR-100 data provider."""

  def __init__(self,
               params_string='',
               seed=None):
    """Returns a CIFAR-100 `Provider`.

    Args:
      params_string: Comma-separated `name=value` overrides for the default
        preprocessing hyperparameters (`cutout`, `augmentation`).
      seed: Optional integer seed for the random augmentation ops so tests
        can be deterministic.
    """
    # For testing
    self._seed = seed
    default_params = tf.contrib.training.HParams(
        cutout=True, augmentation=PreprocessingType.BASIC)
    self._params = default_params.parse(params_string)

  def _preprocess_data(self, image, label, training, preprocess):
    """Applies resizing, normalization and optional augmentation.

    Args:
      image: A 3-D image `Tensor`.
      label: A label `Tensor` of shape [1], or None.
      training: Whether we are training (enables random augmentation and
        cutout).
      preprocess: Whether to apply any preprocessing at all.

    Returns:
      A `(features, label)` tuple where `features` maps `FEATURES` to the
      processed image.

    Raises:
      ValueError: If the configured augmentation type is unsupported.
    """
    if preprocess:
      image_height, image_width = self._shape()[:2]
      if self._params.augmentation == PreprocessingType.BASIC:
        image = image_processing.resize_and_normalize(image, image_height,
                                                      image_width)
        if training:
          image = image_processing.basic_augmentation(image, image_height,
                                                      image_width, self._seed)
      else:
        raise ValueError('Unsupported data augmentation type: `%s`' %
                         self._params.augmentation)

      if training and self._params.cutout:
        # According to https://arxiv.org/abs/1708.04552, cutting out 16x16
        # works best.
        image = image_processing.cutout(image, pad_size=8, seed=self._seed)

    # Set shapes so that they are defined.
    image.set_shape(self._shape())
    if label is not None:
      label.set_shape([1])
    return {FEATURES: image}, label

  def _cifar100_dataset(self, partition):
    """Returns a partition ('train' or eval) of the CIFAR-100 `Dataset`."""
    cifar100_data = None
    try:
      cifar100_data = cifar100.load_data()
      tf.logging.info('Loaded cifar100.')
    except Exception:  # pylint: disable=broad-except
      # Use `except Exception` rather than a bare `except` so that
      # KeyboardInterrupt/SystemExit still propagate; any download or IO
      # failure falls back to tiny dummy data suitable for tests.
      tf.logging.info(
          'Can not load cifar100 from internet. Creating dummy data for '
          'testing.')
      data = np.zeros((3, 32, 32, 3))
      labels = np.array([[47], [52], [5]])
      data[:, 0, 0] = [220, 25, 47]
      data[:, -1, 0, 0] = 128
      cifar100_data = ((data, labels), (data, labels))

    (x_train, y_train), (x_test, y_test) = cifar100_data
    if partition == 'train':
      x, y = x_train, y_train
    else:
      x, y = x_test, y_test

    dataset = tf.data.Dataset.from_tensor_slices((x, y.astype(np.int32)))
    return dataset.cache()

  def _shape(self):
    """Returns a 3-dimensional list with the shape of the image."""
    return [32, 32, 3]

  def get_input_fn(self,
                   partition,
                   mode,
                   batch_size,
                   preprocess=True,
                   use_tpu=False):
    """See `data.Provider` get_input_fn."""

    def input_fn(params=None):
      """Provides batches of CIFAR images.

      Args:
        params: A dict containing the batch_size on TPU, otherwise None.

      Returns:
        images: A `Tensor` of size [batch_size, 32, 32, 3]
        labels: A `Tensor` of size [batch_size, 1],
      """
      # On TPU the runtime supplies the per-shard batch size via `params`.
      batch_size_ = batch_size
      if use_tpu:
        batch_size_ = params.get('batch_size', batch_size)

      training = mode == tf.estimator.ModeKeys.TRAIN
      dataset = self._cifar100_dataset(partition)
      dataset = dataset.map(
          functools.partial(
              self._preprocess_data, training=training, preprocess=preprocess))
      if training:
        dataset = dataset.apply(
            tf.contrib.data.shuffle_and_repeat(
                buffer_size=500, seed=self._seed))
      # TPU requires fixed batch shapes, hence drop_remainder=use_tpu.
      return dataset.batch(
          batch_size_,
          drop_remainder=use_tpu).prefetch(tf.data.experimental.AUTOTUNE
                                          ).make_one_shot_iterator().get_next()

    return input_fn

  def get_head(self, name=None):
    """Returns a `Head` instance for CIFAR-100 with the given name."""
    return tf.contrib.estimator.multi_class_head(
        100, name=name, loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)

  def get_feature_columns(self):
    """Returns feature columns."""
    feature_columns = [
        tf.feature_column.numeric_column(key=FEATURES, shape=self._shape())
    ]
    return feature_columns
| 5,382 | 31.823171 | 79 | py |
adanet | adanet-master/research/improve_nas/trainer/adanet_improve_nas.py | # Lint as: python3
"""Defines adanet estimator builder.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import adanet
import tensorflow.compat.v1 as tf
# pylint: disable=g-import-not-at-top
try:
from adanet.research.improve_nas.trainer import improve_nas
from adanet.research.improve_nas.trainer import optimizer
except ImportError as e:
from trainer import improve_nas
from trainer import optimizer
# pylint: enable=g-import-not-at-top
class GeneratorType(object):
  """Controls what generator is used.

  `SIMPLE` repeatedly proposes the same architecture (pure ensembling);
  `DYNAMIC` gradually grows the candidate architectures each iteration.
  """

  # Candidates get deeper/wider across AdaNet iterations.
  DYNAMIC = "dynamic"
  # Every iteration proposes the same fixed architecture.
  SIMPLE = "simple"
class Builder(object):
  """An AdaNet estimator builder."""

  def estimator(self,
                data_provider,
                run_config,
                hparams,
                train_steps=None,
                seed=None):
    """Returns an AdaNet `Estimator` for train and evaluation.

    Args:
      data_provider: Data `Provider` for dataset to model.
      run_config: `RunConfig` object to configure the runtime settings.
      hparams: `HParams` instance defining custom hyperparameters.
      train_steps: number of train steps.
      seed: An integer seed if determinism is required.

    Returns:
      Returns an `Estimator`.
    """
    # Split the training budget evenly: each boosting iteration trains its
    # candidate subnetworks for the same number of steps.
    max_iteration_steps = int(train_steps / hparams.boosting_iterations)
    optimizer_fn = optimizer.fn_with_name(
        hparams.optimizer,
        learning_rate_schedule=hparams.learning_rate_schedule,
        cosine_decay_steps=max_iteration_steps)

    # NOTE(review): this mutates the caller's `hparams`; calling estimator()
    # twice with the same object would raise on the duplicate hparam.
    hparams.add_hparam("total_training_steps", max_iteration_steps)

    if hparams.generator == GeneratorType.SIMPLE:
      subnetwork_generator = improve_nas.Generator(
          feature_columns=data_provider.get_feature_columns(),
          optimizer_fn=optimizer_fn,
          iteration_steps=max_iteration_steps,
          checkpoint_dir=run_config.model_dir,
          hparams=hparams,
          seed=seed)
    elif hparams.generator == GeneratorType.DYNAMIC:
      subnetwork_generator = improve_nas.DynamicGenerator(
          feature_columns=data_provider.get_feature_columns(),
          optimizer_fn=optimizer_fn,
          iteration_steps=max_iteration_steps,
          checkpoint_dir=run_config.model_dir,
          hparams=hparams,
          seed=seed)
    else:
      raise ValueError("Invalid generator: `%s`" % hparams.generator)

    # The evaluator scores candidate ensembles on (a subset of) the training
    # data to pick the best one at the end of each iteration.
    evaluator = None
    if hparams.use_evaluator:
      evaluator = adanet.Evaluator(
          input_fn=data_provider.get_input_fn(
              partition="train",
              mode=tf.estimator.ModeKeys.EVAL,
              batch_size=hparams.evaluator_batch_size),
          steps=hparams.evaluator_steps)

    return adanet.Estimator(
        head=data_provider.get_head(),
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=max_iteration_steps,
        adanet_lambda=hparams.adanet_lambda,
        adanet_beta=hparams.adanet_beta,
        mixture_weight_type=hparams.mixture_weight_type,
        force_grow=hparams.force_grow,
        evaluator=evaluator,
        config=run_config,
        model_dir=run_config.model_dir)

  def hparams(self, default_batch_size, hparams_string):
    """Returns hyperparameters, including any flag value overrides.

    In order to allow for automated hyperparameter tuning, model hyperparameters
    are aggregated within a tf.HParams object. In this case, here are the
    hyperparameters and their descriptions:

    - optimizer: Name of the optimizer to use. See `optimizers.fn_with_name`.
    - learning_rate_schedule: Learning rate schedule string.
    - initial_learning_rate: The initial learning rate to use during training.
    - num_cells: Number of cells in the model. Must be divisible by 3.
    - num_conv_filters: The initial number of convolutional filters. The final
      layer will have 24*num_conv_filters channels.
    - weight_decay: Float amount of weight decay to apply to train loss.
    - use_aux_head: Whether to create an auxiliary head for training. This adds
      some non-determinism to training.
    - knowledge_distillation: Whether subnetworks should learn from the
      logits of the 'previous ensemble'/'previous subnetwork' in addition to
      the labels to distill/transfer/compress the knowledge in a manner
      inspired by Born Again Networks [Furlanello et al., 2018]
      (https://arxiv.org/abs/1805.04770) and Distilling the Knowledge in
      a Neural Network [Hinton at al., 2015]
      (https://arxiv.org/abs/1503.02531).
    - model_version: See `improve_nas.ModelVersion`.
    - adanet_lambda: See `adanet.Estimator`.
    - adanet_beta: See `adanet.Estimator`.
    - generator: Type of generator. `simple` generator is just ensembling,
      `dynamic` generator gradually grows the network.
    - boosting_iterations: The number of boosting iterations to perform. The
      final ensemble will have at most this many subnetworks comprising it.
    - evaluator_batch_size: Batch size for the evaluator to use when comparing
      candidates.
    - evaluator_steps: Number of batches for the evaluator to use when
      comparing candidates.
    - learn_mixture_weights: Whether to learn adanet mixture weights.
    - mixture_weight_type: Type of mixture weights.
    - batch_size: Batch size for training.
    - force_grow: Force AdaNet to add a candidate in each iteration, even if it
      would decrease the performance of the ensemble.
    - label_smoothing: Strength of label smoothing that will be applied (even
      non true labels will have a non zero representation in one hot encoding
      when computing loss).
    - clip_gradients: Clip gradient to this value.
    - aux_head_weight: NASNet cell parameter. Weight of auxiliary loss.
    - stem_multiplier: NASNet cell parameter.
    - drop_path_keep_prob: NASNet cell parameter. Probability for drop_path
      regularization.
    - dense_dropout_keep_prob: NASNet cell parameter. Dropout keep probability.
    - filter_scaling_rate: NASNet cell parameter. Controls growth of number of
      filters.
    - num_reduction_layers: NASNet cell parameter. Number of reduction layers
      that will be added to the architecture.
    - data_format: NASNet cell parameter. Controls whether data is in channels
      last or channels first format.
    - skip_reduction_layer_input: NASNet cell parameter. Whether to skip
      reduction layer.
    - use_bounded_activation: NASNet cell parameter. Whether to use bounded
      activations.
    - use_evaluator: Boolean whether to use the adanet.Evaluator to choose the
      best ensemble at each round.

    Args:
      default_batch_size: The default batch_size specified for training.
      hparams_string: If the hparams_string is given, then it will use any
        values specified in hparams to override any individually-set
        hyperparameter. This logic allows tuners to override hyperparameter
        settings to find optimal values.

    Returns:
      The hyperparameters as a tf.HParams object.
    """
    hparams = tf.contrib.training.HParams(
        # Nasnet config hparams (default cifar config)
        num_cells=3,
        num_conv_filters=10,
        aux_head_weight=0.4,
        stem_multiplier=3.0,
        drop_path_keep_prob=0.6,
        use_aux_head=True,
        dense_dropout_keep_prob=1.0,
        filter_scaling_rate=2.0,
        num_reduction_layers=2,
        data_format="NHWC",
        skip_reduction_layer_input=0,
        use_bounded_activation=False,
        # Other hparams
        clip_gradients=5,
        optimizer="momentum",
        learning_rate_schedule="cosine",
        initial_learning_rate=.025,
        weight_decay=5e-4,
        label_smoothing=0.1,
        knowledge_distillation=improve_nas.KnowledgeDistillation.ADAPTIVE,
        model_version="cifar",
        adanet_lambda=0.,
        adanet_beta=0.,
        generator=GeneratorType.SIMPLE,
        boosting_iterations=3,
        force_grow=True,
        evaluator_batch_size=-1,
        evaluator_steps=-1,
        batch_size=default_batch_size,
        learn_mixture_weights=False,
        mixture_weight_type=adanet.MixtureWeightType.SCALAR,
        use_evaluator=True,
    )
    if hparams_string:
      hparams = hparams.parse(hparams_string)
    # Negative sentinel values mean "use the defaults computed here".
    if hparams.evaluator_batch_size < 0:
      hparams.evaluator_batch_size = default_batch_size
    if hparams.evaluator_steps < 0:
      hparams.evaluator_steps = None
    return hparams
| 9,085 | 39.744395 | 80 | py |
adanet | adanet-master/research/improve_nas/trainer/improve_nas.py | # Lint as: python3
"""Defines NASNet subnetwork and subnetwork generators.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import adanet
import tensorflow.compat.v1 as tf
# pylint: disable=g-import-not-at-top
try:
from adanet.research.improve_nas.trainer import nasnet
from adanet.research.improve_nas.trainer import subnetwork_utils
except ImportError as e:
from trainer import nasnet
from trainer import subnetwork_utils
# pylint: enable=g-import-not-at-top
_PREVIOUS_NUM_CELLS = "num_cells"
_PREVIOUS_CONV_FILTERS = "num_conv_filters"
class KnowledgeDistillation(object):
  """Controls what type of knowledge distillation is used.

  In knowledge distillation we want the new subnetwork to learn from the logits
  of the previous ensemble or the previous subnetwork.

  The following distillations are defined:

  * `ADAPTIVE`: Distill previous ensemble. Inspired by Distilling the Knowledge
    in a Neural Network [Hinton et al., 2015]
    (https://arxiv.org/abs/1503.02531).
  * `BORN_AGAIN`: Distill previous subnetwork. Introduced in Born Again Networks
    [Furlanello et al., 2018](https://arxiv.org/abs/1805.04770).
  * `NONE`: Do not use knowledge distillation.
  """

  # Teacher signal is the full previous ensemble's logits.
  ADAPTIVE = "adaptive"
  # Teacher signal is only the most recent subnetwork's logits.
  BORN_AGAIN = "born_again"
  # Train on the labels alone.
  NONE = "none"
class Builder(adanet.subnetwork.Builder):
  """Builds a NASNet subnetwork for AdaNet."""

  def __init__(self, feature_columns, optimizer_fn, checkpoint_dir, hparams,
               seed):
    """Initializes a `Builder`.

    Args:
      feature_columns: The input feature columns of the problem.
      optimizer_fn: Function that accepts a float 'learning_rate' argument and
        returns an `Optimizer` instance and learning rate `Tensor` which may
        have a custom learning rate schedule applied.
      checkpoint_dir: Checkpoint directory.
      hparams: A `HParams` instance.
      seed: A Python integer. Used to create random seeds. See
        tf.set_random_seed for behavior.

    Returns:
      An instance of `Subnetwork`.
    """
    self._feature_columns = feature_columns
    self._optimizer_fn = optimizer_fn
    self._checkpoint_dir = checkpoint_dir
    self._hparams = hparams
    self._aux_head_weight = hparams.aux_head_weight
    self._learn_mixture_weights = hparams.learn_mixture_weights
    self._initial_learning_rate = hparams.initial_learning_rate
    self._knowledge_distillation = hparams.knowledge_distillation
    self._label_smoothing = hparams.label_smoothing
    self._model_version = hparams.model_version
    self._weight_decay = hparams.weight_decay
    # `num_cells` and `num_conv_filters` are not directly used here. They are
    # passed inside hparams to build_nasnet function. They are just saved in
    # `shared`.
    self._num_cells = hparams.num_cells
    self._num_conv_filters = hparams.num_conv_filters
    self._seed = seed

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
    # Prepare the input.
    assert len(self._feature_columns) == 1, "Got feature columns: {}".format(
        self._feature_columns)
    images = tf.to_float(features[self._feature_columns[0].name])
    self._name_scope = tf.get_default_graph().get_name_scope()

    seed = self._seed
    if seed is not None and previous_ensemble:
      # Deterministically change the seed for different iterations so that
      # subnetworks are not correlated.
      seed += len(previous_ensemble.weighted_subnetworks)
    arg_scope = nasnet.nasnet_cifar_arg_scope(weight_decay=self._weight_decay)
    with tf.contrib.slim.arg_scope(arg_scope):
      build_fn = nasnet.build_nasnet_cifar
      logits, end_points = build_fn(
          images,
          num_classes=logits_dimension,
          is_training=training,
          config=self._hparams)

    last_layer = end_points["global_pool"]
    # Persist the architecture so DynamicGenerator can grow it next iteration.
    subnetwork_shared_data = {
        _PREVIOUS_NUM_CELLS: tf.constant(self._num_cells),
        _PREVIOUS_CONV_FILTERS: tf.constant(self._num_conv_filters)
    }
    return adanet.Subnetwork(
        last_layer=last_layer,
        logits=logits,
        complexity=1,
        shared=subnetwork_shared_data)

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    """See `adanet.subnetwork.Builder`."""

    del loss  # Loss for training is defined below.

    # The AdaNet Estimator is responsible for incrementing the global step.
    optimizer, learning_rate = self._optimizer_fn(
        learning_rate=self._initial_learning_rate)
    with tf.name_scope(""):
      summary.scalar("learning_rate/adanet/subnetwork", learning_rate)

    onehot_labels = tf.one_hot(
        tf.reshape(labels, [-1]), subnetwork.logits.shape[-1], dtype=tf.int32)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=onehot_labels,
        logits=subnetwork.logits,
        weights=1.0,
        label_smoothing=self._label_smoothing)

    # Add knowledge ditillation loss.
    if previous_ensemble:
      if self._knowledge_distillation == KnowledgeDistillation.ADAPTIVE:
        loss += tf.losses.softmax_cross_entropy(
            onehot_labels=tf.nn.softmax(previous_ensemble.logits),
            logits=subnetwork.logits,
            weights=1.0,
            scope="loss_adaptive_kd")
      if self._knowledge_distillation == KnowledgeDistillation.BORN_AGAIN:
        loss += tf.losses.softmax_cross_entropy(
            onehot_labels=tf.nn.softmax(
                previous_ensemble.weighted_subnetworks[-1].logits),
            logits=subnetwork.logits,
            weights=1.0,
            scope="loss_born_again_kd")
    # Add weight decay.
    loss += tf.losses.get_regularization_loss(scope=self._name_scope)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      if self._hparams.clip_gradients > 0:
        optimizer = tf.contrib.estimator.clip_gradients_by_norm(
            optimizer, self._hparams.clip_gradients)
      return optimizer.minimize(loss, var_list=var_list)

  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    """See `adanet.subnetwork.Builder`."""
    if not self._learn_mixture_weights:
      return tf.no_op("mixture_weights_train_op")

    # The AdaNet Estimator is responsible for incrementing the global step.
    optimizer, learning_rate = self._optimizer_fn(
        learning_rate=self._initial_learning_rate)
    summary.scalar("learning_rate/adanet/mixture_weights", learning_rate)
    return optimizer.minimize(loss=loss, var_list=var_list)

  @property
  def name(self):
    """Returns this subnetwork's name."""
    # Use floor division: under Python 3, `/` yields a float and would
    # produce names like "NasNet_A_2.0_240". The generators validate that
    # num_cells is a multiple of 3, so no precision is lost.
    name = "NasNet_A_{}_{}".format(self._hparams.num_cells // 3,
                                   self._hparams.num_conv_filters * 24)
    if self._knowledge_distillation != KnowledgeDistillation.NONE:
      name += "_" + self._knowledge_distillation
    name += "_" + self._model_version
    return name
class Generator(adanet.subnetwork.Generator):
  """Proposes the same NASNet `Builder` at every AdaNet iteration."""

  def __init__(self,
               feature_columns,
               optimizer_fn,
               iteration_steps,
               checkpoint_dir,
               hparams,
               seed=None):
    """Initializes a `Generator`.

    Args:
      feature_columns: The input feature columns of the problem.
      optimizer_fn: Function that accepts a float 'learning_rate' argument and
        returns an `Optimizer` instance and learning rate `Tensor` which may
        have a custom learning rate schedule applied.
      iteration_steps: The number of train steps per iteration. Required for
        the ScheduleDropPath algorithm.
      checkpoint_dir: Checkpoint directory.
      hparams: Hyper-parameters.
      seed: A Python integer. Used to create random seeds. See
        tf.set_random_seed for behavior.

    Returns:
      An instance of `Generator`.

    Raises:
      ValueError: If num_cells is not divisible by 3.
    """
    if hparams.num_cells % 3 != 0:
      raise ValueError("num_cells must be a multiple of 3.")
    # Freeze everything up front so every iteration can instantiate an
    # identical candidate Builder.
    self._builder_fn = functools.partial(
        Builder,
        feature_columns=feature_columns,
        optimizer_fn=optimizer_fn,
        checkpoint_dir=checkpoint_dir,
        seed=seed,
        hparams=hparams)

  def generate_candidates(self, previous_ensemble, iteration_number,
                          previous_ensemble_reports, all_reports):
    """See `adanet.subnetwork.Generator`."""
    candidate = self._builder_fn()
    return [candidate]
class DynamicGenerator(adanet.subnetwork.Generator):
  """Gradually grows the candidate architecture at every iteration."""

  def __init__(self,
               feature_columns,
               optimizer_fn,
               iteration_steps,
               checkpoint_dir,
               hparams,
               seed=None):
    """Generator that gradually grows the architecture.

    In each iteration, we generate one deeper candidate and one wider
    candidate.

    Args:
      feature_columns: The input feature columns of the problem.
      optimizer_fn: Function that accepts a float 'learning_rate' argument and
        returns an `Optimizer` instance and learning rate `Tensor` which may
        have a custom learning rate schedule applied.
      iteration_steps: The number of train steps per iteration. Required for
        the ScheduleDropPath algorithm.
      checkpoint_dir: Checkpoint directory.
      hparams: Hyper-parameters.
      seed: A Python integer. Used to create random seeds. See
        tf.set_random_seed for behavior.

    Returns:
      An instance of `Generator`.

    Raises:
      ValueError: If num_cells is not divisible by 3.
    """
    if hparams.num_cells % 3 != 0:
      raise ValueError("num_cells must be a multiple of 3.")
    self._hparams = hparams
    # Per-candidate hparams are supplied at generate_candidates time.
    self._builder_fn = functools.partial(
        Builder,
        feature_columns=feature_columns,
        optimizer_fn=optimizer_fn,
        checkpoint_dir=checkpoint_dir,
        seed=seed)

  def generate_candidates(self, previous_ensemble, iteration_number,
                          previous_ensemble_reports, all_reports):
    """See `adanet.subnetwork.Generator`."""
    # Start from the base architecture, or from the architecture persisted by
    # the most recently added subnetwork when an ensemble already exists.
    cells = self._hparams.num_cells
    filters = self._hparams.num_conv_filters
    if previous_ensemble:
      cells = int(
          subnetwork_utils.get_persisted_value_from_ensemble(
              previous_ensemble, _PREVIOUS_NUM_CELLS))
      filters = int(
          subnetwork_utils.get_persisted_value_from_ensemble(
              previous_ensemble, _PREVIOUS_CONV_FILTERS))
    # Propose one deeper and one wider variant of that architecture.
    deeper = self._builder_fn(
        hparams=subnetwork_utils.copy_update(
            self._hparams,
            num_cells=cells + 3,
            num_conv_filters=filters))
    wider = self._builder_fn(
        hparams=subnetwork_utils.copy_update(
            self._hparams,
            num_cells=cells,
            num_conv_filters=filters + 10))
    return [deeper, wider]
| 12,011 | 34.433628 | 80 | py |
adanet | adanet-master/research/improve_nas/trainer/nasnet_utils.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
Copy of: https://github.com/tensorflow/models/blob/master/research/slim/nets/
nasnet/nasnet_utils.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
# Short aliases for the TF1 contrib namespaces used throughout this file.
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
# Supported data layouts: channels-first (NCHW) and channels-last (NHWC).
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
# Sentinel default for `data_format` arguments; callers must override it.
INVALID = 'null'
# The cap for tf.clip_by_value, it's hinted from the activation distribution
# that the majority of activation values are in the range [-6, 6].
CLIP_BY_VALUE_CAP = 6
def calc_reduction_layers(num_cells, num_reduction_layers):
  """Returns the cell indices at which reduction layers are placed.

  The reduction layers are spaced evenly through the stack of `num_cells`
  cells, e.g. calc_reduction_layers(18, 2) == [6, 12].

  Args:
    num_cells: Total number of cells in the network.
    num_reduction_layers: How many reduction layers to insert.

  Returns:
    A list of integer layer indices, one per reduction layer.
  """
  return [
      int((float(pool_num) / (num_reduction_layers + 1)) * num_cells)
      for pool_num in range(1, num_reduction_layers + 1)
  ]
@tf.contrib.framework.add_arg_scope
def get_channel_index(data_format=INVALID):
  """Returns the axis that holds channels: 3 for NHWC, 1 otherwise (NCHW)."""
  assert data_format != INVALID
  if data_format == 'NHWC':
    return 3
  return 1
@tf.contrib.framework.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
  """Returns the number of channels in a rank-4 `shape` for `data_format`."""
  assert data_format != INVALID
  assert len(shape) == 4
  channel_axis = {'NHWC': 3, 'NCHW': 1}.get(data_format)
  if channel_axis is None:
    raise ValueError('Not a valid data_format', data_format)
  return int(shape[channel_axis])
@tf.contrib.framework.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
  """Averages `x` over both spatial dimensions, removing them."""
  assert data_format != INVALID
  assert data_format in ['NHWC', 'NCHW']
  assert x.shape.ndims == 4
  # Height/width live on axes 1,2 for NHWC and 2,3 for NCHW.
  spatial_axes = [1, 2] if data_format == 'NHWC' else [2, 3]
  return tf.reduce_mean(x, spatial_axes)
@tf.contrib.framework.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
  """Reduces the shape of net without information loss due to striding.

  For stride 1 this is a plain 1x1 conv + batch norm. For larger strides it
  averages two strided paths — the second shifted by one pixel — so no input
  position is entirely dropped, then concatenates them along channels.

  Args:
    net: Input feature map `Tensor`.
    output_filters: Number of output channels after the reduction.
    stride: Spatial stride to apply.
    data_format: 'NHWC' or 'NCHW'; must be supplied (sentinel default).

  Returns:
    The reduced feature map `Tensor` with `output_filters` channels.
  """
  assert data_format != INVALID
  if stride == 1:
    net = slim.conv2d(net, output_filters, 1, scope='path_conv')
    net = slim.batch_norm(net, scope='path_bn')
    return net
  if data_format == 'NHWC':
    stride_spec = [1, stride, stride, 1]
  else:
    stride_spec = [1, 1, stride, stride]
  # Skip path 1
  path1 = tf.nn.avg_pool(
      net, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
  path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
  # Skip path 2
  # First pad with 0's on the right and bottom, then shift the filter to
  # include those 0's that were added.
  if data_format == 'NHWC':
    pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
    path2 = tf.pad(net, pad_arr)[:, 1:, 1:, :]
    concat_axis = 3
  else:
    pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
    path2 = tf.pad(net, pad_arr)[:, :, 1:, 1:]
    concat_axis = 1
  path2 = tf.nn.avg_pool(
      path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
  # If odd number of filters, add an additional one to the second path.
  final_filter_size = int(output_filters / 2) + int(output_filters % 2)
  path2 = slim.conv2d(path2, final_filter_size, 1, scope='path2_conv')
  # Concat and apply BN
  final_path = tf.concat(values=[path1, path2], axis=concat_axis)
  final_path = slim.batch_norm(final_path, scope='final_path_bn')
  return final_path
@tf.contrib.framework.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
  """Drops out a whole example hiddenstate with the specified probability.

  Args:
    net: Rank-4 `Tensor`; each example in the batch is kept or zeroed whole.
    keep_prob: Probability of keeping an example's hiddenstate.
    is_training: If False, `net` is returned unchanged.

  Returns:
    `net` with whole examples randomly zeroed and survivors rescaled.
  """
  if is_training:
    batch_size = tf.shape(net)[0]
    # One Bernoulli draw per example, broadcast over H, W, C.
    noise_shape = [batch_size, 1, 1, 1]
    random_tensor = keep_prob
    # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
    random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
    binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
    # Scale survivors by 1/keep_prob so the expected activation is unchanged.
    keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
    net = net * keep_prob_inv * binary_tensor
  return net
def _operation_to_filter_shape(operation):
  """Extracts the (square) filter size from a convolution operation name.

  For example 'separable_3x3_4' -> 3. The previous implementation read only
  the last character before 'x' and the first after it, so multi-digit
  kernels such as 'separable_11x11_2' silently parsed as 1 while the
  rectangular-filter assertion still passed. This version parses the whole
  '<height>x<width>' token, so 'separable_11x11_2' -> 11.

  Args:
    operation: Operation name like 'separable_<K>x<K>_<num_layers>'.

  Returns:
    The integer filter size K.

  Raises:
    AssertionError: If the filter height and width differ.
  """
  # The kernel token is the only underscore-separated piece containing 'x'.
  kernel_token = next(t for t in operation.split('_') if 'x' in t)
  filter_height, filter_width = kernel_token.split('x')
  assert filter_height == filter_width, 'Rectangular filters not supported.'
  return int(filter_height)
def _operation_to_num_layers(operation):
  """Returns the layer-repeat count encoded at the end of an operation name.

  For example 'separable_3x3_4' -> 4. A name whose last token is the kernel
  itself (e.g. 'separable_3x3') implies a single layer.
  """
  last_token = operation.split('_')[-1]
  return 1 if 'x' in last_token else int(last_token)
def _operation_to_info(operation):
  """Takes in operation name and returns meta information.
  An example would be 'separable_3x3_4' -> (4, 3).
  Args:
    operation: String that corresponds to convolution operation.
  Returns:
    Tuple of (num layers, filter shape), in that order — note that callers
    unpack it as `num_layers, kernel_size`.
  """
  num_layers = _operation_to_num_layers(operation)
  filter_shape = _operation_to_filter_shape(operation)
  return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size,
                            use_bounded_activation):
  """Takes in an operations and parses it to the correct sep operation.

  Builds `num_layers` separable convolutions (activation -> sep conv ->
  batch norm); only the first layer applies `stride`, the rest use stride 1.

  Args:
    net: Input feature map `Tensor`.
    stride: Stride for the first separable convolution.
    operation: Name like 'separable_<K>x<K>_<num_layers>'.
    filter_size: Number of output filters for each separable conv.
    use_bounded_activation: If True, use relu6 instead of relu.

  Returns:
    The transformed feature map `Tensor`.
  """
  num_layers, kernel_size = _operation_to_info(operation)
  activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
  # All but the final layer; stride is consumed by the first one.
  for layer_num in range(num_layers - 1):
    net = activation_fn(net)
    net = slim.separable_conv2d(
        net,
        filter_size,
        kernel_size,
        depth_multiplier=1,
        scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
        stride=stride)
    net = slim.batch_norm(
        net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
    stride = 1
  # Final layer (also handles the num_layers == 1 case).
  net = activation_fn(net)
  net = slim.separable_conv2d(
      net,
      filter_size,
      kernel_size,
      depth_multiplier=1,
      scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
      stride=stride)
  net = slim.batch_norm(
      net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
  return net
def _operation_to_pooling_type(operation):
  """Returns the pooling type prefix, e.g. 'avg_pool_3x3' -> 'avg'."""
  return operation.split('_')[0]
def _operation_to_pooling_shape(operation):
  """Returns the square pooling window size, e.g. 'avg_pool_3x3' -> 3."""
  window = operation.split('_')[-1]
  assert 'x' in window
  height, width = window.split('x')
  assert height == width
  return int(height)
def _operation_to_pooling_info(operation):
  """Splits a pooling op name into its (type, window size) pair."""
  return (_operation_to_pooling_type(operation),
          _operation_to_pooling_shape(operation))
def _pooling(net, stride, operation, use_bounded_activation):
  """Applies the pooling described by `operation` to `net`."""
  pooling_type, window = _operation_to_pooling_info(operation)
  if use_bounded_activation:
    net = tf.nn.relu6(net)
  # Dispatch on the parsed pooling type.
  pool_fns = {'avg': slim.avg_pool2d, 'max': slim.max_pool2d}
  if pooling_type not in pool_fns:
    raise NotImplementedError('Unimplemented pooling type: ', pooling_type)
  return pool_fns[pooling_type](net, window, stride=stride, padding='SAME')
class NasNetABaseCell(object):
  """NASNet Cell class that is used as a 'layer' in image architectures.

  A cell combines pairs of hidden states with predicted operations over five
  "comb" iterations, then concatenates the unused hidden states as output.
  """
  def __init__(self, num_conv_filters, operations, used_hiddenstates,
               hiddenstate_indices, drop_path_keep_prob, total_num_cells,
               total_training_steps, use_bounded_activation=False):
    """Constructs a NasNetABaseCell.
    Args:
      num_conv_filters: The number of filters for each convolution operation.
      operations: List of operations that are performed in the NASNet Cell in
        order.
      used_hiddenstates: Binary array that signals if the hiddenstate was used
        within the cell. This is used to determine what outputs of the cell
        should be concatenated together.
      hiddenstate_indices: Determines what hiddenstates should be combined
        together with the specified operations to create the NASNet cell.
      drop_path_keep_prob: Keep probability during DropPath regularization.
      total_num_cells: Total number of cells.
      total_training_steps: Total training steps.
      use_bounded_activation: Whether or not to use bounded activations. Bounded
        activations better lend themselves to quantized inference.
    """
    self._num_conv_filters = num_conv_filters
    self._operations = operations
    self._used_hiddenstates = used_hiddenstates
    self._hiddenstate_indices = hiddenstate_indices
    self._drop_path_keep_prob = drop_path_keep_prob
    self._total_num_cells = total_num_cells
    self._total_training_steps = total_training_steps
    self._use_bounded_activation = use_bounded_activation
  def _reduce_prev_layer(self, prev_layer, curr_layer):
    """Matches dimension of prev_layer to the curr_layer."""
    # Set the prev layer to the current layer if it is none
    if prev_layer is None:
      return curr_layer
    curr_num_filters = self._filter_size
    prev_num_filters = get_channel_dim(prev_layer.shape)
    # Axis 2 holds spatial width here (NHWC assumed — set via arg_scope).
    curr_filter_shape = int(curr_layer.shape[2])
    prev_filter_shape = int(prev_layer.shape[2])
    activation_fn = tf.nn.relu6 if self._use_bounded_activation else tf.nn.relu
    if curr_filter_shape != prev_filter_shape:
      # Spatial mismatch: halve prev_layer's resolution without info loss.
      prev_layer = activation_fn(prev_layer)
      prev_layer = factorized_reduction(
          prev_layer, curr_num_filters, stride=2)
    elif curr_num_filters != prev_num_filters:
      # Channel mismatch only: project with a 1x1 conv.
      prev_layer = activation_fn(prev_layer)
      prev_layer = slim.conv2d(
          prev_layer, curr_num_filters, 1, scope='prev_1x1')
      prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
    return prev_layer
  def _cell_base(self, net, prev_layer):
    """Runs the beginning of the conv cell before the predicted ops are run."""
    num_filters = self._filter_size
    # Check to be sure prev layer stuff is setup correctly
    prev_layer = self._reduce_prev_layer(prev_layer, net)
    net = tf.nn.relu6(net) if self._use_bounded_activation else tf.nn.relu(net)
    net = slim.conv2d(net, num_filters, 1, scope='1x1')
    net = slim.batch_norm(net, scope='beginning_bn')
    # num_or_size_splits=1
    net = [net]
    net.append(prev_layer)
    return net
  def __call__(self, net, scope=None, filter_scaling=1, stride=1,
               prev_layer=None, cell_num=-1, current_step=None):
    """Runs the conv cell.

    Builds the cell graph: a base 1x1 projection, five pairwise
    combination iterations, and a final concat of unused hidden states.
    """
    self._cell_num = cell_num
    self._filter_scaling = filter_scaling
    self._filter_size = int(self._num_conv_filters * filter_scaling)
    # `i` walks `operations`/`hiddenstate_indices` two entries per iteration.
    i = 0
    with tf.variable_scope(scope):
      net = self._cell_base(net, prev_layer)
      for iteration in range(5):
        with tf.variable_scope('comb_iter_{}'.format(iteration)):
          left_hiddenstate_idx, right_hiddenstate_idx = (
              self._hiddenstate_indices[i],
              self._hiddenstate_indices[i + 1])
          # Indices 0 and 1 are the cell's original inputs; only those may
          # be strided (see _apply_conv_operation).
          original_input_left = left_hiddenstate_idx < 2
          original_input_right = right_hiddenstate_idx < 2
          h1 = net[left_hiddenstate_idx]
          h2 = net[right_hiddenstate_idx]
          operation_left = self._operations[i]
          operation_right = self._operations[i+1]
          i += 2
          # Apply conv operations
          with tf.variable_scope('left'):
            h1 = self._apply_conv_operation(h1, operation_left,
                                            stride, original_input_left,
                                            current_step)
          with tf.variable_scope('right'):
            h2 = self._apply_conv_operation(h2, operation_right,
                                            stride, original_input_right,
                                            current_step)
          # Combine hidden states using 'add'.
          with tf.variable_scope('combine'):
            h = h1 + h2
            if self._use_bounded_activation:
              h = tf.nn.relu6(h)
          # Add hiddenstate to the list of hiddenstates we can choose from
          net.append(h)
      with tf.variable_scope('cell_output'):
        net = self._combine_unused_states(net)
      return net
  def _apply_conv_operation(self, net, operation,
                            stride, is_from_original_input, current_step):
    """Applies the predicted conv operation to net."""
    # Dont stride if this is not one of the original hiddenstates
    if stride > 1 and not is_from_original_input:
      stride = 1
    input_filters = get_channel_dim(net.shape)
    filter_size = self._filter_size
    if 'separable' in operation:
      net = _stacked_separable_conv(net, stride, operation, filter_size,
                                    self._use_bounded_activation)
      if self._use_bounded_activation:
        net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
    elif operation in ['none']:
      # Identity op — unless stride/channels force a 1x1 projection.
      if self._use_bounded_activation:
        net = tf.nn.relu6(net)
      # Check if a stride is needed, then use a strided 1x1 here
      if stride > 1 or (input_filters != filter_size):
        if not self._use_bounded_activation:
          net = tf.nn.relu(net)
        net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
        net = slim.batch_norm(net, scope='bn_1')
        if self._use_bounded_activation:
          net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
    elif 'pool' in operation:
      net = _pooling(net, stride, operation, self._use_bounded_activation)
      # Pooling keeps channel count; project if it doesn't match.
      if input_filters != filter_size:
        net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
        net = slim.batch_norm(net, scope='bn_1')
      if self._use_bounded_activation:
        net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
    else:
      raise ValueError('Unimplemented operation', operation)
    if operation != 'none':
      net = self._apply_drop_path(net, current_step=current_step)
    return net
  def _combine_unused_states(self, net):
    """Concatenate the unused hidden states of the cell."""
    used_hiddenstates = self._used_hiddenstates
    final_height = int(net[-1].shape[2])
    final_num_filters = get_channel_dim(net[-1].shape)
    assert len(used_hiddenstates) == len(net)
    for idx, used_h in enumerate(used_hiddenstates):
      curr_height = int(net[idx].shape[2])
      curr_num_filters = get_channel_dim(net[idx].shape)
      # Determine if a reduction should be applied to make the number of
      # filters match.
      should_reduce = final_num_filters != curr_num_filters
      should_reduce = (final_height != curr_height) or should_reduce
      should_reduce = should_reduce and not used_h
      if should_reduce:
        stride = 2 if final_height != curr_height else 1
        with tf.variable_scope('reduction_{}'.format(idx)):
          net[idx] = factorized_reduction(
              net[idx], final_num_filters, stride)
    states_to_combine = (
        [h for h, is_used in zip(net, used_hiddenstates) if not is_used])
    # Return the concat of all the states
    concat_axis = get_channel_index()
    net = tf.concat(values=states_to_combine, axis=concat_axis)
    return net
  @tf.contrib.framework.add_arg_scope  # No public API. For internal use only.
  def _apply_drop_path(self, net, current_step=None,
                       use_summaries=False, drop_connect_version='v3'):
    """Apply drop_path regularization.
    Args:
      net: the Tensor that gets drop_path regularization applied.
      current_step: a float32 Tensor with the current global_step value,
        to be divided by hparams.total_training_steps. Usually None, which
        defaults to tf.train.get_or_create_global_step() properly casted.
      use_summaries: a Python boolean. If set to False, no summaries are output.
      drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether
        the dropout rate is scaled by current_step (v1), layer (v2), or
        both (v3, the default).
    Returns:
      The dropped-out value of `net`.
    """
    drop_path_keep_prob = self._drop_path_keep_prob
    if drop_path_keep_prob < 1.0:
      assert drop_connect_version in ['v1', 'v2', 'v3']
      if drop_connect_version in ['v2', 'v3']:
        # Scale keep prob by layer number
        assert self._cell_num != -1
        # The added 2 is for the reduction cells
        # NOTE(review): no "+2" appears below — presumably total_num_cells
        # already includes the reduction cells; confirm at the call site.
        num_cells = self._total_num_cells
        layer_ratio = (self._cell_num + 1)/float(num_cells)
        if use_summaries:
          with tf.device('/cpu:0'):
            tf.summary.scalar('layer_ratio', layer_ratio)
        drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
      if drop_connect_version in ['v1', 'v3']:
        # Decrease the keep probability over time
        if current_step is None:
          current_step = tf.train.get_or_create_global_step()
        current_step = tf.cast(current_step, tf.float32)
        drop_path_burn_in_steps = self._total_training_steps
        current_ratio = current_step / drop_path_burn_in_steps
        current_ratio = tf.minimum(1.0, current_ratio)
        if use_summaries:
          with tf.device('/cpu:0'):
            tf.summary.scalar('current_ratio', current_ratio)
        drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
      if use_summaries:
        with tf.device('/cpu:0'):
          tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob)
      net = drop_path(net, drop_path_keep_prob)
    return net
class NasNetANormalCell(NasNetABaseCell):
  """NASNetA Normal Cell: the fixed searched architecture for normal cells."""
  def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
               total_training_steps, use_bounded_activation=False):
    """Configures the base cell with the NASNet-A normal-cell operations."""
    operations = [
        'separable_5x5_2', 'separable_3x3_2', 'separable_5x5_2',
        'separable_3x3_2', 'avg_pool_3x3', 'none', 'avg_pool_3x3',
        'avg_pool_3x3', 'separable_3x3_2', 'none'
    ]
    used_hiddenstates = [1, 0, 0, 0, 0, 0, 0]
    hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0]
    super(NasNetANormalCell, self).__init__(
        num_conv_filters=num_conv_filters,
        operations=operations,
        used_hiddenstates=used_hiddenstates,
        hiddenstate_indices=hiddenstate_indices,
        drop_path_keep_prob=drop_path_keep_prob,
        total_num_cells=total_num_cells,
        total_training_steps=total_training_steps,
        use_bounded_activation=use_bounded_activation)
class NasNetAReductionCell(NasNetABaseCell):
  """NASNetA Reduction Cell: the fixed searched architecture for reductions."""
  def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
               total_training_steps, use_bounded_activation=False):
    """Configures the base cell with the NASNet-A reduction-cell operations."""
    operations = [
        'separable_5x5_2', 'separable_7x7_2', 'max_pool_3x3',
        'separable_7x7_2', 'avg_pool_3x3', 'separable_5x5_2', 'none',
        'avg_pool_3x3', 'separable_3x3_2', 'max_pool_3x3'
    ]
    used_hiddenstates = [1, 1, 1, 0, 0, 0, 0]
    hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0]
    super(NasNetAReductionCell, self).__init__(
        num_conv_filters=num_conv_filters,
        operations=operations,
        used_hiddenstates=used_hiddenstates,
        hiddenstate_indices=hiddenstate_indices,
        drop_path_keep_prob=drop_path_keep_prob,
        total_num_cells=total_num_cells,
        total_training_steps=total_training_steps,
        use_bounded_activation=use_bounded_activation)
| 20,751 | 37.934334 | 80 | py |
adanet | adanet-master/research/improve_nas/trainer/adanet_improve_nas_test.py | # Lint as: python3
"""Tests for improve_nas.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl.testing import parameterized
from adanet.research.improve_nas.trainer import adanet_improve_nas
from adanet.research.improve_nas.trainer import fake_data
import tensorflow.compat.v1 as tf
class AdaNetQuetzalBuilderTest(parameterized.TestCase, tf.test.TestCase):
  """End-to-end smoke test for the adanet_improve_nas estimator Builder."""
  # Each named case exercises a different candidate generator configuration
  # with a deliberately tiny network (3 cells, 2 filters) to keep it fast.
  @parameterized.named_parameters({
      "testcase_name": "simple_generator",
      "hparams_string": ("optimizer=sgd,boosting_iterations=2,generator=simple,"
                         "initial_learning_rate=.1,use_aux_head=False,"
                         "num_cells=3,num_conv_filters=2,use_evaluator=False"),
  }, {
      "testcase_name": "dynamic_generator",
      "hparams_string":
          ("optimizer=sgd,boosting_iterations=1,generator=dynamic,"
           "initial_learning_rate=.1,use_aux_head=False,"
           "num_cells=3,num_conv_filters=2,use_evaluator=False"),
  })
  def test_estimator(self,
                     hparams_string,
                     batch_size=1):
    """Structural test to make sure Estimator Builder works."""
    seed = 42
    # Set up and clean test directory.
    model_dir = os.path.join(flags.FLAGS.test_tmpdir,
                             "AdanetImproveNasBuilderTest")
    if tf.gfile.Exists(model_dir):
      tf.gfile.DeleteRecursively(model_dir)
    tf.gfile.MkDir(model_dir)
    # Fake in-memory data keeps the test hermetic.
    data_provider = fake_data.FakeImageProvider(seed=seed)
    estimator_builder = adanet_improve_nas.Builder()
    hparams = estimator_builder.hparams(
        default_batch_size=3, hparams_string=hparams_string)
    run_config = tf.estimator.RunConfig(
        tf_random_seed=seed, model_dir=model_dir)
    _ = data_provider.get_input_fn(
        "train",
        tf.estimator.ModeKeys.TRAIN,
        batch_size=batch_size)
    test_input_fn = data_provider.get_input_fn(
        "test",
        tf.estimator.ModeKeys.EVAL,
        batch_size=batch_size)
    estimator = estimator_builder.estimator(
        data_provider=data_provider,
        run_config=run_config,
        hparams=hparams,
        train_steps=10,
        seed=seed)
    # Only a structural check: evaluation runs and produces a positive loss.
    eval_metrics = estimator.evaluate(input_fn=test_input_fn, steps=1)
    self.assertGreater(eval_metrics["loss"], 0.0)
# Allows running this test module directly; executes all test cases above.
if __name__ == "__main__":
  tf.test.main()
| 2,985 | 33.321839 | 80 | py |
adanet | adanet-master/research/improve_nas/trainer/fake_data.py | # Lint as: python3
"""Fake dataset for testing and debugging.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
class FakeImageProvider(object):
  """A fake image data provider.

  Serves a tiny deterministic in-memory dataset for tests: random images of
  shape [image_dim, image_dim, channels] with integer labels.
  """
  def __init__(self,
               num_examples=3,
               num_classes=3,
               image_dim=8,
               channels=1,
               seed=42):
    """Stores the dataset dimensions and the numpy seed used to generate it."""
    self._num_examples = num_examples
    self._num_classes = num_classes
    self._seed = seed
    self._channels = channels
    self._image_dim = image_dim
  def get_head(self, name=None):
    """Returns a multi-class head matching `num_classes` with SUM reduction."""
    return tf.contrib.estimator.multi_class_head(
        self._num_classes, name=name, loss_reduction=tf.losses.Reduction.SUM)
  def _shape(self):
    """Returns the [height, width, channels] shape of one example."""
    return [self._image_dim, self._image_dim, self._channels]
  def get_input_fn(self,
                   partition,
                   mode,
                   batch_size):
    """See `data.Provider` get_input_fn."""
    del partition
    def input_fn(params=None):
      """Input_fn to return."""
      del params  # Unused.
      # Reseed numpy immediately before sampling so the data is
      # deterministic; EVAL uses seed+1 to get a distinct dataset.
      np.random.seed(self._seed)
      if mode == tf.estimator.ModeKeys.EVAL:
        np.random.seed(self._seed + 1)
      images = tf.to_float(
          tf.convert_to_tensor(
              np.random.rand(self._num_examples, *self._shape())))
      # NOTE(review): labels are drawn from {0, 1} even though num_classes
      # defaults to 3 — presumably fine for fake data; confirm if class
      # coverage matters to a test.
      labels = tf.convert_to_tensor(
          np.random.randint(0, high=2, size=(self._num_examples, 1)))
      dataset = tf.data.Dataset.from_tensor_slices(({"x": images}, labels))
      if mode == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.repeat()
      dataset = dataset.batch(batch_size)
      iterator = dataset.make_one_shot_iterator()
      return iterator.get_next()
    return input_fn
  def get_feature_columns(self):
    """Returns a single numeric feature column named "x" for the images."""
    feature_columns = [
        tf.feature_column.numeric_column(key="x", shape=self._shape()),
    ]
    return feature_columns
| 2,536 | 29.566265 | 77 | py |
adanet | adanet-master/research/improve_nas/trainer/cifar10_test.py | # Lint as: python3
"""Tests for cifar10 dataset.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet.research.improve_nas.trainer import cifar10
import tensorflow.compat.v1 as tf
class Cifar10Test(tf.test.TestCase):
  """Tests for the CIFAR-10 data provider (shapes and golden pixel values)."""
  def _check_dimensions(self, partition):
    # Shared helper: verifies batch shapes for one dataset partition.
    provider = cifar10.Provider(seed=4)
    input_fn = provider.get_input_fn(
        partition, tf.contrib.learn.ModeKeys.TRAIN, batch_size=3)
    data, labels = input_fn()
    self.assertIn(cifar10.FEATURES, data)
    features = data[cifar10.FEATURES]
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    with self.test_session() as sess:
      sess.run(init)
      self.assertEqual((3, 32, 32, 3), sess.run(features).shape)
      self.assertEqual((3, 1), sess.run(labels).shape)
  def test_read_cifar10(self):
    for partition in ["train", "test"]:
      self._check_dimensions(partition)
  def test_no_preprocess(self):
    # With preprocess=False the raw uint8 pixel values must come through
    # unchanged; the golden values below are tied to seed=4.
    provider = cifar10.Provider(seed=4)
    input_fn = provider.get_input_fn(
        "train",
        tf.contrib.learn.ModeKeys.TRAIN,
        batch_size=3,
        preprocess=False)
    data, label = input_fn()
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    with self.test_session() as sess:
      sess.run(init)
      data_result = sess.run(data["x"])
      self.assertEqual((3, 32, 32, 3), data_result.shape)
      self.assertAllEqual([148, 141, 174], data_result[0][0][0])
      self.assertAllEqual([[5], [9], [3]], sess.run(label))
  def test_basic_preprocess(self):
    # Basic augmentation pads with zeros before cropping, so a zero corner
    # pixel is the expected signature of the transform.
    provider = cifar10.Provider(
        params_string="augmentation=basic", seed=4)
    input_fn = provider.get_input_fn(
        "train",
        tf.contrib.learn.ModeKeys.TRAIN,
        batch_size=3,
        preprocess=True)
    data, label = input_fn()
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    with self.test_session() as sess:
      sess.run(init)
      data_result = sess.run(data["x"])
      self.assertEqual((3, 32, 32, 3), data_result.shape)
      self.assertAllEqual([0, 0, 0], data_result[0, 0, 0])
      self.assertAlmostEqual(0.0, data_result[0, -1, 0, 0], places=3)
      self.assertAllEqual([[5], [9], [3]], sess.run(label))
# Allows running this test module directly; executes all test cases above.
if __name__ == "__main__":
  tf.test.main()
| 3,005 | 33.551724 | 72 | py |
adanet | adanet-master/research/improve_nas/trainer/image_processing.py | # Lint as: python3
"""Image preprocessing and augmentation function for a single image.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
class PreprocessingType(object):
  """Enumeration of the supported image preprocessing modes.

  Attributes:
    INCEPTION: Preprocessing used in inception.
    BASIC: Minimalistic preprocessing used in NasNet for cifar.
  """
  INCEPTION = "inception"
  BASIC = "basic"
def basic_augmentation(image, image_height, image_width, seed=None):
  """Applies the NasNet-paper augmentation: random flip, then pad-and-crop.

  Source: https://arxiv.org/pdf/1707.07012.pdf appendix A.1.
  """
  pad = 4
  flipped = tf.image.random_flip_left_right(image, seed=seed)
  padded = tf.pad(flipped, [[pad, pad], [pad, pad], [0, 0]])
  return tf.random_crop(padded, [image_height, image_width, 3], seed=seed)
def resize_and_normalize(image, height, width):
  """Converts to float32, bilinearly resizes, and rescales pixels to [-1, 1]."""
  if image.dtype != tf.float32:
    # Map integer pixel values into [0.0, 1.0].
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  # resize_bilinear expects a batch dimension; add it and strip it again.
  batched = tf.expand_dims(image, 0)
  resized = tf.image.resize_bilinear(
      batched, [height, width], align_corners=False)
  image = tf.squeeze(resized, [0])
  # Shift to [-0.5, 0.5], then scale up to [-1, 1].
  return tf.multiply(tf.subtract(image, 0.5), 2.0)
def cutout(image, pad_size, replace=0, seed=None):
  """Apply cutout (https://arxiv.org/abs/1708.04552) to image.
  Forked from learning/brain/research/meta_architect/image/image_processing.py?
  l=1172&rcl=193953073
  Args:
    image: Image `Tensor` with shape [height, width, channels].
    pad_size: The cutout shape will be at most [pad_size * 2, pad_size * 2].
    replace: Value for replacing cutout values.
    seed: Random seed.
  Returns:
    Image `Tensor` with cutout applied.
  """
  with tf.variable_scope("cutout"):
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]
    image_depth = tf.shape(image)[2]
    # Sample the location in the image where the zero mask will be applied.
    cutout_center_height = tf.random_uniform(
        shape=[], minval=0, maxval=image_height, seed=seed, dtype=tf.int32)
    cutout_center_width = tf.random_uniform(
        shape=[], minval=0, maxval=image_width, seed=seed, dtype=tf.int32)
    # Clamp the cutout rectangle to the image bounds: each pad is the space
    # left on that side once the (possibly clipped) rectangle is placed.
    lower_pad = tf.maximum(0, cutout_center_height - pad_size)
    upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
    left_pad = tf.maximum(0, cutout_center_width - pad_size)
    right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
    cutout_shape = [
        image_height - (lower_pad + upper_pad),
        image_width - (left_pad + right_pad)
    ]
    padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
    # Build a full-image mask: zeros inside the cutout, ones elsewhere.
    mask = tf.pad(
        tf.zeros(cutout_shape, dtype=image.dtype),
        padding_dims,
        constant_values=1)
    mask = tf.expand_dims(mask, -1)
    mask = tf.tile(mask, [1, 1, image_depth])
    # Replace masked-out pixels with `replace`, keep the rest.
    image = tf.where(
        tf.equal(mask, 0),
        tf.ones_like(image, dtype=image.dtype) * replace, image)
  return image
| 3,948 | 33.640351 | 79 | py |
adanet | adanet-master/research/improve_nas/trainer/cifar100_test.py | # Lint as: python3
"""Tests for cifar100 dataset.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet.research.improve_nas.trainer import cifar100
import tensorflow.compat.v1 as tf
class Cifar100Test(tf.test.TestCase):
  """Tests for the CIFAR-100 data provider (shapes and golden pixel values)."""
  def _check_dimensions(self, partition):
    # Shared helper: verifies batch shapes for one dataset partition.
    provider = cifar100.Provider(seed=4)
    input_fn = provider.get_input_fn(
        partition, tf.contrib.learn.ModeKeys.TRAIN, batch_size=3)
    data, labels = input_fn()
    self.assertIn(cifar100.FEATURES, data)
    features = data[cifar100.FEATURES]
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    with self.test_session() as sess:
      sess.run(init)
      self.assertEqual((3, 32, 32, 3), sess.run(features).shape)
      self.assertEqual((3, 1), sess.run(labels).shape)
  def test_read_cifar100(self):
    for partition in ["train", "test"]:
      self._check_dimensions(partition)
  def test_no_preprocess(self):
    # With preprocess=False the raw uint8 pixel values must come through
    # unchanged; the golden values below are tied to seed=4.
    provider = cifar100.Provider(seed=4)
    input_fn = provider.get_input_fn(
        "train",
        tf.contrib.learn.ModeKeys.TRAIN,
        batch_size=3,
        preprocess=False)
    data, label = input_fn()
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    with self.test_session() as sess:
      sess.run(init)
      self.assertAllEqual([220, 25, 47], sess.run(data["x"])[0][0][0])
      self.assertAllEqual([[47], [5], [52]], sess.run(label))
  def test_basic_preprocess(self):
    # Basic augmentation pads with zeros before cropping, so a zero corner
    # pixel is the expected signature of the transform.
    provider = cifar100.Provider(
        params_string="augmentation=basic", seed=4)
    input_fn = provider.get_input_fn(
        "train",
        tf.contrib.learn.ModeKeys.TRAIN,
        batch_size=3,
        preprocess=True)
    data, label = input_fn()
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    with self.test_session() as sess:
      sess.run(init)
      data_result = sess.run(data["x"])
      self.assertEqual((3, 32, 32, 3), data_result.shape)
      self.assertAllEqual([0, 0, 0], data_result[0, 0, 0])
      self.assertAlmostEqual(0.0, data_result[0, -1, 0, 0], places=3)
      self.assertAllEqual([[47], [5], [52]], sess.run(label))
if __name__ == "__main__":
tf.test.main()
| 2,926 | 33.435294 | 72 | py |
adanet | adanet-master/research/improve_nas/trainer/cifar10.py | # Lint as: python3
"""CIFAR-10 data and convenience functions.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.keras.datasets import cifar10
# pylint: disable=g-import-not-at-top
try:
from adanet.research.improve_nas.trainer import image_processing
except ImportError as e:
from trainer import image_processing
# pylint: enable=g-import-not-at-top
FEATURES = 'x'
PreprocessingType = image_processing.PreprocessingType
class Provider(object):
"""A CIFAR-10 data provider."""
def __init__(self,
params_string='',
seed=None):
"""Returns a CIFAR-10 `Provider`."""
# For testing
self._seed = seed
default_params = tf.contrib.training.HParams(
cutout=True, augmentation=PreprocessingType.BASIC)
self._params = default_params.parse(params_string)
def _preprocess_data(self, image, label, training, preprocess):
"""Apply Inception data augmentation and preprocessing."""
# Unpack `Element` tuple.
# image, label = element
if preprocess:
image_height, image_width = self._shape()[:2]
if self._params.augmentation == PreprocessingType.BASIC:
image = image_processing.resize_and_normalize(image, image_height,
image_width)
if training:
image = image_processing.basic_augmentation(image, image_height,
image_width, self._seed)
else:
raise ValueError('Unsupported data augmentation type: `%s`' %
self._params.augmentation)
if training and self._params.cutout:
# According to https://arxiv.org/abs/1708.04552, cutting out 16x16
# works best.
image = image_processing.cutout(image, pad_size=8, seed=self._seed)
# Set shapes so that they are defined.
image.set_shape(self._shape())
if label is not None:
label.set_shape([1])
return {FEATURES: image}, label
def _cifar10_dataset(self, partition):
"""Returns a partition of the CIFAR-10 `Dataset`."""
cifar10_data = None
try:
cifar10_data = cifar10.load_data()
tf.logging.info('Loaded cifar10.')
except: # pylint: disable=bare-except
tf.logging.info(
'Can not load cifar10 from internet. Creating dummy data for '
'testing.')
data = np.zeros((3, 32, 32, 3))
labels = np.array([[5], [3], [9]])
data[:, 0, 0] = [148, 141, 174]
data[:, -1, 0, 0] = 128
cifar10_data = ((data, labels), (data, labels))
(x_train, y_train), (x_test, y_test) = cifar10_data
x = None
y = None
if partition == 'train':
x, y = x_train, y_train
else:
x, y = x_test, y_test
dataset = tf.data.Dataset.from_tensor_slices((x, y.astype(np.int32)))
return dataset.cache()
def _shape(self):
"""Returns a 3-dimensional list with the shape of the image."""
return [32, 32, 3]
def get_input_fn(self,
partition,
mode,
batch_size,
preprocess=True,
use_tpu=False):
"""See `data.Provider` get_input_fn."""
def input_fn(params=None):
"""Provides batches of CIFAR images.
Args:
params: A dict containing the batch_size on TPU, otherwise None.
Returns:
images: A `Tensor` of size [batch_size, 32, 32, 3]
labels: A `Tensor` of size [batch_size, 1],
"""
batch_size_ = batch_size
if use_tpu:
batch_size_ = params.get('batch_size', batch_size)
training = mode == tf.estimator.ModeKeys.TRAIN
dataset = self._cifar10_dataset(partition)
dataset = dataset.map(
functools.partial(
self._preprocess_data, training=training, preprocess=preprocess))
if training:
dataset = dataset.apply(
tf.contrib.data.shuffle_and_repeat(
buffer_size=500, seed=self._seed))
return dataset.batch(
batch_size_,
drop_remainder=use_tpu).prefetch(tf.data.experimental.AUTOTUNE
).make_one_shot_iterator().get_next()
return input_fn
def get_head(self, name=None):
"""Returns a `Head` instance for multiclass CIFAR-10 with the given name."""
return tf.contrib.estimator.multi_class_head(
10, name=name, loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
def get_feature_columns(self):
"""Returns feature columns."""
feature_columns = [
tf.feature_column.numeric_column(key=FEATURES, shape=self._shape())
]
return feature_columns
| 5,373 | 32.5875 | 80 | py |
adanet | adanet-master/research/improve_nas/trainer/nasnet.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Contains the definition for the NASNet classification networks.
Paper: https://arxiv.org/abs/1707.07012
Copy of: https://github.com/tensorflow/models/blob/master/research/slim/nets/
nasnet/nasnet.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow.compat.v1 as tf
# pylint: disable=g-import-not-at-top
try:
from adanet.research.improve_nas.trainer import nasnet_utils
except ImportError as e:
from trainer import nasnet_utils
# pylint: enable=g-import-not-at-top
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
# Notes for training NASNet Cifar Model
# -------------------------------------
# batch_size: 32
# learning rate: 0.025
# cosine (single period) learning rate decay
# auxiliary head loss weighting: 0.4
# clip global norm of all gradients by 5
def cifar_config():
return tf.contrib.training.HParams(
stem_multiplier=3.0,
drop_path_keep_prob=0.6,
num_cells=18,
use_aux_head=1,
num_conv_filters=32,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=0,
# 600 epochs with a batch size of 32
# This is used for the drop path probabilities since it needs to increase
# the drop out probability over the course of training.
total_training_steps=937500,
use_bounded_activation=False,
)
# Notes for training large NASNet model on ImageNet
# -------------------------------------
# batch size (per replica): 16
# learning rate: 0.015 * 100
# learning rate decay factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 100 replicas
# auxiliary head loss weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def large_imagenet_config():
return tf.contrib.training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=18,
filter_scaling_rate=2.0,
num_conv_filters=168,
drop_path_keep_prob=0.7,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
# Notes for training the mobile NASNet ImageNet model
# -------------------------------------
# batch size (per replica): 32
# learning rate: 0.04 * 50
# learning rate scaling factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 50 replicas
# auxiliary head weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def mobile_imagenet_config():
return tf.contrib.training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
drop_path_keep_prob=1.0,
num_conv_filters=44,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=0,
total_training_steps=250000,
use_bounded_activation=False,
)
def _update_hparams(hparams, is_training):
"""Update hparams for given is_training option."""
if not is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
def nasnet_cifar_arg_scope(weight_decay=5e-4,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5):
"""Defines the default arg scope for the NASNet-A Cifar model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Cifar Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
weights_initializer = tf.contrib.layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Mobile ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Mobile Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
weights_initializer = tf.contrib.layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_large_arg_scope(weight_decay=5e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Large ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Large Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
weights_initializer = tf.contrib.layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def _build_aux_head(net, end_points, num_classes, hparams, scope):
"""Auxiliary head used for all models across all datasets."""
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
with tf.variable_scope(scope):
aux_logits = tf.identity(net)
with tf.variable_scope('aux_logits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
aux_logits = activation_fn(aux_logits)
# Shape of feature map before the final layer.
shape = aux_logits.shape
if hparams.data_format == 'NHWC':
shape = shape[1:3]
else:
shape = shape[2:4]
aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
aux_logits = activation_fn(aux_logits)
aux_logits = tf.contrib.layers.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes)
end_points['AuxLogits'] = aux_logits
def _imagenet_stem(inputs, hparams, stem_cell, current_step=None):
"""Stem used for models trained on ImageNet."""
num_stem_cells = 2
# 149 x 149 x 32
num_stem_filters = int(32 * hparams.stem_multiplier)
net = slim.conv2d(
inputs, num_stem_filters, [3, 3], stride=2, scope='conv0',
padding='VALID')
net = slim.batch_norm(net, scope='conv0_bn')
# Run the reduction cells
cell_outputs = [None, net]
filter_scaling = 1.0 / (hparams.filter_scaling_rate**num_stem_cells)
for cell_num in range(num_stem_cells):
net = stem_cell(
net,
scope='cell_stem_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=cell_num,
current_step=current_step)
cell_outputs.append(net)
filter_scaling *= hparams.filter_scaling_rate
return net, cell_outputs
def _cifar_stem(inputs, hparams):
"""Stem used for models trained on Cifar."""
num_stem_filters = int(hparams.num_conv_filters * hparams.stem_multiplier)
net = slim.conv2d(
inputs,
num_stem_filters,
3,
scope='l1_stem_3x3')
net = slim.batch_norm(net, scope='l1_stem_bn')
return net, [None, net]
def build_nasnet_cifar(images, num_classes,
is_training=True,
config=None,
current_step=None):
"""Build NASNet model for the Cifar Dataset."""
hparams = cifar_config() if config is None else copy.deepcopy(config)
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='cifar',
current_step=current_step)
build_nasnet_cifar.default_image_size = 32
def build_nasnet_mobile(images, num_classes,
is_training=True,
final_endpoint=None,
config=None,
current_step=None):
"""Build NASNet Mobile model for the ImageNet Dataset."""
hparams = (mobile_imagenet_config() if config is None
else copy.deepcopy(config))
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint,
current_step=current_step)
build_nasnet_mobile.default_image_size = 224
def build_nasnet_large(images, num_classes,
is_training=True,
final_endpoint=None,
config=None,
current_step=None):
"""Build NASNet Large model for the ImageNet Dataset."""
hparams = (large_imagenet_config() if config is None
else copy.deepcopy(config))
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint,
current_step=current_step)
build_nasnet_large.default_image_size = 331
def _build_nasnet_base(images,
normal_cell,
reduction_cell,
num_classes,
hparams,
is_training,
stem_type,
final_endpoint=None,
current_step=None):
"""Constructs a NASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
stem_cell = reduction_cell
if stem_type == 'imagenet':
stem = lambda: _imagenet_stem(images, hparams, stem_cell)
elif stem_type == 'cifar':
stem = lambda: _cifar_stem(images, hparams)
else:
raise ValueError('Unknown stem_type: ', stem_type)
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net): return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2 if stem_type == 'imagenet' else 0
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
for cell_num in range(hparams.num_cells):
stride = 1
if hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
if cell_num in reduction_indices:
filter_scaling *= hparams.filter_scaling_rate
net = reduction_cell(
net,
scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=true_cell_num,
current_step=current_step)
if add_and_check_endpoint(
'Reduction_Cell_{}'.format(reduction_indices.index(cell_num)), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if not hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num,
current_step=current_step)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = activation_fn(net)
_build_aux_head(aux_net, end_points, num_classes, hparams,
scope='aux_{}'.format(cell_num))
cell_outputs.append(net)
# Final softmax layer
with tf.variable_scope('final_layer'):
net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
| 21,147 | 37.035971 | 80 | py |
adanet | adanet-master/research/improve_nas/trainer/__init__.py | """Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| 589 | 38.333333 | 72 | py |
adanet | adanet-master/research/improve_nas/trainer/improve_nas_test.py | # Lint as: python3
"""Tests for improve_nas.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from absl import flags
from absl.testing import parameterized
import adanet
from adanet.research.improve_nas.trainer import improve_nas
import numpy as np
import tensorflow.compat.v1 as tf
_IMAGE_DIM = 32
class _FakeSummary(object):
"""A fake `Summary`."""
def scalar(self, name, tensor):
del name # Unused
del tensor # Unused
def _optimizer(learning_rate):
return tf.train.GradientDescentOptimizer(learning_rate), learning_rate
def _builder(snapshot=False,
knowledge_distillation=improve_nas.KnowledgeDistillation.NONE,
checkpoint_dir=None,
use_aux_head=False,
learn_mixture_weights=False,
model_version="cifar"):
hparams = tf.contrib.training.HParams(
clip_gradients=5.,
stem_multiplier=3.0,
drop_path_keep_prob=0.6,
num_cells=3,
use_aux_head=use_aux_head,
aux_head_weight=0.4,
label_smoothing=0.1,
num_conv_filters=4,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
num_reduction_layers=2,
data_format="NHWC",
use_bounded_activation=False,
skip_reduction_layer_input=0,
initial_learning_rate=.01,
complexity_decay_rate=0.9,
weight_decay=.0001,
knowledge_distillation=knowledge_distillation,
snapshot=snapshot,
learn_mixture_weights=learn_mixture_weights,
mixture_weight_type=adanet.MixtureWeightType.SCALAR,
model_version=model_version,
total_training_steps=100)
return improve_nas.Builder(
[tf.feature_column.numeric_column(key="x", shape=[32, 32, 3])],
seed=11,
optimizer_fn=_optimizer,
checkpoint_dir=checkpoint_dir,
hparams=hparams)
def _subnetwork_generator(checkpoint_dir):
hparams = tf.contrib.training.HParams(
clip_gradients=5.,
stem_multiplier=3.0,
drop_path_keep_prob=0.6,
num_cells=3,
use_aux_head=False,
aux_head_weight=0.4,
label_smoothing=0.1,
num_conv_filters=4,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
complexity_decay_rate=0.9,
num_reduction_layers=2,
data_format="NHWC",
skip_reduction_layer_input=0,
initial_learning_rate=.01,
use_bounded_activation=False,
weight_decay=.0001,
knowledge_distillation=improve_nas.KnowledgeDistillation.NONE,
snapshot=False,
learn_mixture_weights=False,
mixture_weight_type=adanet.MixtureWeightType.SCALAR,
model_version="cifar",
total_training_steps=100)
return improve_nas.Generator(
[tf.feature_column.numeric_column(key="x", shape=[32, 32, 3])],
seed=11,
optimizer_fn=_optimizer,
iteration_steps=3,
checkpoint_dir=checkpoint_dir,
hparams=hparams)
class ImproveNasBuilderTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(ImproveNasBuilderTest, self).setUp()
# Setup and cleanup test directory.
self.test_subdirectory = os.path.join(tf.flags.FLAGS.test_tmpdir, self.id())
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
os.makedirs(self.test_subdirectory)
def tearDown(self):
super(ImproveNasBuilderTest, self).tearDown()
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
@parameterized.named_parameters({
"testcase_name": "two_subnetworks_adaptive_knowledge_distillation_aux",
"builder_params": [
{
"knowledge_distillation":
improve_nas.KnowledgeDistillation.ADAPTIVE,
"use_aux_head": True,
},
{
"knowledge_distillation":
improve_nas.KnowledgeDistillation.ADAPTIVE,
"use_aux_head": True,
},
],
"want_name": "NasNet_A_1.0_96_adaptive_cifar",
}, {
"testcase_name": "two_subnetworks_born_again_knowledge_distillation_w",
"builder_params": [
{
"knowledge_distillation":
improve_nas.KnowledgeDistillation.BORN_AGAIN,
"use_aux_head":
True,
"learn_mixture_weights": True,
},
{
"knowledge_distillation":
improve_nas.KnowledgeDistillation.BORN_AGAIN,
"use_aux_head":
True,
"learn_mixture_weights": True,
},
],
"want_name": "NasNet_A_1.0_96_born_again_cifar",
})
def test_build_subnetwork(self, builder_params, want_name):
with tf.Graph().as_default() as g, self.test_session(graph=g) as sess:
data = np.concatenate([
np.ones((1, _IMAGE_DIM, _IMAGE_DIM, 1)), 2. * np.ones(
(1, _IMAGE_DIM, _IMAGE_DIM, 1))
])
features = {"x": tf.constant(data)}
labels = tf.constant([0, 1])
training = True
mode = tf.estimator.ModeKeys.TRAIN
head = tf.contrib.estimator.binary_classification_head(
loss_reduction=tf.losses.Reduction.SUM)
ensemble = None
name = None
subnetwork = None
builders = []
for builder_param in builder_params:
builders.append(
_builder(checkpoint_dir=self.test_subdirectory, **builder_param))
for idx, builder in enumerate(builders):
name = builder.name
# Pass the subnetworks of previous builders to the next builder.
with tf.variable_scope("subnetwork_{}".format(idx)):
subnetwork = builder.build_subnetwork(
features=features,
logits_dimension=head.logits_dimension,
training=training,
iteration_step=tf.train.get_or_create_global_step(),
summary=_FakeSummary(),
previous_ensemble=ensemble)
logits = subnetwork.logits
weighted_subnetworks = []
if ensemble:
logits += ensemble.logits
weighted_subnetworks = ensemble.weighted_subnetworks
ensemble = adanet.Ensemble(
weighted_subnetworks=weighted_subnetworks + [
adanet.WeightedSubnetwork(
name=None,
logits=logits,
weight=None,
subnetwork=subnetwork)
],
logits=logits,
bias=0.)
estimator_spec = head.create_estimator_spec(
features=features,
labels=labels,
mode=mode,
train_op_fn=lambda loss: tf.no_op(),
logits=ensemble.logits)
sess.run(tf.global_variables_initializer())
train_op = builders[-1].build_subnetwork_train_op(
subnetwork,
estimator_spec.loss,
var_list=None,
labels=labels,
iteration_step=tf.train.get_or_create_global_step(),
summary=_FakeSummary(),
previous_ensemble=ensemble)
for _ in range(10):
sess.run(train_op)
self.assertEqual(want_name, name)
self.assertGreater(sess.run(estimator_spec.loss), 0.0)
class QuetzalGeneratorTest(tf.test.TestCase):
def test_candidate_generation(self):
self.test_subdirectory = os.path.join(flags.FLAGS.test_tmpdir, self.id())
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
os.mkdir(self.test_subdirectory)
subnetwork_generator = _subnetwork_generator(self.test_subdirectory)
subnetwork_builders = subnetwork_generator.generate_candidates(
previous_ensemble=None,
# The following arguments are unused by
# quetzal.Generator.
iteration_number=0,
previous_ensemble_reports=[],
all_reports=[])
self.assertEqual(1, len(subnetwork_builders))
if __name__ == "__main__":
tf.test.main()
| 8,461 | 32.184314 | 80 | py |
adanet | adanet-master/research/improve_nas/trainer/trainer.py | # Lint as: python3
"""Script to any experiment from paper.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
# pylint: disable=g-import-not-at-top
try:
from adanet.research.improve_nas.trainer import adanet_improve_nas
from adanet.research.improve_nas.trainer import cifar10
from adanet.research.improve_nas.trainer import cifar100
from adanet.research.improve_nas.trainer import fake_data
print("Imported from adanet.")
except ImportError as e:
from trainer import adanet_improve_nas
from trainer import cifar10
from trainer import cifar100
from trainer import fake_data
print("Imported from trainer.")
# pylint: enable=g-import-not-at-top
FLAGS = flags.FLAGS
# Training/eval loop controls.
flags.DEFINE_integer("batch_size", 32,
                     "Batch size used for training, eval and inference.")
flags.DEFINE_integer("train_steps", 1000000, "Number of training steps.")
flags.DEFINE_integer("save_summary_steps", 2000,
                     "Save summaries every this many steps.")
flags.DEFINE_string(
    "hparams", "",
    """A comma-separated list of `name=value` hyperparameter values.""")
flags.DEFINE_string(
    "dataset", "",
    "Dataset name: 'cifar10', 'cifar100' or 'fake'. 'fake' dataset is mainly "
    "for test runs.")
flags.DEFINE_integer("tf_random_seed", None,
                     "Graph level random seed for TensorFlow.")
flags.DEFINE_integer("eval_steps", None,
                     "Number of batches used for evaluation. If `None`, the "
                     "whole eval dataset is used")
# Checkpointing: exactly one of the following two flags must be non-zero;
# this is enforced by make_run_config below.
flags.DEFINE_integer(
    "save_checkpoints_secs", 600, "Number of seconds between checkpoint saves. "
    "This flag is ignored when autotune is used. "
    "Cannot be used with save_checkpoints_steps -- exactly one of "
    "save_checkpoints_secs and save_checkpoints_steps must be zero, and the "
    # Fixed: help text previously claimed "Defaults to 120s" although the
    # actual default above is 600.
    "other must be a strictly positive integer. Defaults to 600s.")
flags.DEFINE_integer(
    "save_checkpoints_steps", 0,
    # Fixed: added the missing space after "saves." so the concatenated
    # help string reads correctly.
    "Number of global steps between checkpoint saves. "
    "This flag is ignored when autotune is used. "
    "Cannot be used with save_checkpoints_secs -- exactly one of "
    "save_checkpoints_secs and save_checkpoints_steps must be zero, and the "
    "other must be a strictly positive integer. Defaults to 0, which means "
    "save_checkpoints_steps is ignored. To use save_checkpoints_steps "
    "instead, set save_checkpoints_secs to 0 and set save_checkpoints_steps "
    "to a positive integer.")
flags.DEFINE_string(
    "data_params", "",
    """A comma-separated list of `name=value` data provider parameter values.
    This flag is used to override data provider default settings for
    preprocessing or selecting different configurations for a given data
    provider.""")
flags.DEFINE_integer(
    "keep_checkpoint_max", 5,
    "The maximum number of recent checkpoint files to keep. As new files are "
    "created, older files are deleted. If None or 0, all checkpoint files are "
    "kept. Defaults to 5 (i.e. the 5 most recent checkpoint files are kept.)")
flags.DEFINE_string(
    "job-dir", "",
    "Unused. Must be here because of ml-engine.")
flags.DEFINE_string(
    "model_dir", None, """Directory for saving models and logs.""")
def make_run_config():
  """Builds a `tf.estimator.RunConfig` from the command-line FLAGS.

  Zero-valued checkpoint flags are mapped to `None` before being handed to
  the RunConfig, and exactly one of the two checkpointing flags must be set.

  Returns:
    tf.estimator.RunConfig.

  Raises:
    ValueError: If not exactly one of `save_checkpoints_secs` and
      `save_checkpoints_steps` is specified.
  """
  secs = FLAGS.save_checkpoints_secs or None
  steps = FLAGS.save_checkpoints_steps or None
  if secs and steps:
    raise ValueError("save_checkpoints_secs and save_checkpoints_steps "
                     "cannot both be non-zero.")
  if not (secs or steps):
    raise ValueError("save_checkpoints_secs and save_checkpoints_steps "
                     "cannot both be zero.")
  # absl.flags rejects tf_random_seed=None on the command line, so train.sh
  # passes -1 as a sentinel for "no seed".
  tf_random_seed = None if FLAGS.tf_random_seed == -1 else FLAGS.tf_random_seed
  return tf.estimator.RunConfig(
      save_summary_steps=FLAGS.save_summary_steps,
      model_dir=FLAGS.model_dir,
      save_checkpoints_secs=secs,
      save_checkpoints_steps=steps,
      keep_checkpoint_max=FLAGS.keep_checkpoint_max,
      tf_random_seed=tf_random_seed)
def main(argv):
  """Builds the estimator selected by FLAGS and runs train-and-evaluate."""
  del argv  # Unused.
  run_config = make_run_config()
  estimator_builder = adanet_improve_nas.Builder()
  hparams = estimator_builder.hparams(FLAGS.batch_size, FLAGS.hparams)
  tf.logging.info("Running Experiment with HParams: %s", hparams)
  # Map the dataset flag to a provider factory; "fake" is a tiny synthetic
  # dataset mainly intended for test runs.
  provider_factories = {
      "cifar10": cifar10.Provider,
      "cifar100": cifar100.Provider,
      "fake": lambda: fake_data.FakeImageProvider(
          num_examples=10, num_classes=10, image_dim=32, channels=3, seed=42),
  }
  if FLAGS.dataset not in provider_factories:
    raise ValueError("Invalid dataset")
  data_provider = provider_factories[FLAGS.dataset]()
  estimator = estimator_builder.estimator(
      data_provider=data_provider,
      run_config=run_config,
      hparams=hparams,
      train_steps=FLAGS.train_steps)
  train_spec = tf.estimator.TrainSpec(
      input_fn=data_provider.get_input_fn(
          partition="train",
          mode=tf.estimator.ModeKeys.TRAIN,
          batch_size=FLAGS.batch_size),
      max_steps=FLAGS.train_steps)
  # Evaluate on the held-out "test" split at most every 30 minutes.
  eval_spec = tf.estimator.EvalSpec(
      input_fn=data_provider.get_input_fn(
          partition="test",
          mode=tf.estimator.ModeKeys.EVAL,
          batch_size=FLAGS.batch_size),
      steps=FLAGS.eval_steps,
      start_delay_secs=10,
      throttle_secs=1800)
  tf.logging.info("Training!")
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
  tf.logging.info("Done training!")
# Entry point: parse flags and launch training via the TF app runner.
if __name__ == "__main__":
  tf.app.run(main)
| 6,647 | 35.527473 | 80 | py |
adanet | adanet-master/research/improve_nas/trainer/optimizer.py | # Lint as: python3
"""Definition of optimizers and learning rate schedules.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import tensorflow.compat.v1 as tf
class LearningRateSchedule(abc.ABC):
  """A learning rate decay schedule interface.

  NOTE: this previously declared ``__metaclass__ = abc.ABCMeta``, which is
  the Python 2 spelling and has no effect under Python 3 (this file is
  "Lint as: python3"), so `apply` was never actually enforced as abstract.
  Deriving from `abc.ABC` restores the intended contract: subclasses must
  override `apply`, and the interface itself cannot be instantiated.
  """

  @abc.abstractmethod
  def apply(self, learning_rate):
    """Applies the learning rate decay schedule to the given learning rate.

    Args:
      learning_rate: Float `Tensor` learning rate.

    Returns:
      Float `Tensor` learning rate with applied decay schedule.
    """
class Constant(LearningRateSchedule):
  """A schedule that leaves the learning rate untouched."""

  def apply(self, learning_rate):
    """Returns `learning_rate` unchanged (see `LearningRateSchedule`)."""
    return learning_rate
class Cosine(LearningRateSchedule):
  """Cosine learning rate decay, backed by `tf.train.cosine_decay`."""

  def __init__(self, decay_steps, alpha):
    """Returns a `Cosine` instance.

    Args:
      decay_steps: Number of steps to decay over.
      alpha: Minimum learning rate value as a fraction of learning_rate.

    Returns:
      A `Cosine` instance.
    """
    # Bind the decay hyperparameters now; the step-counter variable is only
    # created later, inside `apply`, under the caller's variable scope.
    self._decay_fn = functools.partial(
        tf.train.cosine_decay, decay_steps=decay_steps, alpha=alpha)

  def apply(self, learning_rate):
    """See `LearningRateSchedule`."""
    # Start at -1 since we increment before reading.
    global_step = tf.get_variable("decay_step", initializer=-1, trainable=False)
    increment_op = tf.assign_add(global_step, 1)
    # The control dependency guarantees the counter advances exactly once
    # each time the decayed rate is evaluated.
    with tf.control_dependencies([increment_op]):
      learning_rate = self._decay_fn(
          learning_rate=learning_rate, global_step=global_step.read_value())
    return learning_rate
def fn_with_name(optimizer_name,
                 learning_rate_schedule="constant",
                 cosine_decay_steps=None):
  """Returns an optimizer_fn with the given name.

  Args:
    optimizer_name: Optimizer name string for identifying the optimizer. One
      of 'adagrad', 'adam', 'lazy_adam', 'momentum', 'rmsprop', or 'sgd'
      (matching is case-insensitive). The docstring previously listed only
      four of the six supported names.
    learning_rate_schedule: Type of learning rate schedule to use, either
      'constant' or 'cosine'. Opened for future extensions.
    cosine_decay_steps: See `Cosine`. Only meaningful when
      `learning_rate_schedule` is 'cosine'.

  Returns:
    An optimizer_fn which takes a `learning_rate` scalar `Tensor` argument
    and returns an `(Optimizer, learning_rate)` tuple, where `learning_rate`
    has the schedule applied. (Previously documented as returning only the
    `Optimizer`, which did not match the implementation.)

  Raises:
    ValueError: If `optimizer_name` or `learning_rate_schedule` is invalid.
  """
  optimizers = {
      "adagrad": tf.train.AdagradOptimizer,
      "adam": tf.train.AdamOptimizer,
      "lazy_adam": tf.contrib.opt.LazyAdamOptimizer,
      "momentum": functools.partial(tf.train.MomentumOptimizer, momentum=.9),
      "rmsprop": tf.train.RMSPropOptimizer,
      "sgd": tf.train.GradientDescentOptimizer,
  }
  optimizer_name = optimizer_name.lower()
  if optimizer_name not in optimizers:
    raise ValueError("Invalid optimizer '{}'".format(optimizer_name))
  optimizer_fn = optimizers[optimizer_name]
  schedules = {
      "constant":
          Constant(),
      "cosine":
          Cosine(decay_steps=cosine_decay_steps, alpha=0.0),
  }
  schedule_name = learning_rate_schedule.lower()
  if schedule_name not in schedules:
    raise ValueError(
        "Invalid learning_rate_schedule '{}'".format(schedule_name))
  schedule = schedules[schedule_name]

  def _optimizer_with_schedule(learning_rate):
    # Apply the decay first so the optimizer sees the scheduled rate.
    learning_rate = schedule.apply(learning_rate)
    optimizer = optimizer_fn(learning_rate)
    return optimizer, learning_rate

  return _optimizer_with_schedule
| 4,015 | 29.424242 | 80 | py |
adanet | adanet-master/research/improve_nas/trainer/subnetwork_utils.py | # Lint as: python3
"""Definition of helpful functions to work with AdaNet subnetworks.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
import copy
import tensorflow.compat.v1 as tf
def capture_variables(fn):
  """Runs `fn` and reports which trainable tf variables it created.

  Wrap a call like ``f(a, b)`` in a zero-argument lambda (``fn == lambda:
  f(a, b)``) and pass it here to learn which trainable variables the call
  created, without managing variable scopes by hand. This idiom trades
  strict style compliance for code that is easy to write and reasonably
  easy to understand; usable for prototyping or debugging.

  Args:
    fn: function with no arguments.

  Returns:
    tuple: First element is the set of tf variables created by `fn`, second
    is the actual output of `fn`.
  """
  already_present = set(tf.trainable_variables())
  fn_output = fn()
  created = set(tf.trainable_variables()) - already_present
  return created, fn_output
def copy_update(hparams, **kwargs):
  """Returns a deep copy of `hparams` with `kwargs` values applied.

  Lets callers treat hyperparameters as immutable: the overrides land on
  the returned object only, never on the original `hparams`.

  Args:
    hparams: hyperparameters.
    **kwargs: keyword arguments to change in the copy.

  Returns:
    An updated `tf.contrib.training.HParams` object. Changes to it do not
    propagate back to the original `hparams`.
  """
  merged = hparams.values()
  merged.update(kwargs)
  return tf.contrib.training.HParams(**copy.deepcopy(merged))
def get_persisted_value_from_ensemble(ensemble, key):
  """Returns a value persisted by the most recent subnetwork in `ensemble`.

  Fixes a docstring typo ("eprsisted") and drops a redundant intermediate
  variable; behavior is unchanged.

  Args:
    ensemble: Previous ensemble.
    key: Name of the constant to fetch from the persisted (shared) tensors.

  Returns:
    The value stored under `key` in the last weighted subnetwork's `shared`
    mapping (an int or float constant).
  """
  previous_subnetwork = ensemble.weighted_subnetworks[-1].subnetwork
  return previous_subnetwork.shared[key]
| 2,686 | 30.244186 | 79 | py |
adanet | adanet-master/docs/source/conf.py | # -*- coding: utf-8 -*-
# Copyright 2018 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the Sphinx documentation builder.
This file does only contain a selection of the most common options. For a
full list see the documentation:
http://www.sphinx-doc.org/en/master/usage/configuration.html
"""
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
from adanet import version as ver # pylint: disable=g-import-not-at-top
# -- Project information -----------------------------------------------------
project = u'adanet'
copyright = u'2018, AdaNet Authors'  # pylint: disable=redefined-builtin
author = u'AdaNet Authors'
# The short X.Y version, taken from the single source of truth in
# adanet/version.py (imported above as `ver`).
version = ver.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'recommonmark',
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = {
    '.rst': 'restructuredtext',
    '.txt': 'markdown',
    '.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_logo = './assets/adanet_tangram_logo.png'
html_context = {
    'css_files': ['_static/custom.css'],
}
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'adanetdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'adanet.tex', u'adanet Documentation', u'AdaNet Authors',
     'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'adanet', u'adanet Documentation', [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'adanet', u'adanet Documentation', author, 'adanet',
     'One line description of project.', 'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# Napoleon settings (Google/NumPy style docstring parsing).
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| 6,636 | 31.218447 | 79 | py |
adanet | adanet-master/adanet/modelflow_test.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test ModelFlow imports."""
import adanet.experimental as adanet
import tensorflow.compat.v2 as tf
class ModelFlowTest(tf.test.TestCase):
  """Smoke test: the public ModelFlow symbols resolve and are non-None."""

  def test_public(self):
    """Every exported ModelFlow name should be importable."""
    public_symbols = [
        adanet.controllers.SequentialController,
        adanet.keras.EnsembleModel,
        adanet.keras.MeanEnsemble,
        adanet.keras.WeightedEnsemble,
        adanet.keras.ModelSearch,
        adanet.phases.AutoEnsemblePhase,
        adanet.phases.InputPhase,
        adanet.phases.KerasTrainerPhase,
        adanet.phases.KerasTunerPhase,
        adanet.phases.RepeatPhase,
        adanet.schedulers.InProcessScheduler,
        adanet.storages.InMemoryStorage,
        adanet.work_units.KerasTrainerWorkUnit,
        adanet.work_units.KerasTunerWorkUnit,
    ]
    for symbol in public_symbols:
      self.assertIsNotNone(symbol)
# Run the test cases under the TensorFlow test runner.
if __name__ == "__main__":
  tf.test.main()
| 1,637 | 38.95122 | 74 | py |
adanet | adanet-master/adanet/version.py | # Copyright 2018 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the version string."""
__version__ = u"0.9.0"
| 744 | 40.388889 | 80 | py |
adanet | adanet-master/adanet/adanet_test.py | """Test AdaNet package.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import adanet
from adanet.examples import simple_dnn
import tensorflow.compat.v1 as tf
class AdaNetTest(tf.test.TestCase):
  """Smoke test: the public AdaNet API surface resolves and is non-None."""

  def test_public(self):
    """Every publicly exported AdaNet symbol should be importable."""
    exported_symbols = [
        adanet.__version__,
        adanet.AutoEnsembleEstimator,
        adanet.AutoEnsembleSubestimator,
        adanet.AutoEnsembleTPUEstimator,
        adanet.distributed.PlacementStrategy,
        adanet.distributed.ReplicationStrategy,
        adanet.distributed.RoundRobinStrategy,
        adanet.ensemble.Ensemble,
        adanet.ensemble.Ensembler,
        adanet.ensemble.TrainOpSpec,
        adanet.ensemble.AllStrategy,
        adanet.ensemble.Candidate,
        adanet.ensemble.GrowStrategy,
        adanet.ensemble.Strategy,
        adanet.ensemble.ComplexityRegularized,
        adanet.ensemble.ComplexityRegularizedEnsembler,
        adanet.ensemble.MeanEnsemble,
        adanet.ensemble.MeanEnsembler,
        adanet.ensemble.MixtureWeightType,
        adanet.ensemble.WeightedSubnetwork,
        adanet.Ensemble,
        adanet.Estimator,
        adanet.Evaluator,
        adanet.MixtureWeightType,
        adanet.replay.Config,
        adanet.ReportMaterializer,
        adanet.Subnetwork,
        adanet.subnetwork.Builder,
        adanet.subnetwork.Generator,
        adanet.subnetwork.Subnetwork,
        adanet.subnetwork.TrainOpSpec,
        adanet.Summary,
        adanet.TPUEstimator,
        adanet.WeightedSubnetwork,
        simple_dnn.Generator,
    ]
    for symbol in exported_symbols:
      self.assertIsNotNone(symbol)
# Run the test cases under the TensorFlow test runner.
if __name__ == "__main__":
  tf.test.main()
| 2,679 | 40.875 | 72 | py |
adanet | adanet-master/adanet/__init__.py | # Copyright 2018 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaNet: Fast and flexible AutoML with learning guarantees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet import distributed
from adanet import ensemble
from adanet import replay
from adanet import subnetwork
from adanet.autoensemble import AutoEnsembleEstimator
from adanet.autoensemble import AutoEnsembleSubestimator
from adanet.autoensemble import AutoEnsembleTPUEstimator
from adanet.core import Estimator
from adanet.core import Evaluator
from adanet.core import ReportMaterializer
from adanet.core import Summary
from adanet.core import TPUEstimator
# For backwards compatibility. Previously all Ensemblers were complexity
# regularized using the AdaNet objective.
from adanet.ensemble import ComplexityRegularized as Ensemble
from adanet.ensemble import MixtureWeightType
from adanet.ensemble import WeightedSubnetwork
from adanet.subnetwork import Subnetwork
from adanet.version import __version__
# Public API of the `adanet` package; keep in sync with the imports above.
__all__ = [
    "AutoEnsembleEstimator",
    "AutoEnsembleSubestimator",
    "AutoEnsembleTPUEstimator",
    "distributed",
    "ensemble",
    "Ensemble",
    "Estimator",
    "Evaluator",
    "replay",
    "ReportMaterializer",
    "subnetwork",
    "Summary",
    "TPUEstimator",
    "MixtureWeightType",
    "WeightedSubnetwork",
    "Subnetwork",
]
| 1,948 | 31.483333 | 74 | py |
adanet | adanet-master/adanet/pip_package/setup.py | # Copyright 2018 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Setup for pip package."""
from adanet import version
import setuptools
# Can't import the module during setup.py.
# Use execfile to find __version__.
# NOTE(review): `version` is also imported at the top of this file and used
# in setup() below, while the exec here defines a local `__version__` — the
# two mechanisms look redundant; confirm which one is intended.
with open('adanet/version.py') as in_file:
  exec(in_file.read())
# Runtime dependencies pinned for the pip package.
REQUIRED_PACKAGES = [
    'absl-py>=0.7,<1.0',
    'six>=1.11,<2.0',
    'numpy>=1.15,<2.0',
    'nose>=1.3,<2.0',
    'rednose>=1.3,<2.0',
    'coverage>=4.5,<5.0',
    'protobuf>=3.6,<4.0',
    'mock>=3.0,<4.0',
]
setuptools.setup(
    name='adanet',  # Automatic: adanet, etc. Case insensitive.
    # Strip pre-release dashes so the version is PEP 440 friendly.
    version=version.__version__.replace('-', ''),
    description=(
        'adanet is a lightweight and scalable TensorFlow AutoML framework for '
        'training and deploying adaptive neural networks using the AdaNet '
        'algorithm [Cortes et al. ICML 2017](https://arxiv.org/abs/1607.01097).'
    ),
    long_description='',
    url='https://github.com/tensorflow/adanet',
    author='Google LLC',
    install_requires=REQUIRED_PACKAGES,
    packages=setuptools.find_packages(),
    # PyPI package information.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    license='Apache 2.0',
    keywords=('tensorflow machine learning automl module subgraph framework '
              'ensemble neural network adaptive metalearning'),
)
| 2,713 | 36.694444 | 80 | py |
adanet | adanet-master/adanet/core/architecture_test.py | """Test for the AdaNet architecture.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet.core.architecture import _Architecture
import tensorflow.compat.v1 as tf
class ArchitectureTest(parameterized.TestCase, tf.test.TestCase):
  """Unit tests for the internal `_Architecture` record."""

  @parameterized.named_parameters({
      "testcase_name": "empty",
      "subnetworks": [],
      "want": (),
  }, {
      "testcase_name": "single",
      "subnetworks": [(0, "linear")],
      "want": ((0, "linear"),),
  }, {
      "testcase_name": "different_iterations",
      "subnetworks": [(0, "linear"), (1, "dnn")],
      "want": ((0, "linear"), (1, "dnn")),
  }, {
      "testcase_name": "same_iterations",
      "subnetworks": [(0, "linear"), (0, "dnn"), (1, "dnn")],
      "want": ((0, "linear"), (0, "dnn"), (1, "dnn")),
  })
  def test_subnetworks(self, subnetworks, want):
    """`subnetworks` preserves the (iteration, builder) insertion order."""
    arch = _Architecture("foo", "dummy_ensembler_name")
    for subnetwork in subnetworks:
      arch.add_subnetwork(*subnetwork)
    self.assertEqual(want, arch.subnetworks)

  @parameterized.named_parameters({
      "testcase_name": "empty",
      "subnetworks": [],
      "want": (),
  }, {
      "testcase_name": "single",
      "subnetworks": [(0, "linear")],
      "want": ((0, ("linear",)),),
  }, {
      "testcase_name": "different_iterations",
      "subnetworks": [(0, "linear"), (1, "dnn")],
      "want": ((0, ("linear",)), (1, ("dnn",))),
  }, {
      "testcase_name": "same_iterations",
      "subnetworks": [(0, "linear"), (0, "dnn"), (1, "dnn")],
      "want": ((0, ("linear", "dnn")), (1, ("dnn",))),
  })
  def test_subnetworks_grouped_by_iteration(self, subnetworks, want):
    """Builders added in the same iteration are grouped into one tuple."""
    arch = _Architecture("foo", "dummy_ensembler_name")
    for subnetwork in subnetworks:
      arch.add_subnetwork(*subnetwork)
    self.assertEqual(want, arch.subnetworks_grouped_by_iteration)

  def test_set_and_add_replay_index(self):
    """Replay indices can be bulk-set and then appended to."""
    arch = _Architecture("foo", "dummy_ensembler_name")
    arch.set_replay_indices([1, 2, 3])
    self.assertAllEqual([1, 2, 3], arch.replay_indices)
    arch.add_replay_index(4)
    self.assertAllEqual([1, 2, 3, 4], arch.replay_indices)

  def test_serialization_lifecycle(self):
    """serialize() emits stable JSON and deserialize() round-trips it."""
    arch = _Architecture("foo", "dummy_ensembler_name", replay_indices=[1, 2])
    arch.add_subnetwork(0, "linear")
    arch.add_subnetwork(0, "dnn")
    arch.add_subnetwork(1, "dnn")
    self.assertEqual("foo", arch.ensemble_candidate_name)
    self.assertEqual("dummy_ensembler_name", arch.ensembler_name)
    self.assertEqual(((0, ("linear", "dnn")), (1, ("dnn",))),
                     arch.subnetworks_grouped_by_iteration)
    iteration_number = 2
    global_step = 100
    serialized = arch.serialize(iteration_number, global_step)
    # The exact JSON string is pinned so on-disk checkpoints stay readable.
    self.assertEqual(
        '{"ensemble_candidate_name": "foo", "ensembler_name": '
        '"dummy_ensembler_name", "global_step": 100, "iteration_number": 2, '
        '"replay_indices": [1, 2], '
        '"subnetworks": [{"builder_name": "linear", "iteration_number": 0}, '
        '{"builder_name": "dnn", "iteration_number": 0},'
        ' {"builder_name": "dnn", "iteration_number": 1}]}', serialized)
    deserialized_arch = _Architecture.deserialize(serialized)
    self.assertEqual(arch.ensemble_candidate_name,
                     deserialized_arch.ensemble_candidate_name)
    self.assertEqual(arch.ensembler_name,
                     deserialized_arch.ensembler_name)
    self.assertEqual(arch.subnetworks_grouped_by_iteration,
                     deserialized_arch.subnetworks_grouped_by_iteration)
    self.assertEqual(global_step, deserialized_arch.global_step)
# Run the test cases under the TensorFlow test runner.
if __name__ == "__main__":
  tf.test.main()
| 4,300 | 37.061947 | 78 | py |
adanet | adanet-master/adanet/core/tpu_estimator_test.py | """Tests AdaNet TPU estimator.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import json
import os
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.tpu_estimator import TPUEstimator
from adanet.subnetwork import Builder
from adanet.subnetwork import Report
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-import-not-at-top
from tensorflow_estimator.python.estimator.head import regression_head
try:
from tensorflow_estimator.contrib.estimator.python.estimator import head as head_lib
except (AttributeError, ImportError):
head_lib = None
# pylint: enable=g-direct-tensorflow-import
# pylint: enable=g-import-not-at-top
class _DNNBuilder(Builder):
  """A simple DNN subnetwork builder."""

  def __init__(self,
               name,
               feature_columns=None,
               learning_rate=.01,
               layer_size=16,
               seed=13,
               use_tpu=False):
    """Initializes a `_DNNBuilder`.

    Args:
      name: String name for the subnetwork this builder creates.
      feature_columns: Optional feature columns; when `None`, the raw
        `features["x"]` tensor is used directly as the input layer.
      learning_rate: Float learning rate for gradient descent training.
      layer_size: Integer width of the single hidden layer.
      seed: Integer seed for deterministic weight initialization.
      use_tpu: Whether to wrap the optimizer in a `CrossShardOptimizer`.
    """
    self._name = name
    self._feature_columns = feature_columns
    self._learning_rate = learning_rate
    self._layer_size = layer_size
    self._seed = seed
    self._use_tpu = use_tpu

  @property
  def name(self):
    return self._name

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    seed = self._seed
    if previous_ensemble:
      # Increment seed so different iterations don't learn the exact same thing.
      seed += 1
    with tf.compat.v1.variable_scope("dnn"):
      persisted_tensors = {}
      with tf.compat.v1.variable_scope("hidden_layer"):
        if self._feature_columns:
          input_layer = tf.compat.v1.feature_column.input_layer(
              features=features, feature_columns=self._feature_columns)
        else:
          input_layer = features["x"]
        w = tf.compat.v1.get_variable(
            shape=[input_layer.shape[1], self._layer_size],
            initializer=tf.compat.v1.glorot_uniform_initializer(seed=seed),
            name="weight")
        hidden_layer = tf.matmul(input_layer, w)
      if previous_ensemble:
        # Grow the hidden layer by concatenating the previous iteration's
        # persisted hidden layer alongside this iteration's.
        other_hidden_layer = previous_ensemble.weighted_subnetworks[
            -1].subnetwork.persisted_tensors["hidden_layer"]
        hidden_layer = tf.concat([hidden_layer, other_hidden_layer], axis=1)
      # Use a leaky-relu activation so that gradients can flow even when
      # outputs are negative. Leaky relu has a non-zero slope when x < 0.
      # Otherwise success at learning is completely dependent on random seed.
      hidden_layer = tf.nn.leaky_relu(hidden_layer, alpha=.2)
      persisted_tensors["hidden_layer"] = hidden_layer
      if training:
        # This change will only be in the next iteration if
        # `freeze_training_graph` is `True`.
        persisted_tensors["hidden_layer"] = 2 * hidden_layer
    with tf.compat.v1.variable_scope("logits"):
      logits = tf.compat.v1.layers.dense(
          hidden_layer,
          logits_dimension,
          kernel_initializer=tf.compat.v1.glorot_uniform_initializer(seed=seed))
    # Write test summaries at several scopes so tests can assert on them.
    summary.scalar("scalar", 3)
    summary.image("image", tf.ones([1, 3, 3, 1]))
    with tf.compat.v1.variable_scope("nested"):
      summary.scalar("scalar", 5)
    return Subnetwork(
        last_layer=logits,
        logits=logits,
        complexity=3,
        persisted_tensors=persisted_tensors)

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(
        learning_rate=self._learning_rate)
    if self._use_tpu:
      # TPU training requires wrapping the optimizer in a CrossShardOptimizer.
      optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
    return optimizer.minimize(loss, var_list=var_list)

  def build_subnetwork_report(self):
    # Fixed hparams/attributes/metrics so report contents are deterministic.
    return Report(
        hparams={"layer_size": self._layer_size},
        attributes={"complexity": tf.constant(3, dtype=tf.int32)},
        metrics={
            "moo": (tf.constant(3,
                                dtype=tf.int32), tf.constant(3, dtype=tf.int32))
        })
class _NanLossBuilder(Builder):
  """A subnetwork builder always produces a NaN loss."""

  @property
  def name(self):
    return "nan"

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    # Multiplying the dense layer's output by NaN forces any loss computed
    # from these logits to be NaN, letting tests exercise NaN-handling paths.
    logits = tf_compat.v1.layers.dense(
        features["x"],
        logits_dimension,
        kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
            seed=42)) * np.nan
    return Subnetwork(last_layer=logits, logits=logits, complexity=0)

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    # Nothing to train: this subnetwork exists only to produce a NaN loss.
    return tf.no_op()
def make_regression_head(use_tpu):
  """Returns a regression head suitable for the execution environment.

  Args:
    use_tpu: Whether the estimator will run on TPU.

  Returns:
    A contrib-style regression head when running on TPU and the contrib head
    library is available, otherwise a `RegressionHead` from tf-estimator.
  """
  use_contrib_head = bool(use_tpu and head_lib)
  if not use_contrib_head:
    # TF 2.0 eliminates tf.contrib, so use the estimator regression head.
    return regression_head.RegressionHead(
        loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE)
  # AdaNet TPU currently requires the old (contrib) head.
  return head_lib.regression_head(
      loss_reduction=tf_compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE)
class TPUEstimatorTest(tu.AdanetTestCase):
  """Exercises the adanet TPUEstimator lifecycle and its written summaries."""

  def setUp(self):
    super(TPUEstimatorTest, self).setUp()
    if not tf_compat.version_greater_or_equal("1.14.0"):
      self.skipTest("TPUEmbedding not supported in version 1.13.0 and below.")
    # TPUConfig initializes model_dir from TF_CONFIG and checks that the user
    # provided model_dir matches the TF_CONFIG one.
    tf_config = {"model_dir": self.test_subdirectory}
    os.environ["TF_CONFIG"] = json.dumps(tf_config)

  def tearDown(self):
    super(TPUEstimatorTest, self).tearDown()
    del os.environ["TF_CONFIG"]

  @parameterized.named_parameters(
      {
          "testcase_name":
              "not_use_tpu",
          "use_tpu":
              False,
          "subnetwork_generator":
              SimpleGenerator([_DNNBuilder("dnn", use_tpu=False)]),
          "want_loss":
              0.41315794,
      },
  )
  def test_tpu_estimator_simple_lifecycle(self, use_tpu, subnetwork_generator,
                                          want_loss):
    # Train, evaluate, predict and export in sequence to cover the full
    # estimator lifecycle.
    config = tf.compat.v1.estimator.tpu.RunConfig(master="", tf_random_seed=42)
    estimator = TPUEstimator(
        # TODO: Add test with estimator Head v2.
        head=make_regression_head(use_tpu),
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=10,
        model_dir=self.test_subdirectory,
        config=config,
        use_tpu=use_tpu,
        train_batch_size=64 if use_tpu else 0)
    max_steps = 30
    xor_features = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
    xor_labels = [[1.], [0.], [1.], [0.]]
    train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)
    # Train.
    estimator.train(
        input_fn=train_input_fn, steps=None, max_steps=max_steps, hooks=None)
    # Evaluate.
    eval_results = estimator.evaluate(
        input_fn=train_input_fn, steps=1, hooks=None)
    # Predict.
    predictions = estimator.predict(
        input_fn=tu.dataset_input_fn(features=[0., 0.], return_dataset=True))
    # We need to iterate over all the predictions before moving on, otherwise
    # the TPU will not be shut down.
    for prediction in predictions:
      self.assertIsNotNone(prediction["predictions"])
    # Export SavedModel.
    def serving_input_fn():
      """Input fn for serving export, starting from serialized example."""
      serialized_example = tf.compat.v1.placeholder(
          dtype=tf.string, shape=(None), name="serialized_example")
      return tf.estimator.export.ServingInputReceiver(
          features={"x": tf.constant([[0., 0.]], name="serving_x")},
          receiver_tensors=serialized_example)
    estimator.export_saved_model(
        export_dir_base=estimator.model_dir,
        serving_input_receiver_fn=serving_input_fn)
    # 30 steps at 10 steps per iteration means 2 completed boost iterations.
    self.assertAlmostEqual(want_loss, eval_results["loss"], places=2)
    self.assertEqual(max_steps, eval_results["global_step"])
    self.assertEqual(2, eval_results["iteration"])

  @parameterized.named_parameters(
      {
          "testcase_name": "not_use_tpu",
          "use_tpu": False,
          "want_loss": 0.55584925,
          "want_adanet_loss": .64416,
          "want_eval_summary_loss": 0.555849,
          "want_predictions": 0.46818,
      },
  )
  def test_tpu_estimator_summaries(self, use_tpu, want_loss, want_adanet_loss,
                                   want_eval_summary_loss, want_predictions):
    max_steps = 10
    config = tf.compat.v1.estimator.tpu.RunConfig(
        tf_random_seed=42, save_summary_steps=2, log_step_count_steps=max_steps)
    assert config.log_step_count_steps

    def metric_fn(predictions):
      return {
          "predictions": tf_compat.v1.metrics.mean(predictions["predictions"])
      }

    estimator = TPUEstimator(
        head=make_regression_head(use_tpu),
        subnetwork_generator=SimpleGenerator(
            [_DNNBuilder("dnn", use_tpu=use_tpu)]),
        max_iteration_steps=max_steps,
        model_dir=self.test_subdirectory,
        metric_fn=metric_fn,
        config=config,
        use_tpu=use_tpu,
        train_batch_size=64 if use_tpu else 0)
    xor_features = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
    xor_labels = [[1.], [0.], [1.], [0.]]
    train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)
    estimator.train(input_fn=train_input_fn, max_steps=max_steps)
    eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
    self.assertAlmostEqual(want_loss, eval_results["loss"], places=2)
    self.assertEqual(max_steps, eval_results["global_step"])
    self.assertEqual(0, eval_results["iteration"])
    # Check the summaries written for the subnetwork and ensemble subdirs.
    subnetwork_subdir = os.path.join(self.test_subdirectory,
                                     "subnetwork/t0_dnn")
    ensemble_subdir = os.path.join(
        self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
    # TODO: Why is the adanet_loss written to 'loss'?
    self.assertAlmostEqual(
        want_adanet_loss,
        tu.check_eventfile_for_keyword("loss", self.test_subdirectory),
        places=1)
    self.assertEqual(
        0.,
        tu.check_eventfile_for_keyword("iteration/adanet/iteration",
                                       self.test_subdirectory))
    self.assertAlmostEqual(
        3.,
        tu.check_eventfile_for_keyword("scalar", subnetwork_subdir),
        places=3)
    self.assertEqual(
        (3, 3, 1),
        tu.check_eventfile_for_keyword(
            # When TF 2 behavior is enabled AdaNet uses V2 summaries.
            "image" if tf_compat.is_v2_behavior_enabled() else "image/image/0",
            subnetwork_subdir))
    self.assertAlmostEqual(
        5.,
        tu.check_eventfile_for_keyword("nested/scalar", subnetwork_subdir),
        places=3)
    self.assertAlmostEqual(
        want_adanet_loss,
        tu.check_eventfile_for_keyword(
            "adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir),
        places=1)
    self.assertAlmostEqual(
        0.,
        tu.check_eventfile_for_keyword(
            "complexity_regularization/adanet/adanet_weighted_ensemble",
            ensemble_subdir),
        places=1)
    self.assertAlmostEqual(
        1.,
        tu.check_eventfile_for_keyword(
            "mixture_weight_norms/adanet/"
            "adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
        places=1)
    # Eval metric summaries are always written out during eval.
    subnetwork_eval_subdir = os.path.join(subnetwork_subdir, "eval")
    self.assertAlmostEqual(
        want_eval_summary_loss,
        tu.check_eventfile_for_keyword("loss", subnetwork_eval_subdir),
        places=1)
    # TODO: Check why some eval metrics are zero on TPU.
    self.assertAlmostEqual(
        0.0 if use_tpu else want_eval_summary_loss,
        tu.check_eventfile_for_keyword("average_loss", subnetwork_eval_subdir),
        places=1)
    self.assertAlmostEqual(
        want_predictions,
        tu.check_eventfile_for_keyword("predictions", subnetwork_eval_subdir),
        places=3)
    eval_subdir = os.path.join(self.test_subdirectory, "eval")
    ensemble_eval_subdir = os.path.join(ensemble_subdir, "eval")
    for subdir in [ensemble_eval_subdir, eval_subdir]:
      self.assertEqual([b"| dnn |"],
                       tu.check_eventfile_for_keyword(
                           "architecture/adanet/ensembles/0", subdir))
      if subdir == eval_subdir:
        self.assertAlmostEqual(
            want_loss, tu.check_eventfile_for_keyword("loss", subdir), places=1)
      # TODO: Check why some eval metrics are zero on TPU.
      self.assertAlmostEqual(
          0.0 if use_tpu else want_eval_summary_loss,
          tu.check_eventfile_for_keyword("average_loss", subdir),
          places=1)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  tf.test.main()
| 13,969 | 35.285714 | 86 | py |
adanet | adanet-master/adanet/core/architecture.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An internal AdaNet model architecture definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
class _Architecture(object):
"""An AdaNet model architecture.
This data structure is the blueprint for reconstructing an AdaNet model. It
contains not only information about the underlying Ensemble, but also the
`adanet.subnetwork.Builder` instances that compose the ensemble, the
`adanet.ensemble.Ensembler` that constructed it, as well as the sequence
of states in the search space that led to the construction of this model.
In addition, it stores `replay_indices` A list of indices (an index per
boosting iteration); Holding the index of the ensemble in the candidate list
throughout the run.
It is serializable and deserializable for persistent storage.
"""
def __init__(self, ensemble_candidate_name, ensembler_name, global_step=None,
replay_indices=None):
self._ensemble_candidate_name = ensemble_candidate_name
self._ensembler_name = ensembler_name
self._global_step = global_step
self._subnets = []
self._replay_indices = replay_indices or []
@property
def ensemble_candidate_name(self):
"""The ensemble candidate's name.
Returns:
String name of the ensemble candidate with this architecture.
"""
return self._ensemble_candidate_name
@property
def ensembler_name(self):
"""The ensembler's name.
Returns:
String name of the ensembler that constructed the ensemble.
"""
return self._ensembler_name
@property
def global_step(self):
"""The global step when this architecture was serialized.
Returns:
Integer global step.
"""
return self._global_step
@property
def subnetworks(self):
"""The component subnetworks.
Returns:
An Iterable of (iteration_number, builder_name) tuples.
"""
return tuple(self._subnets)
@property
def replay_indices(self):
"""The list of replay indices.
Returns:
A list of integers (an integer per boosting iteration); Holding the index
of the ensemble in the candidate list throughout the run
"""
return self._replay_indices
@property
def subnetworks_grouped_by_iteration(self):
"""The component subnetworks grouped by iteration number.
Returns:
An Iterable of (iteration_number, builder_names) tuples where the builder
names are grouped by iteration number.
"""
subnet_by_iteration = {}
for iteration_number, builder_name in self._subnets:
if iteration_number not in subnet_by_iteration:
subnet_by_iteration[iteration_number] = []
subnet_by_iteration[iteration_number].append(builder_name)
return tuple([
(i, tuple(subnet_by_iteration[i])) for i in sorted(subnet_by_iteration)
])
def add_subnetwork(self, iteration_number, builder_name):
"""Adds the given subnetwork metadata.
Args:
iteration_number: Integer iteration number when this Subnetwork was
created.
builder_name: String name of the `adanet.subnetwork.Builder` that produced
this Subnetwork.
"""
self._subnets.append((iteration_number, builder_name))
# TODO: Remove setters and getters.
def add_replay_index(self, index):
self._replay_indices.append(index)
def set_replay_indices(self, indices):
self._replay_indices = copy.copy(indices)
def serialize(self, iteration_number, global_step):
"""Returns a string serialization of this object."""
# TODO: Confirm that it makes sense to have global step of 0.
assert global_step is not None
ensemble_arch = {
"ensemble_candidate_name": self.ensemble_candidate_name,
"iteration_number": int(iteration_number),
"global_step": int(global_step),
"ensembler_name": self.ensembler_name,
"subnetworks": [],
"replay_indices": self._replay_indices
}
for iteration_number, builder_name in self._subnets:
subnetwork_arch = {
"iteration_number": int(iteration_number),
"builder_name": builder_name,
}
ensemble_arch["subnetworks"].append(subnetwork_arch)
return json.dumps(ensemble_arch, sort_keys=True)
@staticmethod
def deserialize(serialized_architecture):
"""Deserializes a serialized architecture.
Args:
serialized_architecture: String representation of an `_Architecture`
obtained by calling `serialize`.
Returns:
A deserialized `_Architecture` instance.
"""
ensemble_arch = json.loads(serialized_architecture)
architecture = _Architecture(ensemble_arch["ensemble_candidate_name"],
ensemble_arch["ensembler_name"],
ensemble_arch["global_step"],
ensemble_arch["replay_indices"])
for subnet in ensemble_arch["subnetworks"]:
architecture.add_subnetwork(subnet["iteration_number"],
subnet["builder_name"])
return architecture
| 5,714 | 31.844828 | 80 | py |
adanet | adanet-master/adanet/core/timer.py | """A simple timer implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
class _CountDownTimer(object):
"""A simple count down timer implementation."""
def __init__(self, duration_secs):
"""Initializes a `_CountDownTimer`.
Args:
duration_secs: Float seconds for countdown.
Returns:
A `_CountDownTimer` instance.
"""
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
"""Returns the remaining countdown seconds."""
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0., diff)
| 1,282 | 26.891304 | 72 | py |
adanet | adanet-master/adanet/core/eval_metrics.py | """AdaNet metrics objects and functions.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import inspect
from absl import logging
from adanet import tf_compat
import six
import tensorflow.compat.v2 as tf
def _call_eval_metrics(eval_metrics):
if not eval_metrics:
return {}
fn, args = eval_metrics
if isinstance(args, dict):
return fn(**args)
else:
return fn(*args)
class _EvalMetricsStore(object):
"""Stores and manipulate eval_metric tuples."""
def __init__(self):
self._metric_fns = []
self._args = []
def add_eval_metrics(self, metric_fn, args):
"""Adds an eval_metrics tuple to the internal store."""
self._metric_fns.append(metric_fn)
self._args.append(args)
@property
def metric_fns(self):
return self._metric_fns
def flatten_args(self):
"""Flattens the eval_metrics arguments to a list."""
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
return nest.flatten(self._args)
def pack_args(self, args):
"""Packs the given list of arguments into the internal args structure."""
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
return nest.pack_sequence_as(self._args, args)
class _SubnetworkMetrics(object):
  """A object which creates evaluation metrics for Subnetworks."""

  def __init__(self, use_tpu=False):
    """Creates a _SubnetworkMetrics.

    Args:
      use_tpu: Whether to use TPU-specific variable sharing logic. This ensures
        that eval metrics created on TPU can be written to disk on the host CPU.

    Returns:
      A `_SubnetworkMetrics` instance.
    """
    self._use_tpu = use_tpu
    self._eval_metrics_store = _EvalMetricsStore()

  def create_eval_metrics(self, features, labels, estimator_spec, metric_fn):
    """Creates evaluation metrics from the given arguments.

    Args:
      features: Input `dict` of `Tensor` objects.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head).
      estimator_spec: The `EstimatorSpec` created by a `Head` instance.
      metric_fn: A function which should obey the following signature:
      - Args: can only have following three arguments in any order:
        * predictions: Predictions `Tensor` or dict of `Tensor` created by given
          `Head`.
        * features: Input `dict` of `Tensor` objects created by `input_fn` which
          is given to `estimator.evaluate` as an argument.
        * labels: Labels `Tensor` or dict of `Tensor` (for multi-head) created
          by `input_fn` which is given to `estimator.evaluate` as an argument.
      - Returns: Dict of metric results keyed by name. Final metrics are a union
        of this and `estimator`s existing metrics. If there is a name conflict
        between this and `estimator`s existing metrics, this will override the
        existing one. The values of the dict are the results of calling a metric
        function, namely a `(metric_tensor, update_op)` tuple.
    """
    # If estimator_spec is not a TPUEstimatorSpec we create dummy metric_fn
    # and args.
    if isinstance(estimator_spec, tf.estimator.EstimatorSpec):
      spec_fn, spec_args = lambda: estimator_spec.eval_metric_ops, []
    else:
      spec_fn, spec_args = estimator_spec.eval_metrics
    self._eval_metrics_store.add_eval_metrics(
        self._templatize_metric_fn(spec_fn), spec_args)

    # Record the head's loss as a mean "loss" metric.
    loss_fn = lambda loss: {"loss": tf_compat.v1.metrics.mean(loss)}
    loss_fn_args = [tf.reshape(estimator_spec.loss, [1])]
    if not self._use_tpu:
      # Off TPU the metric ops can be built once here and closed over.
      loss_ops = _call_eval_metrics((loss_fn, loss_fn_args))
      loss_fn, loss_fn_args = lambda: loss_ops, []
    self._eval_metrics_store.add_eval_metrics(
        self._templatize_metric_fn(loss_fn), loss_fn_args)

    # NOTE: the user supplied metrics_fn must be added last. This is because we
    # want user metrics to override AdaNet's metrics.
    if metric_fn:
      metric_fn_args = {}
      # Calling low level getargs for py_2_and_3 compatibility.
      argspec = inspect.getargs(metric_fn.__code__).args
      # Only pass the arguments the user's metric_fn actually declares.
      if "features" in argspec:
        metric_fn_args["features"] = features
      if "labels" in argspec:
        metric_fn_args["labels"] = labels
      if "predictions" in argspec:
        metric_fn_args["predictions"] = estimator_spec.predictions
      if not self._use_tpu:
        metric_fn_ops = _call_eval_metrics((metric_fn, metric_fn_args))
        metric_fn, metric_fn_args = lambda: metric_fn_ops, []
      self._eval_metrics_store.add_eval_metrics(
          self._templatize_metric_fn(metric_fn), metric_fn_args)

  def _templatize_metric_fn(self, metric_fn):
    """Wraps the given metric_fn with a template so it's Variables are shared.

    Hooks on TPU cannot depend on any graph Tensors. Instead the eval metrics
    returned by metric_fn are stored in Variables. These variables are later
    read from the evaluation hooks which run on the host CPU.

    Args:
      metric_fn: The function to wrap with a template.

    Returns:
      The original metric_fn wrapped with a template function.
    """

    def _metric_fn(*args, **kwargs):
      """The wrapping function to be returned."""

      # We can only be passed in either a dict or a list of tensors.
      args = args if args else kwargs
      metrics = _call_eval_metrics((metric_fn, args))
      if not self._use_tpu:
        return metrics

      logging.log_first_n(logging.INFO,
                          "Writing eval metrics to variables for TPU", 1)
      wrapped_metrics = {}
      for i, key in enumerate(sorted(metrics)):
        tensor, op = tf_compat.metric_op(metrics[key])
        # key cannot be in var name since it may contain illegal chars.
        var = tf_compat.v1.get_variable(
            "metric_{}".format(i),
            shape=tensor.shape,
            dtype=tensor.dtype,
            trainable=False,
            initializer=tf_compat.v1.zeros_initializer(),
            collections=[tf_compat.v1.GraphKeys.LOCAL_VARIABLES])
        if isinstance(op, tf.Operation) or op.shape != tensor.shape:
          # The update op produces no usable value (or a mismatched shape), so
          # assign the metric tensor into the variable after the update runs.
          with tf.control_dependencies([op]):
            op = var.assign(tensor)
        metric = (var, var.assign(op))
        wrapped_metrics[key] = metric
      return wrapped_metrics

    return tf_compat.v1.make_template("metric_fn_template", _metric_fn)

  def eval_metrics_tuple(self):
    """Returns tuple of (metric_fn, tensors) which can be executed on TPU."""

    if not self._eval_metrics_store.metric_fns:
      return None

    def _metric_fn(*args):
      # Re-pack the flat args and merge all stored metric dicts into one.
      metric_fns = self._eval_metrics_store.metric_fns
      metric_fn_args = self._eval_metrics_store.pack_args(args)
      eval_metric_ops = {}
      for metric_fn, args in zip(metric_fns, metric_fn_args):
        eval_metric_ops.update(_call_eval_metrics((metric_fn, args)))
      return eval_metric_ops

    return _metric_fn, self._eval_metrics_store.flatten_args()

  def eval_metrics_ops(self):
    """Returns the eval_metrics_ops."""
    return _call_eval_metrics(self.eval_metrics_tuple())
class _EnsembleMetrics(_SubnetworkMetrics):
  """A object which creates evaluation metrics for Ensembles."""

  def create_eval_metrics(self, features, labels, estimator_spec, metric_fn,
                          architecture):
    """Overrides parent's method to also add the ensemble's architecture."""

    super(_EnsembleMetrics, self).create_eval_metrics(features, labels,
                                                      estimator_spec, metric_fn)
    self._eval_metrics_store.add_eval_metrics(
        self._architecture_as_metric(architecture), [])

  def _architecture_as_metric(self, architecture):
    """Returns a representation of an ensemble's architecture as a tf.metric."""

    def _architecture_metric_fn():
      """Manually creates the tf.metric with a serialized tf.Summary proto."""

      # TODO: Should architecture.subnetworks be sorted by iteration
      # number first? Or perhaps, to make this more general, to have one line
      # for each iteration, with "|" as a delimiter if there are multiple
      # subnetworks in one iteration? Something like:
      # 0 linear
      # 1 dnn_width_32_depth_1 | dnn_width_64_depth_1
      # 2
      # 3 dnn_with_32_depth_2
      # Also consider adding ensemble candidate's name, though that is already
      # included in the ensemble name.
      # Renders e.g. "| linear | dnn |" for the TensorBoard text plugin.
      architecture_ = " | ".join([name for _, name in architecture.subnetworks])
      architecture_ = "| {} |".format(architecture_)
      summary_metadata = tf_compat.v1.SummaryMetadata(
          plugin_data=tf_compat.v1.SummaryMetadata.PluginData(
              plugin_name="text"))
      summary_proto = tf_compat.v1.summary.Summary()
      summary_proto.value.add(
          metadata=summary_metadata,
          tag="architecture/adanet",
          tensor=tf_compat.v1.make_tensor_proto(architecture_, dtype=tf.string))
      architecture_summary = tf.convert_to_tensor(
          value=summary_proto.SerializeToString(), name="architecture")
      # The metric value is the serialized summary; no update op is needed.
      return {
          "architecture/adanet/ensembles": (architecture_summary, tf.no_op())
      }

    if not self._use_tpu:
      # Off TPU the ops can be built once here; on TPU the fn itself is
      # returned so it runs on the host.
      ops = _architecture_metric_fn()
      return lambda: ops
    else:
      return _architecture_metric_fn
class _IterationMetrics(object):
  """A object which creates evaluation metrics for an Iteration."""

  def __init__(self,
               iteration_number,
               candidates,
               subnetwork_specs,
               use_tpu=False,
               replay_indices_for_all=None):
    # iteration_number: integer AdaNet iteration these metrics report for.
    # candidates: ensemble candidates whose eval metrics are aggregated.
    # subnetwork_specs: specs of the subnetworks built this iteration.
    # use_tpu: whether to use the TPU-compatible (fn, args) code path.
    # replay_indices_for_all: optional dict mapping each candidate to its list
    #   of replay indices; when set, they are exported as eval metrics.
    self._iteration_number = iteration_number
    self._candidates = candidates
    self._subnetwork_specs = subnetwork_specs
    self._use_tpu = use_tpu
    self._replay_indices_for_all = replay_indices_for_all

    self._candidates_eval_metrics_store = self._build_eval_metrics_store(
        [candidate.ensemble_spec for candidate in self._candidates])
    self._subnetworks_eval_metrics_store = self._build_eval_metrics_store(
        self._subnetwork_specs)
    # Caches the non-TPU (fn, args) result so ops are only built once.
    self._best_eval_metrics_tuple = None

  def _build_eval_metrics_store(self, specs):
    """Creates an _EvalMetricsStore from Subnetwork or Ensemble specs."""
    store = _EvalMetricsStore()
    for spec in specs:
      # Skip specs with no metrics to contribute.
      if not spec.eval_metrics or not spec.eval_metrics.eval_metrics_tuple():
        continue
      metric_fn, args = spec.eval_metrics.eval_metrics_tuple()
      store.add_eval_metrics(metric_fn, args)
    return store

  def best_eval_metric_ops(self, best_candidate_index, mode):
    """Returns best ensemble's metrics."""
    return _call_eval_metrics(
        self.best_eval_metrics_tuple(best_candidate_index, mode))

  def best_eval_metrics_tuple(self, best_candidate_index, mode):
    """Returns (metric_fn, tensors) which computes the best ensemble's metrics.

    Specifically, when metric_fn(tensors) is called, it separates the metric ops
    by metric name. All candidates are not required to have the same metrics.
    When they all share a given metric, an additional metric is added which
    represents that of the best candidate.

    Args:
      best_candidate_index: `Tensor` index of the best candidate in the list.
      mode: Defines whether this is training, evaluation or inference. Eval
        metrics are only defined during evaluation. See `ModeKeys`.

    Returns:
      Dict of metric results keyed by name. The values of the dict are the
      results of calling a metric function.
    """
    if mode != tf.estimator.ModeKeys.EVAL:
      return None

    # Flat argument list: candidate args, then subnetwork args, then the
    # best-candidate index appended last (popped first inside the fn).
    candidate_args = self._candidates_eval_metrics_store.flatten_args()
    subnetwork_args = self._subnetworks_eval_metrics_store.flatten_args()
    args = candidate_args + subnetwork_args
    args.append(tf.reshape(best_candidate_index, [1]))

    def _replay_eval_metrics(best_candidate_idx, eval_metric_ops):
      """Saves replay indices as eval metrics."""
      # _replay_indices_for_all is a dict: {candidate: [list of replay_indices]}
      # We are finding the max length replay list.
      pad_value = max([len(v) for _, v in self._replay_indices_for_all.items()])
      # Creating a matrix of (#candidate) times (max length replay indices).
      # Entry i,j is the jth replay index of the ith candidate (ensemble).
      # Shorter lists are right-padded with -1.
      replay_indices_as_tensor = tf.constant([
          value + [-1] * (pad_value - len(value))
          for _, value in self._replay_indices_for_all.items()
      ])
      # Passing the right entries (entries of the best candidate). Note: we use
      # TensorShape.as_list here so the code works on both TF 1.0 and 2.0.
      for iteration in range(replay_indices_as_tensor.get_shape().as_list()[1]):
        index_t = replay_indices_as_tensor[best_candidate_idx, iteration]
        eval_metric_ops["best_ensemble_index_{}".format(iteration)] = (index_t,
                                                                       index_t)

    def _best_eval_metrics_fn(*args):
      """Returns the best eval metrics."""

      with tf_compat.v1.variable_scope("best_eval_metrics"):
        args = list(args)
        idx, idx_update_op = tf_compat.v1.metrics.mean(args.pop())
        idx = tf.cast(idx, tf.int32)
        metric_fns = self._candidates_eval_metrics_store.metric_fns
        metric_fn_args = self._candidates_eval_metrics_store.pack_args(
            args[:len(candidate_args)])
        candidate_grouped_metrics = self._group_metric_ops(
            metric_fns, metric_fn_args)
        metric_fns = self._subnetworks_eval_metrics_store.metric_fns
        metric_fn_args = self._subnetworks_eval_metrics_store.pack_args(
            args[(len(args) - len(subnetwork_args)):])
        subnetwork_grouped_metrics = self._group_metric_ops(
            metric_fns, metric_fn_args)
        eval_metric_ops = {}
        for metric_name in sorted(candidate_grouped_metrics):
          metric_ops = candidate_grouped_metrics[metric_name]
          # Only surface metrics that every candidate reports.
          if len(metric_ops) != len(self._candidates):
            continue
          if metric_name == "loss":
            continue
          values, ops = list(six.moves.zip(*metric_ops))
          best_value = tf.stack(values)[idx]
          # All tensors in this function have been outfed from the TPU, so we
          # must update them manually, otherwise the TPU will hang indefinitely
          # for the value of idx to update.
          ops = list(ops)
          ops.append(idx_update_op)
          # Bundle subnetwork eval metric ops and ensemble "loss"" ops (which
          # is a restricted Estimator keyword) into other metric ops so that
          # they are computed.
          ensemble_loss_ops = candidate_grouped_metrics.get("loss", tf.no_op())
          all_ops = tf.group(ops, ensemble_loss_ops, subnetwork_grouped_metrics)
          eval_metric_ops[metric_name] = (best_value, all_ops)
        iteration_number = tf.constant(self._iteration_number)
        eval_metric_ops["iteration"] = (iteration_number, iteration_number)
        if self._replay_indices_for_all:
          _replay_eval_metrics(idx, eval_metric_ops)
        # tf.estimator.Estimator does not allow a "loss" key to be present in
        # its eval_metrics.
        assert "loss" not in eval_metric_ops
        return eval_metric_ops

    if not self._use_tpu:
      if not self._best_eval_metrics_tuple:
        best_ops = _call_eval_metrics((_best_eval_metrics_fn, args))
        self._best_eval_metrics_tuple = lambda: best_ops, []
      return self._best_eval_metrics_tuple
    return _best_eval_metrics_fn, args

  def _group_metric_ops(self, metric_fns, metric_fn_args):
    """Runs the metric_fns and groups the returned metric ops by name.

    Args:
      metric_fns: The eval_metrics functions to run.
      metric_fn_args: The eval_metrics function arguments.

    Returns:
      The metric ops grouped by name.
    """
    grouped_metrics = collections.defaultdict(list)
    for metric_fn, args in zip(metric_fns, metric_fn_args):
      eval_metric_ops = _call_eval_metrics((metric_fn, args))
      for metric_name in sorted(eval_metric_ops):
        metric_op = tf_compat.metric_op(eval_metric_ops[metric_name])
        grouped_metrics[metric_name].append(metric_op)
    return grouped_metrics
| 16,930 | 38.558411 | 109 | py |
adanet | adanet-master/adanet/core/ensemble_builder_test.py | """Test AdaNet ensemble single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.ensemble_builder import _EnsembleBuilder
from adanet.core.ensemble_builder import _SubnetworkManager
from adanet.core.summary import Summary
import adanet.core.testing_utils as tu
from adanet.ensemble import Candidate as EnsembleCandidate
from adanet.ensemble import ComplexityRegularizedEnsembler
from adanet.ensemble import MeanEnsemble
from adanet.ensemble import MeanEnsembler
from adanet.ensemble import MixtureWeightType
from adanet.subnetwork import Builder
from adanet.subnetwork import Subnetwork
import tensorflow.compat.v1 as tf_v1
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.training import training as train
from tensorflow.python.training import training_util
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib
class _Builder(Builder):
  """A configurable fake `adanet.subnetwork.Builder` used by these tests.

  The two train-op callbacks are injected so individual tests can place
  assertions on the loss and variable list that AdaNet hands to them.
  """

  def __init__(self,
               subnetwork_train_op_fn,
               mixture_weights_train_op_fn,
               use_logits_last_layer,
               seed=42,
               multi_head=False):
    # Callbacks invoked from build_*_train_op below; tests inject assertions.
    self._subnetwork_train_op_fn = subnetwork_train_op_fn
    self._mixture_weights_train_op_fn = mixture_weights_train_op_fn
    # When True, the built Subnetwork's last_layer aliases its logits.
    self._use_logits_last_layer = use_logits_last_layer
    self._seed = seed
    self._multi_head = multi_head

  @property
  def name(self):
    """Returns the fixed builder name used throughout the tests."""
    return "test"

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """Builds a tiny dense subnetwork while asserting on AdaNet's graph state.

    The asserts verify that AdaNet calls this method inside its patched
    context: empty trainable-variables collection, iteration-scoped global
    step, and scoped (faked) summaries.
    """
    assert features is not None
    assert training is not None
    assert iteration_step is not None
    assert summary is not None

    # Trainable variables collection should always be empty when
    # build_subnetwork is called.
    assert not tf_compat.v1.get_collection(
        tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES)

    # Subnetworks get iteration steps instead of global steps.
    # Every public alias of get_global_step must resolve to the same tensor.
    step_name = "subnetwork_test/iteration_step"
    assert step_name == tf_compat.tensor_name(
        tf_compat.v1.train.get_global_step())
    assert step_name == tf_compat.tensor_name(train.get_global_step())
    assert step_name == tf_compat.tensor_name(training_util.get_global_step())
    assert step_name == tf_compat.tensor_name(tf_v1.train.get_global_step())
    assert step_name == tf_compat.tensor_name(
        tf_compat.v1.train.get_or_create_global_step())
    assert step_name == tf_compat.tensor_name(train.get_or_create_global_step())
    assert step_name == tf_compat.tensor_name(
        training_util.get_or_create_global_step())
    assert step_name == tf_compat.tensor_name(
        tf_v1.train.get_or_create_global_step())

    # Subnetworks get scoped summaries (patched to return sentinel strings).
    assert "fake_scalar" == tf_compat.v1.summary.scalar("scalar", 1.)
    assert "fake_image" == tf_compat.v1.summary.image("image", 1.)
    assert "fake_histogram" == tf_compat.v1.summary.histogram("histogram", 1.)
    assert "fake_audio" == tf_compat.v1.summary.audio("audio", 1., 1.)

    last_layer = tu.dummy_tensor(shape=(2, 3))

    def logits_fn(logits_dim):
      # Deterministic dense projection; the fixed seed makes the expected
      # logits in the parameterized tests reproducible.
      return tf_compat.v1.layers.dense(
          last_layer,
          units=logits_dim,
          kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
              seed=self._seed))

    if self._multi_head:
      # Split the logits dimension evenly across the two heads.
      logits = {
          "head1": logits_fn(logits_dimension / 2),
          "head2": logits_fn(logits_dimension / 2)
      }
      last_layer = {"head1": last_layer, "head2": last_layer}
    else:
      logits = logits_fn(logits_dimension)

    return Subnetwork(
        last_layer=logits if self._use_logits_last_layer else last_layer,
        logits=logits,
        complexity=2,
        persisted_tensors={})

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    """Delegates to the injected subnetwork train-op callback."""
    assert iteration_step is not None
    assert summary is not None
    return self._subnetwork_train_op_fn(loss, var_list)

  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    """Delegates to the injected mixture-weights train-op callback."""
    assert iteration_step is not None
    assert summary is not None
    return self._mixture_weights_train_op_fn(loss, var_list)
class _BuilderPrunerAll(_Builder):
  """Builder that discards every subnetwork of the previous ensemble."""

  def prune_previous_ensemble(self, previous_ensemble):
    del previous_ensemble  # Unused: everything is pruned.
    return []
class _BuilderPrunerLeaveOne(_Builder):
  """Keeps only the first subnetwork of the previous ensemble.

  NOTE(review): the original docstring was copy-pasted from
  `_BuilderPrunerAll` ("Removed previous ensemble completely."); the code
  actually retains the subnetwork at index 0.
  """

  def prune_previous_ensemble(self, previous_ensemble):
    # Keep index 0 when a previous ensemble exists; otherwise prune all.
    if previous_ensemble:
      return [0]
    return []
class _FakeSummary(Summary):
  """Stub `adanet.Summary` whose ops are fixed sentinel strings.

  Tests compare against these sentinels to verify that AdaNet routed a
  summary call through the scoped summary object.
  """

  def scalar(self, name, tensor, family=None):
    del name, tensor, family  # Unused.
    return "fake_scalar"

  def image(self, name, tensor, max_outputs=3, family=None):
    del name, tensor, max_outputs, family  # Unused.
    return "fake_image"

  def histogram(self, name, values, family=None):
    del name, values, family  # Unused.
    return "fake_histogram"

  def audio(self, name, tensor, sample_rate, max_outputs=3, family=None):
    del name, tensor, sample_rate, max_outputs, family  # Unused.
    return "fake_audio"

  @contextlib.contextmanager
  def current_scope(self):
    # No scoping behavior is needed for the fake.
    yield
class EnsembleBuilderTest(tu.AdanetTestCase):
  """Parameterized end-to-end tests for `_EnsembleBuilder.build_ensemble_spec`."""

  @parameterized.named_parameters(
      {
          "testcase_name": "no_previous_ensemble",
          "want_logits": [[.016], [.117]],
          "want_loss": 1.338,
          "want_adanet_loss": 1.338,
          "want_ensemble_trainable_vars": 1,
      }, {
          "testcase_name": "mean_ensembler",
          "want_logits": [[.621], [.979]],
          "want_loss": 1.3702,
          "want_adanet_loss": 1.3702,
          "want_ensemble_trainable_vars": 0,
          "ensembler_class": MeanEnsembler,
          "want_predictions": {
              MeanEnsemble.MEAN_LAST_LAYER: [[-0.2807, -0.1377, -0.6763],
                                             [0.0245, -0.8935, -0.8284]],
          }
      }, {
          "testcase_name": "no_previous_ensemble_prune_all",
          "want_logits": [[.016], [.117]],
          "want_loss": 1.338,
          "want_adanet_loss": 1.338,
          "want_ensemble_trainable_vars": 1,
          "subnetwork_builder_class": _BuilderPrunerAll
      }, {
          "testcase_name": "no_previous_ensemble_prune_leave_one",
          "want_logits": [[.016], [.117]],
          "want_loss": 1.338,
          "want_adanet_loss": 1.338,
          "want_ensemble_trainable_vars": 1,
          "subnetwork_builder_class": _BuilderPrunerLeaveOne
      }, {
          "testcase_name": "default_mixture_weight_initializer_scalar",
          "mixture_weight_initializer": None,
          "mixture_weight_type": MixtureWeightType.SCALAR,
          "use_logits_last_layer": True,
          "want_logits": [[.580], [.914]],
          "want_loss": 1.362,
          "want_adanet_loss": 1.362,
          "want_ensemble_trainable_vars": 1,
      }, {
          "testcase_name": "default_mixture_weight_initializer_vector",
          "mixture_weight_initializer": None,
          "mixture_weight_type": MixtureWeightType.VECTOR,
          "use_logits_last_layer": True,
          "want_logits": [[.580], [.914]],
          "want_loss": 1.362,
          "want_adanet_loss": 1.362,
          "want_ensemble_trainable_vars": 1,
      }, {
          "testcase_name": "default_mixture_weight_initializer_matrix",
          "mixture_weight_initializer": None,
          "mixture_weight_type": MixtureWeightType.MATRIX,
          "want_logits": [[.016], [.117]],
          "want_loss": 1.338,
          "want_adanet_loss": 1.338,
          "want_ensemble_trainable_vars": 1,
      }, {
          "testcase_name":
              "default_mixture_weight_initializer_matrix_on_logits",
          "mixture_weight_initializer":
              None,
          "mixture_weight_type":
              MixtureWeightType.MATRIX,
          "use_logits_last_layer":
              True,
          "want_logits": [[.030], [.047]],
          "want_loss":
              1.378,
          "want_adanet_loss":
              1.378,
          "want_ensemble_trainable_vars":
              1,
      }, {
          "testcase_name": "no_previous_ensemble_use_bias",
          "use_bias": True,
          "want_logits": [[0.013], [0.113]],
          "want_loss": 1.338,
          "want_adanet_loss": 1.338,
          "want_ensemble_trainable_vars": 2,
      }, {
          "testcase_name": "no_previous_ensemble_predict_mode",
          "mode": tf.estimator.ModeKeys.PREDICT,
          "want_logits": [[0.], [0.]],
          "want_ensemble_trainable_vars": 1,
      }, {
          "testcase_name": "no_previous_ensemble_lambda",
          "adanet_lambda": .01,
          "want_logits": [[.014], [.110]],
          "want_loss": 1.340,
          "want_adanet_loss": 1.343,
          "want_ensemble_trainable_vars": 1,
      }, {
          "testcase_name": "no_previous_ensemble_beta",
          "adanet_beta": .1,
          "want_logits": [[.006], [.082]],
          "want_loss": 1.349,
          "want_adanet_loss": 1.360,
          "want_ensemble_trainable_vars": 1,
      }, {
          "testcase_name": "no_previous_ensemble_lambda_and_beta",
          "adanet_lambda": .01,
          "adanet_beta": .1,
          "want_logits": [[.004], [.076]],
          "want_loss": 1.351,
          "want_adanet_loss": 1.364,
          "want_ensemble_trainable_vars": 1,
      }, {
          "testcase_name": "multi_head",
          "want_logits": {
              "head1": [[.016], [.117]],
              "head2": [[.016], [.117]],
          },
          "want_loss": 2.675,
          "want_adanet_loss": 2.675,
          "multi_head": True,
          "want_ensemble_trainable_vars": 2,
          "want_subnetwork_trainable_vars": 4,
      }, {
          "testcase_name": "expect_subnetwork_exports",
          "mode": tf.estimator.ModeKeys.PREDICT,
          "want_logits": [[0.], [0.]],
          "want_ensemble_trainable_vars": 1,
          "export_subnetworks": True,
      }, {
          "testcase_name": "multi_head_expect_subnetwork_exports",
          "mode": tf.estimator.ModeKeys.PREDICT,
          "multi_head": True,
          "want_logits": {
              "head1": [[0.], [0.]],
              "head2": [[0.], [0.]],
          },
          "want_ensemble_trainable_vars": 2,
          "want_subnetwork_trainable_vars": 4,
          "export_subnetworks": True,
      }, {
          "testcase_name": "replay_no_prev",
          "adanet_beta": .1,
          "want_logits": [[.006], [.082]],
          "want_loss": 1.349,
          "want_adanet_loss": 1.360,
          "want_ensemble_trainable_vars": 1,
          "my_ensemble_index": 2,
          "want_replay_indices": [2],
      })
  @test_util.run_in_graph_and_eager_modes
  def test_build_ensemble_spec(
      self,
      want_logits,
      want_loss=None,
      want_adanet_loss=None,
      want_ensemble_trainable_vars=None,
      adanet_lambda=0.,
      adanet_beta=0.,
      ensemble_spec_fn=lambda: None,
      use_bias=False,
      use_logits_last_layer=False,
      mixture_weight_type=MixtureWeightType.MATRIX,
      mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
      warm_start_mixture_weights=True,
      subnetwork_builder_class=_Builder,
      mode=tf.estimator.ModeKeys.TRAIN,
      multi_head=False,
      want_subnetwork_trainable_vars=2,
      ensembler_class=ComplexityRegularizedEnsembler,
      my_ensemble_index=None,
      want_replay_indices=None,
      want_predictions=None,
      export_subnetworks=False,
      previous_ensemble_spec=None,
      previous_iteration_checkpoint=None):
    """Builds an ensemble spec and checks logits, losses, and exports."""
    seed = 64
    if multi_head:
      head = multi_head_lib.MultiHead(heads=[
          binary_class_head.BinaryClassHead(
              name="head1", loss_reduction=tf_compat.SUM),
          binary_class_head.BinaryClassHead(
              name="head2", loss_reduction=tf_compat.SUM)
      ])
    else:
      head = binary_class_head.BinaryClassHead(loss_reduction=tf_compat.SUM)
    builder = _EnsembleBuilder(
        head=head,
        export_subnetwork_logits=export_subnetworks,
        export_subnetwork_last_layer=export_subnetworks)

    def _subnetwork_train_op_fn(loss, var_list):
      # Injected into _Builder: asserts on the graph state AdaNet provides
      # when building the subnetwork's train op.
      self.assertLen(var_list, want_subnetwork_trainable_vars)
      self.assertEqual(
          var_list,
          tf_compat.v1.get_collection(
              tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES))
      # Subnetworks get iteration steps instead of global steps.
      self.assertEqual("subnetwork_test/iteration_step",
                       tf_compat.v1.train.get_global_step().op.name)
      # Subnetworks get scoped summaries.
      self.assertEqual("fake_scalar", tf_compat.v1.summary.scalar("scalar", 1.))
      self.assertEqual("fake_image", tf_compat.v1.summary.image("image", 1.))
      self.assertEqual("fake_histogram",
                       tf_compat.v1.summary.histogram("histogram", 1.))
      self.assertEqual("fake_audio",
                       tf_compat.v1.summary.audio("audio", 1., 1.))
      optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.1)
      return optimizer.minimize(loss, var_list=var_list)

    def _mixture_weights_train_op_fn(loss, var_list):
      # Injected into _Builder: same checks for the mixture-weights train op.
      self.assertLen(var_list, want_ensemble_trainable_vars)
      self.assertEqual(
          var_list,
          tf_compat.v1.get_collection(
              tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES))
      # Subnetworks get iteration steps instead of global steps.
      self.assertEqual("ensemble_test/iteration_step",
                       tf_compat.v1.train.get_global_step().op.name)
      # Subnetworks get scoped summaries.
      self.assertEqual("fake_scalar", tf_compat.v1.summary.scalar("scalar", 1.))
      self.assertEqual("fake_image", tf_compat.v1.summary.image("image", 1.))
      self.assertEqual("fake_histogram",
                       tf_compat.v1.summary.histogram("histogram", 1.))
      self.assertEqual("fake_audio",
                       tf_compat.v1.summary.audio("audio", 1., 1.))
      if not var_list:
        return tf.no_op()
      optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.1)
      return optimizer.minimize(loss, var_list=var_list)

    previous_ensemble = None
    # NOTE(review): this assignment overwrites the `previous_ensemble_spec`
    # parameter above, so any value a caller passes for it is ignored in
    # favor of `ensemble_spec_fn()` — confirm this is intended.
    previous_ensemble_spec = ensemble_spec_fn()
    if previous_ensemble_spec:
      previous_ensemble = previous_ensemble_spec.ensemble

    subnetwork_manager = _SubnetworkManager(head)
    subnetwork_builder = subnetwork_builder_class(
        _subnetwork_train_op_fn,
        _mixture_weights_train_op_fn,
        use_logits_last_layer,
        seed,
        multi_head=multi_head)

    with tf.Graph().as_default() as g:
      tf_compat.v1.train.get_or_create_global_step()
      # A trainable variable to later verify that creating models does not
      # affect the global variables collection.
      _ = tf_compat.v1.get_variable("some_var", shape=0, trainable=True)

      features = {"x": tf.constant([[1.], [2.]])}
      if multi_head:
        labels = {"head1": tf.constant([0, 1]), "head2": tf.constant([0, 1])}
      else:
        labels = tf.constant([0, 1])

      session_config = tf.compat.v1.ConfigProto(
          gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))

      subnetwork_spec = subnetwork_manager.build_subnetwork_spec(
          name="test",
          subnetwork_builder=subnetwork_builder,
          summary=_FakeSummary(),
          features=features,
          mode=mode,
          labels=labels,
          previous_ensemble=previous_ensemble)
      ensembler_kwargs = {}
      if ensembler_class is ComplexityRegularizedEnsembler:
        ensembler_kwargs.update({
            "mixture_weight_type": mixture_weight_type,
            "mixture_weight_initializer": mixture_weight_initializer,
            "warm_start_mixture_weights": warm_start_mixture_weights,
            "model_dir": self.test_subdirectory,
            "adanet_lambda": adanet_lambda,
            "adanet_beta": adanet_beta,
            "use_bias": use_bias
        })
      if ensembler_class is MeanEnsembler:
        ensembler_kwargs.update({"add_mean_last_layer_predictions": True})
      ensemble_spec = builder.build_ensemble_spec(
          # Note: when ensemble_spec is not None and warm_start_mixture_weights
          # is True, we need to make sure that the bias and mixture weights are
          # already saved to the checkpoint_dir.
          name="test",
          previous_ensemble_spec=previous_ensemble_spec,
          candidate=EnsembleCandidate("foo", [subnetwork_builder], None),
          ensembler=ensembler_class(**ensembler_kwargs),
          subnetwork_specs=[subnetwork_spec],
          summary=_FakeSummary(),
          features=features,
          iteration_number=1,
          labels=labels,
          my_ensemble_index=my_ensemble_index,
          mode=mode,
          previous_iteration_checkpoint=previous_iteration_checkpoint)

      if want_replay_indices:
        self.assertAllEqual(want_replay_indices,
                            ensemble_spec.architecture.replay_indices)

      with tf_compat.v1.Session(
          graph=g, config=session_config).as_default() as sess:
        sess.run(tf_compat.v1.global_variables_initializer())

        # Equals the number of subnetwork and ensemble trainable variables,
        # plus the one 'some_var' created earlier.
        self.assertLen(
            tf_compat.v1.trainable_variables(),
            want_subnetwork_trainable_vars + want_ensemble_trainable_vars + 1)

        # Get the real global step outside a subnetwork's context.
        self.assertEqual("global_step",
                         tf_compat.v1.train.get_global_step().op.name)
        self.assertEqual("global_step", train.get_global_step().op.name)
        self.assertEqual("global_step", tf_v1.train.get_global_step().op.name)
        self.assertEqual("global_step", training_util.get_global_step().op.name)
        self.assertEqual("global_step",
                         tf_compat.v1.train.get_or_create_global_step().op.name)
        self.assertEqual("global_step",
                         train.get_or_create_global_step().op.name)
        self.assertEqual("global_step",
                         tf_v1.train.get_or_create_global_step().op.name)
        self.assertEqual("global_step",
                         training_util.get_or_create_global_step().op.name)

        # Get global tf.summary outside a subnetwork's context.
        self.assertNotEqual("fake_scalar",
                            tf_compat.v1.summary.scalar("scalar", 1.))
        self.assertNotEqual("fake_image",
                            tf_compat.v1.summary.image("image", 1.))
        self.assertNotEqual("fake_histogram",
                            tf_compat.v1.summary.histogram("histogram", 1.))
        self.assertNotEqual("fake_audio",
                            tf_compat.v1.summary.audio("audio", 1., 1.))

        if mode == tf.estimator.ModeKeys.PREDICT:
          # In PREDICT mode only logits and export outputs are populated.
          self.assertAllClose(
              want_logits, sess.run(ensemble_spec.ensemble.logits), atol=1e-3)
          self.assertIsNone(ensemble_spec.loss)
          self.assertIsNone(ensemble_spec.adanet_loss)
          self.assertIsNone(ensemble_spec.train_op)
          self.assertIsNotNone(ensemble_spec.export_outputs)

          if not export_subnetworks:
            return

          if not multi_head:
            subnetwork_logits = sess.run(ensemble_spec.export_outputs[
                _EnsembleBuilder._SUBNETWORK_LOGITS_EXPORT_SIGNATURE].outputs)
            self.assertAllClose(subnetwork_logits["test"],
                                sess.run(subnetwork_spec.subnetwork.logits))
            subnetwork_last_layer = sess.run(ensemble_spec.export_outputs[
                _EnsembleBuilder._SUBNETWORK_LAST_LAYER_EXPORT_SIGNATURE]
                                             .outputs)
            self.assertAllClose(subnetwork_last_layer["test"],
                                sess.run(subnetwork_spec.subnetwork.last_layer))
          else:
            self.assertIn("subnetwork_logits_head2",
                          ensemble_spec.export_outputs)
            subnetwork_logits_head1 = sess.run(
                ensemble_spec.export_outputs["subnetwork_logits_head1"].outputs)
            self.assertAllClose(
                subnetwork_logits_head1["test"],
                sess.run(subnetwork_spec.subnetwork.logits["head1"]))
            # NOTE(review): this duplicates the assertIn above; it was
            # presumably meant to check "subnetwork_last_layer_head1" (or
            # "_head2") before running it below — confirm against intent.
            self.assertIn("subnetwork_logits_head2",
                          ensemble_spec.export_outputs)
            subnetwork_last_layer_head1 = sess.run(
                ensemble_spec.export_outputs["subnetwork_last_layer_head1"]
                .outputs)
            self.assertAllClose(
                subnetwork_last_layer_head1["test"],
                sess.run(subnetwork_spec.subnetwork.last_layer["head1"]))
          return

        # Verify that train_op works, previous loss should be greater than loss
        # after a train op.
        loss = sess.run(ensemble_spec.loss)
        train_op = tf.group(subnetwork_spec.train_op.train_op,
                            ensemble_spec.train_op.train_op)
        for _ in range(3):
          sess.run(train_op)
        self.assertGreater(loss, sess.run(ensemble_spec.loss))

        self.assertAllClose(
            want_logits, sess.run(ensemble_spec.ensemble.logits), atol=1e-3)

        if ensembler_class is ComplexityRegularizedEnsembler:
          # Bias should learn a non-zero value when used.
          bias = sess.run(ensemble_spec.ensemble.bias)
          if isinstance(bias, dict):
            bias = sum(abs(b) for b in bias.values())
          if use_bias:
            self.assertNotEqual(0., bias)
          else:
            self.assertAlmostEqual(0., bias)

        self.assertAlmostEqual(
            want_loss, sess.run(ensemble_spec.loss), places=3)
        self.assertAlmostEqual(
            want_adanet_loss, sess.run(ensemble_spec.adanet_loss), places=3)

        if want_predictions:
          self.assertAllClose(
              want_predictions,
              sess.run(ensemble_spec.ensemble.predictions),
              atol=1e-3)
class EnsembleBuilderMetricFnTest(parameterized.TestCase, tf.test.TestCase):
  """Tests how custom `metric_fn`s flow through subnetwork/ensemble metrics."""

  def _make_metrics(self,
                    metric_fn,
                    mode=tf.estimator.ModeKeys.EVAL,
                    multi_head=False,
                    sess=None):
    """Builds specs with metric_fn and returns evaluated metric values.

    Args:
      metric_fn: The custom metric_fn under test (may be None).
      mode: The `tf.estimator.ModeKeys` mode to build in.
      multi_head: Whether to use a two-headed multi-head.
      sess: Optional session; when given, its `run` is used to evaluate.

    Returns:
      A (subnetwork_metrics, ensemble_metrics) pair of dicts mapping metric
      name to the evaluated metric value tensor.
    """

    with context.graph_mode():
      if multi_head:
        head = multi_head_lib.MultiHead(heads=[
            binary_class_head.BinaryClassHead(
                name="head1", loss_reduction=tf_compat.SUM),
            binary_class_head.BinaryClassHead(
                name="head2", loss_reduction=tf_compat.SUM)
        ])
        labels = {"head1": tf.constant([0, 1]), "head2": tf.constant([0, 1])}
      else:
        head = binary_class_head.BinaryClassHead(loss_reduction=tf_compat.SUM)
        labels = tf.constant([0, 1])
      features = {"x": tf.constant([[1.], [2.]])}
      builder = _EnsembleBuilder(head, metric_fn=metric_fn)
      subnetwork_manager = _SubnetworkManager(head, metric_fn=metric_fn)
      # Train-op callbacks are no-ops; only metrics matter in this test.
      subnetwork_builder = _Builder(
          lambda unused0, unused1: tf.no_op(),
          lambda unused0, unused1: tf.no_op(),
          use_logits_last_layer=True)

      subnetwork_spec = subnetwork_manager.build_subnetwork_spec(
          name="test",
          subnetwork_builder=subnetwork_builder,
          summary=_FakeSummary(),
          features=features,
          mode=mode,
          labels=labels)
      ensemble_spec = builder.build_ensemble_spec(
          name="test",
          candidate=EnsembleCandidate("foo", [subnetwork_builder], None),
          ensembler=ComplexityRegularizedEnsembler(
              mixture_weight_type=MixtureWeightType.SCALAR),
          subnetwork_specs=[subnetwork_spec],
          summary=_FakeSummary(),
          features=features,
          iteration_number=0,
          labels=labels,
          mode=mode,
          my_ensemble_index=0,
          previous_ensemble_spec=None,
          previous_iteration_checkpoint=None)
      subnetwork_metric_ops = subnetwork_spec.eval_metrics.eval_metrics_ops()
      ensemble_metric_ops = ensemble_spec.eval_metrics.eval_metrics_ops()
      evaluate = self.evaluate
      if sess is not None:
        evaluate = sess.run
      evaluate((tf_compat.v1.global_variables_initializer(),
                tf_compat.v1.local_variables_initializer()))
      # Run the update ops once before reading the metric values.
      evaluate((subnetwork_metric_ops, ensemble_metric_ops))
      # Return the idempotent tensor part of the (tensor, op) metrics tuple.
      return {
          k: evaluate(subnetwork_metric_ops[k][0])
          for k in subnetwork_metric_ops
      }, {k: evaluate(ensemble_metric_ops[k][0]) for k in ensemble_metric_ops}

  def setUp(self):
    super(EnsembleBuilderMetricFnTest, self).setUp()
    # Metric construction below assumes a global step already exists.
    tf_compat.v1.train.create_global_step()

  @parameterized.named_parameters(
      {
          "testcase_name": "mode_train",
          "mode": tf.estimator.ModeKeys.TRAIN,
      }, {
          "testcase_name": "mode_predict",
          "mode": tf.estimator.ModeKeys.PREDICT,
      })
  @test_util.run_in_graph_and_eager_modes
  def test_only_adds_metrics_when_evaluating(self, mode):
    """Ensures that metrics are only added during evaluation.

    Adding metrics during training will break when running on TPU.

    Args:
      mode: The mode with which to run the test.
    """

    def metric_fn(features):
      return {"mean_x": tf_compat.v1.metrics.mean(features["x"])}

    subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn, mode)

    self.assertEmpty(subnetwork_metrics)
    self.assertEmpty(ensemble_metrics)

  @test_util.run_in_graph_and_eager_modes
  def test_should_add_metrics(self):
    """Custom metric_fn metrics appear alongside the head's own metrics."""

    def _test_metric_fn(metric_fn):
      subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn)
      self.assertIn("mean_x", subnetwork_metrics)
      self.assertIn("mean_x", ensemble_metrics)
      # mean of features["x"] == mean([1., 2.]) == 1.5.
      self.assertEqual(1.5, subnetwork_metrics["mean_x"])
      self.assertEqual(1.5, ensemble_metrics["mean_x"])
      # assert that it keeps original head metrics
      self.assertIn("average_loss", subnetwork_metrics)
      self.assertIn("average_loss", ensemble_metrics)

    def metric_fn_1(features):
      return {"mean_x": tf_compat.v1.metrics.mean(features["x"])}

    # TODO: Add support for tf.keras.metrics.Mean like `add_metrics`.
    _test_metric_fn(metric_fn_1)

  @test_util.run_in_graph_and_eager_modes
  def test_should_error_out_for_not_recognized_args(self):
    """metric_fn signatures with unsupported argument names are rejected."""
    head = binary_class_head.BinaryClassHead(loss_reduction=tf_compat.SUM)

    def metric_fn(features, not_recognized):
      _, _ = features, not_recognized
      return {}

    with self.assertRaisesRegexp(ValueError, "not_recognized"):
      _EnsembleBuilder(head, metric_fn=metric_fn)

  @test_util.run_in_graph_and_eager_modes
  def test_all_supported_args(self):
    """metric_fn may take features, predictions, and labels."""

    def metric_fn(features, predictions, labels):
      self.assertIn("x", features)
      self.assertIsNotNone(labels)
      self.assertIn("logistic", predictions)
      return {}

    self._make_metrics(metric_fn)

  @test_util.run_in_graph_and_eager_modes
  def test_all_supported_args_in_different_order(self):
    """Argument binding is by name, not position."""

    def metric_fn(labels, features, predictions):
      self.assertIn("x", features)
      self.assertIsNotNone(labels)
      self.assertIn("logistic", predictions)
      return {}

    self._make_metrics(metric_fn)

  @test_util.run_in_graph_and_eager_modes
  def test_all_args_are_optional(self):
    """A zero-argument metric_fn is also accepted."""

    def _test_metric_fn(metric_fn):
      subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn)
      self.assertEqual(2., subnetwork_metrics["two"])
      self.assertEqual(2., ensemble_metrics["two"])

    def metric_fn_1():
      return {"two": tf_compat.v1.metrics.mean(tf.constant([2.]))}

    # TODO: Add support for tf.keras.metrics.Mean like `add_metrics`.
    _test_metric_fn(metric_fn_1)

  @test_util.run_in_graph_and_eager_modes
  def test_overrides_existing_metrics(self):
    """A metric_fn result replaces a same-named metric from the head."""

    def _test_metric_fn(metric_fn):
      # Without the custom metric_fn, average_loss comes from the head.
      subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn=None)
      self.assertNotEqual(2., subnetwork_metrics["average_loss"])
      self.assertNotEqual(2., ensemble_metrics["average_loss"])

      with tf.Graph().as_default() as g, self.test_session(g) as sess:
        subnetwork_metrics, ensemble_metrics = self._make_metrics(
            metric_fn=metric_fn, sess=sess)
        self.assertEqual(2., subnetwork_metrics["average_loss"])
        self.assertEqual(2., ensemble_metrics["average_loss"])

    def metric_fn_1():
      return {"average_loss": tf_compat.v1.metrics.mean(tf.constant([2.]))}

    # TODO: Add support for tf.keras.metrics.Mean like `add_metrics`.
    _test_metric_fn(metric_fn_1)

  @test_util.run_in_graph_and_eager_modes
  def test_multi_head(self):
    """Tests b/123084079."""

    def metric_fn(predictions):
      # Multi-head predictions are keyed by (head_name, prediction_key).
      self.assertIn(("head1", "logits"), predictions)
      self.assertIn(("head2", "logits"), predictions)
      return {}

    self._make_metrics(metric_fn, multi_head=True)

  @test_util.run_in_graph_and_eager_modes
  def test_operation_metrics(self):
    """Metric ops whose update op is not a Tensor are supported."""

    def metric_fn():
      var = tf_compat.v1.get_variable(
          "metric_var",
          shape=[],
          trainable=False,
          initializer=tf_compat.v1.zeros_initializer(),
          collections=[tf_compat.v1.GraphKeys.LOCAL_VARIABLES])
      # A metric with an op that doesn't return a Tensor.
      op = tf.group(tf_compat.v1.assign_add(var, 1))
      return {"operation_metric": (var, op)}

    subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn)

    self.assertEqual(1., subnetwork_metrics["operation_metric"])
    self.assertEqual(1., ensemble_metrics["operation_metric"])

  @test_util.run_in_graph_and_eager_modes
  def test_eval_metric_different_shape_op(self):
    """The metric value tensor may have a different shape than its op."""

    def metric_fn():
      var = tf_compat.v1.get_variable(
          "metric_var",
          shape=[2],
          trainable=False,
          initializer=tf_compat.v1.zeros_initializer(),
          collections=[tf_compat.v1.GraphKeys.LOCAL_VARIABLES])
      # Shape of metric different from shape of op
      op = tf_compat.v1.assign_add(var, [1, 2])
      metric = tf.reshape(var[0] + var[1], [])
      return {"different_shape_metric": (metric, op)}

    subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn)

    self.assertEqual(3., subnetwork_metrics["different_shape_metric"])
    self.assertEqual(3., ensemble_metrics["different_shape_metric"])
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| 31,597 | 37.770552 | 83 | py |
adanet | adanet-master/adanet/core/testing_utils.py | """Test utilities for AdaNet single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import struct
import sys
from absl import flags
from absl.testing import parameterized
from adanet import ensemble as ensemble_lib
from adanet import subnetwork as subnetwork_lib
from adanet import tf_compat
from adanet.core.architecture import _Architecture
from adanet.core.candidate import _Candidate
from adanet.core.ensemble_builder import _EnsembleSpec
from adanet.core.ensemble_builder import _SubnetworkSpec
from adanet.core.eval_metrics import _EnsembleMetrics
from adanet.core.eval_metrics import _IterationMetrics
from adanet.core.eval_metrics import _SubnetworkMetrics
import tensorflow.compat.v2 as tf
def dummy_tensor(shape=(), random_seed=42):
  """Returns the value of a fresh, seeded, non-trainable random variable."""
  initial_value = tf_compat.random_normal(shape=shape, seed=random_seed)
  variable = tf.Variable(initial_value, trainable=False)
  return variable.read_value()
class ExportOutputKeys(object):
  """Different export output keys for the dummy ensemble builder."""

  # Keys selecting which fake `tf.estimator.export` output to build in
  # `_dummy_export_outputs`.
  CLASSIFICATION_CLASSES = "classification_classes"
  CLASSIFICATION_SCORES = "classification_scores"
  REGRESSION = "regression"
  PREDICTION = "prediction"
  # Maps to a raw (non-ExportOutput) value, for exercising error paths.
  INVALID = "invalid"
def dummy_ensemble_spec(name,
                        random_seed=42,
                        num_subnetworks=1,
                        bias=0.,
                        loss=None,
                        adanet_loss=None,
                        eval_metrics=None,
                        variables=None,
                        dict_predictions=False,
                        export_output_key=None,
                        subnetwork_builders=None,
                        train_op=None):
  """Creates a dummy `_EnsembleSpec` instance.

  Args:
    name: _EnsembleSpec's name.
    random_seed: A scalar random seed.
    num_subnetworks: The number of fake subnetworks in this ensemble.
    bias: Bias value.
    loss: Float loss to return. When None, it's picked from a random
      distribution.
    adanet_loss: Float AdaNet loss to return. When None, it's picked from a
      random distribution.
    eval_metrics: Optional eval metrics tuple of (metric_fn, tensor args).
    variables: List of `tf.Variable` instances associated with the ensemble.
    dict_predictions: Boolean whether to return predictions as a dictionary of
      `Tensor` or just a single float `Tensor`.
    export_output_key: An `ExportOutputKeys` for faking export outputs.
    subnetwork_builders: List of `adanet.subnetwork.Builder` objects.
    train_op: A train op.

  Returns:
    A dummy `_EnsembleSpec` instance.
  """

  if loss is None:
    loss = dummy_tensor([], random_seed)

  if adanet_loss is None:
    # Distinct seed multipliers below keep the dummy tensors decorrelated.
    adanet_loss = dummy_tensor([], random_seed * 2)
  else:
    adanet_loss = tf.convert_to_tensor(value=adanet_loss)

  logits = dummy_tensor([], random_seed * 3)
  if dict_predictions:
    predictions = {
        "logits": logits,
        "classes": tf.cast(tf.abs(logits), dtype=tf.int64)
    }
  else:
    predictions = logits
  # One weighted subnetwork template; it is replicated num_subnetworks times
  # when constructing the ensemble below.
  weighted_subnetworks = [
      ensemble_lib.WeightedSubnetwork(
          name=name,
          iteration_number=1,
          logits=dummy_tensor([2, 1], random_seed * 4),
          weight=dummy_tensor([2, 1], random_seed * 4),
          subnetwork=subnetwork_lib.Subnetwork(
              last_layer=dummy_tensor([1, 2], random_seed * 4),
              logits=dummy_tensor([2, 1], random_seed * 4),
              complexity=1.,
              persisted_tensors={}))
  ]

  export_outputs = _dummy_export_outputs(export_output_key, logits, predictions)
  bias = tf.constant(bias)
  return _EnsembleSpec(
      name=name,
      ensemble=ensemble_lib.ComplexityRegularized(
          weighted_subnetworks=weighted_subnetworks * num_subnetworks,
          bias=bias,
          logits=logits,
      ),
      architecture=_Architecture("dummy_ensemble_candidate", "dummy_ensembler"),
      subnetwork_builders=subnetwork_builders,
      predictions=predictions,
      step=tf.Variable(0),
      variables=variables,
      loss=loss,
      adanet_loss=adanet_loss,
      train_op=train_op,
      eval_metrics=eval_metrics,
      export_outputs=export_outputs)
def _dummy_export_outputs(export_output_key, logits, predictions):
  """Returns a dummy export-output dict for the given key, or None.

  Args:
    export_output_key: An `ExportOutputKeys` value selecting the output kind.
    logits: The logits `Tensor` to wrap.
    predictions: The predictions `Tensor` (or dict) to wrap.

  Returns:
    A single-entry dict keyed by `export_output_key`, or None when the key
    is not one of the recognized `ExportOutputKeys`.
  """
  if export_output_key == ExportOutputKeys.CLASSIFICATION_CLASSES:
    output = tf.estimator.export.ClassificationOutput(
        classes=tf.as_string(logits))
    return {export_output_key: output}
  if export_output_key == ExportOutputKeys.CLASSIFICATION_SCORES:
    output = tf.estimator.export.ClassificationOutput(scores=logits)
    return {export_output_key: output}
  if export_output_key == ExportOutputKeys.REGRESSION:
    return {export_output_key: tf.estimator.export.RegressionOutput(
        value=logits)}
  if export_output_key == ExportOutputKeys.PREDICTION:
    return {export_output_key: tf.estimator.export.PredictOutput(
        outputs=predictions)}
  if export_output_key == ExportOutputKeys.INVALID:
    # Deliberately not an ExportOutput, for exercising error handling.
    return {export_output_key: predictions}
  return None
def dummy_estimator_spec(loss=None, random_seed=42, eval_metric_ops=None):
  """Creates a dummy `EstimatorSpec` instance.

  Args:
    loss: Float loss to return. When None, it's picked from a random
      distribution.
    random_seed: Scalar seed for random number generators.
    eval_metric_ops: Optional dictionary of metric ops.

  Returns:
    A `EstimatorSpec` instance.
  """
  if loss is None:
    loss = dummy_tensor([], random_seed)
  return tf.estimator.EstimatorSpec(
      mode=tf.estimator.ModeKeys.TRAIN,
      predictions=dummy_tensor([], random_seed * 2),
      loss=loss,
      # Train_op cannot be tf.no_op() for Estimator, because in eager mode
      # tf.no_op() returns None.
      train_op=tf.constant(0.),
      eval_metric_ops=eval_metric_ops)
def dummy_input_fn(features, labels):
  """Returns an input_fn that returns feature and labels `Tensors`."""

  def _input_fn(params=None):
    del params  # Unused.
    feature_dict = {"x": tf.constant(features, name="x")}
    label_tensor = tf.constant(labels, name="y")
    return feature_dict, label_tensor

  return _input_fn
def dataset_input_fn(features=8., labels=9., return_dataset=False):
  """Returns feature and label `Tensors` via a `Dataset`."""
  # Mirrors `labels or 0.`: any falsy label value (e.g. None) is coerced
  # to the float 0.
  labels = labels if labels else 0.

  def _input_fn(params=None):
    """The `Dataset` input_fn which will be returned."""
    del params  # Unused.
    feature_ds = tf.data.Dataset.from_tensors([features])
    label_ds = tf.data.Dataset.from_tensors([labels])
    zipped = tf.data.Dataset.zip((feature_ds, label_ds))
    dataset = zipped.map(lambda f, l: ({"x": f}, l))
    if return_dataset:
      return dataset
    return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()

  return _input_fn
def head():
  """Returns a `RegressionHead` for use in tests."""
  from tensorflow_estimator.python.estimator.head import regression_head  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
  reduction = tf_compat.SUM_OVER_BATCH_SIZE
  return regression_head.RegressionHead(loss_reduction=reduction)
class ModifierSessionRunHook(tf_compat.SessionRunHook):
  """Modifies the graph by adding a variable."""

  def __init__(self, var_name="hook_created_variable"):
    # Name of the variable to create when the run begins.
    self._variable_name = var_name
    # Tracks whether a run is currently in progress.
    self._run_in_progress = False

  def begin(self):
    """Adds a variable to the graph.

    Raises:
      ValueError: If we've already begun a run.
    """
    if self._run_in_progress:
      raise ValueError("begin called twice without end.")
    self._run_in_progress = True
    _ = tf_compat.v1.get_variable(name=self._variable_name, initializer="")

  def end(self, session):
    """Marks the current run as finished.

    Args:
      session: A `tf.Session` object that can be used to run ops. Unused.

    Raises:
      ValueError: If we've not begun a run.
    """
    del session  # Unused.
    if not self._run_in_progress:
      raise ValueError("end called without begin.")
    self._run_in_progress = False
class AdanetTestCase(parameterized.TestCase, tf.test.TestCase):
  """A parameterized `TestCase` that manages a test subdirectory."""

  def setUp(self):
    super(AdanetTestCase, self).setUp()
    # Flags are not automatically parsed at this point.
    flags.FLAGS(sys.argv)
    # Give each test its own scratch directory, recreated from empty.
    subdir = os.path.join(flags.FLAGS.test_tmpdir, self.id())
    shutil.rmtree(subdir, ignore_errors=True)
    os.makedirs(subdir)
    self.test_subdirectory = subdir

  def tearDown(self):
    super(AdanetTestCase, self).tearDown()
    shutil.rmtree(self.test_subdirectory, ignore_errors=True)
def summary_simple_value(summary_value):
  """Returns the scalar parsed from the summary proto tensor_value bytes."""
  # The payload is a single little-endian float32.
  packed = summary_value.tensor.tensor_content
  (scalar,) = struct.unpack("<f", packed)
  return scalar
def check_eventfile_for_keyword(keyword, dir_):
  """Checks event files for the keyword.

  Scans every `events*` file under `dir_` and returns the first summary
  value whose tag equals `keyword`.

  Args:
    keyword: Exact summary tag to search for.
    dir_: Directory that contains the TF event files.

  Returns:
    The matching value: the float for scalar summaries, an
    `(height, width, colorspace)` tuple for image summaries, or the decoded
    payload for tensor-based (V2-style) summaries.

  Raises:
    ValueError: If `dir_` does not exist, no event file is found, or no
      summary with tag `keyword` is present.
  """
  # Flush any cached writers so the event files are complete on disk.
  tf_compat.v1.summary.FileWriterCache.clear()
  if not tf.io.gfile.exists(dir_):
    raise ValueError("Directory '{}' not found.".format(dir_))
  # Get last `Event` written.
  filenames = os.path.join(dir_, "events*")
  event_paths = tf.io.gfile.glob(filenames)
  if not event_paths:
    raise ValueError("Path '{}' not found.".format(filenames))
  for event_path in event_paths:
    for last_event in tf_compat.v1.train.summary_iterator(event_path):
      if last_event.summary is not None:
        for value in last_event.summary.value:
          if keyword == value.tag:
            # Legacy (V1) scalar summaries store the float directly.
            if value.HasField("simple_value"):
              return value.simple_value
            if value.HasField("image"):
              return (value.image.height, value.image.width,
                      value.image.colorspace)
            # V2-style summaries pack everything into a tensor; the plugin
            # name tells us how to decode it.
            if value.HasField("tensor"):
              if value.metadata.plugin_data.plugin_name == "scalars":
                return summary_simple_value(value)
              if value.metadata.plugin_data.plugin_name == "images":
                return (int(value.tensor.string_val[0]),
                        int(value.tensor.string_val[1]), 1)
              if value.tensor.string_val is not None:
                return value.tensor.string_val
  raise ValueError("Keyword '{}' not found in path '{}'.".format(
      keyword, filenames))
def create_ensemble_metrics(metric_fn,
                            use_tpu=False,
                            features=None,
                            labels=None,
                            estimator_spec=None,
                            architecture=None):
  """Builds a populated `_EnsembleMetrics` instance for tests.

  Args:
    metric_fn: A function taking any of `predictions`, `features` and
      `labels` (in any order) and returning a dict of metric name to
      `(metric_tensor, update_op)`. Entries override the estimator's own
      metrics on name collision.
    use_tpu: Whether to use TPU-specific variable sharing logic.
    features: Input `dict` of `Tensor` objects.
    labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
      (for multi-head).
    estimator_spec: The `EstimatorSpec` created by a `Head` instance. When
      omitted, a dummy eval spec with a constant loss is used.
    architecture: `_Architecture` object. When omitted, an empty
      architecture is used.

  Returns:
    An instance of _EnsembleMetrics.
  """
  if not estimator_spec:
    default_spec = tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
        mode=tf.estimator.ModeKeys.EVAL,
        loss=tf.constant(2.),
        predictions=None,
        eval_metrics=None)
    # Outside of TPU execution the TPU spec must be downgraded.
    estimator_spec = (
        default_spec if use_tpu else default_spec.as_estimator_spec())
  architecture = architecture or _Architecture(None, None)
  ensemble_metrics = _EnsembleMetrics(use_tpu=use_tpu)
  ensemble_metrics.create_eval_metrics(features, labels, estimator_spec,
                                       metric_fn, architecture)
  return ensemble_metrics
def create_subnetwork_metrics(metric_fn,
                              use_tpu=False,
                              features=None,
                              labels=None,
                              estimator_spec=None):
  """Builds a populated `_SubnetworkMetrics` instance for tests.

  Args:
    metric_fn: A function taking any of `predictions`, `features` and
      `labels` (in any order) and returning a dict of metric name to
      `(metric_tensor, update_op)`. Entries override the estimator's own
      metrics on name collision.
    use_tpu: Whether to use TPU-specific variable sharing logic.
    features: Input `dict` of `Tensor` objects.
    labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
      (for multi-head).
    estimator_spec: The `EstimatorSpec` created by a `Head` instance. When
      omitted, a dummy eval spec with a constant loss is used.

  Returns:
    An instance of _SubnetworkMetrics.
  """
  if not estimator_spec:
    default_spec = tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
        mode=tf.estimator.ModeKeys.EVAL,
        loss=tf.constant(2.),
        predictions=None,
        eval_metrics=None)
    # Outside of TPU execution the TPU spec must be downgraded.
    estimator_spec = (
        default_spec if use_tpu else default_spec.as_estimator_spec())
  subnetwork_metrics = _SubnetworkMetrics(use_tpu=use_tpu)
  subnetwork_metrics.create_eval_metrics(features, labels, estimator_spec,
                                         metric_fn)
  return subnetwork_metrics
def create_iteration_metrics(subnetwork_metrics=None,
                             ensemble_metrics=None,
                             use_tpu=False,
                             iteration_number=1):
  """Builds a populated `_IterationMetrics` instance for tests.

  Args:
    subnetwork_metrics: List of _SubnetworkMetrics objects.
    ensemble_metrics: List of _EnsembleMetrics objects.
    use_tpu: Whether to use TPU-specific variable sharing logic.
    iteration_number: What number iteration these metrics are for.

  Returns:
    An instance of _IterationMetrics that has been populated with the
    input metrics.
  """
  ensemble_metrics = ensemble_metrics or []
  subnetwork_metrics = subnetwork_metrics or []
  # Wrap each ensemble metric in a minimal candidate whose adanet_loss is
  # just the metric's index.
  candidates = [
      _Candidate(
          ensemble_spec=_EnsembleSpec(
              name="ensemble_{}".format(i),
              ensemble=None,
              architecture=None,
              subnetwork_builders=None,
              predictions=None,
              step=None,
              variables=None,
              eval_metrics=metric),
          adanet_loss=tf.constant(i),
          variables=None) for i, metric in enumerate(ensemble_metrics)
  ]
  # Wrap each subnetwork metric in a minimal spec shell.
  subnetwork_specs = [
      _SubnetworkSpec(
          name="subnetwork_{}".format(i),
          subnetwork=None,
          builder=None,
          predictions=None,
          step=None,
          loss=None,
          train_op=None,
          asset_dir=None,
          eval_metrics=metric,
          variables=None) for i, metric in enumerate(subnetwork_metrics)
  ]
  return _IterationMetrics(
      iteration_number,
      candidates,
      subnetwork_specs=subnetwork_specs,
      use_tpu=use_tpu)
| 16,916 | 33.52449 | 138 | py |
adanet | adanet-master/adanet/core/summary_test.py | """Test AdaNet summary single graph implementation for TF 1.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.summary import _ScopedSummary
from adanet.core.summary import _TPUScopedSummary
from adanet.core.summary import monkey_patched_summaries
from six.moves import range
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import summary_ops_v2
# pylint: enable=g-direct-tensorflow-import
def decode(proto_str):
  """Decodes serialized proto bytes into a text string."""
  text = proto_str.decode("utf-8")
  return text
class ScopedSummaryTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for `_ScopedSummary`, the TF 1.x single-graph summary wrapper."""

  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_scope(self, scope):
    # The scope passed at construction is exposed verbatim.
    scoped_summary = _ScopedSummary(scope)
    self.assertEqual(scope, scoped_summary.scope)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf2
  def test_scalar_summary(self, scope, skip_summary=False):
    scoped_summary = _ScopedSummary(scope, skip_summary)
    with self.test_session() as s:
      i = tf.constant(3)
      with tf.name_scope("outer"):
        im = scoped_summary.scalar("inner", i)
      summary_str = s.run(im)
    if skip_summary:
      # When summaries are skipped, the op evaluates to an empty string.
      self.assertEqual("", decode(summary_str))
      return
    summary = tf.Summary()
    summary.ParseFromString(summary_str)
    values = summary.value
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "outer/inner")
    self.assertEqual(values[0].simple_value, 3.0)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_scalar_summary_with_family(self, scope):
    scoped_summary = _ScopedSummary(scope)
    with self.test_session() as s:
      i = tf.constant(7)
      with tf.name_scope("outer"):
        # Two summaries with the same name: the second gets a "_1" suffix.
        im1 = scoped_summary.scalar("inner", i, family="family")
        im2 = scoped_summary.scalar("inner", i, family="family")
      sm1, sm2 = s.run([im1, im2])
    summary = tf.Summary()
    summary.ParseFromString(sm1)
    values = summary.value
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "family/outer/family/inner")
    self.assertEqual(values[0].simple_value, 7.0)
    summary.ParseFromString(sm2)
    values = summary.value
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "family/outer/family/inner_1")
    self.assertEqual(values[0].simple_value, 7.0)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_summarizing_variable(self, scope):
    scoped_summary = _ScopedSummary(scope)
    with self.test_session() as s:
      c = tf.constant(42.0)
      v = tf.Variable(c)
      ss = scoped_summary.scalar("summary", v)
      init = tf.global_variables_initializer()
      s.run(init)
      summ_str = s.run(ss)
    summary = tf.Summary()
    summary.ParseFromString(summ_str)
    self.assertLen(summary.value, 1)
    value = summary.value[0]
    self.assertEqual(value.tag, "summary")
    self.assertEqual(value.simple_value, 42.0)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf2
  def test_image_summary(self, scope, skip_summary=False):
    scoped_summary = _ScopedSummary(scope, skip_summary)
    with self.test_session() as s:
      i = tf.ones((5, 4, 4, 3))
      with tf.name_scope("outer"):
        im = scoped_summary.image("inner", i, max_outputs=3)
      summary_str = s.run(im)
    if skip_summary:
      self.assertEqual("", decode(summary_str))
      return
    summary = tf.Summary()
    summary.ParseFromString(summary_str)
    values = summary.value
    # max_outputs=3 emits one value per image, tagged ".../image/<idx>".
    self.assertLen(values, 3)
    tags = sorted(v.tag for v in values)
    expected = sorted("outer/inner/image/{}".format(i) for i in range(3))
    self.assertEqual(tags, expected)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_image_summary_with_family(self, scope):
    scoped_summary = _ScopedSummary(scope)
    with self.test_session() as s:
      i = tf.ones((5, 2, 3, 1))
      with tf.name_scope("outer"):
        im = scoped_summary.image("inner", i, max_outputs=3, family="family")
      summary_str = s.run(im)
    summary = tf.Summary()
    summary.ParseFromString(summary_str)
    values = summary.value
    self.assertLen(values, 3)
    tags = sorted(v.tag for v in values)
    expected = sorted(
        "family/outer/family/inner/image/{}".format(i) for i in range(3))
    self.assertEqual(tags, expected)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf2
  def test_histogram_summary(self, scope, skip_summary=False):
    scoped_summary = _ScopedSummary(scope, skip_summary)
    with self.test_session() as s:
      i = tf.ones((5, 4, 4, 3))
      with tf.name_scope("outer"):
        summ_op = scoped_summary.histogram("inner", i)
      summary_str = s.run(summ_op)
    if skip_summary:
      self.assertEqual("", decode(summary_str))
      return
    summary = tf.Summary()
    summary.ParseFromString(summary_str)
    self.assertLen(summary.value, 1)
    self.assertEqual(summary.value[0].tag, "outer/inner")
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_histogram_summary_with_family(self, scope):
    scoped_summary = _ScopedSummary(scope)
    with self.test_session() as s:
      i = tf.ones((5, 4, 4, 3))
      with tf.name_scope("outer"):
        summ_op = scoped_summary.histogram("inner", i, family="family")
      summary_str = s.run(summ_op)
    summary = tf.Summary()
    summary.ParseFromString(summary_str)
    self.assertLen(summary.value, 1)
    self.assertEqual(summary.value[0].tag, "family/outer/family/inner")
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf2
  def test_audio_summary(self, scope, skip_summary=False):
    scoped_summary = _ScopedSummary(scope, skip_summary)
    with self.test_session() as s:
      i = tf.ones((5, 3, 4))
      with tf.name_scope("outer"):
        aud = scoped_summary.audio("inner", i, 0.2, max_outputs=3)
      summary_str = s.run(aud)
    if skip_summary:
      self.assertEqual("", decode(summary_str))
      return
    summary = tf.Summary()
    summary.ParseFromString(summary_str)
    values = summary.value
    self.assertLen(values, 3)
    tags = sorted(v.tag for v in values)
    expected = sorted("outer/inner/audio/{}".format(i) for i in range(3))
    self.assertEqual(tags, expected)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_audio_summary_with_family(self, scope):
    scoped_summary = _ScopedSummary(scope)
    with self.test_session() as s:
      i = tf.ones((5, 3, 4))
      with tf.name_scope("outer"):
        aud = scoped_summary.audio(
            "inner", i, 0.2, max_outputs=3, family="family")
      summary_str = s.run(aud)
    summary = tf.Summary()
    summary.ParseFromString(summary_str)
    values = summary.value
    self.assertLen(values, 3)
    tags = sorted(v.tag for v in values)
    expected = sorted(
        "family/outer/family/inner/audio/{}".format(i) for i in range(3))
    self.assertEqual(tags, expected)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_summary_name_conversion(self, scope):
    # Illegal tag characters are replaced with underscores; a leading slash
    # is stripped.
    scoped_summary = _ScopedSummary(scope)
    c = tf.constant(3)
    summary = tf.Summary()
    with self.test_session() as sess:
      s = scoped_summary.scalar("name with spaces", c)
      summary.ParseFromString(sess.run(s))
      self.assertEqual(summary.value[0].tag, "name_with_spaces")
      s2 = scoped_summary.scalar("name with many $#illegal^: characters!", c)
      summary.ParseFromString(sess.run(s2))
      self.assertEqual(summary.value[0].tag,
                       "name_with_many___illegal___characters_")
      s3 = scoped_summary.scalar("/name/with/leading/slash", c)
      summary.ParseFromString(sess.run(s3))
      self.assertEqual(summary.value[0].tag, "name/with/leading/slash")
  @parameterized.named_parameters(
      {
          "testcase_name": "single_graph",
          "nest_graph": False,
      }, {
          "testcase_name": "nested_graph",
          "nest_graph": True,
      })
  @tf_compat.skip_for_tf2
  def test_merge_all(self, nest_graph):
    # merge_all must only return the summaries recorded in the current
    # graph and the current scope.
    c0 = tf.constant(0)
    c1 = tf.constant(1)
    scoped_summary0 = _ScopedSummary()
    scoped_summary0.scalar("c0", c0)
    scoped_summary0.scalar("c1", c1)
    scoped_summary1 = _ScopedSummary("scope1")
    scoped_summary1.scalar("c0", c0)
    scoped_summary1.scalar("c1", c1)
    scoped_summary2 = _ScopedSummary("scope2")
    scoped_summary2.scalar("c0", c0)
    scoped_summary2.scalar("c1", c1)
    config = tf.compat.v1.ConfigProto(
        gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))
    if nest_graph:
      with tf.Graph().as_default():
        scoped_summary2.scalar("c2", tf.constant(2))
        with tf.Session(config=config) as sess:
          summaries = scoped_summary2.merge_all()
          tf.logging.warn("summaries %s", summaries)
          summary = tf.Summary()
          summary.ParseFromString(sess.run(tf.summary.merge(summaries)))
          self.assertEqual(["c2"], [s.tag for s in summary.value])
          self.assertEqual([2], [s.simple_value for s in summary.value])
    with tf.Session(config=config) as sess:
      for scoped_summary in [scoped_summary0, scoped_summary1, scoped_summary2]:
        summaries = scoped_summary.merge_all()
        summary = tf.Summary()
        summary.ParseFromString(sess.run(tf.summary.merge(summaries)))
        self.assertEqual(["c0", "c1"], [s.tag for s in summary.value])
        self.assertEqual([0, 1], [s.simple_value for s in summary.value])
  @tf_compat.skip_for_tf2
  def test_summary_args(self):
    # All four summary types accept their documented positional arguments.
    summary = _ScopedSummary()
    summary.scalar("scalar", 1, "family")
    summary.image("image", 1, 3, "family")
    summary.histogram("histogram", 1, "family")
    summary.audio("audio", 1, 3, 3, "family")
    self.assertLen(summary.merge_all(), 4)
  @tf_compat.skip_for_tf2
  def test_summary_kwargs(self):
    # All four summary types accept their documented keyword arguments.
    summary = _ScopedSummary()
    summary.scalar(name="scalar", tensor=1, family="family")
    summary.image(name="image", tensor=1, max_outputs=3, family="family")
    summary.histogram(name="histogram", values=1, family="family")
    summary.audio(
        name="audio", tensor=1, sample_rate=3, max_outputs=3, family="family")
    self.assertLen(summary.merge_all(), 4)
class TPUScopedSummaryTest(tu.AdanetTestCase):
  """Tests for `_TPUScopedSummary`, which writes V2 summaries to event files."""

  def read_single_event_from_eventfile(self, summary):
    # Event files are written under <test dir>/<namespace>/<scope>.
    dir_ = self.test_subdirectory
    if summary.namespace:
      dir_ = os.path.join(dir_, summary.namespace)
    if summary.scope:
      dir_ = os.path.join(dir_, summary.scope)
    event_files = sorted(tf.gfile.Glob(os.path.join(dir_, "*.v2")))
    events = list(tf.train.summary_iterator(event_files[-1]))
    # Expect a boilerplate event for the file_version, then the summary one.
    self.assertGreaterEqual(len(events), 2)
    return events[1:]

  def write_summaries(self, summary):
    # Materialize the deferred summary ops at step 10 and flush them to disk.
    summary_ops = []
    writer = summary_ops_v2.create_file_writer(summary.logdir)
    with writer.as_default(), summary_ops_v2.always_record_summaries():
      for summary_fn, tensor in summary.summary_tuples():
        summary_ops.append(summary_fn(tensor, step=10))
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(summary_ops_v2.summary_writer_initializer_op())
      sess.run(summary_ops)
      sess.run(writer.flush())
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_scope(self, scope):
    # The scope passed at construction is exposed verbatim.
    scoped_summary = _TPUScopedSummary(self.test_subdirectory, scope=scope)
    self.assertEqual(scope, scoped_summary.scope)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf2
  def test_scalar_summary(self, scope, skip_summary=False):
    scoped_summary = _TPUScopedSummary(
        self.test_subdirectory, scope=scope, skip_summary=skip_summary)
    i = tf.constant(3)
    with tf.name_scope("outer"):
      scoped_summary.scalar("inner", i)
    self.write_summaries(scoped_summary)
    if skip_summary:
      # When summaries are skipped, nothing gets written to disk.
      return
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "outer/inner")
    self.assertEqual(values[0].simple_value, 3.0)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_scalar_summary_with_family(self, scope):
    scoped_summary = _TPUScopedSummary(self.test_subdirectory, scope=scope)
    i = tf.constant(7)
    with tf.name_scope("outer"):
      # Two summaries with the same name: the second gets a "_1" suffix.
      scoped_summary.scalar("inner", i, family="family")
      scoped_summary.scalar("inner", i, family="family")
    self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    self.assertLen(events[0].summary.value, 1)
    self.assertLen(events[1].summary.value, 1)
    self.assertEqual(
        {
            "family/outer/family/inner": 7.0,
            "family/outer/family/inner_1": 7.0
        }, {
            event.summary.value[0].tag: event.summary.value[0].simple_value
            for event in events
        })
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_summarizing_variable(self, scope):
    scoped_summary = _TPUScopedSummary(self.test_subdirectory, scope=scope)
    c = tf.constant(42.0)
    v = tf.Variable(c)
    scoped_summary.scalar("summary", v)
    self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    value = values[0]
    self.assertEqual(value.tag, "summary")
    self.assertEqual(value.simple_value, 42.0)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf2
  def test_image_summary(self, scope, skip_summary=False):
    scoped_summary = _TPUScopedSummary(
        self.test_subdirectory, scope=scope, skip_summary=skip_summary)
    i = tf.ones((5, 4, 4, 3))
    with tf.name_scope("outer"):
      scoped_summary.image("inner", i, max_outputs=3)
    self.write_summaries(scoped_summary)
    if skip_summary:
      return
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    # max_outputs=3 emits one value per image, tagged ".../image/<idx>".
    self.assertLen(values, 3)
    tags = sorted(v.tag for v in values)
    expected = sorted("outer/inner/image/{}".format(i) for i in range(3))
    self.assertEqual(tags, expected)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_image_summary_with_family(self, scope):
    scoped_summary = _TPUScopedSummary(self.test_subdirectory, scope=scope)
    i = tf.ones((5, 2, 3, 1))
    with tf.name_scope("outer"):
      scoped_summary.image("inner", i, max_outputs=3, family="family")
    self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 3)
    tags = sorted(v.tag for v in values)
    expected = sorted(
        "family/outer/family/inner/image/{}".format(i) for i in range(3))
    self.assertEqual(tags, expected)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf2
  def test_histogram_summary(self, scope, skip_summary=False):
    scoped_summary = _TPUScopedSummary(
        self.test_subdirectory, scope=scope, skip_summary=skip_summary)
    i = tf.ones((5, 4, 4, 3))
    with tf.name_scope("outer"):
      scoped_summary.histogram("inner", i)
    self.write_summaries(scoped_summary)
    if skip_summary:
      return
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "outer/inner")
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_histogram_summary_with_family(self, scope):
    scoped_summary = _TPUScopedSummary(self.test_subdirectory, scope=scope)
    i = tf.ones((5, 4, 4, 3))
    with tf.name_scope("outer"):
      scoped_summary.histogram("inner", i, family="family")
    self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "family/outer/family/inner")
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf2
  def test_audio_summary(self, scope, skip_summary=False):
    scoped_summary = _TPUScopedSummary(
        self.test_subdirectory, scope=scope, skip_summary=skip_summary)
    i = tf.ones((5, 3, 4))
    with tf.name_scope("outer"):
      scoped_summary.audio("inner", i, 0.2, max_outputs=3)
    self.write_summaries(scoped_summary)
    if skip_summary:
      return
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 3)
    tags = sorted(v.tag for v in values)
    expected = sorted("outer/inner/audio/{}".format(i) for i in range(3))
    self.assertEqual(tags, expected)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_audio_summary_with_family(self, scope):
    scoped_summary = _TPUScopedSummary(self.test_subdirectory, scope=scope)
    i = tf.ones((5, 3, 4))
    with tf.name_scope("outer"):
      scoped_summary.audio("inner", i, 0.2, max_outputs=3, family="family")
    self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 3)
    tags = sorted(v.tag for v in values)
    expected = sorted(
        "family/outer/family/inner/audio/{}".format(i) for i in range(3))
    self.assertEqual(tags, expected)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_summary_name_conversion(self, scope):
    # Illegal tag characters are replaced with underscores; a leading slash
    # is stripped.
    scoped_summary = _TPUScopedSummary(self.test_subdirectory, scope=scope)
    c = tf.constant(3)
    scoped_summary.scalar("name with spaces", c)
    scoped_summary.scalar("name with many $#illegal^: characters!", c)
    scoped_summary.scalar("/name/with/leading/slash", c)
    self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    self.assertLen(events, 3)
    tags = [event.summary.value[0].tag for event in events]
    self.assertIn("name_with_spaces", tags)
    self.assertIn("name_with_many___illegal___characters_", tags)
    self.assertIn("name/with/leading/slash", tags)
  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf2
  def test_current_scope(self, scope):
    # current_scope() resets the name scope to the one active when the
    # summary object was created, dropping the outer1/outer2 prefixes.
    scoped_summary = _TPUScopedSummary(self.test_subdirectory, scope=scope)
    i = tf.constant(3)
    with tf.variable_scope("outer1"):
      with tf.variable_scope("outer2"):
        with scoped_summary.current_scope():
          with tf.variable_scope("inner1"):
            scoped_summary.scalar("inner2/a/b/c", i)
    self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "inner1/inner2/a/b/c")
    self.assertEqual(values[0].simple_value, 3.0)
  @tf_compat.skip_for_tf2
  def test_summary_args(self):
    # All four summary types accept their documented positional arguments.
    summary = _TPUScopedSummary(self.test_subdirectory)
    summary.scalar("scalar", 1, "family")
    summary.image("image", 1, 3, "family")
    summary.histogram("histogram", 1, "family")
    summary.audio("audio", 1, 3, 3, "family")
    self.assertLen(summary.summary_tuples(), 4)
  @tf_compat.skip_for_tf2
  def test_summary_kwargs(self):
    # All four summary types accept their documented keyword arguments.
    summary = _TPUScopedSummary(self.test_subdirectory)
    summary.scalar(name="scalar", tensor=1, family="family")
    summary.image(name="image", tensor=1, max_outputs=3, family="family")
    summary.histogram(name="histogram", values=1, family="family")
    summary.audio(
        name="audio", tensor=1, sample_rate=3, max_outputs=3, family="family")
    self.assertLen(summary.summary_tuples(), 4)
def _summaries():
  """Returns all summary functions."""
  summary_fns = []
  # Order matters: scalar, audio, histogram, image per module, mirroring
  # how the monkey-patch tests iterate over them.
  for module in (tf.summary, tf_compat.v1.summary, tf_compat.v2.summary):
    summary_fns.extend(
        [module.scalar, module.audio, module.histogram, module.image])
  try:
    contrib_summary = tf.contrib.summary
    summary_fns.extend([
        contrib_summary.scalar, contrib_summary.audio,
        contrib_summary.histogram, contrib_summary.image
    ])
  except (AttributeError, ImportError):
    # TF 2.0 eliminates tf.contrib.
    pass
  return summary_fns
class MonkeyPatchTest(parameterized.TestCase, tf.test.TestCase):
  """Tests that `monkey_patched_summaries` swaps and restores tf.summary."""

  def _get_summary_ops(self, summary):
    # TPU summaries are deferred (fn, arg) tuples; regular ones are already
    # registered ops retrievable via merge_all().
    if isinstance(summary, _TPUScopedSummary):
      return [fn(arg, step=10) for fn, arg in summary.summary_tuples()]
    return summary.merge_all()
  @parameterized.named_parameters(
      {
          "testcase_name": "with_scoped_summary",
          "summary_maker": _ScopedSummary,
      }, {
          "testcase_name":
              "with_tpu_scoped_summary",
          "summary_maker":
              functools.partial(_TPUScopedSummary, logdir="/tmp/fakedir")
      })
  @tf_compat.skip_for_tf2
  def test_monkey_patched_summaries_args(self, summary_maker):
    summary = summary_maker()
    before = _summaries()
    with monkey_patched_summaries(summary):
      # Inside the context every summary function has been replaced.
      for want, got in zip(before, _summaries()):
        self.assertNotEqual(want, got)
      tf.summary.scalar("scalar", 1, ["collection"], "family")
      tf.summary.image("image", 1, 3, ["collection"], "family")
      tf.summary.histogram("histogram", 1, ["collection"], "family")
      tf.summary.audio("audio", 1, 3, 3, ["collection"], "family")
      want_summary_fn_count = 4
      try:
        tf.contrib.summary.scalar("scalar_v2", 1, "family", 10)
        tf.contrib.summary.image("image_v2", 1, True, 3, "family", 10)
        tf.contrib.summary.histogram("histogram_v2", 1, "family", 10)
        tf.contrib.summary.audio("audio_v2", 1, 3, 3, "family", 10)
        want_summary_fn_count += 4
      except (AttributeError, ImportError):
        # TF 2.0 eliminates tf.contrib.
        pass
    # Outside the context the originals must be restored.
    self.assertEqual(before, _summaries())
    self.assertLen(self._get_summary_ops(summary), want_summary_fn_count)
  @parameterized.named_parameters(
      {
          "testcase_name": "with_scoped_summary",
          "summary_maker": _ScopedSummary,
      }, {
          "testcase_name":
              "with_tpu_scoped_summary",
          "summary_maker":
              functools.partial(_TPUScopedSummary, logdir="/tmp/fakedir"),
      })
  @tf_compat.skip_for_tf2
  def test_monkey_patched_summaries_kwargs(self, summary_maker):
    summary = summary_maker()
    before = _summaries()
    with monkey_patched_summaries(summary):
      # Inside the context every summary function has been replaced.
      for want, got in zip(before, _summaries()):
        self.assertNotEqual(want, got)
      tf.summary.scalar(
          name="scalar", tensor=1, collections=["collection"], family="family")
      tf.summary.image(
          name="image",
          tensor=1,
          max_outputs=3,
          collections=["collection"],
          family="family")
      tf.summary.histogram(
          name="histogram",
          values=1,
          collections=["collection"],
          family="family")
      tf.summary.audio(
          name="audio",
          tensor=1,
          sample_rate=3,
          max_outputs=3,
          collections=["collection"],
          family="family")
      want_summary_fn_count = 4
      try:
        tf.contrib.summary.scalar(
            name="scalar_v2", tensor=1, family="family", step=10)
        tf.contrib.summary.image(
            name="image_v2",
            tensor=1,
            bad_color=True,
            max_images=3,
            family="family",
            step=10)
        tf.contrib.summary.histogram(
            name="histogram_v2", tensor=1, family="family", step=10)
        tf.contrib.summary.audio(
            name="audio_v2",
            tensor=1,
            sample_rate=3,
            max_outputs=3,
            family="family",
            step=10)
        want_summary_fn_count += 4
      except (AttributeError, ImportError):
        # TF 2.0 eliminates tf.contrib.
        pass
    # Outside the context the originals must be restored.
    self.assertEqual(before, _summaries())
    self.assertLen(self._get_summary_ops(summary), want_summary_fn_count)
# Test entry point: delegates to the TF test runner.
if __name__ == "__main__":
  tf.test.main()
| 29,769 | 33.179104 | 80 | py |
adanet | adanet-master/adanet/core/estimator_distributed_test.py | """Test AdaNet estimator cluster training support.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
import json
import os
import shutil
import socket
import subprocess
import sys
import time
from absl import flags
from absl import logging
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.timer import _CountDownTimer
import tensorflow.compat.v2 as tf
# A process. name is a string identifying the process in logs. stderr is a file
# object of the process's stderr.
_ProcessInfo = collections.namedtuple("_ProcessInfo",
["name", "popen", "stderr"])
def _create_task_process(task_type, task_index, estimator_type,
                         placement_strategy, tf_config, model_dir):
  """Launches one estimator cluster task as a separate python process.

  Args:
    task_type: 'chief', 'worker' or 'ps'.
    task_index: The index of the task within the cluster.
    estimator_type: The estimator type to train. 'estimator' or 'autoensemble'.
    placement_strategy: The distributed placement strategy.
    tf_config: Dictionary representation of the TF_CONFIG environment variable.
      A deep copy is made, so the caller's dict is never mutated.
    model_dir: The Estimator's model directory.

  Returns:
    A _ProcessInfo namedtuple of the running process. The stderr field of this
    tuple must be closed by the caller once the process ends.
  """
  name = "%s_%s" % (task_type, task_index)
  args = ["python", "adanet/core/estimator_distributed_test_runner.py"]
  args.append("--estimator_type={}".format(estimator_type))
  args.append("--placement_strategy={}".format(placement_strategy))
  # Log everything to stderr.
  args.append("--stderrthreshold=info")
  args.append("--model_dir={}".format(model_dir))
  logging.info("Spawning %s process: %s", name, " ".join(args))
  logging.info("Logging to %s", model_dir)
  err_file = open(os.path.join(model_dir, "%s_stderr.txt" % name), "w+")
  # Point the task entry of a private copy of TF_CONFIG at this task.
  config = copy.deepcopy(tf_config)
  config["task"]["type"] = task_type
  config["task"]["index"] = task_index
  env = os.environ.copy()
  env.update({
      # Allow stderr to be viewed before the process ends.
      "PYTHONUNBUFFERED": "1",
      "TF_CPP_MIN_LOG_LEVEL": "0",
      "TF_CONFIG": json.dumps(config),
      # Change gRPC polling strategy to prevent blocking forever.
      # See https://github.com/tensorflow/tensorflow/issues/17852.
      "GRPC_POLL_STRATEGY": "poll",
  })
  child = subprocess.Popen(args, stderr=err_file, env=env)
  return _ProcessInfo(name, child, err_file)
def _pick_unused_port():
"""Returns a free port on localhost."""
for family in (socket.AF_INET6, socket.AF_INET):
try:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.bind(("", 0)) # Passing port '0' binds to a free port on localhost.
port = sock.getsockname()[1]
sock.close()
return port
except socket.error:
continue
raise socket.error
def log_all(process, status):
  """Logs full text to INFO without truncating.

  Dumps the entire captured stderr of `process` to the INFO log, wrapped in
  BEGIN/END banner lines so it is easy to locate in interleaved cluster logs.

  Args:
    process: A _ProcessInfo namedtuple; its `stderr` file object is rewound
      to the beginning and read line by line.
    status: String describing the process state (e.g. 'completed'), included
      in the header line.
  """
  logging.info("Logging STDERR for %s process %s", status, process.name)
  logging.info("===================== BEGIN %s LOG =====================",
               process.name)
  # Rewind: the file was written by the child process, so the position may be
  # at the end.
  process.stderr.seek(0)
  for line in process.stderr:
    logging.info("FROM %s: %s", process.name, line)
  logging.info("====================== END %s LOG ======================",
               process.name)
class EstimatorDistributedTrainingTest(parameterized.TestCase,
                                       tf.test.TestCase):
  """Tests distributed training.

  Each test case spawns a TF_CONFIG-style cluster (chief, workers, parameter
  servers, and an evaluator) as separate OS processes, waits for the
  trainable tasks to exit successfully, then kills the parameter servers
  (which never terminate on their own).
  """

  def setUp(self):
    super(EstimatorDistributedTrainingTest, self).setUp()
    flags.FLAGS(sys.argv)
    # Setup and cleanup test directory.
    self.test_subdirectory = os.path.join(flags.FLAGS.test_tmpdir, self.id())
    shutil.rmtree(self.test_subdirectory, ignore_errors=True)
    os.makedirs(self.test_subdirectory)

  def _wait_for_processes(self, wait_processes, kill_processes, timeout_secs):
    """Waits until all `wait_processes` finish, then kills `kill_processes`.

    Fails an assert if a process in `wait_processes` finishes unsuccessfully.
    The processes in `kill_processes` are assumed to never finish so they are
    killed.

    Args:
      wait_processes: A list of _ProcessInfo tuples. This function will wait
        for each to finish.
      kill_processes: A list of _ProcessInfo tuples. Each will be killed once
        every process in `wait_processes` is finished.
      timeout_secs: Seconds to wait before timing out and terminating
        processes.

    Returns:
      A list of strings, each which is a string of the stderr of a wait
      process.

    Raises:
      Exception: When waiting for tasks to finish times out.
    """
    timer = _CountDownTimer(timeout_secs)
    finished_wait_processes = set()
    # Tracks approximate elapsed seconds per process: each poll loop sleeps
    # 0.25s and adds 0.25 to the counter.
    poll_count = {wait_process: 0.0 for wait_process in wait_processes}
    while len(finished_wait_processes) < len(wait_processes):
      if timer.secs_remaining() == 0:
        logging.error("Timed out! Outputting logs of unfinished processes:")
        for i, wait_process in enumerate(wait_processes):
          if i in finished_wait_processes:
            continue
          log_all(wait_process, "incompleted")
        raise Exception("Timed out waiting for tasks to complete.")
      for i, wait_process in enumerate(wait_processes):
        if i in finished_wait_processes:
          continue
        ret_code = wait_process.popen.poll()
        if ret_code is None:
          poll_count[wait_process] += 0.25
          # Log progress roughly every 10 elapsed seconds.
          if ((poll_count[wait_process] / 10.) -
              int(poll_count[wait_process] / 10.)) == 0:
            logging.info("%d secs has elapsed for %s", poll_count[wait_process],
                         wait_process.name)
          continue
        logging.info("%s finished", wait_process.name)
        log_all(wait_process, "completed")
        self.assertEqual(0, ret_code)
        finished_wait_processes.add(i)
      for kill_process in kill_processes:
        ret_code = kill_process.popen.poll()
        # Kill processes should not end until we kill them.
        # If it returns early, note the return code.
        if ret_code is not None:
          logging.error("kill process %s ended with ret_code %d",
                        kill_process.name, ret_code)
          log_all(kill_process, "ended with code {}".format(ret_code))
        self.assertIsNone(ret_code)
      # Delay between polling loops.
      time.sleep(0.25)
    logging.info("All wait processes finished")
    for i, kill_process in enumerate(kill_processes):
      # Kill each kill process.
      kill_process.popen.kill()
      kill_process.popen.wait()
      log_all(kill_process, "killed")

  # pylint: disable=g-complex-comprehension
  @parameterized.named_parameters(
      itertools.chain(*[
          [
              {
                  "testcase_name": "{}_one_worker".format(placement),
                  "placement_strategy": placement,
                  "num_workers": 1,
                  "num_ps": 0,
              },
              {
                  "testcase_name": "{}_one_worker_one_ps".format(placement),
                  "placement_strategy": placement,
                  "num_workers": 1,
                  "num_ps": 1,
              },
              {
                  "testcase_name": "{}_two_workers_one_ps".format(placement),
                  "placement_strategy": placement,
                  "num_workers": 2,
                  "num_ps": 1,
              },
              {
                  "testcase_name":
                      "{}_three_workers_three_ps".format(placement),
                  "placement_strategy":
                      placement,
                  "num_workers":
                      3,
                  "num_ps":
                      3,
              },
              {
                  "testcase_name": "{}_five_workers_three_ps".format(placement),
                  "placement_strategy": placement,
                  "num_workers": 5,
                  "num_ps": 3,
              },
              {
                  "testcase_name":
                      "autoensemble_{}_five_workers_three_ps".format(placement),
                  "estimator":
                      "autoensemble",
                  "placement_strategy":
                      placement,
                  "num_workers":
                      5,
                  "num_ps":
                      3,
              },
              # TODO: Need to restore boosted trees support.
              # {
              #     "testcase_name":
              #         "autoensemble_trees_multiclass_{}_five_workers_three_ps"
              #         .format(placement),
              #     "estimator":
              #         "autoensemble_trees_multiclass",
              #     "placement_strategy":
              #         placement,
              #     "num_workers":
              #         5,
              #     "num_ps":
              #         3,
              # },
              {
                  "testcase_name":
                      "estimator_with_experimental_multiworker_{}_five_workers"
                      .format(placement),
                  "estimator":
                      "estimator_with_experimental_multiworker_strategy",
                  "placement_strategy":
                      placement,
                  "num_workers":
                      5,
                  # Multiworker strategy means that all workers hold a copy of
                  # the variables, and there are no parameter servers.
                  "num_ps":
                      0,
              },
          ] for placement in ["replication", "round_robin"]
      ]))
  # pylint: enable=g-complex-comprehension
  # TODO: Test distributed training in TF 2.
  @tf_compat.skip_for_tf2
  def test_distributed_training(self,
                                num_workers,
                                num_ps,
                                placement_strategy,
                                estimator="estimator"):
    """Uses multiprocessing to simulate a distributed training environment."""
    # Inspired by `tf.test.create_local_cluster`.
    worker_ports = [_pick_unused_port() for _ in range(num_workers)]
    ps_ports = [_pick_unused_port() for _ in range(num_ps)]
    ws_targets = ["localhost:%s" % port for port in worker_ports]
    ps_targets = ["localhost:%s" % port for port in ps_ports]
    # For details see:
    # https://www.tensorflow.org/api_docs/python/tf/estimator/train_and_evaluate
    tf_config = {
        "cluster": {
            # The chief is always worker 0.
            "chief": [ws_targets[0]],
        },
        "task": {
            "type": "chief",
            "index": 0
        },
    }
    # The chief is already worker 0.
    if len(ws_targets) > 1:
      tf_config["cluster"]["worker"] = ws_targets[1:]
    if ps_targets:
      tf_config["cluster"]["ps"] = ps_targets
    worker_processes = []
    ps_processes = []
    evaluator_processes = []
    model_dir = self.test_subdirectory
    # Chief
    worker_processes.append(
        _create_task_process("chief", 0, estimator, placement_strategy,
                             tf_config, model_dir))
    # Workers
    for i in range(len(ws_targets[1:])):
      worker_processes.append(
          _create_task_process("worker", i, estimator, placement_strategy,
                               tf_config, model_dir))
    # Parameter Servers (PS)
    for i in range(len(ps_targets)):
      ps_processes.append(
          _create_task_process("ps", i, estimator, placement_strategy,
                               tf_config, model_dir))
    # Evaluator
    evaluator_processes.append(
        _create_task_process("evaluator", 0, estimator, placement_strategy,
                             tf_config, model_dir))
    # Run processes.
    try:
      # NOTE: Parameter servers do not shut down on their own.
      self._wait_for_processes(
          worker_processes + evaluator_processes,
          kill_processes=ps_processes,
          timeout_secs=500)
    finally:
      for process in worker_processes + ps_processes + evaluator_processes:
        try:
          process.popen.kill()
        except OSError:
          pass  # It's OK (and expected) if the process already exited.
        process.stderr.close()
# Test entry point: delegates to the TF test runner.
if __name__ == "__main__":
  tf.test.main()
| 13,190 | 36.053371 | 80 | py |
adanet | adanet-master/adanet/core/summary_v2_test.py | """Test AdaNet summary single graph implementation for TF 2.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import struct
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.summary import _ScopedSummaryV2
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
def simple_value(summary_value):
  """Decodes a scalar float from a summary value's serialized tensor bytes.

  Args:
    summary_value: A summary value proto whose `tensor.tensor_content` holds
      exactly one little-endian 32-bit float.

  Returns:
    The decoded Python float.
  """
  (value,) = struct.unpack("<f", summary_value.tensor.tensor_content)
  return value
class ScopedSummaryV2Test(tu.AdanetTestCase):
  """Tests for `_ScopedSummaryV2` summary recording and event-file output."""

  def read_single_event_from_eventfile(self, summary):
    """Reads the summary events written for `summary` from disk.

    The event directory is the test subdirectory, extended by the summary's
    namespace and scope when they are set. Only the most recent "*.v2" event
    file is read.

    Returns:
      The list of events following the boilerplate file_version event.
    """
    dir_ = self.test_subdirectory
    if summary.namespace:
      dir_ = os.path.join(dir_, summary.namespace)
    if summary.scope:
      dir_ = os.path.join(dir_, summary.scope)
    event_files = sorted(tf.io.gfile.glob(os.path.join(dir_, "*.v2")))
    events = list(tf.compat.v1.train.summary_iterator(event_files[-1]))
    # Expect a boilerplate event for the file_version, then the summary one.
    self.assertGreaterEqual(len(events), 2)
    return events[1:]

  def write_summaries(self, summary):
    """Materializes `summary`'s recorded (fn, tensor) tuples and flushes."""
    summary_ops = []
    writer = tf.summary.create_file_writer(summary.logdir)
    with writer.as_default():
      for summary_fn, tensor in summary.summary_tuples():
        summary_ops.append(summary_fn(tensor, step=10))
    writer_flush = writer.flush()
    self.evaluate([tf.compat.v1.global_variables_initializer(), writer.init()])
    self.evaluate(summary_ops)
    self.evaluate(writer_flush)

  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_scope(self, scope):
    """The constructor-provided scope is exposed as a property."""
    scoped_summary = _ScopedSummaryV2(self.test_subdirectory, scope=scope)
    self.assertEqual(scope, scoped_summary.scope)

  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_scalar_summary(self, scope, skip_summary=False):
    """Scalar summaries record the name-scoped tag and the scalar value."""
    with context.graph_mode():
      scoped_summary = _ScopedSummaryV2(
          self.test_subdirectory, scope=scope, skip_summary=skip_summary)
      i = tf.constant(3)
      with tf.name_scope("outer"):
        scoped_summary.scalar("inner", i)
      self.write_summaries(scoped_summary)
    if skip_summary:
      return
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "outer/inner")
    self.assertEqual(simple_value(values[0]), 3.0)

  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_summarizing_variable(self, scope):
    """A `tf.Variable` may be passed as the summarized tensor."""
    scoped_summary = _ScopedSummaryV2(self.test_subdirectory, scope=scope)
    c = tf.constant(42.0)
    v = tf.Variable(c)
    scoped_summary.scalar("summary", v)
    self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    value = values[0]
    self.assertEqual(value.tag, "summary")
    self.assertEqual(simple_value(value), 42.0)

  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_image_summary(self, scope, skip_summary=False):
    """Image summaries record the name-scoped tag."""
    with context.graph_mode():
      scoped_summary = _ScopedSummaryV2(
          self.test_subdirectory, scope=scope, skip_summary=skip_summary)
      i = tf.ones((5, 4, 4, 3))
      with tf.name_scope("outer"):
        scoped_summary.image("inner", i, max_outputs=3)
      self.write_summaries(scoped_summary)
    if skip_summary:
      return
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    self.assertEqual("outer/inner", values[0].tag)

  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_histogram_summary(self, scope, skip_summary=False):
    """Histogram summaries record the name-scoped tag."""
    with context.graph_mode():
      scoped_summary = _ScopedSummaryV2(
          self.test_subdirectory, scope=scope, skip_summary=skip_summary)
      i = tf.ones((5, 4, 4, 3))
      with tf.name_scope("outer"):
        scoped_summary.histogram("inner", i)
      self.write_summaries(scoped_summary)
    if skip_summary:
      return
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    self.assertEqual("outer/inner", values[0].tag)

  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      }, {
          "testcase_name": "skip_summary",
          "scope": None,
          "skip_summary": True,
      })
  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_audio_summary(self, scope, skip_summary=False):
    """Audio summaries record the name-scoped tag."""
    with context.graph_mode():
      scoped_summary = _ScopedSummaryV2(
          self.test_subdirectory, scope=scope, skip_summary=skip_summary)
      i = tf.ones((5, 3, 4))
      with tf.name_scope("outer"):
        scoped_summary.audio("inner", i, sample_rate=2, max_outputs=3)
      self.write_summaries(scoped_summary)
    if skip_summary:
      return
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    # NOTE(review): duplicated assertion below — harmless but redundant.
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "outer/inner")

  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_summary_name_conversion(self, scope):
    """Summary names are preserved (not sanitized) under TF 2."""
    scoped_summary = _ScopedSummaryV2(self.test_subdirectory, scope=scope)
    c = tf.constant(3)
    scoped_summary.scalar("name with spaces", c)
    scoped_summary.scalar("name with many $#illegal^: characters!", c)
    scoped_summary.scalar("/name/with/leading/slash", c)
    self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    self.assertLen(events, 3)
    tags = [event.summary.value[0].tag for event in events]
    # Characters that were illegal in TF 1 are now valid in TF 2.
    self.assertIn("name with spaces", tags)
    self.assertIn("name with many $#illegal^: characters!", tags)
    self.assertIn("name/with/leading/slash", tags)

  @parameterized.named_parameters(
      {
          "testcase_name": "without_scope",
          "scope": None,
      }, {
          "testcase_name": "with_scope",
          "scope": "with_scope",
      })
  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_current_scope(self, scope):
    """`current_scope()` rebases tags, ignoring enclosing variable scopes."""
    with context.graph_mode():
      scoped_summary = _ScopedSummaryV2(self.test_subdirectory, scope=scope)
      i = tf.constant(3)
      with tf.compat.v1.variable_scope("outer1"):
        with tf.compat.v1.variable_scope("outer2"):
          with scoped_summary.current_scope():
            with tf.compat.v1.variable_scope("inner1"):
              scoped_summary.scalar("inner2/a/b/c", i)
      self.write_summaries(scoped_summary)
    events = self.read_single_event_from_eventfile(scoped_summary)
    values = events[0].summary.value
    self.assertLen(values, 1)
    self.assertEqual(values[0].tag, "inner1/inner2/a/b/c")
    self.assertEqual(simple_value(values[0]), 3.0)

  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_summary_args(self):
    """All four summary kinds accept positional arguments."""
    summary = _ScopedSummaryV2(self.test_subdirectory)
    summary.scalar("scalar", 1, "family")
    summary.image("image", 1, 3, "family")
    summary.histogram("histogram", 1, "family")
    summary.audio("audio", 1, 3, 3, "family")
    self.assertLen(summary.summary_tuples(), 4)

  @tf_compat.skip_for_tf1
  @test_util.run_in_graph_and_eager_modes
  def test_summary_kwargs(self):
    """All four summary kinds accept keyword arguments."""
    summary = _ScopedSummaryV2(self.test_subdirectory)
    summary.scalar(name="scalar", tensor=1, family="family")
    summary.image(name="image", tensor=1, max_outputs=3, family="family")
    summary.histogram(name="histogram", values=1, family="family")
    summary.audio(
        name="audio", tensor=1, sample_rate=3, max_outputs=3, family="family")
    self.assertLen(summary.summary_tuples(), 4)
if __name__ == "__main__":
  # These tests target the TF 2.x summary APIs, so enable V2 behavior before
  # running them.
  tf.enable_v2_behavior()
  tf.test.main()
| 10,507 | 34.026667 | 79 | py |
adanet | adanet-master/adanet/core/iteration.py | """An AdaNet iteration implementation in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import json
import os
from absl import logging
from adanet import distributed
from adanet import subnetwork
from adanet import tf_compat
from adanet.core.ensemble_builder import _EnsembleSpec
from adanet.core.eval_metrics import _IterationMetrics
import numpy as np
import tensorflow.compat.v2 as tf
from typing import Any
class _TrainManager(object):
"""Manages the training of SubnetworkSpecs and EnsembleSpecs.
This object maintains a dictionary of states for each SubnetworkSpec and
EnsembleSpec to coordinate and manage training. Users can check the
training status of a spec, or request that it stops training.
It also persists metadata about specs to disk in order to be consistent across
runs and robust to preemptions.
"""
def __init__(self, subnetwork_specs, ensemble_specs, train_manager_dir,
is_chief):
"""Initializes a _TrainManager instance.
Args:
subnetwork_specs: List of `_SubnetworkSpec` instances to monitor.
ensemble_specs: List of `EstimatorSpec` instances to monitor.
train_manager_dir: Directory for storing metadata about training. When a
spec should no longer be trained, a JSON file with its name and metadata
is written to this directory, to persist across runs and preemptions.
is_chief: Boolean whether the current worker is a chief.
"""
if not tf.io.gfile.exists(train_manager_dir):
tf.io.gfile.makedirs(train_manager_dir)
self._train_manager_dir = train_manager_dir
self._is_training = {
spec.name: not self._is_done_training(spec)
for spec in subnetwork_specs + ensemble_specs
}
self._ensemble_specs = set([e.name for e in ensemble_specs])
self._is_chief = is_chief
def should_train(self, spec):
"""Whether the given spec should keep training."""
return self._is_training[spec.name]
def _is_done_training(self, spec):
"""If the file exists, then the candidate is done training."""
return tf.io.gfile.exists(self._filename_for(spec))
def _filename_for(self, spec):
"""Returns the filename to identify the spec."""
return os.path.join(self._train_manager_dir, "{}.json".format(spec.name))
def request_stop(self, spec, message):
"""Registers that given spec should no longer train."""
self._is_training[spec.name] = False
# Only write to disk if chief worker, otherwise there is a risk of conflicts
# and race conditions during writes.
if self._is_chief and not self._is_done_training(spec):
with tf.io.gfile.GFile(self._filename_for(spec), "w") as record_file:
# TODO: Consider making these messages be some kind of Enum.
# There # might be a case where we want to parse these files. For
# example, in iteration n+1, maybe we no longer even want to build
# NaN candidates.
message = {"message": message}
record_file.write(json.dumps(message))
def is_over(self):
"""Whether all specs are done training and the iteration is over."""
for k in sorted(self._is_training):
if k in self._ensemble_specs:
# In case the sub-estimator is done training (e.g. dataset ran out of
# data without repeat) but the "max_iteration_steps" is not reached.
continue
if self._is_training[k]:
# Still needs to train.
return False
return True
class _NanLossHook(tf_compat.SessionRunHook):
  """Watches a spec's loss tensor and warns when it becomes NaN.

  While the spec is still training, every session run fetches the spec's loss.
  A NaN loss currently only logs a warning; stopping the spec's training is
  intentionally disabled (see the TODO in `after_run`).
  """

  def __init__(self, train_manager, spec):
    """Initializes a `NanTensorHook`.

    Args:
      train_manager: The current iteration's `_TrainManager`.
      spec: Either a `SubnetworkSpec` or `EnsembleSpec` to monitor.
    """
    self._train_manager = train_manager
    self._spec = spec

  def before_run(self, run_context):
    del run_context  # Unused
    if not self._train_manager.should_train(self._spec):
      return None
    return tf_compat.SessionRunArgs(self._spec.loss)

  def after_run(self, run_context, run_values):
    loss = run_values.results
    if loss is not None and np.isnan(loss):
      logging.warning("'%s' diverged with loss = NaN.", self._spec.name)
      # TODO: Re-enable once we know that evaluation won't
      # fail from NaNs.
      # self._train_manager.request_stop(self._spec, "NaN loss during training.")
class _TrainingLimitHook(tf_compat.SessionRunHook):
  """Caps how many steps a single spec may train.

  Also responsible for advancing the spec's step counter by running the
  provided increment op on each session run.
  """

  def __init__(self, train_manager, spec, max_steps, increment_step_op):
    """Initializes a _TrainingLimitHook instance.

    Args:
      train_manager: The current iteration's `_TrainManager`.
      spec: Either a `SubnetworkSpec` or `EnsembleSpec` to monitor.
      max_steps: Maximum number steps to train the given spec.
      increment_step_op: Op that increments the current step and executes one
        train op run.
    """
    self._train_manager = train_manager
    self._spec = spec
    self._max_steps = max_steps
    self._increment_step_op = increment_step_op

  def after_create_session(self, session, coord):
    if not self._train_manager.should_train(self._spec):
      return
    if self._spec.step is None:
      # None for dummy candidates used during round-robin placement.
      self._train_manager.request_stop(self._spec, "Dummy candidate to ignore.")
      return
    current_step = session.run(self._spec.step)
    if not self._should_stop(current_step):
      return
    logging.info("Skipping '%s' training which already trained %d steps",
                 self._spec.name, current_step)
    self._train_manager.request_stop(self._spec, "Training already complete.")

  def before_run(self, run_context):
    del run_context  # Unused
    if not self._train_manager.should_train(self._spec):
      return None
    if self._increment_step_op is not None:
      return tf_compat.SessionRunArgs(self._increment_step_op)
    # The increment op is None on TPU; just read the step instead.
    return tf_compat.SessionRunArgs(self._spec.step)

  def after_run(self, run_context, run_values):
    current_step = run_values.results
    if current_step is None or not self._should_stop(current_step):
      return
    logging.info("Now stopping '%s' training after %d steps", self._spec.name,
                 current_step)
    self._train_manager.request_stop(
        self._spec, "Training complete after {} steps.".format(current_step))

  def _should_stop(self, step):
    """Whether `step` has reached the configured maximum."""
    return self._max_steps is not None and step >= self._max_steps
class _GlobalStepSetterHook(tf_compat.SessionRunHook):
  """Derives the global step from the candidates' iteration steps.

  Should only be run on CPU and GPU, but not TPU. TPUs run many training steps
  per hook run, so the global step should be incremented in an op along with the
  candidates' train ops.
  """

  def __init__(self, train_manager, subnetwork_specs, base_global_step,
               global_step_combiner_fn):
    """Initializes a _GlobalStepSetterHook instance.

    Args:
      train_manager: The current iteration's `_TrainManager`.
      subnetwork_specs: List of `_SubnetworkSpec` instances for this iteration.
      base_global_step: Integer global step at the beginning of this iteration.
      global_step_combiner_fn: Function for combining each subnetwork's
        iteration step into the global step.
    """
    self._train_manager = train_manager
    self._subnetwork_specs = subnetwork_specs
    self._base_global_step = base_global_step
    self._global_step_combiner_fn = global_step_combiner_fn

  def begin(self):
    logging.info("Starting iteration at global step %s", self._base_global_step)
    candidate_steps = []
    for spec in self._subnetwork_specs:
      candidate_steps.append(self._base_global_step + spec.step.read_value())
    combined_step = self._global_step_combiner_fn(candidate_steps)
    global_step = tf_compat.v1.train.get_global_step()
    self._assign_global_step_op = global_step.assign(combined_step)

  def after_run(self, run_context, run_values):
    # Global step cannot be retrieved via SessionRunArgs and before_run due to
    # race condition in hook execution.
    run_context.session.run(self._assign_global_step_op)
class _TrainingHookRunnerHook(tf_compat.SessionRunHook):
  """Hook wrapper for executing a spec's training hook.

  Only runs the wrapped hook while the `_TrainManager` reports that the spec
  should still train. End-of-input errors raised by the wrapped hook stop only
  this spec's training instead of ending the whole session.
  """

  def __init__(self, train_manager, spec, hook):
    """Initializes a _TrainingHookRunnerHook instance.

    Only accepts a single hook, since merging hooks is complex and should be
    handled by the MonitoredTrainingSession instead.

    Args:
      train_manager: The current iteration's `_TrainManager`.
      spec: Either a `SubnetworkSpec` or `EnsembleSpec` to train.
      hook: The spec's training hook to execute.
    """
    self._train_manager = train_manager
    self._spec = spec
    self._hook = hook

  def begin(self):
    self._hook.begin()

  @contextlib.contextmanager
  def _session_run_context(self):
    """Intercepts input out of range errors to gracefully stop spec training."""
    try:
      yield
    except (tf.errors.OutOfRangeError, StopIteration) as e:
      logging.info("Now stopping '%s' training after hitting end of input",
                   self._spec.name)
      self._train_manager.request_stop(self._spec,
                                       "OutOfRangeError: {}".format(e))

  def after_create_session(self, session, coord):
    with self._session_run_context():
      self._hook.after_create_session(session, coord)

  def before_run(self, run_context):
    if not self._train_manager.should_train(self._spec):
      return None
    # Use a tmp run context to intercept if the hook requests stop.
    tmp_run_context = tf_compat.v1.train.SessionRunContext(
        run_context.original_args, run_context.session)
    # BUG FIX: previously this returned from inside the context manager, so the
    # stop_requested check below only executed when an end-of-input error was
    # swallowed. Capture the result first so the check always runs, mirroring
    # after_run.
    run_args = None
    with self._session_run_context():
      run_args = self._hook.before_run(tmp_run_context)
    if tmp_run_context.stop_requested:
      self._train_manager.request_stop(self._spec, "Stop requested.")
    return run_args

  def after_run(self, run_context, run_values):
    if not self._train_manager.should_train(self._spec):
      return
    # Use a tmp run context to intercept if the hook requests stop.
    tmp_run_context = tf_compat.v1.train.SessionRunContext(
        run_context.original_args, run_context.session)
    with self._session_run_context():
      self._hook.after_run(tmp_run_context, run_values)
    if tmp_run_context.stop_requested:
      self._train_manager.request_stop(self._spec, "Stop requested.")

  def end(self, session):
    with self._session_run_context():
      self._hook.end(session)
# TODO: Replace candidates with ensemble_specs.
class _Iteration(
collections.namedtuple("_Iteration", [
"number", "candidates", "subnetwork_specs", "estimator_spec",
"best_candidate_index", "summaries", "train_manager",
"subnetwork_reports", "checkpoint", "previous_iteration"
])):
"""An AdaNet iteration.
An AdaNet iteration represents the simultaneous training of multiple
candidates for one iteration of the AdaNet loop, and tracks the best
candidate's loss, predictions, and evaluation metrics.
There must be maximum one _Iteration per graph.
"""
def __new__(cls, number, candidates, subnetwork_specs, estimator_spec,
best_candidate_index, summaries, train_manager,
subnetwork_reports, checkpoint, previous_iteration):
"""Creates a validated `_Iteration` instance.
Args:
number: The iteration number.
candidates: List of `_Candidate` instances to track.
subnetwork_specs: List of `_SubnetworkSpec` instances.
estimator_spec: `EstimatorSpec` instance.
best_candidate_index: Int `Tensor` indicating the best candidate's index.
summaries: List of `adanet.Summary` instances for each candidate.
train_manager: The current `_TrainManager` for monitoring candidate per
training.
subnetwork_reports: Dict mapping string names to `subnetwork.Report`s, one
per candidate.
checkpoint: The `tf.train.Checkpoint` object associated with this
iteration.
previous_iteration: The iteration occuring before this one or None if this
is the first iteration.
Returns:
A validated `_Iteration` object.
Raises:
ValueError: If validation fails.
"""
if not isinstance(number, (int, np.integer)):
raise ValueError("number must be an integer")
if number < 0:
raise ValueError("number must be greater than 0 got %d" % (number))
if not isinstance(candidates, list) or not candidates:
raise ValueError("candidates must be a non-empty list")
if estimator_spec is None:
raise ValueError("estimator_spec is required")
if best_candidate_index is None:
raise ValueError("best_candidate_index is required")
if not isinstance(subnetwork_reports, dict):
raise ValueError("subnetwork_reports must be a dict")
return super(_Iteration, cls).__new__(
cls,
number=number,
candidates=candidates,
subnetwork_specs=subnetwork_specs,
estimator_spec=estimator_spec,
best_candidate_index=best_candidate_index,
summaries=summaries,
train_manager=train_manager,
subnetwork_reports=subnetwork_reports,
checkpoint=checkpoint,
previous_iteration=previous_iteration)
def _is_numeric(tensor):
  """Returns True iff `tensor` is a floating-point `tf.Tensor`."""
  if not isinstance(tensor, tf.Tensor):
    return False
  float_dtypes = (tf.bfloat16, tf.float16, tf.float32, tf.float64)
  return tensor.dtype in float_dtypes
class _IterationBuilder(object):
  """Builds AdaNet iterations."""
  def __init__(self,
               candidate_builder,
               subnetwork_manager,
               ensemble_builder,
               ensemblers,
               max_steps,
               summary_maker,
               global_step_combiner_fn=tf.math.reduce_mean,
               # NOTE(review): this default is evaluated once at class
               # definition time, so all builders that rely on the default
               # share one ReplicationStrategy instance — confirm this is
               # intentional (the setter below suggests it gets replaced).
               placement_strategy=distributed.ReplicationStrategy(),
               replicate_ensemble_in_training=False,
               use_tpu=False,
               debug=False,
               enable_ensemble_summaries=True,
               enable_subnetwork_summaries=True,
               enable_subnetwork_reports=True):
    """Creates an `_IterationBuilder` instance.
    Args:
      candidate_builder: A `_CandidateBuilder` instance.
      subnetwork_manager: A `_SubnetworkManager` instance.
      ensemble_builder: An `_EnsembleBuilder` instance.
      ensemblers: An iterable of :class:`adanet.ensemble.Ensembler` objects that
        define how to ensemble a group of subnetworks.
      max_steps: Maximum number of steps to train candidate subnetworks.
      summary_maker: A function that constructs an `adanet.Summary` instance
        from (namespace, scope, and skip_summary).
      global_step_combiner_fn: Function for combining each subnetwork's
        iteration step into the global step.
      placement_strategy: A `PlacementStrategy` for assigning subnetworks and
        ensembles to specific workers.
      replicate_ensemble_in_training: Whether to build the frozen subnetworks in
        `training` mode during training.
      use_tpu: Whether AdaNet is running on TPU.
      debug: Boolean to enable debug mode which will check features and labels
        for Infs and NaNs.
      enable_ensemble_summaries: Whether to record summaries to display in
        TensorBoard for each ensemble candidate. Disable to reduce memory and
        disk usage per run.
      enable_subnetwork_summaries: Whether to record summaries to display in
        TensorBoard for each subnetwork. Disable to reduce memory and disk usage
        per run.
      enable_subnetwork_reports: Whether to enable generating subnetwork
        reports.
    Returns:
      An `_IterationBuilder` object.
    """
    # Fail fast on an unusable training budget; None means "no limit".
    if max_steps is not None and max_steps <= 0:
      raise ValueError("max_steps must be > 0 or None")
    self._candidate_builder = candidate_builder
    self._subnetwork_manager = subnetwork_manager
    self._ensemble_builder = ensemble_builder
    self._ensemblers = ensemblers
    self._max_steps = max_steps
    self._summary_maker = summary_maker
    self._global_step_combiner_fn = global_step_combiner_fn
    self._placement_strategy = placement_strategy
    self._replicate_ensemble_in_training = replicate_ensemble_in_training
    self._use_tpu = use_tpu
    self._debug = debug
    self._enable_ensemble_summaries = enable_ensemble_summaries
    self._enable_subnetwork_summaries = enable_subnetwork_summaries
    self._enable_subnetwork_reports = enable_subnetwork_reports
    super(_IterationBuilder, self).__init__()
  @property
  def placement_strategy(self):
    """The `PlacementStrategy` used to assign work to workers."""
    return self._placement_strategy
  @placement_strategy.setter
  def placement_strategy(self, new_placement_strategy):
    """Replaces the placement strategy for subsequent iteration builds."""
    self._placement_strategy = new_placement_strategy
def _check_numerics(self, features, labels):
"""Checks for NaNs and Infs in input features and labels.
Args:
features: Dictionary of `Tensor` objects keyed by feature name.
labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
(for multi-head). Can be `None`.
Returns:
A features and labels tuple with same types and respective inputs, but
with numeric check ops wrapping them.
"""
if not self._debug:
return features, labels
checked_features, checked_labels = {}, {}
logging.info("DEBUG: Checking numerics of float features.")
for name in sorted(features):
if not _is_numeric(features[name]):
continue
logging.info("DEBUG: Checking numerics of float feature '%s'.", name)
checked_features[name] = tf.debugging.check_numerics(
features[name], "features '{}'".format(name))
if isinstance(labels, dict):
for name in sorted(labels):
if not _is_numeric(labels[name]):
continue
logging.info("DEBUG: Checking numerics of float label '%s'.", name)
checked_labels[name] = tf.debugging.check_numerics(
labels[name], "labels '{}'".format(name))
elif labels is not None and _is_numeric(labels):
logging.info("DEBUG: Checking numerics of labels.")
checked_labels = tf.debugging.check_numerics(labels, "'labels'")
return checked_features, checked_labels
  def build_iteration(self,
                      base_global_step,
                      iteration_number,
                      ensemble_candidates,
                      subnetwork_builders,
                      features,
                      mode,
                      config,
                      labels=None,
                      previous_ensemble_summary=None,
                      rebuilding=False,
                      rebuilding_ensembler_name=None,
                      best_ensemble_index_override=None,
                      previous_iteration=None):
    """Builds and returns AdaNet iteration t.
    This method uses the generated the candidate subnetworks given the ensemble
    at iteration t-1 and creates graph operations to train them. The returned
    `_Iteration` tracks the training of all candidates to know when the
    iteration is over, and tracks the best candidate's predictions and loss, as
    defined by lowest complexity-regularized loss on the train set.
    Args:
      base_global_step: Integer global step at the beginning of this iteration.
      iteration_number: Integer iteration number.
      ensemble_candidates: Iterable of `adanet.ensemble.Candidate` instances.
      subnetwork_builders: A list of `Builders` for adding ` Subnetworks` to the
        graph. Each subnetwork is then wrapped in a `_Candidate` to train.
      features: Dictionary of `Tensor` objects keyed by feature name.
      mode: Defines whether this is training, evaluation or prediction. See
        `ModeKeys`.
      config: The `tf.estimator.RunConfig` to use this iteration.
      labels: `Tensor` of labels. Can be `None`.
      previous_ensemble_summary: The `adanet.Summary` for the previous ensemble.
      rebuilding: Boolean whether the iteration is being rebuilt only to restore
        the previous best subnetworks and ensembles.
      rebuilding_ensembler_name: Optional ensembler to restrict to, only
        relevant when rebuilding is set as True.
      best_ensemble_index_override: Integer index to identify the best ensemble
        candidate instead of computing the best ensemble index dynamically
        conditional on the ensemble AdaNet losses.
      previous_iteration: The iteration occuring before this one or None if this
        is the first iteration.
    Returns:
      An _Iteration instance.
    Raises:
      ValueError: If subnetwork_builders is empty.
      ValueError: If two subnetworks share the same name.
      ValueError: If two ensembles share the same name.
    """
    self._placement_strategy.config = config
    logging.info("%s iteration %s", "Rebuilding" if rebuilding else "Building",
                 iteration_number)
    if not subnetwork_builders:
      raise ValueError("Each iteration must have at least one Builder.")
    # TODO: Consider moving builder mode logic to ensemble_builder.py.
    builder_mode = mode
    if rebuilding:
      # Build the subnetworks and ensembles in EVAL mode by default. This way
      # their outputs aren't affected by dropout etc.
      builder_mode = tf.estimator.ModeKeys.EVAL
      if mode == tf.estimator.ModeKeys.PREDICT:
        builder_mode = mode
      # Only replicate in training mode when the user requests it.
      if self._replicate_ensemble_in_training and (
          mode == tf.estimator.ModeKeys.TRAIN):
        builder_mode = mode
    features, labels = self._check_numerics(features, labels)
    replay_indices_for_all = {}
    training = mode == tf.estimator.ModeKeys.TRAIN
    # Summaries are never recorded for prediction or when rebuilding a past
    # iteration (the original run already wrote them).
    skip_summaries = mode == tf.estimator.ModeKeys.PREDICT or rebuilding
    # Everything in iteration t lives under its own variable scope so that
    # multiple iterations can coexist in checkpoints without name clashes.
    with tf_compat.v1.variable_scope("iteration_{}".format(iteration_number)):
      seen_builder_names = {}
      candidates = []
      summaries = []
      subnetwork_reports = {}
      previous_ensemble = None
      previous_ensemble_spec = None
      previous_iteration_checkpoint = None
      if previous_iteration:
        previous_iteration_checkpoint = previous_iteration.checkpoint
        previous_best_candidate = previous_iteration.candidates[-1]
        previous_ensemble_spec = previous_best_candidate.ensemble_spec
        previous_ensemble = previous_ensemble_spec.ensemble
        replay_indices_for_all[len(candidates)] = copy.copy(
            previous_ensemble_spec.architecture.replay_indices)
        # Include previous best subnetwork as a candidate so that its
        # predictions are returned until a new candidate outperforms.
        seen_builder_names = {previous_ensemble_spec.name: True}
        candidates.append(previous_best_candidate)
        if self._enable_ensemble_summaries:
          summaries.append(previous_ensemble_summary)
        # Generate subnetwork reports.
        if (self._enable_subnetwork_reports and
            mode == tf.estimator.ModeKeys.EVAL):
          metrics = previous_ensemble_spec.eval_metrics.eval_metrics_ops()
          subnetwork_report = subnetwork.Report(
              hparams={},
              attributes={},
              metrics=metrics,
          )
          subnetwork_report.metrics["adanet_loss"] = tf_compat.v1.metrics.mean(
              previous_ensemble_spec.adanet_loss)
          subnetwork_reports["previous_ensemble"] = subnetwork_report
      # Fail fast on duplicate builder names before building any graphs.
      for subnetwork_builder in subnetwork_builders:
        if subnetwork_builder.name in seen_builder_names:
          raise ValueError("Two subnetworks have the same name '{}'".format(
              subnetwork_builder.name))
        seen_builder_names[subnetwork_builder.name] = True
      subnetwork_specs = []
      num_subnetworks = len(subnetwork_builders)
      skip_summary = skip_summaries or not self._enable_subnetwork_summaries
      for i, subnetwork_builder in enumerate(subnetwork_builders):
        # The placement strategy may assign this subnetwork to another worker;
        # when rebuilding, every worker restores every subnetwork.
        if not self._placement_strategy.should_build_subnetwork(
            num_subnetworks, i) and not rebuilding:
          continue
        with self._placement_strategy.subnetwork_devices(num_subnetworks, i):
          subnetwork_name = "t{}_{}".format(iteration_number,
                                            subnetwork_builder.name)
          subnetwork_summary = self._summary_maker(
              namespace="subnetwork",
              scope=subnetwork_name,
              skip_summary=skip_summary)
          if not skip_summary:
            summaries.append(subnetwork_summary)
          logging.info("%s subnetwork '%s'",
                       "Rebuilding" if rebuilding else "Building",
                       subnetwork_builder.name)
          subnetwork_spec = self._subnetwork_manager.build_subnetwork_spec(
              name=subnetwork_name,
              subnetwork_builder=subnetwork_builder,
              summary=subnetwork_summary,
              features=features,
              mode=builder_mode,
              labels=labels,
              previous_ensemble=previous_ensemble,
              config=config)
          subnetwork_specs.append(subnetwork_spec)
          # Workers that don't build ensembles need a dummy candidate in order
          # to train the subnetwork.
          # Because only ensembles can be considered candidates, we need to
          # convert the subnetwork into a dummy ensemble and subsequently a
          # dummy candidate. However, this dummy candidate is never considered a
          # true candidate during candidate evaluation and selection.
          # TODO: Eliminate need for candidates.
          if not self._placement_strategy.should_build_ensemble(
              num_subnetworks) and not rebuilding:
            candidates.append(
                self._create_dummy_candidate(subnetwork_spec,
                                             subnetwork_builders,
                                             subnetwork_summary, training))
        # Generate subnetwork reports.
        if (self._enable_subnetwork_reports and
            mode != tf.estimator.ModeKeys.PREDICT):
          subnetwork_report = subnetwork_builder.build_subnetwork_report()
          if not subnetwork_report:
            subnetwork_report = subnetwork.Report(
                hparams={}, attributes={}, metrics={})
          metrics = subnetwork_spec.eval_metrics.eval_metrics_ops()
          for metric_name in sorted(metrics):
            metric = metrics[metric_name]
            subnetwork_report.metrics[metric_name] = metric
          subnetwork_reports[subnetwork_builder.name] = subnetwork_report
      # Create (ensemble_candidate*ensembler) ensembles.
      skip_summary = skip_summaries or not self._enable_ensemble_summaries
      seen_ensemble_names = {}
      for ensembler in self._ensemblers:
        # When rebuilding a specific ensembler's ensemble, skip the others.
        if rebuilding and rebuilding_ensembler_name and (
            ensembler.name != rebuilding_ensembler_name):
          continue
        for ensemble_candidate in ensemble_candidates:
          if not self._placement_strategy.should_build_ensemble(
              num_subnetworks) and not rebuilding:
            continue
          ensemble_name = "t{}_{}_{}".format(iteration_number,
                                             ensemble_candidate.name,
                                             ensembler.name)
          if ensemble_name in seen_ensemble_names:
            raise ValueError(
                "Two ensembles have the same name '{}'".format(ensemble_name))
          seen_ensemble_names[ensemble_name] = True
          summary = self._summary_maker(
              namespace="ensemble",
              scope=ensemble_name,
              skip_summary=skip_summary)
          if not skip_summary:
            summaries.append(summary)
          ensemble_spec = self._ensemble_builder.build_ensemble_spec(
              name=ensemble_name,
              candidate=ensemble_candidate,
              ensembler=ensembler,
              subnetwork_specs=subnetwork_specs,
              summary=summary,
              features=features,
              mode=builder_mode,
              iteration_number=iteration_number,
              labels=labels,
              my_ensemble_index=len(candidates),
              previous_ensemble_spec=previous_ensemble_spec,
              previous_iteration_checkpoint=previous_iteration_checkpoint)
          # TODO: Eliminate need for candidates.
          candidate = self._candidate_builder.build_candidate(
              ensemble_spec=ensemble_spec,
              training=training,
              summary=summary,
              rebuilding=rebuilding)
          replay_indices_for_all[len(candidates)] = copy.copy(
              ensemble_spec.architecture.replay_indices)
          candidates.append(candidate)
          # TODO: Move adanet_loss from subnetwork report to a new
          # ensemble report, since the adanet_loss is associated with an
          # ensemble, and only when using a ComplexityRegularizedEnsemblers.
          # Keep adanet_loss in subnetwork report for backwards compatibility.
          if len(ensemble_candidates) != len(subnetwork_builders):
            continue
          if len(ensemble_candidate.subnetwork_builders) > 1:
            continue
          if mode == tf.estimator.ModeKeys.PREDICT:
            continue
          builder_name = ensemble_candidate.subnetwork_builders[0].name
          if self._enable_subnetwork_reports:
            subnetwork_reports[builder_name].metrics[
                "adanet_loss"] = tf_compat.v1.metrics.mean(
                    ensemble_spec.adanet_loss)
      # Dynamically select the outputs of best candidate.
      best_candidate_index = self._best_candidate_index(
          candidates, best_ensemble_index_override)
      best_predictions = self._best_predictions(candidates,
                                                best_candidate_index)
      best_loss = self._best_loss(candidates, best_candidate_index, mode)
      best_export_outputs = self._best_export_outputs(candidates,
                                                      best_candidate_index,
                                                      mode, best_predictions)
      train_manager_dir = os.path.join(config.model_dir, "train_manager",
                                       "t{}".format(iteration_number))
      train_manager, training_chief_hooks, training_hooks = self._create_hooks(
          base_global_step, subnetwork_specs, candidates, num_subnetworks,
          rebuilding, train_manager_dir, config.is_chief)
      # Gather local init ops from the previous ensemble's subnetworks and the
      # new subnetwork specs so the Scaffold runs them at session creation.
      local_init_ops = []
      if previous_ensemble_spec:
        for s in previous_ensemble_spec.ensemble.subnetworks:
          if s.local_init_ops:
            local_init_ops.extend(s.local_init_ops)
      for subnetwork_spec in subnetwork_specs:
        if (subnetwork_spec and subnetwork_spec.subnetwork and
            subnetwork_spec.subnetwork.local_init_ops):
          local_init_ops.extend(subnetwork_spec.subnetwork.local_init_ops)
      summary = self._summary_maker(
          namespace=None, scope=None, skip_summary=skip_summaries)
      summaries.append(summary)
      with summary.current_scope():
        summary.scalar("iteration/adanet/iteration", iteration_number)
        if best_loss is not None:
          summary.scalar("loss", best_loss)
      iteration_metrics = _IterationMetrics(iteration_number, candidates,
                                            subnetwork_specs, self._use_tpu,
                                            replay_indices_for_all)
      checkpoint = self._make_checkpoint(candidates, subnetwork_specs,
                                         iteration_number, previous_iteration)
      if self._use_tpu:
        estimator_spec = tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
            mode=mode,
            predictions=best_predictions,
            loss=best_loss,
            train_op=self._create_tpu_train_op(base_global_step,
                                               subnetwork_specs, candidates,
                                               mode, num_subnetworks, config),
            eval_metrics=iteration_metrics.best_eval_metrics_tuple(
                best_candidate_index, mode),
            export_outputs=best_export_outputs,
            training_hooks=training_hooks,
            scaffold_fn=self._get_scaffold_fn(local_init_ops))
      else:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=best_predictions,
            loss=best_loss,
            # All training happens in hooks so we don't need a train op.
            train_op=tf.no_op() if training else None,
            eval_metric_ops=iteration_metrics.best_eval_metric_ops(
                best_candidate_index, mode),
            export_outputs=best_export_outputs,
            training_chief_hooks=training_chief_hooks,
            training_hooks=training_hooks,
            scaffold=self._get_scaffold_fn(local_init_ops)())
      return _Iteration(
          number=iteration_number,
          candidates=candidates,
          subnetwork_specs=subnetwork_specs,
          estimator_spec=estimator_spec,
          best_candidate_index=best_candidate_index,
          summaries=summaries,
          train_manager=train_manager,
          subnetwork_reports=subnetwork_reports,
          checkpoint=checkpoint,
          previous_iteration=previous_iteration)
def _get_scaffold_fn(self, local_init_ops):
"""Creates a method generating a scaffold.
TODO: Make this code compatible with TPU estimators.
Args:
local_init_ops: List of tf.Operations to call during initialization.
Returns:
Method returning a `tf.train.Scaffold`.
"""
def get_scaffold():
return tf_compat.v1.train.Scaffold(
local_init_op=tf.group(
local_init_ops +
[tf_compat.v1.train.Scaffold.default_local_init_op()]))
return get_scaffold
  def _create_dummy_candidate(self, subnetwork_spec, subnetwork_builders,
                              subnetwork_summary, training):
    """Returns a dummy candidate for the given SubnetworkSpec.
    AdaNet only considers ensembles as candidate models, and ensembles
    are represented as `_Candidates`. When training only subnetworks, such as
    on a subnetwork-worker in the RoundRobinStrategy, then we still need a
    candidate to manage the training of the subnetwork, even if it gets
    discarded, hence the dummy candidate.
    Args:
      subnetwork_spec: The subnetwork spec for the dummy candidate to wrap.
      subnetwork_builders: List of all subnetwork builders generated this
        iteration.
      subnetwork_summary: `_Summary` object to use for TensorBoard.
      training: Whether or not we are currently training.
    Returns:
      A `_Candidate` wrapping an `_EnsembleSpec` that simply forwards the
      subnetwork's predictions and loss (no real ensemble is built).
    """
    # The spec has no ensemble/architecture/step: it only exists so the
    # candidate machinery can drive training of the lone subnetwork.
    dummy_ensemble_spec = _EnsembleSpec(
        name="dummy_{}".format(subnetwork_spec.name),
        ensemble=None,
        architecture=None,
        subnetwork_builders=subnetwork_builders,
        predictions=subnetwork_spec.predictions,
        loss=subnetwork_spec.loss,
        step=None,
        adanet_loss=0.,
        variables=[])
    # Moving averages are skipped since this candidate is never compared
    # against real ensemble candidates.
    return self._candidate_builder.build_candidate(
        ensemble_spec=dummy_ensemble_spec,
        training=training,
        summary=subnetwork_summary,
        track_moving_average=False)
  def _create_tpu_train_op(self, base_global_step, subnetwork_specs, candidates,
                           mode, num_subnetworks, config):
    """Returns the train op for this set of candidates.
    This train op combines the train ops from all the candidates into a single
    train op. Additionally, it is responsible for incrementing the global step.
    The train op is only non-None during the `TRAIN` mode.
    Args:
      base_global_step: Integer global step at the beginning of this iteration.
      subnetwork_specs: List of `_SubnetworkSpec` instances for this iteration.
      candidates: List of `_Candidate` instances to train.
      mode: Defines whether this is training, evaluation or inference. The train
        op is only non-None during `TRAIN`. See `ModeKeys`.
      num_subnetworks: Integer number of subnetwork builders generated for the
        current iteration.
      config: The `tf.estimator.RunConfig` to use this iteration.
    Returns:
      A `Tensor` train op.
    """
    if mode != tf.estimator.ModeKeys.TRAIN:
      return None
    ensemble_specs = [c.ensemble_spec for c in candidates]
    with tf_compat.v1.variable_scope("train_op"):
      train_ops = []
      # Subnetwork train ops only run on workers the placement strategy
      # assigns to subnetwork training.
      if self._placement_strategy.should_train_subnetworks(num_subnetworks):
        for subnetwork_spec in subnetwork_specs:
          if subnetwork_spec.train_op is not None:
            train_ops.append(subnetwork_spec.train_op.train_op)
      for ensemble_spec in ensemble_specs:
        if ensemble_spec.train_op is not None:
          # The train op of a previous ensemble is None even during `TRAIN`.
          train_ops.append(ensemble_spec.train_op.train_op)
      with tf.control_dependencies(train_ops):
        # Increment steps after train ops complete to avoid non-determinism.
        increment_ops = [s.step.assign_add(1) for s in subnetwork_specs]
        increment_ops += [e.step.assign_add(1) for e in ensemble_specs]
        if not config.is_chief:
          return tf.group(*increment_ops)
        # AdaNet's chief worker is responsible for setting the global step, not
        # the candidates it trains. Assigning the global step is the final
        # action performed in the train op.
        with tf.control_dependencies(increment_ops):
          steps = [s.step.read_value() for s in subnetwork_specs]
          global_step = tf_compat.v1.train.get_global_step()
          # Global step = base step + combined per-subnetwork step (default
          # combiner is the mean; see _global_step_combiner_fn).
          return global_step.assign(
              tf.cast(
                  base_global_step + self._global_step_combiner_fn(steps),
                  dtype=tf.int64))
  def _create_hooks(self, base_global_step, subnetwork_specs, candidates,
                    num_subnetworks, rebuilding, train_manager_dir, is_chief):
    """Returns the hooks to monitor and train this iteration.
    Args:
      base_global_step: Integer global step at the beginning of this iteration.
      subnetwork_specs: List of `_SubnetworkSpec` instances.
      candidates: List of `_Candidate` instances to compare.
      num_subnetworks: Integer number of subnetwork builders generated for the
        current iteration.
      rebuilding: Boolean whether the iteration is being rebuilt only to restore
        the previous best subnetworks and ensembles.
      train_manager_dir: Directory for the TrainManager to store spec metadata.
      is_chief: Whether the current worker is chief.
    Returns:
      A 3-tuple of a _TrainManager for monitoring training, a list of
      `SessionRunHooks` to run on chief, and a list of `SessionRunHooks` to run
      on all workers.
    """
    training_chief_hooks, training_hooks = [], []
    ensemble_specs = [c.ensemble_spec for c in candidates]
    train_manager = _TrainManager(subnetwork_specs, ensemble_specs,
                                  train_manager_dir, is_chief)
    if not self._use_tpu:
      # On TPU, the global step gets incremented in an op since it doesn't have
      # hook run granularity of CPU and GPU training.
      training_chief_hooks.append(
          _GlobalStepSetterHook(train_manager, subnetwork_specs,
                                base_global_step,
                                self._global_step_combiner_fn))
    should_train_subnetworks = (
        self._placement_strategy.should_train_subnetworks(num_subnetworks))
    # First wire hooks for every subnetwork spec, then for every ensemble
    # spec; each gets NaN detection (CPU/GPU only) and a training limit.
    for spec in subnetwork_specs:
      if not self._use_tpu:
        training_hooks.append(_NanLossHook(train_manager, spec))
      # We increment the step along with the global step as part of the train
      # op on TPU, whereas on CPU and GPU we use hooks for fine grained control.
      if self._use_tpu or not should_train_subnetworks or spec.train_op is None:
        increment_step_op = None
      else:
        with tf.control_dependencies([spec.train_op.train_op]):
          increment_step_op = spec.step.assign_add(1)
      # TPU also supports uneven training, but up to num_iterations_per_loop.
      training_hooks.append(
          _TrainingLimitHook(
              train_manager,
              spec,
              self._max_steps,
              increment_step_op=increment_step_op))
      # Workers that don't train subnetworks skip the builder's own hooks
      # (unless rebuilding, where every worker restores every subnetwork).
      if not should_train_subnetworks and not rebuilding:
        continue
      self._add_hooks(spec, train_manager, training_chief_hooks, training_hooks)
    for spec in ensemble_specs:
      if not self._use_tpu:
        training_hooks.append(_NanLossHook(train_manager, spec))
      # See above comment about incrementing the step on CPU vs. TPU.
      if self._use_tpu or spec.train_op is None:
        increment_step_op = None
      else:
        with tf.control_dependencies([spec.train_op.train_op]):
          increment_step_op = spec.step.assign_add(1)
      training_hooks.append(
          _TrainingLimitHook(
              train_manager,
              spec,
              self._max_steps,
              increment_step_op=increment_step_op))
      self._add_hooks(spec, train_manager, training_chief_hooks, training_hooks)
    return train_manager, training_chief_hooks, training_hooks
def _add_hooks(self, spec, train_manager, training_chief_hooks,
training_hooks):
"""Appends spec train hooks to the given hook lists."""
if not spec.train_op:
return
for hook in spec.train_op.chief_hooks:
training_chief_hooks.append(
_TrainingHookRunnerHook(train_manager, spec, hook))
for hook in spec.train_op.hooks:
training_hooks.append(_TrainingHookRunnerHook(train_manager, spec, hook))
  def _best_candidate_index(self, candidates, best_candidate_index_override=None):
    """Returns the index of the best candidate in the list.
    The best candidate is the one with the smallest AdaNet loss, unless
    `best_ensemble_index_override` is given.
    TODO: Best ensemble index should always be static during EVAL
    and PREDICT modes.
    In case a candidate has a NaN loss, their loss is immediately set to
    infinite, so that they are not selected. As long as one candidate ensemble
    has a non-NaN loss during training, the dreaded `NanLossDuringTrainingError`
    should not be raised.
    Args:
      candidates: List of `_Candidate` instances to choose from.
      best_ensemble_index_override: Integer index to return instead of computing
        the best ensemble index dynamically.
    Returns:
      An integer `Tensor` representing the index of the best candidate.
    """
    with tf_compat.v1.variable_scope("best_candidate_index"):
      # An override or a single candidate short-circuits to a constant index.
      if best_ensemble_index_override is not None:
        return tf.constant(best_ensemble_index_override)
      if len(candidates) == 1:
        return tf.constant(0)
      adanet_losses = [candidate.adanet_loss for candidate in candidates]
      # Replace NaNs with -Infs so that NaN loss candidates are always chosen,
      # causing tf.estimator.Estimator to raise a NanLossDuringTrainingError.
      adanet_losses = tf.where(
          tf_compat.v1.is_nan(adanet_losses),
          tf.ones_like(adanet_losses) * -np.inf, adanet_losses)
      return tf.argmin(input=adanet_losses, axis=0)
def _best_predictions(self, candidates, best_candidate_index):
"""Returns the best predictions from a set of candidates.
Args:
candidates: List of `_Candidate` instances to compare.
best_candidate_index: `Tensor` index of the best candidate in the list.
Returns:
A `Tensor` or dictionary of `Tensor`s representing the best candidate's
predictions (depending on what the subnetworks return).
"""
if len(candidates) == 1:
return candidates[0].ensemble_spec.predictions
with tf_compat.v1.variable_scope("best_predictions"):
if isinstance(candidates[0].ensemble_spec.predictions, dict):
predictions = {}
for candidate in candidates:
ensemble_spec = candidate.ensemble_spec
for key in sorted(ensemble_spec.predictions):
tensor = ensemble_spec.predictions[key]
if key in predictions:
predictions[key].append(tensor)
else:
predictions[key] = [tensor]
else:
predictions = []
for candidate in candidates:
ensemble_spec = candidate.ensemble_spec
predictions.append(ensemble_spec.predictions)
if isinstance(predictions, dict):
best_predictions = {}
for key in sorted(predictions):
tensor_list = predictions[key]
best_predictions[key] = tf.stack(tensor_list)[best_candidate_index]
else:
best_predictions = tf.stack(predictions)[best_candidate_index]
return best_predictions
def _best_loss(self, candidates, best_candidate_index, mode):
"""Returns the best loss from a set of candidates.
Args:
candidates: List of `_Candidate` instances to compare.
best_candidate_index: `Tensor` index of the best candidate in the list.
mode: Defines whether this is training, evaluation or inference. Loss is
always None during inference. See `ModeKeys`.
Returns:
Float `Tensor` of the best candidate's loss.
"""
if mode == tf.estimator.ModeKeys.PREDICT:
return None
if len(candidates) == 1:
return candidates[0].ensemble_spec.loss
with tf_compat.v1.variable_scope("best_loss"):
losses = [c.ensemble_spec.loss for c in candidates]
loss = tf.slice(tf.stack(losses), [best_candidate_index], [1])
return tf.reshape(loss, [])
  def _best_export_outputs(self, candidates, best_candidate_index, mode,
                           best_predictions):
    """Returns the best `SavedModel` export outputs from a set of candidates.
    Assumes that all candidate ensembles have identical export output keys and
    `ExportOutput` types.
    Args:
      candidates: List of `_Candidate` instances to compare.
      best_candidate_index: `Tensor` index of the best candidate in the list.
      mode: Defines whether this is training, evaluation or inference. Export
        outputs are always None during training and evaluation. See `ModeKeys`.
      best_predictions: A `Tensor` or dictionary of `Tensor`s representing the
        best candidate's predictions (depending on what the subnetworks return).
    Returns:
      A `Tensor` dictionary representing the best candidate's export outputs.
    Raises:
      TypeError: If the `ExportOutput` type is not supported.
    """
    if mode != tf.estimator.ModeKeys.PREDICT:
      return None
    # A single candidate needs no dynamic selection.
    if len(candidates) == 1:
      return candidates[0].ensemble_spec.export_outputs
    with tf_compat.v1.variable_scope("best_export_outputs"):
      # Group tensors by export output key and ExportOutput type.
      # ClassificationOutput entries collect a (scores, classes) pair of
      # lists; RegressionOutput entries collect a list of value tensors.
      export_outputs = {}  # type: Any
      for candidate in candidates:
        ensemble_spec = candidate.ensemble_spec
        for key in sorted(ensemble_spec.export_outputs):
          export_output = ensemble_spec.export_outputs[key]
          if isinstance(export_output,
                        tf.estimator.export.ClassificationOutput):
            if key not in export_outputs:
              export_outputs[key] = ([], [])
            if export_output.scores is not None:
              export_outputs[key][0].append(export_output.scores)
            if export_output.classes is not None:
              export_outputs[key][1].append(export_output.classes)
          elif isinstance(export_output, tf.estimator.export.RegressionOutput):
            if key not in export_outputs:
              export_outputs[key] = []
            export_outputs[key].append(export_output.value)
          elif isinstance(export_output, tf.estimator.export.PredictOutput):
            # Use self._best_predictions() below to get prediction output.
            continue
          else:
            raise TypeError(
                "Values in export_outputs must be ClassificationOutput, "
                "RegressionOutput, or PredictOutput objects. Given: {}".format(
                    export_output))
      # Stack tensor lists into correct ExportOutput type, outputting the
      # correct values based on the best candidate index.
      # The first candidate's outputs determine each key's ExportOutput type
      # (all candidates are assumed to agree; see the docstring).
      best_export_outputs = {}
      for key in sorted(candidates[0].ensemble_spec.export_outputs):
        export_output = candidates[0].ensemble_spec.export_outputs[key]
        if isinstance(export_output, tf.estimator.export.ClassificationOutput):
          scores, classes = None, None
          if export_outputs[key][0]:
            scores = tf.stack(export_outputs[key][0])[best_candidate_index]
          if export_outputs[key][1]:
            classes = tf.stack(export_outputs[key][1])[best_candidate_index]
          output = tf.estimator.export.ClassificationOutput(
              scores=scores, classes=classes)
        elif isinstance(export_output, tf.estimator.export.RegressionOutput):
          value = tf.stack(export_outputs[key])[best_candidate_index]
          output = tf.estimator.export.RegressionOutput(value)
        else:
          # PredictOutput: merge the already-selected best predictions over
          # the first candidate's output keys.
          predictions = copy.copy(export_output.outputs)
          predictions.update(best_predictions)
          output = tf.estimator.export.PredictOutput(predictions)
        best_export_outputs[key] = output
      return best_export_outputs
def _make_checkpoint(self, candidates, subnetwork_specs, iteration_number,
previous_iteration):
"""Returns a `tf.train.Checkpoint` for the iteration."""
# TODO: Handle hook created variables.
# TODO: Handle TPU embedding variables.
trackable = {}
for candidate in candidates:
for ensemble_var in candidate.ensemble_spec.variables:
trackable["{}_{}".format(candidate.ensemble_spec.name,
ensemble_var.name)] = ensemble_var
for candidate_var in candidate.variables:
trackable["candidate_{}_{}".format(candidate.ensemble_spec.name,
candidate_var.name)] = candidate_var
for subnetwork_spec in subnetwork_specs:
for subnetwork_var in subnetwork_spec.variables:
trackable["{}_{}".format(subnetwork_spec.name,
subnetwork_var.name)] = subnetwork_var
global_step = tf_compat.v1.train.get_global_step()
# TODO: Currently, TPUEstimator has no global_step set when
# exporting the saved model.
if global_step is not None:
trackable[tf_compat.v1.GraphKeys.GLOBAL_STEP] = global_step
trackable["iteration_number"] = tf_compat.v1.get_variable(
"iteration_number",
dtype=tf.int64,
# Lambda initializer required for TPU.
initializer=lambda: tf.constant(iteration_number, dtype=tf.int64),
trainable=False)
if previous_iteration:
trackable["previous_iteration"] = previous_iteration.checkpoint
logging.info("TRACKABLE: %s", trackable)
checkpoint = tf_compat.v2.train.Checkpoint(**trackable)
# Make the save counter to satisfy the assert_consumed() assertion later.
# This property creates variables the first time it is called.
checkpoint.save_counter # pylint: disable=pointless-statement
return checkpoint
| 52,252 | 41.447604 | 80 | py |
adanet | adanet-master/adanet/core/evaluator.py | """An AdaNet evaluator implementation in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import logging
from adanet import tf_compat
import numpy as np
import tensorflow.compat.v2 as tf
# TODO: Remove uses of Evaluator once AdaNet Ranker is implemented.
class Evaluator(object):
  """Evaluates candidate ensemble performance."""

  class Objective(object):
    """The Evaluator objective for the metric being optimized.

    Two objectives are currently supported:
      - MINIMIZE: Lower is better for the metric being optimized.
      - MAXIMIZE: Higher is better for the metric being optimized.
    """

    MINIMIZE = "minimize"
    MAXIMIZE = "maximize"

  def __init__(self,
               input_fn,
               metric_name="adanet_loss",
               objective=Objective.MINIMIZE,
               steps=None):
    """Initializes a new Evaluator instance.

    Args:
      input_fn: Input function returning a tuple of: features - Dictionary of
        string feature name to `Tensor`. labels - `Tensor` of labels.
      metric_name: Name of the evaluation metric used to pick the best
        ensemble. Must refer to a valid evaluation metric.
      objective: Either `Objective.MINIMIZE` or `Objective.MAXIMIZE`.
      steps: Number of evaluation steps to run. If an `OutOfRangeError`
        occurs, evaluation stops early. `None` means iterate until the
        dataset is exhausted.

    Returns:
      An :class:`adanet.Evaluator` instance.
    """
    self._input_fn = input_fn
    self._steps = steps
    self._metric_name = metric_name
    self._objective = objective
    # Map each supported objective onto its NaN-safe arg-selection function.
    objective_fns = {
        self.Objective.MINIMIZE: np.nanargmin,
        self.Objective.MAXIMIZE: np.nanargmax,
    }
    if objective not in objective_fns:
      raise ValueError(
          "Evaluator objective must be one of MINIMIZE or MAXIMIZE.")
    self._objective_fn = objective_fns[objective]

  @property
  def input_fn(self):
    """Return the input_fn."""
    return self._input_fn

  @property
  def steps(self):
    """Return the number of evaluation steps."""
    return self._steps

  @property
  def metric_name(self):
    """Returns the name of the metric being optimized."""
    return self._metric_name

  @property
  def objective_fn(self):
    """Returns a fn which selects the best metric based on the objective."""
    return self._objective_fn

  def evaluate(self, sess, ensemble_metrics):
    """Evaluates the given AdaNet objectives on the data from `input_fn`.

    Every candidate sees the same feature/label batches from `input_fn`; its
    losses are computed and accumulated over `steps` batches.

    Args:
      sess: `Session` instance with most recent variable values loaded.
      ensemble_metrics: A list dictionaries of `tf.metrics` for each candidate
        ensemble.

    Returns:
      List of evaluated metrics.
    """
    # Log roughly ten times over the full evaluation.
    if self.steps is None:
      logging_frequency = 1000
    elif self.steps < 10:
      logging_frequency = 1
    else:
      logging_frequency = math.floor(self.steps / 10.)

    objective_metrics = [em[self._metric_name] for em in ensemble_metrics]
    sess.run(tf_compat.v1.local_variables_initializer())
    evals_completed = 0
    while self.steps is None or evals_completed < self.steps:
      try:
        evals_completed += 1
        if (evals_completed % logging_frequency == 0 or
            self.steps == evals_completed):
          logging.info("Ensemble evaluation [%d/%s]", evals_completed,
                       self.steps or "??")
        sess.run(objective_metrics)
      except tf.errors.OutOfRangeError:
        logging.info("Encountered end of input after %d evaluations",
                     evals_completed)
        break

    # Evaluating the first element is idempotent for metric tuples.
    return sess.run([metric[0] for metric in objective_metrics])
| 4,624 | 31.801418 | 80 | py |
adanet | adanet-master/adanet/core/ensemble_builder.py | """An AdaNet ensemble definition in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import functools
import inspect
import os
from absl import logging
from adanet import ensemble as ensemble_lib
from adanet import subnetwork as subnetwork_lib
from adanet import tf_compat
from adanet.core.architecture import _Architecture
from adanet.core.eval_metrics import _EnsembleMetrics
from adanet.core.eval_metrics import _SubnetworkMetrics
from adanet.core.summary import monkey_patched_summaries
import tensorflow.compat.v1 as tf_v1
import tensorflow.compat.v2 as tf
# The only argument names a user-supplied `metric_fn` is allowed to declare;
# enforced by `_verify_metric_fn_args`.
_VALID_METRIC_FN_ARGS = {"features", "labels", "predictions"}
class _EnsembleSpec(
collections.namedtuple("_EnsembleSpec", [
"name",
"ensemble",
"architecture",
"subnetwork_builders",
"subnetwork_specs",
"predictions",
"step",
"variables",
"loss",
"adanet_loss",
"train_op",
"eval_metrics",
"export_outputs",
])):
"""Ensemble training and evaluation `Tensors` and `Ops`.
Args:
name: String name of this ensemble. Should be unique in the graph.
ensemble: The `adanet.ensemble.Ensemble` of interest.
architecture: The `_Architecture` that represents this ensemble.
subnetwork_builders: The Iterable of candidate subnetworks for the current
iteration.
predictions: Predictions `Tensor` or dict of `Tensor`.
step: `tf.Variable` step counter representing the number of steps this
ensemble trained for. Resets at every AdaNet iteration.
variables: List of `tf.Variable` instances associated with the ensemble.
loss: Loss `Tensor` as defined by the surrogate loss function Phi in
Equations (4), (5), and (6). Must be either scalar, or with shape `[1]`.
adanet_loss: Loss `Tensor` as defined by F(w) in Equation (4). Must be
either scalar, or with shape `[1]`. The AdaNet algorithm aims to minimize
this objective which balances training loss with the total complexity of
the subnetworks in the ensemble.
train_op: Candidate ensemble's mixture weights `TrainOpSpec`.
eval_metrics: `_EnsembleMetrics` object.
export_outputs: Describes the output signatures to be exported to
`SavedModel` and used during serving. See `tf.estimator.EstimatorSpec`.
subnetwork_specs: Iterable of `_SubnetworkSpecs` for this iteration.
Returns:
An `EnsembleSpec` object.
"""
def __new__(cls,
name,
ensemble,
architecture,
subnetwork_builders,
predictions,
step,
variables,
loss=None,
adanet_loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
subnetwork_specs=None):
if subnetwork_specs is None:
subnetwork_specs = []
return super(_EnsembleSpec, cls).__new__(
cls,
name=name,
ensemble=ensemble,
architecture=architecture,
subnetwork_builders=subnetwork_builders,
subnetwork_specs=subnetwork_specs,
predictions=predictions,
step=step,
variables=variables,
loss=loss,
adanet_loss=adanet_loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs)
def _verify_metric_fn_args(metric_fn):
if not metric_fn:
return
# Calling low level getargs for py_2_and_3 compatibility.
args = set(inspect.getargs(metric_fn.__code__).args)
invalid_args = list(args - _VALID_METRIC_FN_ARGS)
if invalid_args:
raise ValueError("metric_fn (%s) has following not expected args: %s" %
(metric_fn, invalid_args))
def _get_value(target, key):
if isinstance(target, dict):
return target[key]
return target
def _to_train_op_spec(train_op):
  """Wraps a raw train op in a `TrainOpSpec` when it is not one already."""
  already_spec = isinstance(train_op, subnetwork_lib.TrainOpSpec)
  return train_op if already_spec else subnetwork_lib.TrainOpSpec(train_op)
@contextlib.contextmanager
def _monkey_patch_context(iteration_step_scope, scoped_summary, trainable_vars):
  """Monkey-patches global attributes with subnetwork-specifics ones.

  While the context is active:
    * `get_global_step`/`get_or_create_global_step` (across every TF alias
      module) return the shared per-iteration "iteration_step" variable, so
      code that reads the global step sees the iteration step instead.
    * TF summaries are redirected through `scoped_summary`.
    * The graph's TRAINABLE_VARIABLES collection is replaced by
      `trainable_vars` plus the TPU embedding dummy variables.
  On exit every patch is reverted, and trainable variables created inside the
  context are appended onto the original trainable collection.

  Args:
    iteration_step_scope: Variable scope under which the "iteration_step"
      variable is created or reused (AUTO_REUSE).
    scoped_summary: `_ScopedSummary`-style object that captures summary calls
      made inside the context.
    trainable_vars: List of `tf.Variable`s to expose as the trainable
      collection while patched.

  Yields:
    Nothing; used purely for its patching side effects.
  """
  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
  from tensorflow.python.training import training as train
  from tensorflow.python.training import training_util
  # pylint: enable=g-direct-tensorflow-import,g-import-not-at-top
  # Remember the originals so they can be restored in the finally block.
  old_get_global_step_fn = tf_compat.v1.train.get_global_step
  old_get_or_create_global_step_fn = tf_compat.v1.train.get_or_create_global_step
  old_trainable_vars = tf_compat.v1.trainable_variables()

  def iteration_step(graph=None):
    # Replacement for get_global_step: returns (creating on first use) the
    # shared iteration_step variable at the graph's root name scope.
    graph = graph or tf_compat.v1.get_default_graph()
    with graph.as_default() as g, g.name_scope(None):
      with tf_compat.v1.variable_scope(
          iteration_step_scope, reuse=tf_compat.v1.AUTO_REUSE):
        return tf_compat.v1.get_variable(
            "iteration_step",
            shape=[],
            initializer=tf_compat.v1.zeros_initializer(),
            trainable=False,
            dtype=tf.int64)

  # monkey-patch global attributes. Every alias module must be patched since
  # user code may import the function from any of them.
  setattr(tf_compat.v1.train, "get_global_step", iteration_step)
  setattr(tf_compat.v1.train, "get_or_create_global_step", iteration_step)
  setattr(tf_v1.train, "get_global_step", iteration_step)
  setattr(tf_v1.train, "get_or_create_global_step", iteration_step)
  setattr(tf.train, "get_global_step", iteration_step)
  setattr(tf.train, "get_or_create_global_step", iteration_step)
  setattr(train, "get_global_step", iteration_step)
  setattr(training_util, "get_global_step", iteration_step)
  setattr(train, "get_or_create_global_step", iteration_step)
  setattr(training_util, "get_or_create_global_step", iteration_step)

  # The TPUEmbedding uses dummy variables to coordinate sending and receiving
  # gradients. If no gradients are computed on these dummy variables, the
  # TPUEmbedding will throw an error.
  embedding_variables = tf_compat.v1.get_collection(
      "tpu_embedding_dummy_table_variables")
  _set_trainable_variables(trainable_vars + embedding_variables)

  try:
    with monkey_patched_summaries(scoped_summary):
      yield
  finally:
    # Revert monkey-patches. Variables created inside the context are kept by
    # appending them to the restored trainable collection.
    new_trainable_vars = _get_current_vars(
        diffbase={"trainable": trainable_vars})["trainable"]
    _set_trainable_variables(old_trainable_vars + new_trainable_vars)
    setattr(training_util, "get_or_create_global_step",
            old_get_or_create_global_step_fn)
    setattr(train, "get_or_create_global_step",
            old_get_or_create_global_step_fn)
    setattr(training_util, "get_global_step", old_get_global_step_fn)
    setattr(train, "get_global_step", old_get_global_step_fn)
    setattr(tf.train, "get_or_create_global_step",
            old_get_or_create_global_step_fn)
    setattr(tf.train, "get_global_step", old_get_global_step_fn)
    setattr(tf_v1.train, "get_or_create_global_step",
            old_get_or_create_global_step_fn)
    setattr(tf_v1.train, "get_global_step", old_get_global_step_fn)
    setattr(tf_compat.v1.train, "get_or_create_global_step",
            old_get_or_create_global_step_fn)
    setattr(tf_compat.v1.train, "get_global_step", old_get_global_step_fn)
def _clear_trainable_variables():
  """Empties the graph's TRAINABLE_VARIABLES collection in place."""
  collection = tf_compat.v1.get_collection_ref(
      tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES)
  collection[:] = []
def _set_trainable_variables(var_list):
  """Replaces the TRAINABLE_VARIABLES collection with exactly `var_list`."""
  _clear_trainable_variables()
  collection_name = tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES
  for var in var_list:
    assert isinstance(var, tf.Variable)
    tf_compat.v1.add_to_collections(collection_name, var)
def _get_current_vars(diffbase=None):
  """Returns all current trainable, global, and savable variables.

  Args:
    diffbase: A dictionary of lists variables to diffbase. The allowed keys
      are: "trainable", "global" and "savable".

  Returns:
    A dictionary containing the current trainable, global and savable
    variables. The expected keys are: "trainable", "global" and "savable".
  """
  current = {
      "trainable":
          tf_compat.v1.trainable_variables(),
      "global":
          tf_compat.v1.global_variables(),
      "savable":
          tf_compat.v1.get_collection(
              tf_compat.v1.GraphKeys.SAVEABLE_OBJECTS),
  }
  # Newly created variables are appended to the global collections, so the
  # variables created since `diffbase` was captured form the tail of each
  # collection.
  if diffbase:
    for key, base_vars in diffbase.items():
      if key in current:
        current[key] = current[key][len(base_vars):]
  return current
class _EnsembleBuilder(object):
  """Builds `_EnsembleSpec` instances.

  Args:
    head: A `tf.contrib.estimator.Head` instance.
    metric_fn: A function which should obey the following signature:
      - Args: can only have following three arguments in any order:
        * predictions: Predictions `Tensor` or dict of `Tensor` created by given
          `Head`.
        * features: Input `dict` of `Tensor` objects created by `input_fn` which
          is given to `estimator.evaluate` as an argument.
        * labels: Labels `Tensor` or dict of `Tensor` (for multi-head) created
          by `input_fn` which is given to `estimator.evaluate` as an argument.
      - Returns: Dict of metric results keyed by name. Final metrics are a union
        of this and `Head's` existing metrics. If there is a name conflict
        between this and `estimator`s existing metrics, this will override the
        existing one. The values of the dict are the results of calling a metric
        function, namely a `(metric_tensor, update_op)` tuple.
    use_tpu: Whether AdaNet is running on TPU.
    export_subnetwork_logits: Include subnetwork logits in exports.
    export_subnetwork_last_layer: Include subnetwork last layer in exports.

  Returns:
    An `_EnsembleBuilder` instance.
  """

  # SavedModel signature names used when exporting per-subnetwork outputs.
  _SUBNETWORK_LOGITS_EXPORT_SIGNATURE = "subnetwork_logits"
  _SUBNETWORK_LAST_LAYER_EXPORT_SIGNATURE = "subnetwork_last_layer"

  def __init__(self,
               head,
               metric_fn=None,
               use_tpu=False,
               export_subnetwork_logits=False,
               export_subnetwork_last_layer=False):
    # Fail fast on an invalid user-supplied metric_fn signature.
    _verify_metric_fn_args(metric_fn)
    self._head = head
    self._metric_fn = metric_fn
    self._use_tpu = use_tpu
    self._export_subnetwork_logits = export_subnetwork_logits
    self._export_subnetwork_last_layer = export_subnetwork_last_layer

  def build_ensemble_spec(self,
                          name,
                          candidate,
                          ensembler,
                          subnetwork_specs,
                          summary,
                          features,
                          mode,
                          iteration_number,
                          labels,
                          my_ensemble_index,
                          previous_ensemble_spec,
                          previous_iteration_checkpoint):
    """Builds an `_EnsembleSpec` with the given `adanet.ensemble.Candidate`.

    Args:
      name: The string name of the ensemble. Typically the name of the builder
        that returned the given `Subnetwork`.
      candidate: The `adanet.ensemble.Candidate` for this spec.
      ensembler: The :class:`adanet.ensemble.Ensembler` to use to ensemble a
        group of subnetworks.
      subnetwork_specs: Iterable of `_SubnetworkSpecs` for this iteration.
      summary: A `_ScopedSummary` instance for recording ensemble summaries.
      features: Input `dict` of `Tensor` objects.
      mode: Estimator `ModeKeys` indicating training, evaluation, or inference.
      iteration_number: Integer current iteration number.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head).
      my_ensemble_index: An integer holding the index of the ensemble in the
        candidates list of AdaNet.
      previous_ensemble_spec: Link the rest of the `_EnsembleSpec` from
        iteration t-1. Used for creating the subnetwork train_op.
      previous_iteration_checkpoint: `tf.train.Checkpoint` for iteration t-1.

    Returns:
      An `_EnsembleSpec` instance.
    """
    with tf_compat.v1.variable_scope("ensemble_{}".format(name)):
      # Per-ensemble step counter; reset each AdaNet iteration.
      step = tf_compat.v1.get_variable(
          "step",
          shape=[],
          initializer=tf_compat.v1.zeros_initializer(),
          trainable=False,
          dtype=tf.int64)

      # Convert to tensor so that users cannot mutate it.
      step_tensor = tf.convert_to_tensor(value=step)
      with summary.current_scope():
        summary.scalar("iteration_step/adanet/iteration_step", step_tensor)
      # Track which candidate index was chosen at each past iteration so a
      # run can later be replayed; extend the previous spec's history.
      replay_indices = []
      if previous_ensemble_spec:
        replay_indices = copy.copy(
            previous_ensemble_spec.architecture.replay_indices)
      if my_ensemble_index is not None:
        replay_indices.append(my_ensemble_index)

      architecture = _Architecture(
          candidate.name, ensembler.name, replay_indices=replay_indices)
      previous_subnetworks = []
      previous_subnetwork_specs = []
      subnetwork_builders = []
      previous_ensemble = None
      if previous_ensemble_spec:
        previous_ensemble = previous_ensemble_spec.ensemble
        previous_architecture = previous_ensemble_spec.architecture
        keep_indices = range(len(previous_ensemble.subnetworks))
        if len(candidate.subnetwork_builders) == 1 and previous_ensemble:
          # Prune previous ensemble according to the subnetwork.Builder for
          # backwards compatibility.
          subnetwork_builder = candidate.subnetwork_builders[0]
          prune_previous_ensemble = getattr(subnetwork_builder,
                                            "prune_previous_ensemble", None)
          if callable(prune_previous_ensemble):
            logging.warn(
                "Using an `adanet.subnetwork.Builder#prune_previous_ensemble` "
                "is deprecated. Please use a custom `adanet.ensemble.Strategy` "
                "instead.")
            keep_indices = prune_previous_ensemble(previous_ensemble)
        # Carry over only the previous subnetworks that survived pruning AND
        # that the candidate explicitly wants to keep.
        for i, builder in enumerate(previous_ensemble_spec.subnetwork_builders):
          if i not in keep_indices:
            continue
          if builder not in candidate.previous_ensemble_subnetwork_builders:
            continue
          previous_subnetworks.append(previous_ensemble.subnetworks[i])
          previous_subnetwork_specs.append(
              previous_ensemble_spec.subnetwork_specs[i])
          subnetwork_builders.append(builder)
          architecture.add_subnetwork(*previous_architecture.subnetworks[i])
      for builder in candidate.subnetwork_builders:
        architecture.add_subnetwork(iteration_number, builder.name)
        subnetwork_builders.append(builder)
      # Resolve this candidate's builders to their built specs by name.
      subnetwork_spec_map = {s.builder.name: s for s in subnetwork_specs}
      relevant_subnetwork_specs = [
          subnetwork_spec_map[s.name] for s in candidate.subnetwork_builders
      ]
      ensemble_scope = tf_compat.v1.get_variable_scope()
      # Snapshot the variable collections so variables created below can be
      # diffed out afterwards.
      old_vars = _get_current_vars()
      with summary.current_scope(), _monkey_patch_context(
          iteration_step_scope=ensemble_scope,
          scoped_summary=summary,
          trainable_vars=[]):
        ensemble = ensembler.build_ensemble(
            subnetworks=[s.subnetwork for s in relevant_subnetwork_specs],
            previous_ensemble_subnetworks=previous_subnetworks,
            features=features,
            labels=labels,
            logits_dimension=self._head.logits_dimension,
            training=mode == tf.estimator.ModeKeys.TRAIN,
            iteration_step=step_tensor,
            summary=summary,
            previous_ensemble=previous_ensemble,
            previous_iteration_checkpoint=previous_iteration_checkpoint)

      estimator_spec = _create_estimator_spec(self._head, features, labels,
                                              mode, ensemble.logits,
                                              self._use_tpu)

      ensemble_loss = estimator_spec.loss
      adanet_loss = None
      if mode != tf.estimator.ModeKeys.PREDICT:
        adanet_loss = estimator_spec.loss
        # Add ensembler specific loss
        if isinstance(ensemble, ensemble_lib.ComplexityRegularized):
          adanet_loss += ensemble.complexity_regularization

      predictions = estimator_spec.predictions
      export_outputs = estimator_spec.export_outputs

      # Optionally expose each subnetwork's logits as extra SavedModel
      # signatures, one signature per head for the multi-head (dict) case.
      if (self._export_subnetwork_logits and
          export_outputs and subnetwork_spec_map):
        first_subnetwork_logits = list(
            subnetwork_spec_map.values())[0].subnetwork.logits
        if isinstance(first_subnetwork_logits, dict):
          for head_name in first_subnetwork_logits.keys():
            subnetwork_logits = {
                subnetwork_name: subnetwork_spec.subnetwork.logits[head_name]
                for subnetwork_name, subnetwork_spec in
                subnetwork_spec_map.items()
            }
            export_outputs.update({
                "{}_{}".format(
                    _EnsembleBuilder._SUBNETWORK_LOGITS_EXPORT_SIGNATURE,
                    head_name):
                    tf.estimator.export.PredictOutput(subnetwork_logits)
            })
        else:
          subnetwork_logits = {
              subnetwork_name: subnetwork_spec.subnetwork.logits for
              subnetwork_name, subnetwork_spec in subnetwork_spec_map.items()
          }
          export_outputs.update({
              _EnsembleBuilder._SUBNETWORK_LOGITS_EXPORT_SIGNATURE:
                  tf.estimator.export.PredictOutput(subnetwork_logits)
          })

      # Same optional export for each subnetwork's last layer, when present.
      if (self._export_subnetwork_last_layer and export_outputs and
          subnetwork_spec_map and
          list(subnetwork_spec_map.values())[0].subnetwork.last_layer is
          not None):
        first_subnetwork_last_layer = list(
            subnetwork_spec_map.values())[0].subnetwork.last_layer
        if isinstance(first_subnetwork_last_layer, dict):
          for head_name in first_subnetwork_last_layer.keys():
            subnetwork_last_layer = {
                subnetwork_name:
                    subnetwork_spec.subnetwork.last_layer[head_name] for
                subnetwork_name, subnetwork_spec in subnetwork_spec_map.items()
            }
            export_outputs.update({
                "{}_{}".format(
                    _EnsembleBuilder._SUBNETWORK_LAST_LAYER_EXPORT_SIGNATURE,
                    head_name):
                    tf.estimator.export.PredictOutput(subnetwork_last_layer)
            })
        else:
          subnetwork_last_layer = {
              subnetwork_name: subnetwork_spec.subnetwork.last_layer for
              subnetwork_name, subnetwork_spec in subnetwork_spec_map.items()
          }
          export_outputs.update({
              _EnsembleBuilder._SUBNETWORK_LAST_LAYER_EXPORT_SIGNATURE:
                  tf.estimator.export.PredictOutput(subnetwork_last_layer)
          })

      # Merge any ensembler-provided predictions into the head's outputs.
      if ensemble.predictions and predictions:
        predictions.update(ensemble.predictions)
      if ensemble.predictions and export_outputs:
        export_outputs.update({
            k: tf.estimator.export.PredictOutput(v)
            for k, v in ensemble.predictions.items()
        })

      ensemble_metrics = _EnsembleMetrics(use_tpu=self._use_tpu)
      if mode == tf.estimator.ModeKeys.EVAL:
        ensemble_metrics.create_eval_metrics(
            features=features,
            labels=labels,
            estimator_spec=estimator_spec,
            metric_fn=self._metric_fn,
            architecture=architecture)

      if mode == tf.estimator.ModeKeys.TRAIN:
        with summary.current_scope():
          summary.scalar("loss", estimator_spec.loss)

      # Only the variables created since `old_vars` (i.e. by the ensembler)
      # are trained by the mixture-weight train op below.
      ensemble_trainable_vars = _get_current_vars(
          diffbase=old_vars)["trainable"]
      # Create train ops for training subnetworks and ensembles.
      train_op = None
      if mode == tf.estimator.ModeKeys.TRAIN:
        # Note that these mixture weights are on top of the last_layer of the
        # subnetwork constructed in TRAIN mode, which means that dropout is
        # still applied when the mixture weights are being trained.
        ensemble_scope = tf_compat.v1.get_variable_scope()
        with tf_compat.v1.variable_scope("train_mixture_weights"):
          with summary.current_scope(), _monkey_patch_context(
              iteration_step_scope=ensemble_scope,
              scoped_summary=summary,
              trainable_vars=ensemble_trainable_vars):
            # For backwards compatibility.
            subnetwork_builder = candidate.subnetwork_builders[0]
            old_train_op_fn = getattr(subnetwork_builder,
                                      "build_mixture_weights_train_op", None)
            if callable(old_train_op_fn):
              logging.warn(
                  "The `build_mixture_weights_train_op` method is deprecated. "
                  "Please use the `Ensembler#build_train_op` instead.")
              train_op = _to_train_op_spec(
                  subnetwork_builder.build_mixture_weights_train_op(
                      loss=adanet_loss,
                      var_list=ensemble_trainable_vars,
                      logits=ensemble.logits,
                      labels=labels,
                      iteration_step=step_tensor,
                      summary=summary))
            else:
              train_op = _to_train_op_spec(
                  ensembler.build_train_op(
                      ensemble=ensemble,
                      loss=adanet_loss,
                      var_list=ensemble_trainable_vars,
                      labels=labels,
                      iteration_step=step_tensor,
                      summary=summary,
                      previous_ensemble=previous_ensemble))
      new_vars = _get_current_vars(diffbase=old_vars)
      # Sort our dictionary by key to remove non-determinism of variable order.
      new_vars = collections.OrderedDict(sorted(new_vars.items()))
      # Combine all trainable, global and savable variables into a single list.
      ensemble_variables = sum(new_vars.values(), []) + [step]
      return _EnsembleSpec(
          name=name,
          architecture=architecture,
          subnetwork_builders=subnetwork_builders,
          subnetwork_specs=previous_subnetwork_specs + relevant_subnetwork_specs,
          ensemble=ensemble,
          predictions=predictions,
          step=step,
          variables=ensemble_variables,
          loss=ensemble_loss,
          adanet_loss=adanet_loss,
          train_op=train_op,
          eval_metrics=ensemble_metrics,
          export_outputs=export_outputs)
def _create_estimator_spec(head, features, labels, mode, logits, use_tpu):
  """Creates the head's EstimatorSpec or TPUEstimatorSpec on TPU.

  On TPU the head's private TPU-aware constructor is used; otherwise the
  public `create_estimator_spec` is called. The train op is a no-op since
  AdaNet builds its own train ops.
  """
  create_spec_fn = (
      head._create_tpu_estimator_spec  # pylint: disable=protected-access
      if use_tpu else head.create_estimator_spec)
  return create_spec_fn(
      features=features,
      labels=labels,
      mode=mode,
      logits=logits,
      train_op_fn=lambda _: tf.no_op())
class _SubnetworkSpec(
collections.namedtuple("_SubnetworkSpec", [
"name",
"subnetwork",
"builder",
"predictions",
"step",
"variables",
"loss",
"train_op",
"eval_metrics",
"asset_dir",
])):
"""Subnetwork training and evaluation `Tensors` and `Ops`.
Args:
name: String name of this subnetwork. Should be unique in the graph.
subnetwork: The `adanet.subnetwork.Subnetwork` for this spec.
builder: The `adanet.subnetwork.Builder` that produced `subnetwork`.
predictions: Predictions `Tensor` or dict of `Tensor`.
step: `tf.Variable` step counter representing the number of steps this
subnetwork trained for. Resets at every AdaNet iteration.
loss: Loss `Tensor` as computed by the `Head`. Must be either scalar, or
with shape `[1]`.
train_op: Candidate subnetwork's `TrainOpSpec`.
eval_metrics: `_SubnetworkMetrics` object.
asset_dir: Checkpoint directory for the sub-estimators.
Returns:
A `_SubnetworkSpec` object.
"""
def __new__(cls,
name,
subnetwork,
builder,
predictions,
step,
variables,
loss=None,
train_op=None,
eval_metrics=None,
asset_dir=None):
return super(_SubnetworkSpec, cls).__new__(
cls,
name=name,
subnetwork=subnetwork,
builder=builder,
predictions=predictions,
step=step,
variables=variables,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
asset_dir=asset_dir)
class _SubnetworkManager(object):
  """Builds `_SubnetworkSpec` instances.

  This class manages an `adanet.subnetwork.Builder`, creates its subnetwork and
  train ops, and returns a `_SubnetworkSpec` that holds them.

  Args:
    head: A `tf.contrib.estimator.Head` instance.
    metric_fn: A function which should obey the following signature:
      - Args: can only have following three arguments in any order:
        * predictions: Predictions `Tensor` or dict of `Tensor` created by given
          `Head`.
        * features: Input `dict` of `Tensor` objects created by `input_fn` which
          is given to `estimator.evaluate` as an argument.
        * labels: Labels `Tensor` or dict of `Tensor` (for multi-head) created
          by `input_fn` which is given to `estimator.evaluate` as an argument.
      - Returns: Dict of metric results keyed by name. Final metrics are a union
        of this and `Head's` existing metrics. If there is a name conflict
        between this and `estimator`s existing metrics, this will override the
        existing one. The values of the dict are the results of calling a metric
        function, namely a `(metric_tensor, update_op)` tuple.
    use_tpu: Whether AdaNet is running on TPU.

  Returns:
    An `_SubnetworkManager` instance.

  Raises:
    ValueError: If `max_steps` is <= 0.
  """

  def __init__(self, head, metric_fn=None, use_tpu=False):
    # Fail fast on an invalid user-supplied metric_fn signature.
    _verify_metric_fn_args(metric_fn)
    self._head = head
    self._metric_fn = metric_fn
    self._use_tpu = use_tpu

  def build_subnetwork_spec(self,
                            name,
                            subnetwork_builder,
                            summary,
                            features,
                            mode,
                            labels=None,
                            previous_ensemble=None,
                            config=None):
    """Builds a `_SubnetworkSpec` from the given `adanet.subnetwork.Builder`.

    Args:
      name: String name of the subnetwork.
      subnetwork_builder: A `adanet.Builder` instance which defines how to
        train the subnetwork and ensemble mixture weights.
      summary: A `_ScopedSummary` instance for recording ensemble summaries.
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head). Can be `None`.
      previous_ensemble: The previous `Ensemble` from iteration t-1. Used for
        creating the subnetwork train_op.
      config: The `tf.estimator.RunConfig` to use this iteration.

    Returns:
      An new `EnsembleSpec` instance with the `Subnetwork` appended.
    """
    # Snapshot the variable collections so variables created below can be
    # diffed out afterwards.
    old_vars = _get_current_vars()
    with tf_compat.v1.variable_scope("subnetwork_{}".format(name)):
      # Per-subnetwork step counter; reset each AdaNet iteration.
      step = tf_compat.v1.get_variable(
          "step",
          shape=[],
          initializer=tf_compat.v1.zeros_initializer(),
          trainable=False,
          dtype=tf.int64)

      # Convert to tensor so that users cannot mutate it.
      step_tensor = tf.convert_to_tensor(value=step)
      with summary.current_scope():
        summary.scalar("iteration_step/adanet/iteration_step", step_tensor)
      # Give each subnetwork its own model_dir under assets/<name>; fall back
      # to a fresh default RunConfig when none was provided.
      if config:
        subnetwork_config = config.replace(
            model_dir=os.path.join(config.model_dir, "assets", name))
      else:
        subnetwork_config = tf.estimator.RunConfig(
            session_config=tf.compat.v1.ConfigProto(
                gpu_options=tf.compat.v1.GPUOptions(allow_growth=True)))

      build_subnetwork = functools.partial(
          subnetwork_builder.build_subnetwork,
          features=features,
          logits_dimension=self._head.logits_dimension,
          training=mode == tf.estimator.ModeKeys.TRAIN,
          iteration_step=step_tensor,
          summary=summary,
          previous_ensemble=previous_ensemble)
      # Check which args are in the implemented build_subnetwork method
      # signature for backwards compatibility.
      # Calling low level getargs for py_2_and_3 compatibility.
      defined_args = inspect.getargs(
          subnetwork_builder.build_subnetwork.__code__).args
      if "labels" in defined_args:
        build_subnetwork = functools.partial(build_subnetwork, labels=labels)
      if "config" in defined_args:
        build_subnetwork = functools.partial(
            build_subnetwork, config=subnetwork_config)
      subnetwork_scope = tf_compat.v1.get_variable_scope()
      with summary.current_scope(), _monkey_patch_context(
          iteration_step_scope=subnetwork_scope,
          scoped_summary=summary,
          trainable_vars=[]):
        subnetwork = build_subnetwork()
      # Only the variables created by build_subnetwork() are trained by the
      # subnetwork's train op below.
      subnetwork_var_list = _get_current_vars(diffbase=old_vars)["trainable"]

      estimator_spec = _create_estimator_spec(self._head, features, labels,
                                              mode, subnetwork.logits,
                                              self._use_tpu)

      subnetwork_metrics = _SubnetworkMetrics(self._use_tpu)
      if mode == tf.estimator.ModeKeys.EVAL:
        subnetwork_metrics.create_eval_metrics(
            features=features,
            labels=labels,
            estimator_spec=estimator_spec,
            metric_fn=self._metric_fn)

      if mode == tf.estimator.ModeKeys.TRAIN:
        with summary.current_scope():
          summary.scalar("loss", estimator_spec.loss)

      # Create train ops for training subnetworks and ensembles.
      train_op = None
      if mode == tf.estimator.ModeKeys.TRAIN and subnetwork_builder:
        with summary.current_scope(), _monkey_patch_context(
            iteration_step_scope=subnetwork_scope,
            scoped_summary=summary,
            trainable_vars=subnetwork_var_list):
          train_op = _to_train_op_spec(
              subnetwork_builder.build_subnetwork_train_op(
                  subnetwork=subnetwork,
                  loss=estimator_spec.loss,
                  var_list=subnetwork_var_list,
                  labels=labels,
                  iteration_step=step_tensor,
                  summary=summary,
                  previous_ensemble=previous_ensemble))
      new_vars = _get_current_vars(diffbase=old_vars)
      # Sort our dictionary by key to remove non-determinism of variable order.
      new_vars = collections.OrderedDict(sorted(new_vars.items()))
      # Combine all trainable, global and savable variables into a single list.
      subnetwork_variables = sum(new_vars.values(), []) + [step]
      return _SubnetworkSpec(
          name=name,
          subnetwork=subnetwork,
          builder=subnetwork_builder,
          predictions=estimator_spec.predictions,
          variables=subnetwork_variables,
          loss=estimator_spec.loss,
          step=step,
          train_op=train_op,
          eval_metrics=subnetwork_metrics,
          asset_dir=subnetwork_config.model_dir)
| 32,594 | 39.440447 | 88 | py |
adanet | adanet-master/adanet/core/estimator_v2_test.py | """Test AdaNet estimator single graph implementation for TF 2.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.estimator import Estimator
from adanet.core.report_materializer import ReportMaterializer
from adanet.subnetwork import Builder
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
import tensorflow.compat.v2 as tf
from tensorflow_estimator.python.estimator.head import regression_head
logging.set_verbosity(logging.INFO)
XOR_FEATURES = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
XOR_LABELS = [[1.], [0.], [1.], [0.]]
class _SimpleBuilder(Builder):
  """Toy subnetwork builder: an input layer feeding a single dense logits layer.

  Also emits scalar, image, and nested-scope summaries so tests can verify
  that candidate summaries are written to the right directories.
  """

  def __init__(self, name, seed=42):
    self._name = name
    self._seed = seed

  @property
  def name(self):
    return self._name

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    # Shift the seed on later iterations so each one learns something
    # different from its predecessor.
    effective_seed = self._seed + (1 if previous_ensemble else 0)
    with tf_compat.v1.variable_scope("simple"):
      last_layer = tf_compat.v1.feature_column.input_layer(
          features=features,
          feature_columns=tf.feature_column.numeric_column("x", 2))
      with tf_compat.v1.variable_scope("logits"):
        logits = tf_compat.v1.layers.dense(
            last_layer,
            logits_dimension,
            kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
                seed=effective_seed))
      # Record one of each summary kind, including one in a nested scope.
      summary.scalar("scalar", 3)
      batch_size = features["x"].get_shape().as_list()[0]
      summary.image("image", tf.ones([batch_size, 3, 3, 1]))
      with tf_compat.v1.variable_scope("nested"):
        summary.scalar("scalar", 5)
    return Subnetwork(
        last_layer=last_layer,
        logits=logits,
        complexity=1,
        persisted_tensors={},
    )

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    # Plain SGD over only this subnetwork's variables.
    return tf_compat.v1.train.GradientDescentOptimizer(
        learning_rate=.001).minimize(loss, var_list=var_list)
class EstimatorSummaryWriterTest(tu.AdanetTestCase):
  """Test that Tensorboard summaries get written correctly."""
  @tf_compat.skip_for_tf1
  def test_summaries(self):
    """Tests that summaries are written to candidate directory."""
    # Summaries are saved every 2 steps so a 3-step run produces events.
    run_config = tf.estimator.RunConfig(
        tf_random_seed=42,
        log_step_count_steps=2,
        save_summary_steps=2,
        model_dir=self.test_subdirectory)
    subnetwork_generator = SimpleGenerator([_SimpleBuilder("dnn")])
    report_materializer = ReportMaterializer(
        input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
    estimator = Estimator(
        head=regression_head.RegressionHead(
            loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
        subnetwork_generator=subnetwork_generator,
        report_materializer=report_materializer,
        max_iteration_steps=10,
        config=run_config)
    train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
    estimator.train(input_fn=train_input_fn, max_steps=3)
    # Expected loss value is deterministic given the fixed tf_random_seed.
    ensemble_loss = 1.52950
    # Top-level (estimator) event file.
    self.assertAlmostEqual(
        ensemble_loss,
        tu.check_eventfile_for_keyword("loss", self.test_subdirectory),
        places=3)
    self.assertIsNotNone(
        tu.check_eventfile_for_keyword("global_step/sec",
                                       self.test_subdirectory))
    self.assertEqual(
        0.,
        tu.check_eventfile_for_keyword("iteration/adanet/iteration",
                                       self.test_subdirectory))
    # Per-subnetwork candidate event files (written by _SimpleBuilder).
    subnetwork_subdir = os.path.join(self.test_subdirectory,
                                     "subnetwork/t0_dnn")
    self.assertAlmostEqual(
        3.,
        tu.check_eventfile_for_keyword("scalar", subnetwork_subdir),
        places=3)
    self.assertEqual((3, 3, 1),
                     tu.check_eventfile_for_keyword("image", subnetwork_subdir))
    self.assertAlmostEqual(
        5.,
        tu.check_eventfile_for_keyword("nested/scalar", subnetwork_subdir),
        places=3)
    # Per-ensemble candidate event files.
    ensemble_subdir = os.path.join(
        self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
    self.assertAlmostEqual(
        ensemble_loss,
        tu.check_eventfile_for_keyword(
            "adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir),
        places=1)
    self.assertAlmostEqual(
        0.,
        tu.check_eventfile_for_keyword(
            "complexity_regularization/adanet/adanet_weighted_ensemble",
            ensemble_subdir),
        places=3)
    self.assertAlmostEqual(
        1.,
        tu.check_eventfile_for_keyword(
            "mixture_weight_norms/adanet/"
            "adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
        places=3)
if __name__ == "__main__":
  # These tests target TF 2.x behavior (see @tf_compat.skip_for_tf1 above).
  tf.enable_v2_behavior()
  tf.test.main()
| 5,850 | 33.216374 | 80 | py |
adanet | adanet-master/adanet/core/evaluator_test.py | """Test AdaNet evaluator single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.evaluator import Evaluator
import adanet.core.testing_utils as tu
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
def _fake_adanet_losses_0(input_fn):
  """Fake AdaNet losses where the first candidate has the smaller loss."""
  unused_features, labels = input_fn()
  base_loss = tf.reduce_sum(labels)
  doubled_loss = tf.reduce_sum(labels * 2)
  return [base_loss, doubled_loss]
def _fake_adanet_losses_1(input_fn):
  """Fake AdaNet losses where the second candidate has the smaller loss."""
  unused_features, labels = input_fn()
  doubled_loss = tf.reduce_sum(labels * 2)
  base_loss = tf.reduce_sum(labels)
  return [doubled_loss, base_loss]
class EvaluatorTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for `Evaluator.evaluate` and its objective handling."""
  @parameterized.named_parameters({
      "testcase_name": "choose_index_0",
      "input_fn": tu.dummy_input_fn([[1., 2]], [[3.]]),
      "steps": 3,
      "adanet_losses": _fake_adanet_losses_0,
      "want_adanet_losses": [3, 6],
  }, {
      "testcase_name": "choose_index_1",
      "input_fn": tu.dummy_input_fn([[1., 2]], [[3.]]),
      "steps": 3,
      "adanet_losses": _fake_adanet_losses_1,
      "want_adanet_losses": [6, 3],
  }, {
      "testcase_name": "none_steps",
      "input_fn": tu.dataset_input_fn(),
      "steps": None,
      "adanet_losses": _fake_adanet_losses_1,
      "want_adanet_losses": [18, 9],
  }, {
      "testcase_name": "input_fn_out_of_range",
      "input_fn": tu.dataset_input_fn(),
      "steps": 3,
      "adanet_losses": _fake_adanet_losses_1,
      "want_adanet_losses": [18, 9],
  })
  @test_util.run_in_graph_and_eager_modes
  def test_evaluate_no_metric_fn_falls_back_to_adanet_losses(
      self, input_fn, steps, adanet_losses, want_adanet_losses):
    """Without a metric_fn, evaluate() reports the raw adanet losses."""
    with context.graph_mode():
      adanet_losses = adanet_losses(input_fn)
      metrics = [{
          "adanet_loss": tf_compat.v1.metrics.mean(loss)
      } for loss in adanet_losses]
      with self.test_session() as sess:
        evaluator = Evaluator(input_fn=input_fn, steps=steps)
        adanet_losses = evaluator.evaluate(sess, ensemble_metrics=metrics)
        self.assertEqual(want_adanet_losses, adanet_losses)
  # NOTE: the original testcase names were swapped relative to their
  # parameters; they now match the objective each case actually asserts.
  @parameterized.named_parameters(
      {
          "testcase_name": "maximize_returns_nanargmax",
          "objective": Evaluator.Objective.MAXIMIZE,
          "expected_objective_fn": np.nanargmax,
          "metric_fn": lambda x, y: None
      }, {
          "testcase_name": "minimize_returns_nanargmin",
          "objective": Evaluator.Objective.MINIMIZE,
          "expected_objective_fn": np.nanargmin,
          "metric_fn": lambda x, y: None
      })
  @test_util.run_in_graph_and_eager_modes
  def test_objective(self, objective, expected_objective_fn, metric_fn=None):
    """Each Objective maps to the matching numpy nan-aware arg function."""
    evaluator = Evaluator(input_fn=None, objective=objective)
    self.assertEqual(expected_objective_fn, evaluator.objective_fn)
  @test_util.run_in_graph_and_eager_modes
  def test_objective_unsupported_objective(self):
    """An unknown objective string raises ValueError at construction."""
    with self.assertRaises(ValueError):
      Evaluator(input_fn=None, objective="non_existent_objective")
  @test_util.run_in_graph_and_eager_modes
  def test_evaluate(self):
    """evaluate() returns the named metric's value for each candidate."""
    with context.graph_mode():
      input_fn = tu.dummy_input_fn([[1., 2]], [[3.]])
      _, labels = input_fn()
      predictions = [labels * 2, labels * 3]
      metrics = []
      for preds in predictions:
        metrics.append({
            "mse": tf_compat.v1.metrics.mean_squared_error(labels, preds),
            "other_metric_1": (tf.constant(1), tf.constant(1)),
            "other_metric_2": (tf.constant(2), tf.constant(2))
        })
      with self.test_session() as sess:
        evaluator = Evaluator(input_fn=input_fn, metric_name="mse", steps=3)
        metrics = evaluator.evaluate(sess, ensemble_metrics=metrics)
        self.assertEqual([9, 36], metrics)
  @test_util.run_in_graph_and_eager_modes
  def test_evaluate_invalid_metric(self):
    """A metric_name absent from the metric dicts raises KeyError."""
    with context.graph_mode():
      input_fn = tu.dummy_input_fn([[1., 2]], [[3.]])
      _, labels = input_fn()
      predictions = [labels * 2, labels * 3]
      metrics = []
      for preds in predictions:
        metrics.append({
            "mse": tf_compat.v1.metrics.mean_squared_error(labels, preds),
            "other_metric_1": (tf.constant(1), tf.constant(1)),
            "other_metric_2": (tf.constant(2), tf.constant(2))
        })
      with self.test_session() as sess:
        evaluator = Evaluator(input_fn=input_fn, metric_name="dne", steps=3)
        with self.assertRaises(KeyError):
          metrics = evaluator.evaluate(sess, ensemble_metrics=metrics)
if __name__ == "__main__":
  # Runs every test case in this module.
  tf.test.main()
| 5,382 | 33.954545 | 77 | py |
adanet | adanet-master/adanet/core/eval_metrics_test.py | """Tests for AdaNet eval metrics.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.architecture import _Architecture
from adanet.core.eval_metrics import _call_eval_metrics
import adanet.core.testing_utils as tu
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class MetricsTest(tu.AdanetTestCase):
  """Tests subnetwork, ensemble, and iteration eval metrics."""
  def setup_graph(self):
    """Builds shared multi-head features, labels and a TPUEstimatorSpec."""
    # We only test the multi head since this is the general case.
    self._features = {"x": tf.constant([[1.], [2.]])}
    heads = ("head_1", "head_2")
    labels = tf.constant([0, 1])
    self._labels = {head: labels for head in heads}
    predictions = {(head, "predictions"): labels for head in heads}
    loss = tf.constant(2.)
    self._estimator_spec = tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
        mode=tf.estimator.ModeKeys.EVAL,
        loss=loss,
        predictions=predictions,
        eval_metrics=(self._spec_metric_fn, {
            "features": self._features,
            "labels": self._labels,
            "predictions": predictions,
            "loss": loss
        }))
  def _run_metrics(self, metrics):
    """Runs metric update ops, then returns each metric's evaluated value."""
    metric_ops = metrics
    if isinstance(metric_ops, tuple):
      # A (metric_fn, tensors) tuple must be called to get the op dict.
      metric_ops = _call_eval_metrics(metric_ops)
    self.evaluate((tf_compat.v1.global_variables_initializer(),
                   tf_compat.v1.local_variables_initializer()))
    self.evaluate(metric_ops)
    return {k: self.evaluate(metric_ops[k][0]) for k in metric_ops}
  def _assert_tensors_equal(self, actual, expected):
    """Evaluates both structures and asserts element-wise equality."""
    actual, expected = self.evaluate((actual, expected))
    self.assertEqual(actual, expected)
  def _spec_metric_fn(self, features, labels, predictions, loss):
    """Asserts the arguments are passed through from the EstimatorSpec."""
    actual = [features, labels, predictions, loss]
    expected = [
        self._features, self._labels, self._estimator_spec.predictions,
        self._estimator_spec.loss
    ]
    self._assert_tensors_equal(actual, expected)
    return {"metric_1": tf_compat.v1.metrics.mean(tf.constant(1.))}
  def _metric_fn(self, features, predictions):
    """Asserts the user metric_fn receives features and predictions."""
    actual = [features, predictions]
    expected = [self._features, self._estimator_spec.predictions]
    self._assert_tensors_equal(actual, expected)
    return {"metric_2": tf_compat.v1.metrics.mean(tf.constant(2.))}
  @parameterized.named_parameters(
      {
          "testcase_name": "use_tpu",
          "use_tpu": True,
      },
      {
          # TODO: Figure out why this gives error in TF 2.0:
          # ValueError: Please call update_state(...) on the "mean_1" metric.
          "testcase_name": "not_use_tpu",
          "use_tpu": False,
      })
  @test_util.run_in_graph_and_eager_modes
  def test_subnetwork_metrics(self, use_tpu):
    """Subnetwork metrics merge spec loss with both metric_fn outputs."""
    with context.graph_mode():
      self.setup_graph()
      spec = self._estimator_spec
      if not use_tpu:
        spec = spec.as_estimator_spec()
      metrics = tu.create_subnetwork_metrics(
          self._metric_fn,
          use_tpu=use_tpu,
          features=self._features,
          labels=self._labels,
          estimator_spec=spec)
      actual = self._run_metrics(metrics.eval_metrics_tuple())
      expected = {"loss": 2., "metric_1": 1., "metric_2": 2.}
      self.assertEqual(actual, expected)
  @test_util.run_in_graph_and_eager_modes
  def test_subnetwork_metrics_user_metric_fn_overrides_metrics(self):
    """A user metric_fn with a clashing key wins over the spec's metric."""
    with context.graph_mode():
      self.setup_graph()
      overridden_value = 100.
      def _overriding_metric_fn():
        value = tf.constant(overridden_value)
        return {"metric_1": tf_compat.v1.metrics.mean(value)}
      metrics = tu.create_subnetwork_metrics(
          _overriding_metric_fn,
          features=self._features,
          labels=self._labels,
          estimator_spec=self._estimator_spec)
      actual = self._run_metrics(metrics.eval_metrics_tuple())
      expected = {"loss": 2., "metric_1": overridden_value}
      self.assertEqual(actual, expected)
  @test_util.run_in_graph_and_eager_modes
  def test_ensemble_metrics(self):
    """Ensemble metrics include a serialized architecture summary."""
    with context.graph_mode():
      self.setup_graph()
      architecture = _Architecture("test_ensemble_candidate", "test_ensembler")
      architecture.add_subnetwork(iteration_number=0, builder_name="b_0_0")
      architecture.add_subnetwork(iteration_number=0, builder_name="b_0_1")
      architecture.add_subnetwork(iteration_number=1, builder_name="b_1_0")
      architecture.add_subnetwork(iteration_number=2, builder_name="b_2_0")
      metrics = tu.create_ensemble_metrics(
          self._metric_fn,
          features=self._features,
          labels=self._labels,
          estimator_spec=self._estimator_spec,
          architecture=architecture)
      actual = self._run_metrics(metrics.eval_metrics_tuple())
      serialized_arch_proto = actual["architecture/adanet/ensembles"]
      expected_arch_string = b"| b_0_0 | b_0_1 | b_1_0 | b_2_0 |"
      self.assertIn(expected_arch_string, serialized_arch_proto)
  @parameterized.named_parameters(
      {
          "testcase_name": "use_tpu_evaluating",
          "use_tpu": True,
          "mode": tf.estimator.ModeKeys.EVAL,
      }, {
          "testcase_name": "use_tpu_not_evaluating",
          "use_tpu": True,
          "mode": tf.estimator.ModeKeys.TRAIN,
      }, {
          "testcase_name": "not_use_tpu_evaluating",
          "use_tpu": False,
          "mode": tf.estimator.ModeKeys.EVAL,
      }, {
          "testcase_name": "not_use_tpu_not_evaluating",
          "use_tpu": False,
          "mode": tf.estimator.ModeKeys.TRAIN,
      })
  @test_util.run_in_graph_and_eager_modes
  def test_iteration_metrics(self, use_tpu, mode):
    """Iteration metrics report the chosen candidate's values in EVAL only."""
    with context.graph_mode():
      self.setup_graph()
      best_candidate_index = 3
      ensemble_metrics = []
      for i in range(10):
        # Each candidate reports its own index so the best one is identifiable.
        def metric_fn(val=i):
          metric = tf.keras.metrics.Mean()
          metric.update_state(tf.constant(val))
          return {
              "ensemble_v1_metric": tf_compat.v1.metrics.mean(tf.constant(val)),
              "ensemble_keras_metric": metric
          }
        ensemble_metrics.append(tu.create_ensemble_metrics(metric_fn))
      metrics = tu.create_iteration_metrics(ensemble_metrics=ensemble_metrics)
      metrics_fn = (
          metrics.best_eval_metrics_tuple
          if use_tpu else metrics.best_eval_metric_ops)
      actual = self._run_metrics(
          metrics_fn(tf.constant(best_candidate_index), mode) or {})
      if mode == tf.estimator.ModeKeys.EVAL:
        expected = {
            "ensemble_v1_metric": best_candidate_index,
            "ensemble_keras_metric": best_candidate_index,
            "iteration": 1
        }
        # We don't actually provide an architecture, so the default will be
        # inside.
        del actual["architecture/adanet/ensembles"]
      else:
        expected = {}
      self.assertEqual(actual, expected)
  @test_util.run_in_graph_and_eager_modes
  def test_metric_ops_not_duplicated_on_cpu(self):
    """Repeated eval_metrics_ops calls return the same cached op dicts."""
    with context.graph_mode():
      self.setup_graph()
      metric_fn = lambda: {"metric": (tf.constant(5), tf.constant(5))}
      best_candidate_index = 3
      mode = tf.estimator.ModeKeys.EVAL
      ensemble_metrics = tu.create_ensemble_metrics(metric_fn)
      subnetwork_metrics = tu.create_subnetwork_metrics(metric_fn)
      iteration_metrics = tu.create_iteration_metrics(
          ensemble_metrics=[ensemble_metrics],
          subnetwork_metrics=[subnetwork_metrics])
      ensemble_ops1 = ensemble_metrics.eval_metrics_ops()
      ensemble_ops2 = ensemble_metrics.eval_metrics_ops()
      subnetwork_ops1 = subnetwork_metrics.eval_metrics_ops()
      subnetwork_ops2 = subnetwork_metrics.eval_metrics_ops()
      iteration_ops1 = iteration_metrics.best_eval_metric_ops(
          best_candidate_index, mode)
      iteration_ops2 = iteration_metrics.best_eval_metric_ops(
          best_candidate_index, mode)
      self.assertEqual(subnetwork_ops1, subnetwork_ops2)
      self.assertEqual(ensemble_ops1, ensemble_ops2)
      self.assertEqual(iteration_ops1, iteration_ops2)
      for ops in [ensemble_ops1, subnetwork_ops1, iteration_ops1]:
        self.assertIsNotNone(ops)
if __name__ == "__main__":
  # Runs every test case in this module.
  tf.test.main()
| 8,994 | 35.714286 | 80 | py |
adanet | adanet-master/adanet/core/report_accessor.py | """Store and retrieve adanet.IterationReport protos.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import logging
from adanet import subnetwork
import numpy as np
import six
import tensorflow.compat.v2 as tf
# TODO: Encapsulate conversion and serialization of a
# MaterializedReport dict within MaterializedReport.
def _json_report_to_materialized_report(iteration_report_json):
  """Converts one JSON-loaded iteration report into `MaterializedReport`s."""
  iteration_number = int(iteration_report_json["iteration_number"])
  return [
      subnetwork.MaterializedReport(
          iteration_number=iteration_number,
          name=report_json["name"],
          hparams=report_json["hparams"],
          attributes=report_json["attributes"],
          metrics=report_json["metrics"],
          included_in_final_ensemble=report_json["included_in_final_ensemble"])
      for report_json in iteration_report_json["subnetwork_reports"]
  ]
def _validate_report_dict(dictionary):
"""Validates that entries of a MaterializedReport dictionary field."""
for key, value in dictionary.items():
if isinstance(value, np.integer):
dictionary[key] = int(value)
if isinstance(value, np.float):
dictionary[key] = float(value)
if isinstance(value, (six.string_types, six.binary_type)):
if six.PY2:
if not isinstance(value, six.text_type):
dictionary[key] = six.u(value).encode("utf-8")
if six.PY3:
dictionary[key] = str(dictionary[key])
elif not isinstance(value, (bool, six.text_type, int, float)):
raise ValueError("Values must be a binary type "
"(str in python 2; bytes in python 3), "
"a text type (unicode in python 2; str in python 3), "
"int, bool, or float, but its type is {}.".format(
type(value)))
return dictionary
def _subnetwork_report_to_dict(subnetwork_report):
  """Serializes one subnetwork report into a JSON-friendly dict."""
  # Keep the key insertion order stable so the emitted JSON layout is
  # unchanged: name, hparams, attributes, metrics, included_in_final_ensemble.
  converted = {"name": subnetwork_report.name}
  for field in ("hparams", "attributes", "metrics"):
    converted[field] = _validate_report_dict(getattr(subnetwork_report, field))
  converted["included_in_final_ensemble"] = (
      subnetwork_report.included_in_final_ensemble)
  return converted
class _ReportAccessor(object):
  """Persists and loads iteration reports as a single JSON file."""

  def __init__(self, report_dir, filename="iteration_reports.json"):
    """Constructs a `_ReportAccessor`.

    Args:
      report_dir: Directory where the report file lives; created if missing.
      filename: Basename of the JSON report file.

    Returns:
      A `_ReportAccessor` instance.
    """
    tf.io.gfile.makedirs(report_dir)
    self._full_filepath = os.path.join(report_dir, filename)

  def write_iteration_report(self, iteration_number, materialized_reports):
    """Appends one iteration's `MaterializedReport`s to the JSON file.

    TODO: Remove iteration_number from the argument of this method.
    The iteration numbers carried by the individual materialized reports are
    ignored; only the `iteration_number` argument is recorded.

    Args:
      iteration_number: Int for the iteration number.
      materialized_reports: A list of `adanet.subnetwork.MaterializedReport`
        objects.
    """
    report_entry = {
        "iteration_number":
            int(iteration_number),
        "subnetwork_reports": [
            _subnetwork_report_to_dict(report)
            for report in materialized_reports
        ],
    }
    self._append_report_entry(report_entry)
    logging.info("Wrote IterationReport for iteration %s to %s",
                 iteration_number, self._full_filepath)

  def _append_report_entry(self, report_entry):
    """Appends one iteration report dict to the JSON file on disk."""
    existing_entries = []
    if os.path.exists(self._full_filepath):
      with open(self._full_filepath, "r") as json_file:
        existing_entries = json.load(json_file)
    existing_entries.append(report_entry)
    with open(self._full_filepath, "w") as json_file:
      json.dump(existing_entries, json_file)

  def read_iteration_reports(self):
    """Reads every recorded iteration of the report.

    Each `adanet.subnetwork.MaterializedReport` list corresponds to one AdaNet
    iteration, ordered from iteration 0 upward.

    Returns:
      Iterable of lists of `adanet.subnetwork.MaterializedReport`s.
    """
    if not os.path.exists(self._full_filepath):
      return []
    with open(self._full_filepath, "r") as json_file:
      report_entries = json.load(json_file)
    return [
        _json_report_to_materialized_report(entry)
        for entry in report_entries
    ]
| 5,581 | 33.8875 | 80 | py |
adanet | adanet-master/adanet/core/candidate.py | """The AdaNet candidate implementation in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from adanet import tf_compat
import tensorflow.compat.v2 as tf
class _Candidate(
collections.namedtuple("_Candidate",
["ensemble_spec", "adanet_loss", "variables"])):
"""An AdaNet candidate.
A `_Candidate` tracks the progress of a candidate subnetwork's training
within an ensemble, as well as their AdaNet loss over time.
"""
def __new__(cls, ensemble_spec, adanet_loss, variables):
"""Creates a validated `_Candidate` instance.
Args:
ensemble_spec: The `_EnsembleSpec` instance to track.
adanet_loss: float `Tensor` representing the ensemble's AdaNet loss on the
training set as defined in Equation (4) of the paper.
variables: List of `tf.Variable` instances associated with the ensemble.
Returns:
A validated `_Candidate` object.
Raises:
ValueError: If validation fails.
"""
if ensemble_spec is None:
raise ValueError("ensemble_spec is required")
if adanet_loss is None:
raise ValueError("adanet_loss is required")
return super(_Candidate, cls).__new__(
cls,
ensemble_spec=ensemble_spec,
adanet_loss=adanet_loss,
variables=variables)
class _CandidateBuilder(object):
  """Builds AdaNet candidates."""
  def __init__(self, adanet_loss_decay=.999):
    """Creates a `_CandidateBuilder` instance.
    Args:
      adanet_loss_decay: Float. The adanet loss is tracked as an exponential
        moving average, so this is the decay rate to use.
    Returns:
      A `_CandidateBuilder` object.
    """
    self._adanet_loss_decay = adanet_loss_decay
    super(_CandidateBuilder, self).__init__()
  def build_candidate(self,
                      ensemble_spec,
                      training,
                      summary,
                      rebuilding=False,
                      track_moving_average=True):
    """Builds and returns an AdaNet candidate.
    Args:
      ensemble_spec: `_EnsembleSpec` instance to track.
      training: A python boolean indicating whether the graph is in training
        mode or prediction mode.
      summary: A `Summary` for recording summaries for TensorBoard.
      rebuilding: Boolean whether the iteration is being rebuilt only to restore
        the previous best subnetworks and ensembles.
      track_moving_average: Bool whether to track the moving average of the
        ensemble's adanet loss.
    Returns:
      A _Candidate instance.
    """
    from tensorflow.python.training import moving_averages  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    candidate_scope = "candidate_{}".format(ensemble_spec.name)
    with tf_compat.v1.variable_scope(candidate_scope):
      # Fall back to the raw adanet loss when no moving average is tracked.
      adanet_loss = ensemble_spec.adanet_loss
      variables = []
      if track_moving_average:
        loss_to_track = ensemble_spec.adanet_loss
        if loss_to_track is None:
          # Dummy loss so that we always create moving averages variables.
          # If we pass a None loss assign_moving_average raises an exception.
          loss_to_track = tf.constant(99999., name="dummy_adanet_loss")
        adanet_loss_var = tf_compat.v1.get_variable(
            "adanet_loss", initializer=0., trainable=False)
        update_adanet_loss_op = moving_averages.assign_moving_average(
            adanet_loss_var, loss_to_track, decay=self._adanet_loss_decay)
        # Get the two moving average variables created by assign_moving_average.
        # Since these two variables are the most recently created ones, we can
        # slice them both from the end of the global variables collection.
        variables = [adanet_loss_var] + tf_compat.v1.global_variables()[-2:]
        if training and not rebuilding:
          # Reading the variable behind a control dependency ensures each
          # training step also applies the moving-average update.
          with tf.control_dependencies([update_adanet_loss_op]):
            adanet_loss = adanet_loss_var.read_value()
        else:
          adanet_loss = adanet_loss_var.read_value()
      with summary.current_scope():
        summary.scalar("adanet_loss/adanet/adanet_weighted_ensemble",
                       adanet_loss)
    return _Candidate(
        ensemble_spec=ensemble_spec,
        adanet_loss=adanet_loss,
        variables=variables)
| 4,987 | 34.884892 | 124 | py |
adanet | adanet-master/adanet/core/tpu_estimator.py | """An AdaNet estimator implementation which can run on TPU.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
from absl import logging
from adanet import tf_compat
from adanet.core.estimator import Estimator
import tensorflow.compat.v2 as tf
# pylint: disable=g-classes-have-attributes
class TPUEstimator(Estimator, tf.compat.v1.estimator.tpu.TPUEstimator):
"""An :class:`adanet.Estimator` capable of training and evaluating on TPU.
Unless :code:`use_tpu=False`, training will run on TPU. However, certain parts
of the AdaNet training loop, such as report materialization and best candidate
selection, will still occurr on CPU. Furthermore, if using TPUEmbedding (i.e.
:code:`embedding_config_spec` is supplied), inference will also occurr on CPU.
TODO: Provide the missing functionality detailed below.
N.B: Embeddings using the TPUEmbedding (i.e. :code:`embedding_config_spec`
is provided) only support :code:`shared_embedding_columns` when running for
multiple AdaNet iterations. Using regular :code:`embedding_columns` will cause
iterations 2..n to fail because of mismatched embedding scopes.
Args:
head: See :class:`adanet.Estimator`.
subnetwork_generator: See :class:`adanet.Estimator`.
max_iteration_steps: See :class:`adanet.Estimator`.
ensemblers: See :class:`adanet.Estimator`.
ensemble_strategies: See :class:`adanet.Estimator`.
evaluator: See :class:`adanet.Estimator`.
report_materializer: See :class:`adanet.Estimator`.
metric_fn: See :class:`adanet.Estimator`.
force_grow: See :class:`adanet.Estimator`.
replicate_ensemble_in_training: See :class:`adanet.Estimator`.
adanet_loss_decay: See :class:`adanet.Estimator`.
report_dir: See :class:`adanet.Estimator`.
config: See :class:`adanet.Estimator`.
use_tpu: Boolean to enable training on TPU. Defaults to :code:`True` and is
only provided to allow debugging models on CPU/GPU. Use
:class:`adanet.Estimator` instead if you do not plan to run on TPU.
eval_on_tpu: Boolean to enable evaluating on TPU. Defaults to :code:`True`.
Ignored if :code:`use_tpu=False`.
export_to_tpu: See :class:`tf.compat.v1.estimator.tpu.TPUEstimator`.
train_batch_size: See :class:`tf.compat.v1.estimator.tpu.TPUEstimator`.
Defaults to 0 if `None`.
eval_batch_size: See :class:`tf.compat.v1.estimator.tpu.TPUEstimator`.
Defaults to train_batch_size if `None`.
predict_batch_size: See :class:`tf.compat.v1.estimator.tpu.TPUEstimator`.
Defaults to eval_batch_size if `None`.
embedding_config_spec: See :class:`tf.compat.v1.estimator.tpu.TPUEstimator`.
If supplied, :code:`predict` will be called on CPU and no TPU compatible
:code:`SavedModel` will be exported.
debug: See :class:`adanet.Estimator`.
enable_ensemble_summaries: See :class:`adanet.Estimator`.
enable_subnetwork_summaries: See :class:`adanet.Estimator`.
export_subnetwork_logits: Whether to include subnetwork logits in exports.
export_subnetwork_last_layer: Whether to include subnetwork last layer in
exports.
global_step_combiner_fn: See :class:`adanet.Estimator`.
max_iterations: See :class:`adanet.Estimator`.
replay_config: See :class:`adanet.Estimator`.
add_predict_batch_config: If True, supplies a default
`tpu_estimator.BatchConfig` when calling
`tpu_estimator.model_fn_inference_on_tpu`, otherwise supplies None.
**kwargs: Extra keyword args passed to the parent.
"""
  def __init__(self,
               head,
               subnetwork_generator,
               max_iteration_steps,
               ensemblers=None,
               ensemble_strategies=None,
               evaluator=None,
               report_materializer=None,
               metric_fn=None,
               force_grow=False,
               replicate_ensemble_in_training=False,
               adanet_loss_decay=.9,
               model_dir=None,
               report_dir=None,
               config=None,
               use_tpu=True,
               eval_on_tpu=True,
               export_to_tpu=True,
               train_batch_size=None,
               eval_batch_size=None,
               predict_batch_size=None,
               embedding_config_spec=None,
               debug=False,
               enable_ensemble_summaries=True,
               enable_subnetwork_summaries=True,
               export_subnetwork_logits=False,
               export_subnetwork_last_layer=True,
               global_step_combiner_fn=tf.math.reduce_mean,
               max_iterations=None,
               replay_config=None,
               add_predict_batch_config=True,
               **kwargs):
    """Initializes an `adanet.TPUEstimator`. See the class docstring for args."""
    self._use_tpu = use_tpu
    if not self._use_tpu:
      logging.warning(
          "This adanet.TPUEstimator is meant to be used for running on TPU. "
          "If you want to run on CPU/GPU, use adanet.Estimator instead.")
    # TPUEstimator modifies config under the hood. We keep track of it here so
    # we can use it from _create_temp_run_config.
    self._original_config = config or tf_compat.v1.estimator.tpu.RunConfig()
    # Evaluation on TPU only makes sense when training on TPU.
    self._eval_on_tpu = eval_on_tpu if self._use_tpu else False
    self._export_to_tpu = export_to_tpu
    # Batch sizes cascade: predict falls back to eval, which falls back to
    # train; unset sizes default to 0.
    self._train_batch_size = train_batch_size or 0
    self._eval_batch_size = eval_batch_size or train_batch_size or 0
    self._predict_batch_size = (
        predict_batch_size or eval_batch_size or train_batch_size or 0)
    self._embedding_config_spec = embedding_config_spec
    self._add_predict_batch_config = add_predict_batch_config
    if self._embedding_config_spec:
      # TPUEmbedding forces CPU inference, so a TPU SavedModel is unusable.
      logging.warning(
          "TPU does not support inference with TPUEmbedding. Force setting "
          "`export_to_tpu=False` so no TPU SavedModel will be exported.")
      self._export_to_tpu = False
    from tensorflow_estimator.python.estimator.tpu import tpu_estimator  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    super(TPUEstimator, self).__init__(
        head=head,
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=max_iteration_steps,
        ensemblers=ensemblers,
        ensemble_strategies=ensemble_strategies,
        evaluator=evaluator,
        report_materializer=report_materializer,
        metric_fn=metric_fn,
        force_grow=force_grow,
        replicate_ensemble_in_training=replicate_ensemble_in_training,
        adanet_loss_decay=adanet_loss_decay,
        model_dir=model_dir,
        report_dir=report_dir,
        config=self._original_config,
        use_tpu=self._use_tpu,
        eval_on_tpu=self._eval_on_tpu,
        export_to_tpu=self._export_to_tpu,
        export_saved_model_api_version=(
            tpu_estimator.ExportSavedModelApiVersion.V2),
        train_batch_size=self._train_batch_size,
        eval_batch_size=self._eval_batch_size,
        predict_batch_size=self._predict_batch_size,
        embedding_config_spec=self._embedding_config_spec,
        debug=debug,
        enable_ensemble_summaries=enable_ensemble_summaries,
        enable_subnetwork_summaries=enable_subnetwork_summaries,
        export_subnetwork_logits=export_subnetwork_logits,
        export_subnetwork_last_layer=export_subnetwork_last_layer,
        global_step_combiner_fn=global_step_combiner_fn,
        max_iterations=max_iterations,
        replay_config=replay_config,
        **kwargs)
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
use_tpu = self._use_tpu
eval_on_tpu = self._eval_on_tpu
if self._embedding_config_spec:
logging.warning("TPU does not support inference with TPUEmbedding. "
"Falling back to CPU.")
use_tpu = False
eval_on_tpu = False
if not checkpoint_path:
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
logging.info("Computing predictions for AdaNet model at checkpoint: %s",
checkpoint_path)
params = self.params
params.update({
"best_ensemble_index":
self._compute_best_ensemble_index(checkpoint_path, hooks),
"checkpoint_path":
checkpoint_path,
})
from tensorflow_estimator.python.estimator.tpu import tpu_estimator # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
# TODO: Consider extracting a common function to use here and in
# _create_temp_estimator().
estimator = tf_compat.v1.estimator.tpu.TPUEstimator(
model_fn=self._create_model_fn(hooks=hooks, is_export=False),
params=params,
config=self._original_config,
model_dir=self.model_dir,
use_tpu=use_tpu,
eval_on_tpu=eval_on_tpu,
export_to_tpu=self._export_to_tpu,
export_saved_model_api_version=(
tpu_estimator.ExportSavedModelApiVersion.V2),
train_batch_size=self._train_batch_size,
eval_batch_size=self._eval_batch_size,
predict_batch_size=self._predict_batch_size,
embedding_config_spec=self._embedding_config_spec)
return estimator.predict(
input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples)
def _create_temp_run_config(self, temp_model_dir):
"""See the `Estimator` base class for details."""
return tf_compat.v1.estimator.tpu.RunConfig(
model_dir=temp_model_dir,
tpu_config=self._original_config.tpu_config,
evaluation_master=self._original_config.evaluation_master,
master=self._original_config.master,
cluster=self._original_config.cluster,
tf_random_seed=self._original_config.tf_random_seed,
session_config=self._original_config.session_config,
protocol=self._original_config.protocol)
def _create_temp_estimator(self, config, **create_model_fn_args):
"""See the `Estimator` base class for details."""
from tensorflow_estimator.python.estimator.tpu import tpu_estimator # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
temp_model_dir = config.model_dir
return tf_compat.v1.estimator.tpu.TPUEstimator(
model_fn=self._create_model_fn(**create_model_fn_args),
config=config,
model_dir=temp_model_dir,
use_tpu=self._use_tpu,
eval_on_tpu=self._eval_on_tpu,
export_to_tpu=self._export_to_tpu,
export_saved_model_api_version=(
tpu_estimator.ExportSavedModelApiVersion.V2),
train_batch_size=self._train_batch_size,
eval_batch_size=self._eval_batch_size,
predict_batch_size=self._predict_batch_size,
embedding_config_spec=self._embedding_config_spec)
@contextlib.contextmanager
def _call_input_fn_in_new_graph(self, input_fn, mode, config):
"""See the `Estimator` base class for details."""
# Bind parameters to input_fn since the parent's input_fn is not expected to
# have any arguments.
from tensorflow.python.util import function_utils # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
input_fn_args = function_utils.fn_args(input_fn)
kwargs = {}
if "mode" in input_fn_args:
kwargs["mode"] = mode
if "params" in input_fn_args:
kwargs["params"] = self.params
if "config" in input_fn_args:
kwargs["config"] = config
input_fn = functools.partial(input_fn, **kwargs)
with super(TPUEstimator,
self)._call_input_fn_in_new_graph(input_fn, mode, config) as res:
yield res
def _create_estimator_spec(self, current_iteration, mode,
iteration_number_tensor, previous_iteration_vars,
is_growing_phase, evaluation_name):
"""See the `Estimator` base class for details."""
if not self._use_tpu:
return super(TPUEstimator, self)._create_estimator_spec(
current_iteration, mode, iteration_number_tensor,
previous_iteration_vars, is_growing_phase, evaluation_name)
training = mode == tf.estimator.ModeKeys.TRAIN
iteration_estimator_spec = current_iteration.estimator_spec
training_hooks = self._training_hooks(current_iteration, training,
iteration_number_tensor,
previous_iteration_vars,
is_growing_phase)
if is_growing_phase:
training_hooks = self._process_hooks_for_growing_phase(training_hooks)
evaluation_hooks = self._evaluation_hooks(current_iteration, training,
evaluation_name)
return tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=mode,
predictions=iteration_estimator_spec.predictions,
loss=iteration_estimator_spec.loss,
train_op=self._train_op(iteration_estimator_spec, is_growing_phase),
host_call=self._create_host_call(current_iteration, training),
eval_metrics=iteration_estimator_spec.eval_metrics,
export_outputs=iteration_estimator_spec.export_outputs,
# Return a constant summary_op, otherwise `Estimator` creates summary
# ops that do not work on TPU.
scaffold_fn=lambda: tf.compat.v1.train.Scaffold( # pylint: disable=g-long-lambda
summary_op=tf.constant("")),
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks)
def _training_hooks(self, current_iteration, training,
iteration_number_tensor, previous_iteration_vars,
is_growing_phase):
"""See the `Estimator` base class for details."""
training_hooks = super(TPUEstimator,
self)._training_hooks(current_iteration, training,
iteration_number_tensor,
previous_iteration_vars,
is_growing_phase)
if self._use_tpu:
# Remove summary hooks on TPU since summaries are saved via host_call.
training_hooks = [
hook for hook in training_hooks
if not isinstance(hook, tf.compat.v1.train.SummarySaverHook)
]
return training_hooks
def _create_host_call(self, current_iteration, training):
"""Construct a host_call writing scalar summaries.
Args:
current_iteration: The current `_Iteration`.
training: Boolean indicating whether in training mode.
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
if not training:
return lambda **kwargs: [tf.no_op()], {}
# Collect and flatten summary functions and arguments.
summary_kwargs = collections.OrderedDict()
gs_t = tf.reshape(tf.cast(tf.train.get_global_step(), dtype=tf.int32), [1])
summary_kwargs["global_step"] = gs_t
summary_fns = collections.defaultdict(list)
for i, summary in enumerate(current_iteration.summaries):
for j, (summary_fn, tensor) in enumerate(summary.summary_tuples()):
summary_fns[i].append(summary_fn)
summary_kwargs["summary_{}_{}".format(i, j)] = tensor
def _host_call_fn(**kwargs):
"""Training host call.
Creates summaries for training metrics.
Args:
**kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
from tensorflow.python.ops import summary_ops_v2 # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
gs = tf.cast(kwargs.pop("global_step")[0], dtype=tf.int64)
for i, summary in enumerate(current_iteration.summaries):
with summary_ops_v2.create_file_writer(summary.logdir).as_default():
with summary_ops_v2.record_summaries_every_n_global_steps(
n=self.config.save_summary_steps, global_step=gs):
for j, summary_fn in enumerate(summary_fns[i]):
tensor = kwargs["summary_{}_{}".format(i, j)]
summary_fn(tensor, step=gs)
summary.clear_summary_tuples()
return tf.compat.v1.summary.all_v2_summary_ops()
return _host_call_fn, summary_kwargs
def _create_model_fn(self,
is_growing_phase=False,
is_inside_training_loop=False,
is_export=False,
evaluation_name=None,
best_ensemble_index=None,
checkpoint_path=None,
hooks=None):
"""See the `Estimator` base class for details."""
from tensorflow_estimator.python.estimator.tpu import tpu_estimator # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
adanet_model_fn = super(TPUEstimator, self)._create_model_fn(
is_growing_phase, is_inside_training_loop, is_export, evaluation_name,
best_ensemble_index, checkpoint_path, hooks)
def _model_fn(features, labels, mode, params, config):
"""The model_fn to return which supports exporting on TPU."""
if (is_export and params["use_tpu"] and
mode == tf.estimator.ModeKeys.PREDICT):
batch_config = None
if self._add_predict_batch_config:
batch_config = tpu_estimator.BatchConfig(
# Set num_batch_threads to the number of TPU cores on Servomatic.
num_batch_threads=2,
max_batch_size=self._predict_batch_size,
# TODO: Magic number. Investigate whether there is a
# better way to set this, or have the user pass it in.
batch_timeout_micros=60 * 1000,
allowed_batch_sizes=[self._predict_batch_size])
return tpu_estimator.model_fn_inference_on_tpu(
adanet_model_fn,
features=features,
labels=labels,
config=config,
params=params,
batch_config=batch_config)
return adanet_model_fn(features, labels, mode, params, config)
return _model_fn
| 18,891 | 42.832947 | 137 | py |
adanet | adanet-master/adanet/core/report_materializer.py | """Materializes the subnetwork.Reports.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import logging
from adanet import subnetwork
from adanet import tf_compat
import tensorflow.compat.v2 as tf
class ReportMaterializer(object):
"""Materializes reports.
Specifically it materializes a subnetwork's :class:`adanet.subnetwork.Report`
instances into :class:`adanet.subnetwork.MaterializedReport` instances.
Requires an input function `input_fn` that returns a tuple of:
* features: Dictionary of string feature name to `Tensor`.
* labels: `Tensor` of labels.
Args:
input_fn: The input function.
steps: Number of steps for which to materialize the ensembles. If an
`OutOfRangeError` occurs, materialization stops. If set to None, will
iterate the dataset until all inputs are exhausted.
Returns:
A `ReportMaterializer` instance.
"""
def __init__(self, input_fn, steps=None):
self._input_fn = input_fn
self._steps = steps
super(ReportMaterializer, self).__init__()
@property
def input_fn(self):
"""Returns the input_fn that materialize_subnetwork_reports would run on.
Even though this property appears to be unused, it would be used to build
the AdaNet model graph inside AdaNet estimator.train(). After the graph is
built, the queue_runners are started and the initializers are run,
AdaNet estimator.train() passes its tf.Session as an argument to
materialize_subnetwork_reports(), thus indirectly making input_fn
available to materialize_subnetwork_reports.
"""
return self._input_fn
@property
def steps(self):
"""Return the number of steps."""
return self._steps
def materialize_subnetwork_reports(self, sess, iteration_number,
subnetwork_reports,
included_subnetwork_names):
"""Materializes the Tensor objects in subnetwork_reports using sess.
This converts the Tensors in subnetwork_reports to ndarrays, logs the
progress, converts the ndarrays to python primitives, then packages them
into `adanet.subnetwork.MaterializedReports`.
Args:
sess: `Session` instance with most recent variable values loaded.
iteration_number: Integer iteration number.
subnetwork_reports: Dict mapping string names to `subnetwork.Report`
objects to be materialized.
included_subnetwork_names: List of string names of the
`subnetwork.Report`s that are included in the final ensemble.
Returns:
List of `adanet.subnetwork.MaterializedReport` objects.
"""
# A metric is a tuple where the first element is a Tensor and
# the second element is an update op. We collate the update ops here.
metric_update_ops = []
for subnetwork_report in subnetwork_reports.values():
for metric_tuple in subnetwork_report.metrics.values():
metric_update_ops.append(tf_compat.metric_op(metric_tuple)[1])
# Extract the Tensors to be materialized.
tensors_to_materialize = {}
for name, subnetwork_report in subnetwork_reports.items():
metrics = {
metric_key: tf_compat.metric_op(metric_tuple)[0]
for metric_key, metric_tuple in subnetwork_report.metrics.items()
}
tensors_to_materialize[name] = {
"attributes": subnetwork_report.attributes,
"metrics": metrics
}
if self.steps is None:
logging_frequency = 1000
elif self.steps < 10:
logging_frequency = 1
else:
logging_frequency = math.floor(self.steps / 10.)
steps_completed = 0
while True:
if self.steps is not None and steps_completed == self.steps:
break
try:
steps_completed += 1
if (steps_completed % logging_frequency == 0 or
self.steps == steps_completed):
logging.info("Report materialization [%d/%s]", steps_completed,
self.steps or "??")
sess.run(metric_update_ops)
except tf.errors.OutOfRangeError:
logging.info("Encountered end of input during report materialization")
break
materialized_tensors_dict = sess.run(tensors_to_materialize)
logging.info("Materialized subnetwork_reports.")
# Convert scalar ndarrays into python primitives, then place them into
# subnetwork.MaterializedReports.
materialized_reports = []
for name, materialized_tensors in materialized_tensors_dict.items():
attributes = {
key: value.item() if hasattr(value, "item") else value
for key, value in materialized_tensors["attributes"].items()
}
metrics = {
key: value.item() if hasattr(value, "item") else value
for key, value in materialized_tensors["metrics"].items()
}
materialized_reports.append(
subnetwork.MaterializedReport(
iteration_number=iteration_number,
name=name,
hparams=subnetwork_reports[name].hparams,
attributes=attributes,
metrics=metrics,
included_in_final_ensemble=(name in included_subnetwork_names)))
return materialized_reports
| 5,843 | 35.298137 | 79 | py |
adanet | adanet-master/adanet/core/__init__.py | """TensorFLow AdaNet core logic.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet.core.estimator import Estimator
from adanet.core.evaluator import Evaluator
from adanet.core.report_materializer import ReportMaterializer
from adanet.core.summary import Summary
from adanet.core.tpu_estimator import TPUEstimator
__all__ = [
"Estimator",
"Evaluator",
"ReportMaterializer",
"Summary",
"TPUEstimator",
]
| 1,083 | 29.971429 | 72 | py |
adanet | adanet-master/adanet/core/timer_test.py | """Tests for timer.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from adanet.core.timer import _CountDownTimer
import tensorflow.compat.v1 as tf
class CountDownTimerTest(tf.test.TestCase):
def test_secs_remaining_long(self):
timer = _CountDownTimer(60)
time.sleep(.1)
secs_remaining = timer.secs_remaining()
self.assertLess(0., secs_remaining)
self.assertGreater(60., secs_remaining)
def test_secs_remaining_short(self):
timer = _CountDownTimer(.001)
time.sleep(.1)
secs_remaining = timer.secs_remaining()
self.assertEqual(0., secs_remaining)
def test_secs_remaining_zero(self):
timer = _CountDownTimer(0.)
time.sleep(.01)
secs_remaining = timer.secs_remaining()
self.assertEqual(0., secs_remaining)
if __name__ == "__main__":
tf.test.main()
| 1,475 | 27.384615 | 72 | py |
adanet | adanet-master/adanet/core/report_materializer_test.py | """Test AdaNet materializer single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet import subnetwork
from adanet import tf_compat
from adanet.core.report_materializer import ReportMaterializer
import adanet.core.testing_utils as tu
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
def decode(param):
"""Decodes the given param when it is bytes."""
if isinstance(param, (float, int)):
return param
return param.decode("utf-8")
class ReportMaterializerTest(parameterized.TestCase, tf.test.TestCase):
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name":
"one_empty_subnetwork",
"input_fn":
tu.dummy_input_fn([[1., 2]], [[3.]]),
"subnetwork_reports_fn":
lambda features, labels: {
"foo":
subnetwork.Report(hparams={}, attributes={}, metrics={}),
},
"steps":
3,
"included_subnetwork_names": ["foo"],
"want_materialized_reports": [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=True,
),
],
}, {
"testcase_name":
"one_subnetwork",
"input_fn":
tu.dummy_input_fn([[1., 2]], [[3.]]),
"subnetwork_reports_fn":
lambda features, labels: {
"foo":
subnetwork.Report(
hparams={
"learning_rate": 1.e-5,
"optimizer": "sgd",
"num_layers": 0,
"use_side_inputs": True,
},
attributes={
"weight_norms": tf.constant(3.14),
"foo": tf.constant("bar"),
"parameters": tf.constant(7777),
"boo": tf.constant(True),
},
metrics={},
),
},
"steps":
3,
"included_subnetwork_names": ["foo"],
"want_materialized_reports": [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams={
"learning_rate": 1.e-5,
"optimizer": "sgd",
"num_layers": 0,
"use_side_inputs": True,
},
attributes={
"weight_norms": 3.14,
"foo": "bar",
"parameters": 7777,
"boo": True,
},
metrics={},
included_in_final_ensemble=True,
),
],
}, {
"testcase_name":
"one_subnetwork_iteration_2",
"input_fn":
tu.dummy_input_fn([[1., 2]], [[3.]]),
"subnetwork_reports_fn":
lambda features, labels: {
"foo":
subnetwork.Report(
hparams={
"learning_rate": 1.e-5,
"optimizer": "sgd",
"num_layers": 0,
"use_side_inputs": True,
},
attributes={
"weight_norms": tf.constant(3.14),
"foo": tf.constant("bar"),
"parameters": tf.constant(7777),
"boo": tf.constant(True),
},
metrics={},
),
},
"steps":
3,
"iteration_number":
2,
"included_subnetwork_names": ["foo"],
"want_materialized_reports": [
subnetwork.MaterializedReport(
iteration_number=2,
name="foo",
hparams={
"learning_rate": 1.e-5,
"optimizer": "sgd",
"num_layers": 0,
"use_side_inputs": True,
},
attributes={
"weight_norms": 3.14,
"foo": "bar",
"parameters": 7777,
"boo": True,
},
metrics={},
included_in_final_ensemble=True,
),
],
}, {
"testcase_name":
"two_subnetworks",
"input_fn":
tu.dummy_input_fn([[1., 2]], [[3.]]),
"subnetwork_reports_fn":
lambda features, labels: {
"foo1":
subnetwork.Report(
hparams={
"learning_rate": 1.e-5,
"optimizer": "sgd",
"num_layers": 0,
"use_side_inputs": True,
},
attributes={
"weight_norms": tf.constant(3.14),
"foo": tf.constant("bar"),
"parameters": tf.constant(7777),
"boo": tf.constant(True),
},
metrics={},
),
"foo2":
subnetwork.Report(
hparams={
"learning_rate": 1.e-6,
"optimizer": "sgd",
"num_layers": 1,
"use_side_inputs": True,
},
attributes={
"weight_norms": tf.constant(3.1445),
"foo": tf.constant("baz"),
"parameters": tf.constant(7788),
"boo": tf.constant(True),
},
metrics={},
),
},
"steps":
3,
"included_subnetwork_names": ["foo2"],
"want_materialized_reports": [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo1",
hparams={
"learning_rate": 1.e-5,
"optimizer": "sgd",
"num_layers": 0,
"use_side_inputs": True,
},
attributes={
"weight_norms": 3.14,
"foo": "bar",
"parameters": 7777,
"boo": True,
},
metrics={},
included_in_final_ensemble=False,
),
subnetwork.MaterializedReport(
iteration_number=0,
name="foo2",
hparams={
"learning_rate": 1.e-6,
"optimizer": "sgd",
"num_layers": 1,
"use_side_inputs": True,
},
attributes={
"weight_norms": 3.1445,
"foo": "baz",
"parameters": 7788,
"boo": True,
},
metrics={},
included_in_final_ensemble=True,
),
],
}, {
"testcase_name":
"two_subnetworks_zero_included",
"input_fn":
tu.dummy_input_fn([[1., 2]], [[3.]]),
"subnetwork_reports_fn":
lambda features, labels: {
"foo1":
subnetwork.Report(
hparams={},
attributes={},
metrics={},
),
"foo2":
subnetwork.Report(
hparams={},
attributes={},
metrics={},
),
},
"steps":
3,
"included_subnetwork_names": [],
"want_materialized_reports": [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo1",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
subnetwork.MaterializedReport(
iteration_number=0,
name="foo2",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
],
}, {
"testcase_name":
"two_subnetworks_both_included",
"input_fn":
tu.dummy_input_fn([[1., 2]], [[3.]]),
"subnetwork_reports_fn":
lambda features, labels: {
"foo1":
subnetwork.Report(
hparams={},
attributes={},
metrics={},
),
"foo2":
subnetwork.Report(
hparams={},
attributes={},
metrics={},
),
},
"steps":
3,
"included_subnetwork_names": ["foo1", "foo2"],
"want_materialized_reports": [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo1",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=True,
),
subnetwork.MaterializedReport(
iteration_number=0,
name="foo2",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=True,
),
],
}, {
"testcase_name":
"materialize_metrics",
"input_fn":
tu.dummy_input_fn([[1., 1.], [1., 1.], [1., 1.]],
[[1.], [2.], [3.]]),
"subnetwork_reports_fn":
lambda features, labels: {
"foo":
subnetwork.Report(
hparams={},
attributes={},
metrics={"moo": tf_compat.v1.metrics.mean(labels)},
),
},
"steps":
3,
"included_subnetwork_names": ["foo"],
"want_materialized_reports": [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams={},
attributes={},
metrics={"moo": 2.},
included_in_final_ensemble=True,
),
],
}, {
"testcase_name":
"materialize_metrics_none_steps",
"input_fn":
tu.dataset_input_fn([[1., 1.], [1., 1.], [1., 1.]],
[[1.], [2.], [3.]]),
"subnetwork_reports_fn":
lambda features, labels: {
"foo":
subnetwork.Report(
hparams={},
attributes={},
metrics={"moo": tf_compat.v1.metrics.mean(labels)},
),
},
"steps":
None,
"included_subnetwork_names": ["foo"],
"want_materialized_reports": [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams={},
attributes={},
metrics={"moo": 2.},
included_in_final_ensemble=True,
),
],
}, {
"testcase_name":
"materialize_metrics_non_tensor_op",
"input_fn":
tu.dummy_input_fn([[1., 2]], [[3.]]),
"subnetwork_reports_fn":
lambda features, labels: {
"foo":
subnetwork.Report(
hparams={},
attributes={},
metrics={"moo": (tf.constant(42), tf.no_op())},
),
},
"steps":
3,
"included_subnetwork_names": ["foo"],
"want_materialized_reports": [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams={},
attributes={},
metrics={"moo": 42},
included_in_final_ensemble=True,
),
],
})
@test_util.run_in_graph_and_eager_modes
def test_materialize_subnetwork_reports(self,
input_fn,
subnetwork_reports_fn,
steps,
iteration_number=0,
included_subnetwork_names=None,
want_materialized_reports=None):
with context.graph_mode():
tf.constant(0.) # dummy op so that the session graph is never empty.
features, labels = input_fn()
subnetwork_reports = subnetwork_reports_fn(features, labels)
with self.test_session() as sess:
sess.run(tf_compat.v1.initializers.local_variables())
report_materializer = ReportMaterializer(input_fn=input_fn, steps=steps)
materialized_reports = (
report_materializer.materialize_subnetwork_reports(
sess, iteration_number, subnetwork_reports,
included_subnetwork_names))
self.assertEqual(
len(want_materialized_reports), len(materialized_reports))
materialized_reports_dict = {
blrm.name: blrm for blrm in materialized_reports
}
for want_materialized_report in want_materialized_reports:
materialized_report = (
materialized_reports_dict[want_materialized_report.name])
self.assertEqual(iteration_number,
materialized_report.iteration_number)
self.assertEqual(
set(want_materialized_report.hparams.keys()),
set(materialized_report.hparams.keys()))
for hparam_key, want_hparam in (
want_materialized_report.hparams.items()):
if isinstance(want_hparam, float):
self.assertAllClose(want_hparam,
materialized_report.hparams[hparam_key])
else:
self.assertEqual(want_hparam,
materialized_report.hparams[hparam_key])
self.assertSetEqual(
set(want_materialized_report.attributes.keys()),
set(materialized_report.attributes.keys()))
for attribute_key, want_attribute in (
want_materialized_report.attributes.items()):
if isinstance(want_attribute, float):
self.assertAllClose(
want_attribute,
decode(materialized_report.attributes[attribute_key]))
else:
self.assertEqual(
want_attribute,
decode(materialized_report.attributes[attribute_key]))
self.assertSetEqual(
set(want_materialized_report.metrics.keys()),
set(materialized_report.metrics.keys()))
for metric_key, want_metric in (
want_materialized_report.metrics.items()):
if isinstance(want_metric, float):
self.assertAllClose(
want_metric, decode(materialized_report.metrics[metric_key]))
else:
self.assertEqual(want_metric,
decode(materialized_report.metrics[metric_key]))
if __name__ == "__main__":
tf.test.main()
| 17,761 | 35.774327 | 80 | py |
adanet | adanet-master/adanet/core/report_accessor_test.py | """Tests for run_report_accessor.py.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet import subnetwork
from adanet.core.report_accessor import _ReportAccessor
import tensorflow.compat.v2 as tf
class ReportAccessorTest(parameterized.TestCase, tf.test.TestCase):
def test_read_from_empty_file(self):
report_accessor = _ReportAccessor(self.get_temp_dir())
self.assertEqual([], list(report_accessor.read_iteration_reports()))
def test_add_to_empty_file(self):
report_accessor = _ReportAccessor(self.get_temp_dir())
materialized_reports = [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams={
"p1": 1,
"p2": "default_hparam",
"p3": b"binary_hparam",
"p4": u"unicode_hparam",
"p5": True,
},
attributes={
"a1": 1,
"a2": "default_attribute",
"a3": b"binary_attribute",
"a4": u"unicode_attribute",
"a5": True,
},
metrics={
"m1": 1,
"m2": "default_metric",
"m3": b"binary_metric",
"m4": u"unicode_metric",
"m5": True,
},
included_in_final_ensemble=True,
),
]
report_accessor.write_iteration_report(
iteration_number=0,
materialized_reports=materialized_reports,
)
actual_iteration_reports = list(report_accessor.read_iteration_reports())
self.assertLen(actual_iteration_reports, 1)
self.assertEqual(materialized_reports, actual_iteration_reports[0])
def test_add_to_existing_file(self):
materialized_reports = [
[
subnetwork.MaterializedReport(
iteration_number=0,
name="foo1",
hparams={
"p1": 11,
"p2": "default_hparam",
"p3": b"binary_hparam",
"p4": u"unicode_hparam",
"p5": True,
},
attributes={
"a1": 11,
"a2": "default_attribute",
"a3": b"binary_attribute",
"a4": u"unicode_attribute",
"a5": True,
},
metrics={
"m1": 11,
"m2": "default_metric",
"m3": b"binary_metric",
"m4": u"unicode_metric",
"m5": True,
},
included_in_final_ensemble=False,
),
subnetwork.MaterializedReport(
iteration_number=0,
name="foo2",
hparams={
"p1": 12,
"p2": "default_hparam",
"p3": b"binary_hparam",
"p4": u"unicode_hparam",
"p5": True,
},
attributes={
"a1": 12,
"a2": "default_attribute",
"a3": b"binary_attribute",
"a4": u"unicode_attribute",
"a5": True,
},
metrics={
"m1": 12,
"m2": "default_metric",
"m3": b"binary_metric",
"m4": u"unicode_metric",
"m5": True,
},
included_in_final_ensemble=True,
),
],
[
subnetwork.MaterializedReport(
iteration_number=1,
name="foo1",
hparams={
"p1": 21,
"p2": "default_hparam",
"p3": b"binary_hparam",
"p4": u"unicode_hparam",
"p5": True,
},
attributes={
"a1": 21,
"a2": "default_attribute",
"a3": b"binary_attribute",
"a4": u"unicode_attribute",
"a5": True,
},
metrics={
"m1": 21,
"m2": "default_metric",
"m3": b"binary_metric",
"m4": u"unicode_metric",
"m5": True,
},
included_in_final_ensemble=True,
),
subnetwork.MaterializedReport(
iteration_number=1,
name="foo2",
hparams={
"p1": 22,
"p2": "default_hparam",
"p3": b"binary_hparam",
"p4": u"unicode_hparam",
"p5": True,
},
attributes={
"a1": 22,
"a2": "default_attribute",
"a3": b"binary_attribute",
"a4": u"unicode_attribute",
"a5": True,
},
metrics={
"m1": 22,
"m2": "default_metric",
"m3": b"binary_metric",
"m4": u"unicode_metric",
"m5": True,
},
included_in_final_ensemble=False,
),
],
[
subnetwork.MaterializedReport(
iteration_number=2,
name="foo1",
hparams={
"p1": 31,
"p2": "default_hparam",
"p3": b"binary_hparam",
"p4": u"unicode_hparam",
"p5": True,
},
attributes={
"a1": 31,
"a2": "default_attribute",
"a3": b"binary_attribute",
"a4": u"unicode_attribute",
"a5": True,
},
metrics={
"m1": 31,
"m2": "default_metric",
"m3": b"binary_metric",
"m4": u"unicode_metric",
"m5": True,
},
included_in_final_ensemble=False,
),
subnetwork.MaterializedReport(
iteration_number=2,
name="foo2",
hparams={
"p1": 32,
"p2": "default_hparam",
"p3": b"binary_hparam",
"p4": u"unicode_hparam",
"p5": True,
},
attributes={
"a1": 32,
"a2": "default_attribute",
"a3": b"binary_attribute",
"a4": u"unicode_attribute",
"a5": True,
},
metrics={
"m1": 32,
"m2": "default_metric",
"m3": b"binary_metric",
"m4": u"unicode_metric",
"m5": True,
},
included_in_final_ensemble=True,
),
],
]
report_accessor = _ReportAccessor(self.get_temp_dir())
report_accessor.write_iteration_report(0, materialized_reports[0])
report_accessor.write_iteration_report(1, materialized_reports[1])
report_accessor.write_iteration_report(2, materialized_reports[2])
actual_reports = list(report_accessor.read_iteration_reports())
self.assertEqual(materialized_reports, actual_reports)
def test_write_iteration_report_encoding(self):
"""Tests GitHub issue #4."""
report_accessor = _ReportAccessor(self.get_temp_dir())
binary_type_value = b"\n\x83\x01\n;adanet/iteration_2/ensemble_2_layer_dnn/"
text_type_value = u"\U0001f937"
materialized_reports = [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams={
"p1": binary_type_value,
"p2": text_type_value,
},
attributes={
"a1": binary_type_value,
"a2": text_type_value,
},
metrics={
"m1": binary_type_value,
"m2": text_type_value,
},
included_in_final_ensemble=True,
),
]
report_accessor.write_iteration_report(
iteration_number=0,
materialized_reports=materialized_reports,
)
actual_iteration_reports = list(report_accessor.read_iteration_reports())
self.assertLen(actual_iteration_reports, 1)
@parameterized.named_parameters({
"testcase_name": "hparams_invalid_type",
"hparams": {
"h1": None
},
}, {
"testcase_name": "attributes_invalid_type",
"attributes": {
"a1": None
},
}, {
"testcase_name": "metrics_invalid_type",
"metrics": {
"m1": None
},
})
def test_value_error(self, hparams=None, attributes=None, metrics=None):
if hparams is None:
hparams = {}
if attributes is None:
attributes = {}
if metrics is None:
metrics = {}
report_accessor = _ReportAccessor(self.get_temp_dir())
materialized_reports = [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams=hparams,
attributes=attributes,
metrics=metrics,
included_in_final_ensemble=True,
),
]
with self.assertRaises(ValueError):
report_accessor.write_iteration_report(
iteration_number=0,
materialized_reports=materialized_reports,
)
# Standard TensorFlow test runner entry point.
if __name__ == "__main__":
  tf.test.main()
| 10,536 | 31.521605 | 80 | py |
adanet | adanet-master/adanet/core/estimator_distributed_test_runner.py | # List as: python2, python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Used to run estimators for distributed tests.
In distributed tests, we spawn processes to run estimator tasks like chief,
workers, parameter servers. The role of each task is determined by the TF_CONFIG
environment variable.
For more information on how tf.estimator.RunConfig uses TF_CONFIG, see
https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import json
import os
import sys
# Allow this file to import adanet.
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "../.."))
# pylint: disable=g-import-not-at-top
from absl import app
from absl import flags
from absl import logging
from adanet import tf_compat
from adanet.autoensemble.estimator import AutoEnsembleEstimator
from adanet.core.estimator import Estimator
from adanet.core.evaluator import Evaluator
from adanet.distributed.placement import RoundRobinStrategy
from adanet.subnetwork import Builder
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
# TODO: Switch back to TF 2.0 once the distribution bug is fixed.
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
# Contrib
try:
from tensorflow.contrib.boosted_trees.python.utils import losses as bt_losses
except ImportError:
# Not much we can do here except skip the test.
bt_losses = None
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow_estimator.python.estimator import training as training_lib
from tensorflow_estimator.python.estimator.canned import head as head_lib
# pylint: enable=g-import-not-at-top
# pylint: enable=g-direct-tensorflow-import
# Command-line flags selecting which estimator variant, placement strategy,
# and model directory the spawned distributed-test task should use.
flags.DEFINE_enum("estimator_type", "estimator", [
    "estimator", "autoensemble", "autoensemble_trees_multiclass",
    "estimator_with_experimental_multiworker_strategy"
], "The estimator type to train.")

flags.DEFINE_enum("placement_strategy", "replication", [
    "replication",
    "round_robin",
], "The distributed placement strategy.")

flags.DEFINE_string("model_dir", "", "The model directory.")

FLAGS = flags.FLAGS
class SessionManager(session_manager_lib.SessionManager):
  """A `SessionManager` whose recovery wait is shortened for faster tests."""

  def __init__(self, *args, **kwargs):
    # Force a half-second recovery wait regardless of what callers pass.
    overridden = dict(kwargs, recovery_wait_secs=.5)
    super(SessionManager, self).__init__(*args, **overridden)
@contextlib.contextmanager
def _monkey_patch_distributed_training_times():
  """Temporarily monkey-patches TF globals to speed up distributed tests.

  Patches three things for the duration of the context: the Estimator
  per-worker start delay, the `SessionManager` recovery wait, and the
  min/max variable partitioner's minimum slice size. All originals are
  restored on exit, even if the body raises.

  Yields:
    Nothing; control returns to the caller with the patches applied.
  """

  old_delay_secs_per_worker = training_lib._DELAY_SECS_PER_WORKER  # pylint: disable=protected-access
  old_session_manager = session_manager_lib.SessionManager
  old_min_max_variable_partitioner = (
      partitioned_variables.min_max_variable_partitioner)

  # monkey-patch global attributes.
  session_manager_lib.SessionManager = SessionManager
  # Override default delay per worker to speed up tests.
  training_lib._DELAY_SECS_PER_WORKER = .2  # pylint: disable=protected-access

  # NOTE: DNNEstimator uses min-max partitioner under the hood which will not
  # partition layers unless they are above a certain size. In order to test that
  # we handle partitioned variables correctly in distributed training we patch
  # the min size to be significantly lower. For more context, see b/133435012
  # and b/136958627. For some reason, creating a custom DNN using a fixed
  # partitioner does not cause the issues described in the bugs so we must test
  # DNNEstimator.
  def patched_min_max_variable_partitioner(max_partitions=1,
                                           axis=0,
                                           min_slice_size=64,
                                           bytes_per_string_element=16):
    # Forward to the original partitioner but pin min_slice_size to 64 bytes
    # so even the small test layers get partitioned.
    del min_slice_size  # Unused, min_slice_size is patched to be constant.
    return old_min_max_variable_partitioner(
        max_partitions=max_partitions,
        axis=axis,
        min_slice_size=64,
        bytes_per_string_element=bytes_per_string_element)

  partitioned_variables.min_max_variable_partitioner = (
      patched_min_max_variable_partitioner)
  try:
    yield
  finally:
    # Revert monkey-patches.
    session_manager_lib.SessionManager = old_session_manager
    training_lib._DELAY_SECS_PER_WORKER = old_delay_secs_per_worker  # pylint: disable=protected-access
    partitioned_variables.min_max_variable_partitioner = (
        old_min_max_variable_partitioner)
class _DNNBuilder(Builder):
  """A simple DNN subnetwork builder for distributed-training tests."""

  def __init__(self, name, config, layer_size=3, seed=13):
    """Initializes the builder.

    Args:
      name: Unique string name for this subnetwork builder.
      config: A `tf.estimator.RunConfig` (or None); used only to read
        `num_ps_replicas` for variable partitioning.
      layer_size: Width of the single hidden layer.
      seed: Base random seed for weight initialization.
    """
    self._name = name
    self._layer_size = layer_size
    self._config = config
    self._seed = seed

  @property
  def name(self):
    # Unique identifier required by the `Builder` interface.
    return self._name

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """Builds a one-hidden-layer DNN subnetwork. See `Builder`.

    The hidden layer of the previous ensemble's last subnetwork (if any) is
    concatenated to this subnetwork's hidden layer via the `shared` dict, so
    later iterations build on earlier representations.
    """
    seed = self._seed
    if previous_ensemble:
      # Increment seed so different iterations don't learn the exact same thing.
      seed += 1
    # Partition variables across parameter servers (if any) so distributed
    # runs exercise the partitioned-variable code path.
    num_ps_replicas = self._config.num_ps_replicas if self._config else 0
    partitioner = tf_compat.v1.min_max_variable_partitioner(
        max_partitions=num_ps_replicas)
    with tf_compat.v1.variable_scope("dnn", partitioner=partitioner):
      shared = {}
      with tf_compat.v1.variable_scope("hidden_layer"):
        # NOTE(review): input features["x"] is assumed to have shape
        # [batch, 2] to match this weight — confirm against the input_fn.
        w = tf_compat.v1.get_variable(
            shape=[2, self._layer_size],
            initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed),
            name="weight")
        hidden_layer = tf.matmul(features["x"], w)

      if previous_ensemble:
        other_hidden_layer = previous_ensemble.weighted_subnetworks[
            -1].subnetwork.shared["hidden_layer"]
        hidden_layer = tf.concat([hidden_layer, other_hidden_layer], axis=1)

      # Use a leaky-relu activation so that gradients can flow even when
      # outputs are negative. Leaky relu has a non-zero slope when x < 0.
      # Otherwise success at learning is completely dependent on random seed.
      hidden_layer = tf.nn.leaky_relu(hidden_layer, alpha=.2)

      shared["hidden_layer"] = hidden_layer

      with tf_compat.v1.variable_scope("logits"):
        logits = tf_compat.v1.layers.dense(
            hidden_layer,
            logits_dimension,
            kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
                seed=seed))

      summary.scalar("scalar", 3)

      return Subnetwork(
          last_layer=logits, logits=logits, complexity=3, shared=shared)

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    """Returns an Adam minimization op over this subnetwork's variables."""
    optimizer = tf_compat.v1.train.AdamOptimizer(learning_rate=.001)
    return optimizer.minimize(loss, var_list=var_list)
def train_and_evaluate_estimator():
  """Runs Estimator distributed training.

  Builds the estimator variant selected by --estimator_type, then calls
  `tf.estimator.train_and_evaluate`, which reads the TF_CONFIG environment
  variable to determine this process's role in the cluster.
  """

  # The tf.estimator.RunConfig automatically parses the TF_CONFIG environment
  # variables during construction.
  # For more information on how tf.estimator.RunConfig uses TF_CONFIG, see
  # https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig.
  config = tf.estimator.RunConfig(
      tf_random_seed=42,
      save_checkpoints_steps=10,
      save_checkpoints_secs=None,
      # Keep all checkpoints to avoid checkpoint GC causing failures during
      # evaluation.
      # TODO: Prevent checkpoints that are currently being
      # evaluated by another process from being garbage collected.
      keep_checkpoint_max=None,
      model_dir=FLAGS.model_dir,
      session_config=tf_compat.v1.ConfigProto(
          log_device_placement=False,
          # Ignore other workers; only talk to parameter servers.
          # Otherwise, when a chief/worker terminates, the others will hang.
          device_filters=["/job:ps"]))

  def input_fn():
    # NOTE: `features` and `labels` are closed over and read at call time, so
    # this intentionally picks up the multiclass `labels` rebinding below.
    input_features = {"x": tf.constant(features, name="x")}
    input_labels = tf.constant(labels, name="y")
    return tf.data.Dataset.from_tensors((input_features, input_labels)).repeat()

  kwargs = {
      "max_iteration_steps": 100,
      "force_grow": True,
      "delay_secs_per_worker": .2,
      "max_worker_delay_secs": 1,
      "worker_wait_secs": 1,
      # Set low timeout to reduce wait time for failures.
      "worker_wait_timeout_secs": 180,
      "evaluator": Evaluator(input_fn, steps=10),
      "config": config
  }

  head = head_lib._regression_head(  # pylint: disable=protected-access
      loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
  # Tiny XOR-style dataset: enough to verify learning without slowing tests.
  features = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
  labels = [[1.], [0.], [1.], [0.]]

  estimator_type = FLAGS.estimator_type
  if FLAGS.placement_strategy == "round_robin":
    kwargs["experimental_placement_strategy"] = RoundRobinStrategy()
  if estimator_type == "autoensemble":
    feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]
    # pylint: disable=g-long-lambda
    # TODO: Switch optimizers to tf.keras.optimizers.Adam once the
    # distribution bug is fixed.
    candidate_pool = {
        "linear":
            tf.estimator.LinearEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=lambda: tf_compat.v1.train.AdamOptimizer(
                    learning_rate=.001)),
        "dnn":
            tf.estimator.DNNEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=lambda: tf_compat.v1.train.AdamOptimizer(
                    learning_rate=.001),
                hidden_units=[3]),
        "dnn2":
            tf.estimator.DNNEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=lambda: tf_compat.v1.train.AdamOptimizer(
                    learning_rate=.001),
                hidden_units=[10, 10]),
    }
    # pylint: enable=g-long-lambda

    estimator = AutoEnsembleEstimator(
        head=head, candidate_pool=candidate_pool, **kwargs)
  elif estimator_type == "estimator":
    subnetwork_generator = SimpleGenerator([
        _DNNBuilder("dnn1", config, layer_size=3),
        _DNNBuilder("dnn2", config, layer_size=4),
        _DNNBuilder("dnn3", config, layer_size=5),
    ])

    estimator = Estimator(
        head=head, subnetwork_generator=subnetwork_generator, **kwargs)
  # Fixed inconsistency: use the local `estimator_type` like the sibling
  # branches instead of re-reading FLAGS.estimator_type.
  elif estimator_type == "autoensemble_trees_multiclass":
    if not bt_losses:
      logging.warning(
          "Skipped autoensemble_trees_multiclass test since contrib is missing."
      )
      return
    n_classes = 3
    head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint: disable=protected-access
        n_classes=n_classes,
        loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)

    def tree_loss_fn(labels, logits):
      # Boosted trees need a per-example loss; take only the loss tensor from
      # the (loss, update_op) style result.
      result = bt_losses.per_example_maxent_loss(
          labels=labels, logits=logits, num_classes=n_classes, weights=None)
      return result[0]

    tree_head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint: disable=protected-access
        loss_fn=tree_loss_fn,
        n_classes=n_classes,
        loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
    # Rebind labels to class ids; input_fn sees this via its closure.
    labels = [[1], [0], [1], [2]]
    feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]
    # TODO: Switch optimizers to tf.keras.optimizers.Adam once the
    # distribution bug is fixed.
    candidate_pool = lambda config: {  # pylint: disable=g-long-lambda
        "linear":
            tf.estimator.LinearEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=tf_compat.v1.train.AdamOptimizer(
                    learning_rate=.001),
                config=config),
        "gbdt":
            tf.estimator.BoostedTreesEstimator(
                head=tree_head,
                feature_columns=feature_columns,
                n_trees=10,
                n_batches_per_layer=1,
                center_bias=False,
                config=config),
    }

    estimator = AutoEnsembleEstimator(
        head=head, candidate_pool=candidate_pool, **kwargs)

  elif estimator_type == "estimator_with_experimental_multiworker_strategy":

    def _model_fn(features, labels, mode):
      """Test model_fn."""
      layer = tf.keras.layers.Dense(1)
      logits = layer(features["x"])

      if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {"logits": logits}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

      loss = tf.losses.mean_squared_error(
          labels=labels,
          predictions=logits,
          reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)

      if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)

      if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(0.2)
        train_op = optimizer.minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    if json.loads(os.environ["TF_CONFIG"])["task"]["type"] == "evaluator":
      # The evaluator job would crash if MultiWorkerMirroredStrategy is called.
      distribution = None
    else:
      distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()

    multiworker_config = tf.estimator.RunConfig(
        tf_random_seed=42,
        model_dir=FLAGS.model_dir,
        train_distribute=distribution,
        session_config=tf_compat.v1.ConfigProto(log_device_placement=False))
    # TODO: Replace with adanet.Estimator. Currently this just verifies
    # that the distributed testing framework supports distribute strategies.
    estimator = tf.estimator.Estimator(
        model_fn=_model_fn, config=multiworker_config)

  train_hooks = [
      tf.estimator.ProfilerHook(save_steps=50, output_dir=FLAGS.model_dir)
  ]
  # Train for three iterations.
  train_spec = tf.estimator.TrainSpec(
      input_fn=input_fn, max_steps=300, hooks=train_hooks)
  eval_spec = tf.estimator.EvalSpec(
      input_fn=input_fn, steps=1, start_delay_secs=.5, throttle_secs=.05)

  # Calling train_and_evaluate is the official way to perform distributed
  # training with an Estimator. Calling Estimator#train directly results
  # in an error when the TF_CONFIG is setup for a cluster.
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def main(argv):
  """Entry point: runs distributed training with shortened waits/timeouts."""
  del argv  # Unused.
  # Apply the timing monkey-patches only for the duration of training.
  with _monkey_patch_distributed_training_times():
    train_and_evaluate_estimator()
# absl entry point: parses command-line flags, then invokes main().
if __name__ == "__main__":
  app.run(main)
| 15,527 | 37.626866 | 111 | py |
adanet | adanet-master/adanet/core/candidate_test.py | """Test AdaNet single graph candidate implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.candidate import _Candidate
from adanet.core.candidate import _CandidateBuilder
import adanet.core.testing_utils as tu
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class CandidateTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for the `_Candidate` value type."""

  @parameterized.named_parameters({
      "testcase_name": "valid",
      "ensemble_spec": tu.dummy_ensemble_spec("foo"),
      "adanet_loss": [.1],
  })
  @test_util.run_in_graph_and_eager_modes
  def test_new(self, ensemble_spec, adanet_loss, variables=None):
    """Constructing with valid arguments exposes them as attributes."""
    with self.test_session():
      got = _Candidate(ensemble_spec, adanet_loss, variables)
      self.assertEqual(got.ensemble_spec, ensemble_spec)
      self.assertEqual(got.adanet_loss, adanet_loss)

  @parameterized.named_parameters(
      {
          "testcase_name": "none_ensemble_spec",
          "ensemble_spec": None,
          "adanet_loss": [.1],
      }, {
          "testcase_name": "none_adanet_loss",
          "ensemble_spec": tu.dummy_ensemble_spec("foo"),
          "adanet_loss": None,
      })
  @test_util.run_in_graph_and_eager_modes
  def test_new_errors(self, ensemble_spec, adanet_loss, variables=None):
    """A `None` ensemble_spec or adanet_loss must raise `ValueError`."""
    with self.test_session():
      with self.assertRaises(ValueError):
        _Candidate(ensemble_spec, adanet_loss, variables)
class _FakeSummary(object):
"""A fake adanet.Summary."""
def scalar(self, name, tensor, family=None):
del name
del tensor
del family
return "fake_scalar"
@contextlib.contextmanager
def current_scope(self):
yield
class CandidateBuilderTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for `_CandidateBuilder.build_candidate`."""

  @parameterized.named_parameters(
      {
          "testcase_name": "evaluate",
          "training": False,
          "want_adanet_losses": [0., 0., 0.],
      }, {
          "testcase_name": "train_exactly_max_steps",
          "training": True,
          "want_adanet_losses": [1., .750, .583],
      }, {
          "testcase_name": "train_one_step_max_one_step",
          "training": True,
          "want_adanet_losses": [1.],
      }, {
          "testcase_name": "train_two_steps_max_two_steps",
          "training": True,
          "want_adanet_losses": [1., .750],
      }, {
          "testcase_name": "train_three_steps_max_four_steps",
          "training": True,
          "want_adanet_losses": [1., .750, .583],
      }, {
          "testcase_name": "eval_one_step",
          "training": False,
          "want_adanet_losses": [0.],
      })
  @test_util.run_in_graph_and_eager_modes
  def test_build_candidate(self, training, want_adanet_losses):
    """Checks the candidate's moving-average loss over several train steps."""
    # `_CandidateBuilder#build_candidate` will only ever be called in graph
    # mode.
    with context.graph_mode():
      # A fake adanet_loss that halves at each train step: 1.0, 0.5, 0.25, ...
      fake_adanet_loss = tf.Variable(1.)
      fake_train_op = fake_adanet_loss.assign(fake_adanet_loss / 2)
      fake_ensemble_spec = tu.dummy_ensemble_spec(
          "new", adanet_loss=fake_adanet_loss, train_op=fake_train_op)

      builder = _CandidateBuilder()
      candidate = builder.build_candidate(
          ensemble_spec=fake_ensemble_spec,
          training=training,
          summary=_FakeSummary())
      self.evaluate(tf_compat.v1.global_variables_initializer())
      adanet_losses = []
      # Read the candidate's loss once per expected value, stepping the fake
      # train op in between so the moving average sees the halved losses.
      for _ in range(len(want_adanet_losses)):
        adanet_loss = self.evaluate(candidate.adanet_loss)
        adanet_losses.append(adanet_loss)
        self.evaluate(fake_train_op)
      # Verify that adanet_loss moving average works.
      self.assertAllClose(want_adanet_losses, adanet_losses, atol=1e-3)
# Standard TensorFlow test runner entry point.
if __name__ == "__main__":
  tf.test.main()
| 4,613 | 32.678832 | 78 | py |
adanet | adanet-master/adanet/core/estimator.py | """An AdaNet estimator implementation in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import errno
import inspect
import os
import time
from absl import logging
from adanet import distributed as distributed_lib
from adanet import ensemble as ensemble_lib
from adanet import tf_compat
from adanet.core.architecture import _Architecture
from adanet.core.candidate import _CandidateBuilder
from adanet.core.ensemble_builder import _EnsembleBuilder
from adanet.core.ensemble_builder import _SubnetworkManager
from adanet.core.iteration import _Iteration
from adanet.core.iteration import _IterationBuilder
from adanet.core.report_accessor import _ReportAccessor
from adanet.core.summary import _ScopedSummary
from adanet.core.summary import _ScopedSummaryV2
from adanet.core.summary import _TPUScopedSummary
from adanet.core.timer import _CountDownTimer
from adanet.distributed.devices import monkey_patch_default_variable_placement_strategy
import numpy as np
import six
import tensorflow.compat.v2 as tf
from typing import Any, Callable, Dict, Optional, Sequence, Text # (b/144172555) pylint:disable=unused-import
class _StopAfterTrainingHook(tf_compat.SessionRunHook):
  """Hook that requests stop once iteration is over."""

  def __init__(self, iteration, after_fn):
    # type: (_Iteration, Callable[[], None]) -> None
    """Constructs a hook that halts training when `iteration` completes.

    Args:
      iteration: The `_Iteration` whose train manager signals completion.
      after_fn: Zero-argument callable invoked after stop is requested.
    """

    self._iteration = iteration
    self._after_fn = after_fn

  def before_run(self, run_context):
    """See `SessionRunHook`."""

    self._maybe_request_stop(run_context)

  def after_run(self, run_context, run_values):
    """See `SessionRunHook`."""

    self._maybe_request_stop(run_context)

  def _maybe_request_stop(self, run_context):
    """Asks the monitored session to stop if the iteration has finished."""

    if self._iteration.train_manager.is_over():
      logging.info("Now stopping iteration %d training", self._iteration.number)
      run_context.request_stop()
      self._after_fn()
class _SummaryV2SaverHook(tf_compat.SessionRunHook):
  """A hook that writes summaries to the appropriate log directory on disk."""

  def __init__(self, summaries, save_steps=None, save_secs=None):
    """Initializes a `SummaryV2SaverHook` for writing TF 2 summaries.

    Args:
      summaries: List of `_ScopedSummaryV2` instances.
      save_steps: `int`, save summaries every N steps. Exactly one of
        `save_secs` and `save_steps` should be set.
      save_secs: `int`, save summaries every N seconds.
    """

    self._summaries = summaries
    self._summary_ops = []
    self._writer_init_ops = []
    self._timer = tf_compat.v1.train.SecondOrStepTimer(
        every_secs=save_secs, every_steps=save_steps)

  def begin(self):
    """Creates one file writer per scoped summary and collects summary ops."""
    self._next_step = None
    self._global_step_tensor = tf_compat.v1.train.get_global_step()
    for summary in self._summaries:
      assert isinstance(summary, _ScopedSummaryV2)
      writer = tf_compat.v2.summary.create_file_writer(summary.logdir)
      with writer.as_default():
        for summary_fn, tensor in summary.summary_tuples():
          self._summary_ops.append(
              summary_fn(tensor, step=tf.compat.v1.train.get_global_step()))
      self._writer_init_ops.append(writer.init())

  def after_create_session(self, session, coord):
    """Initializes all summary file writers inside the new session."""
    session.run(self._writer_init_ops)

  def before_run(self, run_context):
    """Requests the summary ops whenever the save timer has triggered."""
    requests = {"global_step": self._global_step_tensor}
    # Always write summaries on the first run (when _next_step is None).
    self._request_summary = (
        self._next_step is None or
        self._timer.should_trigger_for_step(self._next_step))
    if self._request_summary:
      requests["summary"] = self._summary_ops
    return tf_compat.SessionRunArgs(requests)

  def after_run(self, run_context, run_values):
    """Updates the save timer using the freshest available global step."""
    stale_global_step = run_values.results["global_step"]
    global_step = stale_global_step + 1
    if self._next_step is None or self._request_summary:
      # Re-read the global step; the fetched value may be stale by one run.
      global_step = run_context.session.run(self._global_step_tensor)
    if self._request_summary:
      self._timer.update_last_triggered_step(global_step)
    self._next_step = global_step + 1

  def end(self, session):
    # TODO: Run writer.flush() at Session end.
    # Currently disabled because the flush op crashes between iterations.
    return
class _EvalMetricSaverHook(tf_compat.SessionRunHook):
  """A hook for writing candidate evaluation metrics as summaries to disk."""

  def __init__(self, name, kind, eval_metrics, output_dir):
    # type: (Text, Text, Any, Text) -> None
    """Initializes a `_EvalMetricSaverHook` instance.

    Args:
      name: String name of candidate owner of these metrics.
      kind: The kind of candidate that the metrics belong to (e.g. subnetwork).
      eval_metrics: An object exposing `eval_metrics_tuple()` which returns a
        `(metric_fn, tensors)` pair; `metric_fn` returns a dict of metric
        results keyed by name. The values of the dict are the results of
        calling a metric function, namely a `(metric_tensor, update_op)`
        tuple. `metric_tensor` should be evaluated without any impact on state
        (typically is a pure computation based on variables.). For example, it
        should not trigger the `update_op` or require any input fetching.
      output_dir: Directory for writing evaluation summaries.
    """

    self._name = name
    self._kind = kind
    self._eval_metrics = eval_metrics
    self._output_dir = output_dir

  def begin(self):
    """See `SessionRunHook`."""

    # The metric_fn is called with tf.placeholders to simply read the value of
    # the metric variables. The metrics themselves are computed as a result of
    # being returned in the EstimatorSpec by _adanet_model_fn.
    metric_fn, tensors = self._eval_metrics.eval_metrics_tuple()
    tensors = [tf_compat.v1.placeholder(t.dtype, t.shape) for t in tensors]
    eval_metric_ops = metric_fn(*tensors)
    self._eval_metric_tensors = {}
    # Keep only the metric value tensor from each (value, update_op) pair.
    for key in sorted(eval_metric_ops):
      value = tf_compat.metric_op(eval_metric_ops[key])
      self._eval_metric_tensors[key] = value[0]

  def _dict_to_str(self, dictionary):
    """Get a `str` representation of a `dict`.

    Args:
      dictionary: The `dict` to be represented as `str`.

    Returns:
      A `str` representing the `dictionary`.
    """
    return ", ".join(
        "{} = {}".format(k, v) for k, v in sorted(dictionary.items()))

  def end(self, session):
    """See `SessionRunHook`."""
    # Forked from tensorflow/python/estimator/estimator.py function called
    # _write_dict_to_summary.
    current_global_step = tf_compat.v1.train.get_global_step()

    eval_dict, current_global_step = session.run(
        (self._eval_metric_tensors, current_global_step))

    logging.info("Saving %s '%s' dict for global step %d: %s", self._kind,
                 self._name, current_global_step, self._dict_to_str(eval_dict))
    summary_writer = tf_compat.v1.summary.FileWriterCache.get(self._output_dir)
    summary_proto = tf_compat.v1.summary.Summary()
    for key in eval_dict:
      value = eval_dict[key]
      if isinstance(value, (np.float32, float)):
        summary_proto.value.add(tag=key, simple_value=float(value))
      elif isinstance(value, six.binary_type):
        # Serialized Summary protos are merged in, with tags namespaced by key.
        summ = tf_compat.v1.summary.Summary.FromString(value)
        for i, _ in enumerate(summ.value):
          summ.value[i].tag = "{}/{}".format(key, i)
        summary_proto.value.extend(summ.value)
      else:
        logging.warn(
            "Skipping summary for %s, must be a float, np.float32, "
            "or a serialized string of Summary.", key)
    summary_writer.add_summary(summary_proto, current_global_step)
    summary_writer.flush()
    # Note(b/137672676): Do not explicitly call summary_writer.close() here.
    # This will cause eval summaries to not be written out after the first time
    # in continuous evals.
class _OverwriteCheckpointHook(tf_compat.SessionRunHook):
  """Hook to overwrite the latest checkpoint with next iteration variables."""

  def __init__(self, current_iteration, iteration_number_tensor,
               previous_iteration_vars, config, enable_v2_checkpoint):
    """Initializes an _OverwriteCheckpointHook instance.

    Args:
      current_iteration: Current `_Iteration` object.
      iteration_number_tensor: Int variable `Tensor` storing the current
        iteration number.
      previous_iteration_vars: Variables to restore from the previous iteration
        before overwriting the checkpoint.
      config: The Estimator's RunConfig object.
      enable_v2_checkpoint: Whether `tf.train.Checkpoint` is used for
        checkpointing.
    """
    self._current_iteration = current_iteration
    self._iteration_number = current_iteration.number
    self._iteration_number_tensor = iteration_number_tensor
    self._previous_iteration_vars = previous_iteration_vars
    self._model_dir = config.model_dir
    # Snapshot the checkpoint state now, before the overwrite changes it.
    self._checkpoint_state = tf.train.get_checkpoint_state(self._model_dir)
    self._keep_checkpoint_max = config.keep_checkpoint_max
    self._enable_v2_checkpoint = enable_v2_checkpoint

    self._update_op = None
    self._overwrite_saver = None
    # Guard so the overwrite happens exactly once, on the first before_run.
    self._checkpoint_overwritten = False

  def begin(self):
    """Creates the savers and adds ops needed for overwriting the checkpoint.

    Two savers are created, a restore saver which is passed the variables from
    the previous iteration to restore, and an overwrite saver which will
    actually overwrite the checkpoint.
    """
    from tensorflow.python.training.tracking import graph_view  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    if self._enable_v2_checkpoint:
      prev_checkpoint = self._current_iteration.previous_iteration.checkpoint
      self._status = prev_checkpoint.restore(
          self._checkpoint_state.model_checkpoint_path)
      # Because we prune the previous iteration's candidates, only a subset of
      # the variables present in the checkpoint will be used. Assert they are
      # restored.
      self._status.expect_partial().assert_existing_objects_matched()
      self._overwrite_saver = tf_compat.v1.train.Saver(
          var_list=graph_view.ObjectGraphView(
              self._current_iteration.checkpoint).frozen_saveable_objects(),
          sharded=True,
          max_to_keep=self._keep_checkpoint_max)
    else:
      self._restore_saver = tf_compat.v1.train.Saver(
          sharded=True, var_list=self._previous_iteration_vars)
      # Note: self._iteration_number already contains the value of the next
      # iteration since _OverwriteCheckpointHook should only execute during the
      # graph growing phase.
      self._update_op = self._iteration_number_tensor.assign(
          self._iteration_number)
      self._overwrite_saver = tf_compat.v1.train.Saver(
          sharded=True, max_to_keep=self._keep_checkpoint_max)
      # Let the overwrite saver manage (and GC) the existing checkpoints.
      self._overwrite_saver.recover_last_checkpoints(
          self._checkpoint_state.all_model_checkpoint_paths)

  def before_run(self, run_context):
    """Overwrites checkpoint before any calls to session.run().

    This is to ensure that the values of the variables in the overwritten
    checkpoint match those in the previous iteration checkpoint.

    Args:
      run_context: The tf.train.SessionRunContext passed to the hook.
    """
    if not self._checkpoint_overwritten:
      session = run_context.session
      if self._enable_v2_checkpoint:
        self._status.initialize_or_restore(session)
      else:
        self._restore_saver.restore(
            session, self._checkpoint_state.model_checkpoint_path)
        session.run(self._update_op)
      checkpoint_path = os.path.join(self._model_dir, "increment.ckpt")
      logging.info(
          "Overwriting checkpoint with new graph for iteration %d to %s-%d",
          self._current_iteration.number, checkpoint_path,
          self._current_iteration.number)
      # Specify global_step=self._iteration_number to append the iteration
      # number to the checkpoint name, e.g. <model_dir>/increment.ckpt-1.
      self._overwrite_saver.save(
          session, checkpoint_path, global_step=self._current_iteration.number)
      self._checkpoint_overwritten = True
def _copy_recursively(source, destination):
  """Recursively mirrors `source` into `destination`, overwriting files.

  Args:
    source: Source directory.
    destination: Destination directory.
  """

  for current_dir, _, filenames in tf.io.gfile.walk(source):
    # Recreate the same relative layout under the destination root.
    relative_dir = os.path.relpath(current_dir, source)
    target_dir = os.path.join(destination, relative_dir)
    if not tf.io.gfile.exists(target_dir):
      tf.io.gfile.makedirs(target_dir)
    for filename in filenames:
      tf.io.gfile.copy(
          os.path.join(current_dir, filename),
          os.path.join(target_dir, filename),
          overwrite=True)
class _GraphGrowingHookDecorator(tf_compat.SessionRunHook):
  """Decorates a SessionRunHook to only run begin() and end() methods."""

  def __init__(self, hook):
    # type: (tf_compat.SessionRunHook) -> None
    """Wraps `hook`, forwarding only its begin() and end() calls.

    Args:
      hook: The SessionRunHook to decorate.
    """
    self._delegate = hook

  def begin(self):
    self._delegate.begin()

  def end(self, session):
    self._delegate.end(session)
def _delete_directory(directory):
  # type: (Text) -> None
  """Best-effort removal of `directory`, tolerating folder/file issues."""
  if tf.io.gfile.exists(directory):
    try:
      tf.io.gfile.rmtree(directory)
    except (tf.errors.PermissionDeniedError,
            tf.errors.FailedPreconditionError) as error:
      # Deliberate best-effort cleanup: these errors are logged, not raised.
      logging.info("Ignoring folder or file issues: %s '%s'",
                   error.error_code, error.message)
@contextlib.contextmanager
def _disable_asserts_for_confusion_matrix_at_thresholds():
  """Disables asserts in metrics_impl._confusion_matrix_at_thresholds.

  AdaNet sometimes has a few NaN and non-NaN subnetworks at a given iteration.
  This doesn't crash during training, since AdaNet simply chooses the best
  subnetwork among the non-NaN candidates. However, during estimator.evaluate(),
  AdaNet evaluates all subnetworks and ensembles. This triggers an assertion
  failure in V1 binary classifier _Head, since it expects the predictions to be
  between 0 and 1, and NaN is not between 0 and 1. This causes AdaNet
  to raise an exception during estimator.evaluate(), even though the final
  model is servable. Hence, we disable these assertions during evaluate(), and
  allow the NaNs to be written to disk.

  Yields:
    Nothing. Simply returns control back to the caller.
  """

  from tensorflow.python.ops import metrics_impl  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top

  def _no_op_assert(x, y, data=None, summarize=None, message=None, name=None):
    """Dummy assert that never fails."""

    del x, y, data, summarize, message, name  # unused
    return tf.no_op()

  old_confusion_matrix_at_thresholds = (
      metrics_impl._confusion_matrix_at_thresholds)  # pylint:disable=protected-access

  def _confusion_matrix_at_thresholds_without_asserts(labels,
                                                      predictions,
                                                      thresholds,
                                                      weights=None,
                                                      includes=None):
    """Calls _confusion_matrix_at_thresholds without asserts; returns output."""

    from tensorflow.python.ops import check_ops  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    old_assert_greater_equal = check_ops.assert_greater_equal
    old_assert_less_equal = check_ops.assert_less_equal
    setattr(check_ops, "assert_greater_equal", _no_op_assert)
    setattr(check_ops, "assert_less_equal", _no_op_assert)
    # Restore the real asserts even if the wrapped call raises; otherwise a
    # failure here would leave check_ops monkey-patched for unrelated code.
    try:
      conf_matrix = old_confusion_matrix_at_thresholds(labels, predictions,
                                                       thresholds, weights,
                                                       includes)
    finally:
      setattr(check_ops, "assert_greater_equal", old_assert_greater_equal)
      setattr(check_ops, "assert_less_equal", old_assert_less_equal)
    return conf_matrix

  setattr(metrics_impl, "_confusion_matrix_at_thresholds",
          _confusion_matrix_at_thresholds_without_asserts)
  try:
    yield
  finally:
    # Always undo the metrics_impl patch, even if the caller's body raises.
    setattr(metrics_impl, "_confusion_matrix_at_thresholds",
            old_confusion_matrix_at_thresholds)
class Estimator(tf.estimator.Estimator):
# pyformat: disable
r"""A :class:`tf.estimator.Estimator` for training, evaluation, and serving.
This implementation uses an :class:`adanet.subnetwork.Generator` as its weak
learning algorithm for generating candidate subnetworks. These are trained in
parallel using a single graph per iteration. At the end of each iteration, the
estimator saves the sub-graph of the best subnetwork ensemble and its weights
as a separate checkpoint. At the beginning of the next iteration, the
estimator imports the previous iteration's frozen graph and adds ops for the
next candidates as part of a new graph and session. This allows the estimator
  to have the performance of Tensorflow's static graph constraint (minus the
performance hit of reconstructing a graph between iterations), while having
the flexibility of having a dynamic graph.
NOTE: Subclassing :class:`tf.estimator.Estimator` is only necessary to work
with :meth:`tf.estimator.train_and_evaluate` which asserts that the estimator
argument is a :class:`tf.estimator.Estimator` subclass. However, all training
is delegated to a separate :class:`tf.estimator.Estimator` instance. It is
responsible for supporting both local and distributed training. As such, the
:class:`adanet.Estimator` is only responsible for bookkeeping across
iterations.
Args:
head: A :class:`tf.contrib.estimator.Head` instance for computing loss and
evaluation metrics for every candidate.
subnetwork_generator: The :class:`adanet.subnetwork.Generator` which defines
the candidate subnetworks to train and evaluate at every AdaNet iteration.
max_iteration_steps: Total number of steps for which to train candidates per
iteration. If :class:`OutOfRange` or :class:`StopIteration` occurs in the
middle, training stops before `max_iteration_steps` steps. When
:code:`None`, it will train the current iteration forever.
ensemblers: An iterable of :class:`adanet.ensemble.Ensembler` objects that
define how to ensemble a group of subnetworks. If there are multiple,
each should have a different `name` property.
ensemble_strategies: An iterable of :class:`adanet.ensemble.Strategy`
objects that define the candidate ensembles of subnetworks to explore at
each iteration.
evaluator: An :class:`adanet.Evaluator` for candidate selection after all
subnetworks are done training. When :code:`None`, candidate selection uses
a moving average of their :class:`adanet.Ensemble` AdaNet loss during
training instead. In order to use the *AdaNet algorithm* as described in
[Cortes et al., '17], the given :class:`adanet.Evaluator` must be created
with the same dataset partition used during training. Otherwise, this
framework will perform *AdaNet.HoldOut* which uses a holdout set for
candidate selection, but does not benefit from learning guarantees.
report_materializer: An :class:`adanet.ReportMaterializer`. Its reports are
made available to the `subnetwork_generator` at the next iteration, so
that it can adapt its search space. When `None`, the
`subnetwork_generator` :meth:`generate_candidates` method will receive
empty Lists for their `previous_ensemble_reports` and `all_reports`
arguments.
metric_fn: A function for adding custom evaluation metrics, which should
obey the following signature:
- `Args`:
Can only have the following three arguments in any order:
- :code:`predictions`: Predictions `Tensor` or dict of `Tensor`
created by given :code:`head`.
- :code:`features`: Input `dict` of `Tensor` objects created by
:code:`input_fn` which is given to :meth:`estimator.evaluate` as an
argument.
- :code:`labels`: Labels `Tensor` or dict of `Tensor` (for multi-head)
created by :code:`input_fn` which is given to
:meth:`estimator.evaluate` as an argument.
- `Returns`: Dict of metric results keyed by name. Final metrics are a
union of this and :code:`head`'s existing metrics. If there is a name
conflict between this and :code:`head`s existing metrics, this will
override the existing one. The values of the dict are the results of
calling a metric function, namely a :code:`(metric_tensor, update_op)`
tuple.
force_grow: Boolean override that forces the ensemble to grow by one
subnetwork at the end of each iteration. Normally at the end of each
iteration, AdaNet selects the best candidate ensemble according to its
performance on the AdaNet objective. In some cases, the best ensemble is
the `previous_ensemble` as opposed to one that includes a newly trained
subnetwork. When `True`, the algorithm will not select the
`previous_ensemble` as the best candidate, and will ensure that after n
iterations the final ensemble is composed of n subnetworks.
replicate_ensemble_in_training: Whether to rebuild the frozen subnetworks of
the ensemble in training mode, which can change the outputs of the frozen
subnetworks in the ensemble. When `False` and during candidate training,
the frozen subnetworks in the ensemble are in prediction mode, so
training-only ops like dropout are not applied to them. When `True` and
training the candidates, the frozen subnetworks will be in training mode
as well, so they will apply training-only ops like dropout. This argument
is useful for regularizing learning mixture weights, or for making
training-only side inputs available in subsequent iterations. For most
use-cases, this should be `False`.
adanet_loss_decay: Float decay for the exponential-moving-average of the
AdaNet objective throughout training. This moving average is a data-
driven way tracking the best candidate with only the training set.
delay_secs_per_worker: Float number of seconds to delay starting the
i-th worker. Staggering worker start-up during distributed asynchronous
SGD can improve training stability and speed up convergence. Each worker
will wait (i+1) * delay_secs_per_worker seconds before beginning training.
max_worker_delay_secs: Float max number of seconds to delay starting the
i-th worker. Staggering worker start-up during distributed asynchronous
SGD can improve training stability and speed up convergence. Each worker
will wait up to max_worker_delay_secs before beginning training.
worker_wait_secs: Float number of seconds for workers to wait before
checking if the chief prepared the next iteration.
worker_wait_timeout_secs: Float number of seconds for workers to wait for
chief to prepare the next iteration during distributed training. This is
needed to prevent workers waiting indefinitely for a chief that may have
crashed or been turned down. When the timeout is exceeded, the worker
exits the train loop. In situations where the chief job is much slower
than the worker jobs, this timeout should be increased.
model_dir: Directory to save model parameters, graph and etc. This can also
be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
report_dir: Directory where the
:class:`adanet.subnetwork.MaterializedReport`s materialized by
:code:`report_materializer` would be saved. If :code:`report_materializer`
is :code:`None`, this will not save anything. If :code:`None` or
empty string, defaults to :code:`<model_dir>/report`.
config: :class:`RunConfig` object to configure the runtime settings.
debug: Boolean to enable debug mode which will check features and labels
for Infs and NaNs.
enable_ensemble_summaries: Whether to record summaries to display in
TensorBoard for each ensemble candidate. Disable to reduce memory and disk
usage per run.
enable_subnetwork_summaries: Whether to record summaries to display in
TensorBoard for each subnetwork. Disable to reduce memory and disk usage
per run.
global_step_combiner_fn: Function for combining each subnetwork's
iteration step into the global step. By default it is the average of all
subnetwork iteration steps, which may affect the global_steps/sec as
subnetworks early stop and no longer increase their iteration step.
max_iterations: Integer maximum number of AdaNet iterations (a.k.a. rounds)
of generating new subnetworks and ensembles, training them, and evaluating
them against the current best ensemble. When :code:`None`, AdaNet will
keep iterating until `Estimator#train` terminates. Otherwise, if
      :code:`max_iterations` is supplied and is met or exceeded during training,
training will terminate even before `steps` or `max_steps`.
export_subnetwork_logits: Whether to include subnetwork logits in exports.
export_subnetwork_last_layer: Whether to include subnetwork last layer in
exports.
replay_config: Optional :class:`adanet.replay.Config` to specify a previous
AdaNet run to replay. Given the exact same search space but potentially
different training data, the `replay_config` causes the estimator to
reconstruct the previously trained model without performing a search.
NOTE: The previous run must have executed with identical hyperparameters
as the new run in order to be replayable. The only supported difference is
that the underlying data can change.
**kwargs: Extra keyword args passed to the parent.
Returns:
An :class:`adanet.Estimator` instance.
Raises:
:code:`ValueError`: If :code:`subnetwork_generator` is :code:`None`.
:code:`ValueError`: If :code:`max_iteration_steps` is <= 0.
:code:`ValueError`: If :code:`model_dir` is not specified during distributed
training.
:code:`ValueError`: If :code:`max_iterations` is <= 0.
"""
# pyformat: enable
  class _Keys(object):
    """String keys for AdaNet bookkeeping state.

    CURRENT_ITERATION names the checkpoint variable that stores the current
    AdaNet iteration number (read back via tf.train.load_variable).
    """

    CURRENT_ITERATION = "current_iteration"
    # NOTE(review): presumably a graph-collection/bookkeeping key for the
    # subnetwork generator; its use is outside this section -- confirm.
    SUBNETWORK_GENERATOR = "subnetwork_generator"
  def __init__(self,
               head,
               subnetwork_generator,
               max_iteration_steps,
               ensemblers=None,
               ensemble_strategies=None,
               evaluator=None,
               report_materializer=None,
               metric_fn=None,
               force_grow=False,
               replicate_ensemble_in_training=False,
               adanet_loss_decay=.9,
               delay_secs_per_worker=5,
               max_worker_delay_secs=60,
               worker_wait_secs=5,
               worker_wait_timeout_secs=7200,
               model_dir=None,
               report_dir=None,
               config=None,
               debug=False,
               enable_ensemble_summaries=True,
               enable_subnetwork_summaries=True,
               global_step_combiner_fn=tf.math.reduce_mean,
               max_iterations=None,
               export_subnetwork_logits=False,
               export_subnetwork_last_layer=True,
               replay_config=None,
               **kwargs):
    """Initializes an `adanet.Estimator`; see the class docstring for args."""

    # Fail fast on invalid configuration before any state is created.
    if subnetwork_generator is None:
      raise ValueError("subnetwork_generator can't be None.")
    if max_iteration_steps is not None and max_iteration_steps <= 0.:
      raise ValueError("max_iteration_steps must be > 0 or None.")
    if max_iterations is not None and max_iterations <= 0.:
      raise ValueError("max_iterations must be > 0 or None.")
    is_distributed_training = config and config.num_worker_replicas > 1
    is_model_dir_specified = model_dir or (config and config.model_dir)
    if is_distributed_training and not is_model_dir_specified:
      # A common model dir for the chief and workers is required for
      # coordination during distributed training.
      raise ValueError(
          "For distributed training, a model_dir must be specified.")

    self._subnetwork_generator = subnetwork_generator

    # Overwrite superclass's assert that members are not overwritten in order
    # to overwrite public methods. Note that we are doing something that is not
    # explicitly supported by the Estimator API and may break in the future.
    tf.estimator.Estimator._assert_members_are_not_overridden = staticmethod(  # pylint: disable=protected-access
        lambda _: None)

    # Experimental flag; popped from kwargs so it never reaches the parent.
    self._enable_v2_checkpoint = kwargs.pop("enable_v2_checkpoint", False)
    self._evaluator = evaluator
    self._report_materializer = report_materializer

    self._force_grow = force_grow

    self._delay_secs_per_worker = delay_secs_per_worker
    self._max_worker_delay_secs = max_worker_delay_secs
    self._worker_wait_secs = worker_wait_secs
    self._worker_wait_timeout_secs = worker_wait_timeout_secs
    self._max_iterations = max_iterations
    self._replay_config = replay_config

    # Added for backwards compatibility.
    # These kwargs used to belong to this constructor; they are now forwarded
    # to the default ComplexityRegularizedEnsembler below.
    default_ensembler_args = [
        "mixture_weight_type", "mixture_weight_initializer",
        "warm_start_mixture_weights", "adanet_lambda", "adanet_beta", "use_bias"
    ]
    default_ensembler_kwargs = {
        k: v for k, v in kwargs.items() if k in default_ensembler_args
    }
    if default_ensembler_kwargs:
      logging.warning(
          "The following arguments have been moved to "
          "`adanet.ensemble.ComplexityRegularizedEnsembler` which can be "
          "specified in the `ensemblers` argument: %s",
          sorted(default_ensembler_kwargs.keys()))
    # Strip the legacy keys so the parent Estimator never sees them.
    for key in default_ensembler_kwargs:
      del kwargs[key]

    # Experimental feature.
    placement_strategy_arg = "experimental_placement_strategy"
    placement_strategy = kwargs.pop(placement_strategy_arg, None)
    if placement_strategy:
      logging.warning(
          "%s is an experimental feature. Its behavior is not guaranteed "
          "to be backwards compatible.", placement_strategy_arg)

    self._warm_start_settings = kwargs.get("warm_start_from")

    # Monkey patch the default variable placement strategy that Estimator uses
    # since it does not support workers having different graphs from the chief.
    # TODO: Consider using `RunConfig.replace` with the new device_fn,
    # but this can cause issues since RunConfig automatically parses TF_CONFIG
    # environment variable.
    with monkey_patch_default_variable_placement_strategy():
      # This `Estimator` is responsible for bookkeeping across iterations, and
      # for training the subnetworks in both a local and distributed setting.
      # Subclassing improves future-proofing against new private methods being
      # added to `tf.estimator.Estimator` that are expected to be callable by
      # external functions, such as in b/110435640.
      super(Estimator, self).__init__(
          model_fn=self._create_model_fn(),
          params={},
          config=config,
          model_dir=model_dir,
          **kwargs)

    if default_ensembler_kwargs and ensemblers:
      # Mixing the legacy constructor kwargs with explicit ensemblers is
      # ambiguous, so it is rejected outright.
      raise ValueError("When specifying the `ensemblers` argument, "
                       "the following arguments must not be given: {}".format(
                           default_ensembler_kwargs.keys()))
    if not ensemblers:
      default_ensembler_kwargs["model_dir"] = self.model_dir
      ensemblers = [
          ensemble_lib.ComplexityRegularizedEnsembler(
              **default_ensembler_kwargs)
      ]

    # These are defined after base Estimator's init so that they can
    # use the same temporary model_dir as the underlying Estimator even if
    # model_dir is not provided.
    self._use_tpu = kwargs.get("use_tpu", False)
    ensemble_builder = _EnsembleBuilder(
        head=head,
        metric_fn=metric_fn,
        use_tpu=self._use_tpu,
        export_subnetwork_logits=export_subnetwork_logits,
        export_subnetwork_last_layer=export_subnetwork_last_layer)

    # TODO: Merge CandidateBuilder into SubnetworkManager.
    candidate_builder = _CandidateBuilder(adanet_loss_decay=adanet_loss_decay)

    subnetwork_manager = _SubnetworkManager(
        head=head, metric_fn=metric_fn, use_tpu=self._use_tpu)

    if not placement_strategy:
      placement_strategy = distributed_lib.ReplicationStrategy()

    self._iteration_builder = _IterationBuilder(
        candidate_builder,
        subnetwork_manager,
        ensemble_builder,
        ensemblers,
        max_iteration_steps,
        self._summary_maker,
        global_step_combiner_fn,
        placement_strategy,
        replicate_ensemble_in_training,
        use_tpu=self._use_tpu,
        debug=debug,
        enable_ensemble_summaries=enable_ensemble_summaries,
        enable_subnetwork_summaries=enable_subnetwork_summaries,
        enable_subnetwork_reports=self._report_materializer is not None)

    self._ensemble_strategies = ensemble_strategies or [
        ensemble_lib.GrowStrategy()
    ]

    report_dir = report_dir or os.path.join(self._model_dir, "report")
    self._report_accessor = _ReportAccessor(report_dir)
def _summary_maker(self, scope=None, skip_summary=False, namespace=None):
"""Constructs a `_ScopedSummary`."""
if tf_compat.is_v2_behavior_enabled():
# Here we assume TF 2 behavior is enabled.
return _ScopedSummaryV2(
logdir=self._model_dir,
scope=scope,
skip_summary=skip_summary,
namespace=namespace)
if self._use_tpu:
return _TPUScopedSummary(
logdir=self._model_dir,
scope=scope,
skip_summary=skip_summary,
namespace=namespace)
else:
return _ScopedSummary(
scope=scope, skip_summary=skip_summary, namespace=namespace)
def _checkpoint_iteration_number(self, checkpoint_path):
# type: (Text) -> int
"""Returns the iteration number from the latest checkpoint."""
if checkpoint_path is None:
return 0
if self._enable_v2_checkpoint:
return tf_compat.load_variable(
checkpoint_path, "iteration_number", shape=[], dtype=tf.int64)
return tf.train.load_variable(checkpoint_path,
self._Keys.CURRENT_ITERATION).item()
def _checkpoint_global_step(self, checkpoint_path):
# type: (Text) -> int
"""Returns the global step from the given checkpoint."""
if checkpoint_path is None:
return 0
if self._enable_v2_checkpoint:
return tf_compat.load_variable(
checkpoint_path,
tf_compat.v1.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=tf.int64)
return tf.train.load_variable(checkpoint_path,
tf_compat.v1.GraphKeys.GLOBAL_STEP).item()
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
# pyformat: disable
"""Trains a model given training data :code:`input_fn`.
NOTE: If a given input_fn raises an :code:`OutOfRangeError`, then *all* of
training will exit. The best practice is to make the training dataset repeat
forever, in order to perform model search for more than one iteration.
Args:
input_fn: A function that provides input data for training as minibatches.
See [Premade Estimators](
https://tensorflow.org/guide/premade_estimators#create_input_functions)
for more information. The function should construct and return one of
the following:
* A :code:`tf.data.Dataset` object: Outputs of `Dataset` object must
be a tuple `(features, labels)` with same constraints as below.
* A tuple `(features, labels)`: Where `features` is a
:code:`tf.Tensor` or a dictionary of string feature name to
`Tensor` and `labels` is a :code:`Tensor` or a dictionary of string
label name to `Tensor`. Both `features` and `labels` are consumed by
`model_fn`. They should satisfy the expectation of `model_fn` from
inputs.
hooks: List of :code:`tf.train.SessionRunHook` subclass instances. Used
for callbacks inside the training loop.
steps: Number of steps for which to train the model. If :code:`None`,
train forever or train until `input_fn` generates the
:code:`tf.errors.OutOfRange` error or :code:`StopIteration` exception.
`steps` works incrementally. If you call two times `train(steps=10)`
then training occurs in total 20 steps. If :code:`OutOfRange` or
:code:`StopIteration` occurs in the middle, training stops before 20
steps. If you don't want to have incremental behavior please set
`max_steps` instead. If set, `max_steps` must be :code:`None`.
max_steps: Number of total steps for which to train model. If
:code:`None`, train forever or train until `input_fn` generates the
:code:`tf.errors.OutOfRange` error or :code:`StopIteration` exception.
If set, `steps` must be `None`. If :code:`OutOfRange` or
:code:`StopIteration` occurs in the middle, training stops before
`max_steps` steps. Two calls to `train(steps=100)` means 200 training
iterations. On the other hand, two calls to `train(max_steps=100)`
means that the second call will not do any iteration since first call
did all 100 steps.
saving_listeners: list of :code:`CheckpointSaverListener` objects. Used
for callbacks that run immediately before or after checkpoint savings.
Returns:
`self`, for chaining.
Raises:
ValueError: If both `steps` and `max_steps` are not `None`.
ValueError: If either `steps` or `max_steps <= 0`.
"""
# pyformat: enable
if (steps is not None) and (max_steps is not None):
raise ValueError("Can not provide both steps and max_steps.")
if steps is not None and steps <= 0:
raise ValueError("Must specify steps > 0, given: {}".format(steps))
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
latest_global_steps = self._checkpoint_global_step(latest_checkpoint)
if steps is not None:
max_steps = latest_global_steps + steps
# Each iteration of this AdaNet loop represents an `_Iteration`. The
# current iteration number is stored as a variable in the checkpoint so
# that training can be stopped and started at anytime.
with monkey_patch_default_variable_placement_strategy():
while True:
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
latest_global_steps = self._checkpoint_global_step(latest_checkpoint)
current_iteration = self._checkpoint_iteration_number(latest_checkpoint)
logging.info("Beginning training AdaNet iteration %s",
current_iteration)
self._iteration_ended = False
# Delegate training to a temporary estimator instead of super to make
# passing arguments more functional (via params).
temp_estimator = self._create_temp_estimator(
config=self.config,
is_inside_training_loop=True,
checkpoint_path=latest_checkpoint,
hooks=hooks)
result = temp_estimator.train(
input_fn=input_fn,
hooks=hooks,
max_steps=max_steps,
saving_listeners=saving_listeners)
# In TensorFlow v2.0.0.rc1 and below, saving listeners are attached to
# the first CheckpointSaverHook each time train is called. Instead, we
# pass in the saving_listeners in the first AdaNet iteration only.
if not tf_compat.version_greater_or_equal("2.0.0.rc1"):
saving_listeners = None
logging.info("Finished training Adanet iteration %s", current_iteration)
# If training ended because the maximum number of training steps
# occurred, exit training.
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
global_steps = self._checkpoint_global_step(latest_checkpoint)
if max_steps is not None and global_steps >= max_steps:
logging.info("Training ended after %s global steps", global_steps)
return result
# If training ended for any reason other than the iteration ending,
# exit training.
if not self._iteration_ended:
logging.info("Training stop requested")
return result
max_iterations = self._max_iterations
if max_iterations and current_iteration + 1 >= max_iterations:
logging.info(
"Training ended after exceeding maximum AdaNet iterations")
if steps is not None and global_steps - latest_global_steps < steps:
logging.warning(
"Both `max_iterations` and `steps` were specified, but "
"`max_iterations` takes precedence over `steps`")
return result
logging.info("Beginning bookkeeping phase for iteration %s",
current_iteration)
# The chief prepares the next AdaNet iteration, and increments the
# iteration number by 1.
if self.config.is_chief:
with self._force_replication_strategy():
self._execute_bookkeeping_phase(
input_fn,
current_iteration,
train_hooks=hooks or [],
checkpoint_path=latest_checkpoint)
# This inner loop serves mainly for synchronizing the workers with the
# chief during distributed training. Workers that finish training early
# wait for the chief to prepare the next iteration and increment the
# iteration number. Workers that are slow to finish training quickly
# move onto the next iteration. And workers that go offline and return
# online after training ended terminate gracefully.
wait_for_chief = not self.config.is_chief
timer = _CountDownTimer(self._worker_wait_timeout_secs)
while wait_for_chief:
# Fetch the latest checkpoint.
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
# If the chief hits max_steps, it will stop training itself and not
# increment the iteration number, so this is how the worker knows to
# exit if it wakes up and the chief is gone.
# TODO: Support steps parameter.
if self._checkpoint_global_step(latest_checkpoint) >= max_steps:
return result
# In distributed training, a worker may end training before the chief
# overwrites the checkpoint with the incremented iteration number. If
# that is the case, it should wait for the chief to do so. Otherwise
# the worker will get stuck waiting for its weights to be initialized.
next_iteration = self._checkpoint_iteration_number(latest_checkpoint)
if next_iteration > current_iteration:
break
logging.info("Iteration number in latest checkpoint: %d",
next_iteration)
# Check timeout when waiting for potentially downed chief.
if timer.secs_remaining() == 0:
logging.error(
"Chief job did not prepare iteration %d after %s secs. It "
"may have been preempted, been turned down, or crashed. This "
"worker is now exiting training.", current_iteration + 1,
self._worker_wait_timeout_secs)
return result
logging.info("Waiting for chief to prepare iteration %d",
current_iteration + 1)
time.sleep(self._worker_wait_secs)
# Stagger starting workers to prevent training instability.
# Mimics behavior of tf.estimator.train_and_evaluate.
if not self.config.is_chief and self.config.task_type == "worker":
task_id = self.config.task_id or 0
# Stagger each worker up to 60 secs.
delay_secs = min(self._max_worker_delay_secs,
(task_id + 1.) * self._delay_secs_per_worker)
if delay_secs > 0.:
logging.info("Waiting %d secs before continuing training.",
delay_secs)
time.sleep(delay_secs)
logging.info("Finished bookkeeping phase for iteration %s",
current_iteration)
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
if not checkpoint_path:
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
logging.info("Evaluating AdaNet model at checkpoint: %s", checkpoint_path)
# Delegate evaluation to a temporary estimator instead of super to make
# passing arguments more functional (via params).
temp_estimator = self._create_temp_estimator(
config=self.config,
checkpoint_path=checkpoint_path,
evaluation_name=name,
# Ensure that the read to get the iteration number and read to restore
# variable values come from the same checkpoint during evaluation.
best_ensemble_index=self._compute_best_ensemble_index(
checkpoint_path, hooks),
hooks=hooks)
with _disable_asserts_for_confusion_matrix_at_thresholds():
result = temp_estimator.evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
return result
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
if not checkpoint_path:
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
logging.info("Computing predictions for AdaNet model at checkpoint: %s",
checkpoint_path)
# Delegate predicting to a temporary estimator instead of super to make
# passing arguments more functional (via params).
temp_estimator = self._create_temp_estimator(
config=self.config,
best_ensemble_index=self._compute_best_ensemble_index(
checkpoint_path, hooks=hooks),
checkpoint_path=checkpoint_path,
hooks=hooks)
return temp_estimator.predict(
input_fn=input_fn,
predict_keys=predict_keys,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples,
hooks=hooks)
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@deprecation.deprecated(
None, "This function has been renamed, use `export_saved_model` instead.")
def export_savedmodel(self,
export_dir_base,
serving_input_receiver_fn,
hooks=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
if not checkpoint_path:
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
logging.info("Exporting SavedModel for AdaNet model at checkpoint: %s",
checkpoint_path)
# Delegate exporting to a temporary estimator instead of super to make
# passing arguments more functional (via params).
temp_estimator = self._create_temp_estimator(
config=self.config,
hooks=hooks,
best_ensemble_index=self._compute_best_ensemble_index(
checkpoint_path, hooks=hooks),
checkpoint_path=checkpoint_path,
is_export=True)
with self._force_replication_strategy():
return temp_estimator.export_savedmodel(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_receiver_fn,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs)
def export_saved_model(self,
export_dir_base,
serving_input_receiver_fn,
hooks=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
experimental_mode=tf.estimator.ModeKeys.PREDICT):
if not checkpoint_path:
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
logging.info("Exporting SavedModel for AdaNet model at checkpoint: %s",
checkpoint_path)
# Delegate exporting to a temporary estimator instead of super to make
# passing arguments more functional (via params).
temp_estimator = self._create_temp_estimator(
config=self.config,
best_ensemble_index=self._compute_best_ensemble_index(
checkpoint_path, hooks=hooks),
checkpoint_path=checkpoint_path,
hooks=hooks,
is_export=True)
with self._force_replication_strategy():
return temp_estimator.export_saved_model(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_receiver_fn,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
experimental_mode=experimental_mode)
def experimental_export_all_saved_models(self,
export_dir_base,
input_receiver_fn_map,
hooks=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
if not checkpoint_path:
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
logging.info("Exporting SavedModel for AdaNet model at checkpoint: %s",
checkpoint_path)
# Delegate exporting to a temporary estimator instead of super to make
# passing arguments more functional (via params).
temp_estimator = self._create_temp_estimator(
config=self.config,
best_ensemble_index=self._compute_best_ensemble_index(
checkpoint_path, hooks=hooks),
checkpoint_path=checkpoint_path,
hooks=hooks,
is_export=True)
with self._force_replication_strategy():
return temp_estimator.experimental_export_all_saved_models(
export_dir_base=export_dir_base,
input_receiver_fn_map=input_receiver_fn_map,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path)
def _compute_best_ensemble_index(self, checkpoint_path, hooks):
# type: (Text, Sequence[tf_compat.SessionRunHook]) -> Optional[int]
"""Runs the Evaluator to obtain the best ensemble index among candidates."""
# AdaNet Replay.
if self._replay_config:
iteration_number = self._checkpoint_iteration_number(checkpoint_path)
best_index = self._replay_config.get_best_ensemble_index(iteration_number)
if best_index is not None:
return best_index
if self._evaluator:
return self._execute_candidate_evaluation_phase(
self._evaluator.input_fn,
export_best_architecture=False,
checkpoint_path=checkpoint_path,
hooks=hooks)
return None
@contextlib.contextmanager
def _force_replication_strategy(self):
"""Sets placement_strategy to always be ReplicationStrategy.
This is useful during the bookkeeping phase and when Estimator's export
saved model functions are called. In both of these cases, local tf.Sessions
are created which do not have access to the cluster. Therefore,
RoundRobinReplicationStrategy will fail when it tries to place ops on
cluster devices which the local tf.Sessions cannot access.
Yields:
Nothing. Simply returns control back to the caller.
"""
temp_placement_strategy = self._iteration_builder.placement_strategy
try:
placement_strategy = distributed_lib.ReplicationStrategy()
self._iteration_builder.placement_strategy = placement_strategy
yield
finally:
self._iteration_builder.placement_strategy = temp_placement_strategy
  @contextlib.contextmanager
  def _call_input_fn_in_new_graph(self, input_fn, mode, config):
    """Calls the given input_fn and yields results within a new graph context.

    Yields features, labels, and hooks from the result of an Estimator
    input_fn.

    Args:
      input_fn: a function that takes no arguments and returns one of:
        * A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
          tuple (features, labels) with same constraints as below.
        * A tuple (features, labels): Where `features` is a `Tensor` or a
          dictionary of string feature name to `Tensor` and `labels` is a
          `Tensor` or a dictionary of string label name to `Tensor`. Both
          `features` and `labels` are consumed by `model_fn`. They should
          satisfy the expectation of `model_fn` from inputs.
      mode: Defines whether this is training, evaluation or prediction. See
        `ModeKeys`.
      config: The current `tf.estimator.RunConfig`.

    Yields:
      Tuple of features, labels, and input_hooks, where features are as
      described above, labels are as described above or None, and input_hooks
      are a list of SessionRunHooks to be included when running.

    Raises:
      ValueError: if the result is a list or tuple of length != 2.
    """
    # Imported lazily to avoid a module-level dependency on Estimator
    # internals.
    from tensorflow_estimator.python.estimator import util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    with tf.Graph().as_default() as g:
      # Seed the fresh graph so results are reproducible across phases.
      tf_compat.v1.set_random_seed(config.tf_random_seed)
      # Create global step before calling model_fn as does superclass.
      self._create_and_assert_global_step(g)
      # NOTE(review): pinning the input pipeline to CPU presumably keeps
      # dataset ops off accelerator devices -- confirm.
      with tf.device("/cpu:0"):
        input_fn_outs = input_fn()
      yield util.parse_input_fn_result(input_fn_outs)
def _create_temp_run_config(self, temp_model_dir):
# type: (Text) -> tf.estimator.RunConfig
"""Creates a temp `RunConfig` for the bookkeeping phase."""
config = self.config
return tf.estimator.RunConfig(
model_dir=temp_model_dir,
tf_random_seed=config.tf_random_seed,
session_config=config.session_config,
protocol=config.protocol)
def _create_temp_estimator(self, config, **create_model_fn_args):
# type: (tf.estimator.RunConfig, Any[...]) -> tf.estimator.Estimator # pylint:disable=line-too-long
"""Creates a temp `Estimator` to grow the graph for the next iteration."""
return tf.estimator.Estimator(
model_fn=self._create_model_fn(**create_model_fn_args),
config=config,
warm_start_from=self._warm_start_settings)
def _execute_bookkeeping_phase(self, train_input_fn, iteration_number,
train_hooks, checkpoint_path):
"""Run the AdaNet bookkeeping phase to prepare the next iteration.
This method creates a TensorFlow graph up to three times:
1. To evaluate all candidate ensembles to find the best one.
2. To materialize reports and store them to disk (if report_materializer
exists).
3. To grow the TensorFlow graph and overwrite the model directory's
checkpoint with the next iteration's ops.
Args:
train_input_fn: The input_fn used during training.
iteration_number: Integer current iteration number.
train_hooks: List of `SessionRunHook` passed for training.
checkpoint_path: Path to the checkpoint to restore from.
"""
next_iteration_number = iteration_number + 1
logging.info("Preparing iteration %s:", next_iteration_number)
if self._evaluator:
evaluator_input_fn = self._evaluator.input_fn
else:
evaluator_input_fn = train_input_fn
best_ensemble_index = self._execute_candidate_evaluation_phase(
evaluator_input_fn,
export_best_architecture=True,
checkpoint_path=checkpoint_path,
hooks=train_hooks)
self._execute_report_materialization_phase(
best_ensemble_index, checkpoint_path=checkpoint_path, hooks=train_hooks)
self._execute_graph_growing_phase(train_input_fn, train_hooks,
checkpoint_path)
logging.info("Finished preparing iteration %s.", next_iteration_number)
def _execute_candidate_evaluation_phase(self, evaluator_input_fn,
export_best_architecture,
checkpoint_path, hooks):
"""Evaluates and chooses the best ensemble for this iteration.
Args:
evaluator_input_fn: The input_fn for evaluation.
export_best_architecture: Boolean whether to persist the best ensemble's
architecture to the model_dir.
checkpoint_path: Path to the checkpoint to restore from.
hooks: A list of `tf.estimator.SessionRunHook`s.
Returns:
Integer index of the best ensemble withing the list of candidate ensembles
for the current iteration.
"""
logging.info("Evaluating candidates...")
config = self.config
mode = tf.estimator.ModeKeys.EVAL
with self._call_input_fn_in_new_graph(evaluator_input_fn, mode,
config) as (features, labels,
input_hooks):
current_iteration, _ = self._create_iteration(
features,
labels,
mode,
config,
is_growing_phase=False,
checkpoint_path=checkpoint_path,
hooks=hooks)
best_ensemble_index = self._get_best_ensemble_index(
current_iteration, input_hooks, checkpoint_path)
architecture = current_iteration.candidates[
best_ensemble_index].ensemble_spec.architecture
if export_best_architecture:
iteration_number = self._checkpoint_iteration_number(checkpoint_path)
new_architecture_filename = self._architecture_filename(iteration_number)
logging.info("Exporting best ensemble architecture to %s",
new_architecture_filename)
self._save_architecture(new_architecture_filename, architecture,
checkpoint_path)
logging.info("Done evaluating candidates.")
return best_ensemble_index
def _execute_report_materialization_phase(self, best_ensemble_index,
checkpoint_path, hooks):
"""Materializes and store subnetwork reports."""
if not self._report_materializer:
return
logging.info("Materializing reports...")
input_fn = self._report_materializer.input_fn
mode = tf.estimator.ModeKeys.EVAL
config = self.config
with self._call_input_fn_in_new_graph(input_fn, mode,
config) as (features, labels,
input_hooks):
current_iteration, _ = self._create_iteration(
features,
labels,
mode,
config,
is_growing_phase=False,
checkpoint_path=checkpoint_path,
hooks=hooks)
self._materialize_report(current_iteration, input_hooks,
best_ensemble_index, checkpoint_path)
logging.info("Done materializing reports.")
  def _execute_graph_growing_phase(self, train_input_fn, train_hooks,
                                   checkpoint_path):
    """Grows the tensorflow graph for the next iteration.

    Normally the MonitoredTrainingSession does not allow one to add new ops to
    the TensorFlow graph once training starts. To get around this limitation,
    create the graph for the next iteration and overwrite the model directory
    checkpoint with the expanded graph.

    Args:
      train_input_fn: The input_fn used during training.
      train_hooks: List of `SessionRunHook` passed for training.
      checkpoint_path: Path of the checkpoint to use for restoring variables.
    """
    logging.info("Adapting graph and incrementing iteration number...")
    config = self.config
    temp_model_dir = os.path.join(self.model_dir, "temp_model_dir")
    if not tf.io.gfile.exists(temp_model_dir):
      tf.io.gfile.makedirs(temp_model_dir)
    # Since deleting a model_dir can fail, we need each temporary directory to
    # be unique. So we use the current time (epoch milliseconds) when creating
    # it.
    time_in_millis = int(time.time() * 1000)
    temp_model_sub_dir = os.path.join(temp_model_dir, str(time_in_millis))
    temp_run_config = config.replace(model_dir=temp_model_sub_dir)
    # The temp estimator runs a single training step (max_steps=1 below) with
    # is_growing_phase=True so the grown graph gets built and checkpointed.
    temp_estimator = self._create_temp_estimator(
        config=temp_run_config,
        is_growing_phase=True,
        is_inside_training_loop=True,
        checkpoint_path=checkpoint_path,
        hooks=train_hooks)
    # Copy assets into the temp dir before training and back afterwards --
    # NOTE(review): presumably so assets written during the temp run are
    # preserved in the real model_dir; confirm.
    _copy_recursively(
        os.path.join(self._model_dir, "assets"),
        os.path.join(temp_model_sub_dir, "assets"))
    # Do not train with any saving_listeners since this is just a temporary
    # estimator.
    temp_estimator.train(
        input_fn=train_input_fn,
        max_steps=1,
        hooks=self._process_hooks_for_growing_phase(train_hooks),
        saving_listeners=None)
    _copy_recursively(
        os.path.join(temp_model_sub_dir, "assets"),
        os.path.join(self._model_dir, "assets"))
    _delete_directory(temp_model_dir)
    logging.info("Done adapting graph and incrementing iteration number.")
def _architecture_filename(self, iteration_number):
# type: (int) -> Text
"""Returns the filename of the given iteration's frozen graph."""
frozen_checkpoint = os.path.join(self.model_dir, "architecture")
return "{}-{}.json".format(frozen_checkpoint, iteration_number)
def _get_best_ensemble_index(self,
current_iteration,
input_hooks,
checkpoint_path=None):
# type: (_Iteration, Sequence[tf_compat.SessionRunHook], Text) -> int
"""Returns the best candidate ensemble's index in this iteration.
Evaluates the ensembles using an `Evaluator` when provided. Otherwise,
it returns the index of the best candidate as defined by the `_Iteration`.
Args:
current_iteration: Current `_Iteration`.
input_hooks: List of SessionRunHooks to be included when running.
checkpoint_path: Checkpoint to use when determining the best index.
Returns:
Index of the best ensemble in the iteration's list of `_Candidates`.
"""
# AdaNet Replay.
if self._replay_config:
best_index = self._replay_config.get_best_ensemble_index(
current_iteration.number)
if best_index is not None:
return best_index
# Skip the evaluation phase when there is only one candidate subnetwork.
if len(current_iteration.candidates) == 1:
logging.info("'%s' is the only ensemble",
current_iteration.candidates[0].ensemble_spec.name)
return 0
# The zero-th index candidate at iteration t>0 is always the
# previous_ensemble.
if current_iteration.number > 0 and self._force_grow and (len(
current_iteration.candidates) == 2):
logging.info("With `force_grow` enabled, '%s' is the only ensemble",
current_iteration.candidates[1].ensemble_spec.name)
return 1
logging.info("Starting ensemble evaluation for iteration %s",
current_iteration.number)
for hook in input_hooks:
hook.begin()
with tf_compat.v1.Session(config=self.config.session_config) as sess:
init = tf.group(
tf_compat.v1.global_variables_initializer(),
tf_compat.v1.local_variables_initializer(),
tf_compat.v1.tables_initializer(),
current_iteration.estimator_spec.scaffold.local_init_op if isinstance(
current_iteration.estimator_spec,
tf.estimator.EstimatorSpec) else tf.no_op())
sess.run(init)
if self._enable_v2_checkpoint:
status = current_iteration.checkpoint.restore(checkpoint_path)
status.expect_partial() # Optional sanity checks.
status.initialize_or_restore(sess)
else:
saver = tf_compat.v1.train.Saver(sharded=True)
saver.restore(sess, checkpoint_path)
coord = tf.train.Coordinator()
for hook in input_hooks:
hook.after_create_session(sess, coord)
tf_compat.v1.train.start_queue_runners(sess=sess, coord=coord)
ensemble_metrics = []
for candidate in current_iteration.candidates:
metrics = candidate.ensemble_spec.eval_metrics.eval_metrics_ops()
metrics["adanet_loss"] = tf_compat.v1.metrics.mean(
candidate.ensemble_spec.adanet_loss)
ensemble_metrics.append(metrics)
if self._evaluator:
metric_name = self._evaluator.metric_name
metrics = self._evaluator.evaluate(sess, ensemble_metrics)
objective_fn = self._evaluator.objective_fn
else:
metric_name = "adanet_loss"
metrics = sess.run(
[c.adanet_loss for c in current_iteration.candidates])
objective_fn = np.nanargmin
values = []
for i in range(len(current_iteration.candidates)):
ensemble_name = current_iteration.candidates[i].ensemble_spec.name
values.append("{}/{} = {:.6f}".format(metric_name, ensemble_name,
metrics[i]))
logging.info("Computed ensemble metrics: %s", ", ".join(values))
if self._force_grow and current_iteration.number > 0:
logging.info(
"The `force_grow` override is enabled, so the "
"the performance of the previous ensemble will be ignored.")
# NOTE: The zero-th index candidate at iteration t>0 is always the
# previous_ensemble.
metrics = metrics[1:]
index = objective_fn(metrics) + 1
else:
index = objective_fn(metrics)
logging.info("Finished ensemble evaluation for iteration %s",
current_iteration.number)
logging.info("'%s' at index %s is the best ensemble",
current_iteration.candidates[index].ensemble_spec.name, index)
return index
  def _materialize_report(self, current_iteration, input_hooks,
                          best_ensemble_index, checkpoint_path):
    """Generates reports as defined by `Builder`s.

    Materializes the Tensors and metrics defined in the `Builder`s'
    `build_subnetwork_report` method using `ReportMaterializer`, and stores
    them to disk using `_ReportAccessor`.

    Args:
      current_iteration: Current `_Iteration`.
      input_hooks: List of SessionRunHooks to be included when running.
      best_ensemble_index: Integer index of the best candidate ensemble.
      checkpoint_path: Path of the checkpoint to use.
    """
    logging.info("Starting metric logging for iteration %s",
                 current_iteration.number)
    best_candidate = current_iteration.candidates[best_ensemble_index]
    best_architecture = best_candidate.ensemble_spec.architecture
    # Only subnetworks added in the current iteration count as "included";
    # architecture.subnetworks yields (iteration_number, name) pairs.
    included_subnetwork_names = [
        name for i, name in best_architecture.subnetworks
        if i == current_iteration.number
    ]
    for hook in input_hooks:
      hook.begin()
    if self._enable_v2_checkpoint:
      status = current_iteration.checkpoint.restore(checkpoint_path)
      # Verify that restoring subset of ops from previous iteration works.
      status.expect_partial()  # Optional sanity checks.
    with tf_compat.v1.Session(config=self.config.session_config) as sess:
      init = tf.group(
          tf_compat.v1.global_variables_initializer(),
          tf_compat.v1.local_variables_initializer(),
          tf_compat.v1.tables_initializer(),
          current_iteration.estimator_spec.scaffold.local_init_op if isinstance(
              current_iteration.estimator_spec,
              tf.estimator.EstimatorSpec) else tf.no_op())
      sess.run(init)
      # Initializers run first; checkpointed values then overwrite them.
      if self._enable_v2_checkpoint:
        status.initialize_or_restore(sess)
      else:
        saver = tf_compat.v1.train.Saver(sharded=True)
        saver.restore(sess, checkpoint_path)
      coord = tf.train.Coordinator()
      for hook in input_hooks:
        hook.after_create_session(sess, coord)
      tf_compat.v1.train.start_queue_runners(sess=sess, coord=coord)
      materialized_reports = (
          self._report_materializer.materialize_subnetwork_reports(
              sess, current_iteration.number,
              current_iteration.subnetwork_reports, included_subnetwork_names))
      self._report_accessor.write_iteration_report(current_iteration.number,
                                                   materialized_reports)
    logging.info("Finished saving subnetwork reports for iteration %s",
                 current_iteration.number)
def _process_hooks_for_growing_phase(self, hooks):
"""Processes hooks which will run during the graph growing phase.
In particular the following things are done:
- CheckpointSaverHooks are filtered out since they are not intended to
run between training runs and will cause errors. We also reset the
CheckpointSaverHooks' Saver between iterations, see b/122795064 for more
details.
- Decorate the remaining hooks with _GraphGrowingHookDecorator to only run
the begin() and end() methods during the graph growing phase.
Args:
hooks: The list of `SessionRunHooks` to process.
Returns:
The processed hooks which should run during the growing phase.
"""
processed_hooks = []
for hook in hooks:
# Reset CheckpointSaverHooks' Saver and filter out.
if isinstance(hook, tf_compat.CheckpointSaverHook):
hook._saver = None # pylint: disable=protected-access
continue
# Do not decorate the _OverwriteCheckpointHook since it should always
# run during the graph growing phase.
if not isinstance(hook, _OverwriteCheckpointHook):
hook = _GraphGrowingHookDecorator(hook)
processed_hooks.append(hook)
return processed_hooks
def _training_chief_hooks(self, current_iteration, training):
"""Returns chief-only training hooks to be run this iteration.
Args:
current_iteration: Current `_Iteration`.
training: Whether in training mode.
Returns:
A list of `SessionRunHook` instances.
"""
if not training:
return []
training_hooks = []
if tf_compat.is_v2_behavior_enabled():
# Use V2 summaries and hook when user is using TF 2 behavior.
training_hooks.append(
_SummaryV2SaverHook(
current_iteration.summaries,
save_steps=self.config.save_summary_steps))
else:
# Fallback to V1 summaries.
for summary in current_iteration.summaries:
output_dir = self.model_dir
if summary.scope:
output_dir = os.path.join(output_dir, summary.namespace,
summary.scope)
summary_saver_hook = tf_compat.SummarySaverHook(
save_steps=self.config.save_summary_steps,
output_dir=output_dir,
summary_op=summary.merge_all())
training_hooks.append(summary_saver_hook)
training_hooks += list(
current_iteration.estimator_spec.training_chief_hooks)
return training_hooks
def _training_hooks(self, current_iteration, training,
iteration_number_tensor, previous_iteration_vars,
is_growing_phase):
"""Returns training hooks to be run on all workers and chief this iteration.
Args:
current_iteration: Current `_Iteration`.
training: Whether in training mode.
iteration_number_tensor: An int tensor of the current AdaNet iteraiton.
previous_iteration_vars: The variables of the previous iteration to be
restored by the _OverwriteCheckpointHook. If empty, no
_OverwriteCheckpointHook will be created.
is_growing_phase: Whether we are in the AdaNet graph growing phase.
Returns:
A list of `SessionRunHook` instances.
"""
if not training:
return []
def after_fn():
self._iteration_ended = True
training_hooks = list(current_iteration.estimator_spec.training_hooks) + [
_StopAfterTrainingHook(current_iteration, after_fn=after_fn)
]
if is_growing_phase:
training_hooks.append(
_OverwriteCheckpointHook(current_iteration, iteration_number_tensor,
previous_iteration_vars, self.config,
self._enable_v2_checkpoint))
return training_hooks
def _evaluation_hooks(self, current_iteration, training, evaluation_name):
"""Returns evaluation hooks for this iteration.
Args:
current_iteration: Current `_Iteration`.
training: Whether in training mode.
evaluation_name: String name to append to the eval directory.
Returns:
A list of `SessionRunHook` instances.
"""
if training:
return []
evaluation_hooks = []
for subnetwork_spec in current_iteration.subnetwork_specs:
evaluation_hooks.append(
self._create_eval_metric_saver_hook(
subnetwork_spec.eval_metrics,
subnetwork_spec.name,
kind="subnetwork",
evaluation_name=evaluation_name))
for candidate in current_iteration.candidates:
evaluation_hooks.append(
self._create_eval_metric_saver_hook(
candidate.ensemble_spec.eval_metrics,
candidate.ensemble_spec.name,
kind="ensemble",
evaluation_name=evaluation_name))
return evaluation_hooks
def _create_eval_metric_saver_hook(self, eval_metrics, name, kind,
evaluation_name):
eval_subdir = "eval"
if evaluation_name:
eval_subdir = "eval_{}".format(evaluation_name)
return _EvalMetricSaverHook(
name=name,
kind=kind,
eval_metrics=eval_metrics,
output_dir=os.path.join(self.model_dir, kind, name, eval_subdir))
def _save_architecture(self, filename, architecture, checkpoint_path):
# type: (Text, _Architecture, Text) -> None
"""Persists the ensemble's architecture in a serialized format.
Writes to a text file with one subnetwork's iteration number and name
per line.
Args:
filename: String filename to persist the ensemble architecture.
architecture: Target `_Architecture` instance.
checkpoint_path: Path of the checkpoint to use.
"""
# Make directories since model_dir may not have been created yet.
tf.io.gfile.makedirs(os.path.dirname(filename))
iteration_number = self._checkpoint_iteration_number(checkpoint_path)
global_step = self._checkpoint_global_step(checkpoint_path)
serialized_architecture = architecture.serialize(iteration_number,
global_step)
logging.info("Saving architecture to %s:\n%s", filename,
serialized_architecture)
with tf.io.gfile.GFile(filename, "w") as record_file:
record_file.write(serialized_architecture)
def _read_architecture(self, filename):
# type: (Text) -> _Architecture
"""Reads an ensemble architecture from disk.
Assumes the file was written with `_save_architecture`.
Args:
filename: String filename where features were recorded.
Returns:
An `_Architecture` instance.
Raises:
OSError: When file not found at `filename`.
"""
if not tf.io.gfile.exists(filename):
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), filename)
with tf.io.gfile.GFile(filename, "rb") as gfile:
return _Architecture.deserialize(gfile.read().decode())
def _find_ensemble_candidate(self, ensemble_candidate_name,
ensemble_candidates):
# type: (Text, Sequence[ensemble_lib.Candidate]) -> ensemble_lib.Candidate
"""Returns the ensemble candidate with the given name."""
for ensemble_candidate in ensemble_candidates:
if ensemble_candidate.name == ensemble_candidate_name:
return ensemble_candidate
raise ValueError(
"Could not find a matching ensemble candidate with name '{}'. "
"Are you sure the `adanet.ensemble.Strategy` is deterministic?".format(
ensemble_candidate_name))
  # TODO: Refactor architecture building logic to its own module.
  def _architecture_ensemble_spec(self, architecture, iteration_number,
                                  features, mode, labels,
                                  previous_ensemble_spec, config,
                                  previous_iteration, hooks):
    """Rebuilds the iteration holding the given persisted architecture.

    Creates the ensemble architecture by calling `generate_subnetworks` on
    `self._subnetwork_generator` and only calling `build_subnetwork` on
    `Builders` included in the architecture. Once their ops are created, their
    variables are restored from the checkpoint.

    Args:
      architecture: An `_Architecture` instance.
      iteration_number: Integer current iteration number.
      features: Dictionary of `Tensor` objects keyed by feature name.
      mode: Defines whether this is training, evaluation or prediction. See
        `ModeKeys`.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head). Can be `None`.
      previous_ensemble_spec: The `_EnsembleSpec` for the previous iteration.
        Will be `None` for the first iteration.
      config: The current `tf.estimator.RunConfig`.
      previous_iteration: The previous `_Iteration`.
      hooks: A list of `tf.estimator.SessionRunHook`s.

    Returns:
      The rebuilt `_Iteration`. NOTE(review): the original docstring claimed
      an `EnsembleSpec` is returned, but the code returns `current_iteration`;
      confirm which is intended.

    Raises:
      ValueError: If a subnetwork from `architecture` is not found in the
        generated candidate `Builders` of the specified iteration.
    """
    previous_ensemble = None
    if previous_ensemble_spec:
      previous_ensemble = previous_ensemble_spec.ensemble
    current_iteration = previous_iteration
    for t, names in architecture.subnetworks_grouped_by_iteration:
      # Only the group recorded for `iteration_number` is rebuilt; groups from
      # other iterations are skipped.
      if t != iteration_number:
        continue
      previous_ensemble_reports, all_reports = [], []
      if self._report_materializer:
        previous_ensemble_reports, all_reports = (
            self._collate_subnetwork_reports(iteration_number))
      # Regenerate the full candidate pool, then keep only the builders named
      # in the persisted architecture.
      generated_subnetwork_builders = (
          self._call_generate_candidates(
              previous_ensemble=previous_ensemble,
              iteration_number=iteration_number,
              previous_ensemble_reports=previous_ensemble_reports,
              all_reports=all_reports,
              config=config))
      subnetwork_builder_names = {
          b.name: b for b in generated_subnetwork_builders
      }
      rebuild_subnetwork_builders = []
      for name in names:
        if name not in subnetwork_builder_names:
          raise ValueError(
              "Required subnetwork builder is missing for iteration {}: {}"
              .format(iteration_number, name))
        rebuild_subnetwork_builders.append(subnetwork_builder_names[name])
      previous_ensemble_summary = None
      previous_ensemble_subnetwork_builders = None
      if previous_ensemble_spec:
        # Always skip summaries when rebuilding previous architecture,
        # since they are not useful.
        previous_ensemble_summary = self._summary_maker(
            namespace="ensemble",
            scope=previous_ensemble_spec.name,
            skip_summary=True)
        previous_ensemble_subnetwork_builders = (
            previous_ensemble_spec.subnetwork_builders)
      ensemble_candidates = []
      for ensemble_strategy in self._ensemble_strategies:
        ensemble_candidates += ensemble_strategy.generate_ensemble_candidates(
            rebuild_subnetwork_builders, previous_ensemble_subnetwork_builders)
      # Pick the exact candidate recorded in the architecture; this requires
      # the ensemble strategies to be deterministic (see the raised error in
      # _find_ensemble_candidate).
      ensemble_candidate = self._find_ensemble_candidate(
          architecture.ensemble_candidate_name, ensemble_candidates)
      current_iteration = self._iteration_builder.build_iteration(
          base_global_step=architecture.global_step,
          iteration_number=iteration_number,
          ensemble_candidates=[ensemble_candidate],
          subnetwork_builders=rebuild_subnetwork_builders,
          features=features,
          labels=labels,
          mode=mode,
          config=config,
          previous_ensemble_summary=previous_ensemble_summary,
          rebuilding=True,
          rebuilding_ensembler_name=architecture.ensembler_name,
          previous_iteration=current_iteration)
      # With a previous ensemble there are exactly two candidates (previous
      # ensemble plus the rebuilt one); otherwise just one.
      max_candidates = 2 if previous_ensemble_spec else 1
      assert len(current_iteration.candidates) == max_candidates
      previous_ensemble_spec = current_iteration.candidates[-1].ensemble_spec
      previous_ensemble = previous_ensemble_spec.ensemble
      previous_ensemble_spec.architecture.set_replay_indices(
          architecture.replay_indices)
    return current_iteration
def _collate_subnetwork_reports(self, iteration_number):
"""Prepares subnetwork.Reports to be passed to Generator.
Reads subnetwork.MaterializedReports from past iterations,
collates those that were included in previous_ensemble into
previous_ensemble_reports as a List of subnetwork.MaterializedReports,
and collates all reports from previous iterations into all_reports as
another List of subnetwork.MaterializedReports.
Args:
iteration_number: Python integer AdaNet iteration number, starting from 0.
Returns:
(previous_ensemble_reports: List<subnetwork.MaterializedReport>,
materialized_reports: List<MaterializedReport>)
"""
materialized_reports_all = (self._report_accessor.read_iteration_reports())
previous_ensemble_reports = []
all_reports = []
# Since the number of iteration reports changes after the
# MATERIALIZE_REPORT phase, we need to make sure that we always pass the
# same reports to the Generator in the same iteration,
# otherwise the graph that is built in the FREEZE_ENSEMBLE phase would be
# different from the graph built in the training phase.
# Iteration 0 should have 0 iteration reports passed to the
# Generator, since there are no previous iterations.
# Iteration 1 should have 1 list of reports for Builders
# generated in iteration 0.
# Iteration 2 should have 2 lists of reports -- one for iteration 0,
# one for iteration 1. Note that the list of reports for iteration >= 1
# should contain "previous_ensemble", in addition to the
# Builders at the start of that iteration.
# Iteration t should have t lists of reports.
for i, iteration_reports in enumerate(materialized_reports_all):
# This ensures that the FREEZE_ENSEMBLE phase does not pass the reports
# generated in the previous phase of the same iteration to the
# Generator when building the graph.
if i >= iteration_number:
break
chosen_subnetworks_in_this_iteration = [
subnetwork_report for subnetwork_report in iteration_reports
if subnetwork_report.included_in_final_ensemble
]
previous_ensemble_reports += chosen_subnetworks_in_this_iteration
all_reports.extend(iteration_reports)
return previous_ensemble_reports, all_reports
def _train_op(self, iteration_estimator_spec, is_growing_phase):
"""Returns the iteration train op or tf.no_op if growing the graph."""
train_op = iteration_estimator_spec.train_op
if is_growing_phase:
train_op = tf_compat.v1.train.get_global_step().assign_add(1)
# NOTE: some version of TensorFlow check that train_op is an Op or Tensor
# and crash if train_op is a Variable.
train_op = tf.identity(train_op)
return train_op
  def _create_estimator_spec(self, current_iteration, mode,
                             iteration_number_tensor, previous_iteration_vars,
                             is_growing_phase, evaluation_name):
    """Creates the EstimatorSpec which will be returned by _adanet_model_fn.

    Args:
      current_iteration: Current `_Iteration`.
      mode: Defines whether this is training, evaluation or prediction. See
        `ModeKeys`.
      iteration_number_tensor: An int tensor of the current AdaNet iteration.
      previous_iteration_vars: Variables of the previous iteration, restored
        by the _OverwriteCheckpointHook during the growing phase.
      is_growing_phase: Whether we are in the AdaNet graph growing phase.
      evaluation_name: String name to append to the eval directory.

    Returns:
      A `tf.estimator.EstimatorSpec`.
    """
    # Imported lazily to avoid a module-level dependency on TF internals.
    from tensorflow.python.training.tracking import graph_view  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    training = mode == tf.estimator.ModeKeys.TRAIN
    iteration_estimator_spec = current_iteration.estimator_spec
    training_chief_hooks = self._training_chief_hooks(current_iteration,
                                                      training)
    training_hooks = self._training_hooks(current_iteration, training,
                                          iteration_number_tensor,
                                          previous_iteration_vars,
                                          is_growing_phase)
    if is_growing_phase:
      # Growing-phase hooks are filtered/decorated so only begin()/end() run.
      training_chief_hooks = self._process_hooks_for_growing_phase(
          training_chief_hooks)
      training_hooks = self._process_hooks_for_growing_phase(training_hooks)
    saver = None
    if self._enable_v2_checkpoint:
      # With V2 checkpoints, build a sharded Saver over the trackable object
      # graph so checkpoint retention honors keep_checkpoint_max.
      saver = tf_compat.v1.train.Saver(
          var_list=graph_view.ObjectGraphView(
              current_iteration.checkpoint).frozen_saveable_objects(),
          sharded=True,
          max_to_keep=self.config.keep_checkpoint_max)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=iteration_estimator_spec.predictions,
        loss=iteration_estimator_spec.loss,
        train_op=self._train_op(iteration_estimator_spec, is_growing_phase),
        eval_metric_ops=iteration_estimator_spec.eval_metric_ops,
        training_chief_hooks=training_chief_hooks,
        training_hooks=training_hooks,
        evaluation_hooks=self._evaluation_hooks(current_iteration, training,
                                                evaluation_name),
        # summary_op is an empty constant so the default summary machinery
        # writes nothing; summaries are saved by the hooks built above.
        scaffold=tf_compat.v1.train.Scaffold(
            summary_op=tf.constant(""),
            saver=saver,
            local_init_op=current_iteration.estimator_spec.scaffold
            .local_init_op if isinstance(current_iteration.estimator_spec,
                                         tf.estimator.EstimatorSpec) else None),
        export_outputs=iteration_estimator_spec.export_outputs)
def _call_generate_candidates(self, previous_ensemble, iteration_number,
previous_ensemble_reports, all_reports, config):
# Calling low level getargs for py_2_and_3 compatibility.
defined_args = inspect.getargs(
self._subnetwork_generator.generate_candidates.__code__).args
generate_args = dict(
previous_ensemble=previous_ensemble,
iteration_number=iteration_number,
previous_ensemble_reports=previous_ensemble_reports,
all_reports=all_reports)
if "config" in defined_args:
generate_args["config"] = config
return self._subnetwork_generator.generate_candidates(**generate_args)
def _create_iteration(self,
                      features,
                      labels,
                      mode,
                      config,
                      is_growing_phase,
                      checkpoint_path,
                      hooks,
                      best_ensemble_index_override=None):
  """Constructs the TF ops and variables for the current iteration.

  Args:
    features: Dictionary of `Tensor` objects keyed by feature name.
    labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
      (for multi-head). Can be `None`.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    config: The current `tf.estimator.RunConfig`.
    is_growing_phase: Whether we are in the AdaNet graph growing phase.
    checkpoint_path: Path of the checkpoint to use. When `None`, this method
      uses the latest checkpoint instead.
    hooks: A list of `tf.estimator.SessionRunHooks`.
    best_ensemble_index_override: Integer index to identify the latest
      iteration's best ensemble candidate instead of computing the best
      ensemble index dynamically conditional on the ensemble AdaNet losses.

  Returns:
    A two-tuple of the current `_Iteration`, and list of variables from
    the previous iteration for restoring during the graph growing phase.
  """
  # Use the evaluation checkpoint path to get both the iteration number and
  # variable values to avoid any race conditions between the first and second
  # checkpoint reads.
  iteration_number = self._checkpoint_iteration_number(checkpoint_path)
  if mode == tf.estimator.ModeKeys.EVAL and checkpoint_path is None:
    # This should only happen during some tests, so we log instead of
    # asserting here.
    logging.warning("There are no checkpoints available during evaluation. "
                    "Variables will be initialized to their defaults.")
  if is_growing_phase:
    # Growing only happens on the chief during training; the new iteration's
    # graph is built one iteration ahead of the checkpointed one.
    assert mode == tf.estimator.ModeKeys.TRAIN
    assert config.is_chief
    iteration_number += 1
  # Only record summaries when training.
  skip_summaries = (mode != tf.estimator.ModeKeys.TRAIN or is_growing_phase)
  base_global_step = 0
  with tf_compat.v1.variable_scope("adanet"):
    previous_iteration = None
    previous_ensemble_spec = None
    previous_ensemble = None
    previous_ensemble_summary = None
    previous_ensemble_subnetwork_builders = None
    architecture = None
    # Rebuild each previously frozen iteration from its persisted
    # architecture file so the current iteration can extend it. Iterations
    # without a persisted architecture file are skipped.
    for i in range(iteration_number):
      architecture_filename = self._architecture_filename(i)
      if not tf.io.gfile.exists(architecture_filename):
        continue
      architecture = self._read_architecture(architecture_filename)
      logging.info(
          "Importing architecture from %s: [%s].", architecture_filename,
          ", ".join(
              sorted([
                  "'{}:{}'".format(t, n)
                  for t, n in architecture.subnetworks_grouped_by_iteration
              ])))
      base_global_step = architecture.global_step
      previous_iteration = self._architecture_ensemble_spec(
          architecture, i, features, mode, labels, previous_ensemble_spec,
          config, previous_iteration, hooks)
      # The best candidate of the rebuilt iteration is its last candidate.
      previous_ensemble_spec = previous_iteration.candidates[-1].ensemble_spec
      previous_ensemble = previous_ensemble_spec.ensemble
      previous_ensemble_summary = self._summary_maker(
          namespace="ensemble",
          scope=previous_ensemble_spec.name,
          skip_summary=skip_summaries)
      previous_ensemble_subnetwork_builders = (
          previous_ensemble_spec.subnetwork_builders)
    previous_iteration_vars = None
    if is_growing_phase:
      # Keep track of the previous iteration variables so we can restore them
      # from the previous checkpoint after growing the graph. After this line,
      # any variables created will not have a matching one in the checkpoint
      # until it gets overwritten.
      # Note: It's not possible to just create a tf.train.Saver here since
      # this code is also run on TPU, which does not support creating Savers
      # inside model_fn.
      previous_iteration_vars = (
          tf_compat.v1.get_collection(tf_compat.v1.GraphKeys.GLOBAL_VARIABLES)
          + tf_compat.v1.get_collection(
              tf_compat.v1.GraphKeys.SAVEABLE_OBJECTS))
    previous_ensemble_reports, all_reports = [], []
    if self._report_materializer:
      previous_ensemble_reports, all_reports = (
          self._collate_subnetwork_reports(iteration_number))
    # Ask the generator for this iteration's candidate subnetworks.
    subnetwork_builders = self._call_generate_candidates(
        previous_ensemble=previous_ensemble,
        iteration_number=iteration_number,
        previous_ensemble_reports=previous_ensemble_reports,
        all_reports=all_reports,
        config=config)
    # Every ensemble strategy contributes candidate ensembles built from the
    # generated subnetworks.
    ensemble_candidates = []
    for ensemble_strategy in self._ensemble_strategies:
      ensemble_candidates += ensemble_strategy.generate_ensemble_candidates(
          subnetwork_builders, previous_ensemble_subnetwork_builders)
    current_iteration = self._iteration_builder.build_iteration(
        base_global_step=base_global_step,
        iteration_number=iteration_number,
        ensemble_candidates=ensemble_candidates,
        subnetwork_builders=subnetwork_builders,
        features=features,
        labels=labels,
        mode=mode,
        config=config,
        previous_ensemble_summary=previous_ensemble_summary,
        best_ensemble_index_override=best_ensemble_index_override,
        previous_iteration=previous_iteration)
  return current_iteration, previous_iteration_vars
def _create_model_fn(self,
                     is_growing_phase=False,
                     is_inside_training_loop=False,
                     is_export=False,
                     evaluation_name=None,
                     best_ensemble_index=None,
                     checkpoint_path=None,
                     hooks=None):
  """Creates the AdaNet model_fn.

  Args:
    is_growing_phase: Whether the model_fn will be called in the growing
      phase.
    is_inside_training_loop: Whether the model_fn will be called inside the
      AdaNet training loop.
    is_export: Whether the model_fn will be called from functions which export
      a SavedModel.
    evaluation_name: String name to append to the eval directory.
    best_ensemble_index: The index of the best performing ensemble in the
      latest AdaNet iteration.
    checkpoint_path: The checkpoint path from which to restore variables.
    hooks: Extra hooks to use when creating the graph.

  Returns:
    The adanet_model_fn which will create the computation graph when called.
  """
  del is_export  # Unused.

  def _adanet_model_fn(features, labels, mode, params, config):
    """AdaNet model_fn.

    Args:
      features: Dictionary of `Tensor` objects keyed by feature name.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head). Can be `None`.
      mode: Defines whether this is training, evaluation or prediction. See
        `ModeKeys`.
      params: A dict of parameters.
      config: The current `tf.estimator.RunConfig`.

    Returns:
      A `EstimatorSpec` instance.

    Raises:
      UserWarning: When calling model_fn directly in TRAIN mode.
    """
    del params  # Unused.
    # Prefer the explicitly provided checkpoint; otherwise fall back to the
    # latest checkpoint in the model directory (may be None on a fresh run).
    path = checkpoint_path or tf.train.latest_checkpoint(self.model_dir)
    training = mode == tf.estimator.ModeKeys.TRAIN
    if training and not is_inside_training_loop:
      raise UserWarning(
          "The adanet.Estimator's model_fn should not be called directly in "
          "TRAIN mode, because its behavior is undefined outside the context "
          "of its `train` method. If you are trying to add custom metrics "
          "with `tf.contrib.estimator.add_metrics`, pass the `metric_fn` to "
          "this `Estimator's` constructor instead.")
    current_iteration, previous_iteration_vars = self._create_iteration(
        features,
        labels,
        mode,
        config,
        is_growing_phase,
        checkpoint_path=path,
        hooks=hooks,
        best_ensemble_index_override=best_ensemble_index)
    # Variable which allows us to read the current iteration from a
    # checkpoint. This must be created here so it is available when calling
    # _execute_bookkeeping_phase after the first iteration.
    iteration_number_tensor = None
    if not self._enable_v2_checkpoint:
      iteration_number_tensor = tf_compat.v1.get_variable(
          self._Keys.CURRENT_ITERATION,
          shape=[],
          dtype=tf.int64,
          initializer=tf_compat.v1.zeros_initializer(),
          trainable=False)
    return self._create_estimator_spec(
        current_iteration,
        mode,
        iteration_number_tensor,
        previous_iteration_vars,
        is_growing_phase,
        evaluation_name=evaluation_name)

  return _adanet_model_fn
| 97,558 | 42.88619 | 128 | py |
adanet | adanet-master/adanet/core/estimator_test.py | """Test AdaNet estimator single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from absl import logging
from absl.testing import parameterized
from adanet import replay
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.estimator import Estimator
from adanet.core.evaluator import Evaluator
from adanet.core.report_materializer import ReportMaterializer
from adanet.distributed.placement import RoundRobinStrategy
from adanet.ensemble import AllStrategy
from adanet.ensemble import ComplexityRegularizedEnsembler
from adanet.ensemble import GrowStrategy
from adanet.ensemble import MixtureWeightType
from adanet.ensemble import SoloStrategy
from adanet.subnetwork import Builder
from adanet.subnetwork import Generator
from adanet.subnetwork import MaterializedReport
from adanet.subnetwork import Report
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
from adanet.subnetwork import TrainOpSpec
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.tools import saved_model_utils
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.canned.head import _binary_logistic_head_with_sigmoid_cross_entropy_loss as binary_class_head_v1
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib
from tensorflow_estimator.python.estimator.head import regression_head
# Surface INFO-level logs so test progress (iterations, losses) is visible.
logging.set_verbosity(logging.INFO)
# XOR truth table used as a tiny, non-linearly-separable training set.
XOR_FEATURES = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
XOR_LABELS = [[1.], [0.], [1.], [0.]]
class _DNNBuilder(Builder):
  """A simple DNN subnetwork builder."""

  def __init__(self,
               name,
               learning_rate=.001,
               mixture_weight_learning_rate=.001,
               return_penultimate_layer=True,
               layer_size=1,
               subnetwork_chief_hooks=None,
               subnetwork_hooks=None,
               mixture_weight_chief_hooks=None,
               mixture_weight_hooks=None,
               seed=13):
    """Initializes a single-hidden-layer DNN builder.

    Args:
      name: String name of the subnetwork.
      learning_rate: SGD learning rate for the subnetwork weights.
      mixture_weight_learning_rate: SGD learning rate for mixture weights.
      return_penultimate_layer: When True, `Subnetwork.last_layer` is the
        hidden layer; otherwise it is the logits.
      layer_size: Width of the hidden layer.
      subnetwork_chief_hooks: Optional chief-only hooks for the subnetwork
        train op.
      subnetwork_hooks: Optional hooks for the subnetwork train op.
      mixture_weight_chief_hooks: Optional chief-only hooks for the mixture
        weight train op.
      mixture_weight_hooks: Optional hooks for the mixture weight train op.
      seed: Random seed used by the weight initializers.
    """
    self._name = name
    self._learning_rate = learning_rate
    self._mixture_weight_learning_rate = mixture_weight_learning_rate
    self._return_penultimate_layer = return_penultimate_layer
    self._layer_size = layer_size
    self._subnetwork_chief_hooks = subnetwork_chief_hooks
    self._subnetwork_hooks = subnetwork_hooks
    self._mixture_weight_chief_hooks = mixture_weight_chief_hooks
    self._mixture_weight_hooks = mixture_weight_hooks
    self._seed = seed

  @property
  def name(self):
    """Returns the builder's name."""
    return self._name

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """Builds the DNN graph; widens the hidden layer with the previous one."""
    seed = self._seed
    if previous_ensemble:
      # Increment seed so different iterations don't learn the exact same thing.
      seed += 1
    with tf_compat.v1.variable_scope("dnn"):
      persisted_tensors = {}
      with tf_compat.v1.variable_scope("hidden_layer"):
        # NOTE(review): features["x"] is assumed to be a rank-2 float tensor
        # with 2 columns (matches the [2, layer_size] weight) — the XOR data.
        w = tf_compat.v1.get_variable(
            shape=[2, self._layer_size],
            initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed),
            name="weight")
        disjoint_op = tf.constant([1], name="disjoint_op")
        with tf_compat.v1.colocate_with(disjoint_op):  # tests b/118865235
          hidden_layer = tf.matmul(features["x"], w)
      if previous_ensemble:
        # Reuse the previous iteration's persisted hidden layer to grow wider.
        other_hidden_layer = previous_ensemble.weighted_subnetworks[
            -1].subnetwork.persisted_tensors["hidden_layer"]
        hidden_layer = tf.concat([hidden_layer, other_hidden_layer], axis=1)
      # Use a leaky-relu activation so that gradients can flow even when
      # outputs are negative. Leaky relu has a non-zero slope when x < 0.
      # Otherwise success at learning is completely dependent on random seed.
      hidden_layer = tf.nn.leaky_relu(hidden_layer, alpha=.2)
      persisted_tensors["hidden_layer"] = hidden_layer
      if training:
        # This change will only be in the next iteration if
        # `freeze_training_graph` is `True`.
        persisted_tensors["hidden_layer"] = 2 * hidden_layer
    last_layer = hidden_layer
    with tf_compat.v1.variable_scope("logits"):
      logits = tf_compat.v1.layers.dense(
          hidden_layer,
          logits_dimension,
          kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
    # Exercise the scalar/image/nested-scope summary code paths.
    summary.scalar("scalar", 3)
    batch_size = features["x"].get_shape().as_list()[0]
    summary.image("image", tf.ones([batch_size, 3, 3, 1]))
    with tf_compat.v1.variable_scope("nested"):
      summary.scalar("scalar", 5)
    return Subnetwork(
        last_layer=last_layer if self._return_penultimate_layer else logits,
        logits=logits,
        complexity=3,
        persisted_tensors=persisted_tensors,
        shared=persisted_tensors)

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    """Returns an SGD train op; wraps it in a TrainOpSpec when hooks exist."""
    optimizer = tf_compat.v1.train.GradientDescentOptimizer(
        learning_rate=self._learning_rate)
    train_op = optimizer.minimize(loss, var_list=var_list)
    if not self._subnetwork_hooks:
      return train_op
    return TrainOpSpec(train_op, self._subnetwork_chief_hooks,
                       self._subnetwork_hooks)

  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    """Returns an SGD mixture-weight train op, optionally with hooks."""
    optimizer = tf_compat.v1.train.GradientDescentOptimizer(
        learning_rate=self._mixture_weight_learning_rate)
    train_op = optimizer.minimize(loss, var_list=var_list)
    if not self._mixture_weight_hooks:
      return train_op
    return TrainOpSpec(train_op, self._mixture_weight_chief_hooks,
                       self._mixture_weight_hooks)

  def build_subnetwork_report(self):
    """Returns a fixed Report exercising hparams, attributes and metrics."""
    return Report(
        hparams={"layer_size": self._layer_size},
        attributes={"complexity": tf.constant(3, dtype=tf.int32)},
        metrics={
            "moo": (tf.constant(3,
                                dtype=tf.int32), tf.constant(3, dtype=tf.int32))
        })
class _SimpleBuilder(Builder):
  """A simple subnetwork builder that takes feature_columns."""

  def __init__(self, name, feature_columns, seed=42):
    """Initializes the builder.

    Args:
      name: String name of the subnetwork.
      feature_columns: Feature columns used to build the input layer.
      seed: Random seed for the logits initializer.
    """
    self._name = name
    self._feature_columns = feature_columns
    self._seed = seed

  @property
  def name(self):
    """Returns the builder's name."""
    return self._name

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """Builds an input-layer + dense-logits subnetwork."""
    seed = self._seed
    if previous_ensemble:
      # Increment seed so different iterations don't learn the exact same thing.
      seed += 1
    with tf_compat.v1.variable_scope("simple"):
      input_layer = tf_compat.v1.feature_column.input_layer(
          features=features, feature_columns=self._feature_columns)
      last_layer = input_layer
    with tf_compat.v1.variable_scope("logits"):
      logits = tf_compat.v1.layers.dense(
          last_layer,
          logits_dimension,
          kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
    return Subnetwork(
        last_layer=last_layer,
        logits=logits,
        complexity=1,
        persisted_tensors={},
    )

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    """Returns a plain SGD train op for the subnetwork."""
    optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
    return optimizer.minimize(loss, var_list=var_list)

  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    """Returns a plain SGD train op for the mixture weights."""
    optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
    return optimizer.minimize(loss, var_list=var_list)
class _NanLossBuilder(Builder):
  """A subnetwork builder always produces a NaN loss."""

  @property
  def name(self):
    """Returns the fixed builder name."""
    return "nan"

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """Returns logits multiplied by NaN so any downstream loss is NaN."""
    logits = tf_compat.v1.layers.dense(
        features["x"],
        logits_dimension,
        kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
            seed=42)) * np.nan
    return Subnetwork(last_layer=logits, logits=logits, complexity=0)

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    """Returns a no-op: this builder never trains."""
    return tf.no_op()
class _FrozenLinearBuilder(Builder):
  """A simple linear subnetwork builder that doesn't train."""

  def __init__(self, name, seed=42):
    """Initializes the builder with a name and an initializer seed."""
    self._name = name
    self._seed = seed

  @property
  def name(self):
    """Returns the builder's name."""
    return self._name

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """Builds a linear model over features["x"]; weights stay at init."""
    logits = tf_compat.v1.layers.dense(
        features["x"],
        logits_dimension,
        kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
            seed=self._seed))
    return Subnetwork(
        last_layer=features["x"],
        logits=logits,
        complexity=1,
        persisted_tensors={},
    )

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    """Returns a no-op so the linear weights are frozen at their init."""
    return tf.no_op()
class _FakeGenerator(Generator):
  """Generator that exposed generate_candidates' arguments."""

  def __init__(self, spy_fn, subnetwork_builders):
    """Stores the spy callback and the canned candidate list.

    Args:
      spy_fn: `(iteration_number, previous_ensemble_reports, all_reports) ->
        ()`. Invoked with the arguments passed to `generate_candidates`
        whenever it is called.
      subnetwork_builders: Fixed list of `Builder`s returned by every call to
        `generate_candidates`.
    """
    self._spy_fn = spy_fn
    self._subnetwork_builders = subnetwork_builders

  def generate_candidates(self, previous_ensemble, iteration_number,
                          previous_ensemble_reports, all_reports):
    """Records the call via the spy, then returns the canned builders."""
    del previous_ensemble  # Not inspected by the fake.
    self._spy_fn(iteration_number, previous_ensemble_reports, all_reports)
    return self._subnetwork_builders
class _WidthLimitingDNNBuilder(_DNNBuilder):
  """Limits the width of the previous_ensemble."""

  def __init__(self,
               name,
               learning_rate=.001,
               mixture_weight_learning_rate=.001,
               return_penultimate_layer=True,
               layer_size=1,
               width_limit=None,
               seed=13):
    """Initializes a width-limiting DNN builder.

    Args:
      name: String name of the subnetwork.
      learning_rate: SGD learning rate for the subnetwork weights.
      mixture_weight_learning_rate: SGD learning rate for mixture weights.
      return_penultimate_layer: See `_DNNBuilder`.
      layer_size: Width of the hidden layer.
      width_limit: Maximum number of weighted subnetworks to keep in the
        previous ensemble, or None for no limit.
      seed: Random seed used by the weight initializers.

    Raises:
      ValueError: If width_limit is 0.
    """
    if width_limit is not None and width_limit == 0:
      raise ValueError("width_limit must be at least 1 or None.")
    # BUG FIX: `_DNNBuilder.__init__` takes four hook arguments between
    # `layer_size` and `seed`. Passing `seed` as the sixth positional
    # argument assigned it to `subnetwork_chief_hooks` and silently left
    # `self._seed` at its default. Use keyword arguments so `seed` lands in
    # the right parameter.
    super(_WidthLimitingDNNBuilder, self).__init__(
        name,
        learning_rate=learning_rate,
        mixture_weight_learning_rate=mixture_weight_learning_rate,
        return_penultimate_layer=return_penultimate_layer,
        layer_size=layer_size,
        seed=seed)
    self._width_limit = width_limit

  def prune_previous_ensemble(self, previous_ensemble):
    """Returns the indices of the previous ensemble's subnetworks to keep.

    Keeps the newest `width_limit - 1` subnetworks (leaving room for the new
    one); a limit of 1 drops everything, and None keeps everything.
    """
    indices = list(range(len(previous_ensemble.weighted_subnetworks)))
    if self._width_limit is None:
      return indices
    if self._width_limit == 1:
      return []
    return indices[-self._width_limit + 1:]  # pylint: disable=invalid-unary-operand-type
class _FakeEvaluator(object):
  """Test double standing in for an `adanet.Evaluator`."""

  # Name of the metric the fake evaluator claims to optimize.
  _METRIC_NAME = "adanet_loss"

  def __init__(self, input_fn):
    self._input_fn = input_fn

  @property
  def input_fn(self):
    """Return the input_fn."""
    return self._input_fn

  @property
  def steps(self):
    """Return the number of evaluation steps."""
    return 1

  @property
  def metric_name(self):
    """Returns the name of the metric being optimized."""
    return self._METRIC_NAME

  @property
  def objective_fn(self):
    """Always returns the minimize objective."""
    return np.nanargmin

  def evaluate(self, sess, ensemble_metrics):
    """Abstract method to be overridden in subclasses."""
    del sess, ensemble_metrics  # Unused.
    raise NotImplementedError
class _AlwaysLastEvaluator(_FakeEvaluator):
  """Fake evaluator that always crowns the final candidate."""

  def evaluate(self, sess, ensemble_metrics):
    """Always makes the last loss the smallest."""
    del sess  # Unused.
    losses = [np.inf for _ in ensemble_metrics]
    losses[-1] = 0.
    return losses
class _AlwaysSecondToLastEvaluator(_FakeEvaluator):
  """Fake evaluator that always crowns the next-to-last candidate."""

  def evaluate(self, sess, ensemble_metrics):
    """Always makes the second to last loss the smallest."""
    del sess  # Unused.
    losses = [np.inf for _ in ensemble_metrics]
    losses[-2] = 0.
    return losses
class _EarlyStoppingHook(tf_compat.SessionRunHook):
  """Hook that immediately requests training to stop."""

  def after_run(self, run_context, run_values):
    # Stop after the very first step so tests can exercise early-stopping
    # code paths quickly.
    run_context.request_stop()
class EstimatorTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "one_step",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 1,
"max_steps": None,
"want_loss": 0.49899703,
"want_iteration": 0,
"want_global_step": 1,
},
{
"testcase_name": "enable_v2_checkpoint",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 100,
"steps": 300,
"max_steps": None,
"want_loss": 0.3221922,
"want_iteration": 2,
"want_global_step": 300,
"enable_v2_checkpoint": True,
},
{
"testcase_name": "none_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": None,
"steps": 300,
"max_steps": None,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"steps": 300,
"max_steps": None,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_two_max_iteration_fewer_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_iterations": 2,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_no_bias",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"use_bias": False,
"want_loss": 0.496736,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name":
"single_builder_subnetwork_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
subnetwork_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
subnetwork_hooks=[tu.ModifierSessionRunHook("hook_var")])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_mixture_weight_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
mixture_weight_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
mixture_weight_hooks=[
tu.ModifierSessionRunHook("hook_var")
])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_scalar_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.SCALAR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_vector_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.VECTOR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name": "single_builder_replicate_ensemble_in_training",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"replicate_ensemble_in_training": True,
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420215,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_with_hook",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"hooks": [tu.ModifierSessionRunHook()],
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "high_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 500,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name":
"two_builders",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", seed=99)]),
"max_iteration_steps":
200,
"want_loss":
0.27713922,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_different_layer_sizes",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_one_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
None,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_one_max_iteration_two_hundred_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
300,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_two_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
2,
"steps":
None,
"max_steps":
None,
"want_loss":
0.26503286,
"want_iteration":
1,
"want_global_step":
400,
},
{
"testcase_name":
"two_builders_different_layer_sizes_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"two_dnn_export_subnetworks",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
"export_subnetworks":
True,
},
{
"testcase_name":
"width_limiting_builder_no_pruning",
"subnetwork_generator":
SimpleGenerator([_WidthLimitingDNNBuilder("no_pruning")]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_some_pruning",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("some_pruning", width_limit=2)]),
"max_iteration_steps":
75,
"want_loss":
0.38592532,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_prune_all",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("prune_all", width_limit=1)]),
"max_iteration_steps":
75,
"want_loss":
0.43161362,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_mixed",
"subnetwork_generator":
SimpleGenerator([
_WidthLimitingDNNBuilder("no_pruning"),
_WidthLimitingDNNBuilder("some_pruning", width_limit=2),
_WidthLimitingDNNBuilder("prune_all", width_limit=1)
]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_good_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.36189985,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_bad_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[1.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.31389591,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_second_to_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysSecondToLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.32487726,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"report_materializer",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"report_materializer":
ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.29196805,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy_multiple_ensemblers",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"ensemblers": [
ComplexityRegularizedEnsembler(),
ComplexityRegularizedEnsembler(use_bias=True, name="with_bias")
],
"max_iteration_steps":
200,
"want_loss":
0.23053232,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.35249719,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.36163166,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"multi_ensemble_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies":
[AllStrategy(), GrowStrategy(),
SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.24838975,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"dataset_train_input_fn",
"subnetwork_generator":
SimpleGenerator([_DNNBuilder("dnn")]),
# pylint: disable=g-long-lambda
"train_input_fn":
lambda: tf.data.Dataset.from_tensors(({
"x": XOR_FEATURES
}, XOR_LABELS)).repeat(),
# pylint: enable=g-long-lambda
"max_iteration_steps":
100,
"want_loss":
0.32219219,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"early_stopping_subnetwork",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", subnetwork_hooks=[_EarlyStoppingHook()])
]),
"max_iteration_steps":
100,
"max_steps":
200,
"want_loss":
0.2958503,
# Since one subnetwork stops after 1 step and global step is the
# mean of iteration steps, global step will be incremented at half
# the rate.
"want_iteration":
3,
"want_global_step":
200,
})
def test_lifecycle(self,
                   subnetwork_generator,
                   want_loss,
                   want_iteration,
                   want_global_step,
                   max_iteration_steps,
                   mixture_weight_type=MixtureWeightType.MATRIX,
                   evaluator=None,
                   use_bias=True,
                   replicate_ensemble_in_training=False,
                   hooks=None,
                   ensemblers=None,
                   ensemble_strategies=None,
                   max_steps=300,
                   steps=None,
                   report_materializer=None,
                   train_input_fn=None,
                   max_iterations=None,
                   export_subnetworks=False,
                   enable_v2_checkpoint=False):
  """Train entire estimator lifecycle using XOR dataset."""
  run_config = tf.estimator.RunConfig(tf_random_seed=42)

  def _metric_fn(predictions):
    # Custom metric plumbed through the Estimator's `metric_fn` parameter.
    mean = tf.keras.metrics.Mean()
    mean.update_state(predictions["predictions"])
    return {"keras_mean": mean}

  default_ensembler_kwargs = {
      "mixture_weight_type": mixture_weight_type,
      "mixture_weight_initializer": tf_compat.v1.zeros_initializer(),
      "warm_start_mixture_weights": True,
      "use_bias": use_bias,
      "enable_v2_checkpoint": enable_v2_checkpoint,
  }
  # Explicit ensemblers take over all ensembling configuration, so the
  # default ensembler kwargs must not be passed alongside them.
  if ensemblers:
    default_ensembler_kwargs = {}
  estimator = Estimator(
      head=tu.head(),
      subnetwork_generator=subnetwork_generator,
      max_iteration_steps=max_iteration_steps,
      evaluator=evaluator,
      ensemblers=ensemblers,
      ensemble_strategies=ensemble_strategies,
      report_materializer=report_materializer,
      replicate_ensemble_in_training=replicate_ensemble_in_training,
      metric_fn=_metric_fn,
      model_dir=self.test_subdirectory,
      config=run_config,
      max_iterations=max_iterations,
      export_subnetwork_logits=export_subnetworks,
      export_subnetwork_last_layer=export_subnetworks,
      **default_ensembler_kwargs)
  if not train_input_fn:
    train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
  # Train.
  estimator.train(
      input_fn=train_input_fn, steps=steps, max_steps=max_steps, hooks=hooks)
  # Evaluate.
  eval_results = estimator.evaluate(
      input_fn=train_input_fn, steps=10, hooks=hooks)
  logging.info("%s", eval_results)
  self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
  self.assertEqual(want_global_step, eval_results["global_step"])
  self.assertEqual(want_iteration, eval_results["iteration"])
  # Predict.
  predictions = estimator.predict(
      input_fn=tu.dataset_input_fn(features=[0., 0.], labels=None))
  for prediction in predictions:
    self.assertIsNotNone(prediction["predictions"])

  # Export SavedModel.
  def serving_input_fn():
    """Input fn for serving export, starting from serialized example."""
    serialized_example = tf_compat.v1.placeholder(
        dtype=tf.string, shape=(None), name="serialized_example")
    return tf.estimator.export.ServingInputReceiver(
        features={"x": tf.constant([[0., 0.]], name="serving_x")},
        receiver_tensors=serialized_example)

  # `export_saved_model` exists only on newer TF Estimator releases; fall
  # back to the deprecated `export_savedmodel` otherwise.
  export_saved_model_fn = getattr(estimator, "export_saved_model", None)
  if not callable(export_saved_model_fn):
    export_saved_model_fn = estimator.export_savedmodel
  export_dir_base = os.path.join(self.test_subdirectory, "export")
  export_saved_model_fn(
      export_dir_base=export_dir_base,
      serving_input_receiver_fn=serving_input_fn)
  if export_subnetworks:
    # Verify the extra subnetwork signatures made it into the SavedModel.
    saved_model = saved_model_utils.read_saved_model(
        os.path.join(export_dir_base,
                     tf.io.gfile.listdir(export_dir_base)[0]))
    export_signature_def = saved_model.meta_graphs[0].signature_def
    self.assertIn("subnetwork_logits", export_signature_def.keys())
    self.assertIn("subnetwork_last_layer", export_signature_def.keys())
  @parameterized.named_parameters(
      {
          "testcase_name":
              "hash_bucket_with_one_hot",
          "feature_column": (tf.feature_column.indicator_column(
              categorical_column=(
                  tf.feature_column.categorical_column_with_hash_bucket(
                      key="human_names", hash_bucket_size=4, dtype=tf.string)))
                            ),
      }, {
          "testcase_name":
              "vocab_list_with_one_hot",
          "feature_column": (tf.feature_column.indicator_column(
              categorical_column=(
                  tf.feature_column.categorical_column_with_vocabulary_list(
                      key="human_names",
                      vocabulary_list=["alice", "bob"],
                      dtype=tf.string)))),
      }, {
          "testcase_name":
              "hash_bucket_with_embedding",
          "feature_column": (tf.feature_column.embedding_column(
              categorical_column=(
                  tf.feature_column.categorical_column_with_hash_bucket(
                      key="human_names", hash_bucket_size=4, dtype=tf.string)),
              dimension=2)),
      }, {
          "testcase_name":
              "vocab_list_with_embedding",
          "feature_column": (tf.feature_column.embedding_column(
              categorical_column=(
                  tf.feature_column.categorical_column_with_vocabulary_list(
                      key="human_names",
                      vocabulary_list=["alice", "bob"],
                      dtype=tf.string)),
              dimension=2)),
      })
  def test_categorical_columns(self, feature_column):
    """Trains an Estimator whose subnetwork uses a categorical feature column.

    Covers one-hot (indicator) and embedding columns over both hash-bucket
    and vocabulary-list categorical columns.
    """

    def train_input_fn():
      # String feature keyed "human_names"; label is whether the name
      # starts with the letter "a".
      input_features = {
          "human_names": tf.constant([["alice"], ["bob"]], name="human_names")
      }
      input_labels = tf.constant([[1.], [0.]], name="starts_with_a")
      return input_features, input_labels

    report_materializer = ReportMaterializer(input_fn=train_input_fn, steps=1)
    estimator = Estimator(
        head=regression_head.RegressionHead(),
        subnetwork_generator=SimpleGenerator(
            [_SimpleBuilder(name="simple", feature_columns=[feature_column])]),
        report_materializer=report_materializer,
        mixture_weight_type=MixtureWeightType.MATRIX,
        mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
        warm_start_mixture_weights=True,
        max_iteration_steps=1,
        use_bias=True,
        model_dir=self.test_subdirectory)

    # Only checks that training completes without raising.
    estimator.train(input_fn=train_input_fn, max_steps=3)
  @parameterized.named_parameters(
      {
          "testcase_name": "no_subnetwork_generator",
          "subnetwork_generator": None,
          "max_iteration_steps": 100,
          "want_error": ValueError,
      },
      {
          "testcase_name": "negative_max_iteration_steps",
          "subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
          "max_iteration_steps": -1,
          "want_error": ValueError,
      },
      {
          "testcase_name": "zero_max_iteration_steps",
          "subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
          "max_iteration_steps": 0,
          "want_error": ValueError,
      },
      {
          "testcase_name": "negative_max_iterations",
          "subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
          "max_iteration_steps": 1,
          "max_iterations": -1,
          "want_error": ValueError,
      },
      {
          "testcase_name": "zero_max_iterations",
          "subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
          "max_iteration_steps": 1,
          "max_iterations": 0,
          "want_error": ValueError,
      },
      {
          "testcase_name": "steps_and_max_steps",
          "subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
          "max_iteration_steps": 1,
          "steps": 1,
          "max_steps": 1,
          "want_error": ValueError,
      },
      {
          "testcase_name": "zero_steps",
          "subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
          "max_iteration_steps": 1,
          "steps": 0,
          "max_steps": None,
          "want_error": ValueError,
      },
      {
          "testcase_name": "nan_loss_builder",
          "subnetwork_generator": SimpleGenerator([_NanLossBuilder()]),
          "max_iteration_steps": 1,
          "max_steps": None,
          "want_error": tf_compat.v1.estimator.NanLossDuringTrainingError,
      },
      {
          "testcase_name":
              "nan_loss_builder_first",
          "subnetwork_generator":
              SimpleGenerator([
                  _NanLossBuilder(),
                  _DNNBuilder("dnn"),
              ]),
          "max_iteration_steps":
              1,
          "max_steps":
              None,
          "want_error":
              tf_compat.v1.estimator.NanLossDuringTrainingError,
      },
      {
          "testcase_name":
              "nan_loss_builder_last",
          "subnetwork_generator":
              SimpleGenerator([
                  _DNNBuilder("dnn"),
                  _NanLossBuilder(),
              ]),
          "max_iteration_steps":
              1,
          "max_steps":
              None,
          "want_error":
              tf_compat.v1.estimator.NanLossDuringTrainingError,
      },
  )
  def test_train_error(self,
                       subnetwork_generator,
                       max_iteration_steps,
                       want_error,
                       steps=None,
                       max_steps=10,
                       max_iterations=None):
    """Checks that invalid arguments or NaN losses raise `want_error`."""
    report_materializer = ReportMaterializer(
        input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
    # The error can surface either in the constructor (argument validation
    # cases) or inside `train` (NaN-loss cases), so both statements execute
    # inside the assertion scope.
    with self.assertRaises(want_error):
      estimator = Estimator(
          head=tu.head(),
          subnetwork_generator=subnetwork_generator,
          report_materializer=report_materializer,
          mixture_weight_type=MixtureWeightType.MATRIX,
          mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
          warm_start_mixture_weights=True,
          max_iteration_steps=max_iteration_steps,
          use_bias=True,
          max_iterations=max_iterations,
          model_dir=self.test_subdirectory)
      train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
      estimator.train(input_fn=train_input_fn, steps=steps, max_steps=max_steps)
def test_binary_head_asserts_are_disabled(self):
"""Tests b/140267630."""
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn"),
_NanLossBuilder(),
])
estimator = Estimator(
head=binary_class_head_v1(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
model_dir=self.test_subdirectory)
eval_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.evaluate(input_fn=eval_input_fn, steps=1)
class KerasCNNBuilder(Builder):
  """Builds a CNN subnetwork for AdaNet."""

  def __init__(self, name, learning_rate, num_dense, units=3, seed=42):
    """Initializes a `KerasCNNBuilder`.

    Args:
      name: String name.
      learning_rate: The float learning rate to use.
      num_dense: Number of dense layers.
      units: Units per dense layer.
      seed: The random seed.

    Returns:
      An instance of `KerasCNNBuilder`.
    """
    self._name = name
    self._learning_rate = learning_rate
    self._num_dense = num_dense
    self._units = units
    self._seed = seed

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
    seed = self._seed
    # Offset the seed by the ensemble size so each new subnetwork gets a
    # different initialization from the previous iterations' members.
    if previous_ensemble:
      seed += len(previous_ensemble.weighted_subnetworks)
    images = list(features.values())[0]
    # Assumes the single input feature reshapes to 2x2 single-channel
    # "images" (i.e. 4 values per example) -- confirm against callers.
    images = tf.reshape(images, [-1, 2, 2, 1])
    kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
    x = images
    x = tf.keras.layers.Conv2D(
        filters=3,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer=kernel_initializer)(
            x)
    x = tf.keras.layers.MaxPool2D(pool_size=1, strides=1)(x)
    x = tf.keras.layers.Flatten()(x)
    for _ in range(self._num_dense):
      x = tf_compat.v1.layers.Dense(
          units=self._units,
          activation="relu",
          kernel_initializer=kernel_initializer)(
              x)
    logits = tf.keras.layers.Dense(
        units=1, activation=None, kernel_initializer=kernel_initializer)(
            x)
    # All subnetworks report identical complexity for this test builder.
    complexity = tf.constant(1)
    return Subnetwork(
        last_layer=x, logits=logits, complexity=complexity, shared={})

  def build_subnetwork_train_op(self,
                                subnetwork,
                                loss,
                                var_list,
                                labels,
                                iteration_step,
                                summary,
                                previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
    optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
    return optimizer.minimize(loss=loss, var_list=var_list)

  @property
  def name(self):
    return self._name
# TODO: Test should be enabled when we support Keras layers.
# class EstimatorKerasLayersTest(tu.AdanetTestCase):
#
# def test_lifecycle(self):
# """Train entire estimator lifecycle using XOR dataset."""
#
# run_config = tf.estimator.RunConfig(tf_random_seed=42)
# estimator = Estimator(
# head=tu.head(),
# subnetwork_generator=SimpleGenerator([
# KerasCNNBuilder("cnn0", learning_rate=.001, num_dense=1, units=3),
# ]),
# max_iteration_steps=100,
# evaluator=Evaluator(
# input_fn=tu.dummy_input_fn([[1., 1., .1, .1]], [[0.]]), steps=3),
# model_dir=self.test_subdirectory,
# force_grow=True,
# config=run_config)
#
# xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
# [1., 1., 1., 1.]]
# xor_labels = [[1.], [0.], [1.], [0.]]
# train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)
#
# # Train.
# estimator.train(input_fn=train_input_fn, max_steps=300)
#
# # Restore from checkpoint to check that variables match up.
# estimator.train(input_fn=train_input_fn, max_steps=1)
#
# # Evaluate.
# eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
# logging.info("%s", eval_results)
# want_loss = 0.164
# self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
#
# # Predict.
# predictions = estimator.predict(
# input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
# for prediction in predictions:
# self.assertIsNotNone(prediction["predictions"])
#
# # Export SavedModel.
# def serving_input_fn():
# """Input fn for serving export, starting from serialized example."""
# serialized_example = tf_compat.v1.placeholder(
# dtype=tf.string, shape=(None), name="serialized_example")
# return tf.estimator.export.ServingInputReceiver(
# features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
# receiver_tensors=serialized_example)
#
# estimator.export_saved_model(
# export_dir_base=self.test_subdirectory,
# serving_input_receiver_fn=serving_input_fn)
class MultiHeadBuilder(Builder):
  """Builds a subnetwork for AdaNet that uses dict labels."""

  def __init__(self, learning_rate=.001, split_logits=False, seed=42):
    """Initializes a `MultiHeadBuilder`.

    Args:
      learning_rate: The float learning rate to use.
      split_logits: Whether to return a dict of logits or a single concatenated
        logits `Tensor`.
      seed: The random seed.

    Returns:
      An instance of `MultiHeadBuilder`.
    """
    self._learning_rate = learning_rate
    self._split_logits = split_logits
    self._seed = seed

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
    seed = self._seed
    # Vary initialization per iteration, as in the other test builders.
    if previous_ensemble:
      seed += len(previous_ensemble.weighted_subnetworks)
    kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
    x = features["x"]
    logits = tf_compat.v1.layers.dense(
        x,
        units=logits_dimension,
        activation=None,
        kernel_initializer=kernel_initializer)
    if self._split_logits:
      # Return different logits, one for each head.
      logits1, logits2 = tf.split(logits, [1, 1], 1)
      logits = {
          "head1": logits1,
          "head2": logits2,
      }
    complexity = tf.constant(1)
    return Subnetwork(
        last_layer=logits,
        logits=logits,
        complexity=complexity,
        persisted_tensors={})

  def build_subnetwork_train_op(self,
                                subnetwork,
                                loss,
                                var_list,
                                labels,
                                iteration_step,
                                summary,
                                previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
    optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
    return optimizer.minimize(loss=loss, var_list=var_list)

  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    """See `adanet.subnetwork.Builder`."""
    optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
    return optimizer.minimize(loss=loss, var_list=var_list)

  @property
  def name(self):
    return "multi_head"
class EstimatorMultiHeadTest(tu.AdanetTestCase):
  """Tests the Estimator lifecycle with a multi-head."""

  @parameterized.named_parameters(
      {
          "testcase_name": "concatenated_logits",
          "builders": [MultiHeadBuilder()],
          "want_loss": 3.218,
      }, {
          "testcase_name": "split_logits_with_export_subnetworks",
          "builders": [MultiHeadBuilder(split_logits=True)],
          "want_loss": 3.224,
          "export_subnetworks": True,
      }, {
          "testcase_name": "split_logits",
          "builders": [MultiHeadBuilder(split_logits=True)],
          "want_loss": 3.224,
      })
  def test_lifecycle(self, builders, want_loss, export_subnetworks=False):
    """Train entire estimator lifecycle using XOR dataset."""
    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
                    [1., 1., 1., 1.]]
    xor_labels = [[1.], [0.], [1.], [0.]]

    def train_input_fn():
      # Both heads receive the same labels.
      return {
          "x": tf.constant(xor_features)
      }, {
          "head1": tf.constant(xor_labels),
          "head2": tf.constant(xor_labels)
      }

    estimator = Estimator(
        head=multi_head_lib.MultiHead(heads=[
            regression_head.RegressionHead(
                name="head1", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
            regression_head.RegressionHead(
                name="head2", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
        ]),
        subnetwork_generator=SimpleGenerator(builders),
        max_iteration_steps=3,
        evaluator=Evaluator(input_fn=train_input_fn, steps=1),
        model_dir=self.test_subdirectory,
        config=run_config,
        export_subnetwork_logits=export_subnetworks,
        export_subnetwork_last_layer=export_subnetworks)

    # Train.
    estimator.train(input_fn=train_input_fn, max_steps=9)

    # Evaluate.
    eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
    self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)

    # Predict. Multi-head predictions are keyed by (head_name, key) tuples.
    predictions = estimator.predict(
        input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
    for prediction in predictions:
      self.assertIsNotNone(prediction[("head1", "predictions")])
      self.assertIsNotNone(prediction[("head2", "predictions")])

    # Export SavedModel.
    def serving_input_fn():
      """Input fn for serving export, starting from serialized example."""
      serialized_example = tf_compat.v1.placeholder(
          dtype=tf.string, shape=(None), name="serialized_example")
      return tf.estimator.export.ServingInputReceiver(
          features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
          receiver_tensors=serialized_example)

    # Fall back to the older `export_savedmodel` name when
    # `export_saved_model` is unavailable on this TF version.
    export_saved_model_fn = getattr(estimator, "export_saved_model", None)
    if not callable(export_saved_model_fn):
      export_saved_model_fn = estimator.export_savedmodel
    export_dir_base = os.path.join(self.test_subdirectory, "export")
    export_saved_model_fn(
        export_dir_base=export_dir_base,
        serving_input_receiver_fn=serving_input_fn)
    if export_subnetworks:
      # Each head gets its own logits/last-layer signatures in the export.
      saved_model = saved_model_utils.read_saved_model(
          os.path.join(export_dir_base,
                       tf.io.gfile.listdir(export_dir_base)[0]))
      export_signature_def = saved_model.meta_graphs[0].signature_def
      self.assertIn("subnetwork_logits_head1", export_signature_def.keys())
      self.assertIn("subnetwork_logits_head2", export_signature_def.keys())
      self.assertIn("subnetwork_last_layer_head1", export_signature_def.keys())
      self.assertIn("subnetwork_last_layer_head2", export_signature_def.keys())
class EstimatorCallingModelFnDirectlyTest(tu.AdanetTestCase):
  """Tests b/112108745. Warn users not to call model_fn directly."""

  def test_calling_model_fn_directly(self):
    """Calling model_fn directly in TRAIN mode should raise a UserWarning."""
    subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
    report_materializer = ReportMaterializer(
        input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        report_materializer=report_materializer,
        max_iteration_steps=3,
        use_bias=True,
        model_dir=self.test_subdirectory)
    model_fn = estimator.model_fn
    train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
    tf_compat.v1.train.create_global_step()
    features, labels = train_input_fn()
    with self.assertRaises(UserWarning):
      model_fn(
          features=features,
          mode=tf.estimator.ModeKeys.TRAIN,
          labels=labels,
          config={})

  def test_calling_model_fn_directly_for_predict(self):
    """Calling model_fn directly in PREDICT mode is allowed (no warning)."""
    with context.graph_mode():
      subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
      report_materializer = ReportMaterializer(
          input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
      estimator = Estimator(
          head=tu.head(),
          subnetwork_generator=subnetwork_generator,
          report_materializer=report_materializer,
          max_iteration_steps=3,
          use_bias=True,
          model_dir=self.test_subdirectory)
      model_fn = estimator.model_fn
      train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
      tf_compat.v1.train.create_global_step()
      features, labels = train_input_fn()
      model_fn(
          features=features,
          mode=tf.estimator.ModeKeys.PREDICT,
          labels=labels,
          config=tf.estimator.RunConfig(
              save_checkpoints_steps=1,
              keep_checkpoint_max=3,
              model_dir=self.test_subdirectory,
          ))
class EstimatorCheckpointTest(tu.AdanetTestCase):
  """Tests estimator checkpoints."""

  @parameterized.named_parameters(
      {
          "testcase_name": "single_iteration",
          "max_iteration_steps": 3,
          "keep_checkpoint_max": 3,
          "want_num_checkpoints": 3,
      }, {
          "testcase_name": "single_iteration_keep_one",
          "max_iteration_steps": 3,
          "keep_checkpoint_max": 1,
          "want_num_checkpoints": 1,
      }, {
          "testcase_name": "three_iterations",
          "max_iteration_steps": 1,
          "keep_checkpoint_max": 3,
          "want_num_checkpoints": 3,
      }, {
          "testcase_name": "three_iterations_keep_one",
          "max_iteration_steps": 1,
          "keep_checkpoint_max": 1,
          "want_num_checkpoints": 1,
      })
  def test_checkpoints(self,
                       max_iteration_steps,
                       keep_checkpoint_max,
                       want_num_checkpoints,
                       max_steps=3):
    """Checks `keep_checkpoint_max` is honored across AdaNet iterations."""
    config = tf.estimator.RunConfig(
        save_checkpoints_steps=1,
        keep_checkpoint_max=keep_checkpoint_max,
    )
    subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
    report_materializer = ReportMaterializer(
        input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        report_materializer=report_materializer,
        mixture_weight_type=MixtureWeightType.MATRIX,
        mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
        warm_start_mixture_weights=True,
        max_iteration_steps=max_iteration_steps,
        use_bias=True,
        config=config,
        model_dir=self.test_subdirectory)
    train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
    estimator.train(input_fn=train_input_fn, max_steps=max_steps)

    # Each retained checkpoint leaves one `*.meta` file in the model dir, so
    # counting them counts checkpoints.
    checkpoints = tf.io.gfile.glob(
        os.path.join(self.test_subdirectory, "*.meta"))
    self.assertEqual(want_num_checkpoints, len(checkpoints))
def _check_eventfile_for_keyword(keyword, dir_):
  """Returns the value of the summary tagged `keyword` in `dir_`'s events.

  Flushes pending writers, then scans the most recent event file for a
  summary value whose tag equals `keyword`, returning its simple value,
  image (height, width, colorspace) tuple, or tensor string values.

  Raises:
    ValueError: If the directory, an event file, or the keyword is missing.
  """
  # Flush any pending summary writers so the event files are complete.
  tf_compat.v1.summary.FileWriterCache.clear()

  if not tf.io.gfile.exists(dir_):
    raise ValueError("Directory '{}' not found.".format(dir_))

  pattern = os.path.join(dir_, "events*")
  event_files = tf.io.gfile.glob(pattern)
  if not event_files:
    raise ValueError("Path '{}' not found.".format(pattern))

  # Only the most recent event file is inspected.
  for event in tf_compat.v1.train.summary_iterator(event_files[-1]):
    if event.summary is None:
      continue
    for value in event.summary.value:
      if value.tag != keyword:
        continue
      if value.HasField("simple_value"):
        return value.simple_value
      if value.HasField("image"):
        return (value.image.height, value.image.width,
                value.image.colorspace)
      if value.HasField("tensor"):
        return value.tensor.string_val
      # Tag matched but carried no recognized field: keep scanning.

  raise ValueError("Keyword '{}' not found in path '{}'.".format(
      keyword, pattern))
class _FakeMetric(object):
"""A fake metric."""
def __init__(self, value, dtype):
self._value = value
self._dtype = dtype
def to_metric(self):
tensor = tf.convert_to_tensor(value=self._value, dtype=self._dtype)
return (tensor, tensor)
class _EvalMetricsHead(object):
"""A fake head with the given evaluation metrics."""
def __init__(self, fake_metrics):
self._fake_metrics = fake_metrics
@property
def logits_dimension(self):
return 1
def create_estimator_spec(self,
features,
mode,
logits,
labels=None,
train_op_fn=None):
del features # Unused
metric_ops = None
if self._fake_metrics:
metric_ops = {}
for k, fake_metric in self._fake_metrics.items():
metric_ops[k] = fake_metric.to_metric()
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=logits,
loss=tf.reduce_mean(input_tensor=labels - logits),
eval_metric_ops=metric_ops,
train_op=train_op_fn(1))
def _mean_keras_metric(value):
  """Returns the mean of given value as a Keras metric."""
  metric = tf.keras.metrics.Mean()
  metric.update_state(value)
  return metric
class EstimatorSummaryWriterTest(tu.AdanetTestCase):
  """Test that Tensorboard summaries get written correctly."""

  @tf_compat.skip_for_tf2
  def test_summaries(self):
    """Tests that summaries are written to candidate directory."""
    run_config = tf.estimator.RunConfig(
        tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
    subnetwork_generator = SimpleGenerator(
        [_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
    report_materializer = ReportMaterializer(
        input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        report_materializer=report_materializer,
        mixture_weight_type=MixtureWeightType.MATRIX,
        mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
        warm_start_mixture_weights=True,
        max_iteration_steps=10,
        use_bias=True,
        config=run_config,
        model_dir=self.test_subdirectory)
    train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
    estimator.train(input_fn=train_input_fn, max_steps=3)

    ensemble_loss = 1.
    # Global summaries are written to the root model directory.
    self.assertAlmostEqual(
        ensemble_loss,
        _check_eventfile_for_keyword("loss", self.test_subdirectory),
        places=3)
    self.assertIsNotNone(
        _check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
    self.assertEqual(
        0.,
        _check_eventfile_for_keyword("iteration/adanet/iteration",
                                     self.test_subdirectory))

    # Subnetwork summaries go under subnetwork/t<iteration>_<name>.
    subnetwork_subdir = os.path.join(self.test_subdirectory,
                                     "subnetwork/t0_dnn")
    self.assertAlmostEqual(
        3., _check_eventfile_for_keyword("scalar", subnetwork_subdir), places=3)
    self.assertEqual((3, 3, 1),
                     _check_eventfile_for_keyword("image/image/0",
                                                  subnetwork_subdir))
    self.assertAlmostEqual(
        5.,
        _check_eventfile_for_keyword("nested/scalar", subnetwork_subdir),
        places=3)

    # Ensemble summaries go under ensemble/<candidate name>.
    ensemble_subdir = os.path.join(
        self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
    self.assertAlmostEqual(
        ensemble_loss,
        _check_eventfile_for_keyword(
            "adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir),
        places=3)
    self.assertAlmostEqual(
        0.,
        _check_eventfile_for_keyword(
            "complexity_regularization/adanet/adanet_weighted_ensemble",
            ensemble_subdir),
        places=3)
    self.assertAlmostEqual(
        0.,
        _check_eventfile_for_keyword(
            "mixture_weight_norms/adanet/"
            "adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
        places=3)

  @tf_compat.skip_for_tf2
  def test_disable_summaries(self):
    """Tests that summaries can be disabled for ensembles and subnetworks."""
    run_config = tf.estimator.RunConfig(
        tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
    subnetwork_generator = SimpleGenerator(
        [_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
    report_materializer = ReportMaterializer(
        input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        report_materializer=report_materializer,
        mixture_weight_type=MixtureWeightType.MATRIX,
        mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
        warm_start_mixture_weights=True,
        max_iteration_steps=10,
        use_bias=True,
        config=run_config,
        model_dir=self.test_subdirectory,
        enable_ensemble_summaries=False,
        enable_subnetwork_summaries=False,
    )
    train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
    estimator.train(input_fn=train_input_fn, max_steps=3)

    ensemble_loss = 1.
    # Global summaries are still written to the root model directory.
    self.assertAlmostEqual(
        ensemble_loss,
        _check_eventfile_for_keyword("loss", self.test_subdirectory),
        places=3)
    self.assertIsNotNone(
        _check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
    self.assertEqual(
        0.,
        _check_eventfile_for_keyword("iteration/adanet/iteration",
                                     self.test_subdirectory))

    # Subnetwork and ensemble summaries must be absent:
    # _check_eventfile_for_keyword raises ValueError when a keyword is missing.
    subnetwork_subdir = os.path.join(self.test_subdirectory,
                                     "subnetwork/t0_dnn")
    with self.assertRaises(ValueError):
      _check_eventfile_for_keyword("scalar", subnetwork_subdir)
    with self.assertRaises(ValueError):
      _check_eventfile_for_keyword("image/image/0", subnetwork_subdir)
    with self.assertRaises(ValueError):
      _check_eventfile_for_keyword("nested/scalar", subnetwork_subdir)

    ensemble_subdir = os.path.join(
        self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
    with self.assertRaises(ValueError):
      _check_eventfile_for_keyword(
          "adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir)
    with self.assertRaises(ValueError):
      _check_eventfile_for_keyword(
          "complexity_regularization/adanet/adanet_weighted_ensemble",
          ensemble_subdir)
    with self.assertRaises(ValueError):
      _check_eventfile_for_keyword(
          "mixture_weight_norms/adanet/"
          "adanet_weighted_ensemble/subnetwork_0", ensemble_subdir)

  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          "testcase_name": "none_metrics",
          "head": _EvalMetricsHead(None),
          "want_summaries": [],
          "want_loss": -1.791,
      }, {
          "testcase_name":
              "metrics_fn",
          "head":
              _EvalMetricsHead(None),
          "metric_fn":
              lambda predictions: {
                  "avg": tf_compat.v1.metrics.mean(predictions)
              },
          "want_summaries": ["avg"],
          "want_loss":
              -1.791,
      }, {
          "testcase_name":
              "keras_metrics_fn",
          "head":
              _EvalMetricsHead(None),
          "metric_fn":
              lambda predictions: {
                  "avg": _mean_keras_metric(predictions)
              },
          "want_summaries": ["avg"],
          "want_loss":
              -1.791,
      }, {
          "testcase_name": "empty_metrics",
          "head": _EvalMetricsHead({}),
          "want_summaries": [],
          "want_loss": -1.791,
      }, {
          "testcase_name":
              "evaluation_name",
          "head":
              _EvalMetricsHead({}),
          "evaluation_name":
              "continuous",
          "want_summaries": [],
          "want_loss":
              -1.791,
          "global_subdir":
              "eval_continuous",
          "subnetwork_subdir":
              "subnetwork/t0_dnn/eval_continuous",
          "ensemble_subdir":
              "ensemble/t0_dnn_grow_complexity_regularized/eval_continuous",
      }, {
          "testcase_name":
              "regression_head",
          "head":
              regression_head.RegressionHead(
                  loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
          "want_summaries": ["average_loss"],
          "want_loss":
              .256,
      }, {
          "testcase_name":
              "binary_classification_head",
          "head":
              binary_class_head.BinaryClassHead(
                  loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
          "learning_rate":
              .6,
          "want_summaries": ["average_loss", "accuracy", "recall"],
          "want_loss":
              0.122,
      }, {
          "testcase_name":
              "all_metrics",
          "head":
              _EvalMetricsHead({
                  "float32":
                      _FakeMetric(1., tf.float32),
                  "float64":
                      _FakeMetric(1., tf.float64),
                  "serialized_summary":
                      _FakeMetric(
                          tf_compat.v1.Summary(value=[
                              tf_compat.v1.Summary.Value(
                                  tag="summary_tag", simple_value=1.)
                          ]).SerializeToString(), tf.string),
              }),
          "want_summaries": [
              "float32",
              "float64",
              "serialized_summary/0",
          ],
          "want_loss":
              -1.791,
      })
  # pylint: enable=g-long-lambda
  def test_eval_metrics(
      self,
      head,
      want_loss,
      want_summaries,
      evaluation_name=None,
      metric_fn=None,
      learning_rate=.01,
      global_subdir="eval",
      subnetwork_subdir="subnetwork/t0_dnn/eval",
      ensemble_subdir="ensemble/t0_dnn_grow_complexity_regularized/eval"):
    """Test that AdaNet evaluation metrics get persisted correctly."""
    seed = 42
    run_config = tf.estimator.RunConfig(tf_random_seed=seed)
    subnetwork_generator = SimpleGenerator([
        _DNNBuilder(
            "dnn",
            learning_rate=learning_rate,
            mixture_weight_learning_rate=0.,
            layer_size=8,
            seed=seed)
    ])
    estimator = Estimator(
        head=head,
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=100,
        metric_fn=metric_fn,
        config=run_config,
        model_dir=self.test_subdirectory)
    train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
    estimator.train(input_fn=train_input_fn, max_steps=100)

    metrics = estimator.evaluate(
        input_fn=train_input_fn, steps=1, name=evaluation_name)
    self.assertAlmostEqual(want_loss, metrics["loss"], places=3)

    # Metrics should appear under the global, subnetwork, and ensemble
    # eval directories (which vary with `evaluation_name`).
    global_subdir = os.path.join(self.test_subdirectory, global_subdir)
    subnetwork_subdir = os.path.join(self.test_subdirectory, subnetwork_subdir)
    ensemble_subdir = os.path.join(self.test_subdirectory, ensemble_subdir)
    self.assertAlmostEqual(
        want_loss,
        _check_eventfile_for_keyword("loss", subnetwork_subdir),
        places=3)
    for metric in want_summaries:
      self.assertIsNotNone(
          _check_eventfile_for_keyword(metric, subnetwork_subdir),
          msg="{} should be under 'eval'.".format(metric))
    for dir_ in [global_subdir, ensemble_subdir]:
      self.assertAlmostEqual(metrics["loss"],
                             _check_eventfile_for_keyword("loss", dir_))
      self.assertEqual([b"| dnn |"],
                       _check_eventfile_for_keyword(
                           "architecture/adanet/ensembles/0", dir_))
      for metric in want_summaries:
        self.assertTrue(
            _check_eventfile_for_keyword(metric, dir_) > 0.,
            msg="{} should be under 'eval'.".format(metric))
class EstimatorMembersOverrideTest(tu.AdanetTestCase):
  """Tests b/77494544 fix."""

  def test_assert_members_are_not_overridden(self):
    """Assert that AdaNet estimator does not break other estimators."""
    config = tf.estimator.RunConfig()
    subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
    report_materializer = ReportMaterializer(
        input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
    adanet = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        report_materializer=report_materializer,
        mixture_weight_type=MixtureWeightType.MATRIX,
        mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
        warm_start_mixture_weights=True,
        max_iteration_steps=10,
        use_bias=True,
        config=config)
    self.assertIsNotNone(adanet)
    # Constructing a non-AdaNet estimator afterwards must still succeed.
    # Fall back to tf.contrib when LinearEstimator is not in tf.estimator.
    if hasattr(tf.estimator, "LinearEstimator"):
      estimator_fn = tf.estimator.LinearEstimator
    else:
      estimator_fn = tf.contrib.estimator.LinearEstimator
    linear = estimator_fn(
        head=tu.head(), feature_columns=[tf.feature_column.numeric_column("x")])
    self.assertIsNotNone(linear)
def _dummy_feature_dict_input_fn(features, labels):
"""Returns an input_fn that returns feature and labels `Tensors`."""
def _input_fn():
input_features = {}
for key, feature in features.items():
input_features[key] = tf.constant(feature, name=key)
input_labels = tf.constant(labels, name="labels")
return input_features, input_labels
return _input_fn
class EstimatorDifferentFeaturesPerModeTest(tu.AdanetTestCase):
  """Tests b/109751254."""

  @parameterized.named_parameters(
      {
          "testcase_name": "extra_train_features",
          "train_features": {
              "x": [[1., 0.]],
              "extra": [[1., 0.]],
          },
          "eval_features": {
              "x": [[1., 0.]],
          },
          "predict_features": {
              "x": [[1., 0.]],
          },
      }, {
          "testcase_name": "extra_eval_features",
          "train_features": {
              "x": [[1., 0.]],
          },
          "eval_features": {
              "x": [[1., 0.]],
              "extra": [[1., 0.]],
          },
          "predict_features": {
              "x": [[1., 0.]],
          },
      }, {
          "testcase_name": "extra_predict_features",
          "train_features": {
              "x": [[1., 0.]],
          },
          "eval_features": {
              "x": [[1., 0.]],
          },
          "predict_features": {
              "x": [[1., 0.]],
              "extra": [[1., 0.]],
          },
      })
  def test_different_features_per_mode(self, train_features, eval_features,
                                       predict_features):
    """Tests different sets of features per mode."""
    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
    report_materializer = ReportMaterializer(
        input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        report_materializer=report_materializer,
        mixture_weight_type=MixtureWeightType.MATRIX,
        mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
        warm_start_mixture_weights=True,
        max_iteration_steps=1,
        use_bias=True,
        model_dir=self.test_subdirectory,
        config=run_config)
    labels = [[1.]]
    train_input_fn = _dummy_feature_dict_input_fn(train_features, labels)

    # Train.
    estimator.train(input_fn=train_input_fn, max_steps=2)

    # Evaluate.
    eval_input_fn = _dummy_feature_dict_input_fn(eval_features, labels)
    estimator.evaluate(input_fn=eval_input_fn, steps=1)

    # Predict.
    predict_input_fn = _dummy_feature_dict_input_fn(predict_features, None)
    estimator.predict(input_fn=predict_input_fn)

    # Export SavedModel.
    def serving_input_fn():
      """Input fn for serving export, starting from serialized example."""
      serialized_example = tf_compat.v1.placeholder(
          dtype=tf.string, shape=(None), name="serialized_example")
      features = {}
      for key, value in predict_features.items():
        features[key] = tf.constant(value)
      return tf.estimator.export.ServingInputReceiver(
          features=features, receiver_tensors=serialized_example)

    # Fall back to the older `export_savedmodel` name when
    # `export_saved_model` is unavailable on this TF version.
    export_saved_model_fn = getattr(estimator, "export_saved_model", None)
    if not callable(export_saved_model_fn):
      export_saved_model_fn = estimator.export_savedmodel
    export_saved_model_fn(
        export_dir_base=self.test_subdirectory,
        serving_input_receiver_fn=serving_input_fn)
class EstimatorExportSavedModelTest(tu.AdanetTestCase):
  """Tests SavedModel export across the supported estimator export APIs."""

  def test_export_saved_model_for_predict(self):
    """Tests SavedModel exporting functionality for predict (b/110435640)."""
    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
    report_materializer = ReportMaterializer(
        input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        report_materializer=report_materializer,
        mixture_weight_type=MixtureWeightType.MATRIX,
        mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
        warm_start_mixture_weights=True,
        max_iteration_steps=1,
        use_bias=True,
        model_dir=self.test_subdirectory,
        config=run_config)
    features = {"x": [[1., 0.]]}
    labels = [[1.]]
    train_input_fn = _dummy_feature_dict_input_fn(features, labels)
    # Train.
    estimator.train(input_fn=train_input_fn, max_steps=2)

    # Export SavedModel.
    def serving_input_fn():
      """Input fn for serving export, starting from serialized example."""
      serialized_example = tf_compat.v1.placeholder(
          dtype=tf.string, shape=(None), name="serialized_example")
      # Build the tensors in a fresh dict instead of mutating the enclosing
      # `features` dict in place while iterating it. This keeps `features`
      # intact and matches the other serving_input_fns in this file.
      tensor_features = {}
      for key, value in features.items():
        tensor_features[key] = tf.constant(value)
      return tf.estimator.export.ServingInputReceiver(
          features=tensor_features, receiver_tensors=serialized_example)

    estimator.export_saved_model(
        export_dir_base=self.test_subdirectory,
        serving_input_receiver_fn=serving_input_fn,
        experimental_mode=tf.estimator.ModeKeys.PREDICT)

  @test_util.run_in_graph_and_eager_modes
  def test_export_saved_model_for_eval(self):
    """Tests SavedModel exporting functionality for eval (b/110991908)."""
    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    subnetwork_generator = SimpleGenerator(
        [_DNNBuilder("dnn", layer_size=8, learning_rate=1.)])
    estimator = Estimator(
        head=binary_class_head.BinaryClassHead(),
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=100,
        model_dir=self.test_subdirectory,
        config=run_config)
    train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
    # Train.
    estimator.train(input_fn=train_input_fn, max_steps=300)
    metrics = estimator.evaluate(input_fn=train_input_fn, steps=1)
    self.assertAlmostEqual(.067, metrics["average_loss"], places=3)
    self.assertAlmostEqual(1., metrics["recall"], places=3)
    self.assertAlmostEqual(1., metrics["accuracy"], places=3)

    # Export SavedModel.
    def serving_input_fn():
      """Input fn for serving export, starting from serialized example."""
      serialized_example = tf_compat.v1.placeholder(
          dtype=tf.string, shape=(None), name="serialized_example")
      return export.SupervisedInputReceiver(
          features={"x": tf.constant(XOR_FEATURES)},
          labels=tf.constant(XOR_LABELS),
          receiver_tensors=serialized_example)

    export_dir_base = os.path.join(self.test_subdirectory, "export")
    # Exactly one of the two export APIs below exists per TF version, so an
    # AttributeError from the other one is expected and ignored.
    try:
      estimator.export_saved_model(
          export_dir_base=export_dir_base,
          serving_input_receiver_fn=serving_input_fn,
          experimental_mode=tf.estimator.ModeKeys.EVAL)
    except AttributeError:
      pass
    try:
      tf.contrib.estimator.export_saved_model_for_mode(
          estimator,
          export_dir_base=export_dir_base,
          input_receiver_fn=serving_input_fn,
          mode=tf.estimator.ModeKeys.EVAL)
    except AttributeError:
      pass
    subdir = tf.io.gfile.listdir(export_dir_base)[0]
    with context.graph_mode(), self.test_session() as sess:
      meta_graph_def = tf_compat.v1.saved_model.loader.load(
          sess, ["eval"], os.path.join(export_dir_base, subdir))
      signature_def = meta_graph_def.signature_def.get("eval")
      # Read zero metric.
      self.assertAlmostEqual(
          0.,
          sess.run(
              tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
                  signature_def.outputs["metrics/average_loss/value"])),
          places=3)
      # Run metric update op.
      sess.run((tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
          signature_def.outputs["metrics/average_loss/update_op"]),
                tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
                    signature_def.outputs["metrics/accuracy/update_op"]),
                tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
                    signature_def.outputs["metrics/recall/update_op"])))
      # Read metric again; it should no longer be zero.
      self.assertAlmostEqual(
          0.067,
          sess.run(
              tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
                  signature_def.outputs["metrics/average_loss/value"])),
          places=3)
      self.assertAlmostEqual(
          1.,
          sess.run(
              tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
                  signature_def.outputs["metrics/recall/value"])),
          places=3)
      self.assertAlmostEqual(
          1.,
          sess.run(
              tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
                  signature_def.outputs["metrics/accuracy/value"])),
          places=3)

  def test_export_saved_model_always_uses_replication_placement(self):
    """Tests b/137675014."""
    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    subnetwork_generator = SimpleGenerator(
        [_DNNBuilder("dnn1"), _DNNBuilder("dnn2")])
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=1,
        model_dir=self.test_subdirectory,
        config=run_config,
        experimental_placement_strategy=RoundRobinStrategy())
    features = {"x": [[1., 0.]]}
    labels = [[1.]]
    train_input_fn = _dummy_feature_dict_input_fn(features, labels)
    # Train.
    estimator.train(input_fn=train_input_fn, max_steps=2)

    # Export SavedModel.
    def serving_input_fn():
      """Input fn for serving export, starting from serialized example."""
      serialized_example = tf_compat.v1.placeholder(
          dtype=tf.string, shape=(None), name="serialized_example")
      tensor_features = {}
      for key, value in features.items():
        tensor_features[key] = tf.constant(value)
      return tf.estimator.export.ServingInputReceiver(
          features=tensor_features, receiver_tensors=serialized_example)

    # Fake the number of PS replicas so RoundRobinStrategy will be used.
    estimator._config._num_ps_replicas = 2
    # If we're still using RoundRobinStrategy, this call will fail by trying
    # to place ops on non-existent devices.
    # Check all three export methods.
    estimator.export_saved_model(
        export_dir_base=self.test_subdirectory,
        serving_input_receiver_fn=serving_input_fn,
        experimental_mode=tf.estimator.ModeKeys.PREDICT)
    try:
      estimator.export_savedmodel(
          export_dir_base=self.test_subdirectory,
          serving_input_receiver_fn=serving_input_fn)
    except AttributeError as error:
      # Log deprecation errors.
      logging.warning("Testing estimator#export_savedmodel: %s", error)
    estimator.experimental_export_all_saved_models(
        export_dir_base=self.test_subdirectory,
        input_receiver_fn_map={
            tf.estimator.ModeKeys.PREDICT: serving_input_fn,
        })
class EstimatorReportTest(tu.AdanetTestCase):
  """Tests report generation and usage."""

  def compare_report_lists(self, report_list1, report_list2):
    """Asserts two report lists are equal, ignoring metric *values*.

    Essentially assertEqual(report_list1, report_list2), but only the metric
    keys present in report_list1 are compared against report_list2.
    """

    def make_qualified_name(iteration_number, name):
      return "iteration_{}/{}".format(iteration_number, name)

    report_dict_1 = {
        make_qualified_name(report.iteration_number, report.name): report
        for report in report_list1
    }
    report_dict_2 = {
        make_qualified_name(report.iteration_number, report.name): report
        for report in report_list2
    }
    self.assertEqual(len(report_list1), len(report_list2))
    for qualified_name in report_dict_1.keys():
      report_1 = report_dict_1[qualified_name]
      report_2 = report_dict_2[qualified_name]
      self.assertEqual(
          report_1.hparams,
          report_2.hparams,
          msg="{} vs. {}".format(report_1, report_2))
      self.assertEqual(
          report_1.attributes,
          report_2.attributes,
          msg="{} vs. {}".format(report_1, report_2))
      self.assertEqual(
          report_1.included_in_final_ensemble,
          report_2.included_in_final_ensemble,
          msg="{} vs. {}".format(report_1, report_2))
      for metric_key, metric_value in report_1.metrics.items():
        self.assertEqual(
            metric_value,
            report_2.metrics[metric_key],
            msg="{} vs. {}".format(report_1, report_2))

  @parameterized.named_parameters(
      {
          "testcase_name": "one_iteration_one_subnetwork",
          "subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
          "num_iterations": 1,
          "want_materialized_iteration_reports": [[
              MaterializedReport(
                  iteration_number=0,
                  name="dnn",
                  hparams={"layer_size": 1},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
          ]],
          "want_previous_ensemble_reports": [],
          "want_all_reports": [],
      },
      {
          "testcase_name": "one_iteration_three_subnetworks",
          "subnetwork_builders": [
              # learning_rate is set to 0 for all but one Builder
              # to make sure that only one of them can learn.
              _DNNBuilder(
                  "dnn_1",
                  layer_size=1,
                  learning_rate=0.,
                  mixture_weight_learning_rate=0.),
              _DNNBuilder(
                  "dnn_2",
                  layer_size=2,
                  learning_rate=0.,
                  mixture_weight_learning_rate=0.),
              # fixing the match for dnn_3 to win.
              _DNNBuilder("dnn_3", layer_size=3),
          ],
          "num_iterations": 1,
          "want_materialized_iteration_reports": [[
              MaterializedReport(
                  iteration_number=0,
                  name="dnn_1",
                  hparams={"layer_size": 1},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=False,
              ),
              MaterializedReport(
                  iteration_number=0,
                  name="dnn_2",
                  hparams={"layer_size": 2},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=False,
              ),
              MaterializedReport(
                  iteration_number=0,
                  name="dnn_3",
                  hparams={"layer_size": 3},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
          ]],
          "want_previous_ensemble_reports": [],
          "want_all_reports": [],
      },
      {
          "testcase_name":
              "three_iterations_one_subnetwork",
          "subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
          "num_iterations":
              3,
          "want_materialized_iteration_reports": [
              [
                  MaterializedReport(
                      iteration_number=0,
                      name="dnn",
                      hparams={"layer_size": 1},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=True,
                  )
              ],
              [
                  MaterializedReport(
                      iteration_number=1,
                      name="previous_ensemble",
                      hparams={},
                      attributes={},
                      metrics={},
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=1,
                      name="dnn",
                      hparams={"layer_size": 1},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=True,
                  ),
              ],
              [
                  MaterializedReport(
                      iteration_number=2,
                      name="previous_ensemble",
                      hparams={},
                      attributes={},
                      metrics={},
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=2,
                      name="dnn",
                      hparams={"layer_size": 1},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=True,
                  ),
              ],
          ],
          "want_previous_ensemble_reports": [
              MaterializedReport(
                  iteration_number=0,
                  name="dnn",
                  hparams={"layer_size": 1},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
              MaterializedReport(
                  iteration_number=1,
                  name="dnn",
                  hparams={"layer_size": 1},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
          ],
          "want_all_reports": [
              MaterializedReport(
                  iteration_number=0,
                  name="dnn",
                  hparams={"layer_size": 1},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
              MaterializedReport(
                  iteration_number=1,
                  name="previous_ensemble",
                  hparams={},
                  attributes={},
                  metrics={},
                  included_in_final_ensemble=False,
              ),
              MaterializedReport(
                  iteration_number=1,
                  name="dnn",
                  hparams={"layer_size": 1},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
          ],
      },
      {
          "testcase_name":
              "three_iterations_three_subnetworks",
          "subnetwork_builders": [
              # learning_rate is set to 0 for all but one Builder
              # to make sure that only one of them can learn.
              _DNNBuilder(
                  "dnn_1",
                  layer_size=1,
                  learning_rate=0.,
                  mixture_weight_learning_rate=0.),
              _DNNBuilder(
                  "dnn_2",
                  layer_size=2,
                  learning_rate=0.,
                  mixture_weight_learning_rate=0.),
              # fixing the match for dnn_3 to win in every iteration.
              _DNNBuilder("dnn_3", layer_size=3),
          ],
          "num_iterations":
              3,
          "want_materialized_iteration_reports": [
              [
                  MaterializedReport(
                      iteration_number=0,
                      name="dnn_1",
                      hparams={"layer_size": 1},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=0,
                      name="dnn_2",
                      hparams={"layer_size": 2},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=0,
                      name="dnn_3",
                      hparams={"layer_size": 3},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=True,
                  ),
              ],
              [
                  MaterializedReport(
                      iteration_number=1,
                      name="previous_ensemble",
                      hparams={},
                      attributes={},
                      metrics={},
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=1,
                      name="dnn_1",
                      hparams={"layer_size": 1},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=1,
                      name="dnn_2",
                      hparams={"layer_size": 2},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=1,
                      name="dnn_3",
                      hparams={"layer_size": 3},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=True,
                  ),
              ],
              [
                  MaterializedReport(
                      iteration_number=2,
                      name="previous_ensemble",
                      hparams={},
                      attributes={},
                      metrics={},
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=2,
                      name="dnn_1",
                      hparams={"layer_size": 1},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=2,
                      name="dnn_2",
                      hparams={"layer_size": 2},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=False,
                  ),
                  MaterializedReport(
                      iteration_number=2,
                      name="dnn_3",
                      hparams={"layer_size": 3},
                      attributes={
                          "complexity": 3,
                      },
                      metrics={
                          "moo": 3,
                      },
                      included_in_final_ensemble=True,
                  ),
              ],
          ],
          "want_previous_ensemble_reports": [
              MaterializedReport(
                  iteration_number=0,
                  name="dnn_3",
                  hparams={"layer_size": 3},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
              MaterializedReport(
                  iteration_number=1,
                  name="dnn_3",
                  hparams={"layer_size": 3},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
          ],
          "want_all_reports": [
              MaterializedReport(
                  iteration_number=0,
                  name="dnn_1",
                  hparams={"layer_size": 1},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=False,
              ),
              MaterializedReport(
                  iteration_number=0,
                  name="dnn_2",
                  hparams={"layer_size": 2},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=False,
              ),
              MaterializedReport(
                  iteration_number=0,
                  name="dnn_3",
                  hparams={"layer_size": 3},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
              MaterializedReport(
                  iteration_number=1,
                  name="previous_ensemble",
                  hparams={},
                  attributes={},
                  metrics={},
                  included_in_final_ensemble=False,
              ),
              MaterializedReport(
                  iteration_number=1,
                  name="dnn_1",
                  hparams={"layer_size": 1},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=False,
              ),
              MaterializedReport(
                  iteration_number=1,
                  name="dnn_2",
                  hparams={"layer_size": 2},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=False,
              ),
              MaterializedReport(
                  iteration_number=1,
                  name="dnn_3",
                  hparams={"layer_size": 3},
                  attributes={
                      "complexity": 3,
                  },
                  metrics={
                      "moo": 3,
                  },
                  included_in_final_ensemble=True,
              ),
          ],
      },
  )
  def test_report_generation_and_usage(self, subnetwork_builders,
                                       num_iterations,
                                       want_materialized_iteration_reports,
                                       want_previous_ensemble_reports,
                                       want_all_reports):
    """Trains several iterations and verifies the reports that are produced."""
    # Stores the iteration_number, previous_ensemble_reports and all_reports
    # arguments in the spied_iteration_reports dictionary, overwriting what
    # was seen in previous iterations.
    spied_iteration_reports = {}

    def _spy_fn(iteration_number, previous_ensemble_reports, all_reports):
      spied_iteration_reports[iteration_number] = {
          "previous_ensemble_reports": previous_ensemble_reports,
          "all_reports": all_reports,
      }

    subnetwork_generator = _FakeGenerator(
        spy_fn=_spy_fn, subnetwork_builders=subnetwork_builders)
    max_iteration_steps = 5
    max_steps = max_iteration_steps * num_iterations + 1
    train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        mixture_weight_type=MixtureWeightType.MATRIX,
        mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
        warm_start_mixture_weights=True,
        max_iteration_steps=max_iteration_steps,
        use_bias=True,
        report_materializer=ReportMaterializer(
            input_fn=train_input_fn, steps=1),
        model_dir=self.test_subdirectory)
    report_accessor = estimator._report_accessor
    estimator.train(input_fn=train_input_fn, max_steps=max_steps)
    materialized_iteration_reports = list(
        report_accessor.read_iteration_reports())
    self.assertEqual(num_iterations, len(materialized_iteration_reports))
    for i in range(num_iterations):
      want_materialized_reports = want_materialized_iteration_reports[i]
      materialized_reports = materialized_iteration_reports[i]
      self.compare_report_lists(want_materialized_reports, materialized_reports)
      # Compute argmin adanet loss.
      argmin_adanet_loss = 0
      smallest_known_adanet_loss = float("inf")
      for j, materialized_subnetwork_report in enumerate(materialized_reports):
        if (smallest_known_adanet_loss >
            materialized_subnetwork_report.metrics["adanet_loss"]):
          smallest_known_adanet_loss = (
              materialized_subnetwork_report.metrics["adanet_loss"])
          argmin_adanet_loss = j
      # Check that the subnetwork with the lowest adanet loss is the one
      # that is included in the final ensemble. (The loop variable is named
      # distinctly so it does not shadow the `materialized_reports` list.)
      for j, materialized_report in enumerate(materialized_reports):
        self.assertEqual(j == argmin_adanet_loss,
                         materialized_report.included_in_final_ensemble)
    # Check the arguments passed into the generate_candidates method of the
    # Generator.
    iteration_report = spied_iteration_reports[num_iterations - 1]
    self.compare_report_lists(want_previous_ensemble_reports,
                              iteration_report["previous_ensemble_reports"])
    self.compare_report_lists(want_all_reports, iteration_report["all_reports"])
class EstimatorForceGrowTest(tu.AdanetTestCase):
  """Tests the force_grow override.
  Uses linear subnetworks with the same seed. They will produce identical
  outputs, so unless the `force_grow` override is set, none of the new
  subnetworks will improve the AdaNet objective, and AdaNet will not add them to
  the ensemble.
  """
  @parameterized.named_parameters(
      {
          "testcase_name": "one_builder_no_force_grow",
          "builders": [_FrozenLinearBuilder("linear")],
          "force_grow": False,
          "want_subnetworks": 1,
      }, {
          "testcase_name": "two_builders_no_force_grow",
          "builders": [
              _FrozenLinearBuilder("linear"),
              _FrozenLinearBuilder("linear2"),
          ],
          "force_grow": False,
          "want_subnetworks": 1,
      }, {
          "testcase_name": "one_builder",
          "builders": [_FrozenLinearBuilder("linear")],
          "force_grow": True,
          "want_subnetworks": 2,
      }, {
          "testcase_name": "two_builders",
          "builders":
              [_FrozenLinearBuilder("linear"),
               _FrozenLinearBuilder("linear2")],
          "force_grow": True,
          "want_subnetworks": 2,
      }, {
          "testcase_name":
              "two_builders_with_evaluator",
          "builders":
              [_FrozenLinearBuilder("linear"),
               _FrozenLinearBuilder("linear2")],
          "force_grow":
              True,
          "evaluator":
              Evaluator(
                  input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
          "want_subnetworks":
              3,
      })
  def test_force_grow(self,
                      builders,
                      force_grow,
                      want_subnetworks,
                      evaluator=None):
    """Test force grow with identical frozen subnetworks."""
    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    subnetwork_generator = SimpleGenerator(builders)
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=1,
        evaluator=evaluator,
        force_grow=force_grow,
        model_dir=self.test_subdirectory,
        config=run_config)
    train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
    # Train for several AdaNet iterations (max_iteration_steps=1, so each
    # train step spans one iteration; max_steps=3 covers multiple growths).
    estimator.train(input_fn=train_input_fn, max_steps=3)
    # Evaluate.
    eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
    # Count how many " linear " subnetworks made it into the final ensemble
    # by inspecting the architecture summary string.
    self.assertEqual(
        want_subnetworks,
        str(eval_results["architecture/adanet/ensembles"]).count(" linear "))
class EstimatorDebugTest(tu.AdanetTestCase):
  """Tests b/125483534. Detect NaNs in input_fns."""
  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          "testcase_name":
              "nan_features",
          "head":
              regression_head.RegressionHead(
                  name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
          "input_fn":
              lambda: ({
                  "x": tf.math.log([[1., 0.]])
              }, tf.zeros([1, 1]))
      }, {
          "testcase_name":
              "nan_label",
          "head":
              regression_head.RegressionHead(
                  name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
          "input_fn":
              lambda: ({
                  "x": tf.ones([1, 2])
              }, tf.math.log([[0.]]))
      }, {
          "testcase_name":
              "nan_labels_dict",
          "head":
              multi_head_lib.MultiHead(heads=[
                  regression_head.RegressionHead(
                      name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
              ]),
          "input_fn":
              lambda: ({
                  "x": tf.ones([1, 2])
              }, {
                  "y": tf.math.log([[0.]])
              })
      })
  # pylint: enable=g-long-lambda
  def test_nans_from_input_fn(self, head, input_fn):
    """With debug=True, NaNs produced by the input_fn must raise."""
    subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
    estimator = Estimator(
        head=head,
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=3,
        model_dir=self.test_subdirectory,
        debug=True)
    # tf.math.log(0.) / log of a non-positive value yields NaN/-inf, which
    # debug mode should surface as an InvalidArgumentError.
    with self.assertRaises(tf.errors.InvalidArgumentError):
      estimator.train(input_fn=input_fn, max_steps=3)
class EstimatorEvaluateDuringTrainHookTest(tu.AdanetTestCase):
  """Tests b/129000842 with a hook that calls estimator.evaluate()."""

  def test_train(self):
    """Trains with a hook whose end() re-enters evaluate()."""
    config = tf.estimator.RunConfig(tf_random_seed=42)
    generator = SimpleGenerator([_DNNBuilder("dnn")])
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=generator,
        max_iteration_steps=1,
        model_dir=self.test_subdirectory,
        config=config)
    input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)

    class EvalTrainHook(tf.estimator.SessionRunHook):
      """Hook that runs a nested evaluation when the session ends."""

      def end(self, session):
        estimator.evaluate(input_fn=input_fn, steps=1)

    # Training must terminate (no infinite loop) despite the nested evaluate.
    estimator.train(input_fn=input_fn, max_steps=3, hooks=[EvalTrainHook()])
class CheckpointSaverHookDuringTrainingTest(tu.AdanetTestCase):
  """Tests b/139057887."""
  def test_checkpoint_saver_hooks_not_decorated_during_training(self):
    """User-supplied CheckpointSaverHooks must keep their own listeners."""
    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=1,
        model_dir=self.test_subdirectory,
        config=run_config)
    train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
    saver_hook = tf_compat.v1.train.CheckpointSaverHook(
        checkpoint_dir=self.test_subdirectory, save_steps=10)
    listener = tf_compat.v1.train.CheckpointSaverListener()
    estimator.train(
        input_fn=train_input_fn,
        max_steps=3,
        hooks=[saver_hook],
        saving_listeners=[listener])
    # If CheckpointSaverHook was not recognized during training then all
    # saving_listeners would be attached to a default CheckpointSaverHook that
    # Estimator creates.
    self.assertLen(saver_hook._listeners, 1)
    self.assertIs(saver_hook._listeners[0], listener)
class EstimatorTFLearnRunConfigTest(tu.AdanetTestCase):
  """Tests b/129483642 for tf.contrib.learn.RunConfig.
  Checks that TF_CONFIG is overwritten correctly when no cluster is specified
  in the RunConfig and the only task is of type chief.
  """
  def test_train(self):
    """Trains with a chief-only TF_CONFIG and a contrib RunConfig."""
    try:
      run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
      # Removed in TF 1.15 (nightly). See
      # https://travis-ci.org/tensorflow/adanet/jobs/583471908
      _ = run_config._session_creation_timeout_secs
    except AttributeError:
      self.skipTest("There is no tf.contrib in TF 2.0.")
    try:
      # Simulate a single-task chief cluster via the TF_CONFIG env var.
      tf_config = {
          "task": {
              "type": "chief",
              "index": 0
          },
      }
      os.environ["TF_CONFIG"] = json.dumps(tf_config)
      run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
      run_config._is_chief = True  # pylint: disable=protected-access
      subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
      estimator = Estimator(
          head=tu.head(),
          subnetwork_generator=subnetwork_generator,
          max_iteration_steps=1,
          model_dir=self.test_subdirectory,
          config=run_config)
      train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
      # Will fail if TF_CONFIG is not overwritten correctly in
      # Estimator#prepare_next_iteration.
      estimator.train(input_fn=train_input_fn, max_steps=3)
    finally:
      # Revert TF_CONFIG environment variable in order to not break other tests.
      del os.environ["TF_CONFIG"]
class EstimatorReplayTest(tu.AdanetTestCase):
  """Tests replaying a previously-learned ensemble architecture."""
  @parameterized.named_parameters(
      {
          "testcase_name": "no_evaluator",
          "evaluator": None,
          "replay_evaluator": None,
          "want_architecture": " dnn3 | dnn3 | dnn ",
      }, {
          "testcase_name":
              "evaluator",
          "evaluator":
              Evaluator(
                  input_fn=tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS),
                  steps=1),
          "replay_evaluator":
              Evaluator(
                  input_fn=tu.dummy_input_fn([[0., 0.], [0., 0], [0., 0.],
                                              [0., 0.]], [[0], [0], [0], [0]]),
                  steps=1),
          "want_architecture":
              " dnn3 | dnn3 | dnn ",
      })
  def test_replay(self, evaluator, replay_evaluator, want_architecture):
    """Train entire estimator lifecycle using Replay."""
    original_model_dir = os.path.join(self.test_subdirectory, "original")
    run_config = tf.estimator.RunConfig(
        tf_random_seed=42, model_dir=original_model_dir)
    subnetwork_generator = SimpleGenerator([
        _DNNBuilder("dnn"),
        _DNNBuilder("dnn2", layer_size=3),
        _DNNBuilder("dnn3", layer_size=5),
    ])
    estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=10,
        evaluator=evaluator,
        config=run_config)
    train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
    # Train for three iterations.
    estimator.train(input_fn=train_input_fn, max_steps=30)
    # Evaluate.
    eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
    self.assertIn(want_architecture,
                  str(eval_results["architecture/adanet/ensembles"]))
    replay_run_config = tf.estimator.RunConfig(
        tf_random_seed=42,
        model_dir=os.path.join(self.test_subdirectory, "replayed"))
    # Use different features and labels to represent a shift in the data
    # distribution.
    different_features = [[0., 0.], [0., 0], [0., 0.], [0., 0.]]
    different_labels = [[0], [0], [0], [0]]
    replay_estimator = Estimator(
        head=tu.head(),
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=10,
        evaluator=replay_evaluator,
        config=replay_run_config,
        replay_config=replay.Config(best_ensemble_indices=[2, 3, 1]))
    train_input_fn = tu.dummy_input_fn(different_features, different_labels)
    # Train for three iterations.
    replay_estimator.train(input_fn=train_input_fn, max_steps=30)
    # Evaluate. Despite the shifted data, the replayed run must reproduce
    # the architecture recorded by the original run.
    eval_results = replay_estimator.evaluate(input_fn=train_input_fn, steps=1)
    self.assertIn(want_architecture,
                  str(eval_results["architecture/adanet/ensembles"]))
# Allow running this test module directly as a binary.
if __name__ == "__main__":
  tf.test.main()
| 115,180 | 33.734922 | 139 | py |
adanet | adanet-master/adanet/core/iteration_test.py | """Test AdaNet iteration single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.architecture import _Architecture
from adanet.core.candidate import _Candidate
from adanet.core.ensemble_builder import _EnsembleSpec
from adanet.core.ensemble_builder import _SubnetworkSpec
from adanet.core.iteration import _Iteration
from adanet.core.iteration import _IterationBuilder
from adanet.core.iteration import _TrainManager
from adanet.core.summary import _ScopedSummary
from adanet.core.summary import _TPUScopedSummary
import adanet.core.testing_utils as tu
from adanet.ensemble import Candidate as EnsembleCandidate
from adanet.subnetwork import Builder as SubnetworkBuilder
from adanet.subnetwork import Report as SubnetworkReport
from adanet.subnetwork import Subnetwork
from adanet.subnetwork import TrainOpSpec
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import regression_head
def _dummy_candidate():
  """Returns a dummy `_Candidate` instance."""
  spec = tu.dummy_ensemble_spec("foo")
  candidate_vars = [tf.Variable(1.)]
  return _Candidate(
      ensemble_spec=spec, adanet_loss=1., variables=candidate_vars)
class IterationTest(tu.AdanetTestCase):
  """Tests construction and validation of `_Iteration`."""
  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          "testcase_name": "single_candidate",
          "number": 0,
          "candidates": [_dummy_candidate()],
          "estimator_spec": tu.dummy_estimator_spec(),
          "best_candidate_index": 0,
      }, {
          "testcase_name": "two_candidates",
          "number": 0,
          "candidates": [_dummy_candidate(),
                         _dummy_candidate()],
          "estimator_spec": tu.dummy_estimator_spec(),
          "best_candidate_index": 0,
      }, {
          "testcase_name": "positive_number",
          "number": 1,
          "candidates": [_dummy_candidate()],
          "estimator_spec": tu.dummy_estimator_spec(),
          "best_candidate_index": 0,
      }, {
          "testcase_name": "zero_best_predictions",
          "number": 1,
          "candidates": [_dummy_candidate()],
          "estimator_spec": tu.dummy_estimator_spec(),
          "best_candidate_index": 0,
      }, {
          "testcase_name": "zero_best_loss",
          "number": 1,
          "candidates": [_dummy_candidate()],
          "estimator_spec": tu.dummy_estimator_spec(),
          "best_candidate_index": 0,
      }, {
          "testcase_name":
              "pass_subnetwork_report",
          "number":
              1,
          "candidates": [_dummy_candidate()],
          "estimator_spec":
              tu.dummy_estimator_spec(),
          "best_candidate_index":
              0,
          "subnetwork_reports_fn":
              lambda: {
                  "foo":
                      SubnetworkReport(
                          hparams={"dropout": 1.0},
                          attributes={"aoo": tf.constant("aoo")},
                          metrics={
                              "moo": (tf.constant("moo1"), tf.constant("moo2"))
                          })
              },
      })
  @test_util.run_in_graph_and_eager_modes
  def test_new(self,
               number,
               candidates,
               estimator_spec,
               best_candidate_index,
               subnetwork_reports_fn=None):
    """Constructs an `_Iteration` and checks its attributes round-trip."""
    if subnetwork_reports_fn is None:
      subnetwork_reports = {}
    else:
      subnetwork_reports = subnetwork_reports_fn()
    iteration = _Iteration(
        number=number,
        candidates=candidates,
        subnetwork_specs=None,
        estimator_spec=estimator_spec,
        best_candidate_index=best_candidate_index,
        summaries=[],
        subnetwork_reports=subnetwork_reports,
        train_manager=_TrainManager([], [],
                                    self.test_subdirectory,
                                    is_chief=True),
        previous_iteration=None,
        checkpoint=None)
    self.assertEqual(iteration.number, number)
    self.assertEqual(iteration.candidates, candidates)
    self.assertEqual(iteration.estimator_spec, estimator_spec)
    self.assertEqual(iteration.best_candidate_index, best_candidate_index)
    self.assertEqual(iteration.subnetwork_reports, subnetwork_reports)
  @parameterized.named_parameters(
      {
          "testcase_name": "negative_number",
          "number": -1,
      }, {
          "testcase_name": "float_number",
          "number": 1.213,
      }, {
          "testcase_name": "none_number",
          "number": None,
      }, {
          "testcase_name": "empty_candidates",
          "candidates": lambda: [],
      }, {
          "testcase_name": "none_candidates",
          "candidates": lambda: None,
      }, {
          "testcase_name": "non_list_candidates",
          "candidates": lambda: {
              "foo": _dummy_candidate()
          },
      }, {
          "testcase_name": "none_estimator_spec",
          "estimator_spec": None,
      }, {
          "testcase_name": "none_best_candidate_index",
          "best_candidate_index": None,
      }, {
          "testcase_name": "none_subnetwork_reports",
          "subnetwork_reports": lambda: None,
      })
  @test_util.run_in_graph_and_eager_modes
  def test_new_errors(self,
                      number=0,
                      candidates=lambda: [_dummy_candidate()],
                      estimator_spec=tu.dummy_estimator_spec(),
                      best_candidate_index=0,
                      subnetwork_reports=lambda: []):
    # NOTE(review): `tu.dummy_estimator_spec()` above is evaluated once at
    # definition time and shared across all test cases — presumably fine for
    # a read-only spec, but worth confirming.
    """Invalid constructor arguments must raise ValueError."""
    with self.assertRaises(ValueError):
      _Iteration(
          number=number,
          candidates=candidates(),
          subnetwork_specs=None,
          estimator_spec=estimator_spec,
          best_candidate_index=best_candidate_index,
          summaries=[],
          subnetwork_reports=subnetwork_reports(),
          train_manager=_TrainManager([], [],
                                      self.test_subdirectory,
                                      is_chief=True),
          previous_iteration=None,
          checkpoint=None)
class _FakeBuilder(SubnetworkBuilder):
  """Minimal `SubnetworkBuilder` stub for iteration tests."""
  def __init__(self, name, random_seed=11, chief_hook=None):
    # name: builder name exposed via the `name` property.
    # random_seed: value stored in the subnetwork's persisted_tensors.
    # chief_hook: optional hook returned from build_subnetwork_train_op.
    self._name = name
    self._random_seed = random_seed
    self._chief_hook = chief_hook
  @property
  def name(self):
    return self._name
  @property
  def seed(self):
    return self._random_seed
  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """Returns a dummy `Subnetwork` with the configured random seed."""
    return Subnetwork(
        last_layer=tu.dummy_tensor(),
        logits=tu.dummy_tensor([2, logits_dimension]),
        complexity=tu.dummy_tensor(),
        persisted_tensors={"random_seed": self._random_seed})
  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    # A no-op train op carrying the chief hook, when one was provided;
    # otherwise None.
    if self._chief_hook:
      return TrainOpSpec(
          train_op=tf.no_op(), chief_hooks=[self._chief_hook], hooks=None)
    return None
  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    # No mixture-weight training in this fake.
    return None
class _FakeEnsembleBuilder(object):
  """Stub ensemble builder that fabricates dummy ensemble specs."""

  def __init__(self,
               dict_predictions=False,
               eval_metric_ops_fn=None,
               export_output_key=None):
    """Records the knobs used when fabricating ensemble specs."""
    self._dict_predictions = dict_predictions
    # Fall back to a trivial constant metric when no metric fn is supplied.
    self._eval_metric_ops_fn = (
        eval_metric_ops_fn or
        (lambda: {"a": (tf.constant(1), tf.constant(1))}))
    self._export_output_key = export_output_key

  def build_ensemble_spec(self,
                          name,
                          candidate,
                          ensembler,
                          subnetwork_specs,
                          summary,
                          features,
                          mode,
                          iteration_number,
                          labels=None,
                          previous_ensemble_spec=None,
                          my_ensemble_index=None,
                          params=None,
                          previous_iteration_checkpoint=None):
    """Returns a dummy ensemble spec; only name/candidate/previous matter."""
    del (ensembler, subnetwork_specs, summary, features, mode, labels,
         iteration_number, params, my_ensemble_index,
         previous_iteration_checkpoint)  # Unused by this stub.
    # Pretend one subnetwork was inherited from a previous ensemble, if any.
    num_subnetworks = 1 if previous_ensemble_spec else 0
    return tu.dummy_ensemble_spec(
        name=name,
        num_subnetworks=num_subnetworks,
        random_seed=candidate.subnetwork_builders[0].seed,
        subnetwork_builders=candidate.subnetwork_builders,
        dict_predictions=self._dict_predictions,
        eval_metrics=tu.create_ensemble_metrics(
            metric_fn=self._eval_metric_ops_fn),
        export_output_key=self._export_output_key,
        variables=[tf.Variable(1.)])
class _FakeSubnetworkManager(object):
  """Stub subnetwork manager that wraps builders in dummy specs."""

  def build_subnetwork_spec(self,
                            name,
                            subnetwork_builder,
                            summary,
                            features,
                            mode,
                            labels=None,
                            previous_ensemble=None,
                            config=None,
                            params=None):
    """Returns a `_SubnetworkSpec` whose train op comes from the builder."""
    del (summary, features, mode, labels, previous_ensemble, params,
         config)  # Unused by this stub.
    # `_FakeBuilder.build_subnetwork_train_op` only inspects its chief hook,
    # so all seven positional arguments can safely be None.
    train_op = subnetwork_builder.build_subnetwork_train_op(*([None] * 7))
    return _SubnetworkSpec(
        name=name,
        subnetwork=None,
        builder=subnetwork_builder,
        step=tf.Variable(0, dtype=tf.int64),
        variables=[tf.Variable(1.)],
        predictions=None,
        loss=None,
        train_op=train_op,
        eval_metrics=tu.create_subnetwork_metrics(
            metric_fn=lambda: {"a": (tf.constant(1), tf.constant(1))}))
class _FakeCandidateBuilder(object):
  """Stub candidate builder that wraps ensemble specs into candidates."""

  def build_candidate(self, ensemble_spec, training, summary, rebuilding):
    """Returns a `_Candidate` for the given ensemble spec.

    The `training`, `summary`, and `rebuilding` arguments are accepted for
    interface compatibility but ignored.
    """
    del training, summary, rebuilding  # Unused by this stub.
    return _Candidate(
        ensemble_spec=ensemble_spec,
        adanet_loss=ensemble_spec.adanet_loss,
        variables=[tf.Variable(1.)])
def _export_output_tensors(export_outputs):
  """Extracts evaluatable tensors from an `export_outputs` dict.

  Classification outputs become tuples of (classes-as-numbers, scores),
  regression outputs become their value tensor, and predict outputs become
  their raw outputs dict. Entries of any other type are skipped.
  """
  outputs = {}
  export = tf.estimator.export
  for key, export_output in export_outputs.items():
    if isinstance(export_output, export.ClassificationOutput):
      parts = []
      if export_output.classes is not None:
        # Classes are string tensors; convert for numeric comparison.
        parts.append(tf.strings.to_number(export_output.classes))
      if export_output.scores is not None:
        parts.append(export_output.scores)
      outputs[key] = tuple(parts)
    elif isinstance(export_output, export.RegressionOutput):
      outputs[key] = export_output.value
    elif isinstance(export_output, export.PredictOutput):
      outputs[key] = export_output.outputs
  return outputs
class _FakeEnsembler(object):
@property
def name(self):
return "fake_ensembler"
class _FakeIteration(object):
  """Stub of a previous iteration holding a single fake candidate."""

  def __init__(self, fake_ensemble_spec):
    """Wraps `fake_ensemble_spec` as the sole candidate of iteration 0."""
    self.number = 0
    self.checkpoint = tf.train.Checkpoint()
    candidate = _FakeCandidateBuilder().build_candidate(
        fake_ensemble_spec, None, None, None)
    self.candidates = [candidate]
class IterationBuilderTest(tu.AdanetTestCase):
  """Tests for `_IterationBuilder` construction and iteration building."""

  @parameterized.named_parameters(
      {
          "testcase_name": "negative_max_steps",
          "max_steps": -1,
      }, {
          "testcase_name": "zero_max_steps",
          "max_steps": 0,
      })
  @test_util.run_in_graph_and_eager_modes
  def test_init_errors(self, max_steps):
    """Tests that a non-positive `max_steps` is rejected at construction."""
    with self.assertRaises(ValueError):
      _IterationBuilder(
          _FakeCandidateBuilder(),
          _FakeSubnetworkManager(),
          _FakeEnsembleBuilder(),
          summary_maker=_ScopedSummary,
          ensemblers=[_FakeEnsembler()],
          max_steps=max_steps)

  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          "testcase_name": "single_subnetwork_fn",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders": [_FakeBuilder("training")],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "want_loss": 1.403943,
          "want_predictions": 2.129,
          "want_best_candidate_index": 0,
      },
      {
          "testcase_name":
              "single_subnetwork_fn_mock_summary",
          "ensemble_builder":
              _FakeEnsembleBuilder(),
          "subnetwork_builders": [_FakeBuilder("training")],
          "summary_maker":
              functools.partial(_TPUScopedSummary, logdir="/tmp/fakedir"),
          "features":
              lambda: [[1., -1., 0.]],
          "labels":
              lambda: [1],
          "want_loss":
              1.403943,
          "want_predictions":
              2.129,
          "want_best_candidate_index":
              0,
      },
      {
          "testcase_name":
              "single_subnetwork_with_eval_metrics",
          "ensemble_builder":
              _FakeEnsembleBuilder(eval_metric_ops_fn=lambda:
                                   {"a": (tf.constant(1), tf.constant(2))}),
          "subnetwork_builders": [_FakeBuilder("training",),],
          "mode":
              tf.estimator.ModeKeys.EVAL,
          "features":
              lambda: [[1., -1., 0.]],
          "labels":
              lambda: [1],
          "want_loss":
              1.403943,
          "want_predictions":
              2.129,
          "want_eval_metric_ops": ["a", "iteration"],
          "want_best_candidate_index":
              0,
      },
      {
          "testcase_name":
              "single_subnetwork_with_non_tensor_eval_metric_op",
          "ensemble_builder":
              _FakeEnsembleBuilder(eval_metric_ops_fn=lambda:
                                   {"a": (tf.constant(1), tf.no_op())}),
          "subnetwork_builders": [_FakeBuilder("training",),],
          "mode":
              tf.estimator.ModeKeys.EVAL,
          "features":
              lambda: [[1., -1., 0.]],
          "labels":
              lambda: [1],
          "want_loss":
              1.403943,
          "want_predictions":
              2.129,
          "want_eval_metric_ops": ["a", "iteration"],
          "want_best_candidate_index":
              0,
      },
      {
          "testcase_name": "single_subnetwork_done_training_fn",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders": [_FakeBuilder("done")],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "want_loss": 1.403943,
          "want_predictions": 2.129,
          "want_best_candidate_index": 0,
      },
      {
          "testcase_name": "single_dict_predictions_subnetwork_fn",
          "ensemble_builder": _FakeEnsembleBuilder(dict_predictions=True),
          "subnetwork_builders": [_FakeBuilder("training")],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "want_loss": 1.403943,
          "want_predictions": {
              "classes": 2,
              "logits": 2.129
          },
          "want_best_candidate_index": 0,
      },
      {
          "testcase_name": "previous_ensemble",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders": [_FakeBuilder("training")],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "previous_iteration":
              lambda: _FakeIteration(
                  tu.dummy_ensemble_spec("old", variables=[tf.Variable(1.)])),
          "want_loss": 1.403943,
          "want_predictions": 2.129,
          "want_best_candidate_index": 1,
      },
      {
          "testcase_name":
              "previous_ensemble_is_best",
          "ensemble_builder":
              _FakeEnsembleBuilder(),
          "subnetwork_builders": [_FakeBuilder("training")],
          "features":
              lambda: [[1., -1., 0.]],
          "labels":
              lambda: [1],
          "previous_iteration":
              lambda: _FakeIteration(
                  tu.dummy_ensemble_spec(
                      "old", random_seed=12, variables=[tf.Variable(1.)])),
          "want_loss":
              -.437,
          "want_predictions":
              .688,
          "want_best_candidate_index":
              0,
      },
      {
          "testcase_name":
              "previous_ensemble_spec_and_eval_metrics",
          "ensemble_builder":
              _FakeEnsembleBuilder(eval_metric_ops_fn=lambda:
                                   {"a": (tf.constant(1), tf.constant(2))}),
          "subnetwork_builders": [_FakeBuilder("training")],
          "mode":
              tf.estimator.ModeKeys.EVAL,
          "features":
              lambda: [[1., -1., 0.]],
          "labels":
              lambda: [1],
          "previous_iteration":
              lambda: _FakeIteration(
                  tu.dummy_ensemble_spec(
                      "old",
                      eval_metrics=tu.create_ensemble_metrics(
                          metric_fn=lambda:
                          {"a": (tf.constant(1), tf.constant(2))}),
                      variables=[tf.Variable(1.)])),
          "want_loss":
              1.403943,
          "want_predictions":
              2.129,
          "want_eval_metric_ops": ["a", "iteration"],
          "want_best_candidate_index":
              1,
      },
      {
          "testcase_name": "two_subnetwork_fns",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders": [
              _FakeBuilder("training"),
              _FakeBuilder("training2", random_seed=7)
          ],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "want_loss": 1.40394,
          "want_predictions": 2.129,
          "want_best_candidate_index": 0,
      },
      {
          "testcase_name": "two_subnetwork_fns_other_best",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders": [
              _FakeBuilder("training"),
              _FakeBuilder("training2", random_seed=12)
          ],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "want_loss": -.437,
          "want_predictions": .688,
          "want_best_candidate_index": 1,
      },
      {
          "testcase_name": "two_subnetwork_one_training_fns",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders":
              [_FakeBuilder("training"),
               _FakeBuilder("done", random_seed=7)],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "want_loss": 1.403943,
          "want_predictions": 2.129,
          "want_best_candidate_index": 0,
      },
      {
          "testcase_name": "two_subnetwork_done_training_fns",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders":
              [_FakeBuilder("done"),
               _FakeBuilder("done1", random_seed=7)],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "want_loss": 1.403943,
          "want_predictions": 2.129,
          "want_best_candidate_index": 0,
      },
      {
          "testcase_name": "two_dict_predictions_subnetwork_fns",
          "ensemble_builder": _FakeEnsembleBuilder(dict_predictions=True),
          "subnetwork_builders": [
              _FakeBuilder("training"),
              _FakeBuilder("training2", random_seed=7)
          ],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "want_loss": 1.404,
          "want_predictions": {
              "classes": 2,
              "logits": 2.129
          },
          "want_best_candidate_index": 0,
      },
      {
          "testcase_name":
              "two_dict_predictions_subnetwork_fns_predict_classes",
          "ensemble_builder":
              _FakeEnsembleBuilder(
                  dict_predictions=True,
                  export_output_key=tu.ExportOutputKeys.CLASSIFICATION_CLASSES),
          "subnetwork_builders": [
              _FakeBuilder("training"),
              _FakeBuilder("training2", random_seed=7)
          ],
          "mode":
              tf.estimator.ModeKeys.PREDICT,
          "features":
              lambda: [[1., -1., 0.]],
          "labels":
              lambda: [1],
          "want_loss":
              1.404,
          "want_predictions": {
              "classes": 2,
              "logits": 2.129
          },
          "want_best_candidate_index":
              0,
          "want_export_outputs": {
              tu.ExportOutputKeys.CLASSIFICATION_CLASSES: [2.129],
              "serving_default": [2.129],
          },
      },
      {
          "testcase_name":
              "two_dict_predictions_subnetwork_fns_predict_scores",
          "ensemble_builder":
              _FakeEnsembleBuilder(
                  dict_predictions=True,
                  export_output_key=tu.ExportOutputKeys.CLASSIFICATION_SCORES),
          "subnetwork_builders": [
              _FakeBuilder("training"),
              _FakeBuilder("training2", random_seed=7)
          ],
          "mode":
              tf.estimator.ModeKeys.PREDICT,
          "features":
              lambda: [[1., -1., 0.]],
          "labels":
              lambda: [1],
          "want_loss":
              1.404,
          "want_predictions": {
              "classes": 2,
              "logits": 2.129
          },
          "want_best_candidate_index":
              0,
          "want_export_outputs": {
              tu.ExportOutputKeys.CLASSIFICATION_SCORES: [2.129],
              "serving_default": [2.129],
          },
      },
      {
          "testcase_name":
              "two_dict_predictions_subnetwork_fns_predict_regression",
          "ensemble_builder":
              _FakeEnsembleBuilder(
                  dict_predictions=True,
                  export_output_key=tu.ExportOutputKeys.REGRESSION),
          "subnetwork_builders": [
              _FakeBuilder("training"),
              _FakeBuilder("training2", random_seed=7)
          ],
          "mode":
              tf.estimator.ModeKeys.PREDICT,
          "features":
              lambda: [[1., -1., 0.]],
          "labels":
              lambda: [1],
          "want_predictions": {
              "classes": 2,
              "logits": 2.129
          },
          "want_best_candidate_index":
              0,
          "want_export_outputs": {
              tu.ExportOutputKeys.REGRESSION: 2.129,
              "serving_default": 2.129,
          },
      },
      {
          "testcase_name":
              "two_dict_predictions_subnetwork_fns_predict_prediction",
          "ensemble_builder":
              _FakeEnsembleBuilder(
                  dict_predictions=True,
                  export_output_key=tu.ExportOutputKeys.PREDICTION),
          "subnetwork_builders": [
              _FakeBuilder("training"),
              _FakeBuilder("training2", random_seed=7)
          ],
          "mode":
              tf.estimator.ModeKeys.PREDICT,
          "features":
              lambda: [[1., -1., 0.]],
          "labels":
              lambda: [1],
          "want_predictions": {
              "classes": 2,
              "logits": 2.129
          },
          "want_best_candidate_index":
              0,
          "want_export_outputs": {
              tu.ExportOutputKeys.PREDICTION: {
                  "classes": 2,
                  "logits": 2.129
              },
              "serving_default": {
                  "classes": 2,
                  "logits": 2.129
              },
          },
      },
      {
          "testcase_name": "chief_session_run_hook",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders": [
              _FakeBuilder("training", chief_hook=tu.ModifierSessionRunHook())
          ],
          "features": lambda: [[1., -1., 0.]],
          "labels": lambda: [1],
          "want_loss": 1.403943,
          "want_predictions": 2.129,
          "want_best_candidate_index": 0,
          "want_chief_hooks": True,
      })
  @test_util.run_in_graph_and_eager_modes
  def test_build_iteration(self,
                           ensemble_builder,
                           subnetwork_builders,
                           features,
                           labels,
                           want_predictions,
                           want_best_candidate_index,
                           want_eval_metric_ops=(),
                           previous_iteration=None,
                           want_loss=None,
                           want_export_outputs=None,
                           mode=tf.estimator.ModeKeys.TRAIN,
                           summary_maker=_ScopedSummary,
                           want_chief_hooks=False):
    """Builds an iteration end-to-end and checks its `EstimatorSpec`.

    Verifies predictions, loss, eval metric keys, best candidate index, and
    (in PREDICT mode) export outputs against golden values. Features, labels,
    and the previous iteration are passed as lambdas so graph ops are created
    inside the test graph.
    """
    with context.graph_mode():
      tf_compat.v1.train.create_global_step()
      builder = _IterationBuilder(
          _FakeCandidateBuilder(),
          _FakeSubnetworkManager(),
          ensemble_builder,
          summary_maker=summary_maker,
          ensemblers=[_FakeEnsembler()],
          max_steps=1)
      iteration = builder.build_iteration(
          base_global_step=0,
          iteration_number=0,
          ensemble_candidates=[
              EnsembleCandidate(b.name, [b], None) for b in subnetwork_builders
          ],
          previous_iteration=previous_iteration()
          if previous_iteration else None,
          subnetwork_builders=subnetwork_builders,
          features=features(),
          labels=labels(),
          mode=mode,
          config=tf.estimator.RunConfig(model_dir=self.test_subdirectory))
      init = tf.group(tf_compat.v1.global_variables_initializer(),
                      tf_compat.v1.local_variables_initializer())
      self.evaluate(init)
      estimator_spec = iteration.estimator_spec
      if want_chief_hooks:
        self.assertNotEmpty(iteration.estimator_spec.training_chief_hooks)
      self.assertAllClose(
          want_predictions,
          self.evaluate(estimator_spec.predictions),
          atol=1e-3)
      # A default architecture metric is always included, even if we don't
      # specify one.
      eval_metric_ops = estimator_spec.eval_metric_ops
      if "architecture/adanet/ensembles" in eval_metric_ops:
        del eval_metric_ops["architecture/adanet/ensembles"]
      self.assertEqual(set(want_eval_metric_ops), set(eval_metric_ops.keys()))
      self.assertEqual(want_best_candidate_index,
                       self.evaluate(iteration.best_candidate_index))
      if mode == tf.estimator.ModeKeys.PREDICT:
        self.assertIsNotNone(estimator_spec.export_outputs)
        self.assertAllClose(
            want_export_outputs,
            self.evaluate(
                _export_output_tensors(estimator_spec.export_outputs)),
            atol=1e-3)
        # PREDICT mode must not define a train op or a loss.
        self.assertIsNone(iteration.estimator_spec.train_op)
        self.assertIsNone(iteration.estimator_spec.loss)
        self.assertIsNotNone(want_export_outputs)
        return
      self.assertAlmostEqual(
          want_loss, self.evaluate(iteration.estimator_spec.loss), places=3)
      self.assertIsNone(iteration.estimator_spec.export_outputs)
      if mode == tf.estimator.ModeKeys.TRAIN:
        self.evaluate(iteration.estimator_spec.train_op)

  @parameterized.named_parameters(
      {
          "testcase_name": "empty_subnetwork_builders",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders": [],
          "want_raises": ValueError,
      }, {
          "testcase_name": "same_subnetwork_builder_names",
          "ensemble_builder": _FakeEnsembleBuilder(),
          "subnetwork_builders":
              [_FakeBuilder("same_name"),
               _FakeBuilder("same_name")],
          "want_raises": ValueError,
      }, {
          "testcase_name":
              "same_ensembler_names",
          "ensemble_builder":
              _FakeEnsembleBuilder(),
          "multiple_candidates": True,
          "subnetwork_builders": [_FakeBuilder("fake_builder_name")],
          "want_raises":
              ValueError,
      }, {
          "testcase_name":
              "predict_invalid",
          "ensemble_builder":
              _FakeEnsembleBuilder(
                  dict_predictions=True,
                  export_output_key=tu.ExportOutputKeys.INVALID),
          "subnetwork_builders": [
              _FakeBuilder("training"),
              _FakeBuilder("training2", random_seed=7)
          ],
          "mode":
              tf.estimator.ModeKeys.PREDICT,
          "want_raises":
              TypeError,
      })
  @test_util.run_in_graph_and_eager_modes
  def test_build_iteration_error(self,
                                 ensemble_builder,
                                 subnetwork_builders,
                                 want_raises,
                                 multiple_candidates=False,
                                 mode=tf.estimator.ModeKeys.TRAIN,
                                 summary_maker=_ScopedSummary):
    """Tests that invalid candidate configurations raise at build time."""
    with context.graph_mode():
      tf_compat.v1.train.create_global_step()
      builder = _IterationBuilder(
          _FakeCandidateBuilder(),
          _FakeSubnetworkManager(),
          ensemble_builder,
          summary_maker=summary_maker,
          ensemblers=[_FakeEnsembler()],
          max_steps=100)
      features = [[1., -1., 0.]]
      labels = [1]
      ensemble_candidates = [
          EnsembleCandidate("test", subnetwork_builders, None)
      ]
      if multiple_candidates:
        # Duplicate the candidate name to trigger the same-name error.
        ensemble_candidates += [
            EnsembleCandidate("test", subnetwork_builders, None)
        ]
      with self.assertRaises(want_raises):
        builder.build_iteration(
            base_global_step=0,
            iteration_number=0,
            ensemble_candidates=ensemble_candidates,
            subnetwork_builders=subnetwork_builders,
            features=features,
            labels=labels,
            mode=mode,
            config=tf.estimator.RunConfig(model_dir=self.test_subdirectory))
class _HeadEnsembleBuilder(object):
  """Ensemble builder stub that delegates outputs to a `tf.estimator` head."""

  def __init__(self, head):
    """Stores the head used to produce estimator specs."""
    self._head = head

  def build_ensemble_spec(self,
                          name,
                          candidate,
                          ensembler,
                          subnetwork_specs,
                          summary,
                          features,
                          mode,
                          iteration_number,
                          labels=None,
                          previous_ensemble_spec=None,
                          my_ensemble_index=None,
                          params=None,
                          previous_iteration_checkpoint=None):
    """Returns an `_EnsembleSpec` whose predictions come from the head."""
    del (ensembler, subnetwork_specs, summary, iteration_number,
         previous_ensemble_spec, my_ensemble_index, params,
         previous_iteration_checkpoint)  # Unused by this stub.
    # Fixed logits keep the head's outputs deterministic across tests.
    head_spec = self._head.create_estimator_spec(
        features=features, mode=mode, labels=labels, logits=[[.5]])
    return _EnsembleSpec(
        name=name,
        ensemble=None,
        architecture=_Architecture("foo", "bar"),
        subnetwork_builders=candidate.subnetwork_builders,
        predictions=head_spec.predictions,
        step=tf.Variable(0, dtype=tf.int64),
        variables=[tf.Variable(1.)],
        loss=None,
        adanet_loss=.1,
        train_op=None,
        eval_metrics=None,
        export_outputs=head_spec.export_outputs)
class IterationExportOutputsTest(tu.AdanetTestCase):
  """Tests that iteration export outputs match the head's own outputs."""

  @parameterized.named_parameters(
      {
          "testcase_name": "regression_head",
          "head": regression_head.RegressionHead(),
      }, {
          "testcase_name": "binary_classification_head",
          "head": binary_class_head.BinaryClassHead(),
      })
  @test_util.run_in_graph_and_eager_modes
  def test_head_export_outputs(self, head):
    """Compares iteration export outputs against the head's own spec.

    Builds a PREDICT-mode iteration backed by `_HeadEnsembleBuilder`, then
    checks each export output (regression, classification, or predict)
    against the spec the head creates directly for the same fixed logits.
    """
    with context.graph_mode():
      tf_compat.v1.train.create_global_step()
      ensemble_builder = _HeadEnsembleBuilder(head)
      builder = _IterationBuilder(
          _FakeCandidateBuilder(),
          _FakeSubnetworkManager(),
          ensemble_builder,
          summary_maker=_ScopedSummary,
          ensemblers=[_FakeEnsembler()],
          max_steps=10)
      features = [[1., -1., 0.]]
      labels = [1]
      mode = tf.estimator.ModeKeys.PREDICT
      subnetwork_builders = [_FakeBuilder("test")]
      iteration = builder.build_iteration(
          base_global_step=0,
          iteration_number=0,
          ensemble_candidates=[
              EnsembleCandidate("test", subnetwork_builders, [tf.Variable(1.)])
          ],
          subnetwork_builders=subnetwork_builders,
          features=features,
          labels=labels,
          config=tf.estimator.RunConfig(model_dir=self.test_subdirectory),
          mode=mode)
      # Compare iteration outputs with default head outputs.
      spec = head.create_estimator_spec(
          features=features, labels=labels, mode=mode, logits=[[.5]])
      self.assertEqual(
          len(spec.export_outputs),
          len(iteration.estimator_spec.export_outputs))
      for key in spec.export_outputs:
        if isinstance(spec.export_outputs[key],
                      tf.estimator.export.RegressionOutput):
          self.assertAlmostEqual(
              self.evaluate(spec.export_outputs[key].value),
              self.evaluate(iteration.estimator_spec.export_outputs[key].value))
          continue
        if isinstance(spec.export_outputs[key],
                      tf.estimator.export.ClassificationOutput):
          self.assertAllClose(
              self.evaluate(spec.export_outputs[key].scores),
              self.evaluate(
                  iteration.estimator_spec.export_outputs[key].scores))
          self.assertAllEqual(
              self.evaluate(spec.export_outputs[key].classes),
              self.evaluate(
                  iteration.estimator_spec.export_outputs[key].classes))
          continue
        if isinstance(spec.export_outputs[key],
                      tf.estimator.export.PredictOutput):
          if "classes" in spec.export_outputs[key].outputs:
            # Verify string Tensor outputs separately.
            self.assertAllEqual(
                self.evaluate(spec.export_outputs[key].outputs["classes"]),
                self.evaluate(iteration.estimator_spec.export_outputs[key]
                              .outputs["classes"]))
            del spec.export_outputs[key].outputs["classes"]
            del iteration.estimator_spec.export_outputs[key].outputs["classes"]
          if "all_classes" in spec.export_outputs[key].outputs:
            # Verify string Tensor outputs separately.
            self.assertAllEqual(
                self.evaluate(spec.export_outputs[key].outputs["all_classes"]),
                self.evaluate(iteration.estimator_spec.export_outputs[key]
                              .outputs["all_classes"]))
            del spec.export_outputs[key].outputs["all_classes"]
            del iteration.estimator_spec.export_outputs[key].outputs[
                "all_classes"]
          self.assertAllClose(
              self.evaluate(spec.export_outputs[key].outputs),
              self.evaluate(
                  iteration.estimator_spec.export_outputs[key].outputs))
          continue
        self.fail("Invalid export_output for {}.".format(key))
# Standard TF test entry point: runs every test in this module.
if __name__ == "__main__":
  tf.test.main()
| 36,973 | 34.246902 | 80 | py |
adanet | adanet-master/adanet/core/summary.py | """Tensorboard summaries for the single graph AdaNet implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import os
from absl import logging
from adanet import tf_compat
import six
import tensorflow.compat.v1 as tf_v1
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorboard import compat
from tensorflow.python.ops import summary_op_util
from tensorflow.python.summary import summary as summary_lib
# pylint: enable=g-direct-tensorflow-import
_DEFAULT_SCOPE = "default"
@six.add_metaclass(abc.ABCMeta)
class Summary(object):
  """Interface for writing summaries to Tensorboard.

  Each of the abstract summary ops below returns a scalar string `Tensor`
  holding a serialized `tf.Summary` protocol buffer.
  """

  @abc.abstractmethod
  def scalar(self, name, tensor, family=None, description=None):
    """Outputs a `tf.Summary` protocol buffer containing a single scalar value.

    The generated tf.Summary has a Tensor.proto containing the input Tensor.

    Args:
      name: A name for this summary. The summary tag used for TensorBoard will
        be this name prefixed by any active name scopes.
      tensor: A real numeric scalar value, convertible to a float32 Tensor.
      family: Optional; if provided, used as the prefix of the summary tag name,
        which controls the tab name used for display on Tensorboard. DEPRECATED
        in TF 2.
      description: Optional long-form description for this summary, as a
        constant str. Markdown is supported. Defaults to empty.

    Returns:
      A scalar `Tensor` of type `string`. Which contains a `tf.Summary`
      protobuf.

    Raises:
      ValueError: If tensor has the wrong shape or type.
    """

  @abc.abstractmethod
  def image(self, name, tensor, max_outputs=3, family=None, description=None):
    """Outputs a `tf.Summary` protocol buffer with images.

    The summary has up to `max_outputs` summary values containing images. The
    images are built from `tensor` which must be 4-D with shape `[batch_size,
    height, width, channels]` and where `channels` can be:

    * 1: `tensor` is interpreted as Grayscale.
    * 3: `tensor` is interpreted as RGB.
    * 4: `tensor` is interpreted as RGBA.

    The images have the same number of channels as the input tensor. For float
    input, the values are normalized one image at a time to fit in the range
    `[0, 255]`. `uint8` values are unchanged. The op uses two different
    normalization algorithms:

    * If the input values are all positive, they are rescaled so the largest
    one is 255.
    * If any input value is negative, the values are shifted so input value 0.0
    is at 127. They are then rescaled so that either the smallest value is 0,
    or the largest one is 255.

    The `tag` in the outputted tf.Summary.Value protobufs is generated based on
    the
    name, with a suffix depending on the max_outputs setting:

    * If `max_outputs` is 1, the summary value tag is '*name*/image'.
    * If `max_outputs` is greater than 1, the summary value tags are
    generated sequentially as '*name*/image/0', '*name*/image/1', etc.

    Args:
      name: A name for this summary. The summary tag used for TensorBoard will
        be this name prefixed by any active name scopes.
      tensor: A Tensor representing pixel data with shape [k, h, w, c], where k
        is the number of images, h and w are the height and width of the images,
        and c is the number of channels, which should be 1, 2, 3, or 4
        (grayscale, grayscale with alpha, RGB, RGBA). Any of the dimensions may
        be statically unknown (i.e., None). Floating point data will be clipped
        to the range [0,1).
      max_outputs: Optional int or rank-0 integer Tensor. At most this many
        images will be emitted at each step. When more than max_outputs many
        images are provided, the first max_outputs many images will be used and
        the rest silently discarded.
      family: Optional; if provided, used as the prefix of the summary tag name,
        which controls the tab name used for display on Tensorboard. DEPRECATED
        in TF 2.
      description: Optional long-form description for this summary, as a
        constant str. Markdown is supported. Defaults to empty.

    Returns:
      A scalar `Tensor` of type `string`. The serialized `tf.Summary` protocol
      buffer.
    """

  @abc.abstractmethod
  def histogram(self,
                name,
                values,
                family=None,
                buckets=None,
                description=None):
    """Outputs a `tf.Summary` protocol buffer with a histogram.

    Adding a histogram summary makes it possible to visualize your data's
    distribution in TensorBoard. You can see a detailed explanation of the
    TensorBoard histogram dashboard
    [here](https://www.tensorflow.org/get_started/tensorboard_histograms).

    The generated [`tf.Summary`](
    tensorflow/core/framework/summary.proto)
    has one summary value containing a histogram for `values`.

    This op reports an `InvalidArgument` error if any value is not finite.

    Args:
      name: A name for this summary. The summary tag used for TensorBoard will
        be this name prefixed by any active name scopes.
      values: A Tensor of any shape. Must be castable to float64.
      family: Optional; if provided, used as the prefix of the summary tag name,
        which controls the tab name used for display on Tensorboard. DEPRECATED
        in TF 2.
      buckets: Optional positive int. The output will have this many buckets,
        except in two edge cases. If there is no data, then there are no
        buckets. If there is data but all points have the same value, then there
        is one bucket whose left and right endpoints are the same.
      description: Optional long-form description for this summary, as a
        constant str. Markdown is supported. Defaults to empty.

    Returns:
      A scalar `Tensor` of type `string`. The serialized `tf.Summary` protocol
      buffer.
    """

  @abc.abstractmethod
  def audio(self,
            name,
            tensor,
            sample_rate,
            max_outputs=3,
            family=None,
            encoding=None,
            description=None):
    """Writes an audio summary.

    Args:
      name: A name for this summary. The summary tag used for TensorBoard will
        be this name prefixed by any active name scopes.
      tensor: A Tensor representing audio data with shape [k, t, c], where k is
        the number of audio clips, t is the number of frames, and c is the
        number of channels. Elements should be floating-point values in [-1.0,
        1.0]. Any of the dimensions may be statically unknown (i.e., None).
      sample_rate: An int or rank-0 int32 Tensor that represents the sample
        rate, in Hz. Must be positive.
      max_outputs: Optional int or rank-0 integer Tensor. At most this many
        audio clips will be emitted at each step. When more than max_outputs
        many clips are provided, the first max_outputs many clips will be used
        and the rest silently discarded.
      family: Optional; if provided, used as the prefix of the summary tag name,
        which controls the tab name used for display on Tensorboard. DEPRECATED
        in TF 2.
      encoding: Optional constant str for the desired encoding. Only "wav" is
        currently supported, but this is not guaranteed to remain the default,
        so if you want "wav" in particular, set this explicitly.
      description: Optional long-form description for this summary, as a
        constant str. Markdown is supported. Defaults to empty.

    Returns:
      A scalar `Tensor` of type `string`. The serialized `tf.Summary` protocol
      buffer.
    """
def _strip_scope(name, scope, additional_scope):
  """Returns `name` with scope prefixes removed.

  Every occurrence of "<additional_scope>/" is deleted, then the first
  occurrence of "<scope>/" — or "<_DEFAULT_SCOPE>/" when `scope` is empty —
  is deleted.
  """
  if additional_scope:
    name = name.replace("{}/".format(additional_scope), "")
  effective_scope = scope or _DEFAULT_SCOPE
  return name.replace("{}/".format(effective_scope), "", 1)
class _ScopedSummary(Summary):
  """Records summaries in a given scope.

  Each scope gets assigned a different collection where summary ops gets added.
  This allows Tensorboard to display summaries with different scopes but the
  same name in the same charts.
  """

  def __init__(self, scope=None, skip_summary=False, namespace=None):
    """Initializes a `_ScopedSummary`.

    Args:
      scope: String scope name.
      skip_summary: Whether to record summary ops.
      namespace: Optional string namespace for the summary.

    Returns:
      A `_ScopedSummary` instance.
    """
    # TPU does not support this summary implementation: force-skip and warn
    # once when constructed inside a TPU sharding context.
    if tf_compat.tpu_function.get_tpu_context().number_of_shards:
      logging.log_first_n(
          logging.WARN,
          "Scoped summaries will be skipped since they do not support TPU", 1)
      skip_summary = True
    self._scope = scope
    self._namespace = namespace
    # Extra scope stripped from tags while a `current_scope` context is active.
    self._additional_scope = None
    self._skip_summary = skip_summary
    # Every summary op created through this instance; used by `merge_all`.
    self._summary_ops = []
    # TF1 summary functions that actually create the ops.
    self._actual_summary_scalar_fn = summary_lib.scalar
    self._actual_summary_image_fn = summary_lib.image
    self._actual_summary_histogram_fn = summary_lib.histogram
    self._actual_summary_audio_fn = summary_lib.audio

  @property
  def scope(self):
    """Returns scope string."""
    return self._scope

  @property
  def namespace(self):
    """Returns namespace string."""
    return self._namespace

  @contextlib.contextmanager
  def current_scope(self):
    """Registers the current context's scope to strip it from summary tags."""
    self._additional_scope = tf_compat.v1.get_default_graph().get_name_scope()
    yield
    self._additional_scope = None

  @contextlib.contextmanager
  def _strip_tag_scope(self):
    """Monkey patches `summary_op_util.summary_scope` to strip tag scopes."""
    original_summary_scope = summary_op_util.summary_scope

    @contextlib.contextmanager
    def strip_tag_scope_fn(name, family=None, default_name=None, values=None):
      tag, scope = (None, None)
      with original_summary_scope(name, family, default_name, values) as (t, s):
        # Remove this instance's scope (and any active `current_scope`) from
        # the tag so same-named summaries line up across scopes.
        tag = _strip_scope(t, self.scope, self._additional_scope)
        scope = s
      yield tag, scope

    # NOTE(review): global patching is not thread-safe; assumes
    # single-threaded graph construction — confirm before concurrent use.
    summary_op_util.summary_scope = strip_tag_scope_fn
    yield
    summary_op_util.summary_scope = original_summary_scope

  def _prefix_scope(self, name):
    """Prefixes summary name with scope."""
    if self._scope:
      if name[0] == "/":
        name = name[1:]
      return "{scope}/{name}".format(scope=self._scope, name=name)
    return name

  def scalar(self, name, tensor, family=None):
    """See `Summary`."""
    if self._skip_summary:
      return tf.constant("")
    with self._strip_tag_scope():
      # `collections=[]` keeps the op out of the default SUMMARIES collection;
      # ops are tracked in `self._summary_ops` instead.
      summary = self._actual_summary_scalar_fn(
          name=self._prefix_scope(name),
          tensor=tensor,
          family=family,
          collections=[])
    self._summary_ops.append(summary)
    return summary

  def image(self, name, tensor, max_outputs=3, family=None):
    """See `Summary`."""
    if self._skip_summary:
      return tf.constant("")
    with self._strip_tag_scope():
      summary = self._actual_summary_image_fn(
          name=self._prefix_scope(name),
          tensor=tensor,
          max_outputs=max_outputs,
          family=family,
          collections=[])
    self._summary_ops.append(summary)
    return summary

  def histogram(self, name, values, family=None):
    """See `Summary`."""
    if self._skip_summary:
      return tf.constant("")
    with self._strip_tag_scope():
      summary = self._actual_summary_histogram_fn(
          name=self._prefix_scope(name),
          values=values,
          family=family,
          collections=[])
    self._summary_ops.append(summary)
    return summary

  def audio(self, name, tensor, sample_rate, max_outputs=3, family=None):
    """See `Summary`."""
    if self._skip_summary:
      return tf.constant("")
    with self._strip_tag_scope():
      summary = self._actual_summary_audio_fn(
          name=self._prefix_scope(name),
          tensor=tensor,
          sample_rate=sample_rate,
          max_outputs=max_outputs,
          family=family,
          collections=[])
    self._summary_ops.append(summary)
    return summary

  def merge_all(self):
    """Returns the list of this graph's scoped summary ops.

    Note: this is an abuse of the tf.summary.merge_all API since it is expected
    to return a summary op with all summaries merged. However, ScopedSummary is
    only used in the internal implementation, so this should be OK.
    """
    current_graph = tf_compat.v1.get_default_graph()
    # Filter to ops that were built in the current default graph.
    return [op for op in self._summary_ops if op.graph == current_graph]
# TODO: _ScopedSummary and _ScopedSummaryV2 share a lot of the same
# methods. Extract a base class for the two, or move shared methods into
# Summary.
class _ScopedSummaryV2(Summary):
  """Records summaries in a given scope.

  Only for TPUEstimator.

  Each scope gets assigned a different collection where summary ops gets added.
  This allows Tensorboard to display summaries with different scopes but the
  same name in the same charts.
  """

  def __init__(self, logdir, namespace=None, scope=None, skip_summary=False):
    """Initializes a `_TPUScopedSummary`.

    Args:
      logdir: String directory path for logging summaries.
      namespace: String namespace to append to the logdir. Can be shared with
        other `_ScopedSummary` objects.
      scope: String scope name.
      skip_summary: Whether to record summary ops.

    Returns:
      A `_ScopedSummary` instance.
    """
    # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    from tensorboard.plugins.audio import summary_v2 as audio_v2_lib
    from tensorboard.plugins.histogram import summary_v2 as histogram_v2_lib
    from tensorboard.plugins.image import summary_v2 as image_v2_lib
    from tensorboard.plugins.scalar import summary_v2 as scalar_v2_lib
    # pylint: enable=g-direct-tensorflow-import,g-import-not-at-top
    assert logdir
    if scope == _DEFAULT_SCOPE:
      raise ValueError("scope cannot be 'default'.")
    # Namespace and scope each add a subdirectory, so summaries written under
    # different scopes end up in separate event-file directories.
    if namespace:
      logdir = os.path.join(logdir, namespace)
    if scope:
      logdir = os.path.join(logdir, scope)
    self._logdir = logdir
    self._namespace = namespace
    self._scope = scope
    # Set temporarily by `current_scope` so tags can be re-rooted; None
    # whenever no `current_scope` context is active.
    self._additional_scope = None
    self._skip_summary = skip_summary
    self._actual_summary_scalar_fn = scalar_v2_lib.scalar
    self._actual_summary_image_fn = image_v2_lib.image
    self._actual_summary_histogram_fn = histogram_v2_lib.histogram
    self._actual_summary_audio_fn = audio_v2_lib.audio
    # List of (summary_fn, tensor) pairs collected for the TPU host call; see
    # `summary_tuples` and `clear_summary_tuples`.
    self._summary_tuples = []

  @property
  def namespace(self):
    """Returns namespace string."""
    return self._namespace

  @property
  def scope(self):
    """Returns scope string."""
    return self._scope

  @property
  def logdir(self):
    """Returns the logdir."""
    return self._logdir

  @property
  def writer(self):
    """Returns the file writer."""
    # NOTE(review): `_writer` is not assigned anywhere in `__init__`;
    # presumably it is set externally before this property is read — confirm.
    return self._writer

  @contextlib.contextmanager
  def current_scope(self):
    """Registers the current context's scope to strip it from summary tags."""
    self._additional_scope = tf_compat.v1.get_default_graph().get_name_scope()
    try:
      yield
    finally:
      # Always clear, even if the body raised.
      self._additional_scope = None

  @contextlib.contextmanager
  def _strip_tag_scope(self, additional_scope):
    """Monkey patches `summary_op_util.summary_scope` to strip tag scopes."""
    # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    from tensorflow.python.ops import summary_ops_v2 as summary_v2_lib
    from tensorflow.python.ops.summary_ops_v2 import _INVALID_SCOPE_CHARACTERS
    # pylint: enable=g-direct-tensorflow-import,g-import-not-at-top
    # Keep references to the originals so they can be restored in `finally`.
    original_summary_scope = summary_op_util.summary_scope
    original_summary_scope_v2 = getattr(summary_v2_lib, "summary_scope")

    # TF 1.
    @contextlib.contextmanager
    def strip_tag_scope_fn(name, family=None, default_name=None, values=None):
      tag, scope = (None, None)
      with original_summary_scope(name, family, default_name, values) as (t, s):
        tag = _strip_scope(t, self.scope, additional_scope)
        scope = s
      yield tag, scope

    # TF 2.
    @contextlib.contextmanager
    def monkey_patched_summary_scope_fn(name,
                                        default_name="summary",
                                        values=None):
      """Rescopes the summary tag with the ScopedSummary's scope."""
      name = name or default_name
      current_scope = tf_compat.v1.get_default_graph().get_name_scope()
      tag = current_scope + "/" + name if current_scope else name
      # Strip illegal characters from the scope name, and if that leaves
      # nothing, use None instead so we pick up the default name.
      name = _INVALID_SCOPE_CHARACTERS.sub("", name) or None
      with tf.compat.v1.name_scope(name, default_name, values) as scope:
        tag = _strip_scope(tag, self.scope, additional_scope)
        yield tag, scope

    # Patch every known entry point that hands out summary scopes.
    setattr(summary_op_util, "summary_scope", strip_tag_scope_fn)
    setattr(summary_v2_lib, "summary_scope", monkey_patched_summary_scope_fn)
    setattr(compat.tf2.summary.experimental, "summary_scope",
            monkey_patched_summary_scope_fn)
    setattr(compat.tf2.summary, "summary_scope",
            monkey_patched_summary_scope_fn)
    try:
      yield
    finally:
      # Restore the originals even when the body raises.
      setattr(summary_op_util, "summary_scope", original_summary_scope)
      setattr(summary_v2_lib, "summary_scope", original_summary_scope_v2)
      setattr(compat.tf2.summary.experimental, "summary_scope",
              original_summary_scope_v2)
      setattr(compat.tf2.summary, "summary_scope", original_summary_scope_v2)

  def _prefix_scope(self, name):
    # Unlike the v1 `_ScopedSummary`, an unscoped summary is filed under the
    # default scope rather than left unprefixed.
    scope = self._scope
    if name[0] == "/":
      name = name[1:]
    if not scope:
      scope = _DEFAULT_SCOPE
    return "{scope}/{name}".format(scope=scope, name=name)

  def _create_summary(self, summary_fn, name, tensor):
    """Creates a summary op.

    This will create a function that takes a `Tensor` and adds it to a list with
    its matching `tensor`.

    Args:
      summary_fn: A function that takes a name string and `Tensor` and returns a
        summary op.
      name: String name of the summary.
      tensor: `Tensor` to pass to the summary.
    """
    if self._skip_summary:
      return
    # additional_scope is set with the context from `current_scope`.
    # e.g. "foo/bar".
    additional_scope = self._additional_scope
    # name_scope is from whichever scope the summary actually gets called in.
    # e.g. "foo/bar/baz"
    name_scope = tf_compat.v1.get_default_graph().get_name_scope()
    # Reuse name_scope if it exists by appending "/" to it.
    name_scope = name_scope + "/" if name_scope else name_scope

    def _summary_fn(tensor, step):
      """Creates a summary with the given `Tensor`."""
      summary_name = self._prefix_scope(name)
      # Recover the current name scope when this fn is be called, because the
      # scope may be different when fns are called.
      # e.g. "foo/bar/baz/scalar" will become "baz/scalar" when
      # additional_scope is "foo/bar".
      # TODO: Figure out a cleaner way to handle this.
      assert not tf_compat.v1.get_default_graph().get_name_scope()
      with tf_compat.v1.name_scope(name_scope):
        with self._strip_tag_scope(additional_scope):
          # TODO: Do summaries need to be reduced before writing?
          # Presumably each tensor core creates its own summary so we may be
          # writing out num_tensor_cores copies of the same value.
          return summary_fn(summary_name, tensor, step)

    self._summary_tuples.append((_summary_fn, tensor))

  def scalar(self, name, tensor, family=None, description=None):
    """See `Summary`. `family` is accepted but unused here."""

    def _summary_fn(name, tensor, step):
      return self._actual_summary_scalar_fn(
          name=name, data=tensor, description=description, step=step)

    # Scalars are reshaped to rank 0 as required by the v2 scalar summary.
    self._create_summary(_summary_fn, name,
                         tf.reshape(tf.convert_to_tensor(value=tensor), []))

  def image(self, name, tensor, max_outputs=3, family=None, description=None):
    """See `Summary`. `family` is accepted but unused here."""

    def _summary_fn(name, tensor, step):
      return self._actual_summary_image_fn(
          name=name,
          data=tensor,
          max_outputs=max_outputs,
          description=description,
          step=step)

    self._create_summary(_summary_fn, name, tf.cast(tensor, tf.float32))

  def histogram(self,
                name,
                values,
                family=None,
                buckets=None,
                description=None):
    """See `Summary`. `family` is accepted but unused here."""

    def _summary_fn(name, tensor, step):
      return self._actual_summary_histogram_fn(
          name=name,
          data=tensor,
          buckets=buckets,
          description=description,
          step=step)

    self._create_summary(_summary_fn, name, tf.convert_to_tensor(value=values))

  def audio(self,
            name,
            tensor,
            sample_rate,
            max_outputs=3,
            family=None,
            encoding=None,
            description=None):
    """See `Summary`. `family` is accepted but unused here."""

    def _summary_fn(name, tensor, step):
      return self._actual_summary_audio_fn(
          name=name,
          data=tensor,
          sample_rate=sample_rate,
          encoding=encoding,
          description=description,
          step=step)

    self._create_summary(_summary_fn, name, tf.cast(tensor, tf.float32))

  def summary_tuples(self):
    """Returns an iterable of functions that convert a Tensor to a summary.

    Used for TPU host calls.

    Returns:
      Iterable of functions that take a single `Tensor` argument.
    """
    return tuple(self._summary_tuples)

  def clear_summary_tuples(self):
    """Clears the list of current summary tuples."""
    self._summary_tuples = []
class _TPUScopedSummary(_ScopedSummaryV2):
  """Records summaries in a given scope.

  Only for TPUEstimator.

  Each scope gets assigned a different collection where summary ops gets added.
  This allows Tensorboard to display summaries with different scopes but the
  same name in the same charts.
  """

  def __init__(self, logdir, namespace=None, scope=None, skip_summary=False):
    super(_TPUScopedSummary, self).__init__(logdir, namespace, scope,
                                            skip_summary)
    # Swap the TensorBoard plugin summary fns installed by the parent class
    # for the TF-internal v2 summary ops.
    # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
    from tensorflow.python.ops import summary_ops_v2 as summary_v2_lib
    # pylint: enable=g-direct-tensorflow-import,g-import-not-at-top
    self._actual_summary_scalar_fn = summary_v2_lib.scalar
    self._actual_summary_image_fn = summary_v2_lib.image
    self._actual_summary_histogram_fn = summary_v2_lib.histogram
    self._actual_summary_audio_fn = summary_v2_lib.audio

  def scalar(self, name, tensor, family=None):
    """Records a deferred scalar summary (see `Summary`)."""

    def _emit(tag, value, step):
      return self._actual_summary_scalar_fn(
          name=tag, tensor=value, family=family, step=step)

    # The TF-internal scalar op expects a rank-1 tensor of size 1.
    reshaped = tf.reshape(tf.convert_to_tensor(value=tensor), [1])
    self._create_summary(_emit, name, reshaped)

  def image(self, name, tensor, max_outputs=3, family=None):
    """Records a deferred image summary (see `Summary`)."""

    def _emit(tag, value, step):
      return self._actual_summary_image_fn(
          name=tag,
          tensor=value,
          max_images=max_outputs,
          family=family,
          step=step)

    self._create_summary(_emit, name, tf.cast(tensor, tf.float32))

  def histogram(self, name, values, family=None):
    """Records a deferred histogram summary (see `Summary`)."""

    def _emit(tag, value, step):
      return self._actual_summary_histogram_fn(
          name=tag, tensor=value, family=family, step=step)

    self._create_summary(_emit, name, tf.convert_to_tensor(value=values))

  def audio(self, name, tensor, sample_rate, max_outputs=3, family=None):
    """Records a deferred audio summary (see `Summary`)."""

    def _emit(tag, value, step):
      return self._actual_summary_audio_fn(
          name=tag,
          tensor=value,
          sample_rate=sample_rate,
          max_outputs=max_outputs,
          family=family,
          step=step)

    self._create_summary(_emit, name, tf.cast(tensor, tf.float32))
class _SummaryWrapper(object):
"""Wraps an `adanet.Summary` to provide summary-like APIs."""
def __init__(self, summary):
self._summary = summary
def scalar(self, name, tensor, collections=None, family=None):
"""See `tf.summary.scalar`."""
if collections is not None:
logging.warning(
"The `collections` argument will be "
"ignored for scalar summary: %s, %s", name, tensor)
return self._summary.scalar(name=name, tensor=tensor, family=family)
def image(self, name, tensor, max_outputs=3, collections=None, family=None):
"""See `tf.summary.image`."""
if collections is not None:
logging.warning(
"The `collections` argument will be "
"ignored for image summary: %s, %s", name, tensor)
return self._summary.image(
name=name, tensor=tensor, max_outputs=max_outputs, family=family)
def histogram(self, name, values, collections=None, family=None):
"""See `tf.summary.histogram`."""
if collections is not None:
logging.warning(
"The `collections` argument will be "
"ignored for histogram summary: %s, %s", name, values)
return self._summary.histogram(name=name, values=values, family=family)
def audio(self,
name,
tensor,
sample_rate,
max_outputs=3,
collections=None,
family=None):
"""See `tf.summary.audio`."""
if collections is not None:
logging.warning(
"The `collections` argument will be "
"ignored for audio summary: %s, %s", name, tensor)
return self._summary.audio(
name=name,
tensor=tensor,
sample_rate=sample_rate,
max_outputs=max_outputs,
family=family)
def scalar_v2(self, name, tensor, family=None, step=None):
"""See `tf.contrib.summary.scalar`."""
if step is not None:
logging.warning(
"The `step` argument will be ignored to use the global step for "
"scalar summary: %s, %s", name, tensor)
return self._summary.scalar(name=name, tensor=tensor, family=family)
def image_v2(self,
name,
tensor,
bad_color=None,
max_images=3,
family=None,
step=None):
"""See `tf.contrib.summary.image`."""
if step is not None:
logging.warning(
"The `step` argument will be ignored to use the global step for "
"image summary: %s, %s", name, tensor)
# TODO: Add support for `bad_color` arg.
if bad_color is not None:
logging.warning(
"The `bad_color` arg is not supported for image summary: %s, %s",
name, tensor)
return self._summary.image(
name=name, tensor=tensor, max_outputs=max_images, family=family)
def histogram_v2(self, name, tensor, family=None, step=None):
"""See `tf.contrib.summary.histogram`."""
if step is not None:
logging.warning(
"The `step` argument will be ignored to use the global step for "
"histogram summary: %s, %s", name, tensor)
return self._summary.histogram(name=name, values=tensor, family=family)
def audio_v2(self,
name,
tensor,
sample_rate,
max_outputs,
family=None,
step=None):
"""See `tf.contrib.summary.audio`."""
if step is not None:
logging.warning(
"The `step` argument will be ignored to use the global step for "
"audio summary: %s, %s", name, tensor)
return self._summary.audio(
name=name,
tensor=tensor,
sample_rate=sample_rate,
max_outputs=max_outputs,
family=family)
def scalar_v3(self, name, data, step=None, description=None):
"""See `tf.compat.v2.summary.scalar`."""
if step is not None:
logging.warning(
"The `step` argument will be ignored to use the iteration step for "
"scalar summary: %s", name)
return self._summary.scalar(name=name, tensor=data, description=description)
def image_v3(self, name, data, step=None, max_outputs=3, description=None):
"""See `tf.compat.v2.summary.image`."""
if step is not None:
logging.warning(
"The `step` argument will be ignored to use the iteration step for "
"image summary: %s", name)
return self._summary.image(
name=name,
tensor=data,
max_outputs=max_outputs,
description=description)
def histogram_v3(self, name, data, step=None, buckets=None, description=None):
"""See `tf.compat.v2.summary.histogram`."""
if step is not None:
logging.warning(
"The `step` argument will be ignored to use the global step for "
"histogram summary: %s", name)
return self._summary.histogram(
name=name, tensor=data, buckets=buckets, description=description)
def audio_v3(self,
name,
data,
sample_rate,
step=None,
max_outputs=3,
encoding=None,
description=None):
"""See `tf.compat.v2.summary.audio`."""
if step is not None:
logging.warning(
"The `step` argument will be ignored to use the global step for "
"audio summary: %s", name)
return self._summary.audio(
name=name,
tensor=data,
sample_rate=sample_rate,
max_outputs=max_outputs,
encoding=encoding,
description=description)
@contextlib.contextmanager
def monkey_patched_summaries(summary):
  """A context where global summary functions point to the given summary.

  Restores original summary functions upon exit.

  NOTE: This function is not thread-safe.

  Args:
    summary: An `adanet.Summary` instance.

  Yields:
    A context where summary functions are routed to the given `adanet.Summary`.
  """
  from tensorflow.python.ops import summary_ops_v2 as summary_v2_lib  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
  # Capture the originals of every summary entry point before patching so
  # they can all be restored in the `finally` block below.
  old_summary_scalar = summary_lib.scalar
  old_summary_image = summary_lib.image
  old_summary_histogram = summary_lib.histogram
  old_summary_audio = summary_lib.audio
  old_summary_v2_scalar = summary_v2_lib.scalar
  old_summary_v2_image = summary_v2_lib.image
  old_summary_v2_histogram = summary_v2_lib.histogram
  old_summary_v2_audio = summary_v2_lib.audio
  old_summary_compat_v2_scalar = tf_compat.v2.summary.scalar
  old_summary_compat_v2_image = tf_compat.v2.summary.image
  old_summary_compat_v2_histogram = tf_compat.v2.summary.histogram
  old_summary_compat_v2_audio = tf_compat.v2.summary.audio
  # Monkey-patch global attributes.
  wrapped_summary = _SummaryWrapper(summary)
  setattr(tf_v1.summary, "scalar", wrapped_summary.scalar)
  setattr(tf_v1.summary, "image", wrapped_summary.image)
  setattr(tf_v1.summary, "histogram", wrapped_summary.histogram)
  setattr(tf_v1.summary, "audio", wrapped_summary.audio)
  setattr(tf_compat.v1.summary, "scalar", wrapped_summary.scalar)
  setattr(tf_compat.v1.summary, "image", wrapped_summary.image)
  setattr(tf_compat.v1.summary, "histogram", wrapped_summary.histogram)
  setattr(tf_compat.v1.summary, "audio", wrapped_summary.audio)
  setattr(summary_lib, "scalar", wrapped_summary.scalar)
  setattr(summary_lib, "image", wrapped_summary.image)
  setattr(summary_lib, "histogram", wrapped_summary.histogram)
  setattr(summary_lib, "audio", wrapped_summary.audio)
  setattr(tf_compat.v2.summary, "scalar", wrapped_summary.scalar_v3)
  setattr(tf_compat.v2.summary, "image", wrapped_summary.image_v3)
  setattr(tf_compat.v2.summary, "histogram", wrapped_summary.histogram_v3)
  setattr(tf_compat.v2.summary, "audio", wrapped_summary.audio_v3)
  setattr(summary_v2_lib, "scalar", wrapped_summary.scalar_v2)
  setattr(summary_v2_lib, "image", wrapped_summary.image_v2)
  setattr(summary_v2_lib, "histogram", wrapped_summary.histogram_v2)
  setattr(summary_v2_lib, "audio", wrapped_summary.audio_v2)
  try:
    # TF 2.0 eliminates tf.contrib.
    setattr(tf_v1.contrib.summary, "scalar", wrapped_summary.scalar_v2)
    setattr(tf_v1.contrib.summary, "image", wrapped_summary.image_v2)
    setattr(tf_v1.contrib.summary, "histogram", wrapped_summary.histogram_v2)
    setattr(tf_v1.contrib.summary, "audio", wrapped_summary.audio_v2)
  except (AttributeError, ImportError):
    # TF 2.0 eliminates tf.contrib.
    # Also set the new tf.summary to be use the new summaries in TF 2.
    if tf_compat.version_greater_or_equal("2.0.0"):
      setattr(tf.summary, "scalar", wrapped_summary.scalar_v3)
      setattr(tf.summary, "image", wrapped_summary.image_v3)
      setattr(tf.summary, "histogram", wrapped_summary.histogram_v3)
      setattr(tf.summary, "audio", wrapped_summary.audio_v3)
  try:
    yield
  finally:
    # Revert monkey-patches in roughly reverse order of application.
    try:
      setattr(tf_v1.contrib.summary, "audio", old_summary_v2_audio)
      setattr(tf_v1.contrib.summary, "histogram", old_summary_v2_histogram)
      setattr(tf_v1.contrib.summary, "image", old_summary_v2_image)
      setattr(tf_v1.contrib.summary, "scalar", old_summary_v2_scalar)
    except (AttributeError, ImportError):
      # TF 2.0 eliminates tf.contrib.
      pass
    setattr(summary_v2_lib, "audio", old_summary_v2_audio)
    setattr(summary_v2_lib, "histogram", old_summary_v2_histogram)
    setattr(summary_v2_lib, "image", old_summary_v2_image)
    setattr(summary_v2_lib, "scalar", old_summary_v2_scalar)
    setattr(tf.summary, "audio", old_summary_compat_v2_audio)
    setattr(tf.summary, "histogram", old_summary_compat_v2_histogram)
    setattr(tf.summary, "image", old_summary_compat_v2_image)
    setattr(tf.summary, "scalar", old_summary_compat_v2_scalar)
    setattr(tf_compat.v2.summary, "audio", old_summary_compat_v2_audio)
    setattr(tf_compat.v2.summary, "histogram", old_summary_compat_v2_histogram)
    setattr(tf_compat.v2.summary, "image", old_summary_compat_v2_image)
    setattr(tf_compat.v2.summary, "scalar", old_summary_compat_v2_scalar)
    setattr(summary_lib, "audio", old_summary_audio)
    setattr(summary_lib, "histogram", old_summary_histogram)
    setattr(summary_lib, "image", old_summary_image)
    setattr(summary_lib, "scalar", old_summary_scalar)
    setattr(tf_compat.v1.summary, "audio", old_summary_audio)
    setattr(tf_compat.v1.summary, "histogram", old_summary_histogram)
    setattr(tf_compat.v1.summary, "image", old_summary_image)
    setattr(tf_compat.v1.summary, "scalar", old_summary_scalar)
    setattr(tf_v1.summary, "audio", old_summary_audio)
    setattr(tf_v1.summary, "histogram", old_summary_histogram)
    setattr(tf_v1.summary, "image", old_summary_image)
    setattr(tf_v1.summary, "scalar", old_summary_scalar)
| 35,495 | 35.443532 | 136 | py |
adanet | adanet-master/adanet/examples/simple_dnn.py | """A simple dense neural network search space.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
import adanet
import tensorflow.compat.v2 as tf
_NUM_LAYERS_KEY = "num_layers"
class _SimpleDNNBuilder(adanet.subnetwork.Builder):
  """Builds a DNN subnetwork for AdaNet."""

  def __init__(self, feature_columns, optimizer, layer_size, num_layers,
               learn_mixture_weights, dropout, seed):
    """Initializes a `_DNNBuilder`.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      optimizer: An `Optimizer` instance for training both the subnetwork and
        the mixture weights.
      layer_size: The number of nodes to output at each hidden layer.
      num_layers: The number of hidden layers.
      learn_mixture_weights: Whether to solve a learning problem to find the
        best mixture weights, or use their default value according to the
        mixture weight type. When `False`, the subnetworks will return a no_op
        for the mixture weight train op.
      dropout: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out
        10% of input units.
      seed: A random seed.

    Returns:
      An instance of `_DNNBuilder`.
    """
    self._feature_columns = feature_columns
    self._optimizer = optimizer
    self._layer_size = layer_size
    self._num_layers = num_layers
    self._learn_mixture_weights = learn_mixture_weights
    self._dropout = dropout
    self._seed = seed

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
    input_layer = tf.compat.v1.feature_column.input_layer(
        features=features, feature_columns=self._feature_columns)
    last_layer = input_layer
    # Stack `num_layers` dense+dropout hidden layers; the same seed keeps the
    # graph deterministic across runs.
    for _ in range(self._num_layers):
      last_layer = tf.compat.v1.layers.dense(
          last_layer,
          units=self._layer_size,
          activation=tf.nn.relu,
          kernel_initializer=tf.compat.v1.glorot_uniform_initializer(
              seed=self._seed))
      last_layer = tf.compat.v1.layers.dropout(
          last_layer, rate=self._dropout, seed=self._seed, training=training)
    logits = tf.compat.v1.layers.dense(
        last_layer,
        units=logits_dimension,
        kernel_initializer=tf.compat.v1.glorot_uniform_initializer(
            seed=self._seed))
    # Approximate the Rademacher complexity of this subnetwork as the square-
    # root of its depth.
    complexity = tf.sqrt(tf.cast(self._num_layers, dtype=tf.float32))
    with tf.name_scope(""):
      summary.scalar("complexity", complexity)
      summary.scalar("num_layers", self._num_layers)
    # Share the layer count so the next iteration's generator can grow depth.
    shared = {_NUM_LAYERS_KEY: self._num_layers}
    return adanet.Subnetwork(
        last_layer=last_layer,
        logits=logits,
        complexity=complexity,
        shared=shared)

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    """See `adanet.subnetwork.Builder`."""
    # NOTE: The `adanet.Estimator` increments the global step.
    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      return self._optimizer.minimize(loss=loss, var_list=var_list)

  # TODO: Delete deprecated build_mixture_weights_train_op method.
  # Use adanet.ensemble.Ensembler instead.
  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    """See `adanet.subnetwork.Builder`."""
    if not self._learn_mixture_weights:
      return tf.no_op("mixture_weights_train_op")
    # NOTE: The `adanet.Estimator` increments the global step.
    return self._optimizer.minimize(loss=loss, var_list=var_list)

  @property
  def name(self):
    """See `adanet.subnetwork.Builder`."""
    if self._num_layers == 0:
      # A DNN with no hidden layers is a linear model.
      return "linear"
    return "{}_layer_dnn".format(self._num_layers)
class Generator(adanet.subnetwork.Generator):
  """Generates a two DNN subnetworks at each iteration.

  The first DNN has an identical shape to the most recently added subnetwork
  in `previous_ensemble`. The second has the same shape plus one more dense
  layer on top. This is similar to the adaptive network presented in Figure 2 of
  [Cortes et al. ICML 2017](https://arxiv.org/abs/1607.01097), without the
  connections to hidden layers of networks from previous iterations.
  """

  def __init__(self,
               feature_columns,
               optimizer,
               layer_size=32,
               initial_num_layers=0,
               learn_mixture_weights=False,
               dropout=0.,
               seed=None):
    """Initializes a DNN `Generator`.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        DNN models. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      optimizer: An `Optimizer` instance for training both the subnetwork and
        the mixture weights.
      layer_size: Number of nodes in each hidden layer of the subnetwork
        candidates. Note that this parameter is ignored in a DNN with no hidden
        layers.
      initial_num_layers: Minimum number of layers for each DNN subnetwork. At
        iteration 0, the subnetworks will be `initial_num_layers` deep.
        Subnetworks at subsequent iterations will be at least as deep.
      learn_mixture_weights: Whether to solve a learning problem to find the
        best mixture weights, or use their default value according to the
        mixture weight type. When `False`, the subnetworks will return a no_op
        for the mixture weight train op.
      dropout: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out
        10% of input units.
      seed: A random seed.

    Returns:
      An instance of `Generator`.

    Raises:
      ValueError: If feature_columns is empty.
      ValueError: If layer_size < 1.
      ValueError: If initial_num_layers < 0.
    """
    # Validate arguments up front; the messages are part of the contract.
    if not feature_columns:
      raise ValueError("feature_columns must not be empty")
    if layer_size < 1:
      raise ValueError("layer_size must be >= 1")
    if initial_num_layers < 0:
      raise ValueError("initial_num_layers must be >= 0")
    self._initial_num_layers = initial_num_layers
    # Everything but `num_layers` is fixed, so bind it once and leave
    # `num_layers` to be supplied per candidate.
    self._dnn_builder_fn = functools.partial(
        _SimpleDNNBuilder,
        feature_columns=feature_columns,
        optimizer=optimizer,
        layer_size=layer_size,
        learn_mixture_weights=learn_mixture_weights,
        dropout=dropout,
        seed=seed)

  def generate_candidates(self, previous_ensemble, iteration_number,
                          previous_ensemble_reports, all_reports):
    """See `adanet.subnetwork.Generator`."""
    if previous_ensemble:
      # Continue from the depth of the most recently added subnetwork.
      last_subnetwork = previous_ensemble.weighted_subnetworks[-1].subnetwork
      num_layers = last_subnetwork.shared[_NUM_LAYERS_KEY]
    else:
      num_layers = self._initial_num_layers
    # Propose the same depth and one layer deeper.
    return [
        self._dnn_builder_fn(num_layers=num_layers),
        self._dnn_builder_fn(num_layers=num_layers + 1),
    ]
| 8,005 | 36.411215 | 80 | py |
adanet | adanet-master/adanet/examples/__init__.py | """Some examples using AdaNet.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| 618 | 35.411765 | 72 | py |
adanet | adanet-master/adanet/examples/simple_dnn_test.py | """Tests for a simple dense neural network search space.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from absl.testing import parameterized
import adanet
from adanet.examples import simple_dnn
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class _FakeEnsemble(object):
  """A fake ensemble of one subnetwork."""

  def __init__(self, num_layers):
    # Only the `shared` dict matters to the generator under test; the rest
    # of the subnetwork fields are filler values.
    subnetwork = adanet.Subnetwork(
        last_layer=[1],
        logits=[1],
        complexity=1,
        shared={"num_layers": num_layers})
    self._weighted_subnetworks = [
        adanet.WeightedSubnetwork(
            name=None,
            iteration_number=None,
            weight=None,
            logits=None,
            subnetwork=subnetwork)
    ]

  @property
  def weighted_subnetworks(self):
    return self._weighted_subnetworks
class GeneratorTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for `simple_dnn.Generator` candidate generation and training."""

  # Each case pins the expected builder names and golden loss/complexity
  # values after one training step with a fixed seed.
  @parameterized.named_parameters({
      "testcase_name": "defaults",
      "want_names": ["linear", "1_layer_dnn"],
      "want_subnetwork_losses": [.871, .932],
      "want_mixture_weight_losses": [.871, .932],
      "want_complexities": [0., 1.],
  }, {
      "testcase_name": "learn_mixture_weights",
      "learn_mixture_weights": True,
      "want_names": ["linear", "1_layer_dnn"],
      "want_subnetwork_losses": [.871, .932],
      "want_mixture_weight_losses": [.842, .892],
      "want_complexities": [0., 1.],
  }, {
      "testcase_name": "one_initial_num_layers",
      "initial_num_layers": 1,
      "want_names": ["1_layer_dnn", "2_layer_dnn"],
      "want_subnetwork_losses": [.932, .660],
      "want_mixture_weight_losses": [.932, .660],
      "want_complexities": [1., 1.414],
  }, {
      "testcase_name": "previous_ensemble",
      "previous_ensemble": _FakeEnsemble(1),
      "want_names": ["1_layer_dnn", "2_layer_dnn"],
      "want_subnetwork_losses": [.932, .660],
      "want_mixture_weight_losses": [.932, .660],
      "want_complexities": [1., 1.414],
  })
  @test_util.run_in_graph_and_eager_modes
  def test_generate_candidates(self,
                               want_names,
                               want_subnetwork_losses,
                               want_mixture_weight_losses,
                               want_complexities,
                               learn_mixture_weights=False,
                               initial_num_layers=0,
                               previous_ensemble=None):
    feature_columns = [tf.feature_column.numeric_column("x")]
    generator = simple_dnn.Generator(
        feature_columns=feature_columns,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(.1),
        layer_size=3,
        initial_num_layers=initial_num_layers,
        learn_mixture_weights=learn_mixture_weights,
        seed=42)
    # Graph mode is required to create the global step and run sessions.
    with context.graph_mode(), tf.Graph().as_default() as g:
      iteration_step = tf.compat.v1.train.create_global_step()
      features = {"x": [[1.], [2.]]}
      labels = tf.constant([[0.], [1.]])
      names = []
      subnetwork_losses = []
      mixture_weight_losses = []
      complexities = []
      for builder in generator.generate_candidates(
          previous_ensemble,
          # The following arguments are not used by
          # simple_dnn.BuilderGenerator's generate_candidates.
          iteration_number=0,
          previous_ensemble_reports=[],
          all_reports=[]):
        names.append(builder.name)
        # 1. Build subnetwork graph.
        subnetwork = builder.build_subnetwork(
            features,
            logits_dimension=1,
            training=True,
            iteration_step=iteration_step,
            summary=tf.summary,
            previous_ensemble=previous_ensemble)
        # 2. Build subnetwork train ops.
        subnetwork_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=subnetwork.logits, labels=labels))
        subnetwork_train_op = builder.build_subnetwork_train_op(
            subnetwork,
            subnetwork_loss,
            var_list=None,
            labels=labels,
            iteration_step=iteration_step,
            summary=tf.summary,
            previous_ensemble=None)
        # 3. Build mixture weight train ops.
        # Stop gradients since mixture weights should have not propagate
        # beyond top layer.
        subnetwork_logits = tf.stop_gradient(subnetwork.logits)
        # Mixture weight will initialize to a one-valued scalar.
        mixture_weight_logits = tf.compat.v1.layers.dense(
            subnetwork_logits,
            units=1,
            use_bias=False,
            kernel_initializer=tf.ones_initializer())
        mixture_weight_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=mixture_weight_logits, labels=labels))
        mixture_weight_train_op = builder.build_mixture_weights_train_op(
            mixture_weight_loss,
            var_list=None,
            labels=labels,
            logits=mixture_weight_logits,
            iteration_step=iteration_step,
            summary=tf.summary)
        # Run one train step for each op, then record the resulting losses
        # and complexity for comparison against the golden values.
        with self.test_session(graph=g) as sess:
          sess.run(tf.compat.v1.global_variables_initializer())
          sess.run(subnetwork_train_op)
          sess.run(mixture_weight_train_op)
          subnetwork_losses.append(sess.run(subnetwork_loss))
          mixture_weight_losses.append(sess.run(mixture_weight_loss))
          complexities.append(sess.run(subnetwork.complexity))
    self.assertEqual(want_names, names)
    self.assertAllClose(want_subnetwork_losses, subnetwork_losses, atol=1e-3)
    self.assertAllClose(
        want_mixture_weight_losses, mixture_weight_losses, atol=1e-3)
    self.assertAllClose(want_complexities, complexities, atol=1e-3)

  @parameterized.named_parameters({
      "testcase_name": "empty_feature_columns",
      "feature_columns": [],
  }, {
      "testcase_name": "zero_layer_size",
      "feature_columns": [tf.feature_column.numeric_column("x")],
      "layer_size": 0,
  }, {
      "testcase_name": "negative_initial_num_layers",
      "feature_columns": [tf.feature_column.numeric_column("x")],
      "initial_num_layers": -1,
  })
  def test_constructor_errors(self,
                              feature_columns,
                              layer_size=3,
                              initial_num_layers=0):
    # Each invalid argument combination must raise ValueError at construction.
    with self.assertRaises(ValueError):
      simple_dnn.Generator(
          feature_columns=feature_columns,
          optimizer=tf.compat.v1.train.GradientDescentOptimizer(.1),
          layer_size=layer_size,
          initial_num_layers=initial_num_layers)
# Standard TensorFlow test entry point: discovers and runs this module's tests.
if __name__ == "__main__":
  tf.test.main()
| 7,398 | 35.810945 | 77 | py |
adanet | adanet-master/adanet/replay/__init__.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines mechanisms for deterministically replaying an AdaNet model search."""
# TODO: Add more detailed documentation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tensorflow.compat.v1 as tf
class Config(object):  # pylint: disable=g-classes-have-attributes
  # pyformat: disable
  """Defines how to deterministically replay an AdaNet model search.

  Instead of searching over candidates, a replayed search reconstructs the
  previous model and trains its components in their original order.

  Args:
    best_ensemble_indices: A list of the best ensemble indices (one per
      iteration).

  Returns:
    An :class:`adanet.replay.Config` instance.
  """
  # pyformat: enable

  def __init__(self, best_ensemble_indices=None):
    # `None` means no replay information is available.
    self._best_ensemble_indices = best_ensemble_indices

  @property
  def best_ensemble_indices(self):
    """The best ensemble indices per iteration."""
    return self._best_ensemble_indices

  def get_best_ensemble_index(self, iteration_number):
    """Returns the best ensemble index for the iteration, or None if unknown."""
    indices = self._best_ensemble_indices
    if not indices:
      return None
    if iteration_number >= len(indices):
      # Iteration beyond the recorded search: nothing to replay.
      return None
    return indices[iteration_number]
# Public API of `adanet.replay`.
__all__ = ["Config"]
| 1,975 | 30.365079 | 80 | py |
adanet | adanet-master/adanet/distributed/placement.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributed placement strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
from absl import logging
from adanet import tf_compat
from adanet.distributed.devices import _OpNameHashStrategy
import numpy as np
import six
@six.add_metaclass(abc.ABCMeta)
class PlacementStrategy(object):  # pytype: disable=ignored-metaclass
  """Abstract placement strategy for distributed training.

  Given a cluster of workers, a placement strategy decides which subgraph
  (the ensemble and/or individual subnetworks) each worker constructs.
  """

  @property
  def config(self):
    """Returns the cluster configuration this strategy was given.

    Returns:
      The :class:`tf.estimator.RunConfig` instance that defines the cluster.
    """
    return self._config

  @config.setter
  def config(self, config):
    """Stores the cluster description for later placement decisions.

    Args:
      config: A :class:`tf.estimator.RunConfig` instance that defines the
        cluster.
    """
    self._config = config

  @abc.abstractmethod
  def should_build_ensemble(self, num_subnetworks):
    """Whether the current worker should build the ensemble.

    Args:
      num_subnetworks: Integer number of subnetworks to train in the current
        iteration.

    Returns:
      Boolean whether to build the ensemble on the current worker.
    """

  @abc.abstractmethod
  def should_build_subnetwork(self, num_subnetworks, subnetwork_index):
    """Whether the current worker should build the given subnetwork.

    Args:
      num_subnetworks: Integer number of subnetworks to train in the current
        iteration.
      subnetwork_index: Integer index of the subnetwork in the list of the
        current iteration's subnetworks.

    Returns:
      Boolean whether to build the given subnetwork on the current worker.
    """

  @abc.abstractmethod
  def should_train_subnetworks(self, num_subnetworks):
    """Whether the current worker should train subnetworks.

    Args:
      num_subnetworks: Integer number of subnetworks to train in the current
        iteration.

    Returns:
      Boolean whether to train subnetworks on the current worker.
    """

  @abc.abstractmethod
  @contextlib.contextmanager
  def subnetwork_devices(self, num_subnetworks, subnetwork_index):
    """A context manager for assigning subnetwork ops to devices."""
class ReplicationStrategy(PlacementStrategy):
  # pyformat: disable
  """A simple strategy that replicates the same graph on every worker.

  Every worker builds the ensemble and every subnetwork, so this strategy
  does not scale well as the number of subnetworks and workers increases.
  For :math:`m` workers, :math:`n` parameter servers, and :math:`k`
  subnetworks, it scales with :math:`O(m)` training speedup, :math:`O(m*n*k)`
  variable fetches from parameter servers, and :math:`O(k)` memory required
  per worker. Additionally there will be :math:`O(m)` stale gradients per
  subnetwork when training with asynchronous SGD.

  Returns:
    A :class:`ReplicationStrategy` instance for the current cluster.
  """
  # pyformat: enable

  def should_build_ensemble(self, num_subnetworks):
    # Every worker builds the ensemble.
    return True

  def should_build_subnetwork(self, num_subnetworks, subnetwork_index):
    # Every worker builds every subnetwork.
    return True

  def should_train_subnetworks(self, num_subnetworks):
    # Every worker trains every subnetwork.
    return True

  @contextlib.contextmanager
  def subnetwork_devices(self, num_subnetworks, subnetwork_index):
    # No dedicated placement: fall through to the default device assignment.
    yield
class RoundRobinStrategy(PlacementStrategy):
  # pyformat: disable
  """A strategy that round-robin assigns subgraphs to specific workers.

  Specifically, it selects dedicated workers to only train ensemble variables,
  and round-robin assigns subnetworks to dedicated subnetwork-training workers.

  Unlike :class:`ReplicationStrategy`, this strategy scales better with the
  number of subnetworks, workers, and parameter servers. For :math:`m` workers,
  :math:`n` parameter servers, and :math:`k` subnetworks, this strategy will
  scale with :math:`O(m/k)` training speedup, :math:`O(m*n/k)` variable fetches
  from parameter servers, and :math:`O(1)` memory required per worker.
  Additionally, there will only be :math:`O(m/k)` stale gradients per subnetwork
  when training with asynchronous SGD, which reduces training instability versus
  :class:`ReplicationStrategy`.

  When there are more workers than subnetworks, this strategy assigns
  subnetworks to workers modulo the number of subnetworks.

  Conversely, when there are more subnetworks than workers, this round robin
  assigns subnetworks modulo the number of workers. So certain workers may end
  up training more than one subnetwork.

  This strategy gracefully handles scenarios when the number of subnetworks
  does not perfectly divide the number of workers and vice-versa. It also
  supports different numbers of subnetworks at different iterations, and
  reloading training with a resized cluster.

  Args:
    drop_remainder: Bool whether to drop remaining subnetworks that haven't been
      assigned to a worker in the remainder after perfect division of workers by
      the current iteration's num_subnetworks + 1. When :code:`True`, each subnetwork
      worker will only train a single subnetwork, and subnetworks that have not
      been assigned to a worker are dropped. NOTE: This can result
      in subnetworks not being assigned to any worker when
      num_workers < num_subnetworks + 1. When :code:`False`, remaining subnetworks
      during the round-robin assignment will be placed on workers that already
      have a subnetwork.
    dedicate_parameter_servers: Bool whether to give each subnetwork its own
      dedicated partition of the available parameter servers (see
      :meth:`subnetwork_devices`). When :code:`False`, the default device
      placement is used for all subnetworks.

  Returns:
    A :class:`RoundRobinStrategy` instance for the current cluster.
  """
  # pyformat: enable

  # TODO: Allow user to disable ensemble workers. For example, when there
  # are no ensemble variables to train, such as in a uniform average ensemble,
  # there is no need for a non-chief to create the full ensemble during
  # training, except for the chief to initialize the ensemble's non-trainable
  # variables.

  # TODO: Optional code organization suggestion:
  # Explicitly define what a "task" is, to make the below code clearer. One way
  # of doing this:
  #
  # def _worker_tasks(self, num_subnetworks):
  #   """Returns the set of tasks that this worker can work on.
  #
  #   Each task is represented by an integer between 0 and num_subnetworks
  #   (inclusive). 0 corresponds to the task of training the ensemble(s), 1
  #   corresponds to the task of training subnetwork 0, 2 corresponds to the
  #   task of training subnetwork 1, and so on.
  #
  #   Examples:
  #     - 1 worker, 3 subnetworks. This would return {0, 1, 2, 3} for the only
  #       worker, since the only worker would have to train the ensemble(s) and
  #       all 3 subnetworks.
  #     - 2 workers, 3 subnetworks. This would return {0} for worker 0, and
  #       {1, 2, 3} for worker 1. This means that the first worker trains the
  #       ensemble(s), while the second worker trains all three subnetworks.
  #     - 4 workers, 3 subnetworks. This would return {0} for worker 0, {1} for
  #       worker 1, {2} for worker 2, and {3} for worker 3. This means that
  #       worker 0 trains the ensemble(s) while the rest of the workers train
  #       one subnetwork each.
  #     - 5 workers, 3 subnetworks. This would return {0} for worker 0, {1} for
  #       worker 1, {2} for worker 2, {3} for worker 3, and {1} for worker 4.
  #       This is like the previous case, except that worker 4 also helps to
  #       train subnetwork 0.
  #   """
  #
  # That way, should_build_ensemble can just be:
  #
  #   return 0 in self._worker_tasks(...)
  #
  # then should_build_subnetwork can just be:
  #
  #   if (subnetwork_index in self._worker_tasks(...) or 0 in
  #       subnetwork_index in self._worker_tasks(...)):
  #     return True
  #   return False
  #
  # and should_train_subnetwork can just be:
  #
  #   return subnetwork_index in self._worker_tasks(...)

  def __init__(self, drop_remainder=False, dedicate_parameter_servers=True):
    self._drop_remainder = drop_remainder
    self._dedicate_parameter_servers = dedicate_parameter_servers

  @property
  def _num_workers(self):
    """Number of worker replicas in the cluster."""
    return self.config.num_worker_replicas

  @property
  def _worker_index(self):
    """This worker's global index in the cluster (the chief is 0)."""
    return self.config.global_id_in_cluster or 0

  def _worker_task(self, num_subnetworks):
    """Returns the worker index modulo the number of subnetworks."""
    if self._drop_remainder and self._num_workers > 1 and (num_subnetworks >
                                                           self._num_workers):
      # Only workers 1..num_workers-1 train subnetworks (worker task 0 trains
      # the ensemble), so the number of subnetworks that cannot be assigned is
      # num_subnetworks - (num_workers - 1).
      logging.log_first_n(
          logging.WARNING,
          "With drop_remainder=True, %s workers and %s subnetworks, the last "
          "%s subnetworks will be dropped and will not be trained", 1,
          self._num_workers, num_subnetworks,
          num_subnetworks - self._num_workers + 1)
    # The first worker will always build the ensemble so we add 1.
    return self._worker_index % (num_subnetworks + 1)

  def should_build_ensemble(self, num_subnetworks):
    """Whether this worker is a dedicated ensemble worker."""
    if num_subnetworks == 1:
      return True
    worker_task = self._worker_task(num_subnetworks)
    # The ensemble builder is always the first worker task.
    return worker_task == 0

  def should_build_subnetwork(self, num_subnetworks, subnetwork_index):
    """Whether this worker builds the subgraph of the given subnetwork."""
    if num_subnetworks == 1:
      return True
    worker_task = self._worker_task(num_subnetworks)
    if worker_task == 0:
      # The zeroth index worker is an ensemble worker. It builds (but does not
      # train) every subnetwork so that it can construct the full ensemble.
      return True
    subnetwork_worker_index = worker_task - 1
    if self._drop_remainder:
      return subnetwork_worker_index == subnetwork_index
    # Without drop_remainder, workers left over after dividing the cluster
    # into groups of (num_subnetworks + 1) pick up subnetworks modulo the
    # number of subnetwork workers in the remainder group.
    workers_per_subnetwork = self._num_workers // (num_subnetworks + 1)
    if self._num_workers % (num_subnetworks + 1) == 0:
      num_subnetwork_workers = num_subnetworks
    elif self._worker_index >= workers_per_subnetwork * (num_subnetworks + 1):
      # This worker is in the remainder group; one of the remainder workers is
      # an ensemble worker, hence the -1.
      num_subnetwork_workers = self._num_workers % (num_subnetworks + 1) - 1
    else:
      num_subnetwork_workers = num_subnetworks
    return subnetwork_worker_index == subnetwork_index % num_subnetwork_workers

  def should_train_subnetworks(self, num_subnetworks):
    """Whether this worker trains (not just builds) subnetworks."""
    if num_subnetworks == 1 or self._num_workers == 1:
      return True
    # Ensemble workers build subnetworks but never train them.
    return not self.should_build_ensemble(num_subnetworks)

  @contextlib.contextmanager
  def subnetwork_devices(self, num_subnetworks, subnetwork_index):
    """A context that pins a subnetwork's variables to its own ps partition."""
    if not self._dedicate_parameter_servers:
      # Use default device placement.
      yield
      return

    # Each subnetwork gets its own dedicated parameter servers
    num_ps_replicas = self.config.num_ps_replicas
    ps_numbers = np.array(range(num_ps_replicas))
    subnetwork_group = subnetwork_index
    if num_ps_replicas > 0 and num_subnetworks > num_ps_replicas:
      # More subnetworks than ps tasks: share ps partitions modulo the count.
      subnetwork_group = subnetwork_index % num_ps_replicas
    ps_group = np.array_split(ps_numbers, num_subnetworks)[subnetwork_group]
    # Assign ops to parameter servers based on hashed op names.
    ps_strategy = _OpNameHashStrategy(len(ps_group))

    def device_fn(op):
      """Assigns variables to a subnetwork's dedicated parameter servers."""
      # Import here to avoid strict BUILD deps check.
      from tensorflow.core.framework import node_def_pb2  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
      node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
      from tensorflow.python.training import device_setter  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
      if num_ps_replicas > 0 and node_def.op in device_setter.STANDARD_PS_OPS:
        # ps_group lists the task ids in the group. Adding the first task id in
        # the group to the task number determined by the PS strategy gives the
        # correct parameter server assignment.
        return "/job:ps/task:{}".format(ps_group[0] + ps_strategy(op))
      return op.device

    with tf_compat.v1.device(device_fn):
      yield
| 12,552 | 38.105919 | 124 | py |
adanet | adanet-master/adanet/distributed/placement_test.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributed placement strategy tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
from adanet.distributed.placement import ReplicationStrategy
from adanet.distributed.placement import RoundRobinStrategy
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class ReplicationStrategyTest(tf.test.TestCase):
  """Checks that ReplicationStrategy assigns all work to every worker."""

  @test_util.run_in_graph_and_eager_modes
  def test_strategy(self):
    """Any worker builds the ensemble and builds/trains all subnetworks."""
    replication = ReplicationStrategy()
    self.assertTrue(replication.should_build_ensemble(3))
    self.assertTrue(replication.should_build_subnetwork(3, 1))
    self.assertTrue(replication.should_train_subnetworks(3))
class WorkerConfig(object):
  """Minimal stand-in for a RunConfig exposing the worker cluster fields."""

  def __init__(self, num_worker_replicas, global_id_in_cluster):
    # Mirror the two attributes RoundRobinStrategy reads from a RunConfig.
    self.global_id_in_cluster = global_id_in_cluster
    self.num_worker_replicas = num_worker_replicas
class ParameterServerConfig(object):
  """Minimal stand-in for a RunConfig exposing the ps replica count."""

  def __init__(self, num_ps_replicas):
    # The only attribute subnetwork_devices reads from a RunConfig.
    self.num_ps_replicas = num_ps_replicas
def _testcase_name(name, drop_remainder):
return "{}{}".format(name, "_drop_remainder" if drop_remainder else "")
class RoundRobinStrategyTest(parameterized.TestCase, tf.test.TestCase):
  """Covers RoundRobinStrategy worker assignment and ps device placement."""

  # pylint: disable=g-complex-comprehension
  # Each dict below is one named test case; the comprehension at the end
  # generates every case twice: with drop_remainder=False and with True.
  @parameterized.named_parameters(
      itertools.chain(*[[
          {
              "testcase_name":
                  _testcase_name("one_worker_one_subnetwork", drop_remainder),
              "num_workers":
                  1,
              "num_subnetworks":
                  1,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble": [True],
              "want_should_build_subnetwork": [[True]],
              "want_should_train_subnetworks": [True],
          },
          {
              "testcase_name":
                  _testcase_name("three_workers_one_subnetworks", drop_remainder
                                ),
              "num_workers":
                  3,
              "num_subnetworks":
                  1,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble": [True, True, True],
              "want_should_build_subnetwork": [[True], [True], [True]],
              "want_should_train_subnetworks": [True, True, True],
          },
          {
              "testcase_name":
                  _testcase_name("two_workers_one_subnetworks", drop_remainder),
              "num_workers":
                  2,
              "num_subnetworks":
                  5,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble": [True, False],
              "want_should_build_subnetwork": [[True, True, True, True, True],
                                               [
                                                   True,
                                                   not drop_remainder,
                                                   not drop_remainder,
                                                   not drop_remainder,
                                                   not drop_remainder,
                                               ]],
              "want_should_train_subnetworks": [False, True],
          },
          {
              "testcase_name":
                  _testcase_name("one_worker_three_subnetworks", drop_remainder
                                ),
              "num_workers":
                  1,
              "num_subnetworks":
                  3,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble": [True],
              "want_should_build_subnetwork": [[True, True, True]],
              "want_should_train_subnetworks": [True],
          },
          {
              "testcase_name":
                  _testcase_name("two_workers_three_subnetworks", drop_remainder
                                ),
              "num_workers":
                  2,
              "num_subnetworks":
                  3,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble": [True, False],
              "want_should_build_subnetwork": [
                  [True, True, True],
                  [True, not drop_remainder, not drop_remainder],
              ],
              "want_should_train_subnetworks": [False, True],
          },
          {
              "testcase_name":
                  _testcase_name("three_workers_three_subnetworks",
                                 drop_remainder),
              "num_workers":
                  3,
              "num_subnetworks":
                  3,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble": [True, False, False],
              "want_should_build_subnetwork": [
                  [True, True, True],
                  [True, False, not drop_remainder],
                  [False, True, False],
              ],
              "want_should_train_subnetworks": [False, True, True],
          },
          {
              "testcase_name":
                  _testcase_name("four_workers_three_subnetworks",
                                 drop_remainder),
              "num_workers":
                  4,
              "num_subnetworks":
                  3,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble": [True, False, False, False],
              "want_should_build_subnetwork": [
                  [True, True, True],
                  [True, False, False],
                  [False, True, False],
                  [False, False, True],
              ],
              "want_should_train_subnetworks": [False, True, True, True],
          },
          {
              "testcase_name":
                  _testcase_name("five_workers_three_subnetworks",
                                 drop_remainder),
              "num_workers":
                  5,
              "num_subnetworks":
                  3,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble": [True, False, False, False, True],
              "want_should_build_subnetwork": [
                  [True, True, True],
                  [True, False, False],
                  [False, True, False],
                  [False, False, True],
                  [True, True, True],
              ],
              "want_should_train_subnetworks": [False, True, True, True, False],
          },
          {
              "testcase_name":
                  _testcase_name("six_workers_three_subnetworks", drop_remainder
                                ),
              "num_workers":
                  6,
              "num_subnetworks":
                  3,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble":
                  [True, False, False, False, True, False],
              "want_should_build_subnetwork": [
                  [True, True, True],
                  [True, False, False],
                  [False, True, False],
                  [False, False, True],
                  [True, True, True],
                  [True, not drop_remainder, not drop_remainder],
              ],
              "want_should_train_subnetworks":
                  [False, True, True, True, False, True],
          },
          {
              "testcase_name":
                  _testcase_name("seven_workers_three_subnetworks",
                                 drop_remainder),
              "num_workers":
                  7,
              "num_subnetworks":
                  3,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble":
                  [True, False, False, False, True, False, False],
              "want_should_build_subnetwork": [
                  [True, True, True],
                  [True, False, False],
                  [False, True, False],
                  [False, False, True],
                  [True, True, True],
                  [True, False, not drop_remainder],
                  [False, True, False],
              ],
              "want_should_train_subnetworks":
                  [False, True, True, True, False, True, True],
          },
          {
              "testcase_name":
                  _testcase_name("eight_workers_three_subnetworks",
                                 drop_remainder),
              "num_workers":
                  8,
              "num_subnetworks":
                  3,
              "drop_remainder":
                  drop_remainder,
              "want_should_build_ensemble":
                  [True, False, False, False, True, False, False, False],
              "want_should_build_subnetwork": [
                  [True, True, True],
                  [True, False, False],
                  [False, True, False],
                  [False, False, True],
                  [True, True, True],
                  [True, False, False],
                  [False, True, False],
                  [False, False, True],
              ],
              "want_should_train_subnetworks":
                  [False, True, True, True, False, True, True, True],
          },
      ] for drop_remainder in [False, True]]))
  # pylint: enable=g-complex-comprehension
  @test_util.run_in_graph_and_eager_modes
  def test_worker_methods(self, num_workers, num_subnetworks, drop_remainder,
                          want_should_build_ensemble,
                          want_should_build_subnetwork,
                          want_should_train_subnetworks):
    """Checks ensemble/subnetwork assignment for every simulated worker."""
    should_build_ensemble = []
    should_build_subnetwork = []
    should_train_subnetworks = []
    for worker_index in range(num_workers):
      strategy = RoundRobinStrategy(drop_remainder)
      # Simulate this worker's view of the cluster.
      strategy.config = WorkerConfig(num_workers, worker_index)
      should_build_ensemble.append(
          strategy.should_build_ensemble(num_subnetworks))
      should_build_subnetwork.append([])
      should_train_subnetworks.append(
          strategy.should_train_subnetworks(num_subnetworks))
      for subnetwork_index in range(num_subnetworks):
        should_build_subnetwork[-1].append(
            strategy.should_build_subnetwork(num_subnetworks, subnetwork_index))
    self.assertEqual(want_should_build_ensemble, should_build_ensemble)
    self.assertEqual(want_should_build_subnetwork, should_build_subnetwork)
    self.assertEqual(want_should_train_subnetworks, should_train_subnetworks)

  # Each case lists, per subnetwork, the expected device of each of the four
  # variables (two Dense layers with kernel + bias) built in the test body.
  @parameterized.named_parameters(
      {
          "testcase_name":
              "one_ps_one_subnetwork",
          "num_ps":
              1,
          "num_subnetworks":
              1,
          "want_variable_devices": [[
              "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
              "/job:ps/task:0"
          ],],
      },
      {
          "testcase_name":
              "three_ps_one_subnetwork",
          "num_ps":
              3,
          "num_subnetworks":
              1,
          "want_variable_devices": [[
              "/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:2",
              "/job:ps/task:0"
          ],],
      },
      {
          "testcase_name":
              "two_ps_five_subnetworks",
          "num_ps":
              2,
          "num_subnetworks":
              5,
          "want_variable_devices": [
              [
                  "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:1", "/job:ps/task:1", "/job:ps/task:1",
                  "/job:ps/task:1"
              ],
              [
                  "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:1", "/job:ps/task:1", "/job:ps/task:1",
                  "/job:ps/task:1"
              ],
              [
                  "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
          ],
      },
      {
          "testcase_name":
              "one_ps_three_subnetworks",
          "num_ps":
              1,
          "num_subnetworks":
              3,
          "want_variable_devices": [
              [
                  "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
          ],
      },
      {
          "testcase_name":
              "two_ps_three_subnetworks",
          "num_ps":
              2,
          "num_subnetworks":
              3,
          "want_variable_devices": [
              [
                  "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:1", "/job:ps/task:1", "/job:ps/task:1",
                  "/job:ps/task:1"
              ],
              [
                  "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
          ],
      },
      {
          "testcase_name":
              "three_ps_three_subnetworks",
          "num_ps":
              3,
          "num_subnetworks":
              3,
          "want_variable_devices": [
              [
                  "/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:1", "/job:ps/task:1", "/job:ps/task:1",
                  "/job:ps/task:1"
              ],
              [
                  "/job:ps/task:2", "/job:ps/task:2", "/job:ps/task:2",
                  "/job:ps/task:2"
              ],
          ],
      },
      {
          "testcase_name":
              "three_ps_three_subnetworks_no_dedicated_parameter_servers",
          "num_ps":
              3,
          "num_subnetworks":
              3,
          "dedicate_parameter_servers":
              False,
          "want_variable_devices": [
              ["", "", "", ""],
              ["", "", "", ""],
              ["", "", "", ""],
          ],
      },
      {
          "testcase_name":
              "four_ps_three_subnetworks",
          "num_ps":
              4,
          "num_subnetworks":
              3,
          "want_variable_devices": [
              [
                  "/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:2", "/job:ps/task:2", "/job:ps/task:2",
                  "/job:ps/task:2"
              ],
              [
                  "/job:ps/task:3", "/job:ps/task:3", "/job:ps/task:3",
                  "/job:ps/task:3"
              ],
          ],
      },
      {
          "testcase_name":
              "five_ps_three_subnetworks",
          "num_ps":
              5,
          "num_subnetworks":
              3,
          "want_variable_devices": [
              [
                  "/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:2", "/job:ps/task:3", "/job:ps/task:3",
                  "/job:ps/task:2"
              ],
              [
                  "/job:ps/task:4", "/job:ps/task:4", "/job:ps/task:4",
                  "/job:ps/task:4"
              ],
          ],
      },
      {
          "testcase_name":
              "six_ps_three_subnetworks",
          "num_ps":
              6,
          "num_subnetworks":
              3,
          "want_variable_devices": [
              [
                  "/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:0",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:2", "/job:ps/task:3", "/job:ps/task:3",
                  "/job:ps/task:2"
              ],
              [
                  "/job:ps/task:5", "/job:ps/task:4", "/job:ps/task:4",
                  "/job:ps/task:5"
              ],
          ],
      },
      {
          "testcase_name":
              "seven_ps_three_subnetworks",
          "num_ps":
              7,
          "num_subnetworks":
              3,
          "want_variable_devices": [
              [
                  "/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:2",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:3", "/job:ps/task:4", "/job:ps/task:4",
                  "/job:ps/task:3"
              ],
              [
                  "/job:ps/task:6", "/job:ps/task:5", "/job:ps/task:5",
                  "/job:ps/task:6"
              ],
          ],
      },
      {
          "testcase_name":
              "eight_ps_three_subnetworks",
          "num_ps":
              8,
          "num_subnetworks":
              3,
          "want_variable_devices": [
              [
                  "/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:2",
                  "/job:ps/task:0"
              ],
              [
                  "/job:ps/task:4", "/job:ps/task:5", "/job:ps/task:5",
                  "/job:ps/task:4"
              ],
              [
                  "/job:ps/task:7", "/job:ps/task:6", "/job:ps/task:6",
                  "/job:ps/task:7"
              ],
          ],
      },
  )
  @test_util.run_in_graph_and_eager_modes
  def test_device_methods(self,
                          num_ps,
                          num_subnetworks,
                          want_variable_devices,
                          dedicate_parameter_servers=True):
    """Checks that variables land on each subnetwork's dedicated ps tasks."""
    with context.graph_mode():
      x = tf.constant([[1., 0.]])
      strategy = RoundRobinStrategy(
          dedicate_parameter_servers=dedicate_parameter_servers)
      strategy.config = ParameterServerConfig(num_ps)
      variable_devices = []
      for i in range(num_subnetworks):
        with strategy.subnetwork_devices(num_subnetworks, i):
          subnetwork = tf.keras.Sequential()
          subnetwork.add(tf.keras.layers.Dense(4))
          subnetwork.add(tf.keras.layers.Dense(3))
          subnetwork(x)
          variable_devices.append([w.op.device for w in subnetwork.weights])
      self.assertEqual(want_variable_devices, variable_devices)
# Standard TensorFlow test entry point: discovers and runs this module's tests.
if __name__ == "__main__":
  tf.test.main()
| 19,604 | 33.334501 | 80 | py |
adanet | adanet-master/adanet/distributed/devices.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Device placement functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import hashlib
class _OpNameHashStrategy(object):
"""Returns the ps task index for placement using a hash of the op name."""
def __init__(self, num_tasks):
"""Create a new `_OpNameHashStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
"""
self._num_tasks = num_tasks
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Hashes the op name and assigns it to a ps task modulo the number of tasks.
This ensures that variables with the same name are always placed on the same
parameter server.
Args:
op: An `Operation` to be placed on ps.
Returns:
The ps task index to use for the `Operation`.
"""
hashed = int(hashlib.sha256(op.name.encode("utf-8")).hexdigest(), 16)
return hashed % self._num_tasks
@contextlib.contextmanager
def monkey_patch_default_variable_placement_strategy():
  """Temporarily replaces the default variable placement strategy.

  tf.train.replica_device_setter uses this strategy. Placing by hashed op name
  allows workers to have different graphs from the chief while still agreeing
  on where each variable lives.

  Yields:
    A context with the monkey-patched default variable placement strategy.
  """
  # Import here to avoid strict BUILD deps check.
  from tensorflow.python.training import device_setter  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
  original_strategy = device_setter._RoundRobinStrategy  # pylint: disable=protected-access
  device_setter._RoundRobinStrategy = _OpNameHashStrategy  # pylint: disable=protected-access
  try:
    yield
  finally:
    # Always restore the original strategy, even if the body raised.
    device_setter._RoundRobinStrategy = original_strategy  # pylint: disable=protected-access
| 2,402 | 31.917808 | 120 | py |
adanet | adanet-master/adanet/distributed/devices_test.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Device placement function tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet.distributed.devices import monkey_patch_default_variable_placement_strategy
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class DevicesTest(parameterized.TestCase, tf.test.TestCase):
  """Covers the monkey-patched default variable placement strategy."""

  @test_util.run_in_graph_and_eager_modes
  def test_monkey_patch_default_variable_placement_strategy_no_ps(self):
    """With zero ps tasks the device setter returns no device function."""
    with context.graph_mode():
      with monkey_patch_default_variable_placement_strategy():
        device_fn = tf.compat.v1.train.replica_device_setter(ps_tasks=0)
    self.assertIsNone(device_fn)

  # before_want_ps: placement with the stock round-robin strategy (creation
  # order); after_want_ps: placement with the name-hash strategy patched in.
  @parameterized.named_parameters(
      {
          "testcase_name":
              "one_ps",
          "num_tasks":
              1,
          "op_names": ["foo", "bar", "baz"],
          "before_want_ps":
              ["/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0"],
          "after_want_ps":
              ["/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0"],
      }, {
          "testcase_name":
              "three_ps",
          "num_tasks":
              3,
          "op_names": ["foo", "bar", "baz"],
          "before_want_ps":
              ["/job:ps/task:0", "/job:ps/task:1", "/job:ps/task:2"],
          "after_want_ps":
              ["/job:ps/task:2", "/job:ps/task:0", "/job:ps/task:1"],
      }, {
          "testcase_name":
              "reverse_three_ps",
          "num_tasks":
              3,
          "op_names": ["baz", "bar", "foo"],
          "before_want_ps":
              ["/job:ps/task:0", "/job:ps/task:1", "/job:ps/task:2"],
          "after_want_ps":
              ["/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:2"],
      }, {
          "testcase_name":
              "six_ps",
          "num_tasks":
              6,
          "op_names": ["foo", "bar", "baz"],
          "before_want_ps":
              ["/job:ps/task:0", "/job:ps/task:1", "/job:ps/task:2"],
          "after_want_ps":
              ["/job:ps/task:2", "/job:ps/task:3", "/job:ps/task:4"],
      }, {
          "testcase_name":
              "reverse_six_ps",
          "num_tasks":
              6,
          "op_names": ["baz", "bar", "foo"],
          "before_want_ps":
              ["/job:ps/task:0", "/job:ps/task:1", "/job:ps/task:2"],
          "after_want_ps":
              ["/job:ps/task:4", "/job:ps/task:3", "/job:ps/task:2"],
      })
  @test_util.run_in_graph_and_eager_modes
  def test_monkey_patch_default_variable_placement_strategy(
      self, num_tasks, op_names, before_want_ps, after_want_ps):
    """Checks that ps placement is based on var name."""
    with context.graph_mode():
      var_ops = [tf.Variable(0., name=op_name).op for op_name in op_names]
      before_device_fn = tf.compat.v1.train.replica_device_setter(
          ps_tasks=num_tasks)
      self.assertEqual(before_want_ps, [before_device_fn(op) for op in var_ops])
      with monkey_patch_default_variable_placement_strategy():
        after_device_fn = tf.compat.v1.train.replica_device_setter(
            ps_tasks=num_tasks)
        self.assertEqual(after_want_ps, [after_device_fn(op) for op in var_ops])
      # Check that monkey-patch is only for the context.
      before_device_fn = tf.compat.v1.train.replica_device_setter(
          ps_tasks=num_tasks)
      self.assertEqual(before_want_ps, [before_device_fn(op) for op in var_ops])
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| 4,283 | 36.252174 | 87 | py |
adanet | adanet-master/adanet/distributed/__init__.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The `adanet.distributed` package.
This package provides methods for distributing computation using the TensorFlow
computation graph.
"""
# TODO: Add more details documentation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet.distributed.placement import PlacementStrategy
from adanet.distributed.placement import ReplicationStrategy
from adanet.distributed.placement import RoundRobinStrategy
# Names exported as the public API of `adanet.distributed`.
__all__ = [
    "PlacementStrategy",
    "ReplicationStrategy",
    "RoundRobinStrategy",
]
| 1,159 | 32.142857 | 74 | py |
adanet | adanet-master/adanet/autoensemble/tpu_estimator_test.py | # Lint as: python3
"""Tests for AutoEnsembleTPUEstimator.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import shutil
import sys
from absl import flags
from absl.testing import parameterized
from adanet import tf_compat
from adanet.autoensemble.estimator import AutoEnsembleTPUEstimator
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
try:
from tensorflow_estimator.contrib.estimator.python.estimator import head as head_lib
except (AttributeError, ImportError):
head_lib = None
from tensorflow_estimator.python.estimator.canned import dnn
# pylint: enable=g-direct-tensorflow-import
# pylint: enable=g-import-not-at-top
class _DNNTPUEstimator(tf.compat.v1.estimator.tpu.TPUEstimator):
  """A canned DNN wrapped in a `TPUEstimator`, used as an ensembling candidate.

  Delegates its `model_fn` to the canned `dnn._dnn_model_fn` so the test can
  ensemble TPU-capable candidates alongside regular estimators.
  """

  def __init__(self,
               head,
               hidden_units,
               feature_columns,
               optimizer,
               use_tpu,
               embedding_config_spec=None):
    # PER_HOST_V2 sharding is required for per-core input pipelines.
    config = tf.compat.v1.estimator.tpu.RunConfig(
        tpu_config=tf.compat.v1.estimator.tpu.TPUConfig(
            per_host_input_for_training=tf.compat.v1.estimator.tpu
            .InputPipelineConfig.PER_HOST_V2))

    def model_fn(features, labels, mode=None, params=None, config=None):
      # TPUEstimator passes `params`; the canned model_fn does not take it.
      del params  # Unused.

      return dnn._dnn_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          hidden_units=hidden_units,
          feature_columns=tuple(feature_columns or []),
          optimizer=optimizer,
          config=config,
          use_tpu=use_tpu)

    super(_DNNTPUEstimator, self).__init__(
        model_fn=model_fn,
        config=config,
        train_batch_size=64,
        use_tpu=use_tpu,
        embedding_config_spec=embedding_config_spec)
class AutoEnsembleTPUEstimatorTest(parameterized.TestCase, tf.test.TestCase):
  """Lifecycle tests for `AutoEnsembleTPUEstimator`."""

  def setUp(self):
    super(AutoEnsembleTPUEstimatorTest, self).setUp()

    # Setup and cleanup test directory.
    # Flags are not automatically parsed at this point.
    flags.FLAGS(sys.argv)
    self.test_subdirectory = os.path.join(flags.FLAGS.test_tmpdir, self.id())
    shutil.rmtree(self.test_subdirectory, ignore_errors=True)
    os.makedirs(self.test_subdirectory)

  def tearDown(self):
    super(AutoEnsembleTPUEstimatorTest, self).tearDown()
    shutil.rmtree(self.test_subdirectory, ignore_errors=True)

  @parameterized.named_parameters(
      {
          "testcase_name": "not_use_tpu",
          "use_tpu": False,
      },
  )
  # TODO: Support V2 head and optimizer in AdaNet TPU.
  @tf_compat.skip_for_tf2
  def test_auto_ensemble_estimator_lifecycle(self, use_tpu):
    """Trains, evaluates, predicts with, and exports the ensemble on XOR."""
    head = head_lib.regression_head()
    feature_columns = [tf.feature_column.numeric_column("xor", shape=2)]

    def optimizer_fn():
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=.01)
      if use_tpu:
        # TPU training requires cross-shard gradient aggregation.
        optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)
      return optimizer

    # Mix of TPUEstimator and plain Estimator candidates.
    candidate_pool = {
        "tpu_estimator_dnn":
            _DNNTPUEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=optimizer_fn,
                hidden_units=[3],
                use_tpu=True),
        "tpu_estimator_wider_dnn":
            _DNNTPUEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=optimizer_fn,
                hidden_units=[6],
                use_tpu=True),
        "estimator_dnn":
            tf.compat.v1.estimator.DNNEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=optimizer_fn,
                hidden_units=[3]),
        "estimator_linear":
            tf.compat.v1.estimator.LinearEstimator(
                head=head,
                feature_columns=feature_columns,
                optimizer=optimizer_fn),
    }
    run_config = tf.compat.v1.estimator.tpu.RunConfig(
        master="", tf_random_seed=42)
    estimator = AutoEnsembleTPUEstimator(
        head=head,
        candidate_pool=candidate_pool,
        max_iteration_steps=10,
        model_dir=self.test_subdirectory,
        config=run_config,
        use_tpu=use_tpu,
        train_batch_size=4,
        force_grow=True)

    features = {"xor": [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]}
    labels = [[0.], [1.], [1.], [0.]]

    def train_input_fn(params):
      del params  # Unused.

      input_features = {}
      for key, feature in features.items():
        input_features[key] = tf.constant(feature, name=key)
      input_labels = tf.constant(labels, name="labels")
      return input_features, input_labels

    def test_input_fn(params):
      del params  # Unused.

      return tf.compat.v1.data.Dataset.from_tensor_slices(
          [features["xor"]]).map(lambda f: {"xor": f})

    # Train for three iterations.
    estimator.train(input_fn=train_input_fn, max_steps=30)

    # Evaluate.
    eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
    self.assertAllClose(30, eval_results["global_step"])
    self.assertAllClose(0.315863, eval_results["loss"], atol=.3)

    # Predict.
    predictions = estimator.predict(input_fn=test_input_fn)
    # We need to iterate over all the predictions before moving on, otherwise
    # the TPU will not be shut down.
    for prediction in predictions:
      self.assertIsNotNone(prediction["predictions"])

    # Export SavedModel.
    def serving_input_fn():
      """Input fn for serving export, starting from serialized example."""
      serialized_example = tf.compat.v1.placeholder(
          dtype=tf.string, shape=(None), name="serialized_example")
      for key in features:
        features[key] = tf.constant([[0., 0.], [0., 0.]])
      return tf.estimator.export.ServingInputReceiver(
          features=features, receiver_tensors=serialized_example)

    export_dir_base = os.path.join(self.test_subdirectory, "export")
    # `export_saved_model` replaced the deprecated `export_savedmodel` in
    # newer TF releases; fall back when the new name is unavailable.
    export_saved_model_fn = getattr(estimator, "export_saved_model", None)
    if not callable(export_saved_model_fn):
      export_saved_model_fn = estimator.export_savedmodel
    export_saved_model_fn(
        export_dir_base=export_dir_base,
        serving_input_receiver_fn=serving_input_fn)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| 7,018 | 32.583732 | 86 | py |
adanet | adanet-master/adanet/autoensemble/estimator_v2_test.py | """Tests for AdaNet AutoEnsembleEstimator in TF 2.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import sys
from absl import flags
from absl.testing import parameterized
from adanet import tf_compat
from adanet.autoensemble.estimator import AutoEnsembleEstimator
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import regression_head
# pylint: enable=g-direct-tensorflow-import
class AutoEnsembleEstimatorV2Test(parameterized.TestCase, tf.test.TestCase):
  """Lifecycle tests for `AutoEnsembleEstimator` under TF 2 behavior."""

  def setUp(self):
    super(AutoEnsembleEstimatorV2Test, self).setUp()

    # Setup and cleanup test directory.
    # Flags are not automatically parsed at this point.
    flags.FLAGS(sys.argv)
    self.test_subdirectory = os.path.join(flags.FLAGS.test_tmpdir, self.id())
    shutil.rmtree(self.test_subdirectory, ignore_errors=True)
    os.makedirs(self.test_subdirectory)

  def tearDown(self):
    super(AutoEnsembleEstimatorV2Test, self).tearDown()
    shutil.rmtree(self.test_subdirectory, ignore_errors=True)

  # The candidate pool is a factory of factories: the outer lambda binds the
  # head/columns/optimizer built inside the test, the inner one takes the
  # per-iteration `config` required by AutoEnsembleEstimator.
  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          "testcase_name":
              "candidate_pool_lambda",
          "candidate_pool":
              lambda head, feature_columns, optimizer: lambda config: {
                  "dnn":
                      tf.compat.v2.estimator.DNNEstimator(
                          head=head,
                          feature_columns=feature_columns,
                          optimizer=optimizer,
                          hidden_units=[3],
                          config=config),
                  "linear":
                      tf.compat.v2.estimator.LinearEstimator(
                          head=head,
                          feature_columns=feature_columns,
                          optimizer=optimizer,
                          config=config),
              },
          "want_loss":
              .209,
      },)
  # pylint: enable=g-long-lambda
  @tf_compat.skip_for_tf1
  def test_auto_ensemble_estimator_lifecycle(self,
                                             candidate_pool,
                                             want_loss,
                                             max_train_steps=30):
    """Trains, evaluates, predicts with, and exports the estimator."""
    features = {"input_1": [[1., 0.]]}
    labels = [[1.]]

    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    head = regression_head.RegressionHead()

    # Always create optimizers in a lambda to prevent error like:
    # `RuntimeError: Cannot set `iterations` to a new Variable after the
    # Optimizer weights have been created`
    optimizer = lambda: tf.keras.optimizers.SGD(lr=.01)
    feature_columns = [tf.feature_column.numeric_column("input_1", shape=[2])]

    def train_input_fn():
      input_features = {}
      for key, feature in features.items():
        input_features[key] = tf.constant(feature, name=key)
      input_labels = tf.constant(labels, name="labels")
      return input_features, input_labels

    def test_input_fn():
      dataset = tf.data.Dataset.from_tensors([tf.constant(features["input_1"])])
      input_features = tf.compat.v1.data.make_one_shot_iterator(
          dataset).get_next()
      return {"input_1": input_features}, None

    estimator = AutoEnsembleEstimator(
        head=head,
        candidate_pool=candidate_pool(head, feature_columns, optimizer),
        max_iteration_steps=10,
        force_grow=True,
        model_dir=self.test_subdirectory,
        config=run_config)

    # Train for three iterations.
    estimator.train(input_fn=train_input_fn, max_steps=max_train_steps)

    # Evaluate.
    eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
    self.assertAllClose(max_train_steps, eval_results["global_step"])
    self.assertAllClose(want_loss, eval_results["loss"], atol=.3)

    # Predict.
    predictions = estimator.predict(input_fn=test_input_fn)
    for prediction in predictions:
      self.assertIsNotNone(prediction["predictions"])

    # Export SavedModel.
    def serving_input_fn():
      """Input fn for serving export, starting from serialized example."""
      serialized_example = tf.compat.v1.placeholder(
          dtype=tf.string, shape=(None), name="serialized_example")
      for key, value in features.items():
        features[key] = tf.constant(value)
      return export.SupervisedInputReceiver(
          features=features,
          labels=tf.constant(labels),
          receiver_tensors=serialized_example)

    export_dir_base = os.path.join(self.test_subdirectory, "export")
    estimator.export_saved_model(
        export_dir_base=export_dir_base,
        serving_input_receiver_fn=serving_input_fn)
if __name__ == "__main__":
  # These tests exercise TF2-only code paths; enable V2 behavior first.
  tf.enable_v2_behavior()
  tf.test.main()
| 5,484 | 35.324503 | 80 | py |
adanet | adanet-master/adanet/autoensemble/common.py | """Common utilities for AutoEnsemblers.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import inspect
from adanet import subnetwork as subnetwork_lib
from adanet import tf_compat
import tensorflow.compat.v2 as tf
def _default_logits(estimator_spec):
  """Extracts logits from an `EstimatorSpec`'s predictions by convention.

  Prefers the 'logits' prediction key, then 'predictions'; when neither is
  present (or predictions is not a dict), returns the predictions object
  itself.

  Args:
    estimator_spec: The candidate's `tf.estimator.EstimatorSpec`.

  Returns:
    The tensor (or dict) to treat as the subnetwork's logits.
  """
  # Imported lazily to avoid a module-level dependency on TF internals.
  from tensorflow.python.estimator.canned import prediction_keys  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
  predictions = estimator_spec.predictions
  if not isinstance(predictions, dict):
    return predictions
  keys = prediction_keys.PredictionKeys
  for preferred_key in (keys.LOGITS, keys.PREDICTIONS):
    if preferred_key in predictions:
      return predictions[preferred_key]
  return predictions
class _SecondaryTrainOpRunnerHook(tf_compat.SessionRunHook):
  """A hook for running a train op separate from the main session run call."""

  def __init__(self, train_op):
    """Initializes a `_SecondaryTrainOpRunnerHook`.

    Args:
      train_op: The secondary train op to execute before runs.
    """
    self._train_op = train_op

  def before_run(self, run_context):
    # Run the secondary train op in its own session.run call so failures are
    # surfaced here rather than in the monitored session's main run.
    run_context.session.run(self._train_op)
class AutoEnsembleSubestimator(  # pylint: disable=g-classes-have-attributes
    collections.namedtuple("AutoEnsembleSubestimator",
                           ["estimator", "train_input_fn", "prediction_only"])):
  """A subestimator to train and consider for ensembling.

  Args:
    estimator: A `tf.estimator.Estimator` or `tf.estimator.tpu.TPUEstimator`
      instance to consider for ensembling.
    train_input_fn: A function that provides input data for training as
      minibatches. It can be used to implement ensemble methods like bootstrap
      aggregating (a.k.a bagging) where each subnetwork trains on different
      slices of the training data. The function should construct and return one
      of the following:
      * A `tf.data.Dataset` object: Outputs of `Dataset` object must be a tuple
        `(features, labels)` with same constraints as below. NOTE: A Dataset
          must return *at least* two batches before hitting the end-of-input,
          otherwise all of training terminates.
          TODO: Figure out how to handle single-batch datasets.
      * A tuple `(features, labels)`: Where `features` is a `tf.Tensor` or a
        dictionary of string feature name to `Tensor` and `labels` is a
        `Tensor` or a dictionary of string label name to `Tensor`. Both
        `features` and `labels` are consumed by `estimator#model_fn`. They
        should satisfy the expectation of `estimator#model_fn` from inputs.
    prediction_only: If set to True, only runs the subestimator in prediction
      mode.

  Returns:
    An `AutoEnsembleSubestimator` instance to be auto-ensembled.
  """

  # pylint: enable=g-classes-have-attributes
  def __new__(cls, estimator, train_input_fn=None, prediction_only=False):
    # Defaults make `train_input_fn` optional (falls back to the parent
    # estimator's input_fn) and train the candidate unless told otherwise.
    return super(AutoEnsembleSubestimator,
                 cls).__new__(cls, estimator, train_input_fn, prediction_only)
class _BuilderFromSubestimator(subnetwork_lib.Builder):
  """An `adanet.Builder` from a :class:`tf.estimator.Estimator`."""

  def __init__(self, name, subestimator, logits_fn, last_layer_fn, config):
    # `subestimator` is a factory: config -> AutoEnsembleSubestimator.
    self._name = name
    self._subestimator = subestimator
    self._logits_fn = logits_fn
    self._last_layer_fn = last_layer_fn
    self._config = config

  @property
  def name(self):
    # Unique name of this candidate within an AdaNet iteration.
    return self._name

  def _call_model_fn(self, subestimator, features, labels, mode, summary):
    """Calls the wrapped estimator's model_fn and adapts its outputs.

    Returns:
      Tuple of (logits, last_layer, `subnetwork_lib.TrainOpSpec`,
      local_init_op-or-None) extracted from the resulting `EstimatorSpec`.
    """
    with summary.current_scope():
      model_fn = subestimator.estimator.model_fn
      estimator_spec = model_fn(
          features=features, labels=labels, mode=mode, config=self._config)
      logits = self._logits_fn(estimator_spec=estimator_spec)
      # When no last_layer_fn is given, the logits double as the last layer.
      last_layer = logits
      if self._last_layer_fn:
        last_layer = self._last_layer_fn(estimator_spec=estimator_spec)

      # Preserve the subestimator's local_init_op (e.g. table initializers)
      # so the ensemble's session can run it.
      if estimator_spec.scaffold and estimator_spec.scaffold.local_init_op:
        local_init_op = estimator_spec.scaffold.local_init_op
      else:
        local_init_op = None

      train_op = subnetwork_lib.TrainOpSpec(
          estimator_spec.train_op,
          chief_hooks=estimator_spec.training_chief_hooks,
          hooks=estimator_spec.training_hooks)
      return logits, last_layer, train_op, local_init_op

  def build_subnetwork(self,
                       features,
                       labels,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble,
                       config=None):
    """Builds the candidate's `adanet.Subnetwork` from its model_fn."""
    # We don't need an EVAL mode since AdaNet takes care of evaluation for us.
    subestimator = self._subestimator(config)
    mode = tf.estimator.ModeKeys.PREDICT
    if training and not subestimator.prediction_only:
      mode = tf.estimator.ModeKeys.TRAIN

    # Call in template to ensure that variables are created once and reused.
    call_model_fn_template = tf.compat.v1.make_template("model_fn",
                                                        self._call_model_fn)
    subestimator_features, subestimator_labels = features, labels
    local_init_ops = []
    if training and subestimator.train_input_fn:
      # TODO: Consider tensorflow_estimator/python/estimator/util.py.
      inputs = subestimator.train_input_fn()
      if isinstance(inputs, (tf_compat.DatasetV1, tf_compat.DatasetV2)):
        subestimator_features, subestimator_labels = (
            tf_compat.make_one_shot_iterator(inputs).get_next())
      else:
        subestimator_features, subestimator_labels = inputs

      # Construct subnetwork graph first because of dependencies on scope.
      _, _, bagging_train_op_spec, sub_local_init_op = call_model_fn_template(
          subestimator, subestimator_features, subestimator_labels, mode,
          summary)
      # Graph for ensemble learning gets model_fn_1 for scope.
      logits, last_layer, _, ensemble_local_init_op = call_model_fn_template(
          subestimator, features, labels, mode, summary)

      if sub_local_init_op:
        local_init_ops.append(sub_local_init_op)
      if ensemble_local_init_op:
        local_init_ops.append(ensemble_local_init_op)

      # Run train op in a hook so that exceptions can be intercepted by the
      # AdaNet framework instead of the Estimator's monitored training session.
      hooks = bagging_train_op_spec.hooks + (_SecondaryTrainOpRunnerHook(
          bagging_train_op_spec.train_op),)
      train_op_spec = subnetwork_lib.TrainOpSpec(
          train_op=tf.no_op(),
          chief_hooks=bagging_train_op_spec.chief_hooks,
          hooks=hooks)
    else:
      logits, last_layer, train_op_spec, local_init_op = call_model_fn_template(
          subestimator, features, labels, mode, summary)
      if local_init_op:
        local_init_ops.append(local_init_op)

    # TODO: Replace with variance complexity measure.
    complexity = tf.constant(0.)
    return subnetwork_lib.Subnetwork(
        logits=logits,
        last_layer=last_layer,
        shared={"train_op": train_op_spec},
        complexity=complexity,
        local_init_ops=local_init_ops)

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    # The train op was already created by the wrapped model_fn; reuse it.
    return subnetwork.shared["train_op"]
def _convert_to_subestimator(candidate):
"""Converts a candidate to an AutoEnsembleSubestimator."""
if callable(candidate):
return candidate
if isinstance(candidate, AutoEnsembleSubestimator):
return lambda config: candidate
from tensorflow_estimator.python.estimator import estimator as estimator_lib # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
if isinstance(candidate,
(estimator_lib.Estimator, estimator_lib.EstimatorV2)):
return lambda config: AutoEnsembleSubestimator(candidate)
raise ValueError(
"subestimator in candidate_pool must have type tf.estimator.Estimator or "
"adanet.AutoEnsembleSubestimator but got {}".format(candidate.__class__))
class _GeneratorFromCandidatePool(subnetwork_lib.Generator):
  """An `adanet.Generator` from a pool of `Estimator` and `Model` instances."""

  def __init__(self, candidate_pool, logits_fn, last_layer_fn):
    """Initializes the generator.

    Args:
      candidate_pool: A dict of name to candidate, an iterable of candidates,
        or a callable returning either of those given a `config` (and
        optionally an `iteration_number`).
      logits_fn: Function extracting logits from an `EstimatorSpec`; defaults
        to `_default_logits` when `None`.
      last_layer_fn: Optional function extracting the last layer from an
        `EstimatorSpec`.
    """
    self._candidate_pool = candidate_pool
    if logits_fn is None:
      logits_fn = _default_logits
    self._logits_fn = logits_fn
    self._last_layer_fn = last_layer_fn

  def generate_candidates(self, previous_ensemble, iteration_number,
                          previous_ensemble_reports, all_reports, config):
    """Returns one `_BuilderFromSubestimator` per pool candidate."""
    assert config
    builders = []
    candidate_pool = self._maybe_call_candidate_pool(config, iteration_number)

    if isinstance(candidate_pool, dict):
      # Dict pools use the key as the candidate name; sort for determinism.
      for name in sorted(candidate_pool):
        builders.append(
            _BuilderFromSubestimator(
                name,
                _convert_to_subestimator(candidate_pool[name]),
                logits_fn=self._logits_fn,
                last_layer_fn=self._last_layer_fn,
                config=config))
      return builders

    # List pools derive a unique name from the class name and position.
    for i, estimator in enumerate(candidate_pool):
      name = "{class_name}{index}".format(
          class_name=estimator.__class__.__name__, index=i)
      builders.append(
          _BuilderFromSubestimator(
              name,
              _convert_to_subestimator(estimator),
              logits_fn=self._logits_fn,
              last_layer_fn=self._last_layer_fn,
              config=config))
    return builders

  def _maybe_call_candidate_pool(self, config, iteration_number):
    """Resolves the pool, calling it when it is a factory function.

    `iteration_number` is forwarded only when the factory accepts it, for
    backwards compatibility with single-argument (`config`-only) factories.
    """
    if not callable(self._candidate_pool):
      return self._candidate_pool

    # Use `inspect.signature` instead of peeking at `__code__` via the
    # undocumented `inspect.getargs`: callables without a `__code__`
    # attribute (e.g. `functools.partial` objects or instances defining
    # `__call__`) pass the `callable()` check above but would otherwise
    # raise AttributeError here.
    try:
      parameters = inspect.signature(self._candidate_pool).parameters
    except (TypeError, ValueError):
      # Signature could not be introspected; fall back to config-only call.
      parameters = {}
    if "iteration_number" in parameters:
      # TODO: Make the "config" argument optional using introspection.
      return self._candidate_pool(
          config=config, iteration_number=iteration_number)
    return self._candidate_pool(config=config)
| 10,818 | 39.219331 | 144 | py |
adanet | adanet-master/adanet/autoensemble/__init__.py | """The TensorFlow AdaNet autoensemble module.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet.autoensemble.common import AutoEnsembleSubestimator
from adanet.autoensemble.estimator import AutoEnsembleEstimator
from adanet.autoensemble.estimator import AutoEnsembleTPUEstimator
# Names exported as the public API of `adanet.autoensemble`.
__all__ = [
    "AutoEnsembleEstimator",
    "AutoEnsembleSubestimator",
    "AutoEnsembleTPUEstimator",
]
| 1,047 | 32.806452 | 72 | py |
adanet | adanet-master/adanet/autoensemble/estimator.py | """An estimator that learns to ensemble.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet import core
from adanet.autoensemble.common import _GeneratorFromCandidatePool
import tensorflow.compat.v2 as tf
class AutoEnsembleEstimator(core.Estimator):  # pylint: disable=g-classes-have-attributes
  # pyformat: disable
  """A :class:`tf.estimator.Estimator` that learns to ensemble models.

  Uses the AdaNet algorithm to grow an ensemble iteratively, at each iteration
  training every candidate subestimator from `candidate_pool` and selecting
  the ones that most improve the ensemble.

  .. code-block:: python

    import adanet
    import tensorflow as tf

    head = MultiClassHead(n_classes=10)
    estimator = adanet.AutoEnsembleEstimator(
        head=head,
        candidate_pool=lambda config: {
            "linear":
                tf.estimator.LinearEstimator(
                    head=head, feature_columns=..., config=config,
                    optimizer=...),
            "dnn":
                tf.estimator.DNNEstimator(
                    head=head, feature_columns=..., config=config,
                    optimizer=..., hidden_units=[1000, 500, 100])},
        max_iteration_steps=50)

    estimator.train(input_fn=input_fn_train, steps=100)
    metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
    predictions = estimator.predict(input_fn=input_fn_predict)

  To train candidates on different data slices (e.g. bagging), wrap them in
  :class:`adanet.AutoEnsembleSubestimator` objects carrying their own
  `train_input_fn`.

  Args:
    head: A :class:`tf.contrib.estimator.Head` instance for computing loss and
      evaluation metrics for every candidate.
    candidate_pool: List or dict (name -> candidate) of
      :class:`tf.estimator.Estimator` and
      :class:`AutoEnsembleSubestimator` objects to consider for ensembling at
      each iteration. A dict key becomes the candidate's name. May also be a
      function of a `config` argument returning the above, in case the
      candidates must be re-instantiated at each adanet iteration.
    max_iteration_steps: Total number of steps for which to train candidates
      per iteration. If `OutOfRange` or `StopIteration` occurs in the middle,
      training stops before `max_iteration_steps` steps.
    ensemblers: See :class:`adanet.Estimator`.
    ensemble_strategies: See :class:`adanet.Estimator`.
    logits_fn: Function mapping an `estimator_spec` keyword argument to the
      candidate's logits (`tf.Tensor` or dict of string to logits for
      multi-head). When `None`, defaults to `estimator_spec.predictions` when
      they are a :class:`tf.Tensor`, or to their 'logits' entry when they are
      a dict.
    last_layer_fn: Optional function mapping an `estimator_spec` keyword
      argument to the candidate's last layer, useful for learning ensembles or
      exporting embeddings. When `None`, the logits are used as the last
      layer.
    evaluator: See :class:`adanet.Estimator`.
    metric_fn: See :class:`adanet.Estimator`.
    force_grow: See :class:`adanet.Estimator`.
    adanet_loss_decay: See :class:`adanet.Estimator`.
    worker_wait_timeout_secs: See :class:`adanet.Estimator`.
    model_dir: See :class:`adanet.Estimator`.
    config: See :class:`adanet.Estimator`.
    debug: See :class:`adanet.Estimator`.
    enable_ensemble_summaries: See :class:`adanet.Estimator`.
    enable_subnetwork_summaries: See :class:`adanet.Estimator`.
    global_step_combiner_fn: See :class:`adanet.Estimator`.
    max_iterations: See :class:`adanet.Estimator`.
    replay_config: See :class:`adanet.Estimator`.
    **kwargs: Extra keyword args passed to the parent.

  Returns:
    An :class:`adanet.AutoEnsembleEstimator` instance.

  Raises:
    ValueError: If any of the candidates in `candidate_pool` are not
      :class:`tf.estimator.Estimator` instances.
  """
  # pyformat: enable

  def __init__(self,
               head,
               candidate_pool,
               max_iteration_steps,
               ensemblers=None,
               ensemble_strategies=None,
               logits_fn=None,
               last_layer_fn=None,
               evaluator=None,
               metric_fn=None,
               force_grow=False,
               adanet_loss_decay=.9,
               worker_wait_timeout_secs=7200,
               model_dir=None,
               config=None,
               debug=False,
               enable_ensemble_summaries=True,
               enable_subnetwork_summaries=True,
               global_step_combiner_fn=tf.math.reduce_mean,
               max_iterations=None,
               replay_config=None,
               **kwargs):
    # Wrap the candidate pool in a Generator that yields one Builder per
    # candidate subestimator at each AdaNet iteration.
    candidate_generator = _GeneratorFromCandidatePool(candidate_pool,
                                                      logits_fn, last_layer_fn)
    super(AutoEnsembleEstimator, self).__init__(
        head=head,
        subnetwork_generator=candidate_generator,
        max_iteration_steps=max_iteration_steps,
        ensemblers=ensemblers,
        ensemble_strategies=ensemble_strategies,
        evaluator=evaluator,
        metric_fn=metric_fn,
        force_grow=force_grow,
        adanet_loss_decay=adanet_loss_decay,
        worker_wait_timeout_secs=worker_wait_timeout_secs,
        model_dir=model_dir,
        config=config,
        debug=debug,
        enable_ensemble_summaries=enable_ensemble_summaries,
        enable_subnetwork_summaries=enable_subnetwork_summaries,
        global_step_combiner_fn=global_step_combiner_fn,
        max_iterations=max_iterations,
        replay_config=replay_config,
        **kwargs)
class AutoEnsembleTPUEstimator(core.TPUEstimator):  # pylint: disable=g-classes-have-attributes
  # pyformat: disable
  """A :class:`tf.estimator.tpu.TPUEstimator` that learns to ensemble models.

  Specifically, it learns to ensemble models from a candidate pool using the
  Adanet algorithm.

  This estimator is capable of training and evaluating on TPU. It can ensemble
  both :class:`tf.estimator.tpu.TPUEstimator` candidates as well as regular
  :class:`tf.estimator.Estimator` candidates, as long as these candidates are
  TPU compatible.

  Note the following restrictions compared to AutoEnsembleEstimator:
    * All candidates must wrap their optimizers with a
      :class:`tf.tpu.CrossShardOptimizer`.
    * The `input_fn` must expose a `params` argument.
    * The `model_fn` of :class:`tf.estimator.tpu.TPUEstimator` candidates must
      also expose a `params` argument.

  WARNING: This Estimator is a work in progress and the API could change at any
  moment. May not support all AutoEnsembleEstimator features.

  .. code-block:: python

      # A simple example of learning to ensemble linear and neural network
      # models on TPU.

      import adanet
      import tensorflow as tf

      feature_columns = ...

      head = MultiClassHead(n_classes=10)

      # Learn to ensemble linear and DNN models.
      estimator = adanet.AutoEnsembleTPUEstimator(
          head=head,
          candidate_pool=lambda config: {
              "linear":
                  tf.estimator.LinearEstimator(
                      head=head,
                      feature_columns=feature_columns,
                      config=config,
                      optimizer=tf.tpu.CrossShardOptimizer(...)),
              "dnn":
                  tf.estimator.DNNEstimator(
                      head=head,
                      feature_columns=feature_columns,
                      config=config,
                      optimizer=tf.tpu.CrossShardOptimizer(...),
                      hidden_units=[1000, 500, 100])},
          max_iteration_steps=50)

      # Input builders
      def input_fn_train(params):
        # Returns tf.data.Dataset of (x, y) tuple where y represents label's
        # class index.
        pass
      def input_fn_eval(params):
        # Returns tf.data.Dataset of (x, y) tuple where y represents label's
        # class index.
        pass
      def input_fn_predict():
        # Returns tf.data.Dataset of (x, None) tuple.
        pass
      estimator.train(input_fn=input_fn_train, steps=100)
      metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
      predictions = estimator.predict(input_fn=input_fn_predict)

  Args:
    head: A :class:`tf.contrib.estimator.Head` instance for computing loss and
      evaluation metrics for every candidate.
    candidate_pool: List of :class:`tf.estimator.tpu.TPUEstimator` and
      :class:`AutoEnsembleSubestimator` objects, or dict of string name to
      :class:`tf.estimator.tpu.TPUEstimator` and
      :class:`AutoEnsembleSubestimator` objects that are candidate subestimators
      to ensemble at each iteration. The order does not directly affect which
      candidates will be included in the final ensemble, but will affect the
      name of the candidate. When using a dict, the string key becomes the
      candidate subestimator's name. Alternatively, this argument can be a
      function that takes a `config` argument and returns the aforementioned
      values in case the objects need to be re-instantiated at each adanet
      iteration.
    max_iteration_steps: See :class:`adanet.Estimator`.
    logits_fn: A function for fetching the subnetwork logits from a
      :class:`tf.estimator.EstimatorSpec`, which should obey the following
      signature:
        - `Args`: Can only have following argument:
          - estimator_spec: The candidate's :class:`tf.estimator.EstimatorSpec`.
        - `Returns`: Logits :class:`tf.Tensor` or dict of string to logits
          :class:`tf.Tensor` (for multi-head) for the candidate subnetwork
          extracted from the given `estimator_spec`. When `None`, it will
          default to returning `estimator_spec.predictions` when they are a
          :class:`tf.Tensor` or the :class:`tf.Tensor` for the key 'logits' when
          they are a dict of string to :class:`tf.Tensor`.
    last_layer_fn: An optional function for fetching the subnetwork last_layer
      from a :class:`tf.estimator.EstimatorSpec`, which should obey the
      following signature:
        - `Args`: Can only have following argument:
          - estimator_spec: The candidate's :class:`tf.estimator.EstimatorSpec`.
        - `Returns`: Last layer :class:`tf.Tensor` or dict of string to last
          layer :class:`tf.Tensor` (for multi-head) for the candidate subnetwork
          extracted from the given `estimator_spec`. The last_layer can be used
          for learning ensembles or exporting them as embeddings.
      When `None`, it will default to using the logits as the last_layer.
    ensemblers: See :class:`adanet.Estimator`.
    ensemble_strategies: See :class:`adanet.Estimator`.
    evaluator: See :class:`adanet.Estimator`.
    metric_fn: See :class:`adanet.Estimator`.
    force_grow: See :class:`adanet.Estimator`.
    adanet_loss_decay: See :class:`adanet.Estimator`.
    model_dir: See :class:`adanet.Estimator`.
    config: See :class:`adanet.Estimator`.
    use_tpu: See :class:`adanet.Estimator`.
    eval_on_tpu: See :class:`adanet.Estimator`.
    export_to_tpu: See :class:`adanet.Estimator`.
    train_batch_size: See :class:`adanet.Estimator`.
    eval_batch_size: See :class:`adanet.Estimator`.
    predict_batch_size: See :class:`adanet.Estimator`.
    embedding_config_spec: See :class:`adanet.Estimator`.
    debug: See :class:`adanet.Estimator`.
    enable_ensemble_summaries: See :class:`adanet.Estimator`.
    enable_subnetwork_summaries: See :class:`adanet.Estimator`.
    global_step_combiner_fn: See :class:`adanet.Estimator`.
    max_iterations: See :class:`adanet.Estimator`.
    replay_config: See :class:`adanet.Estimator`.
    **kwargs: Extra keyword args passed to the parent.

  Returns:
    An :class:`adanet.AutoEnsembleTPUEstimator` instance.

  Raises:
    ValueError: If any of the candidates in `candidate_pool` are not
      :class:`tf.estimator.Estimator` instances.
  """
  # pyformat: enable

  def __init__(self,
               head,
               candidate_pool,
               max_iteration_steps,
               ensemblers=None,
               ensemble_strategies=None,
               logits_fn=None,
               last_layer_fn=None,
               evaluator=None,
               metric_fn=None,
               force_grow=False,
               adanet_loss_decay=.9,
               model_dir=None,
               config=None,
               use_tpu=True,
               eval_on_tpu=True,
               export_to_tpu=True,
               train_batch_size=None,
               eval_batch_size=None,
               predict_batch_size=None,
               embedding_config_spec=None,
               debug=False,
               enable_ensemble_summaries=True,
               enable_subnetwork_summaries=True,
               global_step_combiner_fn=tf.math.reduce_mean,
               max_iterations=None,
               replay_config=None,
               **kwargs):
    # Adapt the user-supplied candidate pool into the subnetwork generator
    # interface expected by the core adanet TPU estimator.
    subnetwork_generator = _GeneratorFromCandidatePool(candidate_pool,
                                                       logits_fn, last_layer_fn)
    super(AutoEnsembleTPUEstimator, self).__init__(
        head=head,
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=max_iteration_steps,
        ensemblers=ensemblers,
        ensemble_strategies=ensemble_strategies,
        evaluator=evaluator,
        metric_fn=metric_fn,
        force_grow=force_grow,
        adanet_loss_decay=adanet_loss_decay,
        model_dir=model_dir,
        config=config,
        use_tpu=use_tpu,
        eval_on_tpu=eval_on_tpu,
        export_to_tpu=export_to_tpu,
        train_batch_size=train_batch_size,
        eval_batch_size=eval_batch_size,
        predict_batch_size=predict_batch_size,
        embedding_config_spec=embedding_config_spec,
        debug=debug,
        enable_ensemble_summaries=enable_ensemble_summaries,
        enable_subnetwork_summaries=enable_subnetwork_summaries,
        global_step_combiner_fn=global_step_combiner_fn,
        max_iterations=max_iterations,
        replay_config=replay_config,
        **kwargs)
| 17,628 | 41.479518 | 95 | py |
adanet | adanet-master/adanet/autoensemble/estimator_test.py | """Tests for AdaNet AutoEnsembleEstimator in TF 1.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import shutil
import sys
from absl import flags
from absl import logging
from absl.testing import parameterized
from adanet import tf_compat
from adanet.autoensemble.common import AutoEnsembleSubestimator
from adanet.autoensemble.estimator import _GeneratorFromCandidatePool
from adanet.autoensemble.estimator import AutoEnsembleEstimator
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.estimator.export import export
# pylint: enable=g-direct-tensorflow-import
logging.set_verbosity(logging.INFO)
# Ensures "local_init_op" is called.
class CheckLocalInitOpEstimator(tf.estimator.Estimator):
  """Estimator whose graph fails unless the Scaffold's local_init_op ran."""

  def __init__(self):
    super(CheckLocalInitOpEstimator,
          self).__init__(model_fn=self._get_model_fn())

  def _get_model_fn(self):
    """Returns a model_fn that asserts the custom local init op executed."""

    def _model_fn(features, labels, mode, params):
      del labels, params  # Unused.
      # Uncollected variable that only the local_init_op below flips to True.
      ran_local_init = tf.Variable(initial_value=False, collections=[])
      record_local_init = tf.assign(ran_local_init, True)
      check_local_init = tf.debugging.Assert(
          tf.equal(ran_local_init, True), [ran_local_init])
      scaffold = tf.train.Scaffold(
          local_init_op=tf.group(tf.train.Scaffold.default_local_init_op(),
                                 record_local_init))
      # Note: Not consuming the feature stales the test input_fn.
      feature = next(iter(features.values()))
      with tf.control_dependencies([feature, check_local_init]):
        predictions = tf.zeros([tf.shape(feature)[0], 1])
      return tf.estimator.EstimatorSpec(
          mode,
          loss=tf.constant(-1.0),
          train_op=check_local_init,
          scaffold=scaffold,
          predictions=predictions)

    return _model_fn
# Creates an a file in the model directory during training, and check the
# presence of this file during eval and inference.
class CheckAssetEstimator(tf.estimator.Estimator):
  """Estimator that writes a file at train-time and asserts it exists later.

  During TRAIN mode a checkpoint-saver listener writes a "data" file into the
  model directory; during EVAL/PREDICT a local_init_op asserts the file is
  still present.
  """

  def __init__(self, config):
    super(CheckAssetEstimator, self).__init__(
        config=config, model_fn=self._get_model_fn())

  def _get_model_fn(self):

    def _model_fn(features, labels, mode, params):
      del labels
      del params
      scaffold = None
      training_hooks = None
      if mode == tf.estimator.ModeKeys.TRAIN:

        class CheckpointSaverListener(tf.train.CheckpointSaverListener):

          # `subself` deliberately shadows the listener's own `self` so the
          # body can read `self._model_dir` from the enclosing estimator.
          def after_save(subself, session, global_step_value):  # pylint: disable=no-self-argument
            tf.logging.info("Creating data in %s", self._model_dir)
            with tf.gfile.Open(os.path.join(self._model_dir, "data"), "w") as f:
              f.write("Hello")
            assert tf.io.gfile.exists(os.path.join(self._model_dir, "data"))

        listener = CheckpointSaverListener()
        # save_steps is large so only the initial save triggers the listener.
        saver_hook = tf.estimator.CheckpointSaverHook(
            self._model_dir, listeners=[listener], save_steps=1000000)
        training_hooks = [saver_hook]
      else:

        def check_asset():
          # Runs as a py_func during local initialization in EVAL/PREDICT.
          tf.logging.info("Checking data %s", self._model_dir)
          assert tf.io.gfile.exists(os.path.join(self._model_dir, "data"))
          return 1

        check_asset_op = tf.compat.v1.py_func(check_asset, [], tf.int64)
        scaffold = tf.train.Scaffold(
            local_init_op=tf.group(tf.train.Scaffold.default_local_init_op(),
                                   check_asset_op))
      feature = next(iter(features.values()))
      with tf.control_dependencies([feature]):
        batch_size = tf.shape(feature)[0]
        predictions = tf.zeros([batch_size, 1])
      train_op = tf.no_op()
      loss = tf.constant(-1.0)
      return tf.estimator.EstimatorSpec(
          mode,
          loss=loss,
          train_op=train_op,
          scaffold=scaffold,
          predictions=predictions,
          training_hooks=training_hooks)

    return _model_fn
class AutoEnsembleEstimatorTest(parameterized.TestCase, tf.test.TestCase):
  """Lifecycle tests for AutoEnsembleEstimator: train, eval, predict, export."""

  def setUp(self):
    super(AutoEnsembleEstimatorTest, self).setUp()
    # Setup and cleanup test directory.
    # Flags are not automatically parsed at this point.
    flags.FLAGS(sys.argv)
    self.test_subdirectory = os.path.join(flags.FLAGS.test_tmpdir, self.id())
    shutil.rmtree(self.test_subdirectory, ignore_errors=True)
    os.makedirs(self.test_subdirectory)

  def tearDown(self):
    super(AutoEnsembleEstimatorTest, self).tearDown()
    shutil.rmtree(self.test_subdirectory, ignore_errors=True)

  # Each named case supplies a `candidate_pool` factory (dict, list, lambda,
  # or AutoEnsembleSubestimator variants) and the loss expected after training.
  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          "testcase_name":
              "dict_candidate_pool",
          "candidate_pool":
              lambda head, feature_columns, optimizer: {
                  "dnn":
                      tf.estimator.DNNEstimator(
                          head=head,
                          feature_columns=feature_columns,
                          optimizer=optimizer,
                          hidden_units=[3]),
                  "linear":
                      tf.estimator.LinearEstimator(
                          head=head,
                          feature_columns=feature_columns,
                          optimizer=optimizer),
              },
          "want_loss":
              .209,
      },
      {
          "testcase_name":
              "list_candidate_pool",
          "candidate_pool":
              lambda head, feature_columns, optimizer: [
                  tf.estimator.DNNEstimator(
                      head=head,
                      feature_columns=feature_columns,
                      optimizer=optimizer,
                      hidden_units=[3]),
                  tf.estimator.LinearEstimator(
                      head=head,
                      feature_columns=feature_columns,
                      optimizer=optimizer),
              ],
          "want_loss":
              .209,
      },
      {
          "testcase_name":
              "candidate_pool_lambda",
          "candidate_pool":
              lambda head, feature_columns, optimizer: lambda config: {
                  "dnn":
                      tf.estimator.DNNEstimator(
                          head=head,
                          feature_columns=feature_columns,
                          optimizer=optimizer,
                          hidden_units=[3],
                          config=config),
                  "linear":
                      tf.estimator.LinearEstimator(
                          head=head,
                          feature_columns=feature_columns,
                          optimizer=optimizer,
                          config=config),
              },
          "want_loss":
              .209,
      },
      {
          "testcase_name":
              "bagging",
          "candidate_pool":
              lambda head, feature_columns, optimizer: {
                  "same_train_data":
                      AutoEnsembleSubestimator(
                          tf.estimator.LinearEstimator(
                              head=head,
                              feature_columns=feature_columns,
                              optimizer=optimizer)),
                  "different_train_data":
                      AutoEnsembleSubestimator(
                          tf.estimator.DNNEstimator(
                              head=head,
                              feature_columns=feature_columns,
                              optimizer=optimizer,
                              hidden_units=[3]),
                          train_input_fn=lambda: ({
                              "input_1": tf.constant([[0., 1.]])
                          }, tf.constant([[1.]]))),
              },
          # TODO: Figure out why this test's loss changes with every
          # change to the TensorFlow graph.
          "want_loss":
              0.2,
      },
      {
          "testcase_name":
              "bagging_out_of_range_error",
          "max_train_steps":
              15,
          "candidate_pool":
              lambda head, feature_columns, optimizer: {
                  "same_train_data":
                      AutoEnsembleSubestimator(
                          tf.estimator.LinearEstimator(
                              head=head,
                              feature_columns=feature_columns,
                              optimizer=optimizer)),
                  "different_train_data":
                      AutoEnsembleSubestimator(
                          tf.estimator.DNNEstimator(
                              head=head,
                              feature_columns=feature_columns,
                              optimizer=optimizer,
                              hidden_units=[3]),
                          # TODO: Dataset must have at least 2 batches,
                          # otherwise all of training terminates.
                          train_input_fn=lambda: tf.data.Dataset.
                          from_tensor_slices(({
                              "input_1": [[0., 1.], [0., 1.]]
                          }, [[1.], [1.]])).batch(1),
                      ),
              },
          # TODO: Figure out why this test's loss changes with every
          # change to the TensorFlow graph.
          "want_loss":
              0.3,
      },
      {
          "testcase_name":
              "check_local_init_op",
          "max_train_steps":
              10,
          "candidate_pool":
              lambda head, feature_columns, optimizer: {
                  "expect_local_init_op":
                      AutoEnsembleSubestimator(CheckLocalInitOpEstimator()),
              },
          "want_loss":
              1.0,
      },
      {
          "testcase_name":
              "check_iteration_count",
          "max_train_steps":
              10,
          "candidate_pool":
              lambda head, feature_columns, optimizer:
              (lambda config, iteration_number: {
                  "dnn":
                      AutoEnsembleSubestimator(
                          tf.estimator.DNNEstimator(
                              head=head,
                              feature_columns=feature_columns,
                              optimizer=optimizer,
                              hidden_units=[3])),
                  "linear":
                      AutoEnsembleSubestimator(
                          tf.estimator.LinearEstimator(
                              head=head,
                              feature_columns=feature_columns,
                              optimizer=optimizer)),
              }),
          "want_loss":
              .209,
      },
      {
          "testcase_name":
              "check_has_asset",
          "max_train_steps":
              10,
          "candidate_pool":
              lambda head, feature_columns, optimizer: {
                  "linear":
                      lambda subconfig: AutoEnsembleSubestimator(
                          tf.estimator.LinearEstimator(
                              head=head,
                              feature_columns=feature_columns,
                              optimizer=optimizer,
                              config=subconfig)),
                  "check_asset_1":
                      lambda subconfig: AutoEnsembleSubestimator(
                          CheckAssetEstimator(config=subconfig)),
                  "check_asset_2":
                      lambda subconfig: AutoEnsembleSubestimator(
                          CheckAssetEstimator(config=subconfig)),
              },
          "want_loss":
              .209,
      })
  # pylint: enable=g-long-lambda
  @tf_compat.skip_for_tf2
  def test_auto_ensemble_estimator_lifecycle(self,
                                             candidate_pool,
                                             want_loss,
                                             max_train_steps=30):
    """Trains, evaluates, predicts with, and exports an ensemble estimator."""
    features = {"input_1": [[1., 0.]]}
    labels = [[1.]]
    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    head = tf.contrib.estimator.regression_head(
        loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=.01)
    feature_columns = [tf.feature_column.numeric_column("input_1", shape=[2])]

    def train_input_fn():
      input_features = {}
      for key, feature in features.items():
        input_features[key] = tf.constant(feature, name=key)
      input_labels = tf.constant(labels, name="labels")
      return input_features, input_labels

    def test_input_fn():
      input_features = tf.data.Dataset.from_tensors([
          tf.constant(features["input_1"])
      ]).make_one_shot_iterator().get_next()
      return {"input_1": input_features}, None

    estimator = AutoEnsembleEstimator(
        head=head,
        candidate_pool=candidate_pool(head, feature_columns, optimizer),
        max_iteration_steps=10,
        force_grow=True,
        model_dir=self.test_subdirectory,
        config=run_config)
    # Train for three iterations.
    estimator.train(input_fn=train_input_fn, max_steps=max_train_steps)
    # Evaluate.
    eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
    self.assertAllClose(max_train_steps, eval_results["global_step"])
    self.assertAllClose(want_loss, eval_results["loss"], atol=.3)
    # Predict.
    predictions = estimator.predict(input_fn=test_input_fn)
    for prediction in predictions:
      self.assertIsNotNone(prediction["predictions"])

    # Export SavedModel.
    def serving_input_fn():
      """Input fn for serving export, starting from serialized example."""
      serialized_example = tf.placeholder(
          dtype=tf.string, shape=(None), name="serialized_example")
      for key, value in features.items():
        features[key] = tf.constant(value)
      return export.SupervisedInputReceiver(
          features=features,
          labels=tf.constant(labels),
          receiver_tensors=serialized_example)

    export_dir_base = os.path.join(self.test_subdirectory, "export")
    # `export_saved_model` replaced `export_savedmodel` in newer TF releases;
    # fall back for older versions.
    export_saved_model_fn = getattr(estimator, "export_saved_model", None)
    if not callable(export_saved_model_fn):
      export_saved_model_fn = estimator.export_savedmodel
    export_saved_model_fn(
        export_dir_base=export_dir_base,
        serving_input_receiver_fn=serving_input_fn)

  @tf_compat.skip_for_tf2
  def test_last_layer_fn(self):
    """Checks a custom `last_layer_fn` flows through candidate generation."""
    head = tf.contrib.estimator.regression_head(
        loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=.01)
    feature_columns = [tf.feature_column.numeric_column("input_1", shape=[2])]
    cand_pool = [
        tf.estimator.DNNEstimator(
            head=head,
            feature_columns=feature_columns,
            optimizer=optimizer,
            hidden_units=[3])
    ]
    input_features = {}
    features = {"input_1": [[1., 0.]]}
    labels = [[1.]]
    for key, feature in features.items():
      input_features[key] = tf.constant(feature, name=key)
    input_labels = tf.constant(labels, name="labels")

    class _FakeSummary(object):
      """A fake adanet.Summary."""

      def scalar(self, name, tensor, family=None):
        del name, tensor, family
        return "fake_scalar"

      @contextlib.contextmanager
      def current_scope(self):
        yield

    def _adanet_last_layer_fn(estimator_spec):
      del estimator_spec
      return input_labels

    # Call with custom last_layer_fn which simply returns the labels tensor.
    generator = _GeneratorFromCandidatePool(
        cand_pool, logits_fn=None, last_layer_fn=_adanet_last_layer_fn)
    candidates = generator.generate_candidates(
        previous_ensemble=None,
        iteration_number=None,
        previous_ensemble_reports=None,
        all_reports=None,
        config=tf.estimator.RunConfig())
    subnetwork = candidates[0].build_subnetwork(input_features,
                                                input_labels, None, False, 1,
                                                _FakeSummary(), None)
    self.assertEqual(input_labels, subnetwork.last_layer)

  @tf_compat.skip_for_tf2
  def test_extra_checkpoint_saver_hook(self):
    """Tests b/122795064."""
    features = {"input_1": [[1., 0.]]}
    labels = [[1.]]
    run_config = tf.estimator.RunConfig(tf_random_seed=42)
    head = tf.contrib.estimator.binary_classification_head(
        loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=.01)
    feature_columns = [tf.feature_column.numeric_column("input_1", shape=[2])]
    estimator = AutoEnsembleEstimator(
        head=head,
        candidate_pool=[
            tf.estimator.LinearClassifier(
                n_classes=2,
                feature_columns=feature_columns,
                optimizer=optimizer),
            tf.estimator.DNNClassifier(
                n_classes=2,
                feature_columns=feature_columns,
                optimizer=optimizer,
                hidden_units=[3]),
        ],
        max_iteration_steps=3,
        force_grow=True,
        model_dir=self.test_subdirectory,
        config=run_config)
    ckpt_dir = os.path.join(self.test_subdirectory)
    hooks = [tf.train.CheckpointSaverHook(ckpt_dir, save_steps=1)]

    def train_input_fn():
      input_features = {}
      for key, feature in features.items():
        input_features[key] = tf.constant(feature, name=key)
      input_labels = tf.constant(labels, name="labels")
      return input_features, input_labels

    estimator.train(input_fn=train_input_fn, max_steps=6, hooks=hooks)
# Run the TensorFlow test runner when this module is executed as a script.
if __name__ == "__main__":
  tf.test.main()
| 18,311 | 34.835616 | 98 | py |
adanet | adanet-master/adanet/ensemble/weighted_test.py | """Test AdaNet single weighted subnetwork and ensembler implementation.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
from absl.testing import parameterized
from adanet import ensemble
from adanet import subnetwork
from adanet import tf_compat
from adanet.core.summary import Summary
import mock
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class _FakeCheckpoint(object):
"""A fake `tf.train.Checkpoint`."""
def restore(self, save_path):
del save_path # unused
class _FakeSummary(Summary):
  """A fake adanet.Summary that records scalar tags for later inspection."""

  # NOTE: class-level attribute, so it is shared across ALL _FakeSummary
  # instances; tests reset it between cases via clear_scalars().
  scalars = collections.defaultdict(list)

  def scalar(self, name, tensor, family=None):
    # Record the tensor under its tag so tests can evaluate it afterwards.
    self.scalars[name].append(tensor)
    return 'fake_scalar'

  def image(self, name, tensor, max_outputs=3, family=None):
    return 'fake_image'

  def histogram(self, name, values, family=None):
    return 'fake_histogram'

  def audio(self, name, tensor, sample_rate, max_outputs=3, family=None):
    return 'fake_audio'

  def clear_scalars(self):
    # Clears the shared class-level dict (affects every instance).
    self.scalars.clear()

  @contextlib.contextmanager
  def current_scope(self):
    yield
def _get_norm_summary_key(subnetwork_index):
return ('mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_%s' %
subnetwork_index)
def _get_fractions_summary_key(subnetwork_index):
return (
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_%s' %
subnetwork_index)
def _get_complexity_regularization_summary_key():
return 'complexity_regularization/adanet/adanet_weighted_ensemble'
class ComplexityRegularizedEnsemblerTest(parameterized.TestCase,
tf.test.TestCase):
  def setUp(self):
    """Builds a default ensembler and stubs out checkpoint variable loading."""
    super(ComplexityRegularizedEnsemblerTest, self).setUp()
    self._optimizer = tf_compat.v1.train.GradientDescentOptimizer(
        learning_rate=.1)
    self.easy_ensembler = ensemble.ComplexityRegularizedEnsembler(
        optimizer=self._optimizer)
    # Patch every load_variable entry point so no real checkpoint is read.
    mock.patch.object(tf.train, 'load_variable', autospec=False).start()
    mock.patch.object(
        tf.compat.v1.train, 'load_variable', autospec=False).start()
    mock.patch.object(
        tf.compat.v2.train, 'load_variable', autospec=False).start()
    mock.patch.object(
        ensemble.ComplexityRegularizedEnsembler, '_load_variable',
        autospec=False).start()
    def _load_variable(var, previous_iteration_checkpoint):
      # Stub: always returns 1.0, but insists a checkpoint object was passed.
      del var  # unused
      assert previous_iteration_checkpoint is not None
      return 1.0
    complexity_regularized_ensembler = ensemble.ComplexityRegularizedEnsembler
    complexity_regularized_ensembler._load_variable.side_effect = _load_variable
    self.summary = _FakeSummary()
def _build_easy_ensemble(self, subnetworks):
return self.easy_ensembler.build_ensemble(
subnetworks=subnetworks,
previous_ensemble_subnetworks=None,
features=None,
labels=None,
logits_dimension=None,
training=None,
iteration_step=None,
summary=self.summary,
previous_ensemble=None,
previous_iteration_checkpoint=None)
  def _build_subnetwork(self, multi_head=False):
    """Builds a Subnetwork with 1-unit dense logits over a frozen random input.

    Args:
      multi_head: Falsy for a single-head subnetwork, or an iterable of head
        keys to produce a dict of logits keyed by head.

    Returns:
      An `adanet.subnetwork.Subnetwork` with complexity 2.
    """
    last_layer = tf.Variable(
        tf_compat.random_normal(shape=(2, 3)), trainable=False).read_value()
    def new_logits():
      return tf_compat.v1.layers.dense(
          last_layer,
          units=1,
          kernel_initializer=tf_compat.v1.glorot_uniform_initializer())
    if multi_head:
      logits = {k: new_logits() for k in multi_head}
      last_layer = {k: last_layer for k in multi_head}
    else:
      logits = new_logits()
    # NOTE(review): `last_layer` computed above is not passed through; the
    # Subnetwork uses `logits` for both fields. Looks deliberate since the
    # expected mixture-weight norms assume logits-shaped weights — confirm.
    return subnetwork.Subnetwork(last_layer=logits, logits=logits, complexity=2)
@parameterized.named_parameters(
{
'testcase_name': 'default',
'expected_summary_scalars': {
_get_norm_summary_key(0): [1],
_get_fractions_summary_key(0): [1],
_get_complexity_regularization_summary_key(): [0.],
},
'expected_complexity_regularization': 0.,
}, {
'testcase_name': 'one_previous_network',
'num_previous_ensemble_subnetworks': 1,
'expected_summary_scalars': {
_get_norm_summary_key(0): [0.5],
_get_norm_summary_key(1): [0.5],
_get_fractions_summary_key(0): [0.5],
_get_fractions_summary_key(1): [0.5],
_get_complexity_regularization_summary_key(): [0.],
},
'expected_complexity_regularization': 0.,
}, {
'testcase_name': 'one_previous_network_with_lambda',
'adanet_lambda': 0.1,
'num_previous_ensemble_subnetworks': 1,
'expected_summary_scalars': {
_get_norm_summary_key(0): [0.5],
_get_norm_summary_key(1): [0.5],
_get_fractions_summary_key(0): [0.5],
_get_fractions_summary_key(1): [0.5],
_get_complexity_regularization_summary_key(): [0.2],
},
'expected_complexity_regularization': 0.2,
}, {
'testcase_name': 'two_subnetworks_one_previous_network_with_lambda',
'adanet_lambda': 0.1,
'num_previous_ensemble_subnetworks': 1,
'expected_summary_scalars': {
_get_norm_summary_key(0): [0.5],
_get_norm_summary_key(1): [0.5],
_get_fractions_summary_key(0): [0.5],
_get_fractions_summary_key(1): [0.5],
_get_complexity_regularization_summary_key(): [0.2],
},
'expected_complexity_regularization': 0.2,
}, {
'testcase_name': 'all_previous_networks_with_lambda',
'adanet_lambda': 0.1,
'num_previous_ensemble_subnetworks': 2,
'expected_summary_scalars': {
_get_norm_summary_key(0): [1 / 3.],
_get_norm_summary_key(1): [1 / 3.],
_get_norm_summary_key(2): [1 / 3.],
_get_fractions_summary_key(0): [1 / 3.],
_get_fractions_summary_key(1): [1 / 3.],
_get_fractions_summary_key(2): [1 / 3.],
_get_complexity_regularization_summary_key(): [1 / 5.],
},
'expected_complexity_regularization': 1 / 5.,
}, {
'testcase_name': 'all_previous_networks_and_two_subnetworks',
'num_subnetworks': 2,
'adanet_lambda': 0.1,
'num_previous_ensemble_subnetworks': 2,
'expected_summary_scalars': {
_get_norm_summary_key(0): [1 / 4.],
_get_norm_summary_key(1): [1 / 4.],
_get_norm_summary_key(2): [1 / 4.],
_get_norm_summary_key(3): [1 / 4.],
_get_fractions_summary_key(0): [1 / 4.],
_get_fractions_summary_key(1): [1 / 4.],
_get_fractions_summary_key(2): [1 / 4.],
_get_fractions_summary_key(3): [1 / 4.],
_get_complexity_regularization_summary_key(): [1 / 5.],
},
'expected_complexity_regularization': 1 / 5.,
}, {
'testcase_name': 'all_nets_and_string_multihead',
'num_subnetworks': 2,
'adanet_lambda': 0.1,
'multi_head': ['head1', 'head2'],
'use_bias': True,
'num_previous_ensemble_subnetworks': 2,
'expected_summary_scalars': {
'complexity_regularization/adanet/adanet_weighted_ensemble_head2':
[0.2],
'complexity_regularization/adanet/adanet_weighted_ensemble_head1':
[0.2],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head1_1':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head1_0':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head1_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head1_3':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head2_0':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head2_1':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head2_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head2_3':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head1_3':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head1_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head1_1':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head1_0':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head2_0':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head2_1':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head2_2':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head2_3':
[0.25]
},
'expected_complexity_regularization': 2 / 5.,
}, {
'testcase_name': 'all_nets_and_string_tuple_multihead',
'num_subnetworks': 2,
'adanet_lambda': 0.1,
'multi_head': [('bar', 'baz'), ('foo', 'bar')],
'use_bias': True,
'num_previous_ensemble_subnetworks': 2,
'expected_summary_scalars': {
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_bar_baz_3':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_bar_baz_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_bar_baz_1':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_bar_baz_0':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_foo_bar_1':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_bar_baz_3':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_bar_baz_2':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_bar_baz_1':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_bar_baz_0':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_foo_bar_1':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_foo_bar_2':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_foo_bar_0':
[0.25],
'complexity_regularization/adanet/adanet_weighted_ensemble_foo_bar':
[0.2],
'complexity_regularization/adanet/adanet_weighted_ensemble_bar_baz':
[0.2],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_foo_bar_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_foo_bar_3':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_foo_bar_3':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_foo_bar_0':
[0.25]
},
'expected_complexity_regularization': 2 / 5.,
}, {
'testcase_name': 'all_nets_and_tuple_multihead',
'num_subnetworks': 2,
'adanet_lambda': 0.1,
'multi_head': [('bar', 0), ('foo', 1)],
'use_bias': True,
'num_previous_ensemble_subnetworks': 2,
'expected_summary_scalars': {
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_bar_0_2':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_foo_1_3':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_foo_1_2':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_foo_1_1':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_foo_1_0':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_bar_0_1':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_bar_0_0':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_bar_0_3':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_bar_0_2':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_bar_0_1':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_foo_1_3':
[0.25],
'complexity_regularization/adanet/adanet_weighted_ensemble_bar_0':
[0.2],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_foo_1_1':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_foo_1_0':
[0.25],
'complexity_regularization/adanet/adanet_weighted_ensemble_foo_1':
[0.2],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_bar_0_0':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_bar_0_3':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_foo_1_2':
[0.25]
},
'expected_complexity_regularization': 2 / 5.,
}, {
'testcase_name': 'all_nets_and_number_multihead',
'num_subnetworks': 2,
'adanet_lambda': 0.1,
'multi_head': [0, 1],
'use_bias': True,
'num_previous_ensemble_subnetworks': 2,
'expected_summary_scalars': {
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_1_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_1_3':
[0.25],
'complexity_regularization/adanet/adanet_weighted_ensemble_1':
[0.2],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_1_1':
[0.25],
'complexity_regularization/adanet/adanet_weighted_ensemble':
[0.2],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_1_3':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_0':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_1':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_3':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_0':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_1':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_2':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_3':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_1_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_1_0':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_1_0':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_1_1':
[0.25]
},
'expected_complexity_regularization': 2 / 5.,
}, {
'testcase_name': 'all_nets_with_warm_start',
'num_subnetworks': 2,
'adanet_lambda': 0.1,
'warm_start_mixture_weights': True,
'num_previous_ensemble_subnetworks': 2,
'expected_summary_scalars': {
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_2':
[0.1],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_3':
[0.1],
'complexity_regularization/adanet/adanet_weighted_ensemble':
[0.5],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_1':
[0.4],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_2':
[0.25],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_0':
[0.4],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_0':
[1.0],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_1':
[1.0],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_3':
[0.25]
},
'expected_complexity_regularization': 1 / 2.,
}, {
'testcase_name': 'all_nets_with_warm_start_and_multihead',
'num_subnetworks': 2,
'adanet_lambda': 0.1,
'multi_head': ['head1', 'head2'],
'use_bias': True,
'warm_start_mixture_weights': True,
'num_previous_ensemble_subnetworks': 2,
'expected_summary_scalars': {
'complexity_regularization/adanet/adanet_weighted_ensemble_head2':
[0.5],
'complexity_regularization/adanet/adanet_weighted_ensemble_head1':
[0.5],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head2_1':
[1.0],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head2_0':
[1.0],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head2_3':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head2_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head1_2':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head1_3':
[0.25],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head1_0':
[1.0],
'mixture_weight_norms/adanet/adanet_weighted_ensemble/subnetwork_head1_1':
[1.0],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head1_2':
[0.1],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head1_3':
[0.1],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head1_0':
[0.4],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head1_1':
[0.4],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head2_1':
[0.4],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head2_0':
[0.4],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head2_3':
[0.1],
'mixture_weight_fractions/adanet/adanet_weighted_ensemble/subnetwork_head2_2':
[0.1]
},
'expected_complexity_regularization': 1.,
'name': 'with_bias',
})
@test_util.run_in_graph_and_eager_modes
def test_build_ensemble(self,
mixture_weight_type=ensemble.MixtureWeightType.SCALAR,
mixture_weight_initializer=None,
warm_start_mixture_weights=False,
adanet_lambda=0.,
adanet_beta=0.,
multi_head=None,
use_bias=False,
num_subnetworks=1,
num_previous_ensemble_subnetworks=0,
expected_complexity_regularization=0.,
expected_summary_scalars=None,
name=None):
with context.graph_mode():
model_dir = None
if warm_start_mixture_weights:
model_dir = 'fake_checkpoint_dir'
ensembler = ensemble.ComplexityRegularizedEnsembler(
optimizer=self._optimizer,
mixture_weight_type=mixture_weight_type,
mixture_weight_initializer=mixture_weight_initializer,
warm_start_mixture_weights=warm_start_mixture_weights,
model_dir=model_dir,
adanet_lambda=adanet_lambda,
adanet_beta=adanet_beta,
use_bias=use_bias,
name=name)
if name:
self.assertEqual(ensembler.name, name)
else:
self.assertEqual(ensembler.name, 'complexity_regularized')
with tf_compat.v1.variable_scope('dummy_adanet_scope_iteration_0'):
previous_ensemble_subnetworks_all = [
self._build_subnetwork(multi_head),
self._build_subnetwork(multi_head)
]
previous_ensemble = self._build_easy_ensemble(
previous_ensemble_subnetworks_all)
with tf_compat.v1.variable_scope('dummy_adanet_scope_iteration_1'):
subnetworks_pool = [
self._build_subnetwork(multi_head),
self._build_subnetwork(multi_head),
]
subnetworks = subnetworks_pool[:num_subnetworks]
previous_ensemble_subnetworks = previous_ensemble_subnetworks_all[:(
num_previous_ensemble_subnetworks)]
self.summary.clear_scalars()
built_ensemble = ensembler.build_ensemble(
subnetworks=subnetworks,
previous_ensemble_subnetworks=previous_ensemble_subnetworks,
features=None,
labels=None,
logits_dimension=None,
training=None,
iteration_step=None,
summary=self.summary,
previous_ensemble=previous_ensemble,
previous_iteration_checkpoint=_FakeCheckpoint())
with self.test_session() as sess:
sess.run(tf_compat.v1.global_variables_initializer())
summary_scalars, complexity_regularization = sess.run(
(self.summary.scalars, built_ensemble.complexity_regularization))
if expected_summary_scalars:
for key in expected_summary_scalars.keys():
print(summary_scalars)
self.assertAllClose(expected_summary_scalars[key],
summary_scalars[key])
self.assertEqual(
[l.subnetwork for l in built_ensemble.weighted_subnetworks],
previous_ensemble_subnetworks + subnetworks)
self.assertAllClose(expected_complexity_regularization,
complexity_regularization)
self.assertIsNotNone(sess.run(built_ensemble.logits))
@test_util.run_in_graph_and_eager_modes
def test_build_ensemble_subnetwork_has_scalar_logits(self):
with context.graph_mode():
logits = tf.ones(shape=(100,))
ensemble_spec = self._build_easy_ensemble([
subnetwork.Subnetwork(
last_layer=logits, logits=logits, complexity=0.)
])
self.assertEqual([1], ensemble_spec.bias.shape.as_list())
@test_util.run_in_graph_and_eager_modes
def test_build_train_op_no_op(self):
with context.graph_mode():
train_op = ensemble.ComplexityRegularizedEnsembler().build_train_op(
*[None] * 7) # arguments unused
self.assertEqual(train_op.type, tf.no_op().type)
@test_util.run_in_graph_and_eager_modes
def test_build_train_op_callable_optimizer(self):
with context.graph_mode():
dummy_weight = tf.Variable(0., name='dummy_weight')
dummy_loss = dummy_weight * 2.
ensembler = ensemble.ComplexityRegularizedEnsembler(
optimizer=lambda: tf_compat.v1.train.GradientDescentOptimizer(.1))
train_op = ensembler.build_train_op(
self._build_easy_ensemble([self._build_subnetwork()]),
dummy_loss, [dummy_weight],
labels=None,
iteration_step=None,
summary=None,
previous_ensemble=None)
config = tf.compat.v1.ConfigProto(
gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))
with tf_compat.v1.Session(config=config) as sess:
sess.run(tf_compat.v1.global_variables_initializer())
sess.run(train_op)
self.assertAllClose(-.2, sess.run(dummy_weight))
  @test_util.run_in_graph_and_eager_modes
  def test_build_train_op(self):
    """Tests one SGD step on a dummy weight with a direct optimizer instance.

    NOTE(review): mirrors test_build_train_op_callable_optimizer except the
    optimizer is passed as an instance rather than a factory callable.
    """
    with context.graph_mode():
      dummy_weight = tf.Variable(0., name='dummy_weight')
      dummy_loss = dummy_weight * 2.
      ensembler = ensemble.ComplexityRegularizedEnsembler(
          optimizer=tf_compat.v1.train.GradientDescentOptimizer(.1))
      train_op = ensembler.build_train_op(
          self._build_easy_ensemble([self._build_subnetwork()]),
          dummy_loss, [dummy_weight],
          labels=None,
          iteration_step=None,
          summary=None,
          previous_ensemble=None)
      config = tf.compat.v1.ConfigProto(
          gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))
      with tf_compat.v1.Session(config=config) as sess:
        sess.run(tf_compat.v1.global_variables_initializer())
        sess.run(train_op)
        # d(loss)/d(w) = 2, so one SGD step with lr=.1 moves w from 0 to -0.2.
        self.assertAllClose(-.2, sess.run(dummy_weight))
  def tearDown(self):
    """Clears recorded summary scalars, stops all patches, resets the graph."""
    self.summary.clear_scalars()
    mock.patch.stopall()
    tf_compat.v1.reset_default_graph()
    super(ComplexityRegularizedEnsemblerTest, self).tearDown()
if __name__ == '__main__':
  # Enable TF2 behavior before running the tests; individual tests enter
  # graph mode explicitly where needed.
  tf.enable_v2_behavior()
  tf.test.main()
| 27,693 | 42.339593 | 94 | py |
adanet | adanet-master/adanet/ensemble/mean.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adanet implementation for an ensembler for the mean of subnetwork logits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from adanet.ensemble.ensembler import Ensemble
from adanet.ensemble.ensembler import Ensembler
import tensorflow.compat.v2 as tf
class MeanEnsemble(
    collections.namedtuple('MeanEnsemble',
                           ['logits', 'subnetworks', 'predictions']),
    Ensemble):
  r"""Mean ensemble.

  Attributes:
    logits: Logits :class:`tf.Tensor` or dict of string to logits
      :class:`tf.Tensor` (for multi-head).
    subnetworks: List of :class:`adanet.subnetwork.Subnetwork` instances that
      form this ensemble.
    predictions: Optional dict mapping prediction keys to Tensors. MeanEnsembler
      can export mean_last_layer if the subnetworks have the last_layer of the
      same dimension.
  """

  # Key in predictions and export_outputs for mean of last_layer.
  MEAN_LAST_LAYER = 'mean_last_layer'

  def __new__(cls,
              logits,
              subnetworks=None,
              predictions=None):
    # Normalize subnetworks to a fresh list; None and empty become [].
    subnetwork_list = list(subnetworks) if subnetworks else []
    return super(MeanEnsemble, cls).__new__(
        cls,
        logits=logits,
        subnetworks=subnetwork_list,
        predictions=predictions)
class MeanEnsembler(Ensembler):
  # pyformat: disable
  r"""Ensembler that takes the mean of logits returned by its subnetworks.

  Attributes:
    name: Optional name for the ensembler. Defaults to 'complexity_regularized'.
    add_mean_last_layer_predictions: Set to True to add mean of last_layer in
      subnetworks in estimator's predictions and export outputs.
  """
  # pyformat: enable

  def __init__(self,
               name=None, add_mean_last_layer_predictions=False):
    self._name = name
    self._add_mean_last_layer_predictions = add_mean_last_layer_predictions

  @property
  def name(self):
    """Returns the configured name, or 'mean' when none was given."""
    if self._name:
      return self._name
    return 'mean'

  def _assert_last_layer_compatible_shapes(self, tensors):
    """Raises ValueError unless all `tensors` have compatible shapes.

    Averaging last layers only makes sense when every subnetwork's last
    layer has the same (compatible) static shape.
    """
    if not tensors:
      return True
    first_shape = tensors[0].shape
    for tensor in tensors:
      try:
        first_shape.assert_is_compatible_with(tensor.shape)
      except ValueError:
        raise ValueError(
            'Shape of `last_layer` tensors must be same if setting '
            '`add_mean_last_layer_predictions` to True. Found %s vs %s.'
            % (first_shape, tensor.shape))
    return True

  def build_ensemble(self, subnetworks, previous_ensemble_subnetworks, features,
                     labels, logits_dimension, training, iteration_step,
                     summary, previous_ensemble, previous_iteration_checkpoint):
    """Returns a `MeanEnsemble` averaging the subnetworks' logits.

    For multi-head subnetworks (dict logits), the mean is taken per key.
    When `add_mean_last_layer_predictions` is set, the mean of the
    subnetworks' last layers is exposed under the `MEAN_LAST_LAYER`
    prediction key (suffixed with the head key for multi-head).
    """
    del features, labels, logits_dimension, training, iteration_step  # unused
    del previous_ensemble_subnetworks, previous_iteration_checkpoint  # unused
    if isinstance(subnetworks[0].logits, dict):
      mean_logits = {
          key: tf.math.reduce_mean(
              tf.stack([s.logits[key] for s in subnetworks]), axis=0)
          for key in subnetworks[0].logits
      }
    else:
      mean_logits = tf.math.reduce_mean(
          tf.stack([s.logits for s in subnetworks]), axis=0)

    mean_last_layer = None
    if self._add_mean_last_layer_predictions:
      mean_last_layer = {}
      if isinstance(subnetworks[0].last_layer, dict):
        # NOTE(review): iterates the keys of `logits` while indexing into
        # `last_layer` — assumes both dicts share the same keys; confirm.
        for key in subnetworks[0].logits:
          last_layers = [s.last_layer[key] for s in subnetworks]
          self._assert_last_layer_compatible_shapes(last_layers)
          mean_last_layer['{}_{}'.format(MeanEnsemble.MEAN_LAST_LAYER,
                                         key)] = tf.math.reduce_mean(
                                             tf.stack(last_layers), axis=0)
      else:
        last_layers = [subnetwork.last_layer for subnetwork in subnetworks]
        self._assert_last_layer_compatible_shapes(last_layers)
        mean_last_layer = {
            MeanEnsemble.MEAN_LAST_LAYER:
                tf.math.reduce_mean(tf.stack(last_layers), axis=0)
        }
    return MeanEnsemble(
        subnetworks=subnetworks,
        logits=mean_logits,
        predictions=mean_last_layer)

  def build_train_op(self, ensemble, loss, var_list, labels, iteration_step,
                     summary, previous_ensemble):
    """Returns a no-op: a mean ensemble has no trainable mixture weights."""
    del ensemble, loss, var_list, labels, iteration_step, summary  # unused
    del previous_ensemble  # unused
    return tf.no_op()
| 5,019 | 35.911765 | 80 | py |
adanet | adanet-master/adanet/ensemble/strategy.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Search strategy algorithms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
class Candidate(
    collections.namedtuple("Candidate", [
        "name", "subnetwork_builders", "previous_ensemble_subnetwork_builders"
    ])):
  """An ensemble candidate found during the search phase.

  Args:
    name: String name of this ensemble candidate.
    subnetwork_builders: Candidate :class:`adanet.subnetwork.Builder` instances
      to include in the ensemble.
    previous_ensemble_subnetwork_builders: :class:`adanet.subnetwork.Builder`
      instances to include from the previous ensemble.
  """

  def __new__(cls, name, subnetwork_builders,
              previous_ensemble_subnetwork_builders):
    # Both builder collections are frozen into tuples; a falsy value for the
    # previous-ensemble builders becomes the empty tuple.
    prior_builders = previous_ensemble_subnetwork_builders or []
    return super(Candidate, cls).__new__(
        cls,
        name=name,
        subnetwork_builders=tuple(subnetwork_builders),
        previous_ensemble_subnetwork_builders=tuple(prior_builders))
@six.add_metaclass(abc.ABCMeta)
class Strategy(object):  # pytype: disable=ignored-metaclass
  """An abstract ensemble strategy."""

  # Kept alongside the six decorator for Python 2 compatibility.
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def generate_ensemble_candidates(self, subnetwork_builders,
                                   previous_ensemble_subnetwork_builders):
    """Generates ensemble candidates to search over this iteration.

    Args:
      subnetwork_builders: Candidate :class:`adanet.subnetwork.Builder`
        instances for this iteration.
      previous_ensemble_subnetwork_builders: :class:`adanet.subnetwork.Builder`
        instances from the previous ensemble. Including only a subset of these
        in a returned :class:`adanet.ensemble.Candidate` is equivalent to
        pruning the previous ensemble.

    Returns:
      An iterable of :class:`adanet.ensemble.Candidate` instances to train and
      consider this iteration.
    """

  # TODO: Pruning the previous subnetwork may require more metadata
  # such as `subnetwork.Reports` and `ensemble.Reports` to make smart
  # decisions.
class SoloStrategy(Strategy):
  """Produces a model composed of a single subnetwork.

  *An ensemble of one.*

  This is effectively the same as pruning all previous ensemble subnetworks,
  and only adding one subnetwork candidate to the ensemble.
  """

  def generate_ensemble_candidates(self, subnetwork_builders,
                                   previous_ensemble_subnetwork_builders):
    """Returns one single-subnetwork candidate per builder, ignoring history."""
    candidates = []
    for builder in subnetwork_builders:
      candidates.append(
          Candidate("{}_solo".format(builder.name), [builder], None))
    return candidates
class GrowStrategy(Strategy):
  """Greedily grows an ensemble, one subnetwork at a time."""

  def generate_ensemble_candidates(self, subnetwork_builders,
                                   previous_ensemble_subnetwork_builders):
    """Returns one candidate per builder, each keeping the full prior ensemble."""
    candidates = []
    for builder in subnetwork_builders:
      candidates.append(
          Candidate("{}_grow".format(builder.name), [builder],
                    previous_ensemble_subnetwork_builders))
    return candidates
class AllStrategy(Strategy):
  """Ensembles all subnetworks from the current iteration."""

  def generate_ensemble_candidates(self, subnetwork_builders,
                                   previous_ensemble_subnetwork_builders):
    """Returns a single candidate containing every builder plus the prior ones."""
    combined = Candidate("all", subnetwork_builders,
                         previous_ensemble_subnetwork_builders)
    return [combined]
| 4,100 | 33.754237 | 79 | py |
adanet | adanet-master/adanet/ensemble/weighted.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adanet implementation for weighted Subnetwork and Ensemblers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
from adanet import tf_compat
from adanet.ensemble.ensembler import Ensemble
from adanet.ensemble.ensembler import Ensembler
import tensorflow.compat.v2 as tf
def _stringify(key):
"""Flattens tuple and list keys into strings."""
if isinstance(key, (tuple, list)):
return "_".join([str(el) for el in key])
return key
def _lookup_if_dict(target, key):
if isinstance(target, dict):
return target[key]
return target
class WeightedSubnetwork(
    collections.namedtuple(
        "WeightedSubnetwork",
        ["name", "iteration_number", "weight", "logits", "subnetwork"])):
  # pyformat: disable
  """An AdaNet weighted subnetwork.

  A weighted subnetwork is a weight applied to a subnetwork's last layer
  or logits (depending on the mixture weights type).

  Args:
    name: String name of :code:`subnetwork` as defined by its
      :class:`adanet.subnetwork.Builder`.
    iteration_number: Integer iteration when the subnetwork was created.
    weight: The weight :class:`tf.Tensor` or dict of string to weight
      :class:`tf.Tensor` (for multi-head) to apply to this subnetwork. The
      AdaNet paper refers to this weight as :math:`w` in Equations (4), (5),
      and (6).
    logits: The output :class:`tf.Tensor` or dict of string to weight
      :class:`tf.Tensor` (for multi-head) after the matrix multiplication of
      :code:`weight` and the subnetwork's :code:`last_layer`. The output's shape
      is [batch_size, logits_dimension]. It is equivalent to a linear logits
      layer in a neural network.
    subnetwork: The :class:`adanet.subnetwork.Subnetwork` to weight.

  Returns:
    An :class:`adanet.ensemble.WeightedSubnetwork` object.
  """
  # pyformat: enable

  def __new__(cls, name="", iteration_number=0, weight=None, logits=None,
              subnetwork=None):
    # All fields pass through unchanged; defaults give an "empty" record.
    return super(WeightedSubnetwork, cls).__new__(
        cls,
        name=name,
        iteration_number=iteration_number,
        weight=weight,
        logits=logits,
        subnetwork=subnetwork)
class ComplexityRegularized(
    collections.namedtuple("ComplexityRegularized", [
        "weighted_subnetworks", "bias", "logits", "subnetworks",
        "complexity_regularization"
    ]), Ensemble):
  r"""An AdaNet ensemble where subnetworks are regularized by model complexity.

  Hence an ensemble is a collection of subnetworks which forms a neural network
  through the weighted sum of their outputs:

  .. math::

      F(x) = \sum_{i=1}^{N}w_ih_i(x) + b

  Args:
    weighted_subnetworks: List of :class:`adanet.ensemble.WeightedSubnetwork`
      instances that form this ensemble. Ordered from first to most recent.
    bias: Bias term :class:`tf.Tensor` or dict of string to bias term
      :class:`tf.Tensor` (for multi-head) for the ensemble's logits.
    logits: Logits :class:`tf.Tensor` or dict of string to logits
      :class:`tf.Tensor` (for multi-head). The result of the function *f* as
      defined in Section 5.1 which is the sum of the logits of all
      :class:`adanet.WeightedSubnetwork` instances in ensemble.
    subnetworks: List of :class:`adanet.subnetwork.Subnetwork` instances that
      form this ensemble. This is kept together with weighted_subnetworks for
      legacy reasons.
    complexity_regularization: Regularization to be added in the Adanet loss.

  Returns:
    An :class:`adanet.ensemble.Weighted` instance.
  """

  def __new__(cls, weighted_subnetworks, bias, logits, subnetworks=None,
              complexity_regularization=None):
    # Copy both sequences into fresh lists; a falsy subnetworks becomes [].
    weighted_list = list(weighted_subnetworks)
    subnetwork_list = list(subnetworks) if subnetworks else []
    return super(ComplexityRegularized, cls).__new__(
        cls,
        weighted_subnetworks=weighted_list,
        bias=bias,
        logits=logits,
        subnetworks=subnetwork_list,
        complexity_regularization=complexity_regularization)
class MixtureWeightType(object):
  """Mixture weight types available for learning subnetwork contributions.

  The following mixture weight types are defined:

  * `SCALAR`: Produces a rank 0 `Tensor` mixture weight.
  * `VECTOR`: Produces a rank 1 `Tensor` mixture weight.
  * `MATRIX`: Produces a rank 2 `Tensor` mixture weight.
  """

  # String values are persisted in configs; do not change them.
  SCALAR = "scalar"
  VECTOR = "vector"
  MATRIX = "matrix"
class ComplexityRegularizedEnsembler(Ensembler):
# pyformat: disable
r"""The AdaNet algorithm implemented as an :class:`adanet.ensemble.Ensembler`.
The AdaNet algorithm was introduced in the [Cortes et al. ICML 2017] paper:
https://arxiv.org/abs/1607.01097.
The AdaNet algorithm uses a weak learning algorithm to iteratively generate a
set of candidate subnetworks that attempt to minimize the loss function
defined in Equation (4) as part of an ensemble. At the end of each iteration,
the best candidate is chosen based on its ensemble's complexity-regularized
train loss. New subnetworks are allowed to use any subnetwork weights within
the previous iteration's ensemble in order to improve upon them. If the
complexity-regularized loss of the new ensemble, as defined in Equation (4),
is less than that of the previous iteration's ensemble, the AdaNet algorithm
continues onto the next iteration.
AdaNet attempts to minimize the following loss function to learn the mixture
weights :math:`w` of each subnetwork :math:`h` in the ensemble with
differentiable convex non-increasing surrogate loss function :math:`\Phi`:
Equation (4):
.. math::
F(w) = \frac{1}{m} \sum_{i=1}^{m} \Phi \left(\sum_{j=1}^{N}w_jh_j(x_i),
y_i \right) + \sum_{j=1}^{N} \left(\lambda r(h_j) + \beta \right) |w_j|
with :math:`\lambda >= 0` and :math:`\beta >= 0`.
Args:
optimizer: String, :class:`tf.train.Optimizer` object, or callable that
creates the optimizer to use for training the ensemble weights. If left
as :code:`None`, :meth:`tf.no_op()` is used instead.
mixture_weight_type: The :class:`adanet.ensemble.MixtureWeightType` defining
which mixture weight type to learn on top of the subnetworks' logits.
mixture_weight_initializer: The initializer for mixture_weights. When
:code:`None`, the default is different according to
:code:`mixture_weight_type`:
- :code:`SCALAR` initializes to :math:`1/N` where :math:`N` is the
number of subnetworks in the ensemble giving a uniform average.
- :code:`VECTOR` initializes each entry to :math:`1/N` where :math:`N`
is the number of subnetworks in the ensemble giving a uniform average.
- :code:`MATRIX` uses :meth:`tf.zeros_initializer`.
warm_start_mixture_weights: Whether, at the beginning of an iteration, to
initialize the mixture weights of the subnetworks from the previous
ensemble to their learned value at the previous iteration, as opposed to
retraining them from scratch. Takes precedence over the value for
:code:`mixture_weight_initializer` for subnetworks from previous
iterations.
model_dir: The model dir to use for warm-starting mixture weights and bias
at the logit layer. Ignored if :code:`warm_start_mixture_weights` is
:code:`False`.
adanet_lambda: Float multiplier :math:`\lambda` for applying :math:`L1`
regularization to subnetworks' mixture weights :math:`w` in the ensemble
proportional to their complexity. See Equation (4) in the AdaNet paper.
adanet_beta: Float :math:`L1` regularization multiplier :math:`\beta` to apply
equally to all subnetworks' weights :math:`w` in the ensemble regardless of
their complexity. See Equation (4) in the AdaNet paper.
use_bias: Whether to add a bias term to the ensemble's logits.
name: Optional name for the ensembler. Defaults to 'complexity_regularized'.
Returns:
An `adanet.ensemble.ComplexityRegularizedEnsembler` instance.
Raises:
ValueError: if :code:`warm_start_mixture_weights` is :code:`True` but
:code:`model_dir` is :code:`None`.
"""
# pyformat: enable
  def __init__(self,
               optimizer=None,
               mixture_weight_type=MixtureWeightType.SCALAR,
               mixture_weight_initializer=None,
               warm_start_mixture_weights=False,
               model_dir=None,
               adanet_lambda=0.,
               adanet_beta=0.,
               use_bias=False,
               name=None):
    """Initializes the ensembler. See the class docstring for arguments.

    Raises:
      ValueError: If `warm_start_mixture_weights` is True but `model_dir`
        is None, since warm-starting loads variables from `model_dir`.
    """
    if warm_start_mixture_weights:
      if model_dir is None:
        raise ValueError("model_dir cannot be None when "
                         "warm_start_mixture_weights is True.")
    self._optimizer = optimizer
    self._mixture_weight_type = mixture_weight_type
    self._mixture_weight_initializer = mixture_weight_initializer
    self._warm_start_mixture_weights = warm_start_mixture_weights
    self._model_dir = model_dir
    self._adanet_lambda = adanet_lambda
    self._adanet_beta = adanet_beta
    self._use_bias = use_bias
    self._name = name
@property
def name(self):
if self._name:
return self._name
return "complexity_regularized"
  def build_ensemble(self,
                     subnetworks,
                     previous_ensemble_subnetworks,
                     features,
                     labels,
                     logits_dimension,
                     training,
                     iteration_step,
                     summary,
                     previous_ensemble,
                     previous_iteration_checkpoint=None):
    """Builds a `ComplexityRegularized` ensemble.

    Kept subnetworks from the previous ensemble come first (optionally with
    warm-started mixture weights), followed by this iteration's subnetworks.
    Each gets its own `weighted_subnetwork_<i>` variable scope.
    """
    del features, labels, logits_dimension, training, iteration_step  # unused
    weighted_subnetworks = []
    subnetwork_index = 0
    num_subnetworks = len(subnetworks)

    if previous_ensemble_subnetworks and previous_ensemble:
      num_subnetworks += len(previous_ensemble_subnetworks)
      for weighted_subnetwork in previous_ensemble.weighted_subnetworks:
        if weighted_subnetwork.subnetwork not in previous_ensemble_subnetworks:
          # Pruned.
          continue
        weight_initializer = None
        if self._warm_start_mixture_weights:
          # Warm-start from the previous iteration's learned weight values;
          # multi-head weights are loaded per (sorted) head key.
          if isinstance(weighted_subnetwork.subnetwork.last_layer, dict):
            weight_initializer = {
                key: self._load_variable(weighted_subnetwork.weight[key],
                                         previous_iteration_checkpoint)
                for key in sorted(weighted_subnetwork.subnetwork.last_layer)
            }
          else:
            weight_initializer = self._load_variable(
                weighted_subnetwork.weight, previous_iteration_checkpoint)
        with tf_compat.v1.variable_scope(
            "weighted_subnetwork_{}".format(subnetwork_index)):
          weighted_subnetworks.append(
              self._build_weighted_subnetwork(
                  weighted_subnetwork.subnetwork,
                  num_subnetworks,
                  weight_initializer=weight_initializer))
        subnetwork_index += 1

    # This iteration's new subnetworks use freshly initialized weights.
    for subnetwork in subnetworks:
      with tf_compat.v1.variable_scope(
          "weighted_subnetwork_{}".format(subnetwork_index)):
        weighted_subnetworks.append(
            self._build_weighted_subnetwork(subnetwork, num_subnetworks))
      subnetwork_index += 1

    if previous_ensemble:
      if len(
          previous_ensemble.subnetworks) == len(previous_ensemble_subnetworks):
        # Nothing was pruned: the bias can be warm-started from the previous
        # ensemble's bias.
        bias = self._create_bias_term(
            weighted_subnetworks,
            prior=previous_ensemble.bias,
            previous_iteration_checkpoint=previous_iteration_checkpoint)
      else:
        bias = self._create_bias_term(
            weighted_subnetworks,
            prior=None,
            previous_iteration_checkpoint=previous_iteration_checkpoint)
        logging.info("Builders using a pruned set of the subnetworks "
                     "from the previous ensemble, so its ensemble's bias "
                     "term will not be warm started with the previous "
                     "ensemble's bias.")
    else:
      bias = self._create_bias_term(weighted_subnetworks)

    logits = self._create_ensemble_logits(weighted_subnetworks, bias, summary)
    complexity_regularization = 0
    if isinstance(logits, dict):
      # Multi-head: accumulate the regularization term over all head keys.
      for key in sorted(logits):
        complexity_regularization += self._compute_complexity_regularization(
            weighted_subnetworks, summary, key)
    else:
      complexity_regularization = self._compute_complexity_regularization(
          weighted_subnetworks, summary)

    return ComplexityRegularized(
        weighted_subnetworks=weighted_subnetworks,
        bias=bias,
        subnetworks=[ws.subnetwork for ws in weighted_subnetworks],
        logits=logits,
        complexity_regularization=complexity_regularization)
  def _load_variable(self, var, previous_iteration_checkpoint):
    """Loads `var`'s value from the latest checkpoint in `self._model_dir`.

    Tries the v2 (object-based) checkpoint restore first; when that has no
    nontrivial match, falls back to loading by v1 variable name.
    """
    latest_checkpoint = tf.train.latest_checkpoint(self._model_dir)
    status = previous_iteration_checkpoint.restore(latest_checkpoint)
    try:
      status.expect_partial().assert_nontrivial_match()
    except AssertionError:
      # Fall back to v1 checkpoint when not using v2 checkpoint.
      return tf.train.load_variable(self._model_dir, tf_compat.tensor_name(var))
    else:
      with tf_compat.v1.Session() as sess:
        status.initialize_or_restore(sess)
        return sess.run(var)
def _compute_adanet_gamma(self, complexity):
"""For a subnetwork, computes: lambda * r(h) + beta."""
if self._adanet_lambda == 0.:
return self._adanet_beta
return tf.scalar_mul(self._adanet_lambda,
tf.cast(complexity,
dtype=tf.float32)) + self._adanet_beta
def _select_mixture_weight_initializer(self, num_subnetworks):
if self._mixture_weight_initializer:
return self._mixture_weight_initializer
if (self._mixture_weight_type == MixtureWeightType.SCALAR or
self._mixture_weight_type == MixtureWeightType.VECTOR):
return tf_compat.v1.constant_initializer(1. / num_subnetworks)
return tf_compat.v1.zeros_initializer()
  def _build_weighted_subnetwork(self,
                                 subnetwork,
                                 num_subnetworks,
                                 weight_initializer=None):
    """Builds an `adanet.ensemble.WeightedSubnetwork`.

    Args:
      subnetwork: The `Subnetwork` to weight.
      num_subnetworks: The number of subnetworks in the ensemble.
      weight_initializer: Initializer for the weight variable. May be a dict
        of string to initializer for multi-head subnetworks.

    Returns:
      A `WeightedSubnetwork` instance.

    Raises:
      ValueError: When the subnetwork's last layer and logits dimension do
        not match and requiring a SCALAR or VECTOR mixture weight.
    """
    if isinstance(subnetwork.last_layer, dict):
      # Multi-head: build one (logits, weight) pair per sorted head key.
      logits, weight = {}, {}
      for i, key in enumerate(sorted(subnetwork.last_layer)):
        logits[key], weight[key] = self._build_weighted_subnetwork_helper(
            subnetwork, num_subnetworks,
            _lookup_if_dict(weight_initializer, key), key, i)
    else:
      logits, weight = self._build_weighted_subnetwork_helper(
          subnetwork, num_subnetworks, weight_initializer)

    return WeightedSubnetwork(
        subnetwork=subnetwork, logits=logits, weight=weight)
  def _build_weighted_subnetwork_helper(self,
                                        subnetwork,
                                        num_subnetworks,
                                        weight_initializer=None,
                                        key=None,
                                        index=None):
    """Returns the logits and weight of the `WeightedSubnetwork` for key.

    For MATRIX mixture weights, logits = last_layer @ weight; for SCALAR and
    VECTOR weights, logits = subnetwork.logits * weight (elementwise).
    """
    # Treat subnetworks as if their weights are frozen, and ensure that
    # mixture weight gradients do not propagate through.
    # NOTE(review): no stop_gradient appears in this helper — confirm where
    # (or whether) subnetwork gradients are actually stopped.
    last_layer = _lookup_if_dict(subnetwork.last_layer, key)
    logits = _lookup_if_dict(subnetwork.logits, key)
    weight_shape = None
    last_layer_size = last_layer.get_shape().as_list()[-1]
    logits_size = logits.get_shape().as_list()[-1]
    batch_size = tf.shape(input=last_layer)[0]

    if weight_initializer is None:
      weight_initializer = self._select_mixture_weight_initializer(
          num_subnetworks)
      if self._mixture_weight_type == MixtureWeightType.SCALAR:
        weight_shape = []
      if self._mixture_weight_type == MixtureWeightType.VECTOR:
        weight_shape = [logits_size]
      if self._mixture_weight_type == MixtureWeightType.MATRIX:
        weight_shape = [last_layer_size, logits_size]

    # NOTE(review): when a warm-start initializer is supplied, weight_shape
    # stays None and the shape is inferred from the initializer value.
    with tf_compat.v1.variable_scope(
        "logits_{}".format(index) if index else "logits"):
      weight = tf_compat.v1.get_variable(
          name="mixture_weight",
          shape=weight_shape,
          initializer=weight_initializer)
      if self._mixture_weight_type == MixtureWeightType.MATRIX:
        # TODO: Add Unit tests for the ndims == 3 path.
        ndims = len(last_layer.get_shape().as_list())
        if ndims > 3:
          raise NotImplementedError(
              "Last Layer with more than 3 dimensions are not supported with "
              "matrix mixture weights.")
        # This is reshaping [batch_size, timesteps, emb_dim ] to
        # [batch_size x timesteps, emb_dim] for matrix multiplication
        # and reshaping back.
        if ndims == 3:
          logging.info("Rank 3 tensors like [batch_size, timesteps, d] are "
                       "reshaped to rank 2 [ batch_size x timesteps, d] for "
                       "the weight matrix multiplication, and are reshaped "
                       "to their original shape afterwards.")
          last_layer = tf.reshape(last_layer, [-1, last_layer_size])
        logits = tf.matmul(last_layer, weight)
        if ndims == 3:
          logits = tf.reshape(logits, [batch_size, -1, logits_size])
      else:
        logits = tf.multiply(logits, weight)
    return logits, weight
  def _create_bias_term(self,
                        weighted_subnetworks,
                        prior=None,
                        previous_iteration_checkpoint=None):
    """Returns a bias term vector.

    If `use_bias` is set, then it returns a trainable bias variable initialized
    to zero, or warm-started with the given prior. Otherwise it returns
    a non-trainable zero variable.

    Args:
      weighted_subnetworks: List of `WeightedSubnetwork` instances that form
        this ensemble. Ordered from first to most recent.
      prior: Prior bias term `Tensor` of dict of string to `Tensor` (for multi-
        head) for warm-starting the bias term variable.
      previous_iteration_checkpoint: `tf.train.Checkpoint` for iteration t-1.

    Returns:
      A bias term `Tensor` or dict of string to bias term `Tensor` (for multi-
      head).
    """
    if not isinstance(weighted_subnetworks[0].subnetwork.logits, dict):
      return self._create_bias_term_helper(weighted_subnetworks, prior,
                                           previous_iteration_checkpoint)
    # Multi-head: one bias term per sorted head key.
    bias_terms = {}
    for i, key in enumerate(sorted(weighted_subnetworks[0].subnetwork.logits)):
      bias_terms[key] = self._create_bias_term_helper(
          weighted_subnetworks, prior, previous_iteration_checkpoint, key, i)
    return bias_terms
  def _create_bias_term_helper(self,
                               weighted_subnetworks,
                               prior,
                               previous_iteration_checkpoint,
                               key=None,
                               index=None):
    """Returns a bias term for weights with the given key.

    Uses a zeros initializer (sized from the first subnetwork's logits) unless
    warm-starting, in which case the prior bias value is loaded from the
    previous iteration's checkpoint and used as the initializer.
    """
    shape = None
    if prior is None or not self._warm_start_mixture_weights:
      prior = tf_compat.v1.zeros_initializer()
      logits = _lookup_if_dict(weighted_subnetworks[0].subnetwork.logits, key)
      dims = logits.shape.as_list()
      if len(dims) == 1:
        # Rank-1 logits get a single-element bias.
        num_dims = 1
      else:
        assert len(dims) == 2
        num_dims = dims[-1]
        assert num_dims is not None
      shape = num_dims
    else:
      # Loaded prior values carry their own shape; leave `shape` as None.
      prior = self._load_variable(
          _lookup_if_dict(prior, key), previous_iteration_checkpoint)
    # NOTE(review): `if index` is falsy for index == 0, so the first head's
    # variable is named plain "bias" — confirm this is intentional.
    return tf_compat.v1.get_variable(
        name="bias_{}".format(index) if index else "bias",
        shape=shape,
        initializer=prior,
        trainable=self._use_bias)
  def _create_ensemble_logits(self, weighted_subnetworks, bias, summary):
    """Computes the AdaNet weighted ensemble logits.

    Args:
      weighted_subnetworks: List of `WeightedSubnetwork` instances that form
        this ensemble. Ordered from first to most recent.
      bias: Bias term `Tensor` or dict of string to `Tensor` (for multi-head)
        for the AdaNet-weighted ensemble logits.
      summary: A `_ScopedSummary` instance for recording ensemble summaries.

    Returns:
      Ensemble logits `Tensor` or dict of string to logits `Tensor` (for
      multi-head). (Docstring fixed: the previous version claimed a two-tuple
      including complexity regularization, but only logits are returned.)
    """
    if not isinstance(weighted_subnetworks[0].subnetwork.logits, dict):
      return self._create_ensemble_logits_helper(weighted_subnetworks, bias,
                                                 summary)
    # Multi-head: one logits tensor per sorted head key.
    logits_dict = weighted_subnetworks[0].subnetwork.logits
    return {
        key: self._create_ensemble_logits_helper(
            weighted_subnetworks, bias, summary, key=key, index=i)
        for i, key in enumerate(sorted(logits_dict))
    }
def _create_ensemble_logits_helper(self,
weighted_subnetworks,
bias,
summary,
key=None,
index=None):
"""Returns the AdaNet ensemble logits and regularization term for key."""
subnetwork_logits = []
for weighted_subnetwork in weighted_subnetworks:
subnetwork_logits.append(_lookup_if_dict(weighted_subnetwork.logits, key))
with tf_compat.v1.variable_scope(
"logits_{}".format(index) if index else "logits"):
ensemble_logits = _lookup_if_dict(bias, key)
for logits in subnetwork_logits:
ensemble_logits = tf.add(ensemble_logits, logits)
return ensemble_logits
  def _compute_complexity_regularization(self,
                                         weighted_subnetworks,
                                         summary,
                                         key=None):
    """Returns the AdaNet regularization term contribution for a key.

    Also records summaries of the total regularization term, the mixture
    weight L1 norms, and each subnetwork's fraction of the total norm.

    Args:
      weighted_subnetworks: List of `WeightedSubnetwork` instances that form
        this ensemble.
      summary: A `_ScopedSummary` instance for recording ensemble summaries.
      key: Optional head name for looking up multi-head weight tensors.

    Returns:
      A scalar `Tensor` with the total complexity regularization for `key`.
    """
    ensemble_complexity_regularization = 0
    total_weight_l1_norms = 0
    weights = []
    for weighted_subnetwork in weighted_subnetworks:
      # L1 norm |w|_1 of this subnetwork's mixture weight.
      weight_l1_norm = tf.norm(
          tensor=_lookup_if_dict(weighted_subnetwork.weight, key), ord=1)
      total_weight_l1_norms += weight_l1_norm
      # Accumulate (lambda * r(h) + beta) * |w|_1 per subnetwork.
      ensemble_complexity_regularization += (
          self._compute_complexity_regularization_helper(
              weight_l1_norm, weighted_subnetwork.subnetwork.complexity))
      weights.append(weight_l1_norm)
    with summary.current_scope():
      # Append a suffix for multi head summaries.
      suffix = "_{}".format(_stringify(key)) if key else ""
      summary.scalar(
          "complexity_regularization/adanet/adanet_weighted_ensemble" + suffix,
          ensemble_complexity_regularization)
      summary.histogram(
          "mixture_weights/adanet/adanet_weighted_ensemble" + suffix, weights)
      for iteration, weight in enumerate(weights):
        scope = "adanet/adanet_weighted_ensemble/subnetwork{}_{}".format(
            suffix, iteration)
        summary.scalar("mixture_weight_norms/{}".format(scope), weight)
        # Each subnetwork's share of the total mixture-weight mass.
        fraction = weight / total_weight_l1_norms
        summary.scalar("mixture_weight_fractions/{}".format(scope), fraction)
    return ensemble_complexity_regularization
def _compute_complexity_regularization_helper(self, weight_l1_norm,
complexity):
"""For a subnetwork, computes: (lambda * r(h) + beta) * |w|."""
# Note: Unsafe comparison against float zero.
if self._adanet_lambda == 0. and self._adanet_beta == 0.:
return tf.constant(0., name="zero")
return tf.scalar_mul(self._compute_adanet_gamma(complexity), weight_l1_norm)
def build_train_op(self, ensemble, loss, var_list, labels, iteration_step,
summary, previous_ensemble):
del labels, iteration_step, summary, previous_ensemble # unused
optimizer = self._optimizer
if callable(optimizer):
optimizer = optimizer()
if optimizer is None:
return tf.no_op()
# The AdaNet Estimator is responsible for incrementing the global step.
return optimizer.minimize(
loss=loss + ensemble.complexity_regularization, var_list=var_list)
| 25,487 | 40.242718 | 82 | py |
adanet | adanet-master/adanet/ensemble/strategy_test.py | """Test AdaNet single graph subnetwork implementation.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet import ensemble
from adanet import subnetwork
import mock
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class StrategyTest(tf.test.TestCase):
  """Tests for the built-in ensemble `Strategy` implementations."""

  def setUp(self):
    # Four autospec'd subnetwork builders: the first two act as the current
    # iteration's candidates, the last two as the previous ensemble's.
    self.builders = [
        mock.create_autospec(spec=subnetwork.Builder) for _ in range(4)
    ]

  @test_util.run_in_graph_and_eager_modes
  def test_solo_strategy(self):
    expected = [
        ensemble.Candidate("{}_solo".format(builder.name), [builder], [])
        for builder in self.builders[:2]
    ]
    actual = ensemble.SoloStrategy().generate_ensemble_candidates(
        self.builders[:2], None)
    self.assertEqual(expected, actual)

  @test_util.run_in_graph_and_eager_modes
  def test_solo_strategy_with_previous_ensemble_subnetwork_builders(self):
    # Solo candidates never carry over previous-ensemble subnetworks.
    expected = [
        ensemble.Candidate("{}_solo".format(builder.name), [builder], [])
        for builder in self.builders[:2]
    ]
    actual = ensemble.SoloStrategy().generate_ensemble_candidates(
        self.builders[:2], self.builders[2:])
    self.assertEqual(expected, actual)

  @test_util.run_in_graph_and_eager_modes
  def test_grow_strategy(self):
    expected = [
        ensemble.Candidate("{}_grow".format(builder.name), [builder], [])
        for builder in self.builders[:2]
    ]
    actual = ensemble.GrowStrategy().generate_ensemble_candidates(
        self.builders[:2], None)
    self.assertEqual(expected, actual)

  @test_util.run_in_graph_and_eager_modes
  def test_grow_strategy_with_previous_ensemble_subnetwork_builders(self):
    # Grow candidates keep all previous-ensemble subnetworks.
    expected = [
        ensemble.Candidate("{}_grow".format(builder.name), [builder],
                           self.builders[2:])
        for builder in self.builders[:2]
    ]
    actual = ensemble.GrowStrategy().generate_ensemble_candidates(
        self.builders[:2], self.builders[2:])
    self.assertEqual(expected, actual)

  @test_util.run_in_graph_and_eager_modes
  def test_all_strategy(self):
    expected = [ensemble.Candidate("all", self.builders[:2], [])]
    actual = ensemble.AllStrategy().generate_ensemble_candidates(
        self.builders[:2], None)
    self.assertEqual(expected, actual)

  @test_util.run_in_graph_and_eager_modes
  def test_all_strategy_with_previous_ensemble_subnetwork_builders(self):
    expected = [
        ensemble.Candidate("all", self.builders[:2], self.builders[2:])
    ]
    actual = ensemble.AllStrategy().generate_ensemble_candidates(
        self.builders[:2], self.builders[2:])
    self.assertEqual(expected, actual)


if __name__ == "__main__":
  tf.test.main()
| 4,539 | 37.803419 | 77 | py |
adanet | adanet-master/adanet/ensemble/__init__.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines built-in ensemble methods and interfaces for custom ensembles."""
# TODO: Add more detailed documentation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet.ensemble.ensembler import Ensemble
from adanet.ensemble.ensembler import Ensembler
from adanet.ensemble.ensembler import TrainOpSpec
from adanet.ensemble.mean import MeanEnsemble
from adanet.ensemble.mean import MeanEnsembler
from adanet.ensemble.strategy import AllStrategy
from adanet.ensemble.strategy import Candidate
from adanet.ensemble.strategy import GrowStrategy
from adanet.ensemble.strategy import SoloStrategy
from adanet.ensemble.strategy import Strategy
from adanet.ensemble.weighted import ComplexityRegularized
from adanet.ensemble.weighted import ComplexityRegularizedEnsembler
from adanet.ensemble.weighted import MixtureWeightType
from adanet.ensemble.weighted import WeightedSubnetwork
# Names exported as the public API of `adanet.ensemble`.
__all__ = [
    "Ensemble",
    "Ensembler",
    "TrainOpSpec",
    "AllStrategy",
    "Candidate",
    "GrowStrategy",
    "SoloStrategy",
    "Strategy",
    "ComplexityRegularized",
    "ComplexityRegularizedEnsembler",
    "MeanEnsemble",
    "MeanEnsembler",
    "MixtureWeightType",
    "WeightedSubnetwork",
]
| 1,869 | 33.62963 | 76 | py |
adanet | adanet-master/adanet/ensemble/mean_test.py | """Test AdaNet mean ensemble and ensembler implementation.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet import ensemble
from adanet import subnetwork
from adanet import tf_compat
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class MeanTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for `adanet.ensemble.MeanEnsembler`."""

  def _build_subnetwork(self, multi_head=False, last_layer_dim=3):
    # Builds a minimal `Subnetwork` fixture: a random, non-trainable last
    # layer of shape (2, last_layer_dim) with freshly initialized one-unit
    # dense logits on top. When `multi_head` is a list of head names, both
    # `logits` and `last_layer` become dicts keyed by head name (all heads
    # share the same last-layer tensor).
    last_layer = tf.Variable(
        tf_compat.random_normal(shape=(2, last_layer_dim)),
        trainable=False).read_value()

    def new_logits():
      # A fresh dense logits head on top of `last_layer`.
      return tf_compat.v1.layers.dense(
          last_layer,
          units=1,
          kernel_initializer=tf_compat.v1.glorot_uniform_initializer())

    if multi_head:
      logits = {k: new_logits() for k in multi_head}
      last_layer = {k: last_layer for k in multi_head}
    else:
      logits = new_logits()
    return subnetwork.Subnetwork(
        last_layer=last_layer, logits=logits, complexity=2)

  @parameterized.named_parameters({
      'testcase_name': 'base',
  }, {
      'testcase_name': 'base_with_last_layer_predictions',
      'add_mean_last_layer_predictions': True
  }, {
      'testcase_name': 'base_with_last_layer_predictions_diff_shapes',
      'add_mean_last_layer_predictions': True,
      'diff_last_layer_shapes': True
  }, {
      'testcase_name': 'multi_head',
      'multi_head': ['first_head', 'second_head'],
  }, {
      'testcase_name': 'multi_head_with_last_layer_predictions',
      'multi_head': ['first_head', 'second_head'],
      'add_mean_last_layer_predictions': True
  }, {
      'testcase_name': 'multi_head_with_last_layer_predictions_diff_shapes',
      'multi_head': ['first_head', 'second_head'],
      'add_mean_last_layer_predictions': True,
      'diff_last_layer_shapes': True
  })
  @test_util.run_in_graph_and_eager_modes
  def test_mean_ensembler(self,
                          multi_head=False,
                          add_mean_last_layer_predictions=False,
                          diff_last_layer_shapes=False):
    # Verifies that MeanEnsembler averages subnetwork logits (and optionally
    # last layers), and raises when last-layer shapes disagree.
    with context.graph_mode():
      ensembler = ensemble.MeanEnsembler(
          add_mean_last_layer_predictions=add_mean_last_layer_predictions)
      last_layer_dims = [3, 3]
      if diff_last_layer_shapes:
        # Mismatched last-layer widths should trigger the ValueError below.
        last_layer_dims = [3, 5]
      if multi_head:
        subnetworks = [
            self._build_subnetwork(
                multi_head=multi_head, last_layer_dim=last_layer_dim)
            for last_layer_dim in last_layer_dims
        ]
      else:
        subnetworks = [
            self._build_subnetwork(last_layer_dim=last_layer_dim)
            for last_layer_dim in last_layer_dims
        ]
      if diff_last_layer_shapes:
        with self.assertRaisesRegexp(
            ValueError, r'Shape of \`last_layer\` tensors must be same'):
          built_ensemble = ensembler.build_ensemble(
              subnetworks=subnetworks,
              previous_ensemble_subnetworks=None,
              features=None,
              labels=None,
              logits_dimension=None,
              training=None,
              iteration_step=None,
              summary=None,
              previous_ensemble=None,
              previous_iteration_checkpoint=None)
        return
      built_ensemble = ensembler.build_ensemble(
          subnetworks=subnetworks,
          previous_ensemble_subnetworks=None,
          features=None,
          labels=None,
          logits_dimension=None,
          training=None,
          iteration_step=None,
          summary=None,
          previous_ensemble=None,
          previous_iteration_checkpoint=None)
      with self.test_session() as sess:
        sess.run(tf_compat.v1.global_variables_initializer())
        got_logits = sess.run(built_ensemble.logits)
        if add_mean_last_layer_predictions:
          got_predictions = sess.run(built_ensemble.predictions)
        # Raw per-subnetwork values used to compute the expected means below.
        logits = sess.run([s.logits for s in subnetworks])
        last_layer = sess.run([s.last_layer for s in subnetworks])
        if not multi_head:
          expected_logits = np.mean(logits, axis=0)
          expected_predictions = {
              ensemble.MeanEnsemble.MEAN_LAST_LAYER: np.mean(
                  last_layer, axis=0)
          }
        else:
          # For multi-head, expectations are computed per head name.
          expected_logits = {
              head_name: np.mean([s[head_name] for s in logits
                                 ], axis=0) for head_name in multi_head
          }
          expected_predictions = {
              '{}_{}'.format(ensemble.MeanEnsemble.MEAN_LAST_LAYER, head_name):
                  np.mean([s[head_name] for s in last_layer], axis=0)
              for head_name in multi_head
          }
        self.assertAllClose(expected_logits, got_logits)
        if add_mean_last_layer_predictions:
          self.assertAllClose(expected_predictions, got_predictions)


if __name__ == '__main__':
  tf.test.main()
| 5,711 | 34.042945 | 79 | py |
adanet | adanet-master/adanet/ensemble/ensembler.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensembler definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
class TrainOpSpec(
    collections.namedtuple("TrainOpSpec",
                           ["train_op", "chief_hooks", "hooks"])):
  """A specification of an ensembler's training operations.

  Args:
    train_op: Op for the training step.
    chief_hooks: Iterable of :class:`tf.train.SessionRunHook` objects to run on
      the chief worker during training.
    hooks: Iterable of :class:`tf.train.SessionRunHook` objects to run on all
      workers during training.

  Returns:
    An :class:`adanet.ensemble.TrainOpSpec` object.
  """

  def __new__(cls, train_op, chief_hooks=None, hooks=None):
    # Freeze the hook collections into tuples so callers cannot mutate them;
    # a missing (None) collection becomes an empty tuple.
    frozen_chief_hooks = tuple(chief_hooks or ())
    frozen_hooks = tuple(hooks or ())
    return super(TrainOpSpec, cls).__new__(cls, train_op, frozen_chief_hooks,
                                           frozen_hooks)
@six.add_metaclass(abc.ABCMeta)
class Ensemble(object):
  """An abstract ensemble of subnetworks."""

  # `abc.abstractproperty` has been deprecated since Python 3.3; stacking
  # `@property` over `@abc.abstractmethod` is its documented replacement and
  # enforces the same ABC contract.
  @property
  @abc.abstractmethod
  def logits(self):
    """Ensemble logits :class:`tf.Tensor`."""

  @property
  @abc.abstractmethod
  def subnetworks(self):
    """Returns an ordered :class:`Iterable` of the ensemble's subnetworks."""

  @property
  def predictions(self):
    """Optional dict of Ensemble predictions to be merged in EstimatorSpec.

    These will be additional (over the default included by the head)
    predictions which will be included in the EstimatorSpec in `predictions`
    and `export_outputs` (wrapped as PredictOutput).
    """
    return None
@six.add_metaclass(abc.ABCMeta)
class Ensembler(object):
  """An abstract ensembler."""

  # `abc.abstractproperty` has been deprecated since Python 3.3; stacking
  # `@property` over `@abc.abstractmethod` is its documented replacement.
  @property
  @abc.abstractmethod
  def name(self):
    """This ensembler's unique string name."""

  @abc.abstractmethod
  def build_ensemble(self, subnetworks, previous_ensemble_subnetworks, features,
                     labels, logits_dimension, training, iteration_step,
                     summary, previous_ensemble, previous_iteration_checkpoint):
    # pyformat: disable
    """Builds an ensemble of subnetworks.

    Accessing the global step via :meth:`tf.train.get_or_create_global_step()`
    or :meth:`tf.train.get_global_step()` within this scope will return an
    incrementable iteration step since the beginning of the iteration.

    Args:
      subnetworks: Ordered iterable of :class:`adanet.subnetwork.Subnetwork`
        instances to ensemble. Must have at least one element.
      previous_ensemble_subnetworks: Ordered iterable of
        :class:`adanet.subnetwork.Subnetwork` instances present in previous
        ensemble to be used. The subnetworks from previous_ensemble not
        included in this list should be pruned. Can be set to None or empty.
      features: Input :code:`dict` of :class:`tf.Tensor` objects.
      labels: Labels :class:`tf.Tensor` or a dictionary of string label name to
        :class:`tf.Tensor` (for multi-head). Can be :code:`None`.
      logits_dimension: Size of the last dimension of the logits
        :class:`tf.Tensor`. Typically, logits have for shape `[batch_size,
        logits_dimension]`.
      training: A python boolean indicating whether the graph is in training
        mode or prediction mode.
      iteration_step: Integer :class:`tf.Tensor` representing the step since the
        beginning of the current iteration, as opposed to the global step.
      summary: An :class:`adanet.Summary` for scoping summaries to individual
        ensembles in Tensorboard. Using :meth:`tf.summary` within this scope
        will use this :class:`adanet.Summary` under the hood.
      previous_ensemble: The best :class:`adanet.Ensemble` from iteration *t-1*.
        The created subnetwork will extend the previous ensemble to form the
        :class:`adanet.Ensemble` at iteration *t*.
      previous_iteration_checkpoint: The `tf.train.Checkpoint` object associated
        with the previous iteration.

    Returns:
      An :class:`adanet.ensemble.Ensemble` subclass instance.
    """
    # pyformat: enable

  @abc.abstractmethod
  def build_train_op(self, ensemble, loss, var_list, labels, iteration_step,
                     summary, previous_ensemble):
    # pyformat: disable
    """Returns an op for training an ensemble.

    Accessing the global step via :meth:`tf.train.get_or_create_global_step`
    or :meth:`tf.train.get_global_step` within this scope will return an
    incrementable iteration step since the beginning of the iteration.

    Args:
      ensemble: The :class:`adanet.ensemble.Ensemble` subclass instance returned
        by this instance's :meth:`build_ensemble`.
      loss: A :class:`tf.Tensor` containing the ensemble's loss to minimize.
      var_list: List of ensemble :class:`tf.Variable` parameters to update as
        part of the training operation.
      labels: Labels :class:`tf.Tensor` or a dictionary of string label name to
        :class:`tf.Tensor` (for multi-head).
      iteration_step: Integer :class:`tf.Tensor` representing the step since the
        beginning of the current iteration, as opposed to the global step.
      summary: An :class:`adanet.Summary` for scoping summaries to individual
        ensembles in Tensorboard. Using :code:`tf.summary` within this scope
        will use this :class:`adanet.Summary` under the hood.
      previous_ensemble: The best :class:`adanet.ensemble.Ensemble` from the
        previous iteration.

    Returns:
      Either a train op or an :class:`adanet.ensemble.TrainOpSpec`.
    """
    # pyformat: enable
| 6,215 | 40.165563 | 80 | py |
adanet | adanet-master/adanet/experimental/__init__.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaNet experimental directory."""
from adanet.experimental import controllers
from adanet.experimental import keras
from adanet.experimental import phases
from adanet.experimental import schedulers
from adanet.experimental import storages
from adanet.experimental import work_units
# Names exported as the public API of `adanet.experimental`.
__all__ = [
    "controllers",
    "keras",
    "phases",
    "schedulers",
    "storages",
    "work_units",
]
| 1,021 | 29.969697 | 74 | py |
adanet | adanet-master/adanet/experimental/storages/storage.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A storage for persisting results and managing stage."""
import abc
from typing import Iterable, List
import tensorflow.compat.v2 as tf
class ModelContainer:
  """A container pairing a model with its evaluation metadata.

  Attributes:
    score: A float used to rank models (lower is better, e.g. a loss).
    model: The `tf.keras.Model` being tracked.
    metrics: A list of float evaluation results for the model.
  """

  # The tf annotation is a string (forward reference) so defining this class
  # does not require TensorFlow to be imported at annotation-evaluation time.
  def __init__(self, score: float, model: 'tf.keras.Model',
               metrics: List[float]):
    self.score = score
    self.model = model
    self.metrics = metrics

  def __eq__(self, other: 'ModelContainer'):
    # Return NotImplemented (rather than raising AttributeError) for foreign
    # types so Python can fall back to its default comparison machinery.
    if not isinstance(other, ModelContainer):
      return NotImplemented
    return self.score == other.score

  def __lt__(self, other: 'ModelContainer'):
    if not isinstance(other, ModelContainer):
      return NotImplemented
    return self.score < other.score
class Storage(abc.ABC):
  """A storage for persisting results and managing state."""

  @abc.abstractmethod
  def save_model(self, model_container: ModelContainer):
    """Stores a model and its metadata."""
    # TODO: How do we enforce that save_model is called only once per
    # model?
    pass

  @abc.abstractmethod
  def get_models(self) -> Iterable[tf.keras.Model]:
    """Returns all stored models."""
    pass

  @abc.abstractmethod
  def get_best_models(self, num_models: int = 1) -> Iterable[tf.keras.Model]:
    """Returns the top `num_models` stored models in descending order."""
    # NOTE(review): "descending" refers to model quality; the in-memory
    # implementation returns lowest-score (best) models first via
    # heapq.nsmallest — confirm this is the intended contract.
    pass

  @abc.abstractmethod
  def get_model_metrics(self) -> Iterable[Iterable[float]]:
    """Returns the metrics for all stored models."""
    pass
| 1,910 | 29.822581 | 80 | py |
adanet | adanet-master/adanet/experimental/storages/__init__.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaNet ModelFlow storages."""
from adanet.experimental.storages.in_memory_storage import InMemoryStorage
# Names exported as the public API of `adanet.experimental.storages`.
__all__ = [
    "InMemoryStorage",
]
| 769 | 32.478261 | 74 | py |
adanet | adanet-master/adanet/experimental/storages/in_memory_storage.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A storage for persisting results and managing stage."""
import heapq
from typing import List
from adanet.experimental.storages.storage import ModelContainer
from adanet.experimental.storages.storage import Storage
import tensorflow.compat.v2 as tf
class InMemoryStorage(Storage):
  """In memory storage for testing-only.

  Uses a priority queue (min-heap) under the hood to sort the models
  according to their score.

  Currently the only supported score is 'loss'.
  """

  def __init__(self):
    # Min-heap of `ModelContainer`s, ordered by score (lowest/best first).
    self._model_containers = []

  def save_model(self, model_container: ModelContainer):
    """Stores a model.

    Args:
      model_container: A `ModelContainer` instance.
    """
    # `ModelContainer` defines `__lt__`/`__eq__` on its score, so heappush
    # can order containers on the heap directly. (An earlier version used a
    # (score, counter, model) tuple; the counter is no longer needed.)
    heapq.heappush(self._model_containers, model_container)

  def get_models(self) -> List[tf.keras.Model]:
    """Returns all stored models (in internal heap order, not sorted)."""
    return [c.model for c in self._model_containers]

  def get_best_models(self, num_models: int = 1) -> List[tf.keras.Model]:
    """Returns the top `num_models` stored models, best (lowest score) first."""
    return [c.model
            for c in heapq.nsmallest(num_models, self._model_containers)]

  def get_model_metrics(self) -> List[List[float]]:
    """Returns the metrics for all stored models (in internal heap order)."""
    return [c.metrics for c in self._model_containers]
| 2,113 | 34.233333 | 78 | py |
adanet | adanet-master/adanet/experimental/work_units/keras_tuner_work_unit.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A work unit for training, evaluating, and saving a Keras model."""
import os
import time
from adanet.experimental.work_units import work_unit
from kerastuner.engine.tuner import Tuner
import tensorflow.compat.v2 as tf
class KerasTunerWorkUnit(work_unit.WorkUnit):
  """Trains, evaluates and saves a tuned Keras model."""

  def __init__(self, tuner: Tuner, *search_args,
               tensorboard_base_dir: str = '/tmp', **search_kwargs):
    """Initializes a KerasTunerWorkUnit.

    Args:
      tuner: A Keras Tuner instance whose `search` performs the actual work.
      *search_args: Positional arguments forwarded to `tuner.search`.
      tensorboard_base_dir: Base directory under which a timestamped
        TensorBoard log directory is created per run. Defaults to '/tmp',
        matching the previous hard-coded behavior and `KerasTrainerWorkUnit`.
      **search_kwargs: Keyword arguments forwarded to `tuner.search`.
    """
    self._tuner = tuner
    self._search_args = search_args
    self._search_kwargs = search_kwargs
    self._tensorboard_base_dir = tensorboard_base_dir

  # TODO: Allow better customization of TensorBoard log_dir.
  def execute(self):
    """Runs the tuner search with per-run TensorBoard logging."""
    log_dir = os.path.join(self._tensorboard_base_dir, str(int(time.time())))
    tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                 update_freq='batch')
    # We don't need to eval and store, because the Tuner does it for us.
    self._tuner.search(callbacks=[tensorboard], *self._search_args,
                       **self._search_kwargs)
| 1,584 | 37.658537 | 74 | py |
adanet | adanet-master/adanet/experimental/work_units/work_unit.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A work unit for an AdaNet scheduler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
class WorkUnit(abc.ABC):
  """A single unit of work that an AdaNet scheduler can execute."""

  # `execute` is implemented by subclasses as a regular method (e.g.
  # KerasTrainerWorkUnit.execute), so `abc.abstractmethod` is the correct
  # decorator; the previous `abc.abstractproperty` wrongly declared it a
  # property (and abstractproperty is deprecated besides).
  @abc.abstractmethod
  def execute(self):
    """Performs this work unit's computation, including any side effects."""
| 868 | 28.965517 | 74 | py |
adanet | adanet-master/adanet/experimental/work_units/keras_trainer_work_unit.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A work unit for training, evaluating, and saving a Keras model."""
import os
import time
from adanet.experimental.storages.storage import ModelContainer
from adanet.experimental.storages.storage import Storage
from adanet.experimental.work_units import work_unit
import tensorflow.compat.v2 as tf
class KerasTrainerWorkUnit(work_unit.WorkUnit):
  """Trains, evaluates, and saves a Keras model."""

  def __init__(self, model: tf.keras.Model,
               train_dataset: tf.data.Dataset,
               eval_dataset: tf.data.Dataset,
               storage: Storage,
               tensorboard_base_dir: str = '/tmp'):
    """Initializes a KerasTrainerWorkUnit.

    Args:
      model: The compiled `tf.keras.Model` to train and evaluate.
      train_dataset: Dataset used for fitting the model.
      eval_dataset: Dataset used for evaluating the model.
      storage: A `Storage` that receives the evaluated model.
      tensorboard_base_dir: Base directory for per-run TensorBoard logs.
    """
    self._model = model
    self._train_dataset = train_dataset
    self._eval_dataset = eval_dataset
    self._storage = storage
    self._tensorboard_base_dir = tensorboard_base_dir

  # TODO: Allow better customization of TensorBoard log_dir.
  def execute(self):
    """Fits the model (if trainable), evaluates it, and stores the result."""
    run_dir = os.path.join(self._tensorboard_base_dir, str(int(time.time())))
    tb_callback = tf.keras.callbacks.TensorBoard(log_dir=run_dir,
                                                 update_freq='batch')
    if not self._model.trainable:
      print('Skipping training since model.trainable set to false.')
    else:
      self._model.fit(self._train_dataset, callbacks=[tb_callback])
    results = self._model.evaluate(self._eval_dataset,
                                   callbacks=[tb_callback])
    # `evaluate` returns a bare loss scalar when the model was compiled
    # without metrics; normalize to a list of [loss, *metrics].
    results = results if isinstance(results, list) else [results]
    self._storage.save_model(ModelContainer(results[0], self._model, results))
| 2,301 | 40.107143 | 79 | py |
adanet | adanet-master/adanet/experimental/work_units/__init__.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaNet ModelFlow work units."""
from adanet.experimental.work_units.keras_trainer_work_unit import KerasTrainerWorkUnit
from adanet.experimental.work_units.keras_tuner_work_unit import KerasTunerWorkUnit
# Names exported as the public API of `adanet.experimental.work_units`.
__all__ = [
    "KerasTrainerWorkUnit",
    "KerasTunerWorkUnit",
]
| 899 | 35 | 87 | py |
adanet | adanet-master/adanet/experimental/phases/autoensemble_phase.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A phase that automatically ensembles models."""
import abc
import random
from typing import Iterable, Iterator, List
from adanet.experimental.keras.ensemble_model import EnsembleModel
from adanet.experimental.keras.ensemble_model import MeanEnsemble
from adanet.experimental.phases.phase import DatasetProvider
from adanet.experimental.phases.phase import ModelProvider
from adanet.experimental.storages.in_memory_storage import InMemoryStorage
from adanet.experimental.storages.storage import Storage
from adanet.experimental.work_units.keras_trainer_work_unit import KerasTrainerWorkUnit
from adanet.experimental.work_units.work_unit import WorkUnit
import tensorflow.compat.v2 as tf
class EnsembleStrategy(abc.ABC):
  """An abstract ensemble strategy."""

  @abc.abstractmethod
  def __call__(
      self, candidates: List[tf.keras.Model]) -> Iterable[List[tf.keras.Model]]:
    """Returns groups of candidate models; each group forms one ensemble."""
    pass
class Ensembler(abc.ABC):
  """An abstract ensembler."""

  def __init__(self, loss, optimizer, metrics):
    # Keras compile arguments made available to subclasses when they build
    # and compile ensembles.
    self._loss = loss
    self._optimizer = optimizer
    self._metrics = metrics

  @abc.abstractmethod
  def __call__(self, submodels: List[tf.keras.Model]) -> EnsembleModel:
    """Returns an `EnsembleModel` that combines the given `submodels`."""
    pass
class MeanEnsembler(Ensembler):
  """An ensembler that averages the weights of submodel outputs."""

  def __init__(self, loss, optimizer, metrics, freeze_submodels=True):
    """Initializes a MeanEnsembler.

    Args:
      loss: Loss used when compiling the ensemble.
      optimizer: Optimizer stored for the `Ensembler` interface; the compile
        step below uses a zero-learning-rate SGD instead.
      metrics: Metrics used when compiling the ensemble.
      freeze_submodels: Whether to mark every ensemble layer non-trainable.
    """
    super().__init__(loss, optimizer, metrics)
    self._freeze_submodels = freeze_submodels

  def __call__(self, submodels: List[tf.keras.Model]) -> EnsembleModel:
    mean_ensemble = MeanEnsemble(
        submodels, freeze_submodels=self._freeze_submodels)
    if self._freeze_submodels:
      for ensemble_layer in mean_ensemble.layers:
        ensemble_layer.trainable = False
    # Compile SGD with learning rate set to 0 for no weight updates.
    mean_ensemble.compile(
        loss=self._loss, optimizer=tf.keras.optimizers.SGD(0),
        metrics=self._metrics)
    return mean_ensemble
class GrowStrategy(EnsembleStrategy):
  """An ensemble strategy that adds one candidate to the ensemble at a time.

  Every candidate is proposed as its own single-model grouping.
  """

  def __call__(
      self, candidates: List[tf.keras.Model]) -> Iterable[List[tf.keras.Model]]:
    groupings = []
    for candidate in candidates:
      groupings.append([candidate])
    return groupings
class AllStrategy(EnsembleStrategy):
  """An ensemble strategy that adds all candidates to the ensemble.

  Proposes exactly one grouping containing every candidate.
  """

  def __call__(
      self, candidates: List[tf.keras.Model]) -> Iterable[List[tf.keras.Model]]:
    single_grouping_of_all = candidates
    return [single_grouping_of_all]
class RandomKStrategy(EnsembleStrategy):
  """An ensemble strategy that adds k random candidates (with replacement)."""

  def __init__(self, k, seed=None):
    """Initializes a RandomKStrategy ensemble strategy.

    Args:
      k: Number of candidates to sample.
      seed: Random seed. If set, sampling is deterministic and does not
        disturb the global `random` stream.
    """
    self._k = k
    self._seed = seed

  def __call__(
      self, candidates: List[tf.keras.Model]) -> Iterable[List[tf.keras.Model]]:
    # `is not None` rather than truthiness: seed 0 is a valid seed.
    if self._seed is not None:
      # Save the global RNG state, sample under the fixed seed, then restore
      # so callers' randomness is unaffected. (Previously the restored-state
      # call's None return was pointlessly rebound to a local.)
      saved_state = random.getstate()
      random.seed(self._seed)
      sampled = random.choices(candidates, k=self._k)
      random.setstate(saved_state)
    else:
      sampled = random.choices(candidates, k=self._k)
    # Bug fix: return a single grouping of k models, i.e. [[m1, ..., mk]].
    # The previous code wrapped the sample twice ([[candidates-list]] then
    # [that]), yielding a triple-nested list inconsistent with the
    # EnsembleStrategy contract and with GrowStrategy/AllStrategy.
    return [sampled]
class AutoEnsemblePhase(DatasetProvider, ModelProvider):
  """A phase that automatically ensembles models from a prior phase."""

  def __init__(self,
               ensemblers: List[Ensembler],
               ensemble_strategies: List[EnsembleStrategy],
               storage: Optional[Storage] = None,
               num_candidates: Optional[int] = None):
    """Initializes an AutoEnsemblePhase.

    Args:
      ensemblers: A list of `Ensembler` instances to determine how to combine
        subnetworks.
      ensemble_strategies: A list of `EnsembleStrategy` instances to determine
        which subnetworks compose an ensemble.
      storage: A `Storage` instance to store models and model metadata. If
        `None`, a fresh `InMemoryStorage` is created for this phase. (The
        previous default of `InMemoryStorage()` in the signature was evaluated
        once at definition time, so every phase constructed with the default
        silently shared one storage instance.)
      num_candidates: The number of subnetwork candidates to consider from the
        previous phase. If `None` then all of the subnetworks generated in the
        previous phase will be considered.
    """
    super().__init__(storage if storage is not None else InMemoryStorage())
    self._ensemblers = ensemblers
    self._ensemble_strategies = ensemble_strategies
    self._num_candidates = num_candidates

  def work_units(self, previous_phase) -> Iterator[WorkUnit]:
    """Yields one `KerasTrainerWorkUnit` per candidate ensemble.

    Args:
      previous_phase: The phase providing candidate subnetworks and the
        train/eval datasets.

    Yields:
      A `KerasTrainerWorkUnit` for every (strategy grouping, ensembler)
      combination, each training an ensemble of the grouped submodels.
    """
    # Cache datasets so get_train_dataset()/get_eval_dataset() work after this
    # generator is created.
    self._train_dataset = previous_phase.get_train_dataset()
    self._eval_dataset = previous_phase.get_eval_dataset()
    if self._num_candidates:
      candidates = previous_phase.get_best_models(
          num_models=self._num_candidates)
    else:
      candidates = previous_phase.get_models()
    # Materialize: candidates is iterated once per strategy below, and the
    # provider may return a single-use iterable.
    candidates = list(candidates)
    # Seed each new ensemble with the best ensemble found so far, if any.
    if self.get_best_models():
      current_best_ensemble = list(self.get_best_models())[0]
    else:
      current_best_ensemble = None
    for ensemble_strategy in self._ensemble_strategies:
      for submodels in ensemble_strategy(candidates):
        for ensembler in self._ensemblers:
          if current_best_ensemble:
            previous_ensemble = current_best_ensemble.submodels
          else:
            previous_ensemble = []
          ensemble = ensembler(previous_ensemble + submodels)
          # Reuse the cached datasets rather than re-querying the provider.
          yield KerasTrainerWorkUnit(ensemble,
                                     self._train_dataset,
                                     self._eval_dataset,
                                     self._storage)

  def get_models(self) -> Iterable[tf.keras.Model]:
    """Returns all ensembles recorded in this phase's storage."""
    return self._storage.get_models()

  def get_best_models(self, num_models=1) -> Iterable[tf.keras.Model]:
    """Returns the best `num_models` ensembles from this phase's storage."""
    return self._storage.get_best_models(num_models)

  # TODO: Add some way to check that work_units has to be called
  # before accessing these methods.
  def get_train_dataset(self) -> tf.data.Dataset:
    """Returns the train dataset cached by `work_units`."""
    return self._train_dataset

  def get_eval_dataset(self) -> tf.data.Dataset:
    """Returns the eval dataset cached by `work_units`."""
    return self._eval_dataset
| 6,425 | 34.899441 | 87 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.