the-stack_0_23489 | """Access to Python's configuration information."""
import os
import sys
from os.path import pardir, realpath
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
_INSTALL_SCHEMES = {
'posix_prefix': {
'stdlib': '{installed_base}/lib/python{py_version_short}',
'platstdlib': '{platbase}/lib/python{py_version_short}',
'purelib': '{base}/lib/python{py_version_short}/site-packages',
'platlib': '{platbase}/lib/python{py_version_short}/site-packages',
'include':
'{installed_base}/include/python{py_version_short}{abiflags}',
'platinclude':
'{installed_platbase}/include/python{py_version_short}{abiflags}',
'scripts': '{base}/bin',
'data': '{base}',
},
'posix_home': {
'stdlib': '{installed_base}/lib/python',
'platstdlib': '{base}/lib/python',
'purelib': '{base}/lib/python',
'platlib': '{base}/lib/python',
'include': '{installed_base}/include/python',
'platinclude': '{installed_base}/include/python',
'scripts': '{base}/bin',
'data': '{base}',
},
'nt': {
'stdlib': '{installed_base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{installed_base}/Include',
'platinclude': '{installed_base}/Include',
'scripts': '{base}/Scripts',
'data': '{base}',
},
# NOTE: When modifying "purelib" scheme, update site._get_path() too.
'nt_user': {
'stdlib': '{userbase}/Python{py_version_nodot}',
'platstdlib': '{userbase}/Python{py_version_nodot}',
'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
'include': '{userbase}/Python{py_version_nodot}/Include',
'scripts': '{userbase}/Python{py_version_nodot}/Scripts',
'data': '{userbase}',
},
'posix_user': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data': '{userbase}',
},
'osx_framework_user': {
'stdlib': '{userbase}/lib/python',
'platstdlib': '{userbase}/lib/python',
'purelib': '{userbase}/lib/python/site-packages',
'platlib': '{userbase}/lib/python/site-packages',
'include': '{userbase}/include',
'scripts': '{userbase}/bin',
'data': '{userbase}',
},
'riscos': {
'stdlib': 'Python3:Python{py_version_nodot}.Lib',
'platstdlib': 'Python3:.Python{py_version_nodot}.Lib',
'purelib': 'PythonSite:Python{py_version_nodot}.Site-Packages',
'platlib': 'PythonSite:Python{py_version_nodot}.Site-Packages',
'include': '<Python3:Python{py_version_nodot}.Include',
'scripts': '<Python3$Dir>.Scripts',
'data': '<Python3$Dir>.Data',
},
}
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
'scripts', 'data')
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = '%d.%d' % sys.version_info[:2]
_PY_VERSION_SHORT_NO_DOT = '%d%d' % sys.version_info[:2]
_PREFIX = os.path.normpath(sys.prefix)
_BASE_PREFIX = os.path.normpath(sys.base_prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if (os.name == 'nt' and
_PROJECT_BASE.lower().endswith(('\\pcbuild\\win32', '\\pcbuild\\amd64'))):
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
_PROJECT_BASE = _safe_realpath(os.environ["_PYTHON_PROJECT_BASE"])
def _is_python_source_dir(d):
for fn in ("Setup", "Setup.local"):
if os.path.isfile(os.path.join(d, "Modules", fn)):
return True
return False
_sys_home = getattr(sys, '_home', None)
if os.name == 'nt':
def _fix_pcbuild(d):
if d and os.path.normcase(d).startswith(
os.path.normcase(os.path.join(_PREFIX, "PCbuild"))):
return _PREFIX
return d
_PROJECT_BASE = _fix_pcbuild(_PROJECT_BASE)
_sys_home = _fix_pcbuild(_sys_home)
def is_python_build(check_home=False):
if check_home and _sys_home:
return _is_python_source_dir(_sys_home)
return _is_python_source_dir(_PROJECT_BASE)
_PYTHON_BUILD = is_python_build(True)
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_INSTALL_SCHEMES[scheme]['include'] = '{srcdir}/Include'
_INSTALL_SCHEMES[scheme]['platinclude'] = '{projectbase}/.'
def _subst_vars(s, local_vars):
try:
return s.format(**local_vars)
except KeyError:
try:
return s.format(**os.environ)
except KeyError as var:
raise AttributeError('{%s}' % var) from None
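# Illustrative sketch (added commentary, not part of the original module):
# _subst_vars fills each "{name}" placeholder from local_vars first and only
# falls back to os.environ when the key is missing.
# >>> _subst_vars('{base}/bin', {'base': '/usr/local'})
# '/usr/local/bin'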
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _INSTALL_SCHEMES[scheme].items():
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
# NOTE: site.py has a copy of this function.
# Keep the two copies in sync when modifying this function.
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
if env_base:
return env_base
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
return joinuser(base, "Python")
if sys.platform == "darwin" and sys._framework:
return joinuser("~", "Library", sys._framework,
"%d.%d" % sys.version_info[:2])
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
import re
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with open(filename, errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m1 = _findvar1_rx.search(value)
m2 = _findvar2_rx.search(value)
if m1 and m2:
m = m1 if m1.start() < m2.start() else m2
else:
m = m1 if m1 else m2
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if name.startswith('PY_') \
and name[3:] in renamed_variables:
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
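# Hypothetical usage sketch (the Makefile fragment is an assumption, not part
# of this module): given the lines
#     prefix=/usr/local
#     BINDIR=$(prefix)/bin
# _parse_makefile() resolves the $(prefix) reference and returns a dict like
# {'prefix': '/usr/local', 'BINDIR': '/usr/local/bin'}.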
def get_makefile_filename():
"""Return the path of the Makefile."""
if os.name == 'riscos':
return os.path.join(_sys_home or _PROJECT_BASE, "RISCOS", "Makefile")
if _PYTHON_BUILD:
return os.path.join(_sys_home or _PROJECT_BASE, "Makefile")
if hasattr(sys, 'abiflags'):
config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
else:
config_dir_name = 'config'
if hasattr(sys.implementation, '_multiarch'):
config_dir_name += '-%s' % sys.implementation._multiarch
return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _get_sysconfigdata_name():
return os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
'_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
abi=sys.abiflags,
platform=sys.platform,
multiarch=getattr(sys.implementation, '_multiarch', ''),
))
def _generate_posix_vars():
"""Generate the Python module containing build-time variables."""
import pprint
vars = {}
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except OSError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise OSError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except OSError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise OSError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['BLDSHARED'] = vars['LDSHARED']
# There's a chicken-and-egg situation on OS X with regards to the
# _sysconfigdata module after the changes introduced by #15298:
# get_config_vars() is called by get_platform() as part of the
# `make pybuilddir.txt` target -- which is a precursor to the
# _sysconfigdata.py module being constructed. Unfortunately,
# get_config_vars() eventually calls _init_posix(), which attempts
# to import _sysconfigdata, which we won't have built yet. In order
# for _init_posix() to work, if we're on Darwin, just mock up the
# _sysconfigdata module manually and populate it with the build vars.
# This is more than sufficient for ensuring the subsequent call to
# get_platform() succeeds.
name = _get_sysconfigdata_name()
if 'darwin' in sys.platform:
import types
module = types.ModuleType(name)
module.build_time_vars = vars
sys.modules[name] = module
if sys.platform == 'riscos':
pybuilddir = 'build.lib_%s-%s' % (get_platform(), _PY_VERSION_SHORT_NO_DOT)
builddir = 'pybuilddir/txt'
else:
pybuilddir = 'build/lib.%s-%s' % (get_platform(), _PY_VERSION_SHORT)
builddir = 'pybuilddir.txt'
if hasattr(sys, "gettotalrefcount"):
pybuilddir += '-pydebug'
os.makedirs(pybuilddir, exist_ok=True)
if sys.platform == 'riscos':
# Add /py on so it will import without type
destfile = os.path.join(pybuilddir, name+'/py')
with open(destfile, 'w', encoding='utf8') as f:
f.write('# system configuration generated and used by'
' the sysconfig module\n')
f.write('build_time_vars = ')
pprint.pprint(vars, stream=f)
# Create file used for sys.path fixup -- see Modules/getpath.c
with open(builddir, 'w', encoding='utf8') as f:
f.write(pybuilddir)
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# _sysconfigdata is generated at build time, see _generate_posix_vars()
name = _get_sysconfigdata_name()
_temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
build_time_vars = _temp.build_time_vars
vars.update(build_time_vars)
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['EXT_SUFFIX'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
def _init_riscos(vars):
"""Initialize the module as appropriate for RISCOS"""
_init_posix(vars)
# set basic install directories
#vars['LIBDEST'] = get_path('stdlib')
#vars['BINLIBDEST'] = get_path('platstdlib')
#vars['INCLUDEPY'] = get_path('include')
vars['EXT_SUFFIX'] = ''
vars['EXE'] = ''
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
#vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
import re
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
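# Illustrative example (the macro names are assumptions): a pyconfig.h line
# such as '#define HAVE_UNISTD_H 1' yields vars['HAVE_UNISTD_H'] == 1, while
# '/* #undef HAVE_FOO */' yields vars['HAVE_FOO'] == 0.
# >>> with open(get_config_h_filename()) as fp:   # doctest: +SKIP
# ...     vars = parse_config_h(fp)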
def get_config_h_filename():
"""Return the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_sys_home or _PROJECT_BASE, "PC")
elif os.name == "riscos":
return os.path.join(_sys_home or _PROJECT_BASE, "RISCOS", "h", "pyconfig")
else:
inc_dir = _sys_home or _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Return a tuple containing the schemes names."""
return tuple(sorted(_INSTALL_SCHEMES))
def get_path_names():
"""Return a tuple containing the paths names."""
return _SCHEME_KEYS
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
if expand:
return _expand_vars(scheme, vars)
else:
return _INSTALL_SCHEMES[scheme]
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
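# Usage sketch (the returned paths are installation-specific; the values shown
# below are only plausible examples, not guaranteed output):
# >>> get_paths('posix_prefix')['purelib']   # doctest: +SKIP
# '/usr/local/lib/python3.8/site-packages'
# >>> get_path('scripts', 'posix_user')      # doctest: +SKIP
# '/home/user/.local/bin'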
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION_SHORT_NO_DOT
_CONFIG_VARS['installed_base'] = _BASE_PREFIX
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['installed_platbase'] = _BASE_EXEC_PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
_CONFIG_VARS['abiflags'] = ''
if os.name == 'nt':
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
if os.name == 'riscos':
_init_riscos(_CONFIG_VARS)
# For backward compatibility, see issue19555
SO = _CONFIG_VARS.get('EXT_SUFFIX')
if SO is not None:
_CONFIG_VARS['SO'] = SO
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
_CONFIG_VARS['userbase'] = _getuserbase()
# Always convert srcdir to an absolute path
srcdir = _CONFIG_VARS.get('srcdir', _PROJECT_BASE)
if os.name == 'posix':
if _PYTHON_BUILD:
# If srcdir is a relative path (typically '.' or '..')
# then it should be interpreted relative to the directory
# containing Makefile.
base = os.path.dirname(get_makefile_filename())
srcdir = os.path.join(base, srcdir)
else:
# srcdir is not meaningful since the installation is
# spread about the filesystem. We choose the
# directory containing the Makefile since we know it
# exists.
srcdir = os.path.dirname(get_makefile_filename())
_CONFIG_VARS['srcdir'] = _safe_realpath(srcdir)
# OS X platforms require special customization to handle
# multi-architecture, multi-os-version installers
if sys.platform == 'darwin':
import _osx_support
_osx_support.customize_config_vars(_CONFIG_VARS)
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
if name == 'SO':
import warnings
warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
return get_config_vars().get(name)
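# Illustrative sketch (the value is platform- and build-specific):
# >>> get_config_var('EXT_SUFFIX')           # doctest: +SKIP
# '.cpython-38-x86_64-linux-gnu.so'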
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name and
version and the architecture (as supplied by 'os.uname()'), although the
exact information included depends on the OS; on Linux, the kernel version
isn't particularly important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
Windows will return one of:
win-amd64 (64bit Windows on AMD64, aka x86_64, Intel64, EM64T, etc.)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
if 'amd64' in sys.version.lower():
return 'win-amd64'
if '(arm)' in sys.version.lower():
return 'win-arm32'
if '(arm64)' in sys.version.lower():
return 'win-arm64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha
return sys.platform
# Set for cross builds explicitly
if "_PYTHON_HOST_PLATFORM" in os.environ:
return os.environ["_PYTHON_HOST_PLATFORM"]
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters, and translate
# spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# We can't use "platform.architecture()[0]" because of a
# bootstrap problem. We use a dict to get an error
# if something suspicious happens.
bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
machine += ".%s" % bitness[sys.maxsize]
# fall through to standard osname-release-machine representation
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
import re
rel_re = re.compile(r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
import _osx_support
osname, release, machine = _osx_support.get_platform_osx(
get_config_vars(),
osname, release, machine)
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
"""Display all information sysconfig detains."""
if '--generate-posix-vars' in sys.argv:
_generate_posix_vars()
return
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars())
if __name__ == '__main__':
_main()
the-stack_0_23490 | from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///blog.db'
db = SQLAlchemy(app)
class Blogpost(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(50), nullable=False)
subtitle = db.Column(db.String(50))
author = db.Column(db.String(20), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False)
content = db.Column(db.Text, nullable=False)
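# One-time setup sketch (assumptions: blog.db has not been created yet and this
# file is importable under a hypothetical module name 'app'):
# >>> from app import db
# >>> db.create_all()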
@app.route('/')
def index():
posts = Blogpost.query.order_by(Blogpost.date_posted.desc()).all()
return render_template('index.html', posts=posts)
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/post/<int:post_id>')
def post(post_id):
post = Blogpost.query.filter_by(id=post_id).one()
return render_template('post.html', post=post)
@app.route('/add')
def add():
return render_template('add.html')
@app.route('/addpost', methods=['POST'])
def addpost():
title = request.form['title']
subtitle = request.form['subtitle']
author = request.form['author']
content = request.form['content']
post = Blogpost(title=title, subtitle=subtitle, author=author, content=content, date_posted=datetime.now())
db.session.add(post)
db.session.commit()
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(debug=True, port=3490)
the-stack_0_23491 | import argparse
import sys
import dacite
from dataclasses import dataclass, field
import fsspec
import json
import logging
from fv3net.artifacts.metadata import StepMetadata, log_fact_json
import numpy as np
import os
import time
from typing import Any, Dict, List, Mapping, Optional, Sequence, Set, Union
from fv3fit._shared.config import register_training_function
from fv3fit.dataclasses import asdict_with_enum as _asdict_with_enum
from fv3fit.emulation.data.transforms import expand_single_dim_data
from fv3fit import tfdataset
import tensorflow as tf
import yaml
from fv3fit import set_random_seed
from fv3fit._shared import put_dir
from fv3fit._shared.config import (
OptimizerConfig,
get_arg_updated_config_dict,
to_nested_dict,
)
from fv3fit._shared.hyperparameters import Hyperparameters
from fv3fit.emulation.layers.normalization2 import MeanMethod, StdDevMethod
from fv3fit.keras._models.shared.pure_keras import PureKerasDictPredictor
from fv3fit.keras.jacobian import compute_jacobians, nondimensionalize_jacobians
from fv3fit.emulation.transforms.factories import ConditionallyScaled
from fv3fit.emulation.types import LossFunction, TensorDict
from fv3fit.emulation import train, ModelCheckpointCallback
from fv3fit.emulation.data import TransformConfig, nc_dir_to_tfdataset
from fv3fit.emulation.data.config import SliceConfig
from fv3fit.emulation.layers import ArchitectureConfig
from fv3fit.emulation.keras import save_model
from fv3fit.emulation.losses import CustomLoss
from fv3fit.emulation.models import (
transform_model,
MicrophysicsConfig,
ConservativeWaterConfig,
)
from fv3fit.emulation.transforms import (
ComposedTransformFactory,
Difference,
TensorTransform,
TransformedVariableConfig,
CloudWaterDiffPrecpd,
GscondClassesV1,
)
from fv3fit.emulation.layers.normalization import standard_deviation_all_features
from fv3fit.wandb import (
WandBConfig,
store_model_artifact,
plot_all_output_sensitivities,
)
logger = logging.getLogger(__name__)
__all__ = [
"TransformedParameters",
"MicrophysicsConfig",
"CustomLoss",
"TransformedVariableConfig",
"ConditionallyScaled",
"Difference",
"WandBConfig",
"ArchitectureConfig",
"SliceConfig",
]
def load_config_yaml(path: str) -> Dict[str, Any]:
"""
Load yaml from local/remote location
"""
with fsspec.open(path, "r") as f:
d = yaml.safe_load(f)
return d
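# Illustrative usage (the path is hypothetical):
# >>> d = load_config_yaml("gs://bucket/train-config.yaml")   # doctest: +SKIP
# >>> config = TrainConfig.from_dict(d)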
@dataclass
class TransformedParameters(Hyperparameters):
"""
Configuration for training a microphysics emulator
Args:
transform: Data preprocessing TransformConfig
tensor_transform: specification of differentiable tensorflow
transformations to apply before and after data is passed to models and
losses.
model: MicrophysicsConfig used to build the keras model
use_wandb: Enable wandb logging of training, requires that wandb is installed
and initialized
wandb: WandBConfig to set up the wandb logged run
loss: Configuration of the keras loss to prepare and use for training
epochs: Number of training epochs
batch_size: batch size applied to tf datasets during training
valid_freq: How often to score validation data (in epochs)
verbose: Verbosity of keras fit output
shuffle_buffer_size: How many samples to keep in the keras shuffle buffer
during training
out_url: where to save checkpoints
checkpoint_model: if true, save a checkpoint after each epoch
Example:
.. code-block:: yaml
model_type: transformed
hyperparameters:
epochs: 1
loss:
loss_variables: [dQ2]
model:
architecture:
name: dense
direct_out_variables:
- dQ2
input_variables:
- air_temperature
- specific_humidity
- cos_zenith_angle
use_wandb: false
"""
tensor_transform: List[
Union[
TransformedVariableConfig,
ConditionallyScaled,
Difference,
CloudWaterDiffPrecpd,
GscondClassesV1,
]
] = field(default_factory=list)
model: Optional[MicrophysicsConfig] = None
conservative_model: Optional[ConservativeWaterConfig] = None
loss: CustomLoss = field(default_factory=CustomLoss)
epochs: int = 1
batch_size: int = 128
valid_freq: int = 5
verbose: int = 2
shuffle_buffer_size: Optional[int] = 13824
# only model checkpoints are saved at out_url, but need to keep these name
# for backwards compatibility
checkpoint_model: bool = True
out_url: str = ""
# ideally will refactor these out, but need to insert the callback somehow
use_wandb: bool = True
wandb: WandBConfig = field(default_factory=WandBConfig)
@property
def transform_factory(self) -> ComposedTransformFactory:
return ComposedTransformFactory(self.tensor_transform)
def build_transform(self, sample: TensorDict) -> TensorTransform:
return self.transform_factory.build(sample)
@property
def _model(
self,
) -> Union[
MicrophysicsConfig, ConservativeWaterConfig,
]:
if self.model:
return self.model
elif self.conservative_model:
return self.conservative_model
else:
raise ValueError(
"Neither .model, .conservative_model, nor .transformed_model provided."
)
def build_model(
self, data: Mapping[str, tf.Tensor], transform: TensorTransform
) -> tf.keras.Model:
inputs = {
name: tf.keras.Input(data[name].shape[1:], name=name)
for name in self.input_variables
}
inner_model = self._model.build(transform.forward(data))
return transform_model(inner_model, transform, inputs)
def build_loss(
self, data: Mapping[str, tf.Tensor], transform: TensorTransform
) -> LossFunction:
return self.loss.build(transform.forward(data))
@property
def input_variables(self) -> Sequence:
return list(
self.transform_factory.backward_names(set(self._model.input_variables))
)
@property
def model_variables(self) -> Set[str]:
return self.transform_factory.backward_names(
set(self._model.input_variables) | set(self._model.output_variables)
)
@property
def variables(self) -> Set[str]:
return self.model_variables
@classmethod
def init_testing(cls, input_variables, output_variables) -> "TransformedParameters":
"""used for testing"""
return TransformedParameters(
model=MicrophysicsConfig(
input_variables=input_variables,
direct_out_variables=output_variables,
architecture=ArchitectureConfig("dense"),
),
loss=CustomLoss(loss_variables=output_variables),
use_wandb=False,
)
# Temporarily subclass from the hyperparameters object for backwards compatibility
# we can delete this class once usage has switched to fv3fit.train
@dataclass
class TrainConfig(TransformedParameters):
"""
Configuration for training a microphysics emulator
Args:
train_url: Path to training netcdfs (already in [sample x feature] format)
test_url: Path to validation netcdfs (already in [sample x feature] format)
out_url: Where to store the trained model, history, and configuration
transform: Data preprocessing TransformConfig
tensor_transform: specification of differentiable tensorflow
transformations to apply before and after data is passed to models and
losses.
model: MicrophysicsConfig used to build the keras model
nfiles: Number of files to use from train_url
nfiles_valid: Number of files to use from test_url
use_wandb: Enable wandb logging of training, requires that wandb is installed
and initialized
wandb: WandBConfig to set up the wandb logged run
loss: Configuration of the keras loss to prepare and use for training
epochs: Number of training epochs
batch_size: batch size applied to tf datasets during training
valid_freq: How often to score validation data (in epochs)
verbose: Verbosity of keras fit output
shuffle_buffer_size: How many samples to keep in the keras shuffle buffer
during training
checkpoint_model: if true, save a checkpoint after each epoch
log_level: what logging level to use
"""
train_url: str = ""
test_url: str = ""
transform: TransformConfig = field(default_factory=TransformConfig)
tensor_transform: List[
Union[
TransformedVariableConfig,
ConditionallyScaled,
Difference,
CloudWaterDiffPrecpd,
GscondClassesV1,
]
] = field(default_factory=list)
model: Optional[MicrophysicsConfig] = None
conservative_model: Optional[ConservativeWaterConfig] = None
nfiles: Optional[int] = None
nfiles_valid: Optional[int] = None
loss: CustomLoss = field(default_factory=CustomLoss)
epochs: int = 1
batch_size: int = 128
valid_freq: int = 5
verbose: int = 2
shuffle_buffer_size: Optional[int] = 13824
checkpoint_model: bool = True
log_level: str = "INFO"
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> "TrainConfig":
"""Standard init from nested dictionary"""
# casting is necessary for 'from_args' since all values come in as strings
# TODO: should these just be parsed as JSON?
config = dacite.Config(
strict=True, cast=[bool, str, int, float, StdDevMethod, MeanMethod]
)
return dacite.from_dict(cls, d, config=config)
@classmethod
def from_flat_dict(cls, d: Dict[str, Any]) -> "TrainConfig":
"""
Init from a dictionary flattened in the style of wandb configs
where all nested mapping keys are flattened to the top level
by joining with a '.'
E.g.:
{
"test_url": "gs://bucket/path/to/blobs",
"model.input_variables": ["var1", "var2"],
"model.architecture.name": "rnn",
...
}
"""
d = to_nested_dict(d)
return cls.from_dict(d)
@classmethod
def from_yaml_path(cls, path: str) -> "TrainConfig":
"""Init from path to yaml file"""
d = load_config_yaml(path)
return cls.from_dict(d)
@classmethod
def from_args(cls, args: Optional[Sequence[str]] = None):
"""
Init from commandline arguments (or provided arguments). If no args
are provided, uses sys.argv to parse.
Note: A current limitation of this init style is that we cannot provide
arbitrary arguments to the parser. Therefore, value being updated should
either be a member of the default config or the file specified by
--config-path
Args:
args: A list of arguments to be parsed. If not provided, uses
sys.argv
Requires "--config-path", use "--config-path default" to use
default configuration
Note: arguments should be in the flat style used by wandb where all
nested mappings are at the top level with '.' joined keys. E.g.,
"--model.architecture.name rnn"
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--config-path",
required=True,
help="Path to training config yaml. Use '--config-path default'"
" to run with a default configuration.",
)
path_arg, unknown_args = parser.parse_known_args(args=args)
if path_arg.config_path == "default":
config = get_default_config()
else:
config = cls.from_yaml_path(path_arg.config_path)
if unknown_args:
updated = get_arg_updated_config_dict(
unknown_args, _asdict_with_enum(config)
)
config = cls.from_dict(updated)
return config
def to_yaml(self) -> str:
return yaml.safe_dump(_asdict_with_enum(self))
def open_dataset(
self, url: str, nfiles: Optional[int], required_variables: Set[str],
) -> tf.data.Dataset:
nc_open_fn = self.transform.get_pipeline(required_variables)
return nc_dir_to_tfdataset(
url,
nc_open_fn,
nfiles=nfiles,
shuffle=True,
random_state=np.random.RandomState(0),
)
def save_jacobians(std_jacobians, dir_, filename="jacobians.npz"):
with put_dir(dir_) as tmpdir:
dumpable = {
f"{out_name}/{in_name}": data
for out_name, sensitivities in std_jacobians.items()
for in_name, data in sensitivities.items()
}
np.savez(os.path.join(tmpdir, filename), **dumpable)
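# Sketch of the resulting archive layout (variable names are examples taken
# from the default config below): each array in jacobians.npz is keyed
# "output_name/input_name", e.g.
# np.load("jacobians.npz")["air_temperature_after_precpd/specific_humidity_input"]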
@register_training_function("transformed", TransformedParameters)
def train_function(
hyperparameters: TransformedParameters,
train_batches: tf.data.Dataset,
validation_batches: Optional[tf.data.Dataset],
) -> PureKerasDictPredictor:
def _prepare(ds):
return (
ds.map(tfdataset.apply_to_mapping(tfdataset.float64_to_float32))
.map(expand_single_dim_data)
.unbatch()
)
return _train_function_unbatched(
hyperparameters,
_prepare(train_batches),
_prepare(validation_batches) if validation_batches else None,
)
def _train_function_unbatched(
config: TransformedParameters,
train_ds: tf.data.Dataset,
test_ds: Optional[tf.data.Dataset],
) -> PureKerasDictPredictor:
# callbacks that are always active
callbacks = [tf.keras.callbacks.TerminateOnNaN()]
if config.use_wandb:
config.wandb.init(config=_asdict_with_enum(config))
callbacks.append(config.wandb.get_callback())
if config.shuffle_buffer_size is not None:
train_ds = train_ds.shuffle(config.shuffle_buffer_size)
train_set = next(iter(train_ds.batch(150_000)))
transform = config.build_transform(train_set)
train_ds = train_ds.map(transform.forward)
model = config.build_model(train_set, transform)
if config.checkpoint_model:
callbacks.append(
ModelCheckpointCallback(
filepath=os.path.join(
config.out_url, "checkpoints", "epoch.{epoch:03d}.tf"
)
)
)
train_ds_batched = train_ds.batch(config.batch_size).prefetch(tf.data.AUTOTUNE)
if test_ds is not None:
test_ds = test_ds.map(transform.forward)
test_ds_batched = test_ds.batch(config.batch_size).prefetch(tf.data.AUTOTUNE)
else:
test_ds_batched = None
history = train(
model,
train_ds_batched,
config.build_loss(train_set, transform),
optimizer=config.loss.optimizer.instance,
epochs=config.epochs,
validation_data=test_ds_batched,
validation_freq=config.valid_freq,
verbose=config.verbose,
callbacks=callbacks,
)
return PureKerasDictPredictor(
model, passthrough=(model, transform, history, train_set)
)
def main(config: TrainConfig, seed: int = 0):
logging.basicConfig(level=getattr(logging, config.log_level))
set_random_seed(seed)
start = time.perf_counter()
train_ds = config.open_dataset(
config.train_url, config.nfiles, config.model_variables
)
test_ds = config.open_dataset(
config.test_url, config.nfiles_valid, config.model_variables
)
StepMetadata(
job_type="train",
url=config.out_url,
dependencies={"train_data": config.train_url, "test_data": config.test_url},
args=sys.argv[1:],
).print_json()
predictor = train_function(config, train_ds, test_ds)
model, transform, history, train_set = predictor.passthrough # type: ignore
logger.debug("Training complete")
with put_dir(config.out_url) as tmpdir:
with open(os.path.join(tmpdir, "history.json"), "w") as f:
json.dump(history.params, f)
with open(os.path.join(tmpdir, "config.yaml"), "w") as f:
f.write(config.to_yaml())
local_model_path = save_model(model, tmpdir)
if config.use_wandb:
store_model_artifact(local_model_path, name=config._model.name)
end = time.perf_counter()
log_fact_json(data={"train_time_seconds": end - start})
# Jacobians after model storing in case of "out of memory" errors
sample = transform.forward(train_set)
jacobians = compute_jacobians(model, sample, config.input_variables)
std_factors = {
name: np.array(float(standard_deviation_all_features(data)))
for name, data in sample.items()
}
std_jacobians = nondimensionalize_jacobians(jacobians, std_factors)
save_jacobians(std_jacobians, config.out_url, "jacobians.npz")
if config.use_wandb:
plot_all_output_sensitivities(std_jacobians)
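# Command-line sketch (the config path is an assumption; extra flags use the
# flat '.'-joined style that TrainConfig.from_args parses):
#   python <this_script>.py --config-path train-config.yaml \
#       --epochs 4 --model.architecture.name rnn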
def get_default_config():
input_vars = [
"air_temperature_input",
"specific_humidity_input",
"cloud_water_mixing_ratio_input",
"pressure_thickness_of_atmospheric_layer",
]
model_config = MicrophysicsConfig(
input_variables=input_vars,
direct_out_variables=[
"cloud_water_mixing_ratio_after_precpd",
"total_precipitation",
],
residual_out_variables=dict(
air_temperature_after_precpd="air_temperature_input",
specific_humidity_after_precpd="specific_humidity_input",
),
architecture=ArchitectureConfig("linear"),
selection_map=dict(
air_temperature_input=SliceConfig(stop=-10),
specific_humidity_input=SliceConfig(stop=-10),
cloud_water_mixing_ratio_input=SliceConfig(stop=-10),
pressure_thickness_of_atmospheric_layer=SliceConfig(stop=-10),
),
tendency_outputs=dict(
air_temperature_after_precpd="tendency_of_air_temperature_due_to_microphysics", # noqa E501
specific_humidity_after_precpd="tendency_of_specific_humidity_due_to_microphysics", # noqa E501
),
)
transform = TransformConfig()
loss = CustomLoss(
optimizer=OptimizerConfig(name="Adam", kwargs=dict(learning_rate=1e-4)),
loss_variables=[
"air_temperature_after_precpd",
"specific_humidity_after_precpd",
"cloud_water_mixing_ratio_after_precpd",
"total_precipitation",
],
weights=dict(
air_temperature_after_precpd=0.5e5,
specific_humidity_after_precpd=0.5e5,
cloud_water_mixing_ratio_after_precpd=1.0,
total_precipitation=0.04,
),
metric_variables=[
"tendency_of_air_temperature_due_to_microphysics",
"tendency_of_specific_humidity_due_to_microphysics",
"tendency_of_cloud_water_mixing_ratio_due_to_microphysics",
],
)
config = TrainConfig(
train_url="gs://vcm-ml-experiments/microphysics-emulation/2021-11-24/microphysics-training-data-v3-training_netcdfs/train", # noqa E501
test_url="gs://vcm-ml-experiments/microphysics-emulation/2021-11-24/microphysics-training-data-v3-training_netcdfs/test", # noqa E501
out_url="gs://vcm-ml-scratch/andrep/test-train-emulation",
model=model_config,
transform=transform,
loss=loss,
nfiles=80,
nfiles_valid=80,
valid_freq=1,
epochs=4,
wandb=WandBConfig(job_type="training"),
)
return config
if __name__ == "__main__":
config = TrainConfig.from_args()
main(config)
the-stack_0_23493 | #!/usr/bin/env python
"""
Usage:
<Script-alias> <directory-pattern>
"""
from __future__ import print_function, absolute_import
import os
import sys
from cmdargs import CmdArgs
from builtins import input
import signal
import colorama
from colorama import Fore, Back, Style
env_homedir = os.environ['HOME']
#ctrl-c and ctrl-z handler
def signal_handler(signal, frame):
'''Exit on Ctrl-C or Ctrl-Z
'''
sys.exit(0)
#The signal functions
signal.signal(signal.SIGINT,signal_handler)
signal.signal(signal.SIGTSTP, signal_handler)
class Actions(object):
"""Actions to be performed by the command"""
def __init__(self):
'''initialize database path and history size
'''
self.db_file = os.path.join(env_homedir, '.cdhist.db')
self.HIST_SIZE = 1000
self.hist_dict = {}
self.history = None
def absolute_path(self, partial):
'''Absolute path is formed from relative path
'''
return os.path.abspath(
os.path.join(os.getcwd(), os.path.expanduser(partial))
)
def search_pattern(self, pattern):
'''Search for a pattern
'''
plen = len(pattern)
min_extra = float('Inf')
match = None
for directory in self.history:
pos = directory.rfind(pattern)
if pos >= 0:
extra = len(directory) - pos - plen
if extra <= min_extra:
min_extra = extra
match = directory
return match or self.absolute_path(pattern)
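# Illustrative matching behaviour (the history entries are assumptions): with
# self.history = ['/home/u/projects/webapp', '/home/u/projects/api'],
# search_pattern('api') returns '/home/u/projects/api', the entry whose match
# leaves the fewest trailing characters.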
def read_history(self):
'''Read the history of directories
'''
if os.path.exists(self.db_file):
with open(self.db_file) as fh:
self.history = fh.read().split('\n')
for i, item in enumerate(self.history):
self.hist_dict[item] = i
else:
self.history = []
def list_history(self, pattern = ''):
'''list top 10 directories or directories matching
pattern
'''
top_10 = self.history[-10:]
top_10 = top_10[::-1]
return_list = list(top_10)
if (pattern != ''):
top_10 = [s for s in top_10 if pattern in s]
return_list = list(top_10)
for i,s in enumerate(top_10):
pos = s.rfind(pattern)
top_10[i] = s[0:pos] + Fore.RED + Back.GREEN + s[pos:pos+len(pattern)] + Style.RESET_ALL + s[pos+len(pattern):]
for i, item in enumerate(top_10):
print(str(i+1), item, file = sys.stderr)
print("Enter your choice:", file=sys.stderr)
choice = int(input()) - 1
return str(return_list[choice])
def write_history(self):
'''Update the history file
'''
with open(self.db_file, 'w') as fh:
if len(self.history) > (self.HIST_SIZE * 1.4):
self.history = self.history[-self.HIST_SIZE:]
fh.write('\n'.join(self.history))
def save_match(self, match):
'''Save a pattern match
'''
idx = self.hist_dict.get(match)
if idx is not None:
self.history.pop(idx)
self.history.append(match)
self.write_history()
if __name__ == '__main__':
usage = """\
usage:
%(cmd)s -h (or --help)
%(cmd)s -l (or --list)
%(cmd)s <pattern/absolute-path>
-h show help text
-l list history of last 10 directories
pattern after first search short version
absolute path absolute path of the directory to work like cd
"""
sopts = 'hl'
dopts = ['help','list']
act = Actions()
if not len(sys.argv) > 1:
# when invoked with just the script name, change to the user's home directory
print('cd %s' % env_homedir)
sys.exit(0)
args_handler = CmdArgs(usage, sopts, dopts)
#Handle help for the script
params = args_handler.parseargs(sys.argv)
if 'help' in params:
print(args_handler.doHelp(), file=sys.stderr)
sys.exit(0)
act.read_history()
#Handle listing of top 10 history
if 'list' in params:
if 'pattern' in params:
directory = act.list_history(params['pattern'])
else:
directory = act.list_history()
if os.path.isdir(act.absolute_path(directory)):
act.save_match(act.absolute_path(directory))
print('cd %s' % (act.absolute_path(directory)))
sys.exit(0)
else:
print("Unexpected error", file =sys.stderr)
sys.exit(1)
#handle pattern match
required_ok = 'pattern' in params
if not required_ok:
print(args_handler.doHelp(), file=sys.stderr)
sys.exit(1)
pattern = params['pattern']
match = act.absolute_path(pattern)
if os.path.isdir(match):
# Handle the case with absolute path
act.save_match(match)
else:
# Perform a search for a pattern
match = act.search_pattern(pattern)
if os.path.isdir(match):
act.save_match(match)
else:
match = pattern
print('cd %s' % (match))
the-stack_0_23495 | # Copyright (c) 2020 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <[email protected]>
import pathlib
# Please maintain these carefully if you're changing the project's directory structure.
TEST_DIR = pathlib.Path(__file__).resolve().parent
ROOT_DIR = TEST_DIR.parent
DEPS_DIR = TEST_DIR / "deps"
assert DEPS_DIR.is_dir()
the-stack_0_23496 | #!/usr/bin/env python
#############################################################################
# Copyright (c) 2017-2020 SiteWare Corp. All right reserved
#############################################################################
from __future__ import absolute_import, print_function
import os
from . import get_hosts
from .file_rewriter import FileRewriter
g_etc_hosts = os.getenv('SD2_ETC_HOSTS', '/etc/hosts')
def get_our_config():
rr = ''
for host in get_hosts(enabled=False):
if not host.get('containers'):
continue
rr += '{}\t{}\n'.format(host['local-ip'], host['name'] + '-local')
for cont in host['containers']:
for alias in cont.get('hostAliases', []):
rr += "{}\t{}\n".format(host['local-ip'], alias)
for cont in host['containers']:
rr += '{}\t'.format(cont['ip'])
rr += "{}\n".format(cont['name'])
for alias in cont.get('aliases', []):
rr += '{}\t'.format(cont['ip'])
rr += "{} ".format(alias)
rr += '\n'
return rr
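# Illustrative output (host names, IPs and aliases are assumptions): for a host
# "devbox" with local-ip 10.0.0.5 and one container "web" at 172.20.0.2 with
# alias "web.local", get_our_config() yields lines like:
#   10.0.0.5    devbox-local
#   172.20.0.2  web
#   172.20.0.2  web.local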
def gen_etc_hosts():
fr = FileRewriter(g_etc_hosts)
before, after = fr.read_config()
rr = get_our_config()
fr.write_config(
before,
rr.split('\n'),
after,
sudo=True
)
the-stack_0_23499 |
from stationdata import build_station_list
from geo import stations_by_distance, stations_within_radius, rivers_with_station, stations_by_river, rivers_by_station_number, inconsistent_typical_range_stations
def test_stations_by_distance():
#testcall
#sorted_list = stations_by_distance(stations,cam)
stations = build_station_list()
cam = (52.2053, 0.1218)
closest_10 = stations_by_distance(stations,cam)[:10]
result = [('Cambridge Jesus Lock', 0.840237595667494), ('Bin Brook', 2.502277543239629), ("Cambridge Byron's Pool", 4.07204948005424), ('Cambridge Baits Bite', 5.115596582531859), ('Girton', 5.227077565748483), ('Haslingfield Burnt Mill', 7.0443978959918025), ('Oakington', 7.12825901765745), ('Stapleford', 7.265704342799649), ('Comberton', 7.735085060177142), ('Dernford', 7.993872393303291)]
assert closest_10 == result
def test_stations_within_radius():
#testcall
#list_of_stations_within = stations_within_radius(stations,cam,10)
stations = build_station_list()
cam = (52.2053, 0.1218)
list_of_stations_within = stations_within_radius(stations,cam,10)
result = ['Bin Brook', 'Cambridge Baits Bite', "Cambridge Byron's Pool", 'Cambridge Jesus Lock', 'Comberton', 'Dernford', 'Girton', 'Haslingfield Burnt Mill', 'Lode', 'Oakington', 'Stapleford']
assert list_of_stations_within == result
the-stack_0_23500 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import tempfile
from collections import defaultdict, namedtuple
from pants.backend.codegen.thrift.java.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.thrift.java.thrift_defaults import ThriftDefaults
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.memo import memoized_method, memoized_property
from twitter.common.collections import OrderedSet
from pants.contrib.scrooge.tasks.thrift_util import calculate_compile_sources
_RPC_STYLES = frozenset(['sync', 'finagle', 'ostrich'])
class ScroogeGen(SimpleCodegenTask, NailgunTask):
DepInfo = namedtuple('DepInfo', ['service', 'structs'])
PartialCmd = namedtuple('PartialCmd', ['language', 'rpc_style', 'namespace_map'])
@classmethod
def register_options(cls, register):
super(ScroogeGen, cls).register_options(register)
register('--verbose', type=bool, help='Emit verbose output.')
register('--strict', fingerprint=True, type=bool,
help='Enable strict compilation.')
register('--service-deps', default={}, advanced=True, type=dict,
help='A map of language to targets to add as dependencies of '
'synthetic thrift libraries that contain services.')
register('--structs-deps', default={}, advanced=True, type=dict,
help='A map of language to targets to add as dependencies of '
'synthetic thrift libraries that contain structs.')
register('--target-types',
default={'scala': 'scala_library', 'java': 'java_library', 'android': 'java_library'},
advanced=True,
type=dict,
help='Registered target types.')
cls.register_jvm_tool(register, 'scrooge-gen')
@classmethod
def subsystem_dependencies(cls):
return super(ScroogeGen, cls).subsystem_dependencies() + (ThriftDefaults,)
@classmethod
def product_types(cls):
return ['java', 'scala']
@classmethod
def implementation_version(cls):
return super(ScroogeGen, cls).implementation_version() + [('ScroogeGen', 3)]
def __init__(self, *args, **kwargs):
super(ScroogeGen, self).__init__(*args, **kwargs)
self._thrift_defaults = ThriftDefaults.global_instance()
self._depinfo = None
# TODO(benjy): Use regular os-located tmpfiles, as we do everywhere else.
def _tempname(self):
# don't assume the user's cwd is buildroot
pants_workdir = self.get_options().pants_workdir
tmp_dir = os.path.join(pants_workdir, 'tmp')
safe_mkdir(tmp_dir)
fd, path = tempfile.mkstemp(dir=tmp_dir, prefix='')
os.close(fd)
return path
def _resolve_deps(self, depmap):
"""Given a map of gen-key=>target specs, resolves the target specs into references."""
deps = defaultdict(lambda: OrderedSet())
for category, depspecs in depmap.items():
dependencies = deps[category]
for depspec in depspecs:
dep_address = Address.parse(depspec)
try:
self.context.build_graph.maybe_inject_address_closure(dep_address)
dependencies.add(self.context.build_graph.get_target(dep_address))
except AddressLookupError as e:
raise AddressLookupError('{}\n referenced from {} scope'.format(e, self.options_scope))
return deps
def _validate_language(self, target):
language = self._thrift_defaults.language(target)
if language not in self._registered_language_aliases():
raise TargetDefinitionException(
target,
'language {} not supported: expected one of {}.'.format(language, self._registered_language_aliases().keys()))
return language
def _validate_rpc_style(self, target):
rpc_style = self._thrift_defaults.rpc_style(target)
if rpc_style not in _RPC_STYLES:
raise TargetDefinitionException(
target,
'rpc_style {} not supported: expected one of {}.'.format(rpc_style, _RPC_STYLES))
return rpc_style
@memoized_method
def _registered_language_aliases(self):
return self.get_options().target_types
@memoized_method
def _target_type_for_language(self, language):
alias_for_lang = self._registered_language_aliases()[language]
registered_aliases = self.context.build_file_parser.registered_aliases()
target_types = registered_aliases.target_types_by_alias.get(alias_for_lang, None)
if not target_types:
raise TaskError('Registered target type `{0}` for language `{1}` does not exist!'.format(alias_for_lang, language))
if len(target_types) > 1:
raise TaskError('More than one target type registered for language `{0}`'.format(language))
return next(iter(target_types))
def execute_codegen(self, target, target_workdir):
self._validate_compiler_configs([target])
self._must_have_sources(target)
partial_cmd = self.PartialCmd(
language=self._validate_language(target),
rpc_style=self._validate_rpc_style(target),
namespace_map=tuple(sorted(target.namespace_map.items()) if target.namespace_map else ()))
self.gen(partial_cmd, target, target_workdir)
def gen(self, partial_cmd, target, target_workdir):
import_paths, _ = calculate_compile_sources([target], self.is_gentarget)
args = []
for import_path in import_paths:
args.extend(['--import-path', import_path])
args.extend(['--language', partial_cmd.language])
for lhs, rhs in partial_cmd.namespace_map:
args.extend(['--namespace-map', '%s=%s' % (lhs, rhs)])
if partial_cmd.rpc_style == 'ostrich':
args.append('--finagle')
args.append('--ostrich')
elif partial_cmd.rpc_style == 'finagle':
args.append('--finagle')
args.extend(['--dest', target_workdir])
if not self.get_options().strict:
args.append('--disable-strict')
if self.get_options().verbose:
args.append('--verbose')
gen_file_map_path = os.path.relpath(self._tempname())
args.extend(['--gen-file-map', gen_file_map_path])
args.extend(target.sources_relative_to_buildroot())
classpath = self.tool_classpath('scrooge-gen')
jvm_options = list(self.get_options().jvm_options)
jvm_options.append('-Dfile.encoding=UTF-8')
returncode = self.runjava(classpath=classpath,
main='com.twitter.scrooge.Main',
jvm_options=jvm_options,
args=args,
workunit_name='scrooge-gen')
if 0 != returncode:
raise TaskError('Scrooge compiler exited non-zero for {} ({})'.format(target, returncode))
SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
def _declares_service(self, source):
with open(source) as thrift:
return any(line for line in thrift if self.SERVICE_PARSER.search(line))
def parse_gen_file_map(self, gen_file_map_path, outdir):
d = defaultdict(set)
with safe_open(gen_file_map_path, 'r') as deps:
for dep in deps:
src, cls = dep.strip().split('->')
src = os.path.relpath(src.strip())
cls = os.path.relpath(cls.strip(), outdir)
d[src].add(cls)
return d
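# Illustrative input (paths are assumptions): a gen-file-map line such as
#   src/thrift/foo.thrift -> <outdir>/com/example/Foo.java
# is parsed into {'src/thrift/foo.thrift': {'com/example/Foo.java'}}.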
def is_gentarget(self, target):
if not isinstance(target, JavaThriftLibrary):
return False
# We only handle requests for 'scrooge' compilation and not, for example 'thrift', aka the
# Apache thrift compiler
return self._thrift_defaults.compiler(target) == 'scrooge'
def _validate_compiler_configs(self, targets):
assert len(targets) == 1, ("TODO: This method now only ever receives one target. Simplify.")
ValidateCompilerConfig = namedtuple('ValidateCompilerConfig', ['language', 'rpc_style'])
def compiler_config(tgt):
# Note compiler is not present in this signature. At this time
# Scrooge and the Apache thrift generators produce identical
# java sources, and the Apache generator does not produce scala
# sources. As there's no permutation allowing the creation of
# incompatible sources with the same language+rpc_style we omit
# the compiler from the signature at this time.
return ValidateCompilerConfig(language=self._thrift_defaults.language(tgt),
rpc_style=self._thrift_defaults.rpc_style(tgt))
mismatched_compiler_configs = defaultdict(set)
for target in filter(lambda t: isinstance(t, JavaThriftLibrary), targets):
mycompilerconfig = compiler_config(target)
def collect(dep):
if mycompilerconfig != compiler_config(dep):
mismatched_compiler_configs[target].add(dep)
target.walk(collect, predicate=lambda t: isinstance(t, JavaThriftLibrary))
if mismatched_compiler_configs:
msg = ['Thrift dependency trees must be generated with a uniform compiler configuration.\n\n']
for tgt in sorted(mismatched_compiler_configs.keys()):
msg.append('%s - %s\n' % (tgt, compiler_config(tgt)))
for dep in mismatched_compiler_configs[tgt]:
msg.append(' %s - %s\n' % (dep, compiler_config(dep)))
raise TaskError(''.join(msg))
def _must_have_sources(self, target):
if isinstance(target, JavaThriftLibrary) and not target.payload.sources.source_paths:
raise TargetDefinitionException(target, 'no thrift files found')
def synthetic_target_type(self, target):
language = self._thrift_defaults.language(target)
return self._target_type_for_language(language)
def synthetic_target_extra_dependencies(self, target, target_workdir):
deps = OrderedSet(self._thrift_dependencies_for_target(target))
deps.update(target.dependencies)
return deps
def _thrift_dependencies_for_target(self, target):
dep_info = self._resolved_dep_info
target_declares_service = any(self._declares_service(source)
for source in target.sources_relative_to_buildroot())
language = self._thrift_defaults.language(target)
if target_declares_service:
return dep_info.service[language]
else:
return dep_info.structs[language]
@memoized_property
def _resolved_dep_info(self):
return ScroogeGen.DepInfo(self._resolve_deps(self.get_options().service_deps),
self._resolve_deps(self.get_options().structs_deps))
@property
def _copy_target_attributes(self):
return ['provides']
the-stack_0_23501 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 7 12:18:42 2021
@author: bjorn
script to load and process images from outside mnist
"""
import io
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image
from skimage.transform import resize
def load_prediction_data(path, show_image=False):
size = (28, 28)
images = []
image_names = [f.name for f in os.scandir(path)]
if ".gitkeep" in image_names:
image_names.remove(".gitkeep")
# load images
for image_name in image_names:
# im = io.imread(os.path.join(path+'/image', image_name))
im = Image.open(os.path.join(path, image_name)).convert("1")
im = np.array(im, np.float32)
im = resize(im, size, anti_aliasing=True)
# if show_image:
# plt.imshow(im)
# plt.show()
# im = np.resize(im, size)
images.append(im)
if show_image:
plt.imshow(im)
plt.show()
# put data into tensor->dataset->data_loader
data_loader = torch.utils.data.DataLoader(
list(zip(images)), batch_size=1, shuffle=True
)
return data_loader
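# Usage sketch (the directory path is an assumption):
# loader = load_prediction_data("images/to_predict", show_image=False)
# for (batch,) in loader:
#     print(batch.shape)  # expected: torch.Size([1, 28, 28])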
the-stack_0_23502 | """
Adapted from the inference.py to demonstate the usage of the util functions.
"""
import sys
import numpy as np
import pydensecrf.densecrf as dcrf
# Get im{read,write} from somewhere.
try:
from cv2 import imread, imwrite
except ImportError:
    # Note that, sadly, skimage unconditionally imports scipy and matplotlib,
# so you'll need them if you don't have OpenCV. But you probably have them.
from skimage.io import imread, imsave
imwrite = imsave
# TODO: Use scipy instead.
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian
if len(sys.argv) != 4:
print("Usage: python {} IMAGE ANNO OUTPUT".format(sys.argv[0]))
print("")
print("IMAGE and ANNO are inputs and OUTPUT is where the result should be written.")
print("If there's at least one single full-black pixel in ANNO, black is assumed to mean unknown.")
sys.exit(1)
fn_im = sys.argv[1]
fn_anno = sys.argv[2]
fn_output = sys.argv[3]
##################################
### Read images and annotation ###
##################################
img = imread(fn_im)
# Convert the annotation's RGB color to a single 32-bit integer color 0xBBGGRR
anno_rgb = imread(fn_anno).astype(np.uint32)
anno_lbl = anno_rgb[:,:,0] + (anno_rgb[:,:,1] << 8) + (anno_rgb[:,:,2] << 16)
# Convert the 32bit integer color to 1, 2, ... labels.
# Note that all-black, i.e. the value 0 for background will stay 0.
colors, labels = np.unique(anno_lbl, return_inverse=True)
# But remove the all-0 black, that won't exist in the MAP!
HAS_UNK = 0 in colors
if HAS_UNK:
print("Found a full-black pixel in annotation image, assuming it means 'unknown' label, and will thus not be present in the output!")
print("If 0 is an actual label for you, consider writing your own code, or simply giving your labels only non-zero values.")
colors = colors[1:]
#else:
# print("No single full-black pixel found in annotation image. Assuming there's no 'unknown' label!")
# And create a mapping back from the labels to 32bit integer colors.
colorize = np.empty((len(colors), 3), np.uint8)
colorize[:,0] = (colors & 0x0000FF)
colorize[:,1] = (colors & 0x00FF00) >> 8
colorize[:,2] = (colors & 0xFF0000) >> 16
# Compute the number of classes in the label image.
# We subtract one because the number shouldn't include the value 0 which stands
# for "unknown" or "unsure".
n_labels = len(set(labels.flat)) - int(HAS_UNK)
print(n_labels, " labels", (" plus \"unknown\" 0: " if HAS_UNK else ""), set(labels.flat))
###########################
### Setup the CRF model ###
###########################
use_2d = False
# use_2d = True
if use_2d:
print("Using 2D specialized functions")
# Example using the DenseCRF2D code
d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=HAS_UNK)
d.setUnaryEnergy(U)
# This adds the color-independent term, features are the locations only.
d.addPairwiseGaussian(sxy=(3, 3), compat=3, kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
# This adds the color-dependent term, i.e. features are (x,y,r,g,b).
d.addPairwiseBilateral(sxy=(80, 80), srgb=(13, 13, 13), rgbim=img,
compat=10,
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
else:
print("Using generic 2D functions")
# Example using the DenseCRF class and the util functions
d = dcrf.DenseCRF(img.shape[1] * img.shape[0], n_labels)
# get unary potentials (neg log probability)
U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=HAS_UNK)
d.setUnaryEnergy(U)
    # This creates the color-independent features and then adds them to the CRF
feats = create_pairwise_gaussian(sdims=(3, 3), shape=img.shape[:2])
d.addPairwiseEnergy(feats, compat=3,
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
    # This creates the color-dependent features and then adds them to the CRF
feats = create_pairwise_bilateral(sdims=(80, 80), schan=(13, 13, 13),
img=img, chdim=2)
d.addPairwiseEnergy(feats, compat=10,
kernel=dcrf.DIAG_KERNEL,
normalization=dcrf.NORMALIZE_SYMMETRIC)
####################################
### Do inference and compute MAP ###
####################################
# Run five inference steps.
Q = d.inference(5)
# Find out the most probable class for each pixel.
MAP = np.argmax(Q, axis=0)
# Convert the MAP (labels) back to the corresponding colors and save the image.
# Note that there is no "unknown" here anymore, no matter what we had at first.
MAP = colorize[MAP,:]
imwrite(fn_output, MAP.reshape(img.shape))
# Alternatively, run the inference steps manually and monitor the KL-divergence
Q, tmp1, tmp2 = d.startInference()
for i in range(5):
print("KL-divergence at {}: {}".format(i, d.klDivergence(Q)))
d.stepInference(Q, tmp1, tmp2)
|
the-stack_0_23503 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot initialization. """
import os
from sys import version_info
from logging import basicConfig, getLogger, INFO, DEBUG
from distutils.util import strtobool as sb
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from dotenv import load_dotenv
from requests import get
from telethon import TelegramClient
from telethon.sessions import StringSession
load_dotenv("config.env")
# Bot Logs setup:
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
if CONSOLE_LOGGER_VERBOSE:
basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=DEBUG,
)
else:
basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=INFO)
LOGS = getLogger(__name__)
if version_info < (3, 8):
    LOGS.info("You MUST have a python version of at least 3.8. "
              "Multiple features depend on this. Bot quitting.")
quit(1)
# Check if the config was edited by using the already used variable.
# Basically, its the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None)
if CONFIG_CHECK:
LOGS.info(
"Please remove the line mentioned in the first hashtag from the config.env file"
)
quit(1)
# Telegram App KEY and HASH
API_KEY = os.environ.get("API_KEY", None)
API_HASH = os.environ.get("API_HASH", None)
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
# Deezloader
DEEZER_ARL_TOKEN = os.environ.get("DEEZER_ARL_TOKEN", None)
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID", 0) or 0)  # defaults to 0 when unset so the import does not crash
# Userbot logging feature switch.
BOTLOG = sb(os.environ.get("BOTLOG", "False"))
LOGSPAMMER = sb(os.environ.get("LOGSPAMMER", "False"))
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "False"))
# Heroku Credentials for updater.
HEROKU_MEMEZ = sb(os.environ.get("HEROKU_MEMEZ", "False"))
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
# Custom (forked) repo URL and BRANCH for updater.
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL",
"https://github.com/MoveAngel/One4uBot.git")
UPSTREAM_REPO_BRANCH = os.environ.get(
"UPSTREAM_REPO_BRANCH", "sql-extended")
# Console verbose logging
CONSOLE_LOGGER_VERBOSE = sb(os.environ.get("CONSOLE_LOGGER_VERBOSE", "False"))
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER", None)
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN", None)
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", None)
# Genius lyrics API
GENIUS = os.environ.get("GENIUS_ACCESS_TOKEN", None)
# Lydia API
LYDIA_API_KEY = os.environ.get("LYDIA_API_KEY", None)
# Quotes API Token
QUOTES_API_TOKEN = os.environ.get("QUOTES_API_TOKEN", None)
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Youtube API key
YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", None)
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", None)
# Default .alive logo
ALIVE_LOGO = os.environ.get(
"ALIVE_LOGO",
"https://telegra.ph/file/a904be2cb0ebe75a12cb6.jpg")
# Default .alive username
ALIVE_USERNAME = os.environ.get("ALIVE_USERNAME", None)
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", ""))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# User Terminal alias
USER_TERM_ALIAS = os.environ.get("USER_TERM_ALIAS", "One4uBot")
# Zipfile module
ZIP_DOWNLOAD_DIRECTORY = os.environ.get("ZIP_DOWNLOAD_DIRECTORY", "./zips")
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
lastfm = LastFMNetwork(api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS)
else:
lastfm = None
# Google Drive Module
G_DRIVE_DATA = os.environ.get("G_DRIVE_DATA", None)
G_DRIVE_CLIENT_ID = os.environ.get("G_DRIVE_CLIENT_ID", None)
G_DRIVE_CLIENT_SECRET = os.environ.get("G_DRIVE_CLIENT_SECRET", None)
G_DRIVE_AUTH_TOKEN_DATA = os.environ.get("G_DRIVE_AUTH_TOKEN_DATA", None)
G_DRIVE_FOLDER_ID = os.environ.get("G_DRIVE_FOLDER_ID", None)
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY",
"./downloads")
# JustWatch Country
WATCH_COUNTRY = os.environ.get("WATCH_COUNTRY", None)
# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
os.mkdir('bin')
binaries = {
"https://raw.githubusercontent.com/adekmaulana/megadown/master/megadown":
"bin/megadown",
"https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py":
"bin/cmrudl"
}
for binary, path in binaries.items():
downloader = SmartDL(binary, path, progress_bar=False)
downloader.start()
os.chmod(path, 0o755)
# 'bot' variable
if STRING_SESSION:
# pylint: disable=invalid-name
bot = TelegramClient(StringSession(STRING_SESSION), API_KEY, API_HASH)
else:
# pylint: disable=invalid-name
bot = TelegramClient("userbot", API_KEY, API_HASH)
async def check_botlog_chatid():
if not BOTLOG_CHATID and LOGSPAMMER:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the private error log storage to work."
)
quit(1)
elif not BOTLOG_CHATID and BOTLOG:
LOGS.info(
"You must set up the BOTLOG_CHATID variable in the config.env or environment variables, for the userbot logging feature to work."
)
quit(1)
elif not BOTLOG or not LOGSPAMMER:
return
entity = await bot.get_entity(BOTLOG_CHATID)
if entity.default_banned_rights.send_messages:
LOGS.info(
"Your account doesn't have rights to send messages to BOTLOG_CHATID "
"group. Check if you typed the Chat ID correctly.")
quit(1)
with bot:
try:
bot.loop.run_until_complete(check_botlog_chatid())
except:
LOGS.info(
"BOTLOG_CHATID environment variable isn't a "
"valid entity. Check your environment variables/config.env file.")
quit(1)
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
ZALG_LIST = {}
ISAFK = False
AFKREASON = None
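# Minimal config.env sketch (placeholder values only; the variable names match the
# os.environ lookups above, everything else must be filled in by the user):
#   API_KEY=12345
#   API_HASH=0123456789abcdef0123456789abcdef
#   STRING_SESSION=<telethon string session>
#   BOTLOG=False
#   LOGSPAMMER=False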
|
the-stack_0_23506 | #imported from uniborg by @heyworld
"""Count Number of Files in a Chat
Original Module Credits: https://t.me/UniBorg/127"""
from userbot.events import register
from userbot.utils import humanbytes
from userbot.utils.tools import (yaml_format, parse_pre)
@register(outgoing=True, pattern="^.confs(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
entity = event.chat_id
input_str = event.pattern_match.group(1)
if input_str:
entity = input_str
status_message = await event.reply(
"... this might take some time "
"depending on the number of messages "
"in the chat ..."
)
mus = 0
hmm = {}
async for message in event.client.iter_messages(
entity=entity,
limit=None
):
if message and message.file:
if message.file.mime_type not in hmm:
hmm[message.file.mime_type] = 0
hmm[message.file.mime_type] += message.file.size
hnm = {}
for key in hmm:
hnm[key] = humanbytes(hmm[key])
await status_message.edit(
yaml_format(hnm),
parse_mode=parse_pre
)
await event.delete()
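# Usage from the userbot account, based on the pattern above (illustrative):
#   .confs             -> tally file sizes per mime type in the current chat
#   .confs <chat/user> -> same, for the given chat id or username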
|
the-stack_0_23509 | #!/usr/bin/env python
###############################################################################
# Git-based CTF
###############################################################################
#
# Author: SeongIl Wi <[email protected]>
# Jaeseung Choi <[email protected]>
# Sang Kil Cha <[email protected]>
#
# Copyright (c) 2018 SoftSec Lab. KAIST
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import json
from ctf_utils import random_string, docker_cleanup, base_dir, load_config
from ctf_utils import prompt_checkout_warning, get_dirname, print_and_log
from command import run_command
from ctf_git import checkout
from crypto import encrypt_exploit
#-*- coding: utf-8 -*-
# TODO : Get these values via cmdline option
SERVICE_IP = "127.0.0.1"
SERVICE_PORT = 4000
def start_service(service_dir, branch, container_name, flag_str, log=None):
    log = print_and_log(f"[*] Starting service from {service_dir} (branch '{branch}')", log)
checkout(service_dir, branch)
# Update flag file
flag_path = os.path.join(service_dir, "flag") # Assumption in template
if not os.path.isfile(flag_path):
log = print_and_log(f"[*] 'flag' file not found in {service_dir}", log)
return False, log
with open(flag_path, "w") as flag_file:
flag_file.write(flag_str)
# Run the service
script = os.path.join(base_dir(), "setup_service.sh")
cmdline = f"{script} {container_name} {SERVICE_PORT} {SERVICE_PORT}"
output, err, e = run_command(cmdline, service_dir)
if e != 0:
log = print_and_log("[*] Failed to start service", log)
log = print_and_log(err, log)
log = print_and_log("==========================", log)
return False, log
if log is not None:
log = log + output
log = print_and_log("[*] Started service successfully", log)
return True, log
def run_exploit(exploit_dir, container_name, timeout, log=None):
log = print_and_log("[*] Running exploit", log)
script = os.path.join(base_dir(), "launch_exploit.sh")
cmdline = f"{script} {container_name} {SERVICE_IP} {SERVICE_PORT} {timeout}"
output, err, e = run_command(cmdline, exploit_dir)
if log is not None:
try:
log = log.decode('utf_8', 'ignore')
output = output.decode('utf-8', 'ignore')
log = log + output
except UnicodeDecodeError:
pass
if e != 0:
log = print_and_log("[*] Failed to run exploit", log)
log = print_and_log(err, log)
log = print_and_log("==========================", log)
return None, log
# Exploit prints out the flag string at the end.
    tokens = [_f for _f in output.split('\n') if _f] # Filter out empty strings
    flag_candidate = tokens[-1] if tokens else None # Read the last non-empty line (the flag)
return flag_candidate, log
def verify_exploit(exploit_dir, service_dir, branch, timeout, config,
encrypt=False, log=None):
if not os.path.isdir(exploit_dir) :
print(f"[*] Exploit directory '{exploit_dir}' does not exist")
return False, log
if not os.path.isdir(service_dir) :
print(f"[*] Service directory '{service_dir}' does not exist")
return False, log
# Create random flag value
flag = random_string(10)
# Start the service
service_dirname = get_dirname(service_dir)
service_container_name = f"{service_dirname}-{branch}"
result, log = start_service(service_dir, branch, service_container_name, \
flag, log=log)
if not result:
return False, log
# Run the exploit
exploit_dirname = get_dirname(exploit_dir)
exploit_container_name = f"exploit-{branch}"
exploit_result, log = run_exploit(exploit_dir, exploit_container_name, \
timeout, log=log)
# Clean up containers
docker_cleanup(service_container_name)
docker_cleanup(exploit_container_name)
log = print_and_log(f"[*] Exploit returned : {exploit_result}", log)
log = print_and_log(f"[*] Solution flag : {flag}", log)
if exploit_result == flag:
print("[*] Exploit worked successfully")
if encrypt:
print("[*] Encrypting the verified exploit")
# Set your own team as target team, and signer is not needed.
target_team = config["player_team"]
encrypted_file = encrypt_exploit(exploit_dir, target_team, config)
if encrypted_file is None:
print("[*] Failed to encrypt exploit")
else:
print(f"[*] Your exploit is encrypted in {encrypted_file}")
print("[*] Now you may commit and push this encrypted exploit "\
"to the corresponding branch of your service repository")
return True, log
else:
log = print_and_log("[*] Exploit returned a wrong flag string", log)
return False, log
if __name__ == "__main__":
if len(sys.argv) != 6:
print(f"Usage: {sys.argv[0]} [exploit dir] [service dir] [branch] [timeout] [config]")
sys.exit()
exploit_dir = sys.argv[1]
service_dir = sys.argv[2]
branch = sys.argv[3]
timeout = int(sys.argv[4])
config_file = sys.argv[5]
prompt_checkout_warning(service_dir)
config = load_config(config_file)
verify_exploit(exploit_dir, service_dir, branch, timeout, config)
|
the-stack_0_23510 |
import cv2
import numpy as np
from PIL import Image
import os
def second():
# Path for face image database
path = 'dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create()
    detector = cv2.CascadeClassifier("haarcascades/haarcascade_frontalface_default.xml")
# function to get the images and label data
def getImagesAndLabels(path):
imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
faceSamples=[]
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale
img_numpy = np.array(PIL_img,'uint8')
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy)
for (x,y,w,h) in faces:
faceSamples.append(img_numpy[y:y+h,x:x+w])
ids.append(id)
return faceSamples,ids
print ("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces,ids = getImagesAndLabels(path)
recognizer.train(faces, np.array(ids))
# Save the model into trainer/trainer.yml
recognizer.write('trainer/trainer.yml') # recognizer.save() worked on Mac, but not on Pi
# Print the numer of faces trained and end program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
second()
|
the-stack_0_23511 | #!/usr/bin/env python
import json
import sys
import argparse
import os
from pathlib import Path
def load_multiple_pdf_ignore_list():
"""
When a publication/revision has more than one pdf, use this result to get the pdfs that are duplicates,
or they can be ignored.
    The pdfs that are not main articles and are not in this list can be considered valuable supplemental data.
The dict has been manually populated.
"""
this_dir = os.path.dirname(os.path.realpath(__file__))
json_filename = "multiple_pdfs_ignore_list.json"
file = os.path.join(this_dir, json_filename)
with open(file, 'r') as fp:
ignore_list = json.load(fp=fp)
return ignore_list
def load_multiple_pdf_main_articles():
"""
When a publication/revision has more than one pdf, use this result to get the main article.
This dict has been manually populated.
Note: When the main article is an empty string, it means all the pdfs are supplemental.
For example publication 969 is a compendium of various presentations with no main article.
"""
this_dir = os.path.dirname(os.path.realpath(__file__))
json_filename = "./multiple_pdfs_main_articles.json"
file = os.path.join(this_dir, json_filename)
with open(file, 'r') as fp:
main_articles = json.load(fp=fp)
return main_articles
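# Usage sketch for the two loaders above (the keys and values come from the manually
# curated JSON files; nothing here is generated by this script):
#   ignore_list = load_multiple_pdf_ignore_list()
#   main_articles = load_multiple_pdf_main_articles()
#   # skip any pdf listed in ignore_list, and prefer the entry from main_articles otherwise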
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Check publications with multiple pdfs. '
                                     'After running, hard-write the results into the files multiple_pdfs_ignore_list.json and multiple_pdfs_main_articles.json. '
                                     'You can then use the load_multiple_pdf_ignore_list and load_multiple_pdf_main_articles functions to reuse them in other scripts.')
parser.add_argument("-i", "--input-folder", dest="input_folder",
help="Path to the root folder of the IJ data: '/path/publications'"
"where publications are stored by /pub_id/revision_id/*.pdf.")
parser.add_argument("-n", "--no-write-output-json",
dest="no_write_output_json", action='store_true',
help="Avoid writing .json files with citation_list")
parser.add_argument("-v", "--verbose",
dest="verbose", action='store_true',
help="Print the output dict")
args = parser.parse_args()
print(args)
publication_main_article = {}
publication_main_article_max_revision = {}
# publications/pub_id/revision_id/*.pdf
publication_dirs = [f.name for f in os.scandir(args.input_folder) if f.is_dir()]
for publication_dir in publication_dirs:
if args.verbose:
print("Publication: ", publication_dir)
publication_dir_path = os.path.join(args.input_folder, publication_dir)
revision_dirs = [f.name for f in os.scandir(publication_dir_path) if f.is_dir()]
# Populate dict only for max revision
if len(revision_dirs) > 0:
revision_dir = max(revision_dirs)
revision_dir_path = os.path.join(publication_dir_path, revision_dir)
pdfs = [f.name for f in os.scandir(revision_dir_path) if Path(f.name).suffix == ".pdf"]
if len(pdfs) > 1:
publication_main_article_max_revision[int(publication_dir)] = {revision_dir:[]}
for pdf in pdfs:
publication_main_article_max_revision[int(publication_dir)][revision_dir].append(pdf)
# Populate dict with all revisions
for revision_dir in revision_dirs:
revision_dir_path = os.path.join(publication_dir_path, revision_dir)
pdfs = [f.name for f in os.scandir(revision_dir_path) if Path(f.name).suffix == ".pdf"]
if len(pdfs) > 1:
publication_main_article[int(publication_dir)] = {revision_dir:[]}
for pdf in pdfs:
publication_main_article[int(publication_dir)][revision_dir].append(pdf)
if False: # Disordered
json.dump(publication_main_article, fp=sys.stdout, indent=4)
if not args.no_write_output_json:
out_file = "multiple_pdfs.json"
with open(out_file, 'w') as fp:
json.dump(publication_main_article, fp=fp, indent=4)
publication_main_article_sorted = {}
for key, value in sorted(publication_main_article.items()):
publication_main_article_sorted[key] = value
json.dump(publication_main_article_sorted, fp=sys.stdout, indent=4)
if not args.no_write_output_json:
out_file = "multiple_pdfs_sorted.json"
with open(out_file, 'w') as fp:
json.dump(publication_main_article_sorted, fp=fp, indent=4)
publication_main_article_max_revision_sorted = {}
for key, value in sorted(publication_main_article_max_revision.items()):
publication_main_article_max_revision_sorted[key] = value
json.dump(publication_main_article_max_revision_sorted, fp=sys.stdout, indent=4)
if not args.no_write_output_json:
out_file = "multiple_pdfs_max_revision_sorted.json"
with open(out_file, 'w') as fp:
json.dump(publication_main_article_max_revision_sorted, fp=fp, indent=4)
|
the-stack_0_23513 | import argparse
import logging
import os
from functools import partial
from typing import Callable, List
from sciencebeam_utils.beam_utils.files import find_matching_filenames_with_limit
from sciencebeam_utils.utils.file_path import (
join_if_relative_path,
get_output_file
)
from sciencebeam_utils.utils.file_list import (
load_file_list
)
from sciencebeam_utils.tools.check_file_list import map_file_list_to_file_exists
LOGGER = logging.getLogger(__name__)
class DataProps:
SOURCE_FILENAME = 'source_filename'
FILENAME = 'filename'
CONTENT = 'content'
TYPE = 'type'
def add_batch_args(parser: argparse.ArgumentParser):
parser.add_argument(
'--data-path', type=str, required=True,
help='base data path'
)
source_group = parser.add_argument_group('source')
source_one_of_group = source_group.add_mutually_exclusive_group(
required=True
)
source_one_of_group.add_argument(
'--source-path', type=str, required=False,
help='path to source file(s), relative to data-path'
)
source_one_of_group.add_argument(
'--source-file-list', type=str, required=False,
help='path to source csv/tsv file list'
)
source_group.add_argument(
'--source-file-column', type=str, required=False, default='url',
help='the column of the source file list to use'
)
parser.add_argument(
'--limit', type=int, required=False,
help='limit the number of file pairs to process'
)
output_group = parser.add_argument_group('output')
output_group.add_argument(
'--output-path', required=False,
help='Output directory to write results to.'
)
output_group.add_argument(
'--output-suffix', required=False, default='.xml',
help='Output file suffix to add to the filename (excluding the file extension).'
)
parser.add_argument(
'--resume', action='store_true', default=False,
help='resume conversion (skip files that already have an output file)'
)
parser.add_argument(
'--debug', action='store_true', default=False,
help='enable debug output'
)
def process_batch_args(args):
args.base_data_path = args.data_path.replace('/*/', '/')
if not args.output_path:
args.output_path = os.path.join(
os.path.dirname(args.base_data_path),
os.path.basename(args.base_data_path + '-results')
)
def encode_if_text_type(data):
return data.encode('utf-8') if isinstance(data, str) else data
def get_file_list_for_args(args: argparse.Namespace):
if args.source_file_list:
file_list_path = join_if_relative_path(args.base_data_path, args.source_file_list)
return load_file_list(
file_list_path, column=args.source_file_column, limit=args.limit
)
return list(find_matching_filenames_with_limit(
join_if_relative_path(args.base_data_path, args.source_path), limit=args.limit
))
def get_file_list_without_output_file(
file_list: List[str],
get_output_file_for_source_url: Callable[[str], str]) -> List[str]:
output_file_exists_list = map_file_list_to_file_exists([
get_output_file_for_source_url(file_url)
for file_url in file_list
])
LOGGER.debug('output_file_exists_list: %s', output_file_exists_list)
return [
file_url
for file_url, output_file_exists in zip(file_list, output_file_exists_list)
if not output_file_exists
]
def get_output_file_for_source_file_fn(args):
return partial(
get_output_file,
source_base_path=args.base_data_path,
output_base_path=args.output_path,
output_file_suffix=args.output_suffix
)
def get_remaining_file_list_for_args(args: argparse.Namespace):
file_list = get_file_list_for_args(args)
LOGGER.debug('file_list: %s', file_list)
if not file_list:
LOGGER.warning('no files found')
return file_list
LOGGER.info('total number of files: %d', len(file_list))
if args.resume:
file_list = get_file_list_without_output_file(
file_list,
get_output_file_for_source_url=get_output_file_for_source_file_fn(args)
)
LOGGER.info('remaining number of files: %d', len(file_list))
return file_list
|
the-stack_0_23515 | from gql_server.schema import schema
from .test_graphql_server import TestGraphQLServerBase
class TestMatchups(TestGraphQLServerBase):
def get_matchup_notes(self, matchup):
result = schema.execute(
f"""
query getMatchupNotes {{
tournament(id: {self.tourn_id}) {{
matchup(id: {matchup}) {{
notes
}}
}}
}}
"""
)
notes = result.data['tournament']['matchup']['notes']
return notes
def assign_notes(self, matchup, notes):
result = schema.execute(
f"""
mutation setMatchupNotes {{
assignMatchupNotes(tournament: {self.tourn_id}, matchup: {matchup}, notes: "{notes}") {{
matchup {{
id
}}
notes
}}
}}
"""
)
result = result.data['assignMatchupNotes']
return result
def test_starts_with_no_rounds(self):
self.assertHasNumRounds(0)
def test_can_add_manual_round(self):
self.add_default_team_matrix()
matchups = self.add_round(
1, [{"pl": 1001, "def": 1101}, {"pl": 1002, "def": 1102}]
)
self.assertHasNumRounds(1)
self.assertHasRound(1)
self.assertHasMatchup(matchups[0], 1001, 1101)
self.assertHasMatchup(matchups[1], 1002, 1102)
def test_team_has_matchup(self):
[matchup, _] = self.add_default_r1_setup()
result = schema.execute(
f"""
query teamMatchups {{
tournament(id: {self.tourn_id}) {{
team(num: {1001}) {{
matchups {{
id
}}
}}
}}
}}
"""
)
[gql_matchup] = result.data['tournament']['team']['matchups']
self.assertEqual(matchup, gql_matchup['id'])
def test_matchup_gives_round_num(self):
[matchup, _] = self.add_default_r1_setup()
result = schema.execute(
f"""
query teamMatchups {{
tournament(id: {self.tourn_id}) {{
matchup(id: {matchup}) {{
roundNum
}}
}}
}}
"""
)
round_num = result.data['tournament']['matchup']['roundNum']
self.assertEqual(round_num, 1)
def test_matchup_teams_are_fully_explorable(self):
matchup = self.add_one_matchup_setup()
result = schema.execute(
f"""
query teamMatchups {{
tournament(id: {self.tourn_id}) {{
matchup(id: {matchup}) {{
pl {{
team {{
num
name
}}
}}
def {{
team {{
num
name
}}
}}
}}
}}
}}
"""
)
match = result.data['tournament']['matchup']
pl = match['pl']['team']
self.assertEqual(pl['num'], 1001)
self.assertEqual(pl['name'], "Midlands University A")
de = match['def']['team']
self.assertEqual(de['num'], 1101)
self.assertEqual(de['name'], "Midlands State University A")
def test_matchup_starts_with_no_notes(self):
matchup = self.add_one_matchup_setup()
notes = self.get_matchup_notes(matchup)
self.assertIsNone(notes)
def test_can_assign_notes(self):
matchup = self.add_one_matchup_setup()
result = self.assign_notes(matchup, "Hello, World!")
self.assertEqual(result['matchup']['id'], matchup)
self.assertEqual(result['notes'], "Hello, World!")
notes = self.get_matchup_notes(matchup)
self.assertEqual(notes, "Hello, World!")
def test_can_reassign_notes(self):
matchup = self.add_one_matchup_setup()
self.assign_notes(matchup, "Hey there!")
self.assign_notes(matchup, "Hey there Delilah")
self.assertEqual(self.get_matchup_notes(matchup), "Hey there Delilah")
|
the-stack_0_23517 | from __future__ import print_function
import time
import unwired
from unwired.rest import ApiException
from pprint import pprint
# create an instance of the API class
api_instance = unwired.GEOLOCATIONApi()
#cell schema
c=unwired.CellSchema(lac=38996,cid=12814)
celldata=[c]
#fallback schema
f=unwired.FallbackSchema(scf=2)
# GeolocationSchema
geolocation_schema = unwired.GeolocationSchema(token="YOUR_API_KEY",
    radio="umts", mcc=310, mnc=404, cells=celldata, fallbacks=f,
address=1)
try:
# Geolocation
api_response = api_instance.geolocation(geolocation_schema)
pprint(api_response)
except ApiException as e:
print("Exception when calling GEOLOCATIONApi->geolocation: %s\n" % e)
|
the-stack_0_23518 | #!/usr/bin/env python3
"""
Create dataset and experiments.
A dataset is a directory with subdirectories, one subdir per class.
An experiment is a directory subdirectories, one subdir per participant.
"""
import os
from os.path import join as pjoin
from os import listdir as ld
import numpy as np
import shutil
import sys
from PIL import Image
import numpy as np
import math
from torchvision import transforms
from ..helper import human_categories as hc
from .. import constants as consts
def resize_crop_image(input_file,
resize_size,
crop_size):
"""Replace input_file with resized and cropped version (png)."""
img = Image.open(input_file)
t = transforms.Compose([transforms.Resize(resize_size),
transforms.CenterCrop(crop_size)])
new_img = t(img)
os.remove(input_file)
new_img.save(input_file.replace(".JPEG", ".png"), 'png')
def create_dataset(original_dataset_path,
target_dataset_path,
rng,
min_num_imgs_per_class,
max_num_imgs_per_class,
target_resize_size,
                   target_crop_size):
    """Create a balanced dataset from a larger (potentially unbalanced) dataset."""
categories = hc.HumanCategories()
class_count_dict = dict()
image_path_dict = dict()
for human_category in sorted(hc.get_human_object_recognition_categories()):
class_count_dict[human_category] = 0
image_path_dict[human_category] = list()
for c in sorted(os.listdir(original_dataset_path)):
human_category = categories.get_human_category_from_WNID(c)
if human_category is not None:
class_count_dict[human_category] += len(os.listdir(pjoin(original_dataset_path,
c)))
for image_name in sorted(os.listdir(pjoin(original_dataset_path, c))):
image_path_dict[human_category].append(pjoin(original_dataset_path,
c, image_name))
count = 0
maximum = 0
minimum = np.Inf
for c in sorted(os.listdir(original_dataset_path)):
num = len(os.listdir(pjoin(original_dataset_path, c)))
count += num
if num > maximum:
maximum = num
if num < minimum:
minimum = num
min_16_classes = np.Inf
for k, v in class_count_dict.items():
if v < min_16_classes:
min_16_classes = v
print("Total image count: "+str(count))
print("Max #images per class: "+str(maximum))
print("Min #images per class: "+str(minimum))
print("Min #images within 16 classes: "+str(min_16_classes))
print(class_count_dict)
assert min_16_classes >= min_num_imgs_per_class, "not enough images"
num_imgs_per_target_class = max_num_imgs_per_class
if min_16_classes < num_imgs_per_target_class:
num_imgs_per_target_class = min_16_classes
if not os.path.exists(target_dataset_path):
print("Creating directory "+target_dataset_path)
os.makedirs(target_dataset_path)
else:
raise OSError("target dataset already exists: "+target_dataset_path)
for human_category in sorted(hc.get_human_object_recognition_categories()):
print("Creating category "+human_category)
category_dir = pjoin(target_dataset_path, human_category)
if not os.path.exists(category_dir):
os.makedirs(category_dir)
num_images = class_count_dict[human_category]
assert num_images >= min_16_classes, "not enough images found"
choice = rng.choice(num_images, num_imgs_per_target_class, replace=False)
assert len(choice) <= len(image_path_dict[human_category])
assert len(choice) == num_imgs_per_target_class
for image_index in choice:
image_index_str = str(image_index+1)
while len(image_index_str) < 4:
image_index_str = "0"+image_index_str
image_path = image_path_dict[human_category][image_index]
target_image_path = pjoin(target_dataset_path, human_category,
human_category+"-"+image_index_str+"-"+image_path.split("/")[-1].replace("_", "-"))
shutil.copyfile(image_path, target_image_path)
resize_crop_image(target_image_path, target_resize_size,
target_crop_size)
def create_experiment(expt_name,
expt_abbreviation,
expt_source_dir,
expt_target_dir,
only_dnn=True,
num_subjects=1,
rng=None):
"""Create human / CNN experiment.
parameters:
- only_dnn: boolean indicating whether this is a DNN experiment
or not (if not, a human experiment will be created.)
"""
if not only_dnn:
assert rng is not None, "Please specify random number generator (rng)!"
assert("_" not in expt_name), "no '_' in experiment name!"
assert(os.path.exists(expt_source_dir)), "directory "+expt_source_dir+" does not exist."
for i in range(0, num_subjects+1):
if i==0:
subject_abbreviation = "dnn"
subject_name="dnn"
else:
subject_abbreviation = "s"+get_leading_zeros(i, 2)
subject_name = "subject-"+get_leading_zeros(i, 2)
print("Creating experiment for subject: '"+subject_name+"'")
target_dir = pjoin(expt_target_dir, expt_name,
subject_name, "session-1")
if os.path.exists(target_dir):
            print("Error: target directory "+target_dir+" already exists.")
sys.exit(1)
else:
os.makedirs(target_dir)
img_list = []
for c in sorted(hc.get_human_object_recognition_categories()):
for x in sorted(ld(pjoin(expt_source_dir, c))):
input_file = pjoin(expt_source_dir, c, x)
img_list.append(input_file)
order = np.arange(len(img_list))
if i != 0:
rng.shuffle(order)
for i, img_index in enumerate(order):
input_file = img_list[img_index]
imgname = input_file.split("/")[-1]
correct_category = input_file.split("/")[-2]
condition = "0"
target_image_path = pjoin(target_dir,
(get_leading_zeros(i+1)+"_"+
expt_abbreviation+"_"+
subject_abbreviation+"_"+
condition+"_"+
correct_category+"_"+
"00_"+
imgname))
shutil.copyfile(input_file, target_image_path)
def get_leading_zeros(num, length=4):
return ("0"*length+str(num))[-length:]
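# Worked examples of the helper and naming scheme above (values are illustrative):
#   get_leading_zeros(7)      -> "0007"
#   get_leading_zeros(12, 2)  -> "12"
# so create_experiment() writes files named like
#   0001_<expt_abbreviation>_s01_0_<category>_00_<original-image-name>.png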
|
the-stack_0_23520 | from __future__ import division
import numpy as np
from glob import glob
import os
import scipy.misc
# import sys
# sys.path.append('../../')
# from utils.misc import *
class kitti_odom_loader(object):
def __init__(self,
dataset_dir,
img_height=128,
img_width=416,
seq_length=5):
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.train_seqs = [9, 10] #[0, 1, 2, 3, 4, 5, 6, 7, 8]
self.test_seqs = [9, 10]
self.collect_test_frames()
self.collect_train_frames()
def collect_test_frames(self):
self.test_frames = []
for seq in self.test_seqs:
seq_dir = os.path.join(self.dataset_dir, 'sequences', '%.2d' % seq)
img_dir = os.path.join(seq_dir, 'image_1')
N = len(glob(img_dir + '/*.png'))
for n in range(N):
self.test_frames.append('%.2d %.6d' % (seq, n))
self.num_test = len(self.test_frames)
def collect_train_frames(self):
self.train_frames = []
for seq in self.train_seqs:
seq_dir = os.path.join(self.dataset_dir, 'sequences', '%.2d' % seq)
img_dir = os.path.join(seq_dir, 'image_1')
N = len(glob(img_dir + '/*.png'))
for n in range(N):
self.train_frames.append('%.2d %.6d' % (seq, n))
self.num_train = len(self.train_frames)
def is_valid_sample(self, frames, tgt_idx):
N = len(frames)
tgt_drive, _ = frames[tgt_idx].split(' ')
half_offset = int((self.seq_length - 1)/2)
min_src_idx = tgt_idx - half_offset
max_src_idx = tgt_idx + half_offset
if min_src_idx < 0 or max_src_idx >= N:
return False
min_src_drive, _ = frames[min_src_idx].split(' ')
max_src_drive, _ = frames[max_src_idx].split(' ')
if tgt_drive == min_src_drive and tgt_drive == max_src_drive:
return True
return False
def load_image_sequence(self, frames, tgt_idx, seq_length):
half_offset = int((seq_length - 1)/2)
image_seq = []
for o in range(-half_offset, half_offset+1):
curr_idx = tgt_idx + o
curr_drive, curr_frame_id = frames[curr_idx].split(' ')
curr_img = self.load_image(curr_drive, curr_frame_id)
if o == 0:
zoom_y = self.img_height/curr_img.shape[0]
zoom_x = self.img_width/curr_img.shape[1]
            curr_img = scipy.misc.imresize(curr_img, (self.img_height, self.img_width))  # note: scipy.misc.imresize/imread require an older SciPy (removed in newer releases)
image_seq.append(curr_img)
return image_seq, zoom_x, zoom_y
def load_example(self, frames, tgt_idx, load_pose=False):
image_seq, zoom_x, zoom_y = self.load_image_sequence(frames, tgt_idx, self.seq_length)
tgt_drive, tgt_frame_id = frames[tgt_idx].split(' ')
intrinsics = self.load_intrinsics(tgt_drive, tgt_frame_id)
intrinsics = self.scale_intrinsics(intrinsics, zoom_x, zoom_y)
example = {}
example['intrinsics'] = intrinsics
example['image_seq'] = image_seq
example['folder_name'] = tgt_drive
example['file_name'] = tgt_frame_id
if load_pose:
pass
return example
def get_train_example_with_idx(self, tgt_idx):
if not self.is_valid_sample(self.train_frames, tgt_idx):
return False
example = self.load_example(self.train_frames, tgt_idx)
return example
# def load_frame(self, drive, frame_id):
# img = self.load_image(drive, frame_id)
# try:
# scale_x = np.float(self.img_width)/img.shape[1]
# except:
# print("KITTI loading error!")
# print("Drive = ", drive)
# print("frame_id = ", frame_id)
# raise
# scale_y = np.float(self.img_height)/img.shape[0]
# intrinsics = self.load_intrinsics(drive, frame_id)
# intrinsics = self.scale_intrinsics(intrinsics, scale_x, scale_y)
# img = self.crop_resize(img)
# return img, intrinsics
def load_image(self, drive, frame_id):
img_file = os.path.join(self.dataset_dir, 'sequences', '%s/image_1/%s.png' % (drive, frame_id))
img = scipy.misc.imread(img_file)
return img
def load_intrinsics(self, drive, frame_id):
calib_file = os.path.join(self.dataset_dir, 'sequences', '%s/calib.txt' % drive)
proj_c2p, _ = self.read_calib_file(calib_file)
intrinsics = proj_c2p[:3, :3]
return intrinsics
# def load_gt_odom(self, drive, tgt_idx, src_idx):
# pose_file = os.path.join(self.dataset_dir, 'poses', '%s.txt' % drive)
# with open(pose_file, 'r') as f:
# poses = f.readlines()
# filler = np.array([0, 0, 0, 1]).reshape((1,4))
# tgt_pose = np.array(poses[int(tgt_idx)][:-1].split(' ')).astype(np.float32).reshape(3,4)
# tgt_pose = np.concatenate((tgt_pose, filler), axis=0)
# src_pose = np.array(poses[int(src_idx)][:-1].split(' ')).astype(np.float32).reshape(3,4)
# src_pose = np.concatenate((src_pose, filler), axis=0)
# rel_pose = np.dot(np.linalg.inv(src_pose), tgt_pose)
# rel_6DOF = pose_mat_to_6dof(rel_pose)
# return rel_6DOF
def read_calib_file(self, filepath, cid=2):
"""Read in a calibration file and parse into a dictionary."""
with open(filepath, 'r') as f:
C = f.readlines()
def parseLine(L, shape):
data = L.split()
data = np.array(data[1:]).reshape(shape).astype(np.float32)
return data
proj_c2p = parseLine(C[cid], shape=(3,4))
proj_v2c = parseLine(C[-1], shape=(3,4))
filler = np.array([0, 0, 0, 1]).reshape((1,4))
proj_v2c = np.concatenate((proj_v2c, filler), axis=0)
return proj_c2p, proj_v2c
def scale_intrinsics(self,mat, sx, sy):
out = np.copy(mat)
out[0,0] *= sx
out[0,2] *= sx
out[1,1] *= sy
out[1,2] *= sy
return out
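# Minimal usage sketch (assumes a KITTI odometry tree under ./kitti_odometry with the
# standard sequences/<seq>/image_1 layout and calib.txt files; the path is an assumption):
#   loader = kitti_odom_loader(dataset_dir="./kitti_odometry")
#   example = loader.get_train_example_with_idx(10)
#   if example:
#       print(example['folder_name'], example['file_name'], example['intrinsics'])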
|
the-stack_0_23521 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import rc
from sklearn.metrics import auc
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
import sys
import time
# TeX fonts
rc('text', usetex=True)
rc('font',**{'family':'serif','serif':['Times']})
class GSClassification():
'''
Executes grid search and cross-validation for many classification models.
Parameters:
models: list of potential classifiers
grid: grid search parameters
'''
def __init__(self, models, grid):
self.models = models
# instances only desired models.
self.grid_of_params = {k:v for k, v in grid.items() if k in self.models}
def apply_grid_search(self, X_train, y_train, k=5):
self.X_train = X_train
self.y_train = y_train
'''
Parameters:
X_train: 2D ndarray
y_train: 1D ndarray
k: cross-validation k-fold. Default: 5.
'''
# list of current compatible classifiers
compatible_classes = [SVC(), DecisionTreeClassifier(), KNeighborsClassifier(), LogisticRegression(), GaussianNB(), RandomForestClassifier(), SGDClassifier(), Perceptron()]
compatible_classes_str = [str(i) for i in compatible_classes if str(i) in self.grid_of_params.keys()]
self.classificators = [compatible_classes[i].fit(X_train, y_train) for i in range(len(compatible_classes)) if str(compatible_classes[i]) in self.grid_of_params.keys()]
self.model_name = []
self.accuracies = []
self.standar_dev = []
self.best_parameters = []
self.best_estimators = []
for i in range(len(self.classificators)):
start = time.time()
print("Executing grid search for {}.".format(compatible_classes_str[i]))
grid_search = GridSearchCV(estimator = self.classificators[i],
param_grid = self.grid_of_params[compatible_classes_str[i]],
scoring = 'accuracy',
cv = k,
n_jobs = -1,
verbose=1)
grid_search.fit(X_train, y_train)
self.accuracies.append(grid_search.best_score_)
self.best_parameters.append(grid_search.best_params_)
self.best_estimators.append(grid_search.best_estimator_)
self.standar_dev.append(grid_search.cv_results_['std_test_score'][grid_search.best_index_])
self.model_name.append(compatible_classes_str[i][0:-2])
end = time.time()
print ("Elapsed time: %.3fs"%(end-start))
# XGboost is special...
if 'XGBClassifier()' in self.grid_of_params.keys():
start = time.time()
xgb = XGBClassifier()
print("Executing grid search for XGBClassifier().")
grid_search = GridSearchCV(estimator = xgb,
param_grid = self.grid_of_params['XGBClassifier()'],
scoring = 'accuracy',
cv = k,
n_jobs = -1,
verbose=1)
grid_search.fit(X_train, y_train)
self.accuracies.append(grid_search.best_score_)
self.best_parameters.append(grid_search.best_params_)
self.standar_dev.append(grid_search.cv_results_['std_test_score'][grid_search.best_index_])
self.model_name.append('XGBClassifier')
end = time.time()
print ("Elapsed time: %.3fs"%(end-start))
xgb.fit(X_train, y_train)
self.classificators.append(xgb)
self.best_estimators.append(grid_search.best_estimator_)
def show_dataframe(self):
out = list(zip(self.model_name, self.accuracies, self.standar_dev)) #zip joins same index tuples of lists
resultsinDataFrame = pd.DataFrame(out, columns = ['method', 'mean accuracy (%)', 'standard deviation (%)'])
final_df = resultsinDataFrame.sort_values(by='mean accuracy (%)', ascending=False)
print(final_df)
def plot_cap_curves(self):
# split
X_train_, X_test_, y_train_, y_test_ = train_test_split(self.X_train, self.y_train, test_size = 0.40)
# used to compute CAP
self.y_pred = []
for best_estimator in self.best_estimators:
self.y_pred.append(best_estimator.predict(X_test_).tolist())
self.y_test_ = [y_test_.tolist()]*len(self.best_estimators)
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(7,12))
ax1 = plt.subplot(211)
for model in range(len(self.y_pred)):
            # pair true labels with model predictions
data = pd.DataFrame(list(zip(self.y_test_[model],self.y_pred[model])), columns=['y','ypred'])
# sort
data_sorted_ypred = data.sort_values(by=['ypred'], ascending=False)
data_sorted_y = data.sort_values(by=['y'], ascending=False)
# total records
total_records = len(data)
# total amount of positives
total_positive = len(data[data['y'] == 1])
# proportion of the total records (x axis)
x = [(i+1)/total_records for i in range(total_records)]
# proportion of positives out of total
proportion_of_positive = total_positive/total_records
# random select
random_select = [(i+1)*proportion_of_positive for i in range(total_records)]
# out of the random select, proportion of positives (y axis)
random_select_proportion_of_positive = [random_select[i]/total_positive for i in range(total_records)]
# model select
model_select = [sum(data_sorted_ypred.iloc[0:i+1,0]) for i in range(total_records)]
# out of the model select, proportion of positives (y axis)
model_select_proportion_of_positive = [model_select[i]/total_positive for i in range(total_records)]
# perfect select
perfect_select = [sum(data_sorted_y.iloc[0:i+1,0]) for i in range(total_records)]
# out of the perfect select, proportion of positives (y axis)
perfect_select_proportion_of_positive = [perfect_select[i]/total_positive for i in range(total_records)]
auc_random = auc(x, random_select_proportion_of_positive)
auc_model = auc(x, model_select_proportion_of_positive)
auc_perfect = auc(x, perfect_select_proportion_of_positive)
acc_ratio = (auc_model-auc_random)/(auc_perfect-auc_random)
ax1.plot(x, model_select_proportion_of_positive, label='{}: {:.2f}'.format(self.model_name[model], acc_ratio), linewidth=0.7)
ax1.plot(x, random_select_proportion_of_positive, '--', color='red', linewidth=1, label='Random', alpha=0.5)
ax1.plot(x, perfect_select_proportion_of_positive, '--', color='blue', linewidth=1, label='Perfect Model', alpha=0.5)
ax1.set_title('Cumulative Accuracy Profile (CAP)', size=17)
ax1.set_xlabel('Fraction of total', fontsize=16)
ax1.set_ylabel('Fraction of positive outcomes', fontsize=16)
legend = ax1.legend(frameon=False, loc='lower right', title='Accuracy Ratio', fontsize=13)
legend.get_title().set_fontsize('13')
for legobj in legend.legendHandles:
legobj.set_linewidth(2.0)
plt.xticks(size=16)
plt.yticks(size=16)
plt.subplots_adjust(hspace=0.25)
ax2 = plt.subplot(212)
ax2.bar(self.model_name, self.accuracies, zorder=2, alpha=0.8)
ax2.grid(alpha=0.3, zorder=0)
ax2.errorbar(self.model_name, self.accuracies, yerr=self.standar_dev, c='C1', ls='none', zorder=3, alpha=0.8)
        ax2.set_yscale('log')
ax2.set_title('Mean accuracy $\pm \sigma$', size=17)
plt.xticks(rotation=10, ha='right', size=12)
plt.yticks(size=16)
#plt.tight_layout()
plt.savefig('cap.jpg', dpi=150)
plt.close()
def show_best_parameters(self):
for i in range(len(self.model_name)):
print(self.model_name[i], self.best_parameters[i])
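# Usage sketch (grid values are illustrative; each entry in `models` must match a key
# in `grid`, keyed by the default repr of the estimator, e.g. "SVC()"):
#   models = ['SVC()', 'LogisticRegression()']
#   grid = {'SVC()': [{'C': [0.1, 1, 10], 'kernel': ['rbf', 'linear']}],
#           'LogisticRegression()': [{'C': [0.1, 1, 10]}]}
#   gs = GSClassification(models, grid)
#   gs.apply_grid_search(X_train, y_train, k=5)
#   gs.show_dataframe()
#   gs.plot_cap_curves()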
|
the-stack_0_23526 | # Copyright 2013 OpenStack Foundation
# Copyright (C) 2013 Yahoo! Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import sys
import fixtures
import mock
import six
from glanceclient import exc
from glanceclient import shell as openstack_shell
#NOTE (esheffield) Used for the schema caching tests
from glanceclient.v2 import schemas as schemas
import json
from tests import keystone_client_fixtures
from tests import utils
import keystoneclient
from keystoneclient.openstack.common.apiclient import exceptions as ks_exc
DEFAULT_IMAGE_URL = 'http://127.0.0.1:5000/'
DEFAULT_USERNAME = 'username'
DEFAULT_PASSWORD = 'password'
DEFAULT_TENANT_ID = 'tenant_id'
DEFAULT_TENANT_NAME = 'tenant_name'
DEFAULT_PROJECT_ID = '0123456789'
DEFAULT_USER_DOMAIN_NAME = 'user_domain_name'
DEFAULT_UNVERSIONED_AUTH_URL = 'http://127.0.0.1:5000/'
DEFAULT_V2_AUTH_URL = 'http://127.0.0.1:5000/v2.0/'
DEFAULT_V3_AUTH_URL = 'http://127.0.0.1:5000/v3/'
DEFAULT_AUTH_TOKEN = ' 3bcc3d3a03f44e3d8377f9247b0ad155'
TEST_SERVICE_URL = 'http://127.0.0.1:5000/'
FAKE_V2_ENV = {'OS_USERNAME': DEFAULT_USERNAME,
'OS_PASSWORD': DEFAULT_PASSWORD,
'OS_TENANT_NAME': DEFAULT_TENANT_NAME,
'OS_AUTH_URL': DEFAULT_V2_AUTH_URL,
'OS_IMAGE_URL': DEFAULT_IMAGE_URL}
FAKE_V3_ENV = {'OS_USERNAME': DEFAULT_USERNAME,
'OS_PASSWORD': DEFAULT_PASSWORD,
'OS_PROJECT_ID': DEFAULT_PROJECT_ID,
'OS_USER_DOMAIN_NAME': DEFAULT_USER_DOMAIN_NAME,
'OS_AUTH_URL': DEFAULT_V3_AUTH_URL,
'OS_IMAGE_URL': DEFAULT_IMAGE_URL}
class ShellTest(utils.TestCase):
# auth environment to use
auth_env = FAKE_V2_ENV.copy()
# expected auth plugin to invoke
auth_plugin = 'keystoneclient.auth.identity.v2.Password'
# Patch os.environ to avoid required auth info
def make_env(self, exclude=None, fake_env=FAKE_V2_ENV):
env = dict((k, v) for k, v in fake_env.items() if k != exclude)
self.useFixture(fixtures.MonkeyPatch('os.environ', env))
def setUp(self):
super(ShellTest, self).setUp()
global _old_env
_old_env, os.environ = os.environ, self.auth_env
global shell, _shell, assert_called, assert_called_anytime
_shell = openstack_shell.OpenStackImagesShell()
shell = lambda cmd: _shell.main(cmd.split())
def tearDown(self):
super(ShellTest, self).tearDown()
global _old_env
os.environ = _old_env
def shell(self, argstr, exitcodes=(0,)):
orig = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = six.StringIO()
sys.stderr = six.StringIO()
_shell = openstack_shell.OpenStackImagesShell()
_shell.main(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertIn(exc_value.code, exitcodes)
finally:
stdout = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = orig
stderr = sys.stderr.getvalue()
sys.stderr.close()
sys.stderr = orig_stderr
return (stdout, stderr)
def test_help_unknown_command(self):
shell = openstack_shell.OpenStackImagesShell()
argstr = 'help foofoo'
self.assertRaises(exc.CommandError, shell.main, argstr.split())
def test_help(self):
shell = openstack_shell.OpenStackImagesShell()
argstr = 'help'
actual = shell.main(argstr.split())
self.assertEqual(0, actual)
def test_help_on_subcommand_error(self):
self.assertRaises(exc.CommandError, shell, 'help bad')
def test_get_base_parser(self):
test_shell = openstack_shell.OpenStackImagesShell()
actual_parser = test_shell.get_base_parser()
description = 'Command-line interface to the OpenStack Images API.'
expected = argparse.ArgumentParser(
prog='glance', usage=None,
description=description,
conflict_handler='error',
add_help=False,
formatter_class=openstack_shell.HelpFormatter,)
# NOTE(guochbo): Can't compare ArgumentParser instances directly
# Convert ArgumentPaser to string first.
self.assertEqual(str(expected), str(actual_parser))
@mock.patch.object(openstack_shell.OpenStackImagesShell,
'_get_versioned_client')
def test_cert_and_key_args_interchangeable(self,
mock_versioned_client):
# make sure --os-cert and --os-key are passed correctly
args = '--os-cert mycert --os-key mykey image-list'
shell(args)
assert mock_versioned_client.called
((api_version, args), kwargs) = mock_versioned_client.call_args
self.assertEqual('mycert', args.os_cert)
self.assertEqual('mykey', args.os_key)
# make sure we get the same thing with --cert-file and --key-file
args = '--cert-file mycertfile --key-file mykeyfile image-list'
glance_shell = openstack_shell.OpenStackImagesShell()
glance_shell.main(args.split())
assert mock_versioned_client.called
((api_version, args), kwargs) = mock_versioned_client.call_args
self.assertEqual('mycertfile', args.os_cert)
self.assertEqual('mykeyfile', args.os_key)
@mock.patch('glanceclient.v1.client.Client')
def test_no_auth_with_token_and_image_url_with_v1(self, v1_client):
# test no authentication is required if both token and endpoint url
# are specified
args = ('--os-auth-token mytoken --os-image-url https://image:1234/v1 '
'image-list')
glance_shell = openstack_shell.OpenStackImagesShell()
glance_shell.main(args.split())
assert v1_client.called
(args, kwargs) = v1_client.call_args
self.assertEqual('mytoken', kwargs['token'])
self.assertEqual('https://image:1234/v1', args[0])
@mock.patch.object(openstack_shell.OpenStackImagesShell, '_cache_schemas')
def test_no_auth_with_token_and_image_url_with_v2(self,
cache_schemas):
with mock.patch('glanceclient.v2.client.Client') as v2_client:
# test no authentication is required if both token and endpoint url
# are specified
args = ('--os-auth-token mytoken '
'--os-image-url https://image:1234/v2 '
'--os-image-api-version 2 image-list')
glance_shell = openstack_shell.OpenStackImagesShell()
glance_shell.main(args.split())
((args), kwargs) = v2_client.call_args
self.assertEqual('https://image:1234/v2', args[0])
self.assertEqual('mytoken', kwargs['token'])
def _assert_auth_plugin_args(self, mock_auth_plugin):
# make sure our auth plugin is invoked with the correct args
mock_auth_plugin.assert_called_once_with(
keystone_client_fixtures.V2_URL,
self.auth_env['OS_USERNAME'],
self.auth_env['OS_PASSWORD'],
tenant_name=self.auth_env['OS_TENANT_NAME'],
tenant_id='')
@mock.patch('glanceclient.v1.client.Client')
@mock.patch('keystoneclient.session.Session')
@mock.patch.object(keystoneclient.discover.Discover, 'url_for',
side_effect=[keystone_client_fixtures.V2_URL, None])
def test_auth_plugin_invocation_with_v1(self,
v1_client,
ks_session,
url_for):
with mock.patch(self.auth_plugin) as mock_auth_plugin:
args = 'image-list'
glance_shell = openstack_shell.OpenStackImagesShell()
glance_shell.main(args.split())
self._assert_auth_plugin_args(mock_auth_plugin)
@mock.patch('glanceclient.v2.client.Client')
@mock.patch('keystoneclient.session.Session')
@mock.patch.object(openstack_shell.OpenStackImagesShell, '_cache_schemas')
@mock.patch.object(keystoneclient.discover.Discover, 'url_for',
side_effect=[keystone_client_fixtures.V2_URL, None])
def test_auth_plugin_invocation_with_v2(self,
v2_client,
ks_session,
url_for,
cache_schemas):
with mock.patch(self.auth_plugin) as mock_auth_plugin:
args = '--os-image-api-version 2 image-list'
glance_shell = openstack_shell.OpenStackImagesShell()
glance_shell.main(args.split())
self._assert_auth_plugin_args(mock_auth_plugin)
@mock.patch('glanceclient.v1.client.Client')
@mock.patch('keystoneclient.session.Session')
@mock.patch.object(keystoneclient.discover.Discover, 'url_for',
side_effect=[keystone_client_fixtures.V2_URL,
keystone_client_fixtures.V3_URL])
def test_auth_plugin_invocation_with_unversioned_auth_url_with_v1(
self, v1_client, ks_session, url_for):
with mock.patch(self.auth_plugin) as mock_auth_plugin:
args = '--os-auth-url %s image-list' % (
keystone_client_fixtures.BASE_URL)
glance_shell = openstack_shell.OpenStackImagesShell()
glance_shell.main(args.split())
self._assert_auth_plugin_args(mock_auth_plugin)
@mock.patch('glanceclient.v2.client.Client')
@mock.patch('keystoneclient.session.Session')
@mock.patch.object(openstack_shell.OpenStackImagesShell, '_cache_schemas')
@mock.patch.object(keystoneclient.discover.Discover, 'url_for',
side_effect=[keystone_client_fixtures.V2_URL,
keystone_client_fixtures.V3_URL])
def test_auth_plugin_invocation_with_unversioned_auth_url_with_v2(
self, v2_client, ks_session, cache_schemas, url_for):
with mock.patch(self.auth_plugin) as mock_auth_plugin:
args = ('--os-auth-url %s --os-image-api-version 2 '
'image-list') % (keystone_client_fixtures.BASE_URL)
glance_shell = openstack_shell.OpenStackImagesShell()
glance_shell.main(args.split())
self._assert_auth_plugin_args(mock_auth_plugin)
@mock.patch('sys.stdin', side_effect=mock.MagicMock)
@mock.patch('getpass.getpass', return_value='password')
def test_password_prompted_with_v2(self, mock_getpass, mock_stdin):
glance_shell = openstack_shell.OpenStackImagesShell()
self.make_env(exclude='OS_PASSWORD')
# We will get a Connection Refused because there is no keystone.
self.assertRaises(ks_exc.ConnectionRefused,
glance_shell.main, ['image-list'])
# Make sure we are actually prompted.
mock_getpass.assert_called_with('OS Password: ')
@mock.patch('sys.stdin', side_effect=mock.MagicMock)
@mock.patch('getpass.getpass', side_effect=EOFError)
def test_password_prompted_ctrlD_with_v2(self, mock_getpass, mock_stdin):
glance_shell = openstack_shell.OpenStackImagesShell()
self.make_env(exclude='OS_PASSWORD')
        # We should get a CommandError because we mock Ctrl-D.
self.assertRaises(exc.CommandError, glance_shell.main, ['image-list'])
# Make sure we are actually prompted.
mock_getpass.assert_called_with('OS Password: ')
class ShellTestWithKeystoneV3Auth(ShellTest):
# auth environment to use
auth_env = FAKE_V3_ENV.copy()
# expected auth plugin to invoke
auth_plugin = 'keystoneclient.auth.identity.v3.Password'
def _assert_auth_plugin_args(self, mock_auth_plugin):
mock_auth_plugin.assert_called_once_with(
keystone_client_fixtures.V3_URL,
user_id='',
username=self.auth_env['OS_USERNAME'],
password=self.auth_env['OS_PASSWORD'],
user_domain_id='',
user_domain_name=self.auth_env['OS_USER_DOMAIN_NAME'],
project_id=self.auth_env['OS_PROJECT_ID'],
project_name='',
project_domain_id='',
project_domain_name='')
@mock.patch('glanceclient.v1.client.Client')
@mock.patch('keystoneclient.session.Session')
@mock.patch.object(keystoneclient.discover.Discover, 'url_for',
side_effect=[None, keystone_client_fixtures.V3_URL])
def test_auth_plugin_invocation_with_v1(self,
v1_client,
ks_session,
url_for):
with mock.patch(self.auth_plugin) as mock_auth_plugin:
args = 'image-list'
glance_shell = openstack_shell.OpenStackImagesShell()
glance_shell.main(args.split())
self._assert_auth_plugin_args(mock_auth_plugin)
@mock.patch('glanceclient.v2.client.Client')
@mock.patch('keystoneclient.session.Session')
@mock.patch.object(openstack_shell.OpenStackImagesShell, '_cache_schemas')
@mock.patch.object(keystoneclient.discover.Discover, 'url_for',
side_effect=[None, keystone_client_fixtures.V3_URL])
def test_auth_plugin_invocation_with_v2(self,
v2_client,
ks_session,
url_for,
cache_schemas):
with mock.patch(self.auth_plugin) as mock_auth_plugin:
args = '--os-image-api-version 2 image-list'
glance_shell = openstack_shell.OpenStackImagesShell()
glance_shell.main(args.split())
self._assert_auth_plugin_args(mock_auth_plugin)
@mock.patch('keystoneclient.session.Session')
@mock.patch('keystoneclient.discover.Discover',
side_effect=ks_exc.ClientException())
def test_api_discovery_failed_with_unversioned_auth_url(self,
ks_session,
discover):
args = '--os-auth-url %s image-list' % (
keystone_client_fixtures.BASE_URL)
glance_shell = openstack_shell.OpenStackImagesShell()
self.assertRaises(exc.CommandError, glance_shell.main, args.split())
def test_bash_completion(self):
stdout, stderr = self.shell('bash_completion')
# just check we have some output
required = [
'--status',
'image-create',
'help',
'--size']
for r in required:
self.assertIn(r, stdout.split())
avoided = [
'bash_completion',
'bash-completion']
for r in avoided:
self.assertNotIn(r, stdout.split())
class ShellCacheSchemaTest(utils.TestCase):
def setUp(self):
super(ShellCacheSchemaTest, self).setUp()
self._mock_client_setup()
self._mock_shell_setup()
self.cache_dir = '/dir_for_cached_schema'
self.cache_files = [self.cache_dir + '/image_schema.json',
self.cache_dir + '/namespace_schema.json',
self.cache_dir + '/resource_type_schema.json']
def tearDown(self):
super(ShellCacheSchemaTest, self).tearDown()
def _mock_client_setup(self):
self.schema_dict = {
'name': 'image',
'properties': {
'name': {'type': 'string', 'description': 'Name of image'},
},
}
self.client = mock.Mock()
self.client.schemas.get.return_value = schemas.Schema(self.schema_dict)
def _mock_shell_setup(self):
mocked_get_client = mock.MagicMock(return_value=self.client)
self.shell = openstack_shell.OpenStackImagesShell()
self.shell._get_versioned_client = mocked_get_client
def _make_args(self, args):
class Args():
def __init__(self, entries):
self.__dict__.update(entries)
return Args(args)
@mock.patch('six.moves.builtins.open', new=mock.mock_open(), create=True)
@mock.patch('os.path.exists', return_value=True)
def test_cache_schemas_gets_when_forced(self, exists_mock):
options = {
'get_schema': True
}
self.shell._cache_schemas(self._make_args(options),
home_dir=self.cache_dir)
self.assertEqual(12, open.mock_calls.__len__())
self.assertEqual(mock.call(self.cache_files[0], 'w'),
open.mock_calls[0])
self.assertEqual(mock.call(self.cache_files[1], 'w'),
open.mock_calls[4])
self.assertEqual(mock.call().write(json.dumps(self.schema_dict)),
open.mock_calls[2])
self.assertEqual(mock.call().write(json.dumps(self.schema_dict)),
open.mock_calls[6])
@mock.patch('six.moves.builtins.open', new=mock.mock_open(), create=True)
@mock.patch('os.path.exists', side_effect=[True, False, False, False])
def test_cache_schemas_gets_when_not_exists(self, exists_mock):
options = {
'get_schema': False
}
self.shell._cache_schemas(self._make_args(options),
home_dir=self.cache_dir)
self.assertEqual(12, open.mock_calls.__len__())
self.assertEqual(mock.call(self.cache_files[0], 'w'),
open.mock_calls[0])
self.assertEqual(mock.call(self.cache_files[1], 'w'),
open.mock_calls[4])
self.assertEqual(mock.call().write(json.dumps(self.schema_dict)),
open.mock_calls[2])
self.assertEqual(mock.call().write(json.dumps(self.schema_dict)),
open.mock_calls[6])
@mock.patch('six.moves.builtins.open', new=mock.mock_open(), create=True)
@mock.patch('os.path.exists', return_value=True)
def test_cache_schemas_leaves_when_present_not_forced(self, exists_mock):
options = {
'get_schema': False
}
self.shell._cache_schemas(self._make_args(options),
home_dir=self.cache_dir)
os.path.exists.assert_any_call(self.cache_dir)
os.path.exists.assert_any_call(self.cache_files[0])
os.path.exists.assert_any_call(self.cache_files[1])
self.assertEqual(4, exists_mock.call_count)
self.assertEqual(0, open.mock_calls.__len__())
|
the-stack_0_23528 | from __future__ import absolute_import, division, print_function
import os
import hashlib
import zipfile
from six.moves import urllib
import numpy as np
import cv2
from my_utils import toNumpy
class HomomorphicFilter:
"""Homomorphic filter implemented with diferents filters and an option to an external filter.
https://github.com/glasgio/homomorphic-filter/blob/eacc5d236ee2f15a40db120fd16d8221d61859bf/homofilt.py#L5
High-frequency filters implemented:
butterworth
gaussian
Attributes:
a, b: Floats used on emphasis filter:
H = a + b*H
.
"""
def __init__(self, a = 0.5, b = 1.5):
self.a = float(a)
self.b = float(b)
# Filters
def __butterworth_filter(self, I_shape, filter_params):
P = I_shape[0]/2
Q = I_shape[1]/2
U, V = np.meshgrid(range(I_shape[0]), range(I_shape[1]), sparse=False, indexing='ij')
Duv = (((U-P)**2+(V-Q)**2)).astype(float)
H = 1/(1+(Duv/filter_params[0]**2)**filter_params[1])
return (1 - H)
def __gaussian_filter(self, I_shape, filter_params):
P = I_shape[0]/2
Q = I_shape[1]/2
H = np.zeros(I_shape)
U, V = np.meshgrid(range(I_shape[0]), range(I_shape[1]), sparse=False, indexing='ij')
Duv = (((U-P)**2+(V-Q)**2)).astype(float)
H = np.exp((-Duv/(2*(filter_params[0])**2)))
return (1 - H)
# Methods
def __apply_filter(self, I, H):
H = np.fft.fftshift(H)
I_filtered = (self.a + self.b*H)*I
return I_filtered
def filter(self, I, filter_params, filter='butterworth', H = None):
"""
        Method to apply a homomorphic filter on an image
Attributes:
I: Single channel image
filter_params: Parameters to be used on filters:
butterworth:
filter_params[0]: Cutoff frequency
filter_params[1]: Order of filter
gaussian:
filter_params[0]: Cutoff frequency
filter: Choose of the filter, options:
butterworth
gaussian
external
H: Used to pass external filter
"""
# Validating image
if len(I.shape) != 2:
raise Exception('Improper image')
# Take the image to log domain and then to frequency domain
I_log = np.log1p(np.array(I, dtype="float"))
I_fft = np.fft.fft2(I_log)
# Filters
if filter=='butterworth':
H = self.__butterworth_filter(I_shape = I_fft.shape, filter_params = filter_params)
elif filter=='gaussian':
H = self.__gaussian_filter(I_shape = I_fft.shape, filter_params = filter_params)
elif filter=='external':
print('external')
if len(H.shape) != 2:
raise Exception('Invalid external filter')
else:
raise Exception('Selected filter not implemented')
# Apply filter on frequency domain then take the image back to spatial domain
I_fft_filt = self.__apply_filter(I = I_fft, H = H)
I_filt = np.fft.ifft2(I_fft_filt)
I = np.exp(np.real(I_filt))-1
return (I)
# End of class HomomorphicFilter
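# The demo below is an illustrative sketch (not part of the original module):
# it shows how HomomorphicFilter is typically applied to a single-channel
# image. The input path "example_gray.png" is a placeholder assumption.
def _demo_homomorphic_filter(image_path="example_gray.png"):
    gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    homo_filter = HomomorphicFilter(a=0.75, b=1.25)
    # Butterworth high-pass emphasis with cutoff frequency 30 and order 2
    filtered = homo_filter.filter(I=gray, filter_params=[30, 2], filter='butterworth')
    return np.clip(filtered, 0, 255).astype(np.uint8)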
def readlines(filename):
"""Read all the lines in a text file and return as a list
"""
with open(filename, 'r') as f:
lines = f.read().splitlines()
return lines
def normalize_image(x):
"""Rescale image pixels to span range [0, 1]
"""
ma = float(x.max().cpu().data)
mi = float(x.min().cpu().data)
d = ma - mi if ma != mi else 1e5
return (x - mi) / d
def sec_to_hm(t):
"""Convert time in seconds to time in hours, minutes and seconds
e.g. 10239 -> (2, 50, 39)
"""
t = int(t)
s = t % 60
t //= 60
m = t % 60
t //= 60
return t, m, s
def sec_to_hm_str(t):
"""Convert time in seconds to a nice string
e.g. 10239 -> '02h50m39s'
"""
h, m, s = sec_to_hm(t)
return "{:02d}h{:02d}m{:02d}s".format(h, m, s)
def download_model_if_doesnt_exist(model_name):
"""If pretrained kitti model doesn't exist, download and unzip it
"""
# values are tuples of (<google cloud URL>, <md5 checksum>)
download_paths = {
"mono_640x192":
("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono_640x192.zip",
"a964b8356e08a02d009609d9e3928f7c"),
"stereo_640x192":
("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/stereo_640x192.zip",
"3dfb76bcff0786e4ec07ac00f658dd07"),
"mono+stereo_640x192":
("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono%2Bstereo_640x192.zip",
"c024d69012485ed05d7eaa9617a96b81"),
"mono_no_pt_640x192":
("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono_no_pt_640x192.zip",
"9c2f071e35027c895a4728358ffc913a"),
"stereo_no_pt_640x192":
("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/stereo_no_pt_640x192.zip",
"41ec2de112905f85541ac33a854742d1"),
"mono+stereo_no_pt_640x192":
("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono%2Bstereo_no_pt_640x192.zip",
"46c3b824f541d143a45c37df65fbab0a"),
"mono_1024x320":
("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono_1024x320.zip",
"0ab0766efdfeea89a0d9ea8ba90e1e63"),
"stereo_1024x320":
("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/stereo_1024x320.zip",
"afc2f2126d70cf3fdf26b550898b501a"),
"mono+stereo_1024x320":
("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono%2Bstereo_1024x320.zip",
"cdc5fc9b23513c07d5b19235d9ef08f7"),
}
if not os.path.exists("models"):
os.makedirs("models")
model_path = os.path.join("models", model_name)
def check_file_matches_md5(checksum, fpath):
if not os.path.exists(fpath):
return False
with open(fpath, 'rb') as f:
current_md5checksum = hashlib.md5(f.read()).hexdigest()
return current_md5checksum == checksum
# see if we have the model already downloaded...
if not os.path.exists(os.path.join(model_path, "encoder.pth")):
model_url, required_md5checksum = download_paths[model_name]
if not check_file_matches_md5(required_md5checksum, model_path + ".zip"):
print("-> Downloading pretrained model to {}".format(model_path + ".zip"))
urllib.request.urlretrieve(model_url, model_path + ".zip")
if not check_file_matches_md5(required_md5checksum, model_path + ".zip"):
print(" Failed to download a file which matches the checksum - quitting")
quit()
print(" Unzipping model...")
with zipfile.ZipFile(model_path + ".zip", 'r') as f:
f.extractall(model_path)
print(" Model unzipped to {}".format(model_path))
water_types_Nrer_rgb = {}
water_types_Nrer_rgb["I"] = np.exp(-np.array([0.233, 0.049, 0.021]))
water_types_Nrer_rgb["IA"] = np.exp(-np.array([0.234, 0.0503, 0.0253]))
water_types_Nrer_rgb["3C"] = np.exp(-np.array([0.380, 0.187, 0.240]))
def estimateA(img, depth):
    # estimate the background light (BL): take the brightest gray-level pixel
    # among the farthest ~0.1% of depth values
p = np.percentile(depth, 99.9)
depth_10p = depth.copy()
depth_10p[depth_10p<p]=0
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_BL = gray.copy()
img_BL[depth_10p<p]=0
rmax, cmax = np.unravel_index(img_BL.argmax(), img_BL.shape)
BL = img[rmax, cmax, :]
return BL
def computeJ(image, depth):
    """Recover the scene radiance J from an underwater image using the simple
    formation model I = J*T + A*(1-T), where T = Nrer**depth is the per-channel
    transmission and A is the estimated background light."""
    img = toNumpy(image)
depth = toNumpy(depth)
A = estimateA(img, depth)
TM = np.zeros_like(img)
for t in range(3):
# TM[:,:,t] = np.exp(-beta_rgb[t]*depth)
TM[:,:,t] = water_types_Nrer_rgb["3C"][t]**depth
S = A*(1-TM)
J = (img - A) / TM + A
return J # TODO: convert back to pytorch
def homorphicFiltering(img, G=None):
img = np.float32(img)
img = img/255
rows,cols,dim=img.shape
rh, rl, cutoff = 0.6,0.5,32
imgYCrCb = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
y,cr,cb = cv2.split(imgYCrCb)
y_log = np.log(y+0.01)
y_fft = np.fft.fft2(y_log)
y_fft_shift = np.fft.fftshift(y_fft)
DX = cols/cutoff
# if G is None:
# G = np.ones((rows,cols))
# for i in range(rows):
# for j in range(cols):
# G[i][j]=((rh-rl)*(1-np.exp(-((i-rows/2)**2+(j-cols/2)**2)/(2*DX**2))))+rl
# result_filter = G * y_fft_shift
# result_interm = np.real(np.fft.ifft2(np.fft.ifftshift(result_filter)))
# result = np.exp(result_interm)
# result = result.astype(np.float32)
# rgb = np.dstack((result,cr,cb))
    homo_filter = HomomorphicFilter()  # default emphasis coefficients a=0.5, b=1.5
img_filtered = homo_filter.filter(I=y, filter_params=[10,2]).astype(np.float32)
rgb = np.dstack((img_filtered,cr,cb))
rgb = cv2.cvtColor(rgb, cv2.COLOR_YCrCb2RGB)
rgb[rgb<0]=0
rgb*=255
rgb = rgb.astype(np.uint8)
return rgb
|
the-stack_0_23529 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import mock
import webapp2
import webtest
from dashboard import debug_alert
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import anomaly_config
from dashboard.models import graph_data
from dashboard.sheriff_config_client import SheriffConfigClient
_SAMPLE_SERIES = [
(300, 60.06),
(301, 60.36),
(302, 61.76),
(303, 60.06),
(304, 61.24),
(305, 60.65),
(306, 55.61),
(307, 61.88),
(308, 61.51),
(309, 59.58),
(310, 71.79),
(311, 71.97),
(312, 71.63),
(313, 67.16),
(314, 70.91),
(315, 73.40),
(316, 71.00),
(317, 69.45),
(318, 67.16),
(319, 66.05),
]
@mock.patch.object(SheriffConfigClient, '__init__',
mock.MagicMock(return_value=None))
@mock.patch.object(SheriffConfigClient, 'Match',
mock.MagicMock(return_value=([], None)))
class DebugAlertTest(testing_common.TestCase):
def setUp(self):
super(DebugAlertTest, self).setUp()
app = webapp2.WSGIApplication([('/debug_alert',
debug_alert.DebugAlertHandler)])
self.testapp = webtest.TestApp(app)
self.PatchDatastoreHooksRequest()
def _AddSampleData(self):
"""Adds TestMetadata and Row entities, and returns the TestMetadata key."""
testing_common.AddTests(['M'], ['b'], {'suite': {'foo': {}}})
test_path = 'M/b/suite/foo'
rows_dict = {x: {'value': y} for x, y in _SAMPLE_SERIES}
testing_common.AddRows(test_path, rows_dict)
return utils.TestKey(test_path)
def testGet_WithInvalidTestPath_ShowsFormAndError(self):
response = self.testapp.get('/debug_alert?test_path=foo')
self.assertIn('<form', response.body)
self.assertIn('class="error"', response.body)
def testGet_WithValidTestPath_ShowsChart(self):
test_key = self._AddSampleData()
test_path = utils.TestPath(test_key)
response = self.testapp.get('/debug_alert?test_path=%s' % test_path)
self.assertIn('id="plot"', response.body)
def testPost_SameAsGet(self):
# Post is the same as get for this endpoint.
test_key = self._AddSampleData()
test_path = utils.TestPath(test_key)
get_response = self.testapp.get('/debug_alert?test_path=%s' % test_path)
post_response = self.testapp.post('/debug_alert?test_path=%s' % test_path)
self.assertEqual(get_response.body, post_response.body)
def testGet_WithNoParameters_ShowsForm(self):
response = self.testapp.get('/debug_alert')
self.assertIn('<form', response.body)
self.assertNotIn('id="plot"', response.body)
def testGet_WithRevParameter_EmbedsCorrectRevisions(self):
test_key = self._AddSampleData()
test_path = utils.TestPath(test_key)
response = self.testapp.get(
'/debug_alert?test_path=%s&rev=%s&num_before=%s&num_after=%s' %
(test_path, 305, 10, 5))
self.assertEqual([300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310],
self.GetEmbeddedVariable(response, 'LOOKUP'))
def testGet_InvalidNumBeforeParameter_ShowsFormAndError(self):
test_key = self._AddSampleData()
test_path = utils.TestPath(test_key)
response = self.testapp.get(
'/debug_alert?test_path=%s&rev=%s&num_before=%s&num_after=%s' %
(test_path, 305, 'foo', 5))
self.assertIn('<form', response.body)
self.assertIn('class="error"', response.body)
self.assertNotIn('LOOKUP', response.body)
def _AddAnomalyConfig(self, config_name, test_key, config_dict):
"""Adds a custom anomaly config which applies to one test."""
anomaly_config_key = anomaly_config.AnomalyConfig(
id=config_name, config=config_dict,
patterns=[utils.TestPath(test_key)]).put()
return anomaly_config_key
@unittest.expectedFailure
@mock.patch.object(debug_alert, 'SimulateAlertProcessing')
def testGet_TestHasOverriddenConfig_ConfigUsed(self, simulate_mock):
test_key = self._AddSampleData()
# Add a config which applies to the test. The test is updated upon put.
self._AddAnomalyConfig('X', test_key, {'min_absolute_change': 10})
t = test_key.get()
t.UpdateSheriff()
t.put()
response = self.testapp.get('/debug_alert?test_path=%s' %
utils.TestPath(test_key))
# The custom config should be used when simulating alert processing.
simulate_mock.assert_called_once_with(mock.ANY, min_absolute_change=10)
# The config JSON should also be put into the form on the page.
self.assertIn('"min_absolute_change": 10', response.body)
@unittest.expectedFailure
@mock.patch.object(debug_alert, 'SimulateAlertProcessing')
def testGet_WithValidCustomConfig_ConfigUsed(self, simulate_mock):
test_key = self._AddSampleData()
response = self.testapp.get(
'/debug_alert?test_path=%s&config=%s' %
(utils.TestPath(test_key), '{"min_relative_change":0.75}'))
# The custom config should be used when simulating alert processing.
simulate_mock.assert_called_once_with(mock.ANY, min_relative_change=0.75)
# The config JSON should also be put into the form on the page.
self.assertIn('"min_relative_change": 0.75', response.body)
@unittest.expectedFailure
@mock.patch.object(debug_alert, 'SimulateAlertProcessing')
def testGet_WithBogusParameterNames_ParameterIgnored(self, simulate_mock):
test_key = self._AddSampleData()
response = self.testapp.get('/debug_alert?test_path=%s&config=%s' %
(utils.TestPath(test_key), '{"foo":0.75}'))
simulate_mock.assert_called_once_with(mock.ANY)
self.assertNotIn('"foo"', response.body)
def testGet_WithInvalidCustomConfig_ErrorShown(self):
test_key = self._AddSampleData()
response = self.testapp.get('/debug_alert?test_path=%s&config=%s' %
(utils.TestPath(test_key), 'not valid json'))
# The error message should be on the page; JS constants should not be.
self.assertIn('Invalid JSON', response.body)
self.assertNotIn('LOOKUP', response.body)
def testGet_WithStoredAnomalies_ShowsStoredAnomalies(self):
test_key = self._AddSampleData()
anomaly.Anomaly(
test=test_key,
start_revision=309,
end_revision=310,
median_before_anomaly=60,
median_after_anomaly=70,
bug_id=12345).put()
response = self.testapp.get('/debug_alert?test_path=%s' %
utils.TestPath(test_key))
# Information about the stored anomaly should be somewhere on the page.
self.assertIn('12345', response.body)
def testFetchLatestRows(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchLatestRows(test_key.get(), 4)
revisions = [r.revision for r in rows]
self.assertEqual([316, 317, 318, 319], revisions)
def testFetchAroundRev(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 5, 8)
revisions = [r.revision for r in rows]
self.assertEqual(
[306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318],
revisions)
def testFetchRowsAroundRev_NotAllRowsAvailable(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 100, 100)
# There are only 20 rows in the sample data, so only 20 can be fetched.
self.assertEqual(20, len(rows))
def testChartSeries(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 5, 5)
# The indexes used in the chart series should match those in the lookup.
self.assertEqual([(0, 55.61), (1, 61.88), (2, 61.51), (3, 59.58),
(4, 71.79), (5, 71.97), (6, 71.63), (7, 67.16),
(8, 70.91), (9, 73.4)], debug_alert._ChartSeries(rows))
def testRevisionList(self):
test_key = self._AddSampleData()
rows = debug_alert._FetchRowsAroundRev(test_key.get(), 310, 5, 5)
# The lookup dict maps indexes to x-values in the input series.
self.assertEqual([306, 307, 308, 309, 310, 311, 312, 313, 314, 315],
debug_alert._RevisionList(rows))
def testCsvUrl_RowsGiven_AllParamsSpecified(self):
self._AddSampleData()
rows = graph_data.Row.query().fetch(limit=20)
self.assertEqual(
'/graph_csv?test_path=M%2Fb%2Fsuite%2Ffoo&num_points=20&rev=319',
debug_alert._CsvUrl('M/b/suite/foo', rows))
def testCsvUrl_NoRows_OnlyTestPathSpecified(self):
# If there are no rows available for some reason, a CSV download
# URL can still be constructed, but without specific revisions.
self.assertEqual('/graph_csv?test_path=M%2Fb%2Fsuite%2Ffoo',
debug_alert._CsvUrl('M/b/suite/foo', []))
def testGraphUrl_RevisionGiven_RevisionParamInUrl(self):
test_key = self._AddSampleData()
# Both string and int can be accepted for revision.
self.assertEqual('/report?masters=M&bots=b&tests=suite%2Ffoo&rev=310',
debug_alert._GraphUrl(test_key.get(), 310))
self.assertEqual('/report?masters=M&bots=b&tests=suite%2Ffoo&rev=310',
debug_alert._GraphUrl(test_key.get(), '310'))
def testGraphUrl_NoRevisionGiven_NoRevisionParamInUrl(self):
test_key = self._AddSampleData()
# Both None and empty string mean "no revision".
self.assertEqual('/report?masters=M&bots=b&tests=suite%2Ffoo',
debug_alert._GraphUrl(test_key.get(), ''))
self.assertEqual('/report?masters=M&bots=b&tests=suite%2Ffoo',
debug_alert._GraphUrl(test_key.get(), None))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_23534 | from unittest import TestCase
import pandas as pd
from .test_trading_calendar import ExchangeCalendarTestBase
from trading_calendars.exchange_calendar_xasx import XASXExchangeCalendar
class XASXCalendarTestCase(ExchangeCalendarTestBase, TestCase):
answer_key_filename = 'xasx'
calendar_class = XASXExchangeCalendar
# The XASX is open from 10:00 am to 4:00 pm.
MAX_SESSION_HOURS = 6
def test_normal_year(self):
expected_holidays = [
pd.Timestamp('2018-01-01', tz='UTC'), # New Year's Day
pd.Timestamp('2018-01-26', tz='UTC'), # Australia Day
pd.Timestamp('2018-03-30', tz='UTC'), # Good Friday
pd.Timestamp('2018-04-02', tz='UTC'), # Easter Monday
pd.Timestamp('2018-04-25', tz='UTC'), # Anzac Day
pd.Timestamp('2018-06-11', tz='UTC'), # Queen's Birthday
pd.Timestamp('2018-12-25', tz='UTC'), # Christmas
pd.Timestamp('2018-12-26', tz='UTC'), # Boxing Day
]
for session_label in expected_holidays:
self.assertNotIn(session_label, self.calendar.all_sessions)
early_closes = [
pd.Timestamp('2018-12-24', tz='UTC'), # Day before Christmas
pd.Timestamp('2018-12-31', tz='UTC'), # Day before New Year's
]
for early_close_session_label in early_closes:
self.assertIn(
early_close_session_label,
self.calendar.early_closes,
)
def test_holidays_fall_on_weekend(self):
"""
Holidays falling on a weekend should be made up on the next weekday.
Anzac Day is observed on the following Monday only when falling
on a Sunday. In years where Anzac Day falls on a Saturday, there
is no make-up.
Christmas/Boxing Day are special cases, whereby if Christmas is a
Saturday and Boxing Day is a Sunday, the next Monday and Tuesday will
be holidays. If Christmas is a Sunday and Boxing Day is a Monday then
Monday and Tuesday will still both be holidays.
"""
expected_holidays = [
# New Year's Day on a Sunday, observed on Monday.
pd.Timestamp('2017-01-02', tz='UTC'),
# Australia Day on a Sunday, observed on Monday (2010 and after).
pd.Timestamp('2014-01-27', tz='UTC'),
# Anzac Day on a Sunday, observed on Monday.
pd.Timestamp('2010-04-26', tz='UTC'),
# Christmas on a Sunday, Boxing Day on Monday.
pd.Timestamp('2016-12-26', tz='UTC'),
pd.Timestamp('2016-12-27', tz='UTC'),
# Christmas on a Saturday, Boxing Day on Sunday.
pd.Timestamp('2010-12-27', tz='UTC'),
pd.Timestamp('2010-12-28', tz='UTC'),
]
for session_label in expected_holidays:
self.assertNotIn(session_label, self.calendar.all_sessions)
expected_sessions = [
# Anzac Day on a Saturday, does not have a make-up.
pd.Timestamp('2015-04-27', tz='UTC'),
# Anzac Day on a Saturday, does not have a make-up (prior
# to 2010).
pd.Timestamp('2004-04-26', tz='UTC'),
]
for session_label in expected_sessions:
self.assertIn(session_label, self.calendar.all_sessions)
def test_half_days(self):
half_days = [
# In 2018, the last trading days before Christmas and New Year's
# are on Mondays, so they should be half days.
pd.Timestamp('2018-12-24', tz='Australia/Sydney'),
pd.Timestamp('2018-12-31', tz='Australia/Sydney'),
# In 2017, Christmas and New Year's fell on Mondays, so the last
# trading days before them were Fridays, which should be half days.
pd.Timestamp('2017-12-22', tz='Australia/Sydney'),
pd.Timestamp('2017-12-29', tz='Australia/Sydney'),
# In 2016, Christmas and New Year's fell on Sundays, so the last
# trading days before them were Fridays, which should be half days.
pd.Timestamp('2016-12-23', tz='Australia/Sydney'),
pd.Timestamp('2016-12-30', tz='Australia/Sydney'),
# 2010 is the first year we expect the half day rules to take
# effect.
pd.Timestamp('2010-12-24', tz='Australia/Sydney'),
pd.Timestamp('2010-12-31', tz='Australia/Sydney'),
]
full_days = [
# In 2009 the half day rules should not be in effect yet.
pd.Timestamp('2009-12-24', tz='Australia/Sydney'),
pd.Timestamp('2009-12-31', tz='Australia/Sydney'),
]
for half_day in half_days:
half_day_close_time = self.calendar.next_close(half_day)
self.assertEqual(
half_day_close_time.tz_convert('Australia/Sydney'),
half_day + pd.Timedelta(hours=14, minutes=10),
)
for full_day in full_days:
full_day_close_time = self.calendar.next_close(full_day)
self.assertEqual(
full_day_close_time.tz_convert('Australia/Sydney'),
full_day + pd.Timedelta(hours=16),
)
|
the-stack_0_23536 | from collections import deque
from Arena import Arena
from MCTS import MCTS
import numpy as np
from pytorch_classification.utils import Bar, AverageMeter
import time, os, sys
from pickle import Pickler, Unpickler
from random import shuffle
class Coach():
"""
This class executes the self-play + learning. It uses the functions defined
in Game and NeuralNet. args are specified in main.py.
"""
def __init__(self, game, nnet, args):
self.game = game
self.nnet = nnet
self.pnet = self.nnet.__class__(self.game) # the competitor network
self.args = args
self.mcts = MCTS(self.game, self.nnet, self.args)
self.trainExamplesHistory = [] # history of examples from args.numItersForTrainExamplesHistory latest iterations
        self.skipFirstSelfPlay = False # can be overridden in loadTrainExamples()
def executeEpisode(self):
"""
This function executes one episode of self-play, starting with player 1.
As the game is played, each turn is added as a training example to
trainExamples. The game is played till the game ends. After the game
ends, the outcome of the game is used to assign values to each example
in trainExamples.
It uses a temp=1 if episodeStep < tempThreshold, and thereafter
uses temp=0.
Returns:
trainExamples: a list of examples of the form (canonicalBoard,pi,v)
pi is the MCTS informed policy vector, v is +1 if
the player eventually won the game, else -1.
"""
trainExamples = []
board = self.game.getInitBoard()
self.curPlayer = 1
episodeStep = 0
while True:
#print('here')
episodeStep += 1
canonicalBoard = self.game.getCanonicalForm(board,self.curPlayer)
can_board = board * self.curPlayer
if(not(np.array_equal(can_board, canonicalBoard))):
print('Canonical Board and Current Board states not same!!!')
#If you see this line getting printed something is terribly wrong
temp = int(episodeStep < self.args.tempThreshold)
pi = self.mcts.getActionProb(canonicalBoard, temp=temp)
sym = self.game.getSymmetries(canonicalBoard, pi)
for b,p in sym:
trainExamples.append([b, self.curPlayer, p, None])
action = np.random.choice(len(pi), p=pi)
#print('!!!:')
#print(board)
board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)
r = self.game.getGameEnded(board, self.curPlayer)
if r!=0:
return [(x[0],x[2],r*((-1)**(x[1]!=self.curPlayer))) for x in trainExamples]
def learn(self):
"""
Performs numIters iterations with numEps episodes of self-play in each
iteration. After every iteration, it retrains neural network with
        examples in trainExamples (which has a maximum length of maxlenOfQueue).
It then pits the new neural network against the old one and accepts it
only if it wins >= updateThreshold fraction of games.
"""
for i in range(1, self.args.numIters+1):
# bookkeeping
print('------ITER ' + str(i) + '------')
# examples of the iteration
if not self.skipFirstSelfPlay or i>1:
iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)
eps_time = AverageMeter()
bar = Bar('Self Play', max=self.args.numEps)
end = time.time()
for eps in range(self.args.numEps):
self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree
iterationTrainExamples += self.executeEpisode()
# bookkeeping + plot progress
eps_time.update(time.time() - end)
end = time.time()
bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps+1, maxeps=self.args.numEps, et=eps_time.avg,
total=bar.elapsed_td, eta=bar.eta_td)
bar.next()
bar.finish()
# save the iteration examples to the history
self.trainExamplesHistory.append(iterationTrainExamples)
if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:
print("len(trainExamplesHistory) =", len(self.trainExamplesHistory), " => remove the oldest trainExamples")
self.trainExamplesHistory.pop(0)
# backup history to a file
# NB! the examples were collected using the model from the previous iteration, so (i-1)
self.saveTrainExamples(i-1)
            # shuffle examples before training
trainExamples = []
for e in self.trainExamplesHistory:
trainExamples.extend(e)
shuffle(trainExamples)
# training new network, keeping a copy of the old one
self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')
self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')
pmcts = MCTS(self.game, self.pnet, self.args)
self.nnet.train(trainExamples)
nmcts = MCTS(self.game, self.nnet, self.args)
print('PITTING AGAINST PREVIOUS VERSION')
arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),
lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)
pwins, nwins, draws = arena.playGames(self.args.arenaCompare)
print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))
if pwins+nwins == 0 or float(nwins)/(pwins+nwins) < self.args.updateThreshold:
print('REJECTING NEW MODEL')
self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')
else:
print('ACCEPTING NEW MODEL')
self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))
self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')
def getCheckpointFile(self, iteration):
return 'checkpoint_' + str(iteration) + '.pth.tar'
def saveTrainExamples(self, iteration):
folder = self.args.checkpoint
if not os.path.exists(folder):
os.makedirs(folder)
filename = os.path.join(folder, self.getCheckpointFile(iteration)+".examples")
with open(filename, "wb+") as f:
Pickler(f).dump(self.trainExamplesHistory)
f.closed
def loadTrainExamples(self):
modelFile = os.path.join(self.args.load_folder_file[0], self.args.load_folder_file[1])
examplesFile = modelFile+".examples"
if not os.path.isfile(examplesFile):
print(examplesFile)
r = input("File with trainExamples not found. Continue? [y|n]")
if r != "y":
sys.exit()
else:
print("File with trainExamples found. Read it.")
with open(examplesFile, "rb") as f:
self.trainExamplesHistory = Unpickler(f).load()
f.closed
# examples based on the model were already collected (loaded)
self.skipFirstSelfPlay = True
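# Illustrative wiring of Coach (a sketch, not part of the original class).
# The Game implementation, neural-net wrapper and dotdict container are
# assumptions supplied by the surrounding project; the hyperparameter names
# mirror the attributes Coach and MCTS read from `args`.
def _example_training_run():
    from othello.OthelloGame import OthelloGame    # assumed project module
    from othello.pytorch.NNet import NNetWrapper   # assumed project module
    from utils import dotdict                      # assumed project module
    args = dotdict({
        'numIters': 10, 'numEps': 25, 'tempThreshold': 15,
        'updateThreshold': 0.6, 'maxlenOfQueue': 200000,
        'numMCTSSims': 25, 'arenaCompare': 40, 'cpuct': 1,
        'checkpoint': './temp/',
        'load_folder_file': ('./temp/', 'best.pth.tar'),
        'numItersForTrainExamplesHistory': 20,
    })
    game = OthelloGame(6)
    nnet = NNetWrapper(game)
    Coach(game, nnet, args).learn()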
|
the-stack_0_23537 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cacheselector(base_resource) :
""" Configuration for cache selector resource. """
def __init__(self) :
self._selectorname = ""
self._rule = []
self._flags = 0
self.___count = 0
@property
def selectorname(self) :
"""Name for the selector. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters.
"""
try :
return self._selectorname
except Exception as e:
raise e
@selectorname.setter
def selectorname(self, selectorname) :
"""Name for the selector. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters.
"""
try :
self._selectorname = selectorname
except Exception as e:
raise e
@property
def rule(self) :
"""One or multiple PIXL expressions for evaluating an HTTP request or response.<br/>Minimum length = 1.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
"""One or multiple PIXL expressions for evaluating an HTTP request or response.<br/>Minimum length = 1
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def flags(self) :
"""Flags.
"""
try :
return self._flags
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cacheselector_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cacheselector
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.selectorname) :
return str(self.selectorname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add cacheselector.
"""
try :
if type(resource) is not list :
addresource = cacheselector()
addresource.selectorname = resource.selectorname
addresource.rule = resource.rule
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ cacheselector() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].selectorname = resource[i].selectorname
addresources[i].rule = resource[i].rule
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete cacheselector.
"""
try :
if type(resource) is not list :
deleteresource = cacheselector()
if type(resource) != type(deleteresource):
deleteresource.selectorname = resource
else :
deleteresource.selectorname = resource.selectorname
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ cacheselector() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].selectorname = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ cacheselector() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].selectorname = resource[i].selectorname
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update cacheselector.
"""
try :
if type(resource) is not list :
updateresource = cacheselector()
updateresource.selectorname = resource.selectorname
updateresource.rule = resource.rule
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ cacheselector() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].selectorname = resource[i].selectorname
updateresources[i].rule = resource[i].rule
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the cacheselector resources that are configured on netscaler.
"""
try :
if not name :
obj = cacheselector()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = cacheselector()
obj.selectorname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [cacheselector() for _ in range(len(name))]
obj = [cacheselector() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = cacheselector()
obj[i].selectorname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of cacheselector resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = cacheselector()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the cacheselector resources configured on NetScaler.
"""
try :
obj = cacheselector()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of cacheselector resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = cacheselector()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class cacheselector_response(base_response) :
def __init__(self, length=1) :
self.cacheselector = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.cacheselector = [cacheselector() for _ in range(length)]
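# Illustrative sketch of driving this resource through a NITRO session. The
# appliance address, credentials and the policy expression in `rule` are
# placeholders; adapt them to the target NetScaler deployment.
def _example_add_cacheselector():
	from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
	client = nitro_service("10.0.0.1", "http")            # placeholder NSIP
	client.login("nsroot", "nsroot")                      # placeholder credentials
	selector = cacheselector()
	selector.selectorname = "demo_selector"
	selector.rule = ['HTTP.REQ.URL.CONTAINS("/images")']  # placeholder expression
	cacheselector.add(client, selector)
	client.logout()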
|
the-stack_0_23540 | # Python imports
import unittest
# Third party imports
from jinja2 import Template
from googleapiclient.errors import HttpError
# Project imports
import bq_utils
from cdr_cleaner import clean_cdr_engine
from cdr_cleaner.cleaning_rules import repopulate_person_post_deid
from tests import test_util
# Participant 1: has gender and sex at birth observations
# Participant 2: no gender or sex at birth observations
INSERT_FAKE_PARTICIPANTS_TMPLS = [
# TODO(calbach): Ideally these tests should not manipulate concept table, not currently hermetic.
Template("""
INSERT INTO `{{project_id}}.{{dataset_id}}.concept` (concept_id, concept_code)
VALUES
({{gender_concept_id}}, "gender"),
({{gender_nonbinary_concept_id}}, "nonbinary"),
({{gender_nonbinary_source_concept_id}}, "nonbinary_src"),
({{sex_at_birth_concept_id}}, "sex"),
({{sex_female_concept_id}}, "female"),
({{sex_female_source_concept_id}}, "female_src")
"""),
Template("""
INSERT INTO `{{project_id}}.{{dataset_id}}.person` (person_id)
VALUES (1), (2)
"""),
Template("""
INSERT INTO `{{project_id}}.{{dataset_id}}.observation` (person_id, observation_id, observation_source_concept_id, value_as_concept_id, value_source_concept_id)
VALUES
(1, 100, {{gender_concept_id}}, {{gender_nonbinary_concept_id}}, {{gender_nonbinary_source_concept_id}}),
(1, 101, {{sex_at_birth_concept_id}}, {{sex_female_concept_id}}, {{sex_female_source_concept_id}})
""")
]
class RepopulatePersonPostDeidTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.project_id = bq_utils.app_identity.get_application_id()
self.dataset_id = bq_utils.get_combined_dataset_id()
if not self.project_id or not self.dataset_id:
# TODO: Fix handling of globals, push these assertions down if they are required.
raise ValueError(
f"missing configuration for project ('{self.project_id}') " +
f"and/or dataset ('{self.dataset_id}')")
# TODO: Reconcile this with a consistent integration testing model. Ideally each test should
# clean up after itself so that we don't need this defensive check.
test_util.delete_all_tables(self.dataset_id)
create_tables = ['person', 'observation']
for tbl in ['concept']:
if not bq_utils.table_exists(tbl, dataset_id=self.dataset_id):
create_tables.append(tbl)
for tbl in create_tables:
bq_utils.create_standard_table(tbl,
tbl,
dataset_id=self.dataset_id,
force_all_nullable=True)
def tearDown(self):
test_util.delete_all_tables(self.dataset_id)
def assertPersonFields(self, person, want):
for k in want.keys():
self.assertIn(k, person)
self.assertEqual(person[k], want[k])
def test_execute_queries(self):
gender_nonbinary_concept_id = 1585841
gender_nonbinary_source_concept_id = 123
sex_female_concept_id = 1585847
sex_female_source_concept_id = 45878463
for tmpl in INSERT_FAKE_PARTICIPANTS_TMPLS:
query = tmpl.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
gender_concept_id=repopulate_person_post_deid.GENDER_CONCEPT_ID,
gender_nonbinary_concept_id=gender_nonbinary_concept_id,
gender_nonbinary_source_concept_id=
gender_nonbinary_source_concept_id,
sex_at_birth_concept_id=repopulate_person_post_deid.
SEX_AT_BIRTH_CONCEPT_ID,
sex_female_concept_id=sex_female_concept_id,
sex_female_source_concept_id=sex_female_source_concept_id)
try:
resp = bq_utils.query(query)
except HttpError as e:
self.fail("failed to execute query '{}': {}".format(
query, e.content))
self.assertTrue(resp["jobComplete"])
queries = repopulate_person_post_deid.get_repopulate_person_post_deid_queries(
self.project_id, self.dataset_id)
clean_cdr_engine.clean_dataset(self.project_id, queries)
rows = bq_utils.response2rows(
bq_utils.query("SELECT * FROM `{}.{}.person`".format(
self.project_id, self.dataset_id)))
        self.assertEqual(len(rows), 2)
by_participant = {r["person_id"]: r for r in rows}
self.assertPersonFields(
by_participant[1], {
"gender_concept_id": gender_nonbinary_concept_id,
"gender_source_value": "nonbinary_src",
"gender_source_concept_id": gender_nonbinary_source_concept_id,
"sex_at_birth_concept_id": sex_female_concept_id,
"sex_at_birth_source_value": "female_src",
"sex_at_birth_source_concept_id": sex_female_source_concept_id
})
self.assertPersonFields(
by_participant[2], {
"gender_concept_id": 0,
"gender_source_value": "No matching concept",
"gender_source_concept_id": 0,
"sex_at_birth_concept_id": 0,
"sex_at_birth_source_value": "No matching concept",
"sex_at_birth_source_concept_id": 0
})
|
the-stack_0_23541 | import re
import os
EXCLUDES = ['implementation', 'testbench']
BASIC_ID_REGEX = (
r'[a-z]' # a basic identifier starts with an alphabetic char
r'[a-z0-9]*' # it can contain alphanumerics
r'(?:_[a-z0-9]+)*' # and '_' but not at the end
)
INSTANTIATION_REGEX = (
r'\s*' # indentation
r'(?P<entity>{0})' # single basic identifier
r'\s*:\s*entity\s*' # entity declaration and spacing
r'(?P<component>' # component:
r'{0}' # at least a basic identifier # noqa E131
r'(?:\.{0})*' # for libraries: dots can only appear if they are followed by another basic identifier
r')'
.format(BASIC_ID_REGEX)
)
INSTANTIATION_PATTERN = re.compile(INSTANTIATION_REGEX, re.IGNORECASE)
def _vhdltree(level, vhd_path, vhd_files):
with open(vhd_path) as vhd_file:
for entity, component in find_entities(vhd_file):
try:
component_path = vhd_files[component.lower()]
except KeyError:
yield level, entity, ''
else:
yield level, entity, component_path
yield from _vhdltree(level + 1, component_path, vhd_files)
def find_entities(lines):
    for line in lines:
        m = INSTANTIATION_PATTERN.match(line)
if m:
yield m.group('entity'), m.group('component').split('.')[-1]
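# Quick illustration of find_entities (the instance label and component name
# below are made up): the pattern accepts `label : entity [library.]component`.
def _demo_find_entities():
    sample = ["    u_core : entity work.my_core port map (clk => clk);"]
    return list(find_entities(sample))  # -> [('u_core', 'my_core')]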
def find_ext(directory, ext):
try:
entries = os.listdir(directory)
except PermissionError as e:
print(e)
else:
for entry in entries:
entrypath = os.path.join(directory, entry)
if os.path.isfile(entrypath):
basename, *entry_ext = entry.lower().rsplit('.', 1)
if entry_ext == [ext] and basename:
yield basename, entrypath
elif os.path.isdir(entrypath) and all(excluder not in entry.lower() for excluder in EXCLUDES):
yield from find_ext(entrypath, ext)
def vhdltree(filepath, proot):
vhd_files = dict(find_ext(proot, 'vhd'))
for level, entity, path in _vhdltree(0, filepath, vhd_files):
print('{indent}{entity} : {path}'.format(
indent=4 * ' ' * level,
entity=entity,
path=path or 'Not found'
))
|
the-stack_0_23542 | # importing required packages for this section
from urllib.parse import urlparse,urlencode
import ipaddress
import re
import whois
import urllib
import urllib.request
from datetime import datetime
import requests
import dns.resolver
import socket
import ssl  # needed by Extractor.trusted_ca() below
import time
import gc
truseted_ca = ['cPanel,',
'Microsoft',
'HydrantID',
'AlphaSSL',
'GTS',
'RapidSSL',
'DFN-Verein',
'Cloudflare',
'GeoTrust',
'QuoVadis',
'Certum',
'Amazon',
'Gandi',
'COMODO',
'Go',
'Cybertrust',
'GlobalSign',
'Yandex',
'R3',
'Network',
'DigiCert',
'GoGetSSL',
'Thawte',
'Apple',
'Starfield',
'RU-CENTER',
'Trustwave',
'Entrust',
'InCommon',
'Sectigo',
'Secure']
headers = {
"Cache-Control": "no-cache",
"Pragma": "no-cache"
}
class Extractor():
def __init__(self):
self.feature_names = ['Speical_Char','Have_IP', 'Have_At','URL_length' ,'URL_Depth','redirection', 'time_get_redirect',
'port_in_url','use_http', 'http_in_domain','TinyURL', 'Prefix/Suffix', 'DNS_Record','trusted_ca',
'domain_lifespan', 'domain_timeleft', 'same_asn','iFrame', 'Mouse_Over','Right_Click','eval','unescape',
'escape', 'ActiveXObject','fromCharCode','atob','Punny_Code',
'TLDs','Title','country_name']
# 1.Speical Chartacter in URL
@staticmethod
def special_char(url):
url = url.replace("/","")
url = url.replace("-","")
special = re.sub('[\w]+' ,'', url)
return len(special)
# 2.Checks for IP address in URL (Have_IP)
@staticmethod
    def havingIP(url):
        ip = 0  # default: no raw IP address detected
        try:
            if ipaddress.ip_address(url) and Extractor.getLength(url) == 1:
                ip = 1
        except:
            ip = 0
        return ip
# 3.Checks the presence of @ in URL (Have_At)
@staticmethod
def haveAtSign(url):
if "@" in url and Extractor.getLength(url) == 1:
at = 1
else:
at = 0
return at
# 4.Finding the length of URL and categorizing (URL_Length)
@staticmethod
def getLength(url):
if "?fbclid" in url: #Fukin link in facebook contain fbclid
url = url.split("?fbclid")[0]
if len(url) < 54:
length = 0
else:
length = 1
return length
# 5.Gives number of '/' in URL (URL_Depth)
@staticmethod
def getDepth(url):
s = urlparse(url).path.split('/')
depth = 0
for j in range(len(s)):
if len(s[j]) != 0:
depth = depth+1
return depth
# 6.Checking for redirection '//' in the url (Redirection)
@staticmethod
def redirection(url):
pos = url.rfind('//')
return pos
# 7.Redirect time
@staticmethod
def forwarding(response):
try:
            n_redirect = len(response.history)
return n_redirect
except:
return 0
# 8.
@staticmethod
def port_in_url(url):
p = '(?:http.*://)?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
m = re.search(p,url)
if m.group('port') :
return 1
else :
return 0
# 9.
@staticmethod
def notsafe_protocol(url):
if urlparse(url).scheme == 'http':
return 1
else:
return 0
# 10.Existence of “HTTPS” Token in the Domain Part of the URL (https_Domain)
@staticmethod
def httpDomain(url):
# domain = urlparse(url).netloc
# if 'http' in domain and Extractor.getLength(url) == 1:
if 'http' in url.split("//")[1]:
return 1
else:
return 0
# 11. Checking for Shortening Services in URL (Tiny_URL)
@staticmethod
def tinyURL(url):
#listing shortening services
shortening_services = r"bit\.ly|goo\.gl|shorte\.st|go2l\.ink|x\.co|ow\.ly|t\.co|tinyurl|tr\.im|is\.gd|cli\.gs|" \
r"yfrog\.com|migre\.me|ff\.im|tiny\.cc|url4\.eu|twit\.ac|su\.pr|twurl\.nl|snipurl\.com|" \
r"short\.to|BudURL\.com|ping\.fm|post\.ly|Just\.as|bkite\.com|snipr\.com|fic\.kr|loopt\.us|" \
r"doiop\.com|short\.ie|kl\.am|wp\.me|rubyurl\.com|om\.ly|to\.ly|bit\.do|t\.co|lnkd\.in|db\.tt|" \
r"qr\.ae|adf\.ly|goo\.gl|bitly\.com|cur\.lv|tinyurl\.com|ow\.ly|bit\.ly|ity\.im|q\.gs|is\.gd|" \
r"po\.st|bc\.vc|twitthis\.com|u\.to|j\.mp|buzurl\.com|cutt\.us|u\.bb|yourls\.org|x\.co|" \
r"prettylinkpro\.com|scrnch\.me|filoops\.info|vzturl\.com|qr\.net|1url\.com|tweez\.me|v\.gd|" \
r"tr\.im|link\.zip\.net"
match=re.search(shortening_services,url)
if match:
return 1
else:
return 0
# 12.Checking for Prefix or Suffix Separated by (-) in the Domain (Prefix/Suffix)
@staticmethod
def prefixSuffix(url):
if '-' in urlparse(url).netloc:
return 1 # phishing
else:
return 0 # legitimate
# Reject
@staticmethod
def web_traffic(url):
try:
#Filling the whitespaces in the URL if any
url = urllib.parse.quote(url)
rank = BeautifulSoup(urllib.request.urlopen("http://data.alexa.com/data?cli=10&dat=s&url=" + url).read(), "xml").find("REACH")['RANK']
rank = int(rank)
except TypeError:
print("Cant get web traffic")
return 1
if rank <100000:
return 0
else:
return 1
# 13
@staticmethod
def trusted_ca(domain):
try:
ctx = ssl.create_default_context()
with ctx.wrap_socket(socket.socket(), server_hostname=domain['domain_name']) as s:
s.settimeout(5)
s.connect((domain['domain_name'], 443))
cert = s.getpeercert()
subject = dict(x[0] for x in cert['subject'])
issued_to = subject['commonName']
issuer = dict(x[0] for x in cert['issuer'])
issued_by = issuer['commonName']
if issued_by.split(" ")[0] in truseted_ca:
return 0
else:
return 1
except:
return 1
# print(f"DOMAIN {domain['domain_name']} ERROR")
# 14.Survival time of domain: The difference between termination time and creation time (Domain_Age)
@staticmethod
def domain_lifespan(domain_name):
creation_date = domain_name.creation_date
expiration_date = domain_name.expiration_date
if isinstance(creation_date, list):
creation_date= creation_date[-1]
if isinstance(expiration_date, list):
expiration_date= expiration_date[-1]
if (isinstance(creation_date,str) or isinstance(expiration_date,str)):
try:
creation_date = datetime.strptime(creation_date,'%Y-%m-%d')
expiration_date = datetime.strptime(expiration_date,"%Y-%m-%d")
except:
return 1
if ((expiration_date is None) or (creation_date is None)):
return 1
elif ((type(expiration_date) is list) or (type(creation_date) is list)):
return 1
else:
ageofdomain = abs((expiration_date - creation_date).days)
# print("Domain Age: ", ageofdomain)
if ((ageofdomain/30) < 6):
age = 1
else:
age = 0
return age
# 15.End time of domain: The difference between termination time and current time (Domain_End)
@staticmethod
def domainEnd(domain_name):
expiration_date = domain_name.expiration_date
if isinstance(expiration_date, list):
expiration_date= expiration_date[-1]
if isinstance(expiration_date,str):
try:
expiration_date = datetime.strptime(expiration_date,"%Y-%m-%d")
except:
return 1
if (expiration_date is None):
return 1
elif (type(expiration_date) is list):
return 1
else:
today = datetime.now()
end = abs((expiration_date - today).days)
if ((end/30) < 6):
end = 1
else:
end = 0
return end
# 16.
@staticmethod
def same_asn(domain_name):
try:
_asn = []
for record in dns.resolver.resolve(domain_name["domain_name"], 'MX'):
mx = record.to_text().split(" ")[1]
_asn.append(socket.gethostbyname(mx))
            if len(_asn) == 1 and socket.gethostbyname(_asn[0]) == socket.gethostbyname(domain_name["domain_name"]):
return 1
else :
return 0
except:
return 1
# reject (too slow)
@staticmethod
def top_n_google(domain, stop=30):
google_search = [j for j in search(domain, tld="co.in", num=10, stop=stop, pause=2)]
if domain not in google_search:
return 1
else:
return 0
# 17. IFrame Redirection (iFrame)
@staticmethod
def iframe(response):
if response == "":
return 1
else:
if re.findall(r"[<iframe>|<frameBorder>]", response.text):
return 0
else:
return 1
# 18.Checks the effect of mouse over on status bar (Mouse_Over)
@staticmethod
def mouseOver(response):
if response == "" :
return 1
else:
if re.findall("<script>.+onmouseover.+</script>", response.text):
return 1
else:
return 0
# 19.Checks the status of the right click attribute (Right_Click)
@staticmethod
def rightClick(response):
if response == "":
return 1
else:
if re.findall(r"event.button ?== ?2", response.text):
return 0
else:
return 1
# 21
@staticmethod
def js_eval(response):
try:
if response == "":
return 1
else:
return response.count("eval")
except:
return 0
# 22
@staticmethod
def js_unescape(response):
try:
if response == "":
return 1
else:
return response.count("unescape")
except:
return 0
# 23
@staticmethod
def js_escape(response):
try:
if response == "":
return 1
else:
return response.count("escape")
except:
return 0
# 24
@staticmethod
def js_Active(response):
try:
if response == "":
return 1
else:
return response.count("ActiveXObject")
except:
return 0
# 25
@staticmethod
def js_charcode(response):
try:
if response == "":
return 1
else:
return response.count("fromCharCode")
except:
return 0
# 26
@staticmethod
def js_atob(response):
try:
if response == "":
return 1
else:
return response.count("atob")
except:
return 0
# 27.Punny code
@staticmethod
def punnycode(url):
vaild_regex = "/^(http|https|ftp):\/\/([A-Z0-9][A-Z0-9_-]*(?:\.[A-Z0-9][A-Z0-9_-]*)+):?(\d+)?\/?/i"
if re.match(vaild_regex,url):
punny = 1
else:
punny = 0
return punny
@staticmethod
def extract_title(response):
try:
if response == "":
return "No Title"
else:
match_title = re.search("<title.*?>(.*?)</title>", response.text)
if match_title is not None:
title = match_title.group(1)
return title
else:
return "No Title"
except:
return "No Title"
#Function to extract features
def __call__(self, url, max_retries=2):
if isinstance(url, str):
features = []
try:
response = requests.get(url, headers=headers, timeout=3)
if response.status_code not in range(400,600):
url = url.rstrip()
features.append(self.special_char(url))
features.append(self.havingIP(url))
features.append(self.haveAtSign(url))
features.append(self.getLength(url))
features.append(self.getDepth(url))
features.append(self.redirection(url))
features.append(self.forwarding(response))
features.append(self.port_in_url(url))
features.append(self.notsafe_protocol(url))
features.append(self.httpDomain(url))
features.append(self.tinyURL(url))
features.append(self.prefixSuffix(url))
#Domain based features (4)
dns = 0
try:
domain_name = whois.whois(urlparse(url).netloc)
except:
dns = 1
features.append(dns)
features.append(1 if dns == 1 else self.trusted_ca(domain_name))
features.append(1 if dns == 1 else self.domain_lifespan(domain_name))
features.append(1 if dns == 1 else self.domainEnd(domain_name))
features.append(1 if dns == 1 else self.same_asn(domain_name))
# features.append(1 if dns == 1 else self.top_n_google(domain_name))
#HTML & Javascript based features
features.append(self.iframe(response))
features.append(self.mouseOver(response))
features.append(self.rightClick(response))
features.append(self.js_eval(response))
features.append(self.js_unescape(response))
features.append(self.js_escape(response))
features.append(self.js_Active(response))
features.append(self.js_charcode(response))
features.append(self.js_atob(response))
features.append(self.punnycode(url))
# Data for Dashboard plotting
features.append(urlparse(url).netloc.split(".")[-1])
features.append(self.extract_title(response))
features.append("None" if dns == 1 else domain_name.country)
return features
else:
return []
except Exception as e:
return []
else:
return []
if __name__ == "__main__":
ext = Extractor()
vector = ext("https://stackoverflow.com/questions/42179046/what-flavor-of-regex-does-visual-studio-code-use")
print(vector)
# Vector = ext("http://msmcomun662.000webhostapp.com/login3.php")
# print(len(Vector))
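    # A minimal follow-on sketch (an assumption, not part of this project): the
    # numeric portion of the vector could be fed to a pre-trained classifier.
    # The model path "phishing_model.pkl" is hypothetical; the last 3 entries
    # (tld, title, country) are the dashboard-only fields appended above.
    #
    #   import joblib
    #   clf = joblib.load("phishing_model.pkl")
    #   numeric_features = vector[:-3]
    #   print(clf.predict([numeric_features]))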
|
the-stack_0_23543 | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
def callback(data):
rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
def listener():
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. Passing
    # anonymous=True would make rospy choose a unique name so that
    # multiple listeners could run simultaneously; anonymous=False
    # below keeps the fixed node name 'listener'.
rospy.init_node('listener', anonymous=False)
rospy.Subscriber("chatter", String, callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
listener() |
the-stack_0_23544 | from typing import Dict, Tuple
import numpy as np
from gym import spaces
from mobile_env.handlers.handler import Handler
class MComCentralHandler(Handler):
features = ["connections", "snrs", "utility"]
@classmethod
def ue_obs_size(cls, env) -> int:
return sum(env.feature_sizes[ftr] for ftr in cls.features)
@classmethod
def action_space(cls, env) -> spaces.MultiDiscrete:
# define multi-discrete action space for central setting
# each element of a multi-discrete action denotes one UE's decision
return spaces.MultiDiscrete([env.NUM_STATIONS + 1 for _ in env.users])
@classmethod
def observation_space(cls, env) -> spaces.Box:
# observation is a single vector of concatenated UE representations
size = cls.ue_obs_size(env)
return spaces.Box(low=-1.0, high=1.0, shape=(env.NUM_USERS * size,))
@classmethod
def action(cls, env, actions: Tuple[int]) -> Dict[int, int]:
"""Transform flattend actions to expected shape of core environment."""
assert len(actions) == len(
env.users
), "Number of actions must equal overall UEs."
users = sorted(env.users)
return {ue_id: action for ue_id, action in zip(users, actions)}
@classmethod
def observation(cls, env) -> np.ndarray:
"""Select from & flatten observations from MA setting."""
# select observations considered in the central setting
obs = {
ue_id: [obs_dict[key] for key in cls.features]
for ue_id, obs_dict in env.features().items()
}
# flatten observation to single vector
return np.concatenate([o for ue_obs in obs.values() for o in ue_obs])
@classmethod
def reward(cls, env):
"""The central agent receives the average UE utility as reward."""
utilities = np.asarray([utility for utility in env.utilities.values()])
# assert that rewards are in range [-1, +1]
bounded = np.logical_and(utilities >= -1, utilities <= 1).all()
assert bounded, "Utilities must be in range [-1, +1]"
# return average utility of UEs to central agent as reward
return np.mean(utilities)
@classmethod
def check(cls, env):
assert [
ue.stime <= 0.0 and ue.extime >= env.EP_MAX_TIME
for ue in env.users.values()
], "Central environment cannot handle a changing number of UEs."
|
the-stack_0_23546 | import stripe
from django.db import models
from .. import enums
from .. import settings as djstripe_settings
from ..fields import (
JSONField,
StripeCurrencyCodeField,
StripeDecimalCurrencyAmountField,
StripeEnumField,
StripeForeignKey,
StripeIdField,
StripeQuantumCurrencyAmountField,
)
from ..managers import TransferManager
from .base import StripeModel
class Account(StripeModel):
"""
Stripe documentation: https://stripe.com/docs/api#account
"""
djstripe_owner_account = None
stripe_class = stripe.Account
# Special handling of the icon and logo fields, they moved to settings.branding
# in Stripe 2019-02-19 but we want them as ForeignKeys
branding_icon = StripeForeignKey(
"FileUpload",
on_delete=models.SET_NULL,
null=True,
related_name="icon_account",
help_text="An icon for the account. Must be square and at least 128px x 128px.",
)
branding_logo = StripeForeignKey(
"FileUpload",
on_delete=models.SET_NULL,
null=True,
related_name="logo_account",
help_text="A logo for the account that will be used in Checkout instead of "
"the icon and without the account’s name next to it if provided. "
"Must be at least 128px x 128px.",
)
business_profile = JSONField(
null=True, blank=True, help_text="Optional information related to the business."
)
business_type = StripeEnumField(
enum=enums.BusinessType, default="", blank=True, help_text="The business type."
)
charges_enabled = models.BooleanField(
help_text="Whether the account can create live charges"
)
country = models.CharField(max_length=2, help_text="The country of the account")
company = JSONField(
null=True,
blank=True,
help_text=(
"Information about the company or business. "
"This field is null unless business_type is set to company."
),
)
default_currency = StripeCurrencyCodeField(
help_text="The currency this account has chosen to use as the default"
)
details_submitted = models.BooleanField(
help_text=(
"Whether account details have been submitted. "
"Standard accounts cannot receive payouts before this is true."
)
)
email = models.CharField(
max_length=255, help_text="The primary user’s email address."
)
# TODO external_accounts = ...
individual = JSONField(
null=True,
blank=True,
help_text=(
"Information about the person represented by the account. "
"This field is null unless business_type is set to individual."
),
)
payouts_enabled = models.BooleanField(
null=True, help_text="Whether Stripe can send payouts to this account"
)
product_description = models.CharField(
max_length=255,
default="",
blank=True,
help_text="Internal-only description of the product sold or service provided "
"by the business. It’s used by Stripe for risk and underwriting purposes.",
)
requirements = JSONField(
null=True,
blank=True,
help_text="Information about the requirements for the account, "
"including what information needs to be collected, and by when.",
)
settings = JSONField(
null=True,
blank=True,
help_text=(
"Account options for customizing how the account functions within Stripe."
),
)
type = StripeEnumField(enum=enums.AccountType, help_text="The Stripe account type.")
tos_acceptance = JSONField(
null=True,
blank=True,
help_text="Details on the acceptance of the Stripe Services Agreement",
)
@property
def business_url(self):
"""
The business’s publicly available website.
:rtype: Optional[str]
"""
return (self.business_profile or {}).get("url")
@classmethod
def get_connected_account_from_token(cls, access_token):
account_data = cls.stripe_class.retrieve(api_key=access_token)
return cls._get_or_create_from_stripe_object(account_data)[0]
@classmethod
def get_default_account(cls):
# As of API version 2020-03-02, there is no permission that can allow
# restricted keys to call GET /v1/account
if djstripe_settings.STRIPE_SECRET_KEY.startswith("rk_"):
return None
account_data = cls.stripe_class.retrieve(
api_key=djstripe_settings.STRIPE_SECRET_KEY
)
return cls._get_or_create_from_stripe_object(account_data)[0]
def __str__(self):
settings = self.settings or {}
business_profile = self.business_profile or {}
return (
settings.get("dashboard", {}).get("display_name")
or business_profile.get("name")
or super().__str__()
)
@classmethod # noqa: C901
def _manipulate_stripe_object_hook(cls, data):
data = super()._manipulate_stripe_object_hook(data)
def empty_string_to_none(v):
"""
stripe.StripeObject.__setitem__ doesn't allow = ""
"""
if v == "":
return None
else:
return v
# icon (formerly called business_logo)
# logo (formerly called business_logo_large)
# moved to settings.branding in Stripe 2019-02-19
# but we'll keep them to provide the ForeignKey
for old, new in [("branding_icon", "icon"), ("branding_logo", "logo")]:
try:
data[old] = data["settings"]["branding"][new]
except KeyError:
pass
return data
@classmethod
def _create_from_stripe_object(
cls,
data,
current_ids=None,
pending_relations=None,
save=True,
stripe_account=None,
):
"""
Set the stripe_account to the id of the Account instance being created.
This ensures that the foreign-key relations that may exist in stripe are
fetched using the appropriate connected account ID.
"""
return super()._create_from_stripe_object(
data=data,
current_ids=current_ids,
pending_relations=pending_relations,
save=save,
stripe_account=data["id"] if not stripe_account else stripe_account,
)
class ApplicationFee(StripeModel):
"""
When you collect a transaction fee on top of a charge made for your
user (using Connect), an ApplicationFee is created in your account.
Stripe documentation: https://stripe.com/docs/api#application_fees
"""
stripe_class = stripe.ApplicationFee
amount = StripeQuantumCurrencyAmountField(help_text="Amount earned, in cents.")
amount_refunded = StripeQuantumCurrencyAmountField(
help_text="Amount in cents refunded (can be less than the amount attribute "
"on the fee if a partial refund was issued)"
)
# TODO application = ...
balance_transaction = StripeForeignKey(
"BalanceTransaction",
on_delete=models.CASCADE,
help_text="Balance transaction that describes the impact on your account"
" balance.",
)
charge = StripeForeignKey(
"Charge",
on_delete=models.CASCADE,
help_text="The charge that the application fee was taken from.",
)
currency = StripeCurrencyCodeField()
# TODO originating_transaction = ... (refs. both Charge and Transfer)
refunded = models.BooleanField(
help_text=(
"Whether the fee has been fully refunded. If the fee is only "
"partially refunded, this attribute will still be false."
)
)
class ApplicationFeeRefund(StripeModel):
"""
ApplicationFeeRefund objects allow you to refund an ApplicationFee that
has previously been created but not yet refunded.
Funds will be refunded to the Stripe account from which the fee was
originally collected.
Stripe documentation: https://stripe.com/docs/api#fee_refunds
"""
description = None
amount = StripeQuantumCurrencyAmountField(help_text="Amount refunded, in cents.")
balance_transaction = StripeForeignKey(
"BalanceTransaction",
on_delete=models.CASCADE,
help_text="Balance transaction that describes the impact on your account "
"balance.",
)
currency = StripeCurrencyCodeField()
fee = StripeForeignKey(
"ApplicationFee",
on_delete=models.CASCADE,
related_name="refunds",
help_text="The application fee that was refunded",
)
class CountrySpec(StripeModel):
"""
Stripe documentation: https://stripe.com/docs/api#country_specs
"""
stripe_class = stripe.CountrySpec
id = models.CharField(max_length=2, primary_key=True, serialize=True)
default_currency = StripeCurrencyCodeField(
help_text=(
"The default currency for this country. "
"This applies to both payment methods and bank accounts."
)
)
supported_bank_account_currencies = JSONField(
help_text="Currencies that can be accepted in the specific country"
" (for transfers)."
)
supported_payment_currencies = JSONField(
help_text="Currencies that can be accepted in the specified country"
" (for payments)."
)
supported_payment_methods = JSONField(
help_text="Payment methods available in the specified country."
)
supported_transfer_countries = JSONField(
help_text="Countries that can accept transfers from the specified country."
)
verification_fields = JSONField(
help_text="Lists the types of verification data needed to keep an account open."
)
# Get rid of core common fields
djstripe_id = None
created = None
description = None
livemode = True
metadata = None
class Meta:
pass
class Transfer(StripeModel):
"""
When Stripe sends you money or you initiate a transfer to a bank account,
debit card, or connected Stripe account, a transfer object will be created.
Stripe documentation: https://stripe.com/docs/api/python#transfers
"""
stripe_class = stripe.Transfer
expand_fields = ["balance_transaction"]
stripe_dashboard_item_name = "transfers"
objects = TransferManager()
amount = StripeDecimalCurrencyAmountField(help_text="The amount transferred")
amount_reversed = StripeDecimalCurrencyAmountField(
null=True,
blank=True,
help_text="The amount (as decimal) reversed (can be less than the amount "
"attribute on the transfer if a partial reversal was issued).",
)
balance_transaction = StripeForeignKey(
"BalanceTransaction",
on_delete=models.SET_NULL,
null=True,
blank=True,
help_text="Balance transaction that describes the impact on your account"
" balance.",
)
currency = StripeCurrencyCodeField()
# TODO: Link destination to Card, Account, or Bank Account Models
destination = StripeIdField(
help_text="ID of the bank account, card, or Stripe account the transfer was "
"sent to."
)
destination_payment = StripeIdField(
null=True,
blank=True,
help_text="If the destination is a Stripe account, this will be the ID of the "
"payment that the destination account received for the transfer.",
)
reversed = models.BooleanField(
default=False,
help_text="Whether or not the transfer has been fully reversed. "
"If the transfer is only partially reversed, this attribute will still "
"be false.",
)
source_transaction = StripeIdField(
null=True,
help_text="ID of the charge (or other transaction) that was used to fund "
"the transfer. If null, the transfer was funded from the available balance.",
)
source_type = StripeEnumField(
enum=enums.LegacySourceType,
help_text="The source balance from which this transfer came.",
)
transfer_group = models.CharField(
max_length=255,
default="",
blank=True,
help_text="A string that identifies this transaction as part of a group.",
)
@property
def fee(self):
if self.balance_transaction:
return self.balance_transaction.fee
def str_parts(self):
return ["amount={amount}".format(amount=self.amount)] + super().str_parts()
class TransferReversal(StripeModel):
"""
Stripe documentation: https://stripe.com/docs/api#transfer_reversals
"""
stripe_class = stripe.Transfer
amount = StripeQuantumCurrencyAmountField(help_text="Amount, in cents.")
balance_transaction = StripeForeignKey(
"BalanceTransaction",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="transfer_reversals",
help_text="Balance transaction that describes the impact on your account "
"balance.",
)
currency = StripeCurrencyCodeField()
transfer = StripeForeignKey(
"Transfer",
on_delete=models.CASCADE,
help_text="The transfer that was reversed.",
related_name="reversals",
)
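# Hedged usage sketch (an assumption, not part of dj-stripe itself): once
# STRIPE_SECRET_KEY is configured in Django settings, the platform account can
# be synced into the local database like this.
#
#   from djstripe.models import Account
#   account = Account.get_default_account()   # returns None for restricted "rk_" keys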
|
the-stack_0_23547 | import argparse
import inspect
import logging
import os
import shutil
import onnx
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.python.tools import freeze_graph
import onnx_tf.backend as backend
from onnx_tf.common import get_unique_suffix
import onnx_tf.experiment.frontend as experiment_frontend
import onnx_tf.frontend as frontend
from onnx_tf.pb_wrapper import TensorflowGraph
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def main(args):
args = parse_args(args)
convert(**{k: v for k, v in vars(args).items() if v is not None})
def parse_args(args):
class ListAction(argparse.Action):
""" Define how to convert command line list strings to Python objects.
"""
def __call__(self, parser, namespace, values, option_string=None):
values = values if values[0] not in ("(", "[") or values[-1] not in (
")", "]") else values[1:-1]
res = []
for value in values.split(","):
if value.isdigit():
res.append(int(value))
else:
res.append(value)
setattr(namespace, self.dest, res)
class OpsetAction(argparse.Action):
""" Define how to convert command line opset strings to Python objects.
"""
def __call__(self, parser, namespace, values, option_string=None):
if values.isdigit():
setattr(namespace, "opset", int(values))
else:
res = []
while values and values[0] in ("(", "["):
values = values[1:]
while values and values[-1] in (")", "]"):
values = values[:-1]
for value in values.split("),("):
l, r = value.split(",")
res.append((l, int(r)))
setattr(namespace, "opset", res)
def get_param_doc_dict(funcs):
"""Get doc of funcs params.
Args:
funcs: Target funcs.
Returns:
Dict of params doc.
"""
# TODO(fumihwh): support google doc format
def helper(doc, func):
first_idx = doc.find(":param")
last_idx = doc.find(":return")
last_idx = last_idx if last_idx != -1 else len(doc)
param_doc = doc[first_idx:last_idx]
params_doc = param_doc.split(":param ")[1:]
return {
p[:p.find(": ")]: p[p.find(": ") + len(": "):] +
" (from {})".format(func.__module__ + "." + func.__name__)
for p in params_doc
}
param_doc_dict = {}
for func, persists in funcs:
doc = inspect.getdoc(func)
doc_dict = helper(doc, func)
for k, v in doc_dict.items():
if k not in persists:
continue
param_doc_dict[k] = {"doc": v, "params": persists[k]}
return param_doc_dict
parser = argparse.ArgumentParser(
description=
"This is the converter for converting protocol buffer between tf and onnx."
)
# required two args, source and destination path
parser.add_argument(
"--infile",
"-i",
help="Input file path, can be pb or ckpt file.",
required=True)
parser.add_argument(
"--outfile", "-o", help="Output file path.", required=True)
parser.add_argument(
"--convert_to",
"-t",
choices=["onnx", "tf"],
help="Format converted to.",
required=True)
parser.add_argument(
"--graph",
"-g",
help=
"Inference graph, which is obtained by optimizing or editing the training graph for better training usability."
)
def add_argument_group(parser, group_name, funcs):
group = parser.add_argument_group(group_name)
param_doc_dict = get_param_doc_dict(funcs)
for k, v in param_doc_dict.items():
group.add_argument("--{}".format(k), help=v["doc"], **v["params"])
def add_experimental_args(parser):
group = parser.add_argument_group("EXPERIMENTAL ARGUMENTS")
group.add_argument(
"--rnn_type",
choices=["GRU", "LSTM", "RNN"],
help=
"RNN graph type if using experimental feature: convert rnn graph to onnx."
)
# backend args
# Args must be named consistently with respect to backend.prepare.
add_argument_group(parser, "backend arguments (onnx -> tf)",
[(backend.prepare, {
"device": {},
"strict": {}
})])
# frontend args
# Args must be named consistently with respect to frontend.tensorflow_graph_to_onnx_model.
add_argument_group(parser, "frontend arguments (tf -> onnx)",
[(frontend.tensorflow_graph_to_onnx_model, {
"output": {
"action": ListAction,
"dest": "output"
},
"opset": {
"action": OpsetAction,
},
"ignore_unimplemented": {
"type": bool
},
"optimizer_passes": {
"action": ListAction,
"dest": "optimizer_passes"
}
})])
add_experimental_args(parser)
return parser.parse_args(args)
def convert(infile, outfile, convert_to, graph=None, **kwargs):
"""Convert pb.
Args:
infile: Input path.
outfile: Output path.
convert_to: Format converted to.
graph: Inference graph.
**kwargs: Other args for converting.
Returns:
None.
"""
if convert_to == "tf":
logger.info("Start converting onnx pb to tf pb:")
onnx_model = onnx.load(infile)
tf_rep = backend.prepare(onnx_model, **kwargs)
tf_rep.export_graph(outfile)
elif convert_to == "onnx":
ext = os.path.splitext(infile)[1]
logger.info("Start converting tf pb to onnx pb:")
if ext == ".pb":
with open(infile, "rb") as f:
graph_def = graph_pb2.GraphDef()
graph_def.ParseFromString(f.read())
elif ext == ".ckpt":
latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(infile))
saver = tf.train.import_meta_graph(latest_ckpt + ".meta")
temp_file_suffix = get_unique_suffix()
workdir = 'onnx-tf_workdir_{}'.format(temp_file_suffix)
with tf.Session() as sess:
sess.run([
tf.global_variables_initializer(),
tf.local_variables_initializer()
])
saver.restore(sess, latest_ckpt)
# Take users' hint or deduce output node automatically.
kwargs["output"] = kwargs.get(
"output", None) or TensorflowGraph.get_output_node_names(
sess.graph.as_graph_def())
# Save the graph to disk for freezing.
tf.train.write_graph(
sess.graph.as_graph_def(add_shapes=True),
workdir,
"input_model.pb",
as_text=False)
# Freeze graph:
freeze_graph.freeze_graph(
input_graph=graph or workdir + "/input_model.pb",
input_saver="",
input_binary=True,
input_checkpoint=latest_ckpt,
output_node_names=",".join(kwargs["output"]),
restore_op_name="",
filename_tensor_name="",
output_graph=workdir + "/frozen_model.pb",
clear_devices=True,
initializer_nodes="")
# Load back the frozen graph.
with open(workdir + "/frozen_model.pb", "rb") as f:
graph_def = graph_pb2.GraphDef()
graph_def.ParseFromString(f.read())
# Remove work directory.
shutil.rmtree(workdir)
else:
raise ValueError(
"Input file is not supported. Should be .pb or .ckpt, but get {}".
format(ext))
if "rnn_type" in kwargs:
onnx_model = experiment_frontend.rnn_tf_graph_to_onnx_model(graph_def, **kwargs)
else:
onnx_model = frontend.tensorflow_graph_to_onnx_model(graph_def, **kwargs)
onnx.save(onnx_model, outfile)
logger.info("Converting completes successfully.")
|
the-stack_0_23548 | # draw a line, wrong answers only
# make little tunnels, have the lines go through them, do S curves with quadratics
# SOCIALISM
# healthcare, social security, education, human rights, living wage
from p5 import *
from math import sin, cos, pi
from random import choice, shuffle, randint
def tunnel(x, y, sz): # sz is radius of arc, centered on x, y of arc
with push_style():
no_stroke()
with push_matrix():
translate(x, y)
fill(0, .57, .32)
arc(0, 0, sz+10, sz*1.1+10, PI, 2*PI)
rect(-sz/2-5, 0, sz+10, sz/2)
fill(0)
arc(0, 0, sz, sz*1.1, PI, 2*PI)
rect(-sz/2, 0, sz, sz/2)
def tunnel_shadow(x, y, sz):
# draw a shadow over whatever is entering/exiting the tunnel
a = 0
a_off = 1.0 / (sz/3)
with push_style():
no_stroke()
with push_matrix():
translate(x-sz/2, y+sz/2)
for i in range(int(sz/3)):
stroke(0, 0, 0, a)
line((0,-i), (sz,-i))
a += a_off
def setup():
size(1000, 1000)
color_mode('HSB', 360, 1, 1, 1)
f = create_font("brlnsr.ttf", 24)
text_font(f)
no_loop()
def draw():
background(199, 0.16, 0.29)
track_colours = ((0, 1, 1), (140, 0.53, 0.55), (210, 0.79, 0.55), (23, 0.97, 0.85), (209, 0.61, 0.64), (49, .91, .52))
words = ["HEALTH CARE", "EDUCATION", "SOCIAL SECURITY", "EQUALITY", "LIVING WAGE", "RESPECT"]
shuffle(words)
locs = (((130, 95), (290, 285)), # word loc, gate loc
((325, 635), (100, 415)),
((536, 692), (162, 906)),
((850, 915), (591, 916)),
((704, 441), (893, 705)),
((569, 288), (878, 112)),
)
paths = [ # set of quadratic paths to the gate
((130, 100), (132, 143), (191, 181), (313, 209), (363, 284), (324, 338), (292, 320), (289, 296)),
((325, 640), (294, 693), (183, 722), (96, 629), (137, 507), (98, 428)),
((536, 697), (534, 792), (469, 833), (418, 812), (428, 741), (489, 734), (507, 901), (412, 951), (311, 892), (206, 964), (161, 954), (160, 918)),
((850, 920), (866, 939), (925, 963), (976, 902), (918, 841), (783, 865), (699, 827), (647, 866), (681, 932), (644, 979), (595, 957), (590, 930)),
((704, 446), (704, 470), (744, 501), (784, 468), (828, 430), (872, 469), (835, 535), (784, 607), (729, 737), (863, 766), (893, 721)),
((569, 293), (512, 312), (428, 272), (450, 94), (388, 77), (388, 154), (700, 48), (759, 235), (697, 228), (723, 172), (868, 173), (879, 125)),
]
word_colours = list(range(len(track_colours)))
shuffle(word_colours)
text_align("CENTER", "BOTTOM")
for word, gate in locs:
w = words.pop()
p = paths.pop(0)
fill(0, 0, 1)
no_stroke()
text_size(randint(28, 32))
text(w, *word)
tw = randint(40, 60)
tunnel(*gate, tw)
# DEBUG
# stroke(0, 0, 1)
# line(word, gate)
# if len(p) == 2: # DEBUG
# continue
stroke(*track_colours[word_colours.pop()])
stroke_weight(6)
no_fill()
begin_shape()
vertex(*p[0])
vertex(*p[0])
for x, y in p[1:-1]:
curve_vertex(x, y)
# with push_style(): # DEBUG
# fill(0, 1, 1)
# no_stroke()
# circle(x, y, 10)
vertex(*p[-1])
vertex(*p[-1])
end_shape()
tunnel_shadow(*gate, tw)
#-------------- SOCIALISM!
tunnel(500, 400, 70)
# tracks coming out to text
xtop_w = 5
xbottom_w = 10
xpad = 4
xtop = ((500-(70/2))+((xtop_w+xpad/2)*len(track_colours)/8))
ytop = 420
xbottom = 530
ybottom = 530
tracks = []
for i in range(len(track_colours)):
track = (
(xtop, ytop),
(xtop+xtop_w, ytop),
(xbottom+xbottom_w, ybottom),
(xbottom, ybottom)
)
tracks.append(track)
xtop += xtop_w + xpad/2
xbottom += xbottom_w + xpad
for c, points in zip(track_colours, tracks):
fill(*c)
no_stroke()
begin_shape()
for p in points:
vertex(*p)
end_shape(CLOSE)
tunnel_shadow(500, 400, 70)
with push_style():
text_align("CENTER", "CENTER")
fill(0)
text_size(40)
text("</S>OCIALISM!", 583, 553)
fill(0, 1, 1)
stroke(0)
text_size(40)
stroke_weight(2)
text("</S>OCIALISM!", 580, 550)
save("22.png")
def mouse_pressed():
print(f"{mouse_x}, {mouse_y}")
with push_style():
no_stroke()
fill(0, 1, 1)
circle(mouse_x, mouse_y, 5)
run() |
the-stack_0_23549 | # Code Listing #3
"""
Thumbnail producer/consumer - Limiting number of images using a lock
"""
import threading
import time
import string
import random
import uuid
import urllib.request
from PIL import Image
from queue import Queue
class ThumbnailURL_Generator(threading.Thread):
""" Worker class that generates image URLs """
def __init__(self, queue, sleep_time=1,):
self.sleep_time = sleep_time
self.queue = queue
# A flag for stopping
self.flag = True
# sizes
self._sizes = (240,320,360,480,600,720)
# URL scheme
self.url_template = 'https://dummyimage.com/%s/%s/%s.jpg'
threading.Thread.__init__(self)
def __str__(self):
return 'Producer'
def get_size(self):
return '%dx%d' % (random.choice(self._sizes),
random.choice(self._sizes))
def get_color(self):
return ''.join(random.sample(string.hexdigits[:-6], 3))
def run(self):
""" Main thread function """
while self.flag:
# generate image URLs of random sizes and fg/bg colors
url = self.url_template % (self.get_size(),
self.get_color(),
self.get_color())
# Add to queue
print(self,'Put',url)
self.queue.put(url)
time.sleep(self.sleep_time)
def stop(self):
""" Stop the thread """
self.flag = False
class ThumbnailImageSaver(object):
""" Class which saves URLs to thumbnail images and keeps a counter """
def __init__(self, limit=10):
self.limit = limit
self.lock = threading.Lock()
self.counter = {}
def thumbnail_image(self, url, size=(64,64), format='.png'):
""" Save image thumbnails, given a URL """
im=Image.open(urllib.request.urlopen(url))
# filename is last two parts of URL minus extension + '.format'
pieces = url.split('/')
filename = ''.join((pieces[-2],'_',pieces[-1].split( '.')[0],'_thumb',format))
im.thumbnail(size, Image.ANTIALIAS)
im.save(filename)
print('Saved',filename)
self.counter[filename] = 1
return True
def save(self, url):
""" Save a URL as thumbnail """
with self.lock:
if len(self.counter)>=self.limit:
return False
self.thumbnail_image(url)
print('\tCount=>',len(self.counter))
return True
class ThumbnailURL_Consumer(threading.Thread):
""" Worker class that consumes URLs and generates thumbnails """
def __init__(self, queue, saver):
self.queue = queue
self.flag = True
self.saver = saver
self.count = 0
# Internal id
self._id = uuid.uuid4().hex
threading.Thread.__init__(self, name='Consumer-'+ self._id)
def __str__(self):
return 'Consumer-' + self._id
def run(self):
""" Main thread function """
while self.flag:
url = self.queue.get()
print(self,'Got',url)
self.count += 1
if not self.saver.save(url):
# Limit reached, break out
print(self, 'Set limit reached, quitting')
break
def stop(self):
""" Stop the thread """
self.flag = False
if __name__ == '__main__':
from queue import Queue
import glob,os
os.system('rm -f *.png')
q = Queue(maxsize = 2000)
saver = ThumbnailImageSaver(limit = 50)
producers, consumers = [], []
for i in range(3):
t = ThumbnailURL_Generator(q)
producers.append(t)
t.start()
for i in range(5):
t = ThumbnailURL_Consumer(q, saver)
consumers.append(t)
t.start()
for t in consumers:
t.join()
print('Joined', t, flush=True)
# To make sure producers dont block on a full queue
while not q.empty():
item=q.get()
for t in producers:
t.stop()
print('Stopped',t, flush=True)
print('Total number of PNG images',len(glob.glob('*.png')))
|
the-stack_0_23550 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class OperationPaged(Paged):
"""
A paging container for iterating over a list of :class:`Operation <azure.mgmt.resource.resources.v2019_05_01.models.Operation>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Operation]'}
}
def __init__(self, *args, **kwargs):
super(OperationPaged, self).__init__(*args, **kwargs)
class DeploymentExtendedPaged(Paged):
"""
A paging container for iterating over a list of :class:`DeploymentExtended <azure.mgmt.resource.resources.v2019_05_01.models.DeploymentExtended>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[DeploymentExtended]'}
}
def __init__(self, *args, **kwargs):
super(DeploymentExtendedPaged, self).__init__(*args, **kwargs)
class ProviderPaged(Paged):
"""
A paging container for iterating over a list of :class:`Provider <azure.mgmt.resource.resources.v2019_05_01.models.Provider>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Provider]'}
}
def __init__(self, *args, **kwargs):
super(ProviderPaged, self).__init__(*args, **kwargs)
class GenericResourceExpandedPaged(Paged):
"""
A paging container for iterating over a list of :class:`GenericResourceExpanded <azure.mgmt.resource.resources.v2019_05_01.models.GenericResourceExpanded>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[GenericResourceExpanded]'}
}
def __init__(self, *args, **kwargs):
super(GenericResourceExpandedPaged, self).__init__(*args, **kwargs)
class ResourceGroupPaged(Paged):
"""
A paging container for iterating over a list of :class:`ResourceGroup <azure.mgmt.resource.resources.v2019_05_01.models.ResourceGroup>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[ResourceGroup]'}
}
def __init__(self, *args, **kwargs):
super(ResourceGroupPaged, self).__init__(*args, **kwargs)
class TagDetailsPaged(Paged):
"""
A paging container for iterating over a list of :class:`TagDetails <azure.mgmt.resource.resources.v2019_05_01.models.TagDetails>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[TagDetails]'}
}
def __init__(self, *args, **kwargs):
super(TagDetailsPaged, self).__init__(*args, **kwargs)
class DeploymentOperationPaged(Paged):
"""
A paging container for iterating over a list of :class:`DeploymentOperation <azure.mgmt.resource.resources.v2019_05_01.models.DeploymentOperation>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[DeploymentOperation]'}
}
def __init__(self, *args, **kwargs):
super(DeploymentOperationPaged, self).__init__(*args, **kwargs)
|
the-stack_0_23552 | # Software License Agreement (BSD License)
#
# Copyright (c) 2010, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import optparse
import os
import shutil
import signal
import subprocess
import sys
import time
try:
    from UserDict import UserDict  # Python 2.x
except ImportError:
    from collections import UserDict  # Python 3.x
try:
    raw_input  # Python 2.x
except NameError:
    raw_input = input  # Python 3.x (used by the interactive prompts in check_cmd)
import roslib.message
import roslib.packages
from .bag import Bag, Compression, ROSBagException, ROSBagFormatException, ROSBagUnindexedException
from .migration import MessageMigrator, fixbag2, checkbag
def print_trans(old, new, indent):
from_txt = '%s [%s]' % (old._type, old._md5sum)
if new is not None:
to_txt= '%s [%s]' % (new._type, new._md5sum)
else:
to_txt = 'Unknown'
print(' ' * indent + ' * From: %s' % from_txt)
print(' ' * indent + ' To: %s' % to_txt)
def handle_split(option, opt_str, value, parser):
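    # optparse callback: always set the split flag; if the next argument is a
    # bare number, consume it as the deprecated "--split <MAX_SIZE>" form and
    # warn that --size/--duration should be used instead.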
parser.values.split = True
if len(parser.rargs) > 0 and parser.rargs[0].isdigit():
print("Use of \"--split <MAX_SIZE>\" has been deprecated. Please use --split --size <MAX_SIZE> or --split --duration <MAX_DURATION>", file=sys.stderr)
parser.values.size = int(parser.rargs.pop(0))
def record_cmd(argv):
parser = optparse.OptionParser(usage="rosbag record TOPIC1 [TOPIC2 TOPIC3 ...]",
description="Record a bag file with the contents of specified topics.",
formatter=optparse.IndentedHelpFormatter())
parser.add_option("-a", "--all", dest="all", default=False, action="store_true", help="record all topics")
parser.add_option("-e", "--regex", dest="regex", default=False, action="store_true", help="match topics using regular expressions")
parser.add_option("-x", "--exclude", dest="exclude_regex", default="", action="store", help="exclude topics matching the follow regular expression (subtracts from -a or regex)")
parser.add_option("-q", "--quiet", dest="quiet", default=False, action="store_true", help="suppress console output")
parser.add_option("-o", "--output-prefix", dest="prefix", default=None, action="store", help="prepend PREFIX to beginning of bag name (name will always end with date stamp)")
parser.add_option("-O", "--output-name", dest="name", default=None, action="store", help="record to bag with name NAME.bag")
parser.add_option( "--split", dest="split", default=False, callback=handle_split, action="callback", help="split the bag when maximum size or duariton is reached")
parser.add_option( "--size", dest="size", type='int', action="store", help="record a bag of maximum size SIZE", metavar="SIZE")
parser.add_option( "--duration", dest="duration", type='string',action="store", help="record a bag of maximum duration DURATION in seconds, unless 'm', or 'h' is appended.", metavar="DURATION")
parser.add_option("-b", "--buffsize", dest="buffsize", default=256, type='int', action="store", help="use an internal buffer of SIZE MB (Default: %default, 0 = infinite)", metavar="SIZE")
parser.add_option("--chunksize", dest="chunksize", default=768, type='int', action="store", help="Advanced. Record to chunks of SIZE KB (Default: %default)", metavar="SIZE")
parser.add_option("-l", "--limit", dest="num", default=0, type='int', action="store", help="only record NUM messages on each topic")
parser.add_option( "--node", dest="node", default=None, type='string',action="store", help="record all topics subscribed to by a specific node")
parser.add_option("-j", "--bz2", dest="bz2", default=False, action="store_true", help="use BZ2 compression")
(options, args) = parser.parse_args(argv)
if len(args) == 0 and not options.all and not options.node:
parser.error("You must specify a topic name or else use the '-a' option.")
if options.prefix is not None and options.name is not None:
parser.error("Can't set both prefix and name.")
recordpath = roslib.packages.find_node('rosbag', 'record')
if not recordpath:
parser.error("Cannot find rosbag/record executable")
cmd = [recordpath[0]]
cmd.extend(['--buffsize', str(options.buffsize)])
cmd.extend(['--chunksize', str(options.chunksize)])
if options.num != 0: cmd.extend(['--limit', str(options.num)])
if options.quiet: cmd.extend(["--quiet"])
if options.prefix: cmd.extend(["-o", options.prefix])
if options.name: cmd.extend(["-O", options.name])
if options.exclude_regex: cmd.extend(["--exclude", options.exclude_regex])
if options.all: cmd.extend(["--all"])
if options.regex: cmd.extend(["--regex"])
if options.bz2: cmd.extend(["--bz2"])
if options.split:
if not options.duration and not options.size:
parser.error("Split specified without giving a maximum duration or size")
cmd.extend(["--split"])
if options.duration: cmd.extend(["--duration", options.duration])
if options.size: cmd.extend(["--size", str(options.size)])
if options.node:
cmd.extend(["--node", options.node])
cmd.extend(args)
# Better way of handling it than os.execv
# This makes sure stdin handles are passed to the process.
subprocess.call(cmd)
def info_cmd(argv):
parser = optparse.OptionParser(usage='rosbag info [options] BAGFILE1 [BAGFILE2 BAGFILE3 ...]',
description='Summarize the contents of one or more bag files.')
parser.add_option('-y', '--yaml', dest='yaml', default=False, action='store_true', help='print information in YAML format')
parser.add_option('-k', '--key', dest='key', default=None, action='store', help='print information on the given key')
parser.add_option( '--freq', dest='freq', default=False, action='store_true', help='display topic message frequency statistics')
(options, args) = parser.parse_args(argv)
if len(args) == 0:
parser.error('You must specify at least 1 bag file.')
if options.key and not options.yaml:
parser.error('You can only specify key when printing in YAML format.')
for i, arg in enumerate(args):
try:
b = Bag(arg, 'r', skip_index=not options.freq)
if options.yaml:
info = b._get_yaml_info(key=options.key)
if info is not None:
print(info)
else:
print(b)
b.close()
if i < len(args) - 1:
print('---')
except ROSBagUnindexedException as ex:
print('ERROR bag unindexed: %s. Run rosbag reindex.' % arg,
file=sys.stderr)
except ROSBagException as ex:
print('ERROR reading %s: %s' % (arg, str(ex)), file=sys.stderr)
except IOError as ex:
print('ERROR reading %s: %s' % (arg, str(ex)), file=sys.stderr)
def handle_topics(option, opt_str, value, parser):
topics = []
for arg in parser.rargs:
if arg[:2] == "--" and len(arg) > 2:
break
if arg[:1] == "-" and len(arg) > 1:
break
topics.append(arg)
parser.values.topics.extend(topics)
del parser.rargs[:len(topics)]
def play_cmd(argv):
parser = optparse.OptionParser(usage="rosbag play BAGFILE1 [BAGFILE2 BAGFILE3 ...]",
description="Play back the contents of one or more bag files in a time-synchronized fashion.")
parser.add_option("-q", "--quiet", dest="quiet", default=False, action="store_true", help="suppress console output")
parser.add_option("-i", "--immediate", dest="immediate", default=False, action="store_true", help="play back all messages without waiting")
parser.add_option("--pause", dest="pause", default=False, action="store_true", help="start in paused mode")
parser.add_option("--queue", dest="queue", default=100, type='int', action="store", help="use an outgoing queue of size SIZE (defaults to %default)", metavar="SIZE")
parser.add_option("--clock", dest="clock", default=False, action="store_true", help="publish the clock time")
parser.add_option("--hz", dest="freq", default=100, type='float', action="store", help="use a frequency of HZ when publishing clock time (default: %default)", metavar="HZ")
parser.add_option("-d", "--delay", dest="delay", default=0.2, type='float', action="store", help="sleep SEC seconds after every advertise call (to allow subscribers to connect)", metavar="SEC")
parser.add_option("-r", "--rate", dest="rate", default=1.0, type='float', action="store", help="multiply the publish rate by FACTOR", metavar="FACTOR")
parser.add_option("-s", "--start", dest="start", default=0.0, type='float', action="store", help="start SEC seconds into the bag files", metavar="SEC")
parser.add_option("-u", "--duration", dest="duration", default=None, type='float', action="store", help="play only SEC seconds from the bag files", metavar="SEC")
parser.add_option("--skip-empty", dest="skip_empty", default=None, type='float', action="store", help="skip regions in the bag with no messages for more than SEC seconds", metavar="SEC")
parser.add_option("-l", "--loop", dest="loop", default=False, action="store_true", help="loop playback")
parser.add_option("-k", "--keep-alive", dest="keep_alive", default=False, action="store_true", help="keep alive past end of bag (useful for publishing latched topics)")
parser.add_option("--try-future-version", dest="try_future", default=False, action="store_true", help="still try to open a bag file, even if the version number is not known to the player")
parser.add_option("--topics", dest="topics", default=[], callback=handle_topics, action="callback", help="topics to play back")
parser.add_option("--bags", help="bags files to play back from")
(options, args) = parser.parse_args(argv)
if len(args) == 0:
parser.error('You must specify at least 1 bag file to play back.')
playpath = roslib.packages.find_node('rosbag', 'play')
if not playpath:
parser.error("Cannot find rosbag/play executable")
cmd = [playpath[0]]
if options.quiet: cmd.extend(["--quiet"])
if options.pause: cmd.extend(["--pause"])
if options.immediate: cmd.extend(["--immediate"])
if options.loop: cmd.extend(["--loop"])
if options.keep_alive: cmd.extend(["--keep-alive"])
if options.try_future: cmd.extend(["--try-future-version"])
if options.clock:
cmd.extend(["--clock", "--hz", str(options.freq)])
cmd.extend(['--queue', str(options.queue)])
cmd.extend(['--rate', str(options.rate)])
cmd.extend(['--delay', str(options.delay)])
cmd.extend(['--start', str(options.start)])
if options.duration:
cmd.extend(['--duration', str(options.duration)])
if options.skip_empty:
cmd.extend(['--skip-empty', str(options.skip_empty)])
if options.topics:
cmd.extend(['--topics'] + options.topics + ['--bags'])
cmd.extend(args)
# Better way of handling it than os.execv
# This makes sure stdin handles are passed to the process.
subprocess.call(cmd)
def filter_cmd(argv):
def expr_eval(expr):
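        # Build a closure that evaluates the user-supplied Python expression
        # with `topic`, `m` (the message) and `t` (its timestamp) in scope for
        # every record read from the input bag.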
def eval_fn(topic, m, t):
return eval(expr)
return eval_fn
parser = optparse.OptionParser(usage="""rosbag filter [options] INBAG OUTBAG EXPRESSION
EXPRESSION can be any Python-legal expression.
The following variables are available:
* topic: name of topic
* m: message
* t: time of message (t.secs, t.nsecs)""",
description='Filter the contents of the bag.')
parser.add_option('-p', '--print', action='store', dest='verbose_pattern', default=None, metavar='PRINT-EXPRESSION', help='Python expression to print for verbose debugging. Uses same variables as filter-expression')
options, args = parser.parse_args(argv)
if len(args) == 0:
parser.error('You must specify an in bag, an out bag, and an expression.')
if len(args) == 1:
parser.error('You must specify an out bag and an expression.')
if len(args) == 2:
parser.error("You must specify an expression.")
if len(args) > 3:
parser.error("Too many arguments.")
inbag_filename, outbag_filename, expr = args
if not os.path.isfile(inbag_filename):
print('Cannot locate input bag file [%s]' % inbag_filename, file=sys.stderr)
sys.exit(2)
if os.path.realpath(inbag_filename) == os.path.realpath(outbag_filename):
print('Cannot use same file as input and output [%s]' % inbag_filename, file=sys.stderr)
sys.exit(3)
filter_fn = expr_eval(expr)
outbag = Bag(outbag_filename, 'w')
try:
inbag = Bag(inbag_filename)
except ROSBagUnindexedException as ex:
print('ERROR bag unindexed: %s. Run rosbag reindex.' % inbag_filename, file=sys.stderr)
return
try:
meter = ProgressMeter(outbag_filename, inbag.size)
total_bytes = 0
if options.verbose_pattern:
verbose_pattern = expr_eval(options.verbose_pattern)
for topic, raw_msg, t in inbag.read_messages(raw=True):
msg_type, serialized_bytes, md5sum, pos, pytype = raw_msg
msg = pytype()
msg.deserialize(serialized_bytes)
if filter_fn(topic, msg, t):
print('MATCH', verbose_pattern(topic, msg, t))
outbag.write(topic, msg, t)
else:
print('NO MATCH', verbose_pattern(topic, msg, t))
total_bytes += len(serialized_bytes)
meter.step(total_bytes)
else:
for topic, raw_msg, t in inbag.read_messages(raw=True):
msg_type, serialized_bytes, md5sum, pos, pytype = raw_msg
msg = pytype()
msg.deserialize(serialized_bytes)
if filter_fn(topic, msg, t):
outbag.write(topic, msg, t)
total_bytes += len(serialized_bytes)
meter.step(total_bytes)
meter.finish()
finally:
inbag.close()
outbag.close()
def fix_cmd(argv):
parser = optparse.OptionParser(usage='rosbag fix INBAG OUTBAG [EXTRARULES1 EXTRARULES2 ...]', description='Repair the messages in a bag file so that it can be played in the current system.')
parser.add_option('-n', '--noplugins', action='store_true', dest='noplugins', help='do not load rulefiles via plugins')
parser.add_option('--force', action='store_true', dest='force', help='proceed with migrations, even if not all rules defined')
(options, args) = parser.parse_args(argv)
if len(args) < 1:
parser.error('You must pass input and output bag files.')
if len(args) < 2:
parser.error('You must pass an output bag file.')
inbag_filename = args[0]
outbag_filename = args[1]
rules = args[2:]
ext = os.path.splitext(outbag_filename)[1]
if ext == '.bmr':
parser.error('Input file should be a bag file, not a rule file.')
if ext != '.bag':
parser.error('Output file must be a bag file.')
outname = outbag_filename + '.tmp'
if os.path.exists(outbag_filename):
if not os.access(outbag_filename, os.W_OK):
print('Don\'t have permissions to access %s' % outbag_filename, file=sys.stderr)
sys.exit(1)
else:
try:
file = open(outbag_filename, 'w')
file.close()
except IOError as e:
print('Cannot open %s for writing' % outbag_filename, file=sys.stderr)
sys.exit(1)
if os.path.exists(outname):
if not os.access(outname, os.W_OK):
print('Don\'t have permissions to access %s' % outname, file=sys.stderr)
sys.exit(1)
else:
try:
file = open(outname, 'w')
file.close()
except IOError as e:
print('Cannot open %s for writing' % outname, file=sys.stderr)
sys.exit(1)
if options.noplugins is None:
options.noplugins = False
migrator = MessageMigrator(rules, plugins=not options.noplugins)
try:
migrations = fixbag2(migrator, inbag_filename, outname, options.force)
except ROSBagUnindexedException as ex:
print('ERROR bag unindexed: %s. Run rosbag reindex.' % inbag_filename,
file=sys.stderr)
return
if len(migrations) == 0:
os.rename(outname, outbag_filename)
print('Bag migrated successfully.')
else:
print('Bag could not be migrated. The following migrations could not be performed:')
for m in migrations:
print_trans(m[0][0].old_class, m[0][-1].new_class, 0)
if len(m[1]) > 0:
print(' %d rules missing:' % len(m[1]))
for r in m[1]:
print_trans(r.old_class, r.new_class,1)
print('Try running \'rosbag check\' to create the necessary rule files or run \'rosbag fix\' with the \'--force\' option.')
os.remove(outname)
def check_cmd(argv):
parser = optparse.OptionParser(usage='rosbag check BAG [-g RULEFILE] [EXTRARULES1 EXTRARULES2 ...]', description='Determine whether a bag is playable in the current system, or if it can be migrated.')
parser.add_option('-g', '--genrules', action='store', dest='rulefile', default=None, help='generate a rulefile named RULEFILE')
parser.add_option('-a', '--append', action='store_true', dest='append', help='append to the end of an existing rulefile after loading it')
parser.add_option('-n', '--noplugins', action='store_true', dest='noplugins', help='do not load rulefiles via plugins')
(options, args) = parser.parse_args(argv)
if len(args) == 0:
parser.error('You must specify a bag file to check.')
if options.append and options.rulefile is None:
parser.error('Cannot specify -a without also specifying -g.')
if options.rulefile is not None:
rulefile_exists = os.path.isfile(options.rulefile)
if rulefile_exists and not options.append:
parser.error('The file %s already exists. Include -a if you intend to append.' % options.rulefile)
if not rulefile_exists and options.append:
parser.error('The file %s does not exist, and so -a is invalid.' % options.rulefile)
if options.append:
append_rule = [options.rulefile]
else:
append_rule = []
# First check that the bag is not unindexed
try:
Bag(args[0])
except ROSBagUnindexedException as ex:
print('ERROR bag unindexed: %s. Run rosbag reindex.' % args[0], file=sys.stderr)
return
mm = MessageMigrator(args[1:] + append_rule, not options.noplugins)
migrations = checkbag(mm, args[0])
if len(migrations) == 0:
print('Bag file is up to date.')
exit(0)
print('The following migrations need to occur:')
all_rules = []
for m in migrations:
all_rules.extend(m[1])
print_trans(m[0][0].old_class, m[0][-1].new_class, 0)
if len(m[1]) > 0:
print(" %d rules missing:" % len(m[1]))
for r in m[1]:
print_trans(r.old_class, r.new_class, 1)
if options.rulefile is None:
if all_rules == []:
print("\nAll rules defined. Bag is ready to be migrated")
else:
print("\nTo generate rules, please run with -g <rulefile>")
exit(0)
output = ''
rules_left = mm.filter_rules_unique(all_rules)
if rules_left == []:
print("\nNo additional rule files needed to be generated. %s not created."%(options.rulefile))
exit(0)
while len(rules_left) > 0:
extra_rules = []
for r in rules_left:
if r.new_class is None:
print('The message type %s appears to have moved. Please enter the type to migrate it to.' % r.old_class._type)
new_type = raw_input('>')
new_class = roslib.message.get_message_class(new_type)
while new_class is None:
print("\'%s\' could not be found in your system. Please make sure it is built." % new_type)
new_type = raw_input('>')
new_class = roslib.message.get_message_class(new_type)
new_rule = mm.make_update_rule(r.old_class, new_class)
R = new_rule(mm, 'GENERATED.' + new_rule.__name__)
R.find_sub_paths()
new_rules = [r for r in mm.expand_rules(R.sub_rules) if r.valid == False]
extra_rules.extend(new_rules)
print('Creating the migration rule for %s requires additional missing rules:' % new_type)
for nr in new_rules:
print_trans(nr.old_class, nr.new_class,1)
output += R.get_class_def()
else:
output += r.get_class_def()
rules_left = mm.filter_rules_unique(extra_rules)
f = open(options.rulefile, 'a')
f.write(output)
f.close()
print('\nThe necessary rule files have been written to: %s' % options.rulefile)
def compress_cmd(argv):
parser = optparse.OptionParser(usage='rosbag compress [options] BAGFILE1 [BAGFILE2 ...]',
description='Compress one or more bag files.')
parser.add_option( '--output-dir', action='store', dest='output_dir', help='write to directory DIR', metavar='DIR')
parser.add_option('-f', '--force', action='store_true', dest='force', help='force overwriting of backup file if it exists')
parser.add_option('-q', '--quiet', action='store_true', dest='quiet', help='suppress noncritical messages')
(options, args) = parser.parse_args(argv)
if len(args) < 1:
parser.error('You must specify at least one bag file.')
op = lambda inbag, outbag, quiet: change_compression_op(inbag, outbag, Compression.BZ2, options.quiet)
bag_op(args, False, lambda b: False, op, options.output_dir, options.force, options.quiet)
def decompress_cmd(argv):
parser = optparse.OptionParser(usage='rosbag decompress [options] BAGFILE1 [BAGFILE2 ...]',
description='Decompress one or more bag files.')
parser.add_option( '--output-dir', action='store', dest='output_dir', help='write to directory DIR', metavar='DIR')
parser.add_option('-f', '--force', action='store_true', dest='force', help='force overwriting of backup file if it exists')
parser.add_option('-q', '--quiet', action='store_true', dest='quiet', help='suppress noncritical messages')
(options, args) = parser.parse_args(argv)
if len(args) < 1:
parser.error('You must specify at least one bag file.')
op = lambda inbag, outbag, quiet: change_compression_op(inbag, outbag, Compression.NONE, options.quiet)
bag_op(args, False, lambda b: False, op, options.output_dir, options.force, options.quiet)
def reindex_cmd(argv):
parser = optparse.OptionParser(usage='rosbag reindex [options] BAGFILE1 [BAGFILE2 ...]',
description='Reindexes one or more bag files.')
parser.add_option( '--output-dir', action='store', dest='output_dir', help='write to directory DIR', metavar='DIR')
parser.add_option('-f', '--force', action='store_true', dest='force', help='force overwriting of backup file if it exists')
parser.add_option('-q', '--quiet', action='store_true', dest='quiet', help='suppress noncritical messages')
(options, args) = parser.parse_args(argv)
if len(args) < 1:
parser.error('You must specify at least one bag file.')
op = lambda inbag, outbag, quiet: reindex_op(inbag, outbag, options.quiet)
bag_op(args, True, lambda b: b.version > 102, op, options.output_dir, options.force, options.quiet)
def bag_op(inbag_filenames, allow_unindexed, copy_fn, op, output_dir=None, force=False, quiet=False):
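    # For each input bag: validate it, let copy_fn decide whether the original
    # is copied or renamed to a ".orig" backup, run `op` to produce the output
    # bag, and restore the backup if the user interrupts.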
for inbag_filename in inbag_filenames:
# Check we can read the file
try:
inbag = Bag(inbag_filename, 'r', allow_unindexed=allow_unindexed)
except ROSBagUnindexedException:
print('ERROR bag unindexed: %s. Run rosbag reindex.' % inbag_filename, file=sys.stderr)
continue
except (ROSBagException, IOError) as ex:
print('ERROR reading %s: %s' % (inbag_filename, str(ex)), file=sys.stderr)
continue
# Determine whether we should copy the bag
copy = copy_fn(inbag)
inbag.close()
# Determine filename for output bag
if output_dir is None:
outbag_filename = inbag_filename
else:
outbag_filename = os.path.join(output_dir, os.path.split(inbag_filename)[1])
backup_filename = None
if outbag_filename == inbag_filename:
# Rename the input bag to ###.orig.###, and open for reading
backup_filename = '%s.orig%s' % os.path.splitext(inbag_filename)
if not force and os.path.exists(backup_filename):
if not quiet:
print('Skipping %s. Backup path %s already exists.' % (inbag_filename, backup_filename), file=sys.stderr)
continue
try:
if copy:
shutil.copy(inbag_filename, backup_filename)
else:
os.rename(inbag_filename, backup_filename)
except OSError as ex:
print('ERROR %s %s to %s: %s' % ('copying' if copy else 'moving', inbag_filename, backup_filename, str(ex)), file=sys.stderr)
continue
source_filename = backup_filename
else:
if copy:
shutil.copy(inbag_filename, outbag_filename)
source_filename = outbag_filename
else:
source_filename = inbag_filename
try:
inbag = Bag(source_filename, 'r', allow_unindexed=allow_unindexed)
# Open the output bag file for writing
try:
if copy:
outbag = Bag(outbag_filename, 'a', allow_unindexed=allow_unindexed)
else:
outbag = Bag(outbag_filename, 'w')
except (ROSBagException, IOError) as ex:
print('ERROR writing to %s: %s' % (outbag_filename, str(ex)), file=sys.stderr)
inbag.close()
continue
# Perform the operation
try:
op(inbag, outbag, quiet=quiet)
except ROSBagException as ex:
print('\nERROR operating on %s: %s' % (source_filename, str(ex)), file=sys.stderr)
inbag.close()
outbag.close()
continue
outbag.close()
inbag.close()
except KeyboardInterrupt:
if backup_filename is not None:
try:
if copy:
os.remove(backup_filename)
else:
os.rename(backup_filename, inbag_filename)
except OSError as ex:
                    print('ERROR %s %s to %s: %s' % ('removing' if copy else 'moving', backup_filename, inbag_filename, str(ex)), file=sys.stderr)
break
except (ROSBagException, IOError) as ex:
print('ERROR operating on %s: %s' % (inbag_filename, str(ex)), file=sys.stderr)
def change_compression_op(inbag, outbag, compression, quiet):
outbag.compression = compression
if quiet:
for topic, msg, t in inbag.read_messages(raw=True):
outbag.write(topic, msg, t, raw=True)
else:
meter = ProgressMeter(outbag.filename, inbag._uncompressed_size)
total_bytes = 0
for topic, msg, t in inbag.read_messages(raw=True):
msg_type, serialized_bytes, md5sum, pos, pytype = msg
outbag.write(topic, msg, t, raw=True)
total_bytes += len(serialized_bytes)
meter.step(total_bytes)
meter.finish()
def reindex_op(inbag, outbag, quiet):
if inbag.version == 102:
if quiet:
try:
for offset in inbag.reindex():
pass
except:
pass
for (topic, msg, t) in inbag.read_messages():
outbag.write(topic, msg, t)
else:
meter = ProgressMeter(outbag.filename, inbag.size)
try:
for offset in inbag.reindex():
meter.step(offset)
except:
pass
meter.finish()
meter = ProgressMeter(outbag.filename, inbag.size)
for (topic, msg, t) in inbag.read_messages():
outbag.write(topic, msg, t)
meter.step(inbag._file.tell())
meter.finish()
else:
if quiet:
try:
for offset in outbag.reindex():
pass
except:
pass
else:
meter = ProgressMeter(outbag.filename, outbag.size)
try:
for offset in outbag.reindex():
meter.step(offset)
except:
pass
meter.finish()
class RosbagCmds(UserDict):
def __init__(self):
UserDict.__init__(self)
self._description = {}
self['help'] = self.help_cmd
def add_cmd(self, name, function, description):
self[name] = function
self._description[name] = description
def get_valid_cmds(self):
str = "Available subcommands:\n"
for k in sorted(self.keys()):
str += " %s " % k
if k in self._description.keys():
str +="\t%s" % self._description[k]
str += "\n"
return str
def help_cmd(self,argv):
argv = [a for a in argv if a != '-h' and a != '--help']
if len(argv) == 0:
print('Usage: rosbag <subcommand> [options] [args]')
print()
print("A bag is a file format in ROS for storing ROS message data. The rosbag command can record, replay and manipulate bags.")
print()
print(self.get_valid_cmds())
print('For additional information, see http://wiki.ros.org/rosbag')
print()
return
cmd = argv[0]
if cmd in self:
self[cmd](['-h'])
else:
print("Unknown command: '%s'" % cmd, file=sys.stderr)
print(self.get_valid_cmds(), file=sys.stderr)
class ProgressMeter(object):
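    """Console progress meter showing percent complete, bytes processed and an ETA."""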
def __init__(self, path, bytes_total, refresh_rate=1.0):
self.path = path
self.bytes_total = bytes_total
self.refresh_rate = refresh_rate
self.elapsed = 0.0
self.update_elapsed = 0.0
self.bytes_read = 0.0
self.start_time = time.time()
self._update_progress()
def step(self, bytes_read, force_update=False):
self.bytes_read = bytes_read
self.elapsed = time.time() - self.start_time
if force_update or self.elapsed - self.update_elapsed > self.refresh_rate:
self._update_progress()
self.update_elapsed = self.elapsed
def _update_progress(self):
max_path_len = self.terminal_width() - 37
path = self.path
if len(path) > max_path_len:
path = '...' + self.path[-max_path_len + 3:]
bytes_read_str = self._human_readable_size(float(self.bytes_read))
bytes_total_str = self._human_readable_size(float(self.bytes_total))
if self.bytes_read < self.bytes_total:
complete_fraction = float(self.bytes_read) / self.bytes_total
pct_complete = int(100.0 * complete_fraction)
if complete_fraction > 0.0:
eta = self.elapsed * (1.0 / complete_fraction - 1.0)
eta_min, eta_sec = eta / 60, eta % 60
if eta_min > 99:
eta_str = '--:--'
else:
eta_str = '%02d:%02d' % (eta_min, eta_sec)
else:
eta_str = '--:--'
progress = '%-*s %3d%% %8s / %8s %s ETA' % (max_path_len, path, pct_complete, bytes_read_str, bytes_total_str, eta_str)
else:
progress = '%-*s 100%% %19s %02d:%02d ' % (max_path_len, path, bytes_total_str, self.elapsed / 60, self.elapsed % 60)
print('\r', progress, end='')
sys.stdout.flush()
def _human_readable_size(self, size):
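        # The value is divided before each comparison, so sizes below 1 MB are
        # always reported in KB (sub-KB sizes appear as fractions of a KB).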
multiple = 1024.0
for suffix in ['KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']:
size /= multiple
if size < multiple:
return '%.1f %s' % (size, suffix)
raise ValueError('number too large')
def finish(self):
self.step(self.bytes_total, force_update=True)
print()
@staticmethod
def terminal_width():
"""Estimate the width of the terminal"""
width = 0
try:
import struct, fcntl, termios
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
width = struct.unpack('HHHH', x)[1]
except (IOError, ImportError):
pass
if width <= 0:
try:
width = int(os.environ['COLUMNS'])
except:
pass
if width <= 0:
width = 80
return width
def rosbagmain(argv=None):
cmds = RosbagCmds()
cmds.add_cmd('record', record_cmd, "Record a bag file with the contents of specified topics.")
cmds.add_cmd('info', info_cmd, 'Summarize the contents of one or more bag files.')
cmds.add_cmd('play', play_cmd, "Play back the contents of one or more bag files in a time-synchronized fashion.")
cmds.add_cmd('check', check_cmd, 'Determine whether a bag is playable in the current system, or if it can be migrated.')
cmds.add_cmd('fix', fix_cmd, 'Repair the messages in a bag file so that it can be played in the current system.')
cmds.add_cmd('filter', filter_cmd, 'Filter the contents of the bag.')
cmds.add_cmd('compress', compress_cmd, 'Compress one or more bag files.')
cmds.add_cmd('decompress', decompress_cmd, 'Decompress one or more bag files.')
cmds.add_cmd('reindex', reindex_cmd, 'Reindexes one or more bag files.')
if argv is None:
argv = sys.argv
if '-h' in argv or '--help' in argv:
argv = [a for a in argv if a != '-h' and a != '--help']
argv.insert(1, 'help')
if len(argv) > 1:
cmd = argv[1]
else:
cmd = 'help'
try:
if cmd in cmds:
cmds[cmd](argv[2:])
else:
cmds['help']([cmd])
except KeyboardInterrupt:
pass
|
the-stack_0_23553 | import urllib
import urllib.request
import os
if not os.path.exists('imgs'):
os.makedirs('imgs')
def scrape():
'''Scrape for images!'''
url_i = input('Url to get images from: ')
try:
gr = urllib.request.urlopen(url_i)
except ValueError:
print('Wrong url format! make sure your url starts with "http" or "https"!')
exit()
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
html = gr.read()
results = []
parsed_html = BeautifulSoup(html, "html.parser")
num = 0
for item in parsed_html.find_all('img'):
img_num = len(parsed_html.find_all('img'))
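        # 'src' may be site-relative or already absolute: try prefixing the page
        # URL first, and fall back to using the attribute value unchanged.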
try:
urllib.request.urlretrieve(url_i+item['src'], 'imgs/img{}.jpg'.format(num))
print('Downloading image {} of {}!'.format(num, img_num))
except:
urllib.request.urlretrieve(item['src'], 'imgs/img{}.jpg'.format(num))
print('Downloading image {} of {}!'.format(num, img_num))
num += 1
print('Finished! Downloaded {} images!'.format(img_num))
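
# A more robust way to resolve relative image URLs (not used above) would be
# urllib.parse.urljoin, which handles absolute, root-relative and relative
# 'src' values uniformly, e.g.:
#
#   from urllib.parse import urljoin
#   img_url = urljoin(url_i, item['src'])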
scrape() |
the-stack_0_23555 | import copy
import numpy as np
from solnml.components.evaluators.cls_evaluator import ClassificationEvaluator
from solnml.components.evaluators.reg_evaluator import RegressionEvaluator
from solnml.utils.logging_utils import get_logger
from ConfigSpace.hyperparameters import UnParametrizedHyperparameter
from solnml.components.feature_engineering.transformation_graph import DataNode
from solnml.components.fe_optimizers import build_fe_optimizer
from solnml.components.hpo_optimizer import build_hpo_optimizer
from solnml.components.utils.constants import CLS_TASKS, REG_TASKS
from solnml.utils.decorators import time_limit
from solnml.utils.functions import get_increasing_sequence
class SecondLayerBandit(object):
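    """Two-armed bandit over feature engineering ('fe') and hyperparameter
    optimization ('hpo') for a single estimator: pulls arms according to `mth`,
    tracks per-arm rewards, and maintains the joint incumbent configuration."""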
def __init__(self, task_type, estimator_id: str, data: DataNode, metric,
share_fe=False, output_dir='logs',
per_run_time_limit=120,
per_run_mem_limit=5120,
dataset_id='default',
eval_type='holdout',
mth='rb', sw_size=3,
n_jobs=1, seed=1, fe_algo='tree_based',
enable_intersection=True,
number_of_unit_resource=2,
total_resource=30):
self.task_type = task_type
self.metric = metric
self.number_of_unit_resource = number_of_unit_resource
        # One unit of resource, that is, the number of trials per iteration.
self.one_unit_of_resource = 5
self.total_resource = total_resource
self.per_run_time_limit = per_run_time_limit
self.per_run_mem_limit = per_run_mem_limit
self.estimator_id = estimator_id
self.evaluation_type = eval_type
self.original_data = data.copy_()
self.share_fe = share_fe
self.output_dir = output_dir
self.n_jobs = n_jobs
self.mth = mth
self.seed = seed
self.sliding_window_size = sw_size
task_id = '%s-%d-%s' % (dataset_id, seed, estimator_id)
self.logger = get_logger(self.__class__.__name__ + '-' + task_id)
np.random.seed(self.seed)
# Bandit settings.
# self.arms = ['fe', 'hpo']
self.arms = ['hpo', 'fe']
self.rewards = dict()
self.optimizer = dict()
self.evaluation_cost = dict()
self.update_flag = dict()
# Global incumbent.
self.inc = dict()
self.local_inc = dict()
self.local_hist = {'fe': [], 'hpo': []}
for arm in self.arms:
self.rewards[arm] = list()
self.update_flag[arm] = False
self.evaluation_cost[arm] = list()
self.pull_cnt = 0
self.action_sequence = list()
self.final_rewards = list()
self.incumbent_perf = float("-INF")
self.early_stopped_flag = False
self.enable_intersection = enable_intersection
# Fetch hyperparameter space.
if self.task_type in CLS_TASKS:
from solnml.components.models.classification import _classifiers, _addons
if estimator_id in _classifiers:
clf_class = _classifiers[estimator_id]
elif estimator_id in _addons.components:
clf_class = _addons.components[estimator_id]
else:
raise ValueError("Algorithm %s not supported!" % estimator_id)
cs = clf_class.get_hyperparameter_search_space()
model = UnParametrizedHyperparameter("estimator", estimator_id)
cs.add_hyperparameter(model)
elif self.task_type in REG_TASKS:
from solnml.components.models.regression import _regressors, _addons
if estimator_id in _regressors:
reg_class = _regressors[estimator_id]
elif estimator_id in _addons.components:
reg_class = _addons.components[estimator_id]
else:
raise ValueError("Algorithm %s not supported!" % estimator_id)
cs = reg_class.get_hyperparameter_search_space()
model = UnParametrizedHyperparameter("estimator", estimator_id)
cs.add_hyperparameter(model)
else:
raise ValueError("Unknown task type %s!" % self.task_type)
self.config_space = cs
self.default_config = cs.get_default_configuration()
self.config_space.seed(self.seed)
# Build the Feature Engineering component.
if self.task_type in CLS_TASKS:
fe_evaluator = ClassificationEvaluator(self.default_config, scorer=self.metric,
name='fe', resampling_strategy=self.evaluation_type,
seed=self.seed)
hpo_evaluator = ClassificationEvaluator(self.default_config, scorer=self.metric,
data_node=self.original_data, name='hpo',
resampling_strategy=self.evaluation_type,
seed=self.seed)
elif self.task_type in REG_TASKS:
fe_evaluator = RegressionEvaluator(self.default_config, scorer=self.metric,
name='fe', resampling_strategy=self.evaluation_type,
seed=self.seed)
hpo_evaluator = RegressionEvaluator(self.default_config, scorer=self.metric,
data_node=self.original_data, name='hpo',
resampling_strategy=self.evaluation_type,
seed=self.seed)
else:
raise ValueError('Invalid task type!')
self.fe_algo = fe_algo
self.optimizer['fe'] = build_fe_optimizer(self.fe_algo, self.evaluation_type,
self.task_type, self.original_data,
fe_evaluator, estimator_id, per_run_time_limit,
per_run_mem_limit, self.seed,
shared_mode=self.share_fe, n_jobs=n_jobs)
self.inc['fe'], self.local_inc['fe'] = self.original_data, self.original_data
# Build the HPO component.
# trials_per_iter = max(len(self.optimizer['fe'].trans_types), 20)
trials_per_iter = self.one_unit_of_resource * self.number_of_unit_resource
self.optimizer['hpo'] = build_hpo_optimizer(self.evaluation_type, hpo_evaluator, cs, output_dir=output_dir,
per_run_time_limit=per_run_time_limit,
trials_per_iter=trials_per_iter,
seed=self.seed, n_jobs=n_jobs)
self.inc['hpo'], self.local_inc['hpo'] = self.default_config, self.default_config
self.init_config = cs.get_default_configuration()
self.local_hist['fe'].append(self.original_data)
self.local_hist['hpo'].append(self.default_config)
def collect_iter_stats(self, _arm, results):
for arm_id in self.arms:
self.update_flag[arm_id] = False
if _arm == 'fe' and len(self.final_rewards) == 0:
self.incumbent_perf = self.optimizer['fe'].baseline_score
self.final_rewards.append(self.incumbent_perf)
self.logger.debug('After %d-th pulling, results: %s' % (self.pull_cnt, results))
score, iter_cost, config = results
if score is None:
score = 0.0
self.rewards[_arm].append(score)
self.evaluation_cost[_arm].append(iter_cost)
self.local_inc[_arm] = config
# Update global incumbent from FE and HPO.
if np.isfinite(score) and score > self.incumbent_perf:
self.inc[_arm] = config
self.local_hist[_arm].append(config)
if _arm == 'fe':
if self.mth not in ['alter_hpo', 'rb_hpo', 'fixed_pipeline']:
self.inc['hpo'] = self.default_config
else:
self.inc['hpo'] = self.init_config
else:
if self.mth not in ['alter_hpo', 'rb_hpo', 'fixed_pipeline']:
self.inc['fe'] = self.original_data
self.incumbent_perf = score
arm_id = 'fe' if _arm == 'hpo' else 'hpo'
self.update_flag[arm_id] = True
if self.mth in ['rb_hpo', 'alter_hpo'] and _arm == 'fe':
self.prepare_optimizer(arm_id)
if self.mth in ['rb_hpo', 'alter_hpo'] and _arm == 'hpo':
if len(self.rewards[_arm]) == 1:
self.prepare_optimizer(arm_id)
self.init_config = config
if config != self.default_config:
self.logger.debug('Initial hp_config for FE has changed!')
if self.mth in ['alter_p', 'fixed']:
self.prepare_optimizer(arm_id)
def optimize_rb(self):
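        # 'rb' presumably stands for rising bandit: after the warm-up pulls, the arm
        # whose recent rewards improved the most is selected; ties and early-stopped
        # optimizers fall back to the other arm.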
# First pull each arm #sliding_window_size times.
if self.pull_cnt < len(self.arms) * self.sliding_window_size:
arm_picked = self.arms[self.pull_cnt % 2]
else:
imp_values = list()
for _arm in self.arms:
increasing_rewards = get_increasing_sequence(self.rewards[_arm])
impv = list()
for idx in range(1, len(increasing_rewards)):
impv.append(increasing_rewards[idx] - increasing_rewards[idx - 1])
imp_values.append(np.mean(impv[-self.sliding_window_size:]))
self.logger.debug('Imp values: %s' % imp_values)
if imp_values[0] == imp_values[1]:
# Break ties randomly.
# arm_picked = np.random.choice(self.arms, 1)[0]
arm_picked = 'fe' if self.action_sequence[-1] == 'hpo' else 'hpo'
else:
arm_picked = self.arms[np.argmax(imp_values)]
# Early stopping scenario.
if self.optimizer[arm_picked].early_stopped_flag is True:
arm_picked = 'hpo' if arm_picked == 'fe' else 'fe'
if self.optimizer[arm_picked].early_stopped_flag is True:
self.early_stopped_flag = True
return
self.action_sequence.append(arm_picked)
self.logger.debug(','.join(self.action_sequence))
self.logger.debug('Pulling arm: %s for %s at %d-th round' % (arm_picked, self.estimator_id, self.pull_cnt))
results = self.optimizer[arm_picked].iterate()
self.collect_iter_stats(arm_picked, results)
self.pull_cnt += 1
def optimize_alternatedly(self):
# First choose one arm.
_arm = self.arms[self.pull_cnt % 2]
self.logger.debug('Pulling arm: %s for %s at %d-th round' % (_arm, self.estimator_id, self.pull_cnt))
# Execute one iteration.
results = self.optimizer[_arm].iterate()
self.collect_iter_stats(_arm, results)
self.action_sequence.append(_arm)
self.pull_cnt += 1
def _optimize_fixed_pipeline(self):
if self.pull_cnt <= 2:
_arm = 'hpo'
else:
_arm = 'fe'
self.logger.debug('Pulling arm: %s for %s at %d-th round' % (_arm, self.estimator_id, self.pull_cnt))
# Execute one iteration.
results = self.optimizer[_arm].iterate()
self.collect_iter_stats(_arm, results)
self.action_sequence.append(_arm)
self.pull_cnt += 1
def optimize_fixed_pipeline(self):
ratio_fe = int(self.total_resource * 0.75) + 1
for iter_id in range(self.total_resource):
if iter_id == 0 or iter_id >= ratio_fe:
_arm = 'hpo'
else:
_arm = 'fe'
results = self.optimizer[_arm].iterate()
self.collect_iter_stats(_arm, results)
self.action_sequence.append(_arm)
self.pull_cnt += 1
def optimize_one_component(self, mth):
_arm = 'hpo' if mth == 'hpo_only' else 'fe'
self.logger.debug('Pulling arm: %s for %s at %d-th round' % (_arm, self.estimator_id, self.pull_cnt))
# Execute one iteration.
results = self.optimizer[_arm].iterate()
self.collect_iter_stats(_arm, results)
self.action_sequence.append(_arm)
self.pull_cnt += 1
def evaluate_joint_solution(self):
# Update join incumbent from FE and HPO.
_perf = None
try:
with time_limit(600):
if self.task_type in CLS_TASKS:
_perf = ClassificationEvaluator(
self.local_inc['hpo'], data_node=self.local_inc['fe'], scorer=self.metric,
name='fe', resampling_strategy=self.evaluation_type,
seed=self.seed)(self.local_inc['hpo'])
else:
_perf = RegressionEvaluator(
self.local_inc['hpo'], data_node=self.local_inc['fe'], scorer=self.metric,
name='fe', resampling_strategy=self.evaluation_type,
seed=self.seed)(self.local_inc['hpo'])
except Exception as e:
self.logger.error(str(e))
# Update INC.
if _perf is not None and np.isfinite(_perf) and _perf > self.incumbent_perf:
self.inc['hpo'] = self.local_inc['hpo']
self.inc['fe'] = self.local_inc['fe']
self.incumbent_perf = _perf
def play_once(self):
if self.early_stopped_flag:
return self.incumbent_perf
if self.mth in ['rb', 'rb_hpo']:
self.optimize_rb()
self.evaluate_joint_solution()
elif self.mth in ['alter', 'alter_p', 'alter_hpo']:
self.optimize_alternatedly()
self.evaluate_joint_solution()
elif self.mth in ['fe_only', 'hpo_only']:
self.optimize_one_component(self.mth)
elif self.mth in ['fixed']:
self._optimize_fixed_pipeline()
else:
raise ValueError('Invalid method: %s' % self.mth)
self.final_rewards.append(self.incumbent_perf)
return self.incumbent_perf
def prepare_optimizer(self, _arm):
if _arm == 'fe':
# Build the Feature Engineering component.
self.original_data._node_id = -1
inc_hpo = copy.deepcopy(self.inc['hpo'])
if self.task_type in CLS_TASKS:
fe_evaluator = ClassificationEvaluator(inc_hpo, scorer=self.metric,
name='fe', resampling_strategy=self.evaluation_type,
seed=self.seed)
elif self.task_type in REG_TASKS:
fe_evaluator = RegressionEvaluator(inc_hpo, scorer=self.metric,
name='fe', resampling_strategy=self.evaluation_type,
seed=self.seed)
else:
raise ValueError('Invalid task type!')
self.optimizer[_arm] = build_fe_optimizer(self.fe_algo, self.evaluation_type,
self.task_type, self.inc['fe'],
fe_evaluator, self.estimator_id, self.per_run_time_limit,
self.per_run_mem_limit, self.seed,
shared_mode=self.share_fe,
n_jobs=self.n_jobs)
else:
# trials_per_iter = self.optimizer['fe'].evaluation_num_last_iteration // 2
# trials_per_iter = max(20, trials_per_iter)
trials_per_iter = self.one_unit_of_resource * self.number_of_unit_resource
if self.task_type in CLS_TASKS:
hpo_evaluator = ClassificationEvaluator(self.default_config, scorer=self.metric,
data_node=self.inc['fe'].copy_(), name='hpo',
resampling_strategy=self.evaluation_type,
seed=self.seed)
elif self.task_type in REG_TASKS:
hpo_evaluator = RegressionEvaluator(self.default_config, scorer=self.metric,
data_node=self.inc['fe'].copy_(), name='hpo',
resampling_strategy=self.evaluation_type,
seed=self.seed)
else:
raise ValueError('Invalid task type!')
self.optimizer[_arm] = build_hpo_optimizer(self.evaluation_type, hpo_evaluator, self.config_space,
output_dir=self.output_dir,
per_run_time_limit=self.per_run_time_limit,
trials_per_iter=trials_per_iter, seed=self.seed)
self.logger.debug('=' * 30)
self.logger.debug('UPDATE OPTIMIZER: %s' % _arm)
self.logger.debug('=' * 30)
|
the-stack_0_23561 | #appModules/mirc.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2010 James Teh <[email protected]>
"""App module for mIRC
"""
import controlTypes
from NVDAObjects.window import Window, DisplayModelLiveText
from NVDAObjects.IAccessible import StaticText
import appModuleHandler
class Input(Window):
def event_gainFocus(self):
super(Input, self).event_gainFocus()
try:
output = self.parent.parent.lastChild.firstChild
except AttributeError:
output = None
if isinstance(output, DisplayModelLiveText):
output.startMonitoring()
self._output = output
else:
self._output = None
def event_loseFocus(self):
if self._output:
self._output.stopMonitoring()
class AppModule(appModuleHandler.AppModule):
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
if obj.role == controlTypes.ROLE_WINDOW:
return
if obj.windowClassName == "Static" and obj.windowControlID == 32918:
clsList.remove(StaticText)
clsList.insert(0, DisplayModelLiveText)
elif obj.windowClassName == "RichEdit20W" and obj.windowControlID == 32921:
clsList.insert(0, Input)
|
the-stack_0_23562 | # get temperature readings from DS18B20 temperature sensor
# Configuration Temperature-node:
# WeMOS D1 Mini - sensor attached to D5 (GPIO14)
# ,, - LED attached to D6 (GPIO12)
from micropython import const
import machine, time
import onewire, ds18x20
__SENSOR_PIN = const(14) # WeMOS D1 mini D5
# set up the DS18X20 sensor object and return it (roms are scanned on demand)
def setup():
# create onewire object on GPIO-pin
ow = onewire.OneWire(machine.Pin(__SENSOR_PIN))
print('OneWire bus devices:', ow.scan())
#output: [bytearray(b'(\xff\xa1/\x83\x16\x03~')]
#create a sensor-object
ds = ds18x20.DS18X20(ow)
#roms = ds.scan()
# return ds, roms
return (ds)
def test(ds, dt = 1.0):
try:
roms = ds.scan()
while True:
ds.convert_temp()
time.sleep_ms(750)
for rom in roms:
print('Temperature {0:0.2f} C'.format(ds.read_temp(rom)))
time.sleep(dt)
except:
print('test() intercepted.')
# note: @property only works on class attributes, so it cannot decorate this module-level function
def temperature(ds):
roms = ds.scan()
ds.convert_temp()
time.sleep_ms(750)
for rom in roms:
t = ds.read_temp(rom)
#OK:print ('Temperature {0:0.2f} C'.format(t))
return t
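
# The wiring comment at the top mentions a status LED on D6 (GPIO12), but the
# original module never drives it. A minimal sketch under that assumption
# (pin number taken from the comment, not from tested code):
__LED_PIN = const(12)  # WeMOS D1 mini D6 (assumed wiring)

def blink_led(duration_ms=100):
    """Briefly flash the node's status LED."""
    led = machine.Pin(__LED_PIN, machine.Pin.OUT)
    led.value(1)                 # LED on
    time.sleep_ms(duration_ms)   # keep it lit briefly
    led.value(0)                 # LED off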
|
the-stack_0_23564 | # -*- coding: utf-8 -*-
"""Compares OF, D-MF, S-MF STD and mean for real data."""
# results_low_snr_adc_5
import os.path
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['xtick.labelsize'] = 22
plt.rcParams['ytick.labelsize'] = 22
DIR_PATH = os.path.dirname(__file__)
if __name__ == '__main__':
amplitude_mean = 10
channel = 1
noise_means = [30, 50, 90]
sufix = f'_ch{channel}'
base_folder = DIR_PATH + f'/../../results/hybrid/amplitude_mean{amplitude_mean}'
of_means = []
of_stds = []
dmf_means = []
dmf_stds = []
smf_means = []
smf_stds = []
for noise_mean in noise_means:
of_file_name = base_folder + f'/OF/mu{noise_mean}/of_amp_error{sufix}.txt'
dmf_file_name = base_folder + f'/D_MF/mu{noise_mean}/dmf_amp_error{sufix}.txt'
smf_file_name = base_folder + f'/S_MF/mu{noise_mean}/smf_amp_error{sufix}.txt'
of_error = np.loadtxt(of_file_name)
dmf_error = np.loadtxt(dmf_file_name)
smf_error = np.loadtxt(smf_file_name)
of_means.append(np.mean(of_error))
of_stds.append(np.std(of_error))
dmf_means.append(np.mean(dmf_error))
dmf_stds.append(np.std(dmf_error))
smf_means.append(np.mean(smf_error))
smf_stds.append(np.std(smf_error))
fig, ((ax0, ax1)) = plt.subplots(nrows=1, ncols=2, figsize=(19,10))
font = {
'family': 'Times New Roman',
'size': 22
}
print('Means and RMSs')
print('OF: \n')
print(of_means)
print(of_stds)
print('DMF: \n')
print(dmf_means)
print(dmf_stds)
print('SMF: \n')
print(smf_means)
print(smf_stds)
fig.suptitle(f'Comparação das Médias e RMS \n Canal: {channel} Amplitude: {amplitude_mean}')
ax0.legend(prop={'size': 10})
ax0.grid(axis='y', alpha=0.75)
ax0.set_xlabel('Ruído', **font)
ax0.set_ylabel('Média', **font)
ax0.plot(noise_means, of_means, '-ro', label='Média-OF')
ax0.plot(noise_means, dmf_means, '-bo', label='Média-DMF')
ax0.plot(noise_means, smf_means, '-go', label='Média-SMF')
ax0.legend(loc='best', fontsize=19)
ax1.legend(prop={'size': 10})
ax1.grid(axis='y', alpha=0.75)
ax1.set_xlabel('Ruído', **font)
ax1.set_ylabel('RMS', **font)
ax1.plot(noise_means, of_stds, '-ro', label='RMS-OF')
ax1.plot(noise_means, dmf_stds, '-bo', label='RMS-DMF')
ax1.plot(noise_means, smf_stds, '-go', label='RMS-SMF')
ax1.legend(loc='best', fontsize=19)
plt.show()
|
the-stack_0_23567 | # -*- Python -*-
import os
import platform
import re
import subprocess
import sys
import tempfile
import lit.formats
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
from lit.llvm.subst import FindTool
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'Clang'
# testFormat: The test format to use to interpret tests.
#
# For now we require '&&' between commands, until they get globally killed and
# the test runner updated.
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.c', '.cpp', '.i', '.cppm', '.m', '.mm', '.cu',
'.ll', '.cl', '.s', '.S', '.modulemap', '.test', '.rs', '.ifs']
# excludes: A list of directories to exclude from the testsuite. The 'Inputs'
# subdirectories contain auxiliary inputs for various tests in their parent
# directories.
config.excludes = ['Inputs', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt', 'debuginfo-tests']
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.path.join(config.clang_obj_root, 'test')
llvm_config.use_default_substitutions()
llvm_config.use_clang()
config.substitutions.append(
('%src_include_dir', config.clang_src_dir + '/include'))
config.substitutions.append(
('%target_triple', config.target_triple))
# Propagate path to symbolizer for ASan/MSan.
llvm_config.with_system_environment(
['ASAN_SYMBOLIZER_PATH', 'MSAN_SYMBOLIZER_PATH'])
config.substitutions.append(('%PATH%', config.environment['PATH']))
# For each occurrence of a clang tool name, replace it with the full path to
# the build directory holding that tool. We explicitly specify the directories
# to search to ensure that we get the tools just built and not some random
# tools that might happen to be in the user's PATH.
tool_dirs = [config.clang_tools_dir, config.llvm_tools_dir]
tools = [
'apinotes-test', 'c-index-test', 'clang-diff', 'clang-format',
'clang-tblgen', 'opt', 'llvm-ifs',
ToolSubst('%clang_extdef_map', command=FindTool(
'clang-extdef-mapping'), unresolved='ignore'),
]
if config.clang_examples:
config.available_features.add('examples')
tools.append('clang-interpreter')
if config.clang_staticanalyzer:
config.available_features.add('staticanalyzer')
tools.append('clang-check')
if config.clang_staticanalyzer_z3 == '1':
config.available_features.add('z3')
check_analyzer_fixit_path = os.path.join(
config.test_source_root, "Analysis", "check-analyzer-fixit.py")
config.substitutions.append(
('%check_analyzer_fixit',
'"%s" %s' % (config.python_executable, check_analyzer_fixit_path)))
llvm_config.add_tool_substitutions(tools, tool_dirs)
config.substitutions.append(
('%hmaptool', "'%s' %s" % (config.python_executable,
os.path.join(config.clang_tools_dir, 'hmaptool'))))
# Plugins (loadable modules)
if config.has_plugins and config.llvm_plugin_ext:
config.available_features.add('plugins')
# Set available features we allow tests to conditionalize on.
#
if config.clang_default_cxx_stdlib != '':
config.available_features.add('default-cxx-stdlib-set')
# As of 2011.08, crash-recovery tests still do not pass on FreeBSD.
if platform.system() not in ['FreeBSD']:
config.available_features.add('crash-recovery')
# Support for new pass manager.
if config.enable_experimental_new_pass_manager:
config.available_features.add('experimental-new-pass-manager')
# ANSI escape sequences in non-dumb terminal
if platform.system() not in ['Windows']:
config.available_features.add('ansi-escape-sequences')
# Capability to print utf8 to the terminal.
# Windows expects codepage, unless Wide API.
if platform.system() not in ['Windows']:
config.available_features.add('utf8-capable-terminal')
# Support for libgcc runtime. Used to rule out tests that require
# clang to run with -rtlib=libgcc.
if platform.system() not in ['Darwin', 'Fuchsia']:
config.available_features.add('libgcc')
# Case-insensitive file system
def is_filesystem_case_insensitive():
handle, path = tempfile.mkstemp(
prefix='case-test', dir=config.test_exec_root)
isInsensitive = os.path.exists(
os.path.join(
os.path.dirname(path),
os.path.basename(path).upper()
))
os.close(handle)
os.remove(path)
return isInsensitive
if is_filesystem_case_insensitive():
config.available_features.add('case-insensitive-filesystem')
# Tests that require the /dev/fd filesystem.
if os.path.exists('/dev/fd/0') and sys.platform not in ['cygwin']:
config.available_features.add('dev-fd-fs')
# Set on native MS environment.
if re.match(r'.*-(windows-msvc)$', config.target_triple):
config.available_features.add('ms-sdk')
# [PR8833] LLP64-incompatible tests
if not re.match(r'^x86_64.*-(windows-msvc|windows-gnu)$', config.target_triple):
config.available_features.add('LP64')
# [PR12920] "clang-driver" -- set if gcc driver is not used.
if not re.match(r'.*-(cygwin)$', config.target_triple):
config.available_features.add('clang-driver')
# Tests that are specific to the Apple Silicon macOS.
if re.match(r'^arm64(e)?-apple-(macos|darwin)', config.target_triple):
config.available_features.add('apple-silicon-mac')
# [PR18856] Depends to remove opened file. On win32, a file could be removed
# only if all handles were closed.
if platform.system() not in ['Windows']:
config.available_features.add('can-remove-opened-file')
# Features
known_arches = ["x86_64", "mips64", "ppc64", "aarch64"]
if (any(config.target_triple.startswith(x) for x in known_arches)):
config.available_features.add("clang-target-64-bits")
def calculate_arch_features(arch_string):
features = []
for arch in arch_string.split():
features.append(arch.lower() + '-registered-target')
return features
llvm_config.feature_config(
[('--assertion-mode', {'ON': 'asserts'}),
('--cxxflags', {r'-D_GLIBCXX_DEBUG\b': 'libstdcxx-safe-mode'}),
('--targets-built', calculate_arch_features),
])
if lit.util.which('xmllint'):
config.available_features.add('xmllint')
if config.enable_backtrace:
config.available_features.add('backtrace')
if config.enable_threads:
config.available_features.add('thread_support')
# Check if we should allow outputs to console.
run_console_tests = int(lit_config.params.get('enable_console', '0'))
if run_console_tests != 0:
config.available_features.add('console')
lit.util.usePlatformSdkOnDarwin(config, lit_config)
macOSSDKVersion = lit.util.findPlatformSdkVersionOnMacOS(config, lit_config)
if macOSSDKVersion is not None:
config.available_features.add('macos-sdk-' + str(macOSSDKVersion))
if os.path.exists('/etc/gentoo-release'):
config.available_features.add('gentoo')
if config.enable_shared:
config.available_features.add("enable_shared")
# Add a vendor-specific feature.
if config.clang_vendor_uti:
config.available_features.add('clang-vendor=' + config.clang_vendor_uti)
|
the-stack_0_23569 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests tools.validators.instance_validator.instance_parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from absl.testing import absltest
import strictyaml as syaml
from tests import test_constants
from validate import instance_parser
_TESTCASE_PATH = test_constants.TEST_INSTANCES
def _ParserHelper(testpaths):
parser = instance_parser.InstanceParser()
for filepath in testpaths:
parser.AddFile(filepath)
parser.Finalize()
return parser
def _Helper(testpaths):
return _ParserHelper(testpaths).GetEntities()
class ParserTest(absltest.TestCase):
def test_EnumToRegex(self):
expected = syaml.Regex('^(ADD) | (UPDATE)$')
actual = instance_parser.EnumToRegex(
instance_parser.EntityOperation,
[instance_parser.EntityOperation.DELETE])
self.assertEqual(str(expected), str(actual))
def testInstanceValidatorDetectDuplicateKeys(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_duplicate_keys.yaml')])
self.assertIsNone(parse)
def testInstanceValidatorDetectMissingColon(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_missing_colon.yaml')])
self.assertIsNone(parse)
def testInstanceValidatorDetectImproperSpacing(self):
with self.assertRaises(SystemExit):
parse = _Helper([path.join(_TESTCASE_PATH, 'BAD', 'bad_spacing.yaml')])
self.assertIsNone(parse)
def testInstanceValidatorDetectImproperTabbing(self):
with self.assertRaises(SystemExit):
parse = _Helper([path.join(_TESTCASE_PATH, 'BAD', 'bad_tabbing.yaml')])
self.assertIsNone(parse)
def testInstanceValidatorParseProperFormat(self):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'GOOD', 'good_building_type.yaml')])
self.assertIsNotNone(parse)
def testInstanceValidatorParseProperConnections(self):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'GOOD', 'good_building_connections.yaml')])
self.assertIsNotNone(parse)
def testInstanceValidatorParseProperConnectionList(self):
parse = _Helper([
path.join(_TESTCASE_PATH, 'GOOD', 'good_building_connection_list.yaml')
])
self.assertIsNotNone(parse)
def testInstanceValidatorParseMultipleEntities(self):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'GOOD', 'good_multi_instances.yaml')])
self.assertLen(parse.keys(), 3)
self.assertIn('AHU-11', parse.keys())
self.assertIn('FCU-1', parse.keys())
self.assertIn('FCU-10', parse.keys())
def testInstanceValidatorDetectImproperTranslationCompliance(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_translation_compliant.yaml')])
del parse
def testInstanceValidatorDetectImproperTranslationKeys(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_translation_keys.yaml')])
del parse
def testInstanceValidatorDetectImproperUnitsKeys(self):
with self.assertRaises(SystemExit):
parse = _Helper([
path.join(_TESTCASE_PATH, 'BAD', 'bad_translation_units_format.yaml')
])
del parse
def testInstanceValidatorCloudDeviceIdNotSetWithTranslation(self):
with self.assertRaises(KeyError):
parse = _Helper([
path.join(_TESTCASE_PATH, 'BAD',
'bad_translation_no_cloud_device_id.yaml')
])
del parse
def testInstanceValidatorDetectDuplicateEntityKeys(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_duplicate_key.yaml')])
del parse
def testInstanceValidatorDetectDuplicateMetadata(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_duplicate_metadata.yaml')])
del parse
def testInstanceValidatorRejectsOperationOnInitialize(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_entity_operation.yaml')])
del parse
def testInstanceValidatorRejectsMaskOnInitialize(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_entity_mask.yaml')])
del parse
def testInstanceValidatorRejectsMaskOnAdd(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_entity_add_mask.yaml')])
del parse
def testInstanceValidatorRejectsUpdateWithoutEtag(self):
with self.assertRaises(SystemExit):
parse = _Helper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_entity_etag.yaml')])
del parse
def testInstanceValidatorReadsMetadata(self):
parser = _ParserHelper(
[path.join(_TESTCASE_PATH, 'GOOD', 'good_with_metadata.yaml')])
self.assertLen(parser.GetEntities().keys(), 2)
self.assertEqual(parser.GetConfigMode(), instance_parser.ConfigMode.UPDATE)
def testInstanceValidatorReadsMetadataAtEnd(self):
parser = _ParserHelper(
[path.join(_TESTCASE_PATH, 'GOOD', 'good_with_metadata_at_end.yaml')])
self.assertLen(parser.GetEntities().keys(), 2)
self.assertEqual(parser.GetConfigMode(), instance_parser.ConfigMode.UPDATE)
def testInstanceValidatorHandlesUpdateMode(self):
parser = _ParserHelper(
[path.join(_TESTCASE_PATH, 'GOOD', 'good_update_with_metadata.yaml')])
self.assertLen(parser.GetEntities().keys(), 4)
def testInstanceValidatorUsesDefaultMode(self):
parser = _ParserHelper(
[path.join(_TESTCASE_PATH, 'GOOD', 'good_building_type.yaml')])
self.assertEqual(parser.GetConfigMode(),
instance_parser.ConfigMode.Default())
def testInstanceRejectsExportMode(self):
with self.assertRaises(KeyError):
parser = _ParserHelper(
[path.join(_TESTCASE_PATH, 'BAD', 'bad_configmode.yaml')])
del parser
if __name__ == '__main__':
absltest.main()
|
the-stack_0_23571 | import os
from tqdm import tqdm
dataset_dir = '/home/xiao/Datasets/T-LESS'
# where contains the original ply files
ply_dir = os.path.join(dataset_dir, "models_cad")
# where to save the converted obj files
obj_dir = os.path.join(dataset_dir, "models_obj")
if not os.path.isdir(obj_dir):
os.mkdir(obj_dir)
plys = [name for name in os.listdir(ply_dir) if name.endswith(".ply")]
for ply in tqdm(plys):
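    # keep only the last six characters of the converted name (e.g. 'obj_01.obj' -> '01.obj');
    # this assumes the T-LESS CAD models follow the obj_XX.ply naming scheme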
os.system("meshlabserver -i %s -o %s" % (os.path.join(ply_dir, ply), os.path.join(obj_dir, ply.replace(".ply", ".obj")[-6:]))) |
the-stack_0_23573 | #!/usr/bin/env python
import os
import sys
import yaml
import logging
import optparse
import pyfiglet
import uuid
import time
import kraken.kubernetes.client as kubecli
import kraken.invoke.command as runcommand
import kraken.litmus.common_litmus as common_litmus
import kraken.time_actions.common_time_functions as time_actions
import kraken.performance_dashboards.setup as performance_dashboards
import kraken.pod_scenarios.setup as pod_scenarios
import kraken.namespace_actions.common_namespace_functions as namespace_actions
import kraken.shut_down.common_shut_down_func as shut_down
import kraken.node_actions.run as nodeaction
import kraken.kube_burner.client as kube_burner
import kraken.zone_outage.actions as zone_outages
import kraken.application_outage.actions as application_outage
import kraken.pvc.pvc_scenario as pvc_scenario
import server as server
def publish_kraken_status(status):
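    # Record the current run signal (RUN/PAUSE/STOP) in a file that can be polled while kraken is running.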
with open("/tmp/kraken_status", "w+") as file:
file.write(str(status))
# Main function
def main(cfg):
# Start kraken
print(pyfiglet.figlet_format("kraken"))
logging.info("Starting kraken")
# Parse and read the config
if os.path.isfile(cfg):
with open(cfg, "r") as f:
config = yaml.full_load(f)
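        # A minimal sketch of the expected config layout (keys mirror the .get()
        # calls below; the values shown are purely illustrative):
        #
        #   kraken:
        #     distribution: openshift
        #     kubeconfig_path: ~/.kube/config
        #     chaos_scenarios: []
        #   tunings:
        #     wait_duration: 60
        #     iterations: 1
        #   performance_monitoring:
        #     deploy_dashboards: False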
global kubeconfig_path, wait_duration
distribution = config["kraken"].get("distribution", "openshift")
kubeconfig_path = config["kraken"].get("kubeconfig_path", "")
chaos_scenarios = config["kraken"].get("chaos_scenarios", [])
publish_running_status = config["kraken"].get("publish_kraken_status", False)
port = config["kraken"].get("port", "8081")
run_signal = config["kraken"].get("signal_state", "RUN")
litmus_version = config["kraken"].get("litmus_version", "v1.9.1")
litmus_uninstall = config["kraken"].get("litmus_uninstall", False)
wait_duration = config["tunings"].get("wait_duration", 60)
iterations = config["tunings"].get("iterations", 1)
daemon_mode = config["tunings"].get("daemon_mode", False)
deploy_performance_dashboards = config["performance_monitoring"].get("deploy_dashboards", False)
dashboard_repo = config["performance_monitoring"].get(
"repo", "https://github.com/cloud-bulldozer/performance-dashboards.git"
) # noqa
capture_metrics = config["performance_monitoring"].get("capture_metrics", False)
kube_burner_url = config["performance_monitoring"].get(
"kube_burner_binary_url",
"https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz", # noqa
)
config_path = config["performance_monitoring"].get("config_path", "config/kube_burner.yaml")
metrics_profile = config["performance_monitoring"].get("metrics_profile_path", "config/metrics-aggregated.yaml")
prometheus_url = config["performance_monitoring"].get("prometheus_url", "")
prometheus_bearer_token = config["performance_monitoring"].get("prometheus_bearer_token", "")
run_uuid = config["performance_monitoring"].get("uuid", "")
enable_alerts = config["performance_monitoring"].get("enable_alerts", False)
alert_profile = config["performance_monitoring"].get("alert_profile", "")
# Initialize clients
if not os.path.isfile(kubeconfig_path):
logging.error("Cannot read the kubeconfig file at %s, please check" % kubeconfig_path)
sys.exit(1)
logging.info("Initializing client to talk to the Kubernetes cluster")
os.environ["KUBECONFIG"] = str(kubeconfig_path)
kubecli.initialize_clients(kubeconfig_path)
# find node kraken might be running on
kubecli.find_kraken_node()
# Set up kraken url to track signal
if not 0 <= int(port) <= 65535:
logging.info("Using port 8081 as %s isn't a valid port number" % (port))
port = 8081
address = ("0.0.0.0", port)
# If publish_running_status is False this should keep us going in our loop below
if publish_running_status:
server_address = address[0]
port = address[1]
logging.info("Publishing kraken status at http://%s:%s" % (server_address, port))
server.start_server(address)
publish_kraken_status(run_signal)
# Cluster info
logging.info("Fetching cluster info")
cluster_version = runcommand.invoke("kubectl get clusterversion", 60)
cluster_info = runcommand.invoke(
"kubectl cluster-info | awk 'NR==1' | sed -r " "'s/\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]//g'", 60
) # noqa
logging.info("\n%s%s" % (cluster_version, cluster_info))
# Deploy performance dashboards
if deploy_performance_dashboards:
performance_dashboards.setup(dashboard_repo)
# Generate uuid for the run
if run_uuid:
logging.info("Using the uuid defined by the user for the run: %s" % run_uuid)
else:
run_uuid = str(uuid.uuid4())
logging.info("Generated a uuid for the run: %s" % run_uuid)
# Initialize the start iteration to 0
iteration = 0
# Set the number of iterations to loop to infinity if daemon mode is
# enabled or else set it to the provided iterations count in the config
if daemon_mode:
logging.info("Daemon mode enabled, kraken will cause chaos forever\n")
logging.info("Ignoring the iterations set")
iterations = float("inf")
else:
logging.info("Daemon mode not enabled, will run through %s iterations\n" % str(iterations))
iterations = int(iterations)
failed_post_scenarios = []
litmus_installed = False
# Capture the start time
start_time = int(time.time())
# Loop to run the chaos starts here
while int(iteration) < iterations and run_signal != "STOP":
# Inject chaos scenarios specified in the config
logging.info("Executing scenarios for iteration " + str(iteration))
if chaos_scenarios:
for scenario in chaos_scenarios:
if publish_running_status:
run_signal = server.get_status(address)
if run_signal == "PAUSE":
while publish_running_status and run_signal == "PAUSE":
logging.info(
"Pausing Kraken run, waiting for %s seconds and will re-poll signal"
% str(wait_duration)
)
time.sleep(wait_duration)
run_signal = server.get_status(address)
if run_signal == "STOP":
logging.info("Received STOP signal; ending Kraken run")
break
scenario_type = list(scenario.keys())[0]
scenarios_list = scenario[scenario_type]
if scenarios_list:
# Inject pod chaos scenarios specified in the config
if scenario_type == "pod_scenarios":
logging.info("Running pod scenarios")
failed_post_scenarios = pod_scenarios.run(
kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_duration
)
elif scenario_type == "container_scenarios":
logging.info("Running container scenarios")
failed_post_scenarios = pod_scenarios.container_run(
kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_duration
)
# Inject node chaos scenarios specified in the config
elif scenario_type == "node_scenarios":
logging.info("Running node scenarios")
nodeaction.run(scenarios_list, config, wait_duration)
# Inject time skew chaos scenarios specified in the config
elif scenario_type == "time_scenarios":
logging.info("Running time skew scenarios")
time_actions.run(scenarios_list, config, wait_duration)
# Inject litmus based chaos scenarios
elif scenario_type == "litmus_scenarios":
logging.info("Running litmus scenarios")
litmus_namespace = "litmus"
if not litmus_installed:
# Will always uninstall first
common_litmus.delete_chaos(litmus_namespace)
common_litmus.delete_chaos_experiments(litmus_namespace)
common_litmus.uninstall_litmus(litmus_version, litmus_namespace)
common_litmus.install_litmus(litmus_version, litmus_namespace)
common_litmus.deploy_all_experiments(litmus_version, litmus_namespace)
litmus_installed = True
common_litmus.run(
scenarios_list, config, litmus_uninstall, wait_duration, litmus_namespace,
)
# Inject cluster shutdown scenarios
elif scenario_type == "cluster_shut_down_scenarios":
shut_down.run(scenarios_list, config, wait_duration)
# Inject namespace chaos scenarios
elif scenario_type == "namespace_scenarios":
logging.info("Running namespace scenarios")
namespace_actions.run(
scenarios_list, config, wait_duration, failed_post_scenarios, kubeconfig_path
)
# Inject zone failures
elif scenario_type == "zone_outages":
logging.info("Inject zone outages")
zone_outages.run(scenarios_list, config, wait_duration)
# Application outages
elif scenario_type == "application_outages":
logging.info("Injecting application outage")
application_outage.run(scenarios_list, config, wait_duration)
# PVC scenarios
elif scenario_type == "pvc_scenarios":
logging.info("Running PVC scenario")
pvc_scenario.run(scenarios_list, config)
iteration += 1
logging.info("")
# Capture the end time
end_time = int(time.time())
# Capture metrics for the run
if capture_metrics:
logging.info("Capturing metrics")
kube_burner.setup(kube_burner_url)
kube_burner.scrape_metrics(
distribution,
run_uuid,
prometheus_url,
prometheus_bearer_token,
start_time,
end_time,
config_path,
metrics_profile,
)
# Check for the alerts specified
if enable_alerts:
logging.info("Alerts checking is enabled")
kube_burner.setup(kube_burner_url)
if alert_profile:
kube_burner.alerts(
distribution, prometheus_url, prometheus_bearer_token, start_time, end_time, alert_profile,
)
else:
logging.error("Alert profile is not defined")
sys.exit(1)
if litmus_uninstall and litmus_installed:
common_litmus.delete_chaos(litmus_namespace)
common_litmus.delete_chaos_experiments(litmus_namespace)
common_litmus.uninstall_litmus(litmus_version, litmus_namespace)
if failed_post_scenarios:
logging.error("Post scenarios are still failing at the end of all iterations")
sys.exit(1)
run_dir = os.getcwd() + "/kraken.report"
logging.info(
"Successfully finished running Kraken. UUID for the run: %s. Report generated at %s. Exiting"
% (run_uuid, run_dir)
)
else:
logging.error("Cannot find a config at %s, please check" % (cfg))
sys.exit(1)
if __name__ == "__main__":
# Initialize the parser to read the config
parser = optparse.OptionParser()
parser.add_option(
"-c", "--config", dest="cfg", help="config location", default="config/config.yaml",
)
(options, args) = parser.parse_args()
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[logging.FileHandler("kraken.report", mode="w"), logging.StreamHandler()],
)
if options.cfg is None:
logging.error("Please check if you have passed the config")
sys.exit(1)
else:
main(options.cfg)
|
the-stack_0_23576 | """
Pool distributes tasks to the available processors using FIFO scheduling. It
works like a map-reduce architecture: it maps the input across the different
processors and collects the output from all of them. It waits for all the
tasks to finish and then returns the output as a list. Only the processes
currently executing are held in memory; non-executing processes are kept out
of memory.

Process puts all the processes in memory and schedules execution using a FIFO
policy. When a process is suspended, it is pre-empted and a new process is
scheduled for execution.
from multiprocessing import Pool
from multiprocessing import Process
import os
def f1(x): # for Pool
return x*x
def info(title):
print(title)
print('module name:', __name__)
print('parent process:', os.getppid())
print('process id:', os.getpid())
def f2(name): # for Process
info('function f2')
print('hello', name)
if __name__ == '__main__':
print("part 1 Pool")
with Pool(5) as p1:
print(p1.map(f1, [10, 11, 12]))
print()
print("part 2 Process")
info('main line')
p2 = Process(target=f2, args=('bob',))
p2.start()
p2.join()
"""
part 1 Pool
[100, 121, 144]
part 2 Process
main line
module name: __main__
parent process: 5
process id: 2706
function f2
module name: __main__
parent process: 2706
process id: 2715
hello bob
"""
|
the-stack_0_23579 | #!/usr/bin/env python3
from __future__ import print_function
import os
import pickle
import numpy as np
import torch
import torch.utils.data as data
from learn2learn.data.utils import download_file_from_google_drive, download_file
def download_pkl(google_drive_id, data_root, mode):
filename = 'mini-imagenet-cache-' + mode
file_path = os.path.join(data_root, filename)
if not os.path.exists(file_path + '.pkl'):
print('Downloading:', file_path + '.pkl')
download_file_from_google_drive(google_drive_id, file_path + '.pkl')
else:
print("Data was already downloaded")
def index_classes(items):
idx = {}
for i in items:
if (i not in idx):
idx[i] = len(idx)
return idx
class MiniImagenet(data.Dataset):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/vision/datasets/mini_imagenet.py)
**Description**
The *mini*-ImageNet dataset was originally introduced by Vinyals et al., 2016.
It consists of 60'000 colour images of sizes 84x84 pixels.
The dataset is divided in 3 splits of 64 training, 16 validation, and 20 testing classes each containing 600 examples.
The classes are sampled from the ImageNet dataset, and we use the splits from Ravi & Larochelle, 2017.
**References**
1. Vinyals et al. 2016. “Matching Networks for One Shot Learning.” NeurIPS.
2. Ravi and Larochelle. 2017. “Optimization as a Model for Few-Shot Learning.” ICLR.
**Arguments**
* **root** (str) - Path to download the data.
* **mode** (str, *optional*, default='train') - Which split to use.
Must be 'train', 'validation', or 'test'.
* **transform** (Transform, *optional*, default=None) - Input pre-processing.
* **target_transform** (Transform, *optional*, default=None) - Target pre-processing.
* **download** (bool, *optional*, default=False) - Download the dataset if it's not available.
**Example**
~~~python
train_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='train')
train_dataset = l2l.data.MetaDataset(train_dataset)
train_generator = l2l.data.TaskGenerator(dataset=train_dataset, ways=ways)
~~~
"""
def __init__(
self,
root,
mode='train',
transform=None,
target_transform=None,
download=False,
):
super(MiniImagenet, self).__init__()
self.root = os.path.expanduser(root)
if not os.path.exists(self.root):
os.mkdir(self.root)
self.transform = transform
self.target_transform = target_transform
self.mode = mode
self._bookkeeping_path = os.path.join(self.root, 'mini-imagenet-bookkeeping-' + mode + '.pkl')
if self.mode == 'test':
google_drive_file_id = '1wpmY-hmiJUUlRBkO9ZDCXAcIpHEFdOhD'
dropbox_file_link = 'https://www.dropbox.com/s/ye9jeb5tyz0x01b/mini-imagenet-cache-test.pkl?dl=1'
elif self.mode == 'train':
google_drive_file_id = '1I3itTXpXxGV68olxM5roceUMG8itH9Xj'
dropbox_file_link = 'https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1'
elif self.mode == 'validation':
google_drive_file_id = '1KY5e491bkLFqJDp0-UWou3463Mo8AOco'
dropbox_file_link = 'https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1'
else:
raise ValueError('Needs to be train, test or validation')
pickle_file = os.path.join(self.root, 'mini-imagenet-cache-' + mode + '.pkl')
try:
if not self._check_exists() and download:
print('Downloading mini-ImageNet --', mode)
download_pkl(google_drive_file_id, self.root, mode)
with open(pickle_file, 'rb') as f:
self.data = pickle.load(f)
except pickle.UnpicklingError:
if not self._check_exists() and download:
print('Download failed. Re-trying mini-ImageNet --', mode)
download_file(dropbox_file_link, pickle_file)
with open(pickle_file, 'rb') as f:
self.data = pickle.load(f)
self.x = torch.from_numpy(self.data["image_data"]).permute(0, 3, 1, 2).float()
self.y = np.ones(len(self.x))
# TODO Remove index_classes from here
self.class_idx = index_classes(self.data['class_dict'].keys())
for class_name, idxs in self.data['class_dict'].items():
for idx in idxs:
self.y[idx] = self.class_idx[class_name]
def __getitem__(self, idx):
data = self.x[idx]
if self.transform:
data = self.transform(data)
return data, self.y[idx]
def __len__(self):
return len(self.x)
def _check_exists(self):
return os.path.exists(os.path.join(self.root, 'mini-imagenet-cache-' + self.mode + '.pkl'))
if __name__ == '__main__':
mi = MiniImagenet(root='./data', download=True)
__import__('pdb').set_trace()
|
the-stack_0_23580 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import socket
import sys
from paramiko.py3compat import u
# windows does not have termios...
try:
import termios
import tty
has_termios = True
except ImportError:
has_termios = False
def interactive_shell(chan):
if has_termios:
posix_shell(chan)
else:
windows_shell(chan)
def posix_shell(chan):
import select
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
while True:
r, w, e = select.select([chan, sys.stdin], [], [])
if chan in r:
try:
x = u(chan.recv(1024))
if len(x) == 0:
sys.stdout.write("\r\n*** EOF\r\n")
break
sys.stdout.write(x)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = sys.stdin.read(1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
# thanks to Mike Looijmans for this code
def windows_shell(chan):
import threading
sys.stdout.write(
"Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n"
)
def writeall(sock):
while True:
data = sock.recv(256)
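            # note: recv() returns bytes; on Python 3 this would need to be decoded
            # (e.g. with u(data), as posix_shell does) before writing to sys.stdout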
if not data:
sys.stdout.write("\r\n*** EOF ***\r\n\r\n")
sys.stdout.flush()
break
sys.stdout.write(data)
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(chan,))
writer.start()
try:
while True:
d = sys.stdin.read(1)
if not d:
break
chan.send(d)
except EOFError:
# user hit ^Z or F6
pass
|
the-stack_0_23582 | from datetime import (
datetime,
time,
)
from functools import partial
import os
from pathlib import Path
from urllib.error import URLError
from zipfile import BadZipFile
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
from pandas.tests.io.excel import xlrd_version
from pandas.util.version import Version
read_ext_params = [".xls", ".xlsx", ".xlsm", ".xlsb", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
# xlrd and openpyxl, so catch those here
pytest.param(
"xlrd",
marks=[
td.skip_if_no("xlrd"),
],
),
pytest.param(
"openpyxl",
marks=[
td.skip_if_no("openpyxl"),
pytest.mark.filterwarnings("ignore:.*html argument"),
],
),
pytest.param(
None,
marks=[
td.skip_if_no("xlrd"),
],
),
pytest.param("pyxlsb", marks=td.skip_if_no("pyxlsb")),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
"""
Filter out invalid (engine, ext) pairs instead of skipping, as that
produces 500+ pytest.skips.
"""
engine = engine.values[0]
if engine == "openpyxl" and read_ext == ".xls":
return False
if engine == "odf" and read_ext != ".ods":
return False
if read_ext == ".ods" and engine != "odf":
return False
if engine == "pyxlsb" and read_ext != ".xlsb":
return False
if read_ext == ".xlsb" and engine != "pyxlsb":
return False
if (
engine == "xlrd"
and xlrd_version is not None
and xlrd_version >= Version("2")
and read_ext != ".xls"
):
return False
return True
def _transfer_marks(engine, read_ext):
"""
engine gives us a pytest.param object with some marks, read_ext is just
a string. We need to generate a new pytest.param inheriting the marks.
"""
values = engine.values + (read_ext,)
new_param = pytest.param(values, marks=engine.marks)
return new_param
@pytest.fixture(
params=[
_transfer_marks(eng, ext)
for eng in engine_params
for ext in read_ext_params
if _is_valid_engine_ext_pair(eng, ext)
],
)
def engine_and_read_ext(request):
"""
Fixture for Excel reader engine and read_ext, only including valid pairs.
"""
return request.param
@pytest.fixture
def engine(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return engine
@pytest.fixture
def read_ext(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return read_ext
class TestReaders:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
def test_engine_used(self, read_ext, engine, monkeypatch):
# GH 38884
def parser(self, *args, **kwargs):
return self.engine
monkeypatch.setattr(pd.ExcelFile, "parse", parser)
expected_defaults = {
"xlsx": "openpyxl",
"xlsm": "openpyxl",
"xlsb": "pyxlsb",
"xls": "xlrd",
"ods": "odf",
}
with open("test1" + read_ext, "rb") as f:
result = pd.read_excel(f)
if engine is not None:
expected = engine
else:
expected = expected_defaults[read_ext[1:]]
assert result == expected
def test_usecols_int(self, read_ext):
# usecols as int
msg = "Passing an integer for `usecols`"
with pytest.raises(ValueError, match=msg):
pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3
)
# usecols as int
with pytest.raises(ValueError, match=msg):
pd.read_excel(
"test1" + read_ext,
sheet_name="Sheet2",
skiprows=[1],
index_col=0,
usecols=3,
)
def test_usecols_list(self, request, read_ext, df_ref):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=[0, 2, 3]
)
df2 = pd.read_excel(
"test1" + read_ext,
sheet_name="Sheet2",
skiprows=[1],
index_col=0,
usecols=[0, 2, 3],
)
        # TODO add index to xls file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, request, read_ext, df_ref):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A:D"
)
df3 = pd.read_excel(
"test1" + read_ext,
sheet_name="Sheet2",
skiprows=[1],
index_col=0,
usecols="A:D",
)
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C,D"
)
df3 = pd.read_excel(
"test1" + read_ext,
sheet_name="Sheet2",
skiprows=[1],
index_col=0,
usecols="A,C,D",
)
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C:D"
)
df3 = pd.read_excel(
"test1" + read_ext,
sheet_name="Sheet2",
skiprows=[1],
index_col=0,
usecols="A,C:D",
)
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(
self, request, read_ext, usecols, df_ref
):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=usecols
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["B", "D"]]
expected.index = range(len(expected))
result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, request, read_ext, df_ref):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
expected = df_ref
result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, request, read_ext, df_ref):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,D:E"
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, read_ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols="D:E1")
def test_index_col_label_error(self, read_ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
pd.read_excel(
"test1" + read_ext,
sheet_name="Sheet1",
index_col=["A"],
usecols=["A", "C"],
)
def test_index_col_empty(self, read_ext):
# see gh-9208
result = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet3", index_col=["A", "B", "C"]
)
expected = DataFrame(
columns=["D", "E", "F"],
index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, 2])
def test_index_col_with_unnamed(self, read_ext, index_col):
# see gh-18792
result = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet4", index_col=index_col
)
expected = DataFrame(
[["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
)
if index_col:
expected = expected.set_index(expected.columns[index_col])
tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, read_ext):
msg = (
"Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]"
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E"])
def test_usecols_wrong_type(self, read_ext):
msg = (
"'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable."
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, read_ext):
parsed = pd.read_excel("test2" + read_ext, sheet_name="Sheet1")
expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, request, read_ext):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
parsed = pd.read_excel("test3" + read_ext, sheet_name="Sheet1")
expected = DataFrame([[np.nan]], columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self, request, read_ext, df_ref):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
df1 = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
df2 = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet2", skiprows=[1], index_col=0
)
# TODO add index to file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
df3 = pd.read_excel(
"test1" + read_ext, sheet_name="Sheet1", index_col=0, skipfooter=1
)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, request, read_ext):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
expected = DataFrame.from_dict(
{
"IntCol": [1, 2, -3, 4, 0],
"FloatCol": [1.25, 2.25, 1.83, 1.92, 0.0000000005],
"BoolCol": [True, False, True, True, False],
"StrCol": [1, 2, 3, 4, 5],
# GH5394 - this is why convert_float isn't vectorized
"Str2Col": ["a", 3, "c", "d", "e"],
"DateCol": [
datetime(2013, 10, 30),
datetime(2013, 10, 31),
datetime(1905, 1, 1),
datetime(2013, 12, 14),
datetime(2015, 3, 14),
],
},
)
basename = "test_types"
# should read in correctly and infer types
actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1")
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
with tm.assert_produces_warning(
FutureWarning,
match="convert_float is deprecated",
raise_on_extra_warnings=False,
):
# raise_on_extra_warnings because xlrd raises a PendingDeprecationWarning
# on database job Linux_py37_IO (ci/deps/actions-37-db.yaml)
# See GH#41176
actual = pd.read_excel(
basename + read_ext, sheet_name="Sheet1", convert_float=False
)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = pd.read_excel(
basename + read_ext, sheet_name="Sheet1", index_col=icol
)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext, sheet_name="Sheet1", converters={"StrCol": str}
)
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
with tm.assert_produces_warning(
FutureWarning,
match="convert_float is deprecated",
raise_on_extra_warnings=False,
):
# raise_on_extra_warnings because xlrd raises a PendingDeprecationWarning
# on database job Linux_py37_IO (ci/deps/actions-37-db.yaml)
# See GH#41176
actual = pd.read_excel(
basename + read_ext,
sheet_name="Sheet1",
convert_float=False,
converters={"StrCol": str},
)
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, read_ext):
basename = "test_converters"
expected = DataFrame.from_dict(
{
"IntCol": [1, 2, -3, -1000, 0],
"FloatCol": [12.5, np.nan, 18.3, 19.2, 0.000000005],
"BoolCol": ["Found", "Found", "Found", "Not found", "Found"],
"StrCol": ["1", np.nan, "3", "4", "5"],
}
)
converters = {
"IntCol": lambda x: int(x) if x != "" else -1000,
"FloatCol": lambda x: 10 * x if x else np.nan,
2: lambda x: "Found" if x != "" else "Not found",
3: lambda x: str(x) if x else "",
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = pd.read_excel(
basename + read_ext, sheet_name="Sheet1", converters=converters
)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, read_ext):
# GH 8212
basename = "testdtype"
actual = pd.read_excel(basename + read_ext)
expected = DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
).reindex(columns=["a", "b", "c", "d"])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str}
)
expected["a"] = expected["a"].astype("float64")
expected["b"] = expected["b"].astype("float32")
expected["c"] = ["001", "002", "003", "004"]
tm.assert_frame_equal(actual, expected)
msg = "Unable to convert column d to type int64"
with pytest.raises(ValueError, match=msg):
pd.read_excel(basename + read_ext, dtype={"d": "int64"})
@pytest.mark.parametrize(
"dtype,expected",
[
(
None,
DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
),
),
(
{"a": "float64", "b": "float32", "c": str, "d": str},
DataFrame(
{
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"],
}
),
),
],
)
def test_reader_dtype_str(self, read_ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)])
def test_dtype_mangle_dup_cols(self, read_ext, dtypes, exp_value):
# GH#35211
basename = "df_mangle_dup_col_dtypes"
dtype_dict = {"a": str, **dtypes}
dtype_dict_copy = dtype_dict.copy()
# GH#42462
result = pd.read_excel(basename + read_ext, dtype=dtype_dict)
expected = DataFrame({"a": ["1"], "a.1": [exp_value]})
assert dtype_dict == dtype_dict_copy, "dtype dict changed"
tm.assert_frame_equal(result, expected)
def test_reader_spaces(self, read_ext):
# see gh-32207
basename = "test_spaces"
actual = pd.read_excel(basename + read_ext)
expected = DataFrame(
{
"testcol": [
"this is great",
"4 spaces",
"1 trailing ",
" 1 leading",
"2 spaces multiple times",
]
}
)
tm.assert_frame_equal(actual, expected)
# gh-36122, gh-35802
@pytest.mark.parametrize(
"basename,expected",
[
("gh-35802", DataFrame({"COLUMN": ["Test (1)"]})),
("gh-36122", DataFrame(columns=["got 2nd sa"])),
],
)
def test_read_excel_ods_nested_xml(self, engine, read_ext, basename, expected):
# see gh-35802
if engine != "odf":
pytest.skip(f"Skipped for engine: {engine}")
actual = pd.read_excel(basename + read_ext)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, read_ext):
# Test reading all sheet names by setting sheet_name to None,
# Ensure a dict is returned.
# See PR #9450
basename = "test_multisheet"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ["Charlie", "Alpha", "Beta"]
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, read_ext):
# Test reading specific sheet names by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = "test_multisheet"
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, "Charlie", "Charlie"]
dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, read_ext):
# Test reading all sheet names by setting sheet_name to None,
# In the case where some sheets are blank.
# Issue #11711
basename = "blank_with_header"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
expected_keys = ["Sheet1", "Sheet2", "Sheet3"]
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, read_ext):
actual = pd.read_excel("blank" + read_ext, sheet_name="Sheet1")
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, read_ext):
expected = DataFrame(columns=["col_1", "col_2"])
actual = pd.read_excel("blank_with_header" + read_ext, sheet_name="Sheet1")
tm.assert_frame_equal(actual, expected)
def test_date_conversion_overflow(self, request, engine, read_ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
if engine == "pyxlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
expected = DataFrame(
[
[pd.Timestamp("2016-03-12"), "Marc Johnson"],
[pd.Timestamp("2016-03-16"), "Jack Black"],
[1e20, "Timothy Brown"],
],
columns=["DateColWithBigInt", "StringCol"],
)
if engine == "openpyxl":
request.node.add_marker(
pytest.mark.xfail(reason="Maybe not supported by openpyxl")
)
if engine is None and read_ext in (".xlsx", ".xlsm"):
# GH 35029
request.node.add_marker(
pytest.mark.xfail(reason="Defaults to openpyxl, maybe not supported")
)
result = pd.read_excel("testdateoverflow" + read_ext)
tm.assert_frame_equal(result, expected)
def test_sheet_name(self, request, read_ext, df_ref):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
filename = "test1"
sheet_name = "Sheet1"
df1 = pd.read_excel(
filename + read_ext, sheet_name=sheet_name, index_col=0
) # doc
df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_excel_read_buffer(self, read_ext):
pth = "test1" + read_ext
expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0)
with open(pth, "rb") as f:
actual = pd.read_excel(f, sheet_name="Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_bad_engine_raises(self):
bad_engine = "foo"
with pytest.raises(ValueError, match="Unknown engine: foo"):
pd.read_excel("", engine=bad_engine)
@pytest.mark.parametrize(
"sheet_name",
[3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]],
)
def test_bad_sheetname_raises(self, read_ext, sheet_name):
# GH 39250
msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found"
with pytest.raises(ValueError, match=msg):
pd.read_excel("blank" + read_ext, sheet_name=sheet_name)
def test_missing_file_raises(self, read_ext):
bad_file = f"foo{read_ext}"
# CI tests with other languages, translates to "No such file or directory"
match = r"(No such file or directory|没有那个文件或目录|File o directory non esistente)"
with pytest.raises(FileNotFoundError, match=match):
pd.read_excel(bad_file)
def test_corrupt_bytes_raises(self, engine):
bad_stream = b"foo"
if engine is None:
error = ValueError
msg = (
"Excel file format cannot be determined, you must "
"specify an engine manually."
)
elif engine == "xlrd":
from xlrd import XLRDError
error = XLRDError
msg = (
"Unsupported format, or corrupt file: Expected BOF "
"record; found b'foo'"
)
else:
error = BadZipFile
msg = "File is not a zip file"
with pytest.raises(error, match=msg):
pd.read_excel(bad_stream)
@tm.network
def test_read_from_http_url(self, read_ext):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/main/"
"pandas/tests/io/data/excel/test1" + read_ext
)
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_not_us_locale
def test_read_from_s3_url(self, read_ext, s3_resource, s3so):
# Bucket "pandas-test" created in tests/io/conftest.py
with open("test1" + read_ext, "rb") as f:
s3_resource.Bucket("pandas-test").put_object(Key="test1" + read_ext, Body=f)
url = "s3://pandas-test/test1" + read_ext
url_table = pd.read_excel(url, storage_options=s3so)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
def test_read_from_s3_object(self, read_ext, s3_resource, s3so):
# GH 38788
# Bucket "pandas-test" created in tests/io/conftest.py
with open("test1" + read_ext, "rb") as f:
s3_resource.Bucket("pandas-test").put_object(Key="test1" + read_ext, Body=f)
import s3fs
s3 = s3fs.S3FileSystem(**s3so)
with s3.open("s3://pandas-test/test1" + read_ext) as f:
url_table = pd.read_excel(f)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
def test_read_from_file_url(self, read_ext, datapath):
# FILE
localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext)
local_table = pd.read_excel(localtable)
try:
url_table = pd.read_excel("file://localhost/" + localtable)
except URLError:
# fails on some systems
import platform
platform_info = " ".join(platform.uname()).strip()
pytest.skip(f"failing on {platform_info}")
tm.assert_frame_equal(url_table, local_table)
def test_read_from_pathlib_path(self, read_ext):
# GH12655
from pathlib import Path
str_path = "test1" + read_ext
expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0)
path_obj = Path("test1" + read_ext)
actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("py.path")
@td.check_file_leaks
def test_read_from_py_localpath(self, read_ext):
# GH12655
from py.path import local as LocalPath
str_path = os.path.join("test1" + read_ext)
expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0)
path_obj = LocalPath().join("test1" + read_ext)
actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
@td.check_file_leaks
def test_close_from_py_localpath(self, read_ext):
# GH31467
str_path = os.path.join("test1" + read_ext)
with open(str_path, "rb") as f:
x = pd.read_excel(f, sheet_name="Sheet1", index_col=0)
del x
# should not throw an exception because the passed file was closed
f.read()
def test_reader_seconds(self, request, engine, read_ext):
if engine == "pyxlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
# Test reading times with and without milliseconds. GH5945.
expected = DataFrame.from_dict(
{
"Time": [
time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54),
]
}
)
actual = pd.read_excel("times_1900" + read_ext, sheet_name="Sheet1")
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1")
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex(self, request, read_ext):
# see gh-4679
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = "testmultiindex" + read_ext
# "mi_column" sheet
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=mi,
)
actual = pd.read_excel(
mi_file, sheet_name="mi_column", header=[0, 1], index_col=0
)
tm.assert_frame_equal(actual, expected)
# "mi_index" sheet
expected.index = mi
expected.columns = ["a", "b", "c", "d"]
actual = pd.read_excel(mi_file, sheet_name="mi_index", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "both" sheet
expected.columns = mi
actual = pd.read_excel(
mi_file, sheet_name="both", index_col=[0, 1], header=[0, 1]
)
tm.assert_frame_equal(actual, expected, check_names=False)
# "mi_index_name" sheet
expected.columns = ["a", "b", "c", "d"]
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(mi_file, sheet_name="mi_index_name", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# "mi_column_name" sheet
expected.index = list(range(4))
expected.columns = mi.set_names(["c1", "c2"])
actual = pd.read_excel(
mi_file, sheet_name="mi_column_name", header=[0, 1], index_col=0
)
tm.assert_frame_equal(actual, expected)
# see gh-11317
# "name_with_int" sheet
expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"])
actual = pd.read_excel(
mi_file, sheet_name="name_with_int", index_col=0, header=[0, 1]
)
tm.assert_frame_equal(actual, expected)
# "both_name" sheet
expected.columns = mi.set_names(["c1", "c2"])
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(
mi_file, sheet_name="both_name", index_col=[0, 1], header=[0, 1]
)
tm.assert_frame_equal(actual, expected)
# "both_skiprows" sheet
actual = pd.read_excel(
mi_file,
sheet_name="both_name_skiprows",
index_col=[0, 1],
header=[0, 1],
skiprows=2,
)
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
"sheet_name,idx_lvl2",
[
("both_name_blank_after_mi_name", [np.nan, "b", "a", "b"]),
("both_name_multiple_blanks", [np.nan] * 4),
],
)
def test_read_excel_multiindex_blank_after_name(
self, request, read_ext, sheet_name, idx_lvl2
):
# GH34673
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb (GH4679"
)
)
mi_file = "testmultiindex" + read_ext
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]], names=["c1", "c2"])
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=mi,
index=MultiIndex.from_arrays(
(["foo", "foo", "bar", "bar"], idx_lvl2),
names=["ilvl1", "ilvl2"],
),
)
result = pd.read_excel(
mi_file,
sheet_name=sheet_name,
index_col=[0, 1],
header=[0, 1],
)
tm.assert_frame_equal(result, expected)
def test_read_excel_multiindex_header_only(self, read_ext):
# see gh-11733.
#
# Don't try to parse a header name if there isn't one.
mi_file = "testmultiindex" + read_ext
result = pd.read_excel(mi_file, sheet_name="index_col_none", header=[0, 1])
exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
tm.assert_frame_equal(result, expected)
def test_excel_old_index_format(self, read_ext):
# see gh-4679
filename = "test_index_name_pre17" + read_ext
# We detect headers to determine if index names exist, so
# that "index" name in the "names" version of the data will
# now be interpreted as rows that include null data.
data = np.array(
[
[None, None, None, None, None],
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
]
)
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(
levels=[
["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R1", "R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
],
codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
names=[None, None],
)
si = Index(
["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None
)
expected = DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(filename, sheet_name="single_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(filename, sheet_name="multi_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# The analogous versions of the "names" version data
# where there are explicitly no names for the indices.
data = np.array(
[
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
]
)
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(
levels=[
["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
],
codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
names=[None, None],
)
si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None)
expected = DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(filename, sheet_name="single_no_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(filename, sheet_name="multi_no_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self, read_ext):
# GH 6114
msg = "Passing a bool to header is invalid"
for arg in [True, False]:
with pytest.raises(TypeError, match=msg):
pd.read_excel("test1" + read_ext, header=arg)
def test_read_excel_skiprows(self, request, read_ext):
# GH 4903
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
actual = pd.read_excel(
"testskiprows" + read_ext, sheet_name="skiprows_list", skiprows=[0, 2]
)
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=["a", "b", "c", "d"],
)
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
"testskiprows" + read_ext,
sheet_name="skiprows_list",
skiprows=np.array([0, 2]),
)
tm.assert_frame_equal(actual, expected)
# GH36435
actual = pd.read_excel(
"testskiprows" + read_ext,
sheet_name="skiprows_list",
skiprows=lambda x: x in [0, 2],
)
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
"testskiprows" + read_ext,
sheet_name="skiprows_list",
skiprows=3,
names=["a", "b", "c", "d"],
)
expected = DataFrame(
[
# [1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=["a", "b", "c", "d"],
)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, read_ext):
# GH 16645
num_rows_to_pull = 5
actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
expected = pd.read_excel("test1" + read_ext)
expected = expected[:num_rows_to_pull]
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_greater_than_nrows_in_file(self, read_ext):
# GH 16645
expected = pd.read_excel("test1" + read_ext)
num_records_in_file = len(expected)
num_rows_to_pull = num_records_in_file + 10
actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_non_integer_parameter(self, read_ext):
# GH 16645
msg = "'nrows' must be an integer >=0"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, nrows="5")
def test_read_excel_squeeze(self, read_ext):
# GH 12157
f = "test_squeeze" + read_ext
with tm.assert_produces_warning(
FutureWarning,
match="The squeeze argument has been deprecated "
"and will be removed in a future version. "
'Append .squeeze\\("columns"\\) to the call to squeeze.\n\n',
):
actual = pd.read_excel(
f, sheet_name="two_columns", index_col=0, squeeze=True
)
expected = Series([2, 3, 4], [4, 5, 6], name="b")
expected.index.name = "a"
tm.assert_series_equal(actual, expected)
actual = pd.read_excel(f, sheet_name="two_columns", squeeze=True)
expected = DataFrame({"a": [4, 5, 6], "b": [2, 3, 4]})
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(f, sheet_name="one_column", squeeze=True)
expected = Series([1, 2, 3], name="a")
tm.assert_series_equal(actual, expected)
def test_deprecated_kwargs(self, read_ext):
with tm.assert_produces_warning(FutureWarning, raise_on_extra_warnings=False):
pd.read_excel("test1" + read_ext, "Sheet1", 0)
pd.read_excel("test1" + read_ext)
def test_no_header_with_list_index_col(self, read_ext):
# GH 31783
file_name = "testmultiindex" + read_ext
data = [("B", "B"), ("key", "val"), (3, 4), (3, 4)]
idx = MultiIndex.from_tuples(
[("A", "A"), ("key", "val"), (1, 2), (1, 2)], names=(0, 1)
)
expected = DataFrame(data, index=idx, columns=(2, 3))
result = pd.read_excel(
file_name, sheet_name="index_col_none", index_col=[0, 1], header=None
)
tm.assert_frame_equal(expected, result)
def test_one_col_noskip_blank_line(self, read_ext):
# GH 39808
file_name = "one_col_blank_line" + read_ext
data = [0.5, np.nan, 1, 2]
expected = DataFrame(data, columns=["numbers"])
result = pd.read_excel(file_name)
tm.assert_frame_equal(result, expected)
def test_multiheader_two_blank_lines(self, read_ext):
# GH 40442
file_name = "testmultiindex" + read_ext
columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")])
data = [[np.nan, np.nan], [np.nan, np.nan], [1, 3], [2, 4]]
expected = DataFrame(data, columns=columns)
result = pd.read_excel(
file_name, sheet_name="mi_column_empty_rows", header=[0, 1]
)
tm.assert_frame_equal(result, expected)
def test_trailing_blanks(self, read_ext):
"""
Sheets can contain blank cells with no data. Some of our readers
were including those cells, creating many empty rows and columns
"""
file_name = "trailing_blanks" + read_ext
result = pd.read_excel(file_name)
assert result.shape == (3, 3)
def test_ignore_chartsheets_by_str(self, request, engine, read_ext):
# GH 41448
if engine == "odf":
pytest.skip("chartsheets do not exist in the ODF format")
if engine == "pyxlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="pyxlsb can't distinguish chartsheets from worksheets"
)
)
with pytest.raises(ValueError, match="Worksheet named 'Chart1' not found"):
pd.read_excel("chartsheet" + read_ext, sheet_name="Chart1")
def test_ignore_chartsheets_by_int(self, request, engine, read_ext):
# GH 41448
if engine == "odf":
pytest.skip("chartsheets do not exist in the ODF format")
if engine == "pyxlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="pyxlsb can't distinguish chartsheets from worksheets"
)
)
with pytest.raises(
ValueError, match="Worksheet index 1 is invalid, 1 worksheets found"
):
pd.read_excel("chartsheet" + read_ext, sheet_name=1)
def test_euro_decimal_format(self, read_ext):
# copied from read_csv
result = pd.read_excel("test_decimal" + read_ext, decimal=",", skiprows=1)
expected = DataFrame(
[
[1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
[2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
[3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
],
columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
)
tm.assert_frame_equal(result, expected)
class TestExcelFileRead:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for ExcelFile objects.
"""
func = partial(pd.ExcelFile, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "ExcelFile", func)
def test_engine_used(self, read_ext, engine):
expected_defaults = {
"xlsx": "openpyxl",
"xlsm": "openpyxl",
"xlsb": "pyxlsb",
"xls": "xlrd",
"ods": "odf",
}
with pd.ExcelFile("test1" + read_ext) as excel:
result = excel.engine
if engine is not None:
expected = engine
else:
expected = expected_defaults[read_ext[1:]]
assert result == expected
def test_excel_passes_na(self, read_ext):
with pd.ExcelFile("test4" + read_ext) as excel:
parsed = pd.read_excel(
excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"]
)
expected = DataFrame(
[["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
with pd.ExcelFile("test4" + read_ext) as excel:
parsed = pd.read_excel(
excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"]
)
expected = DataFrame(
[[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
# 13967
with pd.ExcelFile("test5" + read_ext) as excel:
parsed = pd.read_excel(
excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"]
)
expected = DataFrame(
[["1.#QNAN"], [1], ["nan"], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
with pd.ExcelFile("test5" + read_ext) as excel:
parsed = pd.read_excel(
excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"]
)
expected = DataFrame(
[[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize("na_filter", [None, True, False])
def test_excel_passes_na_filter(self, read_ext, na_filter):
# gh-25453
kwargs = {}
if na_filter is not None:
kwargs["na_filter"] = na_filter
with pd.ExcelFile("test5" + read_ext) as excel:
parsed = pd.read_excel(
excel,
sheet_name="Sheet1",
keep_default_na=True,
na_values=["apple"],
**kwargs,
)
if na_filter is False:
expected = [["1.#QNAN"], [1], ["nan"], ["apple"], ["rabbit"]]
else:
expected = [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]]
expected = DataFrame(expected, columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table_sheet_by_index(self, request, read_ext, df_ref):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
with pd.ExcelFile("test1" + read_ext) as excel:
df1 = pd.read_excel(excel, sheet_name=0, index_col=0)
df2 = pd.read_excel(excel, sheet_name=1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
with pd.ExcelFile("test1" + read_ext) as excel:
df1 = excel.parse(0, index_col=0)
df2 = excel.parse(1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
with pd.ExcelFile("test1" + read_ext) as excel:
df3 = pd.read_excel(excel, sheet_name=0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
with pd.ExcelFile("test1" + read_ext) as excel:
df3 = excel.parse(0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_sheet_name(self, request, read_ext, df_ref):
if read_ext == ".xlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
filename = "test1"
sheet_name = "Sheet1"
with pd.ExcelFile(filename + read_ext) as excel:
df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc
with pd.ExcelFile(filename + read_ext) as excel:
df2_parse = excel.parse(index_col=0, sheet_name=sheet_name)
tm.assert_frame_equal(df1_parse, df_ref, check_names=False)
tm.assert_frame_equal(df2_parse, df_ref, check_names=False)
@pytest.mark.parametrize(
"sheet_name",
[3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]],
)
def test_bad_sheetname_raises(self, read_ext, sheet_name):
# GH 39250
msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found"
with pytest.raises(ValueError, match=msg):
with pd.ExcelFile("blank" + read_ext) as excel:
excel.parse(sheet_name=sheet_name)
def test_excel_read_buffer(self, engine, read_ext):
pth = "test1" + read_ext
expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0, engine=engine)
with open(pth, "rb") as f:
with pd.ExcelFile(f) as xls:
actual = pd.read_excel(xls, sheet_name="Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_closes_file(self, engine, read_ext):
with open("test1" + read_ext, "rb") as f:
with pd.ExcelFile(f) as xlsx:
# parses okay
pd.read_excel(xlsx, sheet_name="Sheet1", index_col=0, engine=engine)
assert f.closed
def test_conflicting_excel_engines(self, read_ext):
# GH 26566
msg = "Engine should not be specified when passing an ExcelFile"
with pd.ExcelFile("test1" + read_ext) as xl:
with pytest.raises(ValueError, match=msg):
pd.read_excel(xl, engine="foo")
def test_excel_read_binary(self, engine, read_ext):
# GH 15914
expected = pd.read_excel("test1" + read_ext, engine=engine)
with open("test1" + read_ext, "rb") as f:
data = f.read()
actual = pd.read_excel(data, engine=engine)
tm.assert_frame_equal(expected, actual)
def test_excel_read_binary_via_read_excel(self, read_ext, engine):
# GH 38424
with open("test1" + read_ext, "rb") as f:
result = pd.read_excel(f)
expected = pd.read_excel("test1" + read_ext, engine=engine)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(
xlrd_version is not None and xlrd_version >= Version("2"),
reason="xlrd no longer supports xlsx",
)
def test_excel_high_surrogate(self):
# GH 23809
expected = DataFrame(["\udc88"], columns=["Column1"])
# should not produce a segmentation violation
actual = pd.read_excel("high_surrogate.xlsx", engine="xlrd")
tm.assert_frame_equal(expected, actual)
@pytest.mark.parametrize("filename", ["df_empty.xlsx", "df_equals.xlsx"])
def test_header_with_index_col(self, filename):
# GH 33476
idx = Index(["Z"], name="I2")
cols = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"])
expected = DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64")
result = pd.read_excel(
filename, sheet_name="Sheet1", index_col=0, header=[0, 1]
)
tm.assert_frame_equal(expected, result)
def test_read_datetime_multiindex(self, request, engine, read_ext):
# GH 34748
if engine == "pyxlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="Sheets containing datetimes not supported by pyxlsb"
)
)
f = "test_datetime_mi" + read_ext
with pd.ExcelFile(f) as excel:
actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine)
expected_column_index = MultiIndex.from_tuples(
[(pd.to_datetime("02/29/2020"), pd.to_datetime("03/01/2020"))],
names=[
pd.to_datetime("02/29/2020").to_pydatetime(),
pd.to_datetime("03/01/2020").to_pydatetime(),
],
)
expected = DataFrame([], columns=expected_column_index)
tm.assert_frame_equal(expected, actual)
def test_engine_invalid_option(self, read_ext):
# read_ext includes the '.' hence the weird formatting
with pytest.raises(ValueError, match="Value must be one of *"):
with pd.option_context(f"io.excel{read_ext}.reader", "abc"):
pass
def test_ignore_chartsheets(self, request, engine, read_ext):
# GH 41448
if engine == "odf":
pytest.skip("chartsheets do not exist in the ODF format")
if engine == "pyxlsb":
request.node.add_marker(
pytest.mark.xfail(
reason="pyxlsb can't distinguish chartsheets from worksheets"
)
)
with pd.ExcelFile("chartsheet" + read_ext) as excel:
assert excel.sheet_names == ["Sheet1"]
def test_corrupt_files_closed(self, engine, read_ext):
# GH41778
errors = (BadZipFile,)
if engine is None:
pytest.skip(f"Invalid test for engine={engine}")
elif engine == "xlrd":
import xlrd
errors = (BadZipFile, xlrd.biffh.XLRDError)
with tm.ensure_clean(f"corrupt{read_ext}") as file:
Path(file).write_text("corrupt")
with tm.assert_produces_warning(False):
try:
pd.ExcelFile(file, engine=engine)
except errors:
pass
|
the-stack_0_23583 | from typing import List, Dict
class TwoSum:
"""
Given an array of integers ``nums`` and an integer ``target``, return the indices of the two numbers such that they
add up to ``target``.
Constraints:
    * ``2 <= len(nums) <= 10**4``
    * ``-10**9 <= nums[i] <= 10**9``
    * ``-10**9 <= target <= 10**9``
* Only one valid answer exists
Reference: https://leetcode.com/problems/two-sum/
"""
@staticmethod
def find_indices(nums: List[int], target: int) -> List[int]:
hashmap: Dict[int, int] = {}
for i, n in enumerate(nums):
if n in hashmap:
return [hashmap[n], i]
hashmap[target - n] = i
return []
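# --- Hedged usage sketch (illustrative, not part of the original file) -------
# Exercises the hashmap approach above on the classic LeetCode examples.
if __name__ == "__main__":
    print(TwoSum.find_indices([2, 7, 11, 15], 9))  # -> [0, 1]
    print(TwoSum.find_indices([3, 2, 4], 6))       # -> [1, 2]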
|
the-stack_0_23584 | # Copyright (c) Microsoft Corporation. Licensed under the MIT license.
import requests
import sys
import os
import glob
import operator
import argparse
# subscription_key = ''
def get_token(subscription_key):
fetch_token_url = 'https://southeastasia.api.cognitive.microsoft.com/sts/v1.0/issueToken'
headers = {
'Ocp-Apim-Subscription-Key': subscription_key
}
response = requests.post(fetch_token_url, headers=headers)
access_token = str(response.text)
return access_token
def get_transliteration(vocab, headers):
base_url = 'https://api.cognitive.microsofttranslator.com'
path = '/transliterate?api-version=3.0&language=hi&fromScript=Latn&toScript=Deva'
trans={}
count=0
body=[]
constructed_url = base_url + path
query=''
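    # Batching note (descriptive, added for clarity): each request body carries 10
    # texts of 50 space-separated words (500 words per call). The while-loop steps
    # through the first 7000 vocabulary entries in chunks of 500, which implicitly
    # assumes len(vocab) >= 7000; the trailing for-loop then covers any remainder
    # with bounds checking.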
while(count<=6500):
for i in range(count,(count+500),50):
for j in range(i,i+50):
query += vocab[j] + ' '
body.append({'text' : query.strip()})
query=''
response = requests.post(constructed_url, headers=headers, json=body)
result = response.json()
for j,i in enumerate(result):
trans.update({body[j]['text']:i['text']})
body=[]
count += 500
for i in range(count,len(vocab),50):
for j in range(i,i+50):
if j<len(vocab):
query += vocab[j] + ' '
body.append({'text' : query.strip()})
query=''
response = requests.post(constructed_url, headers=headers, json=body)
result = response.json()
for j,i in enumerate(result):
trans.update({body[j]['text']:i['text']})
return trans
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--subscription_key", default=None, type=str, required=True, help="Azure Subscription key for downloading transliterations")
parser.add_argument("--input_file", default=None, type=str, required=True,
help="The roman hindi words vocabulary ")
args = parser.parse_args()
input_file = args.input_file
subscription_key = args.subscription_key
req_token = get_token(subscription_key)
headers = { 'Accept': 'application/json;text/xml',
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
'Authorization': req_token
}
vocab = []
with open(input_file,'r+') as infile:
con = infile.readlines()
vocab = [x.strip('\n') for x in con]
trans = get_transliteration(vocab, headers)
with open('transliterations.txt','w+') as outfile:
for i in trans.keys():
words=i.split(' ')
deva=trans.get(i).split(' ')
for j,k in enumerate(words):
outfile.write(k + "\t" + deva[j] +"\n")
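# Example invocation (hypothetical file names; the Azure Translator subscription
# key must be valid for the endpoint used above):
#
#   python transliterate.py --subscription_key <AZURE_KEY> --input_file roman_hindi_vocab.txt
#
# Output: transliterations.txt with tab-separated (roman, devanagari) word pairs.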
if __name__ == "__main__":
main() |
the-stack_0_23586 |
import asyncio
import logging
class DiscordHandler(logging.Handler):
"""
    Handler that lets the builtin Python logger log to Discord. A context object must be
    provided so the handler knows which channel to log to. The handler starts a constantly
    running asyncio task, so it is not the most efficient; it is intended primarily for debugging.
"""
def __init__(self, ctx, level=logging.NOTSET, *, loop=None):
"""
Create a new DiscordHandler, pointing to a specific context
:param ctx: Context to use
        :param level: Logging level for this handler; defaults to logging.NOTSET
:param loop: Loop to use, if different from ctx.bot.loop
"""
super().__init__(level)
self.channel = ctx.channel
if loop is None:
loop = ctx.bot.loop
self.loop = loop
self.queue = asyncio.Queue()
self.running = True
self.loop.create_task(self._run_loop())
async def _run_loop(self):
"""
        The primary execution loop. Sends at most one log message per second to avoid
        spamming the Discord API, drawing records from the internal queue.
"""
while self.running:
record = await self.queue.get()
log = self.format(record)
await self.channel.send(log)
await asyncio.sleep(1)
def emit(self, record):
"""
Add a new log to be logged. Places the record into the internal queue, to be removed by the run loop
:param record: Record to log
"""
self.queue.put_nowait(record)
def stop(self):
"""
Stop running this logger immediately, killing the run loop so things can easily be cleaned up
"""
self.running = False
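# --- Hedged usage sketch (illustrative only) ---------------------------------
# One way to attach this handler from inside a discord.ext.commands command;
# the bot prefix, logger name and command name are assumptions.
#
#     from discord.ext import commands
#
#     bot = commands.Bot(command_prefix="!")
#     log = logging.getLogger("my_app")
#
#     @bot.command()
#     async def mirrorlogs(ctx):
#         handler = DiscordHandler(ctx, level=logging.INFO)
#         handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
#         log.addHandler(handler)
#         await ctx.send("Mirroring my_app logs to this channel.")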
|
the-stack_0_23587 | """
next_backend Participant Resource
author: Christopher Fernandez, Lalit Jain
Resource for accessing all participant data related to an experiment
"""
'''
example use:
get all participant responses for an experiment:
curl -X GET http://localhost:8001/api/experiment/[exp_uid]/participants
'''
from StringIO import StringIO
import pandas as pd
from flask import Flask, send_file, request, abort
from flask_restful import Resource, reqparse
import traceback
import json
from io import BytesIO
import zipfile
import next.utils
import next.utils as utils
import next.api.api_util as api_util
from next.api.api_util import APIArgument
from next.api.resource_manager import ResourceManager
from next.database_client.DatabaseAPI import DatabaseAPI
db = DatabaseAPI()
from next.logging_client.LoggerAPI import LoggerAPI
ell = LoggerAPI()
resource_manager = ResourceManager()
# Request parser. Checks that necessary dictionary keys are available in a given resource.
# We rely on learningLib functions to ensure that all necessary arguments are available and parsed.
post_parser = reqparse.RequestParser(argument_class=APIArgument)
# Custom errors for GET and POST verbs on experiment resource
meta_error = {
'ExpDoesNotExistError': {
'message': "No experiment with the specified experiment ID exists.",
'code': 400,
'status':'FAIL'
},
}
meta_success = {
'code': 200,
'status': 'OK'
}
# Participants resource class
class Participants(Resource):
def get(self, exp_uid):
"""
.. http:get:: /experiment/<exp_uid>/participants
Get all participant response data associated with a given exp_uid.
**Example request**:
.. sourcecode:: http
GET /experiment/<exp_uid>/participants HTTP/1.1
Host: next_backend.next.discovery.wisc.edu
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
participant_responses: [participant_responses]
status: {
code: 200,
status: OK,
},
}
:>json all_participant_responses: list of all participant_responses
:statuscode 200: Participants responses successfully returned
:statuscode 400: Participants responses failed to be generated
"""
zip_true = False
if request.args.get('zip'):
try:
zip_true = eval(request.args.get('zip'))
except:
pass
# Get all participants for exp_uid from resource_manager
participant_uids = resource_manager.get_participant_uids(exp_uid)
participant_responses = {}
# Iterate through list of all participants for specified exp_uid
for participant in participant_uids:
response = resource_manager.get_participant_data(participant,
exp_uid)
# Append participant query responses to list
participant_responses[participant] = response
if request.args.get('csv'):
responses = []
for participant in participant_uids:
response = resource_manager.get_participant_data(participant,
exp_uid)
for r in response:
responses += [r]
try:
response_file = parse_responses(responses)
except ValueError as e:
message = str(e)
message += '\n\n' + str(traceback.format_exc())
utils.debug_print(message)
return message
response_file.seek(0)
return send_file(response_file,
attachment_filename='responses.csv',
as_attachment=True)
all_responses = {'participant_responses': participant_responses}
if zip_true:
zip_responses = BytesIO()
with zipfile.ZipFile(zip_responses, 'w') as zf:
zf.writestr('participants.json', json.dumps(all_responses))
zip_responses.seek(0)
return send_file(zip_responses,
attachment_filename='participants.zip',
as_attachment='True')
else:
return api_util.attach_meta(all_responses, meta_success), 200
def parse_responses(responses):
if len(responses) == 0:
raise ValueError('ERROR: responses have not been recorded')
exp_uid = responses[0]['exp_uid']
app_id = resource_manager.get_app_id(exp_uid)
myApp = utils.get_app(app_id, exp_uid, db, ell).myApp
if not hasattr(myApp, 'format_responses'):
raise ValueError('ERROR: myApp.format_responses does not exist for {}'.format(app_id))
r = myApp.format_responses(responses)
if type(r) != list and type(r[0]) != dict:
raise ValueError('ERROR: myApp.format_responses should return a list of dictionaries')
df = pd.DataFrame(r)
str_file = StringIO()
df.to_csv(str_file, encoding='utf-8')
return str_file
|
the-stack_0_23589 |
from InputsConfig import InputsConfig as p
import random
from Transaction import Transaction
from Block import Block
from Event import Event
from Queue import Queue
from Node import Node
from Consensus import Consensus as c
###################################### A class to schedule future events ########################################
class Scheduler:
# ##### Time methods #####
# def PoW_completion_time(hashPower):
# return random.expovariate(hashPower * 1/p.Binterval)
def receive_block_time():
return random.expovariate(1/p.Bdelay)
# ##### Start solving a fresh PoW on top of last block appended #####
# def solve_PoW(miner):
# TOTAL_HASHPOWER = sum([miner.hashPower for miner in p.NODES])
# hashPower = miner.hashPower/TOTAL_HASHPOWER
# return Scheduler.PoW_completion_time(hashPower)
##### Schedule initial events and add them to the event list #####
def initial_events():
        currentTime = 0 # at the start of the simulation, time will be zero
for node in p.NODES:
            if node.hashPower >0: # only if hashPower >0, the node will be eligible for mining
Scheduler.create_block_event(node,currentTime)
##### Schedule a block creation event and add it to the event list #####
def create_block_event(miner,currentTime):
if miner.hashPower > 0:
# blockTime = currentTime + Scheduler.solve_PoW(miner)
blockTime = currentTime + c.PoW(miner)
eventTime = blockTime
eventType = "create_block"
if eventTime <= p.simTime: ##### create the event + add it to the event list #####
# prepare attributes for the event
minerId= miner.id
blockDepth = len(miner.blockchain)
blockId= random.randrange(100000000000)
blockPrev= miner.last_block().id
            block = Block(blockDepth,blockId,blockPrev,blockTime,minerId,[],0,[]) # event content: transactions, uncles and blockSize are not set yet -> they will be set once the event is created
event = Event(eventType,minerId,eventTime,block) # create the event
Queue.add_event(event) # add the event to the queue
##### Schedule block receiving events for all other nodes and add those events to the event list #####
def receive_block_event(event):
miner= event.node
blockDepth = event.block.depth
blockId = event.block.id
blockTrans = event.block.transactions
blockPrev= event.block.previous
bockSize = event.block.size
blockTimestamp = event.time
blockUncles= event.block.uncles
for recipient in p.NODES:
if recipient.id != miner:
receive_block_time = event.time + Scheduler.receive_block_time() # draw time for node i to receive the block
if receive_block_time <= p.simTime:
block = Block(blockDepth,blockId,blockPrev,blockTimestamp,miner,blockTrans,bockSize,blockUncles)
e = Event("receive_block", recipient.id, receive_block_time, block)
Queue.add_event(e)
|
the-stack_0_23590 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
from fairseq import distributed_utils
from fairseq.trainer import Trainer
from fairseq.dataclass.configs import FairseqConfig
try:
from fairseq.model_parallel.megatron.mpu import (
get_data_parallel_rank,
get_data_parallel_world_size,
get_model_parallel_src_rank,
get_cuda_rng_tracker,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class MegatronTrainer(Trainer):
"""Main class for model parallel with data parallel training."""
def __init__(self, cfg: FairseqConfig, task, model, criterion, **kwargs):
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
super().__init__(cfg, task, model, criterion, **kwargs)
def clip_grad_norm(self, clip_norm):
def _aggregate_model_parallel_grad_norm(total_norm):
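            # Each model-parallel rank only sees its shard of the parameters, so the
            # global L2 norm is sqrt(sum over ranks of local_norm**2): square the local
            # norm, all-reduce (sum) it across the model-parallel group, then take the root.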
total_norm = total_norm ** 2
distributed_utils.all_reduce(
total_norm, group=distributed_utils.get_model_parallel_group()
)
total_norm = total_norm ** 0.5
return total_norm
return self.optimizer.clip_grad_norm(
clip_norm,
aggregate_norm_fn=_aggregate_model_parallel_grad_norm,
)
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
extra_state['rng_tracker_states'] \
= get_cuda_rng_tracker().get_states()
super().save_checkpoint(filename, extra_state)
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
extra_state = super().load_checkpoint(filename, reset_optimizer=reset_optimizer, reset_lr_scheduler=reset_lr_scheduler, optimizer_overrides=optimizer_overrides, reset_meters=reset_meters)
if extra_state is not None and 'rng_tracker_states' in extra_state:
get_cuda_rng_tracker().set_states(
extra_state['rng_tracker_states'])
return extra_state
|
the-stack_0_23592 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorlayer as tl
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
from tensorlayer.layers.core import Layer
__all__ = [
'GaussianNoise',
]
class GaussianNoise(Layer):
"""
The :class:`GaussianNoise` class is noise layer that adding noise with
gaussian distribution to the activation.
Parameters
------------
mean : float
The mean. Default is 0.0.
stddev : float
The standard deviation. Default is 1.0.
is_always : boolean
        If True, add noise in both training and evaluation modes. If False, skip this layer in evaluation mode.
seed : int or None
The seed for random noise.
name : str
A unique layer name.
Examples
--------
With TensorLayer
>>> net = tl.layers.Input([64, 200], name='input')
>>> net = tl.layers.Dense(n_units=100, act=tf.nn.relu, name='dense')(net)
>>> gaussianlayer = tl.layers.GaussianNoise(name='gaussian')(net)
>>> print(gaussianlayer)
>>> output shape : (64, 100)
"""
def __init__(
self,
mean=0.0,
stddev=1.0,
is_always=True,
seed=None,
name=None, # 'gaussian_noise',
):
super().__init__(name)
self.mean = mean
self.stddev = stddev
self.seed = seed
self.is_always = is_always
self.build()
self._built = True
logging.info("GaussianNoise %s: mean: %f stddev: %f" % (self.name, self.mean, self.stddev))
def __repr__(self):
s = '{classname}(mean={mean}, stddev={stddev}'
if self.name is not None:
s += ', name=\'{name}\''
s += ')'
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs=None):
pass
def forward(self, inputs):
if (self.is_train or self.is_always) is False:
return inputs
else:
# noise = np.random.normal(0.0 , sigma , tf.to_int64(self.inputs).get_shape())
noise = tf.random.normal(shape=inputs.get_shape(), mean=self.mean, stddev=self.stddev, seed=self.seed)
outputs = inputs + noise
return outputs
|
the-stack_0_23593 | # vim: filetype=python
# Copyright 2019 Autodesk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
## load our own python modules
from . import system
from itertools import izip
def process_return_code(retcode):
'''
translates a process return code (as obtained by os.system or subprocess) into a status string
'''
if retcode == 0:
status = 'OK'
else:
if system.is_windows:
if retcode < 0:
status = 'CRASHED'
else:
status = 'FAILED'
else:
# When a signal "N" is raised, the process can
# return with status "128 + N" or "-N"
if retcode > 128 or retcode < 0:
status = 'CRASHED'
else:
status = 'FAILED'
return status
## Searches for file in the system path. Returns a list of directories containing file
def find_in_path(file):
path = os.environ['PATH']
    path = path.split(os.pathsep)
return filter(os.path.exists, map(lambda dir, file=file: os.path.join(dir, file), path))
## Returns a list of all files with an extension from the 'valid_extensions' list
def find_files_recursive(path, valid_extensions):
path = os.path.normpath(path)
list = []
for root, dirs, files in os.walk(path):
if '.svn' in dirs:
dirs.remove('.svn')
for f in files:
if os.path.splitext(f)[1] in valid_extensions:
# Build the absolute path and then remove the root path, to get the relative path from root
file = os.path.join(root, f)[len(path) + 1:]
list += [file]
return list
## Copy directories recursively, ignoring .svn dirs
## <dest> directory must not exist
def copy_dir_recursive(src, dest):
for f in os.listdir(src):
src_path = os.path.join(src, f)
dest_path = os.path.join(dest, f)
if os.path.isdir(src_path):
if f != '.svn':
if not os.path.exists(dest_path):
os.makedirs(dest_path)
#shutil.copystat(src_path, dest_path)
copy_dir_recursive(src_path, dest_path)
else:
shutil.copy2(src_path, dest_path)
def get_arnold_version(arnold_include_dir, components = 4):
'''Obtain Arnold library version by parsing 'ai_version.h'
'''
ARCH_VERSION=''
MAJOR_VERSION=''
MINOR_VERSION=''
FIX_VERSION=''
ai_version_h = os.path.join(arnold_include_dir, 'ai_version.h')
f = open(ai_version_h, 'r')
while True:
line = f.readline().lstrip(' \t')
if line == "":
# We have reached the end of file.
break
if line.startswith('#define'):
tokens = line.split()
if tokens[1] == 'AI_VERSION_ARCH_NUM':
ARCH_VERSION = tokens[2]
elif tokens[1] == 'AI_VERSION_MAJOR_NUM':
MAJOR_VERSION = tokens[2]
elif tokens[1] == 'AI_VERSION_MINOR_NUM':
MINOR_VERSION = tokens[2]
elif tokens[1] == 'AI_VERSION_FIX':
FIX_VERSION = tokens[2].strip('"')
f.close()
if (components > 0):
version = ARCH_VERSION
if (components > 1):
version += '.' + MAJOR_VERSION
if (components > 2):
version += '.' + MINOR_VERSION
if (components > 3):
version += '.' + FIX_VERSION
return version
def get_usd_version(usd_include_dir, components=3):
VERSION = [''] * 3
pxr_h = os.path.join(usd_include_dir, 'pxr', 'pxr.h')
f = open(pxr_h, 'r')
while True:
line = f.readline().lstrip(' \t')
if line == "":
# We have reached the end of file.
break
if line.startswith('#define'):
tokens = line.split()
if tokens[1] == 'PXR_MAJOR_VERSION':
VERSION[0] = tokens[2]
elif tokens[1] == 'PXR_MINOR_VERSION':
VERSION[1] = tokens[2]
elif tokens[1] == 'PXR_PATCH_VERSION':
VERSION[2] = tokens[2]
f.close()
return '.'.join(VERSION[:components])
def convert_usd_version_to_int(usd_version):
sum = 0
for v, m in izip(usd_version.split('.'), [10000, 100, 1]):
sum += int(v) * m
return sum
def add_to_library_path(env, new_path):
if system.os == 'windows':
var_name = 'PATH'
elif system.os == 'darwin':
var_name = 'DYLD_LIBRARY_PATH'
else:
var_name = 'LD_LIBRARY_PATH'
if env['ENV'].has_key(var_name):
env['ENV'][var_name] = '%s%s%s' % (new_path, os.pathsep, env['ENV'][var_name])
else:
env['ENV'][var_name] = new_path
def set_library_path(env):
if system.os == 'windows':
var_name = 'PATH'
elif system.os == 'darwin':
var_name = 'DYLD_LIBRARY_PATH'
else:
var_name = 'LD_LIBRARY_PATH'
env['PREVIOUS_LIBRARY_PATH'] = ''
if os.environ.has_key(var_name):
env['PREVIOUS_LIBRARY_PATH'] = os.environ[var_name]
os.environ[var_name] = env['ENV'][var_name]
def reset_library_path(env):
if env.has_key('PREVIOUS_LIBRARY_PATH'):
if system.os == 'windows':
var_name = 'PATH'
elif system.os == 'darwin':
var_name = 'DYLD_LIBRARY_PATH'
else:
var_name = 'LD_LIBRARY_PATH'
os.environ[var_name] = env['PREVIOUS_LIBRARY_PATH']
def add_to_program_path(env, new_path):
if env['ENV'].has_key('PATH'):
env['ENV']['PATH'] = '%s%s%s' % (new_path, os.pathsep, env['ENV']['PATH'])
else:
env['ENV']['PATH'] = new_path
def set_program_path(env):
env['PREVIOUS_PROGRAM_PATH'] = ''
if os.environ.has_key('PATH'):
env['PREVIOUS_PROGRAM_PATH'] = os.environ['PATH']
os.environ['PATH'] = env['ENV']['PATH']
def reset_program_path(env):
if env.has_key('PREVIOUS_PROGRAM_PATH'):
os.environ['PATH'] = env['PREVIOUS_PROGRAM_PATH']
def get_default_path(var, default):
if var in os.environ:
return os.environ[var]
else:
return default
def get_escaped_path(path):
    if system.os == 'windows':
return path.replace("\\", "\\\\")
else:
return path
def link_usd_libraries(env, libs):
lib_prefix = env['USD_LIB_PREFIX']
usd_lib = env['USD_LIB']
if env['USD_LIB_AS_SOURCE']:
return [], [os.path.join(usd_lib, '%s%s%s' % (lib_prefix, lib, system.LIB_EXTENSION)) for lib in libs]
else:
return ['%s%s' % (lib_prefix, lib) for lib in libs], []
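# --- Hedged usage sketch (not part of the original SCons helper module) ---
# How the version helpers above are typically combined; the include paths are
# placeholders, not real installation locations.
#
#     arnold_ver = get_arnold_version('/path/to/arnold/include')   # e.g. '7.1.4.1'
#     usd_ver = get_usd_version('/path/to/usd/include')            # e.g. '0.22.5'
#     usd_ver_int = convert_usd_version_to_int(usd_ver)            # 0*10000 + 22*100 + 5 = 2205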
|
the-stack_0_23596 | # -*- coding: utf-8 -*-
# --------------------------------------
# @Time : 2020/11/01
# @Author : Oscar Chen
# @Email : [email protected]
# @File : freeze_graph.py
# Description : Freeze weights (ckpt --> pb)
# --------------------------------------
import os
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
import numpy as np
from PIL import Image
# Image file extensions
def Postfix():
postFix = set()
postFix.update(['bmp', 'jpg', 'png', 'tiff', 'gif', 'pcx', 'tga', 'exif',
'fpx', 'svg', 'psd', 'cdr', 'pcd', 'dxf', 'ufo', 'eps', 'JPG', 'raw', 'jpeg'])
return postFix
# Average hash algorithm (aHash)
def ahash(image):
    # Resize the image to 8 x 8
image = cv2.resize(image, (8, 8), interpolation=cv2.INTER_CUBIC)
    # Convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Accumulate the pixel values and compute the average
s = 0
for i in range(8):
for j in range(8):
s = s + gray[i, j]
avg = s / 64
    # Pixels brighter than the average map to 1, otherwise 0, giving the image's average hash as a 64-bit binary string
ahash_str = ''
for i in range(8):
for j in range(8):
if gray[i, j] > avg:
ahash_str = ahash_str + '1'
else:
ahash_str = ahash_str + '0'
    # print("aHash value:", ahash_str)
return ahash_str
# Perceptual hash algorithm (pHash)
def phash(image):
    # Resize the image to 32 x 32
image_resize = cv2.resize(image, (32, 32), interpolation=cv2.INTER_CUBIC)
    # Convert the image to grayscale
gray = cv2.cvtColor(image_resize, cv2.COLOR_RGB2GRAY)
    # Convert the grayscale image to float and apply the DCT
img_dct = cv2.dct(np.float32(gray))
    # Take the top-left 8 x 8 ROI
roi_dct = img_dct[0:8, 0:8]
    # Compute the mean
avreage = np.mean(roi_dct)
    # Compute the hash value
phash_str = ''
for i in range(roi_dct.shape[0]):
for j in range(roi_dct.shape[1]):
if roi_dct[i, j] > avreage:
phash_str = phash_str + '1'
else:
phash_str = phash_str + '0'
    # print("pHash value:", phash_str)
return phash_str
# Difference hash algorithm (dHash)
def dhash(image):
    # Resize the image to 9 x 8
image_resize = cv2.resize(image, (9, 8), interpolation=cv2.INTER_CUBIC)
    # Convert the image to grayscale
gray = cv2.cvtColor(image_resize, cv2.COLOR_RGB2GRAY)
    # In each row, 1 if a pixel is brighter than the next one, otherwise 0, giving a 64-bit binary hash string
dhash_str = ''
for i in range(8):
for j in range(8):
if gray[i, j] > gray[i, j + 1]:
dhash_str = dhash_str + '1'
else:
dhash_str = dhash_str + '0'
    # print("dHash value:", dhash_str)
return dhash_str
# Compute the Hamming distance between two image hashes
def Hamming(hash1, hash2):
    # Hashes of different lengths cannot be compared; return -1
if len(hash1) != len(hash2):
return -1
hamming_distance = 0
for i in range(len(hash1)):
if hash1[i] != hash2[i]:
hamming_distance += 1
return hamming_distance
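# Example (hedged, illustrative hash strings): Hamming('10110', '10010') == 1,
# because the two strings differ only at index 2.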
# Normalize an image: resize and optionally convert to grayscale
def Normalize(image, size=(64, 64), greyscale=False):
    # Resize the image
image = cv2.resize(image, size, interpolation=cv2.INTER_CUBIC)
if greyscale:
        # Convert the image to grayscale, 8 bits per pixel
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return image
# Compute the cosine similarity between two images
def Cosine(image1, image2):
image1 = Normalize(image1)
image2 = Normalize(image2)
image1 = Image.fromarray(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB))
image2 = Image.fromarray(cv2.cvtColor(image2, cv2.COLOR_BGR2RGB))
images = [image1, image2]
vectors = []
norms = []
for image in images:
vector = []
for pixel_tuple in image.getdata():
vector.append(np.average(pixel_tuple))
vectors.append(vector)
norms.append(np.linalg.norm(vector, 2))
a, b = vectors
a_norm, b_norm = norms
    # np.dot returns the dot product; for 2-D arrays (matrices) it performs matrix multiplication
res = np.dot(a / a_norm, b / b_norm)
return res
def Histogram(image_1, image_2):
    # Compute single-channel histograms
hist_1 = cv2.calcHist([image_1], [0], None, [256], [0.0, 255.0])
hist_2 = cv2.calcHist([image_2], [0], None, [256], [0.0, 255.0])
    # Compute the degree of overlap between the two histograms
degree = 0
for i in range(len(hist_1)):
if hist_1[i] != hist_2[i]:
degree = degree + (1 - abs(hist_1[i] - hist_2[i]) / max(hist_1[i], hist_2[i]))
else:
degree = degree + 1
degree = degree / len(hist_1)
return degree
def correlation(image, kernal):
kernal_heigh = kernal.shape[0]
kernal_width = kernal.shape[1]
cor_heigh = image.shape[0] - kernal_heigh + 1
cor_width = image.shape[1] - kernal_width + 1
result = np.zeros((cor_heigh, cor_width), dtype=np.float64)
for i in range(cor_heigh):
for j in range(cor_width):
result[i][j] = (image[i:i + kernal_heigh, j:j + kernal_width] * kernal).sum()
return result
def gaussian_2d_kernel(kernel_size=11, sigma=1.5):
kernel = np.zeros([kernel_size, kernel_size])
center = kernel_size // 2
if sigma == 0:
sigma = ((kernel_size - 1) * 0.5 - 1) * 0.3 + 0.8
s = 2 * (sigma ** 2)
sum_val = 0
for i in range(0, kernel_size):
for j in range(0, kernel_size):
x = i - center
y = j - center
kernel[i, j] = np.exp(-(x ** 2 + y ** 2) / s)
sum_val += kernel[i, j]
sum_val = 1 / sum_val
return kernel * sum_val
def ssim(image_1, image_2, window_size=11, gaussian_sigma=1.5, K1=0.01, K2=0.03, alfa=1, beta=1, gama=1):
image_1 = cv2.cvtColor(image_1, cv2.COLOR_RGB2GRAY)
image_2 = cv2.cvtColor(image_2, cv2.COLOR_RGB2GRAY)
    image_1 = np.array(image_1, dtype=np.float64)
    image_2 = np.array(image_2, dtype=np.float64)
if not image_1.shape == image_2.shape:
raise ValueError("Input Imagees must has the same size")
if len(image_1.shape) > 2:
raise ValueError("Please input the images with 1 channel")
    kernal = gaussian_2d_kernel(window_size, gaussian_sigma)
    # Compute the intermediate terms ux, uy, ux*uy, ux^2, uy^2, sigma_x^2, sigma_y^2 and sigma_xy
ux = correlation(image_1, kernal)
uy = correlation(image_2, kernal)
image_1_sqr = image_1 ** 2
image_2_sqr = image_2 ** 2
dis_mult_ori = image_1 * image_2
uxx = correlation(image_1_sqr, kernal)
uyy = correlation(image_2_sqr, kernal)
uxy = correlation(dis_mult_ori, kernal)
ux_sqr = ux ** 2
uy_sqr = uy ** 2
uxuy = ux * uy
sx_sqr = uxx - ux_sqr
sy_sqr = uyy - uy_sqr
sxy = uxy - uxuy
C1 = (K1 * 255) ** 2
C2 = (K2 * 255) ** 2
    # SSIM for the common case (alfa = beta = gama = 1)
    if alfa == 1 and beta == 1 and gama == 1:
        ssim = (2 * uxuy + C1) * (2 * sxy + C2) / (ux_sqr + uy_sqr + C1) / (sx_sqr + sy_sqr + C2)
        return np.mean(ssim)
    # Compute luminance similarity
l = (2 * uxuy + C1) / (ux_sqr + uy_sqr + C1)
l = l ** alfa
    # Compute contrast similarity
    sxsy = np.sqrt(sx_sqr) * np.sqrt(sy_sqr)
    c = (2 * sxsy + C2) / (sx_sqr + sy_sqr + C2)
    c = c ** beta
    # Compute structure similarity
C3 = 0.5 * C2
s = (sxy + C3) / (sxsy + C3)
s = s ** gama
ssim = l * c * s
return np.mean(ssim)
if __name__ == '__main__':
image_dir = "/home/chenwei/HDD/Project/datasets/segmentation/lane_dataset/高速通道/白天/image"
postFix = Postfix()
file_list = os.listdir(image_dir)
file_list.sort()
cImage = cv2.imread(r'%s/%s' % (image_dir, str(file_list[0])))
sp = cImage.shape
top = int(sp[0] / 2)
bottom = int(sp[0])
cROI = cImage[:, top:bottom]
cValue = dhash(cROI)
count = 0
for index in range(1, len(file_list)):
if str(file_list[index]).split('.')[-1] in postFix:
image = cv2.imread(r'%s/%s' % (image_dir, str(file_list[index])))
roi = image[:, top:bottom]
value = dhash(roi)
#dis = Hamming(cValue, value)
dis = ssim(cROI, roi)
print("Distance is: ", dis)
concat = np.hstack((cImage, image))
concat = cv2.resize(concat, (960, 360), interpolation=cv2.INTER_CUBIC)
cv2.line(concat, (0, 180), (960, 180), (0, 255, 0), 1, 4)
cv2.imshow("test", concat)
cv2.waitKey(75)
if dis < 0.6:
cv2.imwrite('/home/chenwei/HDD/Project/datasets/segmentation/lane_dataset/result/' + str(count) + '.png', cImage)
cImage = image
#cValue = value
cROI = roi
count += 1
|
the-stack_0_23601 | from scipy.signal.windows import triang
from scipy.ndimage import gaussian_filter1d
import numpy as np
def get_lds_kernel_window(kernel, ks, sigma):
assert kernel in ['gaussian', 'triang', 'laplace']
half_ks = (ks - 1) // 2
if kernel == 'gaussian':
base_kernel = [0.] * half_ks + [1.] + [0.] * half_ks
kernel_window = gaussian_filter1d(base_kernel, sigma=sigma) / max(gaussian_filter1d(base_kernel, sigma=sigma))
elif kernel == 'triang':
kernel_window = triang(ks)
else:
laplace = lambda x: np.exp(-abs(x) / sigma) / (2. * sigma)
        laplace_vals = np.array(list(map(laplace, np.arange(-half_ks, half_ks + 1))))
        kernel_window = laplace_vals / laplace_vals.max()
return kernel_window |
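# --- Hedged usage sketch for get_lds_kernel_window above (histogram values are made up) ---
# In label-distribution smoothing the window is convolved with an empirical
# label histogram to obtain an effective (smoothed) label density.
if __name__ == '__main__':
    from scipy.ndimage import convolve1d

    label_hist = np.asarray([10., 3., 0., 1., 7., 12., 5.])  # toy per-bin label counts
    lds_window = get_lds_kernel_window(kernel='gaussian', ks=5, sigma=2)
    effective_hist = convolve1d(label_hist, weights=lds_window, mode='constant')
    print(lds_window)
    print(effective_hist)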
the-stack_0_23602 | import logging
import time
import threading
from flask import Response, json, jsonify
from base import python_logging as pl
from base.settings import Settings
from base.exceptions import InvalidUsage
from base.utils import get_real_logger_level
settings = Settings()
class LoggerBase(logging.Logger):
def __init__(self, name, *args, **kwargs):
self.name = name
self._log_type = kwargs.get('log_type', 'json')
super().__init__(name, *args, **kwargs)
@property
def log_type(self):
return self._log_type
@log_type.setter
def log_type(self, value: str):
self._log_type = value
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
fn = extra.pop('fn', fn)
lno = extra.pop('lno', lno)
func = extra.pop('func', func)
return super().makeRecord(name, level, fn, lno, msg, args, exc_info, func, extra, sinfo)
log_level = get_real_logger_level(int(settings.config_log["level"]))
try:
import uwsgi
uwsgi.sharedarea_write(0, 0, json.dumps(log_level))
uwsgi.sharedarea_write(0, 3, json.dumps(log_level))
except ModuleNotFoundError:
pass
pl.setup_loglevel()
log_type = settings.config_log.get('format', 'json')
host_pub = settings.host_pub
logger_handler = pl.setup_logger_handler(settings.log_path, log_level, log_type, host_pub)
logging.setLoggerClass(LoggerBase)
logger = logging.getLogger(__name__)
logger.log_type = log_type
logger.addHandler(logger_handler)
LOG_TABLE = {
0: logger.emergency,
1: logger.alert,
2: logger.critical,
3: logger.error,
4: logger.warning,
5: logger.notice,
6: logger.info,
7: logger.debug,
8: logger.trace,
9: logger.verbose
}
def level_runtime(request) -> Response:
if request.method == 'GET':
try:
shared_area = uwsgi.sharedarea_read(0, 3, 3)
level = int(shared_area.decode('utf-8'))
except:
level = logger.level
context = {
"level_number": 109 - level
}
response = jsonify(context)
response.status_code = 200
elif request.method == 'POST':
if request.is_json and request.data:
payload = request.get_json()
if 'level_number' not in payload:
raise InvalidUsage(status_code=412, message='level_number not found in payload')
elif type(payload['level_number']) is not int or not 0 <= payload['level_number'] <= 9:
raise InvalidUsage(status_code=412, message='level_number is not a number or not between 0 and 9')
else:
level = payload.get('level_number', 9)
real_level = int(get_real_logger_level(int(level)))
expire_hours = payload.get('expire_hours', 0)
expire_timestamp = int(time.time()) + int(float(expire_hours) * 3600) if expire_hours else 0
payload = {
"level_number": level,
"expire_hours": expire_hours
}
try:
if expire_hours == 0:
uwsgi.sharedarea_write(0, 0, json.dumps(real_level))
else:
timer_thread = threading.Thread(target=set_global_log_level, args=(expire_timestamp,),
name='Timer',
daemon=True)
timer_thread.start()
uwsgi.sharedarea_write(0, 3, json.dumps(real_level))
except NameError:
logger.setLevel(real_level)
response = jsonify(payload)
response.status_code = 200
else:
raise InvalidUsage('No data or data format invalid.', status_code=422)
else:
raise InvalidUsage('The method is not allowed for the requested URL.', status_code=405)
return response
def set_global_log_level(expire_timestamp):
timer_ = expire_timestamp - int(time.time())
time.sleep(int(timer_))
try:
default_log_level = uwsgi.sharedarea_read(0, 0, 3)
global_level = int(default_log_level.decode('ascii'))
uwsgi.sharedarea_write(0, 3, json.dumps(global_level))
except NameError:
pass
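# --- Hedged usage sketch (not part of the original module) ---
# level_runtime() takes a Flask request and returns a Flask response, so it is
# normally exposed through a small route; the app object and URL below are
# assumptions made only for illustration.
#
#     from flask import Flask, request
#
#     app = Flask(__name__)
#
#     @app.route('/log-level', methods=['GET', 'POST'])
#     def log_level():
#         return level_runtime(request)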
|
the-stack_0_23603 | """Tests for parsing which does not raise Exceptions normally"""
import xml.dom
from . import basetest
import cssutils
import urllib.request
import urllib.error
import urllib.parse
try:
import mock
except ImportError:
mock = None
print("install mock library to run all tests")
class CSSParserTestCase(basetest.BaseTestCase):
def _make_fetcher(self, encoding, content):
"make an URL fetcher with specified data"
def fetcher(url):
return encoding, content
return fetcher
def setUp(self):
self._saved = cssutils.log.raiseExceptions
def tearDown(self):
cssutils.log.raiseExceptions = self._saved
def test_init(self):
"CSSParser.__init__()"
self.assertEqual(True, cssutils.log.raiseExceptions)
# also the default:
cssutils.log.raiseExceptions = True
# default non raising parser
p = cssutils.CSSParser()
s = p.parseString('$')
self.assertEqual(s.cssText, ''.encode())
# explicit raiseExceptions=False
p = cssutils.CSSParser(raiseExceptions=False)
s = p.parseString('$')
self.assertEqual(s.cssText, ''.encode())
# working with sheet does raise though!
self.assertRaises(xml.dom.DOMException, s.__setattr__, 'cssText', '$')
# ----
# raiseExceptions=True
p = cssutils.CSSParser(raiseExceptions=True)
self.assertRaises(xml.dom.SyntaxErr, p.parseString, '$')
# working with a sheet does raise too
s = cssutils.css.CSSStyleSheet()
self.assertRaises(xml.dom.DOMException, s.__setattr__, 'cssText', '$')
# RESET cssutils.log.raiseExceptions
cssutils.log.raiseExceptions = False
s = cssutils.css.CSSStyleSheet()
# does not raise!
s.__setattr__('cssText', '$')
self.assertEqual(s.cssText, ''.encode())
def test_parseComments(self):
"cssutils.CSSParser(parseComments=False)"
css = '/*1*/ a { color: /*2*/ red; }'
p = cssutils.CSSParser(parseComments=False)
self.assertEqual(
p.parseString(css).cssText, 'a {\n color: red\n }'.encode()
)
p = cssutils.CSSParser(parseComments=True)
self.assertEqual(
p.parseString(css).cssText,
'/*1*/\na {\n color: /*2*/ red\n }'.encode(),
)
# def test_parseFile(self):
# "CSSParser.parseFile()"
# # see test_cssutils
def test_parseUrl(self):
"CSSParser.parseUrl()"
if mock:
# parseUrl(self, href, encoding=None, media=None, title=None):
parser = cssutils.CSSParser()
m = mock.Mock()
with mock.patch('cssutils.util._defaultFetcher', m):
m.return_value = (None, '')
sheet = parser.parseUrl(
'http://example.com', media='tv,print', title='test'
)
self.assertEqual(sheet.href, 'http://example.com')
self.assertEqual(sheet.encoding, 'utf-8')
self.assertEqual(sheet.media.mediaText, 'tv, print')
self.assertEqual(sheet.title, 'test')
# URL and content tests
tests = {
# (url, content): isSheet, encoding, cssText
('', None): (False, None, None),
('1', None): (False, None, None),
('mailto:[email protected]', None): (False, None, None),
('http://cthedot.de/test.css', None): (False, None, None),
('http://cthedot.de/test.css', ''): (True, 'utf-8', ''),
('http://cthedot.de/test.css', 'a'): (True, 'utf-8', ''),
('http://cthedot.de/test.css', 'a {color: red}'): (
True,
'utf-8',
'a {\n color: red\n }',
),
('http://cthedot.de/test.css', 'a {color: red}'): (
True,
'utf-8',
'a {\n color: red\n }',
),
('http://cthedot.de/test.css', '@charset "ascii";a {color: red}'): (
True,
'ascii',
'@charset "ascii";\na {\n color: red\n }',
),
}
override = 'iso-8859-1'
overrideprefix = '@charset "iso-8859-1";'
httpencoding = None
for (url, content), (isSheet, expencoding, cssText) in list(tests.items()):
parser.setFetcher(self._make_fetcher(httpencoding, content))
sheet1 = parser.parseUrl(url)
sheet2 = parser.parseUrl(url, encoding=override)
if isSheet:
self.assertEqual(sheet1.encoding, expencoding)
self.assertEqual(sheet1.cssText, cssText.encode())
self.assertEqual(sheet2.encoding, override)
if sheet1.cssText and cssText.startswith('@charset'):
self.assertEqual(
sheet2.cssText,
(cssText.replace('ascii', override).encode()),
)
elif sheet1.cssText:
self.assertEqual(
sheet2.cssText, (overrideprefix + '\n' + cssText).encode()
)
else:
self.assertEqual(
sheet2.cssText, (overrideprefix + cssText).encode()
)
else:
self.assertEqual(sheet1, None)
self.assertEqual(sheet2, None)
parser.setFetcher(None)
self.assertRaises(ValueError, parser.parseUrl, '../not-valid-in-urllib')
self.assertRaises(
urllib.error.HTTPError,
parser.parseUrl,
'http://cthedot.de/not-present.css',
)
else:
self.assertEqual(False, 'Mock needed for this test')
def test_parseString(self):
"CSSParser.parseString()"
tests = {
# (byte) string, encoding: encoding, cssText
('/*a*/', None): ('utf-8', '/*a*/'.encode('utf-8')),
('/*a*/', 'ascii'): ('ascii', '@charset "ascii";\n/*a*/'.encode('ascii')),
# org
# ('/*\xc3\xa4*/', None): (u'utf-8', u'/*\xc3\xa4*/'.encode('utf-8')),
# ('/*\xc3\xa4*/', 'utf-8'): (u'utf-8',
# u'@charset "utf-8";\n/*\xc3\xa4*/'.encode('utf-8')),
# new for 2.x and 3.x
('/*\xe4*/'.encode('utf-8'), None): ('utf-8', '/*\xe4*/'.encode('utf-8')),
('/*\xe4*/'.encode('utf-8'), 'utf-8'): (
'utf-8',
'@charset "utf-8";\n/*\xe4*/'.encode('utf-8'),
),
('@charset "ascii";/*a*/', None): (
'ascii',
'@charset "ascii";\n/*a*/'.encode('ascii'),
),
('@charset "utf-8";/*a*/', None): (
'utf-8',
'@charset "utf-8";\n/*a*/'.encode('utf-8'),
),
('@charset "iso-8859-1";/*a*/', None): (
'iso-8859-1',
'@charset "iso-8859-1";\n/*a*/'.encode('iso-8859-1'),
),
# unicode string, no encoding: encoding, cssText
('/*€*/', None): ('utf-8', '/*€*/'.encode('utf-8')),
('@charset "iso-8859-1";/*ä*/', None): (
'iso-8859-1',
'@charset "iso-8859-1";\n/*ä*/'.encode('iso-8859-1'),
),
('@charset "utf-8";/*€*/', None): (
'utf-8',
'@charset "utf-8";\n/*€*/'.encode('utf-8'),
),
('@charset "utf-16";/**/', None): (
'utf-16',
'@charset "utf-16";\n/**/'.encode('utf-16'),
),
# unicode string, encoding utf-8: encoding, cssText
('/*€*/', 'utf-8'): ('utf-8', '@charset "utf-8";\n/*€*/'.encode('utf-8')),
('@charset "iso-8859-1";/*ä*/', 'utf-8'): (
'utf-8',
'@charset "utf-8";\n/*ä*/'.encode('utf-8'),
),
('@charset "utf-8";/*€*/', 'utf-8'): (
'utf-8',
'@charset "utf-8";\n/*€*/'.encode('utf-8'),
),
('@charset "utf-16";/**/', 'utf-8'): (
'utf-8',
'@charset "utf-8";\n/**/'.encode('utf-8'),
),
# probably not what is wanted but does not raise:
('/*€*/', 'ascii'): (
'ascii',
'@charset "ascii";\n/*\\20AC */'.encode('utf-8'),
),
('/*€*/', 'iso-8859-1'): (
'iso-8859-1',
'@charset "iso-8859-1";\n/*\\20AC */'.encode('utf-8'),
),
}
for test in tests:
css, encoding = test
sheet = cssutils.parseString(css, encoding=encoding)
encoding, cssText = tests[test]
self.assertEqual(encoding, sheet.encoding)
self.assertEqual(cssText, sheet.cssText)
tests = [
# encoded css, overiding encoding
('/*€*/'.encode('utf-16'), 'utf-8'),
('/*ä*/'.encode('iso-8859-1'), 'ascii'),
('/*€*/'.encode('utf-8'), 'ascii'),
('a'.encode('ascii'), 'utf-16'),
]
for test in tests:
# self.assertEqual(None, cssutils.parseString(css, encoding=encoding))
self.assertRaises(
UnicodeDecodeError, cssutils.parseString, test[0], test[1]
)
def test_validate(self):
"""CSSParser(validate)"""
style = 'color: red'
t = 'a { %s }' % style
# helper
s = cssutils.parseString(t)
self.assertEqual(s.validating, True)
s = cssutils.parseString(t, validate=False)
self.assertEqual(s.validating, False)
s = cssutils.parseString(t, validate=True)
self.assertEqual(s.validating, True)
d = cssutils.parseStyle(style)
self.assertEqual(d.validating, True)
d = cssutils.parseStyle(style, validate=True)
self.assertEqual(d.validating, True)
d = cssutils.parseStyle(style, validate=False)
self.assertEqual(d.validating, False)
# parser
p = cssutils.CSSParser()
s = p.parseString(t)
self.assertEqual(s.validating, True)
s = p.parseString(t, validate=False)
self.assertEqual(s.validating, False)
s = p.parseString(t, validate=True)
self.assertEqual(s.validating, True)
d = p.parseStyle(style)
self.assertEqual(d.validating, True)
p = cssutils.CSSParser(validate=True)
s = p.parseString(t)
self.assertEqual(s.validating, True)
s = p.parseString(t, validate=False)
self.assertEqual(s.validating, False)
s = p.parseString(t, validate=True)
self.assertEqual(s.validating, True)
d = p.parseStyle(style)
self.assertEqual(d.validating, True)
p = cssutils.CSSParser(validate=False)
s = p.parseString(t)
self.assertEqual(s.validating, False)
s = p.parseString(t, validate=False)
self.assertEqual(s.validating, False)
s = p.parseString(t, validate=True)
self.assertEqual(s.validating, True)
d = p.parseStyle(style)
self.assertEqual(d.validating, False)
# url
p = cssutils.CSSParser(validate=False)
p.setFetcher(self._make_fetcher('utf-8', t))
u = 'url'
s = p.parseUrl(u)
self.assertEqual(s.validating, False)
s = p.parseUrl(u, validate=False)
self.assertEqual(s.validating, False)
s = p.parseUrl(u, validate=True)
self.assertEqual(s.validating, True)
# check if it raises see log test
def test_fetcher(self):
"""CSSParser.fetcher
order:
0. explicity given encoding OVERRIDE (cssutils only)
1. An HTTP "charset" parameter in a "Content-Type" field
(or similar parameters in other protocols)
2. BOM and/or @charset (see below)
3. <link charset=""> or other metadata from the linking mechanism (if any)
4. charset of referring style sheet or document (if any)
5. Assume UTF-8
"""
tests = {
# css, encoding, (mimetype, encoding, importcss):
# encoding, importIndex, importEncoding, importText
# 0/0 override/override => ASCII/ASCII
(
'@charset "utf-16"; @import "x";',
'ASCII',
('iso-8859-1', '@charset "latin1";/*t*/'),
): ('ascii', 1, 'ascii', '@charset "ascii";\n/*t*/'.encode()),
# 1/1 not tested her but same as next
# 2/1 @charset/HTTP => UTF-16/ISO-8859-1
(
'@charset "UTF-16"; @import "x";',
None,
('ISO-8859-1', '@charset "latin1";/*t*/'),
): (
'utf-16',
1,
'iso-8859-1',
'@charset "iso-8859-1";\n/*t*/'.encode('iso-8859-1'),
),
# 2/2 @charset/@charset => UTF-16/ISO-8859-1
(
'@charset "UTF-16"; @import "x";',
None,
(None, '@charset "ISO-8859-1";/*t*/'),
): (
'utf-16',
1,
'iso-8859-1',
'@charset "iso-8859-1";\n/*t*/'.encode('iso-8859-1'),
),
# 2/4 @charset/referrer => ASCII/ASCII
('@charset "ASCII"; @import "x";', None, (None, '/*t*/')): (
'ascii',
1,
'ascii',
'@charset "ascii";\n/*t*/'.encode(),
),
# 5/5 default/default or referrer
('@import "x";', None, (None, '/*t*/')): (
'utf-8',
0,
'utf-8',
'/*t*/'.encode(),
),
# 0/0 override/override+unicode
(
'@charset "utf-16"; @import "x";',
'ASCII',
(None, '@charset "latin1";/*\u0287*/'),
): ('ascii', 1, 'ascii', '@charset "ascii";\n/*\\287 */'.encode()),
# 2/1 @charset/HTTP+unicode
('@charset "ascii"; @import "x";', None, ('iso-8859-1', '/*\u0287*/')): (
'ascii',
1,
'iso-8859-1',
'@charset "iso-8859-1";\n/*\\287 */'.encode(),
),
# 2/4 @charset/referrer+unicode
('@charset "ascii"; @import "x";', None, (None, '/*\u0287*/')): (
'ascii',
1,
'ascii',
'@charset "ascii";\n/*\\287 */'.encode(),
),
# 5/1 default/HTTP+unicode
('@import "x";', None, ('ascii', '/*\u0287*/')): (
'utf-8',
0,
'ascii',
'@charset "ascii";\n/*\\287 */'.encode(),
),
# 5/5 default+unicode/default+unicode
('@import "x";', None, (None, '/*\u0287*/')): (
'utf-8',
0,
'utf-8',
'/*\u0287*/'.encode('utf-8'),
),
}
parser = cssutils.CSSParser()
for test in tests:
css, encoding, fetchdata = test
sheetencoding, importIndex, importEncoding, importText = tests[test]
# use setFetcher
parser.setFetcher(self._make_fetcher(*fetchdata))
# use init
parser2 = cssutils.CSSParser(fetcher=self._make_fetcher(*fetchdata))
sheet = parser.parseString(css, encoding=encoding)
sheet2 = parser2.parseString(css, encoding=encoding)
# sheet
self.assertEqual(sheet.encoding, sheetencoding)
self.assertEqual(sheet2.encoding, sheetencoding)
# imported sheet
self.assertEqual(
sheet.cssRules[importIndex].styleSheet.encoding, importEncoding
)
self.assertEqual(
sheet2.cssRules[importIndex].styleSheet.encoding, importEncoding
)
self.assertEqual(sheet.cssRules[importIndex].styleSheet.cssText, importText)
self.assertEqual(
sheet2.cssRules[importIndex].styleSheet.cssText, importText
)
def test_roundtrip(self):
"cssutils encodings"
css1 = r'''@charset "utf-8";
/* ä */'''
s = cssutils.parseString(css1)
css2 = str(s.cssText, 'utf-8')
self.assertEqual(css1, css2)
s = cssutils.parseString(css2)
s.cssRules[0].encoding = 'ascii'
css3 = r'''@charset "ascii";
/* \E4 */'''
self.assertEqual(css3, str(s.cssText, 'utf-8'))
def test_escapes(self):
"cssutils escapes"
css = r'\43\x { \43\x: \43\x !import\41nt }'
sheet = cssutils.parseString(css)
self.assertEqual(
sheet.cssText,
r'''C\x {
c\x: C\x !important
}'''.encode(),
)
css = r'\ x{\ x :\ x ;y:1} '
sheet = cssutils.parseString(css)
self.assertEqual(
sheet.cssText,
r'''\ x {
\ x: \ x;
y: 1
}'''.encode(),
)
def test_invalidstring(self):
"cssutils.parseString(INVALID_STRING)"
validfromhere = '@namespace "x";'
csss = (
'''@charset "ascii
;'''
+ validfromhere,
'''@charset 'ascii
;'''
+ validfromhere,
'''@namespace "y
;'''
+ validfromhere,
'''@import "y
;'''
+ validfromhere,
'''@import url('a
);'''
+ validfromhere,
'''@unknown "y
;'''
+ validfromhere,
)
for css in csss:
s = cssutils.parseString(css)
self.assertEqual(validfromhere.encode(), s.cssText)
csss = (
'''a { font-family: "Courier
; }''',
r'''a { content: "\"; }
''',
r'''a { content: "\\\"; }
''',
)
for css in csss:
self.assertEqual(''.encode(), cssutils.parseString(css).cssText)
def test_invalid(self):
"cssutils.parseString(INVALID_CSS)"
tests = {
'a {color: blue}} a{color: red} a{color: green}': '''a {
color: blue
}
a {
color: green
}''',
'p @here {color: red} p {color: green}': 'p {\n color: green\n }',
}
for css in tests:
exp = tests[css]
if exp is None:
exp = css
s = cssutils.parseString(css)
self.assertEqual(exp.encode(), s.cssText)
def test_nesting(self):
"cssutils.parseString nesting"
# examples from csslist 27.11.2007
tests = {
'@1; div{color:green}': 'div {\n color: green\n }',
'@1 []; div{color:green}': 'div {\n color: green\n }',
'@1 [{}]; div { color:green; }': 'div {\n color: green\n }',
'@media all { @ } div{color:green}': 'div {\n color: green\n }',
# should this be u''?
'@1 { [ } div{color:green}': '',
# red was eaten:
'@1 { [ } ] div{color:red}div{color:green}': 'div {\n color: green\n }',
}
for css, exp in list(tests.items()):
self.assertEqual(exp.encode(), cssutils.parseString(css).cssText)
def test_specialcases(self):
"cssutils.parseString(special_case)"
tests = {
'''
a[title="a not s\
o very long title"] {/*...*/}''': '''a[title="a not so very long title"] {
/*...*/
}'''
}
for css in tests:
exp = tests[css]
if exp is None:
exp = css
s = cssutils.parseString(css)
self.assertEqual(exp.encode(), s.cssText)
def test_iehack(self):
"IEhack: $property (not since 0.9.5b3)"
# $color is not color!
css = 'a { color: green; $color: red; }'
s = cssutils.parseString(css)
p1 = s.cssRules[0].style.getProperty('color')
self.assertEqual('color', p1.name)
self.assertEqual('color', p1.literalname)
self.assertEqual('', s.cssRules[0].style.getPropertyValue('$color'))
p2 = s.cssRules[0].style.getProperty('$color')
self.assertEqual(None, p2)
self.assertEqual('green', s.cssRules[0].style.getPropertyValue('color'))
self.assertEqual('green', s.cssRules[0].style.color)
def test_attributes(self):
"cssutils.parseString(href, media)"
s = cssutils.parseString(
"a{}", href="file:foo.css", media="screen, projection, tv"
)
self.assertEqual(s.href, "file:foo.css")
self.assertEqual(s.media.mediaText, "screen, projection, tv")
s = cssutils.parseString(
"a{}", href="file:foo.css", media=["screen", "projection", "tv"]
)
self.assertEqual(s.media.mediaText, "screen, projection, tv")
if __name__ == '__main__':
import unittest
unittest.main()
|
the-stack_0_23604 | import numpy as np
import torch
import torch.nn.functional as F
import gym
import time
from fireup.algos.iqn import core
from fireup.utils.logx import EpochLogger
class ReplayBuffer:
"""
A simple FIFO experience replay buffer for DQN agents.
"""
def __init__(self, obs_dim, act_dim, size):
self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
self.rews_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done):
self.obs1_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.acts_buf[self.ptr] = act
self.rews_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
return dict(
obs1=self.obs1_buf[idxs],
obs2=self.obs2_buf[idxs],
acts=self.acts_buf[idxs],
rews=self.rews_buf[idxs],
done=self.done_buf[idxs],
)
"""
Implicit Quantile Network from http://arxiv.org/abs/1806.06923
"""
def iqn(
env_fn,
dqnetwork=core.DQNetwork,
ac_kwargs=dict(),
seed=0,
steps_per_epoch=5000,
epochs=100,
replay_size=int(1e6),
quantile_embedding_dim=64, # n in equation 4 in IQN paper
num_tau_samples=16, # N in equation 3 in IQN paper
num_tau_prime_samples=8, # N' in equation 3 in IQN paper
num_quantile_samples=32, # K in equation 3 in IQN paper
kappa=1.0, # kappa for Huber Loss in IQN
gamma=0.99,
min_replay_history=20000,
epsilon_decay_period=250000,
epsilon_train=0.01,
epsilon_eval=0.001,
lr=1e-3,
max_ep_len=1000,
update_period=4,
target_update_period=8000,
batch_size=100,
logger_kwargs=dict(),
save_freq=1,
):
"""
quantile_embedding_dim : # n in equation 4 in IQN paper
num_tau_samples : N in equation 3 in IQN paper
num_tau_prime_samples : N' in equation 3 in IQN paper
num_quantile_samples : K in equation 3 in IQN paper
kappa : kappa for Huber Loss in IQN
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
torch.manual_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(), env_fn()
obs_dim = env.observation_space.shape[0]
act_dim = 1 # env.action_space.shape
# Share information about action space with policy architecture
ac_kwargs["action_space"] = env.action_space
ac_kwargs["quantile_embedding_dim"] = quantile_embedding_dim
# Main computation graph
main = dqnetwork(in_features=obs_dim, **ac_kwargs)
# Target network
target = dqnetwork(in_features=obs_dim, **ac_kwargs)
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables
var_counts = tuple(core.count_vars(module) for module in [main.z, main])
print(("\nNumber of parameters: \t z: %d, \t total: %d\n") % var_counts)
# Value train op
params = main.parameters()
optimizer = torch.optim.Adam(params, lr=lr)
# Initializing targets to match main variables
target.load_state_dict(main.state_dict())
def get_action(o, epsilon):
"""Select an action from the set of available actions.
Chooses an action randomly with probability epsilon otherwise
act greedily according to the current Q-value estimates.
"""
if np.random.random() <= epsilon:
return env.action_space.sample()
else:
return main.policy(torch.Tensor(o.reshape(1, -1)), num_tau_samples).item()
def test_agent(n=10):
for _ in range(n):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
while not (d or (ep_len == max_ep_len)):
# epsilon_eval used when evaluating the agent
o, r, d, _ = test_env.step(get_action(o, epsilon_eval))
ep_ret += r
ep_len += 1
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
def update():
"""ref: https://github.com/google/dopamine/blob/master/dopamine/agents/implicit_quantile/implicit_quantile_agent.py
"""
main.train()
batch = replay_buffer.sample_batch(batch_size)
(obs1, obs2, acts1, rews, done) = (
torch.Tensor(batch["obs1"]),
torch.Tensor(batch["obs2"]),
torch.LongTensor(batch["acts"]), # (bsz, 1)
torch.Tensor(batch["rews"]), # (bsz)
torch.Tensor(batch["done"]), # (bsz)
)
action_dim = env.action_space.n
bsz = obs1.size(0)
with torch.no_grad():
z2, _ = target(obs2, num_tau_prime_samples)
assert z2.size() == (bsz, action_dim, num_tau_prime_samples)
# acts2 = main(obs2, num_quantile_samples)[0].mean(dim=-1).argmax(dim=-1) # double dqn
acts2 = z2.mean(dim=-1).argmax(dim=-1) # (bsz)
rews = rews.unsqueeze(1)
done = done.unsqueeze(1)
backups = rews + (1 - done) * gamma * z2[range(bsz), acts2]
assert backups.size() == (bsz, num_tau_prime_samples)
z1, replay_tau = main(obs1, num_tau_samples)
acts1 = acts1.squeeze(1) # (bsz)
z1 = z1[range(bsz), acts1] # (bsz, num_tau_samples)
bellman_errors = backups.unsqueeze(-1) - z1.unsqueeze(1)
assert bellman_errors.size() == (bsz, num_tau_prime_samples, num_tau_samples)
huber_loss1 = (abs(bellman_errors) <= kappa).float() * 0.5 * bellman_errors ** 2
huber_loss2 = (
(abs(bellman_errors) > kappa).float()
* kappa
* (abs(bellman_errors) - kappa / 2)
)
huber_loss = huber_loss1 + huber_loss2
replay_tau = replay_tau.view(bsz, num_tau_samples).unsqueeze(
1
) # (bsz, 1, num_tau_samples)
replay_tau = replay_tau.repeat(1, num_tau_prime_samples, 1)
assert replay_tau.size() == (bsz, num_tau_prime_samples, num_tau_samples)
tau_huber_loss = abs(replay_tau - ((bellman_errors < 0).float()).detach())
tau_huber_loss = tau_huber_loss * huber_loss / kappa
loss = tau_huber_loss.sum(dim=2).mean(dim=1) # (bsz)
loss = loss.mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item(), None
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
total_steps = steps_per_epoch * epochs
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
main.eval()
# the epsilon value used for exploration during training
epsilon = core.linearly_decaying_epsilon(
epsilon_decay_period, t, min_replay_history, epsilon_train
)
a = get_action(o, epsilon)
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len == max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
if d or (ep_len == max_ep_len):
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# train at the rate of update_period if enough training steps have been run
if replay_buffer.size > min_replay_history and t % update_period == 0:
loss, QDist = update()
logger.store(LossQ=loss) # , QVals=QDist.mean(-1))
# syncs weights from online to target network
if t % target_update_period == 0:
target.load_state_dict(main.state_dict())
# End of epoch wrap-up
if replay_buffer.size > min_replay_history and t % steps_per_epoch == 0:
epoch = t // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs - 1):
logger.save_state({"env": env}, main, None)
# Test the performance of the deterministic version of the agent.
test_agent()
# Log info about epoch
logger.log_tabular("Epoch", epoch)
logger.log_tabular("EpRet", with_min_and_max=True)
logger.log_tabular("TestEpRet", with_min_and_max=True)
logger.log_tabular("EpLen", average_only=True)
logger.log_tabular("TestEpLen", average_only=True)
logger.log_tabular("TotalEnvInteracts", t)
logger.log_tabular("LossQ", average_only=True)
# logger.log_tabular("QVals", with_min_and_max=True)
logger.log_tabular("Time", time.time() - start_time)
logger.dump_tabular()
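# --- Hedged usage sketch (not part of the original fireup module) ---
# The import path and hyperparameter values below are illustrative assumptions;
# in practice the function is usually driven by the package's experiment runner.
#
#     import gym
#     from fireup.algos.iqn.iqn import iqn   # module path assumed
#
#     iqn(lambda: gym.make('CartPole-v1'),
#         steps_per_epoch=1000, epochs=10, min_replay_history=1000,
#         logger_kwargs=dict(output_dir='/tmp/iqn_cartpole'))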
|
the-stack_0_23605 | # file: swarm.py
# author: Joram Wessels
# date: 05-12-2017
# depedencies: numpy
# description:
# Handles the swarm technology behind the autonomous TORCS driver.
# usage:
# Assuming the variables
# *distTraveled*, *pos*, *spd*, *crashed*, and *contact*,
#
# if distTraveled % trail.pos_int == 0:
# max_speed = trail.check_in(pos, spd, crashed, contact)
# crashed, contact = False, False
#
from sys import stderr
from collections import defaultdict
from numbers import Real
from os.path import isfile
from os import remove
import numpy as np
SEP = ',' # the separator used in the feromone trail files
NAME = '.feromones' # the basic filename for feromone trail files
NOMAX = 10000 # returned when an error occurs
# - TODO unit test: back_prop, report_result, check_in
# - TODO if track is known, or if feromone trail looks like a known track, switch to known trail
# - NOTE if they drive close behind each other, they'll always explore the same max_speeds
# - NOTE initializing a FeromoneTrail will not read the previous .feromone entries
# - NOTE back_prop will stop at the finish line, since the length of the track isn't known
class FeromoneTrail:
def __init__(self, pos_int, spd_int, spd0, spd_n, expl_int, glb_max,
track_id=None):
""" FeromoneTrail keeps track of-, and syncs the feromone trail
A FeromoneTrail contains a table of known feromones and syncs to
a .feromone file in storage to communicate with the swarm. The
initialization requires a position interval in meters at which
to check and update the feromone trail, a grid of possible
max_speed values to explore, an exploration interval to increase
the max_speed with when no negative experiences are known, and
a global maximum speed to default to when there are no positive
experiences. The resulting max_speeds can be lower than the
global maximum when this speed resulted in a negative experience.
Make sure to choose your speed grid big enough to harbor any speed
category you want to learn.
Args:
pos_int: The interval at which to check feromones in m (int)
spd_int: The interval between speed boxes in km/h (int)
spd0: The first speed box in km/h (int)
spd_n: The amount of speed boxes (int)
expl_int: The jump interval (dividable by spd_int) in km/h (int)
glb_max: The global max speed that ensures a finish in km/h (int)
track_id: The name of the race track if known
"""
self.pos_int = int(pos_int)
self.spd_int = int(spd_int)
self.spd0 = int(spd0)
self.spd_n = int(spd_n)
self.spd_max = (spd0 + spd_n * spd_int) - spd_int
self.expl_int = int(expl_int)
self.glb_max = int(glb_max)
self.prev_pos = 0
self.prev_spd = 0
self.filename = NAME + '_' + track_id if track_id else NAME
self.table = defaultdict(lambda: np.zeros(spd_n))
self.leave_feromone(0, 0, 0)
if isfile(self.filename):
remove(self.filename)
    def __str__(self):
""" Casts the feromone trail table to a string representation """
return self.__repr__()
def __repr__(self):
""" Casts the feromone trail table to a string representation """
i = 0
speeds = [str(self.to_speed(i)) for i in range(self.spd_n)]
string = "\t " + ' '.join(speeds) + " km/h" + '\n'
while str(i) in self.table:
string += str(i) + ':\t' + str(self.table[str(i)]) + '\n'
i += self.pos_int
string += "m\n"
return string
def to_index(self, spd):
""" Converts absolute speed to table index """
return int((spd - self.spd0) // self.spd_int)
def to_speed(self, ind):
""" Converts table index to absolute speed """
return self.spd0 + ind * self.spd_int
def is_on_grid(self, spd):
""" Returns True if speed value is on the speed grid """
return not (spd < self.spd0) or (spd > self.spd_max)
def write_feromone(self, pos, speed, val):
""" Writes a new feromone to the .feromone file
Args:
pos: The position on the track, CurLapTime (int)
speed: The speed that has been tested (int)
val: The result of the test (-1, 0, 1)
"""
file = open(self.filename, 'a')
file.write('\n' + SEP.join([str(pos), str(speed), str(val)]))
file.close()
def read_feromone(self):
""" Reads the last feromones and updates it if they're new
Returns:
List of [pos, speed, val] lists if there are any
"""
file = open(self.filename, 'r')
contents = file.readlines()
file.close()
i = 1
changes = []
changed = True
while changed:
            if contents[-i].strip() == '':
                i += 1
                continue
feromone = [int(s) for s in contents[-i].strip().split(SEP)]
if feromone == self.last_change: break
changes.append(feromone)
i += 1
if changes: self.last_change = changes[0]
return changes
def update_table(self, pos, spd, val):
""" Updates a newly received feromone in the table
Args:
pos: The position on the track, CurLapTime (int)
spd: The speed that has been tested (int)
val: The result of the test (-1, 0, 1)
"""
index = self.to_index(spd)
if val == -1:
for i in range(index, self.to_index(self.spd_max) +1):
self.table[str(pos)][i] = -1
elif val == 1:
for i in range(index, -1, -1):
self.table[str(pos)][i] = 1
def next_experiment(self, pos):
""" Checks the table for the next best max speed experiment
Returns the ideal next max speed to try out, regardless
of the current speed of the car.
Args:
pos: The position on the track, CurLapTime (int)
Returns:
The next best max speed value (int)
"""
row = self.table[str(pos)]
i1 = find_first(row, 1, rev=True)
i2 = find_first(row, -1)
i_glb_max = self.to_index(self.glb_max)
# if there are no occurences of + above glb_max
if i1 == -1 or (i1 < i_glb_max and not row[i_glb_max] == -1):
if row[i_glb_max] == -1:
i1 = i2 - 1 # last 0 before first -
else:
i1 = i_glb_max # resort to global max
# exploring, value in between known values, or safe value
if i2 == -1:
spd = min(self.spd_max, self.to_speed(i1) + self.expl_int)
index = self.to_index(spd)
else:
index = i1 + (i2 - i1) // 2
return index * self.spd_int + self.spd0
def leave_feromone(self, pos, spd, val):
""" Updates the table and writes the new feromone to the file
If an off-grid pos value is passed,
it defaults to the last on-grid value
Args:
pos: The position on the track, CurLapTime (int)
spd: The speed that has been tested (int)
val: The result of the test (-1, 0, 1)
"""
self.last_change = [pos, spd, val]
self.update_table(pos, spd, val)
self.write_feromone(pos, spd, val)
def back_prop(self, pos, max_spd):
""" Updates previous frames to anticipate this failed *max_spd*
Args:
pos: The position on the track, CurLapTime (int)
max_spd: The max speed that has failed (int)
"""
while max_spd < self.spd_max and pos > -1:
first_minus = find_first(self.table[str(pos)], -1)
if self.to_index(max_spd) >= first_minus and first_minus > -1:
break
self.leave_feromone(pos, max_spd, -1)
max_spd = int(breakable_speed(max_spd, self.pos_int))
max_spd -= int(max_spd % self.spd_int)
pos -= self.pos_int
def get_max_speed(self, pos):
""" Updates the feromone table and returns the next max speed
If an off-grid pos value is passed,
it defaults to the next on-grid value
Args:
pos: The position on the track, CurLapTime (int)
Returns:
The next best max speed value (int)
"""
if not pos % self.pos_int == 0:
err("SWARM WARNING: Invalid position:", pos)
pos += self.pos_int - (pos % self.pos_int)
err(" Defaulted to", pos)
change = self.read_feromone()
while change:
ppos, speed, val = change.pop()
self.update_table(ppos, speed, val)
max_speed = self.next_experiment(pos)
return max_speed
def report_result(self, pos, spd, val):
""" Updates the feromone trail with the new information
Args:
pos: The position on the track, CurLapTime (int)
spd: The current speed of the car (int)
val: The result of the experiment (-1, 0, 1)
"""
spd -= 1 # such that 160 falls into the category with max_spd=160
max_spd = spd - (spd % self.spd_int) + self.spd_int
spd_i = self.to_index(max_spd)
if val == -1:
self.back_prop(pos, max_spd)
elif not self.table[str(pos)][spd_i] == val:
self.leave_feromone(pos, max_spd, val)
def check_in(self, pos, spd, crashed, contact):
""" Called at the start of ever frame to check/update feromones
Args:
pos: The position on the track, distTraveled in m (num)
spd: The current speed of the car in km/h (num)
crashed: Indicates a crash or off-track in last frame (bool)
contact: Indicates contact with another car in last frame (bool)
Returns:
The maximum speed for the next frame according to the swarm
"""
# input verification
if not isinstance(pos, Real):
err("SWARM ERROR: pos isn't a real number, but:", pos)
return NOMAX
if not isinstance(spd, Real):
err("SWARM ERROR: spd isn't a real number, but:", pos)
return NOMAX
if spd > self.spd_max:
err("SWARM WARNING: Speed is beyond speed grid:", spd)
err(" Swarm can't learn from this experience")
if not pos % self.pos_int == 0:
err("SWARM WARNING: Invalid position:", pos)
pos -= pos % self.pos_int
err(" Defaulted to: ", pos)
pos, spd = int(pos), int(spd)
# update
if self.is_on_grid(self.prev_spd):
if crashed and not contact:
self.report_result(self.prev_pos, self.prev_spd, -1)
elif not crashed and not contact:
self.report_result(self.prev_pos, self.prev_spd, 1)
# predict
self.prev_pos, self.prev_spd = pos, spd
max_speed = self.get_max_speed(pos)
return max_speed
def err(*args):
""" prints to standard error """
print(*args, file=stderr)
def find_first(array, val, rev=False):
""" Finds the first (or last) occurence of val in array
Args:
array: The numpy array to evaluate
val: The value to find
rev: If True, returns the last occurence of val
Returns:
The index of the first (or last) occurence of val
in array, or -1 if the value doesn't appear in array
"""
ar = np.array(list(array)) # copies the array
if rev:
ar = np.flip(ar, 0)
i = np.argmax(ar==val)
if i == 0 and not ar[0] == val:
return -1
if rev:
i = abs(i - len(ar) + 1)
return i
def breakable_speed(end_speed, trajectory):
""" Computes the max speed that can break to reach *end_speed*
Args:
end_speed: The speed at the end of the trajectory in km/h (num)
trajectory: The distance over which to descelerate in m (num)
Returns:
The maximum absolute speed at the beginning of the
trajectory that ensures a desceleration to *end_speed*
"""
# The car is about 5m long, it descelerated from 280 to 0
# in about 12-14 times its length, which would be 60-70m.
# Assuming a linear decrease in speed, the maximum rate
# of desceleration is therefore -280/65 = -4.31 km/h/m.
# To be safe, we use half that: -2.15
return trajectory * 2.15 + end_speed
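# --- Hedged examples for the pure helpers above (values are illustrative) ---
if __name__ == '__main__':
    row = np.array([1, 1, 0, -1, -1])
    print(find_first(row, -1))           # -> 3, first index holding -1
    print(find_first(row, 1, rev=True))  # -> 1, last index holding 1
    print(breakable_speed(100, 50))      # -> 207.5 km/h can still brake to 100 over 50 m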
|
the-stack_0_23606 | from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from numpy.linalg import det, inv
from ..gaussian_estimators import MultivariateGaussian
class LDA(BaseEstimator):
"""
Linear Discriminant Analysis (LDA) classifier
Attributes
----------
self.classes_ : np.ndarray of shape (n_classes,)
The different labels classes. To be set in `LDA.fit`
self.mu_ : np.ndarray of shape (n_classes,n_features)
The estimated features means for each class. To be set in `LDA.fit`
self.cov_ : np.ndarray of shape (n_features,n_features)
The estimated features covariance. To be set in `LDA.fit`
self._cov_inv : np.ndarray of shape (n_features,n_features)
The inverse of the estimated features covariance. To be set in `LDA.fit`
self.pi_: np.ndarray of shape (n_classes)
The estimated class probabilities. To be set in `GaussianNaiveBayes.fit`
"""
def __init__(self):
"""
Instantiate an LDA classifier
"""
super().__init__()
self.classes_, self.mu_, self.cov_, self._cov_inv, self.pi_ = None, None, None, None, None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
fits an LDA model.
Estimates gaussian for each label class - Different mean vector, same covariance
matrix with dependent features.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
self.classes_ = np.unique(y)
self.mu_ = np.zeros((len(self.classes_), X.shape[1]))
self.cov_ = np.zeros((X.shape[1], X.shape[1]))
nk = np.zeros(len(self.classes_))
for c in range(len(self.classes_)):
nk[c] = len(y[y == self.classes_[c]])
self.pi_ = nk / len(y)
for c in range(len(self.classes_)):
X_c = X[(y == self.classes_[c])]
self.mu_[c, :] = np.mean(X_c, axis=0)
w = X_c - self.mu_[c]
self.cov_ = self.cov_ + w.T @ w
self.cov_ = self.cov_ / len(y)
self._cov_inv = np.linalg.inv(self.cov_)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
likelihoods = self.likelihood(X)
posterior = likelihoods + np.log(self.pi_)
responses = np.argmax(posterior, axis=1)
return responses
def likelihood(self, X: np.ndarray) -> np.ndarray:
"""
Calculate the likelihood of a given data over the estimated model
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
Input data to calculate its likelihood over the different classes.
Returns
-------
likelihoods : np.ndarray of shape (n_samples, n_classes)
The likelihood for each sample under each of the classes
"""
if not self.fitted_:
raise ValueError("Estimator must first be fitted before calling `likelihood` function")
likelihoods = np.zeros((X.shape[0], len(self.classes_)))
log_det_cov_inv = np.log(np.linalg.det(self._cov_inv))
for c in range(len(self.classes_)):
d = X[:, np.newaxis, :] - self.mu_[c, :]
likelihoods[:, c] = .5 * (log_det_cov_inv - np.sum(d.dot(self._cov_inv) * d, axis=2).flatten())
return likelihoods
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under missclassification loss function
"""
from ...metrics import misclassification_error
y_pred = self.predict(X)
return misclassification_error(y, y_pred)
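# --- Hedged usage sketch (not part of the original exercise file) ---
# Assumes the course's BaseEstimator exposes fit / predict / loss wrappers around
# the private methods above; the synthetic two-class data is illustrative only.
#
#     import numpy as np
#
#     rng = np.random.default_rng(0)
#     X = np.vstack([rng.normal(0, 1, (100, 2)), rng.normal(3, 1, (100, 2))])
#     y = np.hstack([np.zeros(100), np.ones(100)])
#
#     lda = LDA()
#     lda.fit(X, y)
#     print(lda.predict(X[:5]))   # predicted class labels
#     print(lda.loss(X, y))       # misclassification rate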
|
the-stack_0_23608 | from functools import wraps
from .exceptions import RecordIsNull
from .utils import to_py_deep, to_js_key, get_repr, bind
__all__ = 'query', 'query_repr'
empty_tuple = tuple()
def query_repr(interface):
return get_repr('Record', interface)
def recursive_require(required, fields):
out = {}
for field_name, field in required.items():
if field is None or not len(field):
# out[field_name] = recursive_require(fields[field_name], fields[field_name].fields)
if hasattr(fields[field_name], 'fields'):
out[field_name] = recursive_require(
{key: None for key in fields[field_name].fields},
fields[field_name].fields
)
else:
out[field_name] = None
else:
out[field_name] = recursive_require(required[field_name], fields[field_name].fields)
return out
def query(**fields):
query_records = bind(fields)
empty_requires = {record_name: None for record_name in query_records.keys()}
def create_query(resolver):
wrapper = wraps(resolver)
@wrapper
def init(query_name):
@wrapper
def resolve(required=None, props=None, context=None):
context = context or {}
if required is not None and len(required):
required = to_py_deep(required)
required = recursive_require(required or empty_requires, query_records)
if props is not None and len(props):
state = resolver(
required,
query=query_records,
query_name=query_name,
**to_py_deep(props),
**context
)
else:
state = resolver(
required,
query=query_records,
query_name=query_name,
**context
)
values = {}
for record_name, required_fields in required.items():
try:
result = query_records[record_name](
state[record_name],
required_fields,
query=query_records,
query_name=query_name,
**context
)
except RecordIsNull:
result = None
values[to_js_key(record_name)] = result
return values
return resolve
return init
return create_query
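# --- Hedged, shape-only sketch (the record objects and loaders are assumptions) ---
# `query` is a decorator factory: each keyword argument names a record, and the
# decorated resolver must return a dict of per-record state keyed by those names.
#
#     @query(viewer=viewer_record)                   # viewer_record defined elsewhere
#     def viewer_query(required, query=None, query_name=None, **props):
#         return {'viewer': load_viewer(**props)}    # load_viewer is hypothetical
#
#     resolve = viewer_query('viewerQuery')          # bind a query name
#     result = resolve(required={'viewer': {}}, props={}, context={})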
|
the-stack_0_23609 | """Performs face alignment and stores face thumbnails in the output directory."""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
from time import sleep
import numpy as np
import tensorflow as tf
from scipy import misc
import align.detect_face
import facenet
def main(args):
sleep(random.random())
output_dir = os.path.expanduser(args.output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Store some git revision info in a text file in the log directory
src_path, _ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
dataset = facenet.get_dataset(args.input_dir)
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
minsize = 20 # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps' thresholds
factor = 0.709 # scale factor
# Add a random key to the filename to allow alignment using multiple processes
random_key = np.random.randint(0, high=99999)
bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
with open(bounding_boxes_filename, "w") as text_file:
nrof_images_total = 0
nrof_successfully_aligned = 0
if args.random_order:
random.shuffle(dataset)
for cls in dataset:
output_class_dir = os.path.join(output_dir, cls.name)
if not os.path.exists(output_class_dir):
os.makedirs(output_class_dir)
if args.random_order:
random.shuffle(cls.image_paths)
for image_path in cls.image_paths:
nrof_images_total += 1
filename = os.path.splitext(os.path.split(image_path)[1])[0]
output_filename = os.path.join(output_class_dir, filename + '.png')
print(image_path)
if not os.path.exists(output_filename):
try:
img = misc.imread(image_path)
# img = imageio.read(image_path)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(image_path, e)
print(errorMessage)
else:
if img.ndim < 2:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
continue
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:, :, 0:3]
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold,
factor)
nrof_faces = bounding_boxes.shape[0]
if nrof_faces > 0:
det = bounding_boxes[:, 0:4]
det_arr = []
img_size = np.asarray(img.shape)[0:2]
if nrof_faces > 1:
if args.detect_multiple_faces:
for i in range(nrof_faces):
det_arr.append(np.squeeze(det[i]))
else:
bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
img_center = img_size / 2
offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
(det[:, 1] + det[:, 3]) / 2 - img_center[0]])
offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
index = np.argmax(
bounding_box_size - offset_dist_squared * 2.0) # some extra weight on the centering
det_arr.append(det[index, :])
else:
det_arr.append(np.squeeze(det))
for i, det in enumerate(det_arr):
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - args.margin / 2, 0)
bb[1] = np.maximum(det[1] - args.margin / 2, 0)
bb[2] = np.minimum(det[2] + args.margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + args.margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
nrof_successfully_aligned += 1
filename_base, file_extension = os.path.splitext(output_filename)
if args.detect_multiple_faces:
output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
else:
output_filename_n = "{}{}".format(filename_base, file_extension)
misc.imsave(output_filename_n, scaled)
text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
else:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
print('Total number of images: %d' % nrof_images_total)
print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
BASE_PATH = os.path.dirname(__file__)
print('Base name : %s' % BASE_PATH)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str, help='Directory with unaligned images.',
default='~/workspace/ml-facenet-jetson/src/lfw')
parser.add_argument('--output_dir', type=str, help='Directory with aligned face thumbnails.',
default='~/workspace/ml-facenet-jetson/src/lfw_aligned')
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--margin', type=int,
help='Margin for the crop around the bounding box (height, width) in pixels.', default=32)
parser.add_argument('--random_order',
help='Shuffles the order of images to enable alignment using multiple processes.',
action='store_true')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=0.5)
parser.add_argument('--detect_multiple_faces', type=bool,
help='Detect and align multiple faces per image.', default=False)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
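# Example invocation (illustrative; the script name and paths are placeholders):
#   python align_dataset_mtcnn.py --input_dir ~/datasets/lfw \
#       --output_dir ~/datasets/lfw_aligned --image_size 160 --margin 32 \
#       --random_order --gpu_memory_fraction 0.25
# Launching several copies with --random_order lets multiple processes share
# the alignment work; the random key appended to the bounding-box filename
# above keeps their outputs from colliding.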
|
the-stack_0_23610 | # First Approach using built-in library, itertools.
from itertools import chain, combinations
def powerset(iterable):
# powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
print(list(powerset("abcd")))
# Second Approach with scratch.
def powerset(s):
x = len(s)
powers = [1 << i for i in range(x)]
for i in range(1 << x):
yield [a for power, a in zip(powers, s) if i & power]
print(list(powerset([4, 5, 6])))
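# Worked example of the bit-mask approach: for s = [4, 5, 6] the precomputed
# powers are [0b001, 0b010, 0b100]; each counter value i in range(1 << 3)
# selects the elements whose bit is set in i, e.g. i = 0b101 -> [4, 6].
assert [4, 6] in list(powerset([4, 5, 6]))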
|
the-stack_0_23612 | """
Unit tests for optimization routines from minpack.py.
"""
from __future__ import division, print_function, absolute_import
import warnings
from numpy.testing import (assert_, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose)
from pytest import raises as assert_raises
import numpy as np
from numpy import array, float64
from multiprocessing.pool import ThreadPool
from scipy import optimize
from scipy.special import lambertw
from scipy.optimize.minpack import leastsq, curve_fit, fixed_point
from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
from scipy.optimize import OptimizeWarning
class ReturnShape(object):
"""This class exists to create a callable that does not have a '__name__' attribute.
__init__ takes the argument 'shape', which should be a tuple of ints. When an instance
    is called with a single argument 'x', it returns numpy.ones(shape).
"""
def __init__(self, shape):
self.shape = shape
def __call__(self, x):
return np.ones(self.shape)
def dummy_func(x, shape):
"""A function that returns an array of ones of the given shape.
`x` is ignored.
"""
return np.ones(shape)
def sequence_parallel(fs):
pool = ThreadPool(len(fs))
try:
return pool.map(lambda f: f(), fs)
finally:
pool.terminate()
# Function and jacobian for tests of solvers for systems of nonlinear
# equations
def pressure_network(flow_rates, Qtot, k):
"""Evaluate non-linear equation system representing
the pressures and flows in a system of n parallel pipes::
f_i = P_i - P_0, for i = 1..n
f_0 = sum(Q_i) - Qtot
Where Q_i is the flow rate in pipe i and P_i the pressure in that pipe.
Pressure is modeled as a P=kQ**2 where k is a valve coefficient and
Q is the flow rate.
Parameters
----------
flow_rates : float
A 1D array of n flow rates [kg/s].
k : float
A 1D array of n valve coefficients [1/kg m].
Qtot : float
A scalar, the total input flow rate [kg/s].
Returns
-------
F : float
A 1D array, F[i] == f_i.
"""
P = k * flow_rates**2
F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot))
return F
def pressure_network_jacobian(flow_rates, Qtot, k):
"""Return the jacobian of the equation system F(flow_rates)
computed by `pressure_network` with respect to
*flow_rates*. See `pressure_network` for the detailed
    description of parameters.
Returns
-------
jac : float
*n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)``
and *f_i* and *Q_i* are described in the doc for `pressure_network`
"""
n = len(flow_rates)
pdiff = np.diag(flow_rates[1:] * 2 * k[1:] - 2 * flow_rates[0] * k[0])
jac = np.empty((n, n))
jac[:n-1, :n-1] = pdiff * 0
jac[:n-1, n-1] = 0
jac[n-1, :] = np.ones(n)
return jac
def pressure_network_fun_and_grad(flow_rates, Qtot, k):
return (pressure_network(flow_rates, Qtot, k),
pressure_network_jacobian(flow_rates, Qtot, k))
class TestFSolve(object):
def test_pressure_network_no_gradient(self):
# fsolve without gradient, equal pipes -> equal flows.
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows, info, ier, mesg = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
full_output=True)
assert_array_almost_equal(final_flows, np.ones(4))
assert_(ier == 1, mesg)
def test_pressure_network_with_gradient(self):
# fsolve with gradient, equal pipes -> equal flows
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
fprime=pressure_network_jacobian)
assert_array_almost_equal(final_flows, np.ones(4))
def test_wrong_shape_func_callable(self):
func = ReturnShape(1)
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, func, x0)
def test_wrong_shape_func_function(self):
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))
def test_wrong_shape_fprime_callable(self):
func = ReturnShape(1)
deriv_func = ReturnShape((2,2))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_wrong_shape_fprime_function(self):
func = lambda x: dummy_func(x, (2,))
deriv_func = lambda x: dummy_func(x, (3,3))
assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
def test_func_can_raise(self):
def func(*args):
raise ValueError('I raised')
with assert_raises(ValueError, match='I raised'):
optimize.fsolve(func, x0=[0])
def test_Dfun_can_raise(self):
func = lambda x: x - np.array([10])
def deriv_func(*args):
raise ValueError('I raised')
with assert_raises(ValueError, match='I raised'):
optimize.fsolve(func, x0=[0], fprime=deriv_func)
def test_float32(self):
func = lambda x: np.array([x[0] - 100, x[1] - 1000], dtype=np.float32)**2
p = optimize.fsolve(func, np.array([1, 1], np.float32))
assert_allclose(func(p), [0, 0], atol=1e-3)
def test_reentrant_func(self):
def func(*args):
self.test_pressure_network_no_gradient()
return pressure_network(*args)
# fsolve without gradient, equal pipes -> equal flows.
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows, info, ier, mesg = optimize.fsolve(
func, initial_guess, args=(Qtot, k),
full_output=True)
assert_array_almost_equal(final_flows, np.ones(4))
assert_(ier == 1, mesg)
def test_reentrant_Dfunc(self):
def deriv_func(*args):
self.test_pressure_network_with_gradient()
return pressure_network_jacobian(*args)
# fsolve with gradient, equal pipes -> equal flows
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.fsolve(
pressure_network, initial_guess, args=(Qtot, k),
fprime=deriv_func)
assert_array_almost_equal(final_flows, np.ones(4))
def test_concurrent_no_gradient(self):
return sequence_parallel([self.test_pressure_network_no_gradient] * 10)
def test_concurrent_with_gradient(self):
return sequence_parallel([self.test_pressure_network_with_gradient] * 10)
class TestRootHybr(object):
def test_pressure_network_no_gradient(self):
# root/hybr without gradient, equal pipes -> equal flows
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='hybr', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient(self):
# root/hybr with gradient, equal pipes -> equal flows
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([[2., 0., 2., 0.]])
final_flows = optimize.root(pressure_network, initial_guess,
args=(Qtot, k), method='hybr',
jac=pressure_network_jacobian).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient_combined(self):
# root/hybr with gradient and function combined, equal pipes -> equal
# flows
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network_fun_and_grad,
initial_guess, args=(Qtot, k),
method='hybr', jac=True).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestRootLM(object):
def test_pressure_network_no_gradient(self):
# root/lm without gradient, equal pipes -> equal flows
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='lm', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
class TestLeastSq(object):
def setup_method(self):
x = np.linspace(0, 10, 40)
a,b,c = 3.1, 42, -304.2
self.x = x
self.abc = a,b,c
y_true = a*x**2 + b*x + c
np.random.seed(0)
self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)
def residuals(self, p, y, x):
a,b,c = p
err = y-(a*x**2 + b*x + c)
return err
def residuals_jacobian(self, _p, _y, x):
return -np.vstack([x**2, x, np.ones_like(x)]).T
def test_basic(self):
p0 = array([0,0,0])
params_fit, ier = leastsq(self.residuals, p0,
args=(self.y_meas, self.x))
assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
# low precision due to random
assert_array_almost_equal(params_fit, self.abc, decimal=2)
def test_basic_with_gradient(self):
p0 = array([0,0,0])
params_fit, ier = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
Dfun=self.residuals_jacobian)
assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
# low precision due to random
assert_array_almost_equal(params_fit, self.abc, decimal=2)
def test_full_output(self):
p0 = array([[0,0,0]])
full_output = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
full_output=True)
params_fit, cov_x, infodict, mesg, ier = full_output
assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)
def test_input_untouched(self):
p0 = array([0,0,0],dtype=float64)
p0_copy = array(p0, copy=True)
full_output = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
full_output=True)
params_fit, cov_x, infodict, mesg, ier = full_output
assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)
assert_array_equal(p0, p0_copy)
def test_wrong_shape_func_callable(self):
func = ReturnShape(1)
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.leastsq, func, x0)
def test_wrong_shape_func_function(self):
# x0 is a list of two elements, but func will return an array with
# length 1, so this should result in a TypeError.
x0 = [1.5, 2.0]
assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),))
def test_wrong_shape_Dfun_callable(self):
func = ReturnShape(1)
deriv_func = ReturnShape((2,2))
assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
def test_wrong_shape_Dfun_function(self):
func = lambda x: dummy_func(x, (2,))
deriv_func = lambda x: dummy_func(x, (3,3))
assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
def test_float32(self):
# Regression test for gh-1447
def func(p,x,y):
q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3]
return q - y
x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286,
1.231], dtype=np.float32)
y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258,
0.034,0.0396], dtype=np.float32)
p0 = np.array([1.0,1.0,1.0,1.0])
p1, success = optimize.leastsq(func, p0, args=(x,y))
assert_(success in [1,2,3,4])
assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum())
def test_func_can_raise(self):
def func(*args):
raise ValueError('I raised')
with assert_raises(ValueError, match='I raised'):
optimize.leastsq(func, x0=[0])
def test_Dfun_can_raise(self):
func = lambda x: x - np.array([10])
def deriv_func(*args):
raise ValueError('I raised')
with assert_raises(ValueError, match='I raised'):
optimize.leastsq(func, x0=[0], Dfun=deriv_func)
def test_reentrant_func(self):
def func(*args):
self.test_basic()
return self.residuals(*args)
p0 = array([0,0,0])
params_fit, ier = leastsq(func, p0,
args=(self.y_meas, self.x))
assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
# low precision due to random
assert_array_almost_equal(params_fit, self.abc, decimal=2)
def test_reentrant_Dfun(self):
def deriv_func(*args):
self.test_basic()
return self.residuals_jacobian(*args)
p0 = array([0,0,0])
params_fit, ier = leastsq(self.residuals, p0,
args=(self.y_meas, self.x),
Dfun=deriv_func)
assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
# low precision due to random
assert_array_almost_equal(params_fit, self.abc, decimal=2)
def test_concurrent_no_gradient(self):
return sequence_parallel([self.test_basic] * 10)
def test_concurrent_with_gradient(self):
return sequence_parallel([self.test_basic_with_gradient] * 10)
class TestCurveFit(object):
def setup_method(self):
self.y = array([1.0, 3.2, 9.5, 13.7])
self.x = array([1.0, 2.0, 3.0, 4.0])
def test_one_argument(self):
def func(x,a):
return x**a
popt, pcov = curve_fit(func, self.x, self.y)
assert_(len(popt) == 1)
assert_(pcov.shape == (1,1))
assert_almost_equal(popt[0], 1.9149, decimal=4)
assert_almost_equal(pcov[0,0], 0.0016, decimal=4)
# Test if we get the same with full_output. Regression test for #1415.
# Also test if check_finite can be turned off.
res = curve_fit(func, self.x, self.y,
full_output=1, check_finite=False)
(popt2, pcov2, infodict, errmsg, ier) = res
assert_array_almost_equal(popt, popt2)
def test_two_argument(self):
def func(x, a, b):
return b*x**a
popt, pcov = curve_fit(func, self.x, self.y)
assert_(len(popt) == 2)
assert_(pcov.shape == (2,2))
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
decimal=4)
def test_func_is_classmethod(self):
class test_self(object):
"""This class tests if curve_fit passes the correct number of
arguments when the model function is a class instance method.
"""
def func(self, x, a, b):
return b * x**a
test_self_inst = test_self()
popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
assert_(pcov.shape == (2,2))
assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
decimal=4)
def test_regression_2639(self):
# This test fails if epsfcn in leastsq is too large.
x = [574.14200000000005, 574.154, 574.16499999999996,
574.17700000000002, 574.18799999999999, 574.19899999999996,
574.21100000000001, 574.22199999999998, 574.23400000000004,
574.245]
y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0,
1550.0, 949.0, 841.0]
guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0,
0.0035019999999983615, 859.0]
good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03,
1.0068462e-02, 8.57450661e+02]
def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
return (A0*np.exp(-(x-x0)**2/(2.*sigma**2))
+ A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c)
popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
assert_allclose(popt, good, rtol=1e-5)
def test_pcov(self):
xdata = np.array([0, 1, 2, 3, 4, 5])
ydata = np.array([1, 1, 5, 7, 8, 12])
sigma = np.array([1, 2, 1, 2, 1, 2])
def f(x, a, b):
return a*x + b
for method in ['lm', 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
method=method)
perr_scaled = np.sqrt(np.diag(pcov))
assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
method=method)
perr_scaled = np.sqrt(np.diag(pcov))
assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
absolute_sigma=True, method=method)
perr = np.sqrt(np.diag(pcov))
assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)
popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
absolute_sigma=True, method=method)
perr = np.sqrt(np.diag(pcov))
assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3)
# infinite variances
def f_flat(x, a, b):
return a*x
pcov_expected = np.array([np.inf]*4).reshape(2, 2)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning,
"Covariance of the parameters could not be estimated")
popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma)
popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])
assert_(pcov.shape == (2, 2))
assert_array_equal(pcov, pcov_expected)
assert_(pcov1.shape == (2, 2))
assert_array_equal(pcov1, pcov_expected)
def test_array_like(self):
# Test sequence input. Regression test for gh-3037.
def f_linear(x, a, b):
return a*x + b
x = [1, 2, 3, 4]
y = [3, 5, 7, 9]
assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10)
def test_indeterminate_covariance(self):
# Test that a warning is returned when pcov is indeterminate
xdata = np.array([1, 2, 3, 4, 5, 6])
ydata = np.array([1, 2, 3, 4, 5.5, 6])
_assert_warns(OptimizeWarning, curve_fit,
lambda x, a, b: a*x, xdata, ydata)
def test_NaN_handling(self):
# Test for correct handling of NaNs in input data: gh-3422
# create input with NaNs
xdata = np.array([1, np.nan, 3])
ydata = np.array([1, 2, 3])
assert_raises(ValueError, curve_fit,
lambda x, a, b: a*x + b, xdata, ydata)
assert_raises(ValueError, curve_fit,
lambda x, a, b: a*x + b, ydata, xdata)
assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b,
xdata, ydata, **{"check_finite": True})
def test_empty_inputs(self):
# Test both with and without bounds (regression test for gh-9864)
assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [])
assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [],
bounds=(1, 2))
assert_raises(ValueError, curve_fit, lambda x, a: a*x, [1], [])
assert_raises(ValueError, curve_fit, lambda x, a: a*x, [2], [],
bounds=(1, 2))
def test_function_zero_params(self):
# Fit args is zero, so "Unable to determine number of fit parameters."
assert_raises(ValueError, curve_fit, lambda x: x, [1, 2], [3, 4])
def test_None_x(self): # Added in GH10196
popt, pcov = curve_fit(lambda _, a: a * np.arange(10),
None, 2 * np.arange(10))
assert_allclose(popt, [2.])
def test_method_argument(self):
def f(x, a, b):
return a * np.exp(-b*x)
xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
for method in ['trf', 'dogbox', 'lm', None]:
popt, pcov = curve_fit(f, xdata, ydata, method=method)
assert_allclose(popt, [2., 2.])
assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown')
def test_bounds(self):
def f(x, a, b):
return a * np.exp(-b*x)
xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
# The minimum w/out bounds is at [2., 2.],
# and with bounds it's at [1.5, smth].
bounds = ([1., 0], [1.5, 3.])
for method in [None, 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds,
method=method)
assert_allclose(popt[0], 1.5)
# With bounds, the starting estimate is feasible.
popt, pcov = curve_fit(f, xdata, ydata, method='trf',
bounds=([0., 0], [0.6, np.inf]))
assert_allclose(popt[0], 0.6)
# method='lm' doesn't support bounds.
assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds,
method='lm')
def test_bounds_p0(self):
# This test is for issue #5719. The problem was that an initial guess
# was ignored when 'trf' or 'dogbox' methods were invoked.
def f(x, a):
return np.sin(x + a)
xdata = np.linspace(-2*np.pi, 2*np.pi, 40)
ydata = np.sin(xdata)
bounds = (-3 * np.pi, 3 * np.pi)
for method in ['trf', 'dogbox']:
popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi)
popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi,
bounds=bounds, method=method)
            # If the initial guess is ignored, then popt_2 would be close to 0.
assert_allclose(popt_1, popt_2)
def test_jac(self):
# Test that Jacobian callable is handled correctly and
# weighted if sigma is provided.
def f(x, a, b):
return a * np.exp(-b*x)
def jac(x, a, b):
e = np.exp(-b*x)
return np.vstack((e, -a * x * e)).T
xdata = np.linspace(0, 1, 11)
ydata = f(xdata, 2., 2.)
# Test numerical options for least_squares backend.
for method in ['trf', 'dogbox']:
for scheme in ['2-point', '3-point', 'cs']:
popt, pcov = curve_fit(f, xdata, ydata, jac=scheme,
method=method)
assert_allclose(popt, [2, 2])
# Test the analytic option.
for method in ['lm', 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
assert_allclose(popt, [2, 2])
# Now add an outlier and provide sigma.
ydata[5] = 100
sigma = np.ones(xdata.shape[0])
sigma[5] = 200
for method in ['lm', 'trf', 'dogbox']:
popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method,
jac=jac)
# Still the optimization process is influenced somehow,
# have to set rtol=1e-3.
assert_allclose(popt, [2, 2], rtol=1e-3)
def test_maxfev_and_bounds(self):
# gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq)
# but with bounds, the parameter is `max_nfev` (via least_squares)
x = np.arange(0, 10)
y = 2*x
popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100)
popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100)
assert_allclose(popt1, 2, atol=1e-14)
assert_allclose(popt2, 2, atol=1e-14)
def test_curvefit_simplecovariance(self):
def func(x, a, b):
return a * np.exp(-b*x)
def jac(x, a, b):
e = np.exp(-b*x)
return np.vstack((e, -a * x * e)).T
np.random.seed(0)
xdata = np.linspace(0, 4, 50)
y = func(xdata, 2.5, 1.3)
ydata = y + 0.2 * np.random.normal(size=len(xdata))
sigma = np.zeros(len(xdata)) + 0.2
covar = np.diag(sigma**2)
for jac1, jac2 in [(jac, jac), (None, None)]:
for absolute_sigma in [False, True]:
popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
jac=jac1, absolute_sigma=absolute_sigma)
popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar,
jac=jac2, absolute_sigma=absolute_sigma)
assert_allclose(popt1, popt2, atol=1e-14)
assert_allclose(pcov1, pcov2, atol=1e-14)
def test_curvefit_covariance(self):
def funcp(x, a, b):
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
return rotn.dot(a * np.exp(-b*x))
def jacp(x, a, b):
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
e = np.exp(-b*x)
return rotn.dot(np.vstack((e, -a * x * e)).T)
def func(x, a, b):
return a * np.exp(-b*x)
def jac(x, a, b):
e = np.exp(-b*x)
return np.vstack((e, -a * x * e)).T
np.random.seed(0)
xdata = np.arange(1, 4)
y = func(xdata, 2.5, 1.0)
ydata = y + 0.2 * np.random.normal(size=len(xdata))
sigma = np.zeros(len(xdata)) + 0.2
covar = np.diag(sigma**2)
# Get a rotation matrix, and obtain ydatap = R ydata
# Chisq = ydata^T C^{-1} ydata
# = ydata^T R^T R C^{-1} R^T R ydata
# = ydatap^T Cp^{-1} ydatap
# Cp^{-1} = R C^{-1} R^T
# Cp = R C R^T, since R^-1 = R^T
rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0], [1./np.sqrt(2), 1./np.sqrt(2), 0], [0, 0, 1.0]])
ydatap = rotn.dot(ydata)
covarp = rotn.dot(covar).dot(rotn.T)
for jac1, jac2 in [(jac, jacp), (None, None)]:
for absolute_sigma in [False, True]:
popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
jac=jac1, absolute_sigma=absolute_sigma)
popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp,
jac=jac2, absolute_sigma=absolute_sigma)
assert_allclose(popt1, popt2, atol=1e-14)
assert_allclose(pcov1, pcov2, atol=1e-14)
def test_dtypes(self):
# regression test for gh-9581: curve_fit fails if x and y dtypes differ
x = np.arange(-3, 5)
y = 1.5*x + 3.0 + 0.5*np.sin(x)
def func(x, a, b):
return a*x + b
for method in ['lm', 'trf', 'dogbox']:
for dtx in [np.float32, np.float64]:
for dty in [np.float32, np.float64]:
x = x.astype(dtx)
y = y.astype(dty)
with warnings.catch_warnings():
warnings.simplefilter("error", OptimizeWarning)
p, cov = curve_fit(func, x, y, method=method)
assert np.isfinite(cov).all()
assert not np.allclose(p, 1) # curve_fit's initial value
def test_dtypes2(self):
# regression test for gh-7117: curve_fit fails if
# both inputs are float32
def hyperbola(x, s_1, s_2, o_x, o_y, c):
b_2 = (s_1 + s_2) / 2
b_1 = (s_2 - s_1) / 2
return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4)
min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0])
max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0])
guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5])
params = [-2, .4, -1, -5, 9.5]
xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32])
ydata = hyperbola(xdata, *params)
# run optimization twice, with xdata being float32 and float64
popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
bounds=(min_fit, max_fit))
xdata = xdata.astype(np.float32)
ydata = hyperbola(xdata, *params)
popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
bounds=(min_fit, max_fit))
assert_allclose(popt_32, popt_64, atol=2e-5)
def test_broadcast_y(self):
xdata = np.arange(10)
target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata))
fit_func = lambda x, a, b: a*x**2 + b*x - target
for method in ['lm', 'trf', 'dogbox']:
popt0, pcov0 = curve_fit(fit_func,
xdata=xdata,
ydata=np.zeros_like(xdata),
method=method)
popt1, pcov1 = curve_fit(fit_func,
xdata=xdata,
ydata=0,
method=method)
assert_allclose(pcov0, pcov1)
def test_args_in_kwargs(self):
# Ensure that `args` cannot be passed as keyword argument to `curve_fit`
def func(x, a, b):
return a * x + b
with assert_raises(ValueError):
curve_fit(func,
xdata=[1, 2, 3, 4],
ydata=[5, 9, 13, 17],
p0=[1],
args=(1,))
class TestFixedPoint(object):
def test_scalar_trivial(self):
# f(x) = 2x; fixed point should be x=0
def func(x):
return 2.0*x
x0 = 1.0
x = fixed_point(func, x0)
assert_almost_equal(x, 0.0)
def test_scalar_basic1(self):
# f(x) = x**2; x0=1.05; fixed point should be x=1
def func(x):
return x**2
x0 = 1.05
x = fixed_point(func, x0)
assert_almost_equal(x, 1.0)
def test_scalar_basic2(self):
# f(x) = x**0.5; x0=1.05; fixed point should be x=1
def func(x):
return x**0.5
x0 = 1.05
x = fixed_point(func, x0)
assert_almost_equal(x, 1.0)
def test_array_trivial(self):
def func(x):
return 2.0*x
x0 = [0.3, 0.15]
olderr = np.seterr(all='ignore')
try:
x = fixed_point(func, x0)
finally:
np.seterr(**olderr)
assert_almost_equal(x, [0.0, 0.0])
def test_array_basic1(self):
# f(x) = c * x**2; fixed point should be x=1/c
def func(x, c):
return c * x**2
c = array([0.75, 1.0, 1.25])
x0 = [1.1, 1.15, 0.9]
olderr = np.seterr(all='ignore')
try:
x = fixed_point(func, x0, args=(c,))
finally:
np.seterr(**olderr)
assert_almost_equal(x, 1.0/c)
def test_array_basic2(self):
# f(x) = c * x**0.5; fixed point should be x=c**2
def func(x, c):
return c * x**0.5
c = array([0.75, 1.0, 1.25])
x0 = [0.8, 1.1, 1.1]
x = fixed_point(func, x0, args=(c,))
assert_almost_equal(x, c**2)
def test_lambertw(self):
# python-list/2010-December/594592.html
xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
args=(), xtol=1e-12, maxiter=500)
assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
assert_allclose(xxroot, lambertw(1)/2)
def test_no_acceleration(self):
# github issue 5460
ks = 2
kl = 6
m = 1.3
n0 = 1.001
i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1))
def func(n):
return np.log(kl/ks/n) / np.log((i0*n/(n - 1))) + 1
n = fixed_point(func, n0, method='iteration')
assert_allclose(n, m)
|
the-stack_0_23613 | #!/usr/bin/python
import pandas as pd
import matplotlib as mlp
import matplotlib.pyplot  # make mlp.pyplot available even if pandas has not imported it yet
import os
benchmark_dir = os.path.dirname(os.path.realpath(__file__))
df = pd.read_csv(benchmark_dir + "/data.csv", index_col=0)
df['Label'] = df.index + ' \n(' + df['Commit'] + ')'
df.index = df['Label']
ax = df[['Fib 35']].plot(grid=True, figsize=(12, 5), title="Benchmark (fib 35, recursive function)", marker='o')
ax.set_ylabel("Runtime (s)")
ax.set_xlabel("")
mlp.pyplot.xticks(rotation=8, ha='right')
mlp.pyplot.savefig(benchmark_dir + '/chart.jpg') |
the-stack_0_23615 | class Result:
def __init__(self, *args):
        if len(args) == 1:
            # A single positional argument: store it unwrapped rather than as a 1-tuple.
            self.args = args[0]
else:
self.args = args
def apply_to(self, f, arity):
res = None
if isinstance(self.args, tuple) and arity == len(self.args):
res = f(*self.args)
else:
res = f(self.args)
if isinstance(res, Result):
return res
elif res is not None:
return Result(res)
else:
return Result(self.args)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.args)
class Done(Result):
def apply_to(self, f, arity):
raise RuntimeError("Calling apply to on a completed result.")
class SkipTo(Result):
def __init__(self, target_f, *args):
super(SkipTo, self).__init__(*args)
self.target_f = target_f
def apply_to(self, f, arity):
if f != self.target_f:
return self
else:
return Result.apply_to(self, f, arity)
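# Illustrative usage (editor's sketch; `add` and `double` are made-up stand-ins):
# each stage's return value is rewrapped in a Result, while SkipTo passes
# through untouched until its target function is reached and Done refuses any
# further apply_to calls.
def _example_chain():
    def add(a, b):
        return a + b
    def double(x):
        return x * 2
    res = Result(1, 2).apply_to(add, arity=2)            # -> Result(3)
    res = res.apply_to(double, arity=1)                  # -> Result(6)
    skipped = SkipTo(double, 5).apply_to(add, arity=1)   # add is skipped
    return res, skipped.apply_to(double, arity=1)        # (Result(6), Result(10))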
|
the-stack_0_23618 | """Train YOLACT networks."""
import argparse
import os
import logging
import warnings
import time
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet import autograd
import gluoncv as gcv
from gluoncv import data as gdata
from gluoncv import utils as gutils
from gluoncv.model_zoo import get_model
from gluoncv.data.batchify import Tuple, Stack, Pad, Append
from gluoncv.data.transforms.presets.yolact import YOLACTDefaultTrainTransform
from gluoncv.data.transforms.presets.yolact import YOLACTDefaultValTransform
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
from gluoncv.data import batchify
from gluoncv.utils.metrics.coco_instance import COCOInstanceMetric
from gluoncv.utils import utils
def parse_args():
parser = argparse.ArgumentParser(description='Train YOLACT networks.')
parser.add_argument('--network', type=str, default='resnet50_v1b',
help="Base network name which serves as feature extraction base.")
parser.add_argument('--data-shape', type=int, default=550,
help="Input data shape, use 300, 512.")
parser.add_argument('--batch-size', type=int, default='8',
help='Training mini-batch size')
parser.add_argument('--dataset', type=str, default='coco',
                        help='Training dataset. Currently only coco is supported.')
parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,
default=4, help='Number of data workers, you can use larger '
                                        'number to accelerate data loading, if your CPU and GPUs are powerful.')
parser.add_argument('--gpus', type=str, default='0',
help='Training with GPUs, you can specify 1,3 for example.')
parser.add_argument('--epochs', type=int, default=55,
help='Training epochs.')
parser.add_argument('--resume', type=str, default='',
help='Resume from previously saved parameters if not None. '
'For example, you can resume from ./ssd_xxx_0123.params')
parser.add_argument('--start-epoch', type=int, default=0,
help='Starting epoch for resuming, default is 0 for new training.'
'You can specify it to 100 for example to start from 100 epoch.')
parser.add_argument('--lr', type=float, default=0.001,
help='Learning rate, default is 0.001')
parser.add_argument('--lr-decay', type=float, default=0.1,
help='decay rate of learning rate. default is 0.1.')
    parser.add_argument('--lr-decay-epoch', type=str, default='20,40,47,51',
                        help='epochs at which learning rate decays. default is 20,40,47,51.')
parser.add_argument('--momentum', type=float, default=0.9,
help='SGD momentum, default is 0.9')
parser.add_argument('--wd', type=float, default=0.0005,
help='Weight decay, default is 5e-4')
parser.add_argument('--log-interval', type=int, default=100,
help='Logging mini-batch interval. Default is 100.')
parser.add_argument('--save-prefix', type=str, default='/media/HDD_4TB/xcq/experiments/yolact/',
help='Saving parameter prefix')
parser.add_argument('--save-interval', type=int, default=10,
help='Saving parameters epoch interval, best model will always be saved.')
parser.add_argument('--val-interval', type=int, default=10,
help='Epoch interval for validation, increase the number will reduce the '
'training time if validation is slow.')
parser.add_argument('--seed', type=int, default=233,
help='Random seed to be fixed.')
parser.add_argument('--syncbn', action='store_true',
help='Use synchronize BN across devices.')
# FPN options
parser.add_argument('--use-fpn', action='store_true', default=True,
help='Whether to use feature pyramid network.')
args = parser.parse_args()
return args
def get_dataset(dataset, args):
if dataset.lower() == 'coco':
train_dataset = gdata.COCOInstance(root='/home/xcq/PycharmProjects/datasets/coco/', splits='instances_train2017')
val_dataset = gdata.COCOInstance(root='/home/xcq/PycharmProjects/datasets/coco/', splits='instances_val2017', skip_empty=False)
val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
else:
raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
return train_dataset, val_dataset, val_metric
def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers):
"""Get dataloader."""
width, height = data_shape, data_shape
scale = 4
# use fake data to generate fixed anchors for target generation
with autograd.train_mode():
_, _, anchors, _, _ = net(mx.nd.zeros((1, 3, height, width)))
batchify_fn = Tuple(Stack(), Stack(), Stack(), Stack(), Stack()) # stack image, cls_targets, box_targets, masks, matches
train_loader = gluon.data.DataLoader(
train_dataset.transform(YOLACTDefaultTrainTransform(width, height, anchors, scale=scale)),
batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
val_loader = gluon.data.DataLoader(
val_dataset.transform(YOLACTDefaultValTransform(width, height, scale=4)),
batch_size, False, batchify_fn=val_batchify_fn, last_batch='rollover', num_workers=num_workers)
return train_loader, val_loader
def save_params(net, best_map, current_map, epoch, save_interval, prefix):
current_map = float(current_map)
if current_map > best_map[0]:
best_map[0] = current_map
net.save_parameters('{:s}_best.params'.format(prefix, epoch, current_map))
with open(prefix+'_best_map.log', 'a') as f:
f.write('{:04d}:\t{:.4f}\n'.format(epoch, current_map))
if save_interval and epoch % save_interval == 0:
net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map))
def crop(bboxes, h, w, masks):
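    # Zero out mask activations that fall outside each predicted box. The box
    # corners are divided by `scale` because the prototype masks are `scale`x
    # smaller than the input image; a binary in-box window is then built per
    # box by broadcasting row/column index grids against the corners and
    # applied to the masks elementwise.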
scale = 4
b = masks.shape[0]
with autograd.pause():
ctx = bboxes.context
_h = mx.nd.arange(h, ctx=ctx)
_w = mx.nd.arange(w, ctx = ctx)
_h = mx.nd.tile(_h, reps=(b, 1))
_w = mx.nd.tile(_w, reps=(b, 1))
x1, y1 = mx.nd.round(bboxes[:, 0]/scale), mx.nd.round(bboxes[:, 1]/scale)
x2, y2 = mx.nd.round((bboxes[:, 2])/scale), mx.nd.round((bboxes[:, 3])/scale)
_h = (_h >= x1.expand_dims(axis=-1)) * (_h <= x2.expand_dims(axis=-1))
_w = (_w >= y1.expand_dims(axis=-1)) * (_w <= y2.expand_dims(axis=-1))
_mask = mx.nd.batch_dot(_h.expand_dims(axis=-1), _w.expand_dims(axis=-1), transpose_b=True)
masks = _mask * masks
return masks
def global_aware(masks):
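    # Standardize each mask over its spatial locations (zero mean, unit
    # variance, with a small epsilon for numerical stability).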
_, h, w = masks.shape
masks = masks.reshape((0, -1))
masks = masks - mx.nd.mean(masks, axis=-1, keepdims=True)
std = mx.nd.sqrt(mx.nd.mean(mx.nd.square(masks), axis=-1, keepdims=True))
masks = (masks / (std + 1e-6)).reshape((0, h, w))
return masks
def validate(net, val_data, ctx, eval_metric):
"""Test on validation dataset."""
clipper = gcv.nn.bbox.BBoxClipToImage()
eval_metric.reset()
# if not args.disable_hybridization:
# net.hybridize(static_alloc=args.static_alloc)
net.hybridize()
for ib, batch in enumerate(val_data):
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
det_info = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
for x, det_inf in zip(data, det_info):
det_id, det_score, det_bbox, det_maskeoc, det_mask = net(x)
det_bbox = clipper(det_bbox, x)
for i in range(det_bbox.shape[0]):
det_bbox_t = det_bbox[i] # det_bbox_t: [x1, y1, x2, y2]
det_id_t = det_id[i].asnumpy()
det_score_t = det_score[i].asnumpy()
det_maskeoc_t = det_maskeoc[i]
det_mask_t = det_mask[i]
full_mask = mx.nd.dot(det_maskeoc_t, det_mask_t)
im_height, im_width, h_scale, w_scale = det_inf[i].asnumpy()
im_height, im_width = int(round(im_height / h_scale)), \
int(round(im_width / w_scale))
full_mask = mx.nd.sigmoid(full_mask)
_, h, w = full_mask.shape
full_mask = crop(det_bbox_t, h, w, full_mask).asnumpy()
det_bbox_t = det_bbox_t.asnumpy()
det_bbox_t[:, 0], det_bbox_t[:, 2] = det_bbox_t[:, 0] / w_scale, det_bbox_t[:, 2] / w_scale
det_bbox_t[:, 1], det_bbox_t[:, 3] = det_bbox_t[:, 1] / h_scale, det_bbox_t[:, 3] / h_scale
full_masks = []
for mask in full_mask:
full_masks.append(gdata.transforms.mask.proto_fill(mask, (im_width, im_height)))
full_masks = np.array(full_masks)
assert det_bbox_t.shape[0] == det_id_t.shape[0] == det_score_t.shape[0] == full_masks.shape[0], \
print(det_bbox_t.shape[0], det_id_t.shape[0], det_score_t.shape[0], full_masks.shape[0])
eval_metric.update(det_bbox_t, det_id_t, det_score_t, full_masks)
return eval_metric.get()
def train(net, train_data, val_data, eval_metric, ctx, args):
"""Training pipeline"""
net.collect_params().reset_ctx(ctx)
trainer = gluon.Trainer(
net.collect_params(), 'sgd',
{'learning_rate': args.lr, 'wd': args.wd, 'momentum': args.momentum})
# lr decay policy
lr_decay = float(args.lr_decay)
lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
# mbox_loss = gcv.loss.SSDMultiBoxLoss()
mbox_loss = gcv.loss.YOLACTMultiBoxLoss()
ce_metric = mx.metric.Loss('CrossEntropy')
smoothl1_metric = mx.metric.Loss('SmoothL1')
sq_metric = mx.metric.Loss('SigmoidBCE')
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = args.save_prefix + '_train.log'
log_dir = os.path.dirname(log_file_path)
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
logger.info(args)
logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
best_map = [0]
for epoch in range(args.start_epoch, args.epochs):
while lr_steps and epoch >= lr_steps[0]:
new_lr = trainer.learning_rate * lr_decay
lr_steps.pop(0)
trainer.set_learning_rate(new_lr)
logger.info("[Epoch {}] Set learning rate to {}".format(epoch, new_lr))
ce_metric.reset()
smoothl1_metric.reset()
sq_metric.reset()
tic = time.time()
btic = time.time()
net.hybridize()
for i, batch in enumerate(train_data):
batch_size = batch[0].shape[0]
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
cls_targets = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
box_targets = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0)
mask_targets = gluon.utils.split_and_load(batch[3], ctx_list=ctx, batch_axis=0)
matches = gluon.utils.split_and_load(batch[4], ctx_list=ctx, batch_axis=0)
with autograd.record():
cls_preds = []
box_preds = []
masks = []
maskeocs = []
bts = []
for x, bt in zip(data, box_targets):
cls_pred, box_pred, anchor, maskeoc, mask = net(x)
bts.append(net.bbox_decoder(bt, anchor))
cls_preds.append(cls_pred)
box_preds.append(box_pred)
masks.append(mask)
maskeocs.append(maskeoc)
sum_loss, cls_loss, box_loss, mask_loss = mbox_loss(
cls_preds, box_preds, masks, maskeocs, cls_targets, box_targets, mask_targets, matches, bts)
autograd.backward(sum_loss)
# since we have already normalized the loss, we don't want to normalize
# by batch-size anymore
trainer.step(1)
ce_metric.update(0, [l * batch_size for l in cls_loss])
smoothl1_metric.update(0, [l * batch_size for l in box_loss])
sq_metric.update(0, [l * batch_size for l in mask_loss])
if args.log_interval and not (i + 1) % args.log_interval:
name1, loss1 = ce_metric.get()
name2, loss2 = smoothl1_metric.get()
name3, loss3 = sq_metric.get()
logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}, {}={:.3f},'.format(
epoch, i, batch_size/(time.time()-btic), name1, loss1, name2, loss2, name3, loss3))
btic = time.time()
name1, loss1 = ce_metric.get()
name2, loss2 = smoothl1_metric.get()
name3, loss3 = sq_metric.get()
logger.info('[Epoch {}] Training cost: {:.3f}, {}={:.3f}, {}={:.3f}, {}={:.3f}'.format(
epoch, (time.time()-tic), name1, loss1, name2, loss2, name3, loss3))
if (epoch % args.val_interval == 0) or (args.save_interval and epoch % args.save_interval == 0) or (epoch >= 50):
# consider reduce the frequency of validation to save time
map_name, mean_ap = validate(net, val_data, ctx, eval_metric)
val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])
logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
current_map = float(mean_ap[-1])
else:
current_map = 0.
save_params(net, best_map, current_map, epoch, args.save_interval, args.save_prefix)
if __name__ == '__main__':
args = parse_args()
# fix seed for mxnet, numpy and python builtin random generator.
gutils.random.seed(args.seed)
# training contexts
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = ctx if ctx else [mx.cpu()]
# network
if args.use_fpn:
net_name = '_'.join(('yolact', str(args.data_shape), 'fpn', args.network, args.dataset))
else:
net_name = '_'.join(('yolact', str(args.data_shape), args.network, args.dataset))
args.save_prefix += net_name
if args.syncbn and len(ctx) > 1:
net = get_model(net_name, pretrained_base=True, norm_layer=gluon.contrib.nn.SyncBatchNorm,
norm_kwargs={'num_devices': len(ctx)})
async_net = get_model(net_name, pretrained_base=False) # used by cpu worker
else:
net = get_model(net_name, pretrained_base=True, num_prototypes=32, sge=False)
async_net = net
if args.resume.strip():
net.load_parameters(args.resume.strip())
async_net.load_parameters(args.resume.strip())
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
net.initialize()
async_net.initialize()
# training data
train_dataset, val_dataset, eval_metric = get_dataset(args.dataset, args)
train_data, val_data = get_dataloader(
async_net, train_dataset, val_dataset, args.data_shape, args.batch_size, args.num_workers)
# training
train(net, train_data, val_data, eval_metric, ctx, args)
|
the-stack_0_23623 | # -*- coding: utf-8 -*-
"""Attribute container definitions."""
from dftimewolf.lib.containers import interface
class FSPath(interface.AttributeContainer):
"""Filesystem path container.
Attributes:
path (str): Filesystem path.
"""
CONTAINER_TYPE = 'fspath'
def __init__(self, path=None):
"""Initializes the FSPath object.
Args:
path (str): Filesystem path
"""
super(FSPath, self).__init__()
self.path = path
class RemoteFSPath(FSPath):
"""Remote Filesystem path container.
Attributes:
hostname (str): Hostname where the file is located.
path (str): Filesystem path.
"""
CONTAINER_TYPE = 'remotefspath'
def __init__(self, path=None, hostname=None):
"""Initializes the FSPath object.
Args:
path (str): Filesystem path
hostname (str): Hostname where the file is located
"""
super(RemoteFSPath, self).__init__(path=path)
self.hostname = hostname
class Report(interface.AttributeContainer):
"""Analysis report attribute container.
Attributes:
module_name (str): name of the module that generated the report.
text (str): report text.
text_format (str): format of text in the report. Must be either 'plaintext'
or 'markdown'.
attributes (list): attribute list, dicts must contain 'name',
'type', 'values' keys.
"""
CONTAINER_TYPE = 'report'
def __init__(
self, module_name, text, text_format='plaintext', attributes=None):
"""Initializes the analysis report.
Args:
module_name (str): name of the analysis plugin that generated
the report.
text (str): report text.
text_format (str): format of text in the report. Must be either
'plaintext' or 'markdown'.
attributes (list): attribute list of dicts that must contain 'name',
'type', 'values' keys.
"""
super(Report, self).__init__()
self.module_name = module_name
self.text = text
self.text_format = text_format
if attributes is None:
self.attributes = []
else:
self.attributes = attributes
class GCPLogs(interface.AttributeContainer):
"""Google Cloud Platform logs container.
Attributes:
filter_expression (str): GCP logging advanced logs filter expression
used to generate the results.
path (str): path to a GCP log file.
project_name (str): name of the project that was queried.
"""
CONTAINER_TYPE = 'gcp_logs'
def __init__(self, path, filter_expression, project_name):
"""Initializes the analysis report.
Args:
filter_expression (str): GCP advanced logs filter expression
used to generate the results.
path (str): path to a GCP log file.
project_name (str): name of the project that was queried.
"""
super(GCPLogs, self).__init__()
self.filter_expression = filter_expression
self.path = path
self.project_name = project_name
class ThreatIntelligence(interface.AttributeContainer):
"""Threat Intelligence attribute container.
Attributes:
name (string): name of the threat.
indicator (string): regular expression relevant to a threat.
path (string): path to the indicator data (e.g. file).
"""
CONTAINER_TYPE = 'threat_intelligence'
def __init__(self, name, indicator, path):
"""Initializes the Threat Intelligence container.
Args:
name (string): name of the threat.
indicator (string): regular expression relevant to a threat.
path (string): path to the indicator data (e.g. file)
"""
super(ThreatIntelligence, self).__init__()
self.name = name
self.indicator = indicator
self.path = path
class TicketAttribute(interface.AttributeContainer):
"""Attribute container definition for generic ticketing system attributes.
Attributes:
type (str): Type of the attribute.
name (str): Name of the attribute.
value (str): Value of the attribute.
"""
CONTAINER_TYPE = 'ticketattribute'
def __init__(self, type_, name, value):
"""Initializes the attribute.
Args:
type_ (str): Type of the attribute.
name (str): Name of the attribute.
value (str): Value of the attribute.
"""
super(TicketAttribute, self).__init__()
self.type = type_
self.name = name
self.value = value
class File(interface.AttributeContainer):
"""Attribute container definition for generic files.
Attributes:
name (str): Human-friendly name or short description of the file.
path (str): Full path to the file.
description (str): Longer description of the file.
"""
CONTAINER_TYPE = 'file'
def __init__(self, name, path, description=None):
"""Initializes the attribute.
Args:
name (str): Human-friendly name or short description of the file.
path (str): Full path to the file.
description (Optional[str]): Longer description of the file.
"""
super(File, self).__init__()
self.name = name
self.path = path
self.description = description
class ForensicsVM(interface.AttributeContainer):
"""Attribute container definition for a forensics virtual machine.
Attributes:
name (str): Identifying name for the virtual machine.
evidence_disk (libcloudforensics.GoogleComputeDisk): The disk containing
the forensic evidence. Full definition in
libcloudforensics.providers.gcp.internal.GoogleComputeDisk.
platform (str): The cloud platform where the VM is located. One of
{gcp,aws,azure}.
"""
CONTAINER_TYPE = 'forensics_vm'
def __init__(self, name, evidence_disk, platform):
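        """Initializes the forensics VM attribute container.
        Args:
          name (str): Identifying name for the virtual machine.
          evidence_disk (libcloudforensics.GoogleComputeDisk): The disk
              containing the forensic evidence.
          platform (str): The cloud platform where the VM is located. One of
              {gcp,aws,azure}.
        """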
super(ForensicsVM, self).__init__()
self.name = name
self.evidence_disk = evidence_disk
self.platform = platform
class URL(interface.AttributeContainer):
"""Attribute container definition for a Uniform Resource Locator.
Attributes:
path (str): The full path to the URL.
"""
CONTAINER_TYPE = 'url'
def __init__(self, path):
super(URL, self).__init__()
self.path = path
class DataFrame(interface.AttributeContainer):
"""Attribute container definition for a Pandas DataFrame.
Attributes:
data_frame (pandas.DataFrame): DataFrame containing the data.
description (str): Description of the data in the data frame.
name (str): Name of the data frame.
"""
CONTAINER_TYPE = 'data_frame'
def __init__(self, data_frame, description, name):
super(DataFrame, self).__init__()
self.data_frame = data_frame
self.description = description
self.name = name
|
the-stack_0_23625 | import _plotly_utils.basevalidators
class ButtonValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name='buttondefaults',
parent_name='layout.xaxis.rangeselector',
**kwargs
):
super(ButtonValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Button'),
data_docs=kwargs.pop('data_docs', """
"""),
**kwargs
)
|
the-stack_0_23626 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from multiprocessing import Process, Pipe
from occant_utils.astar_pycpp import pyastar
def worker(remote, parent_remote, worker_id, use_weighted_graph, scale, niters):
parent_remote.close()
try:
while True:
cmd, data = remote.recv()
if cmd == "plan":
map_, start, goal, mask, allow_diagonal = data
if mask == 1 and not use_weighted_graph:
path_x, path_y = pyastar.astar_planner(
map_, start, goal, allow_diagonal
)
elif mask == 1 and use_weighted_graph:
path_x, path_y = pyastar.weighted_astar_planner(
map_, start, goal, allow_diagonal, scale, niters,
)
else:
path_x, path_y = None, None
remote.send((path_x, path_y))
elif cmd == "close":
remote.close()
break
except KeyboardInterrupt:
print("AStarPlannerVector worker: got KeyboardInterrupt")
class AStarPlannerVector:
def __init__(self, config):
self.config = config
nplanners = config.nplanners
self.waiting = False
self.closed = False
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nplanners)])
self.ps = [
Process(
target=worker,
args=(
work_remote,
remote,
worker_id,
config.use_weighted_graph,
config.weight_scale,
config.weight_niters,
),
)
for (work_remote, remote, worker_id) in zip(
self.work_remotes, self.remotes, range(nplanners)
)
]
for p in self.ps:
p.daemon = True
p.start()
for remote in self.work_remotes:
remote.close()
def plan_async(self, maps, starts, goals, masks):
self._assert_not_closed()
for remote, map_, start, goal, mask in zip(
self.remotes, maps, starts, goals, masks
):
remote.send(("plan", (map_, start, goal, mask, self.config.allow_diagonal)))
self.waiting = True
def plan_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
self.waiting = False
return results # Planned paths
def plan(self, maps, starts, goals, masks):
self.plan_async(maps, starts, goals, masks)
return self.plan_wait()
def close(self):
for remote in self.remotes:
remote.send(("close", None))
def _assert_not_closed(self):
assert (
not self.closed
), "Trying to operate on an AStarPlannerVector after calling close()"
class AStarPlannerSequential:
def __init__(self, config):
self.config = config
def plan(self, maps, starts, goals, masks):
paths = []
for map_, start, goal, mask in zip(maps, starts, goals, masks):
if mask == 1 and not self.config.use_weighted_graph:
path_x, path_y = pyastar.astar_planner(
map_, start, goal, self.config.allow_diagonal
)
elif mask == 1 and self.config.use_weighted_graph:
path_x, path_y = pyastar.weighted_astar_planner(
map_,
start,
goal,
self.config.allow_diagonal,
self.config.weight_scale,
self.config.weight_niters,
)
else:
path_x, path_y = None, None
paths.append((path_x, path_y))
return paths
def close(self):
pass
def _assert_not_closed(self):
pass
|
the-stack_0_23627 | import json
from channels.db import database_sync_to_async
from channels.generic.websocket import AsyncWebsocketConsumer
from websocket_notifications.models import NotificationGroup
class NotificationConsumer(AsyncWebsocketConsumer):
"""Consumer for sending notification to the client."""
@database_sync_to_async
def get_notification_group(self, code):
return NotificationGroup.objects.filter(code=code).first()
async def connect(self):
"""When a consumer is connected, we get the code from the URL, to join to
the group."""
self.code = self.scope["url_route"]["kwargs"]["code"]
if await self.get_notification_group(self.code):
await self.channel_layer.group_add(self.code, self.channel_name)
await self.accept()
async def disconnect(self, close_code):
"""On disconnect, exit from the group."""
await self.channel_layer.group_discard(self.code, self.channel_name)
async def notification_message(self, event):
"""When received a message from a notification."""
payload = event["payload"]
await self.send(text_data=json.dumps({"payload": payload}))
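# --- Illustrative sketch (editor addition, not part of the original module) ---
# Pushing a payload to every consumer joined to `code`. group_send's "type" key
# maps dots to underscores, so "notification.message" dispatches to
# notification_message() above; the surrounding project is assumed to route this
# consumer with a URL kwarg named "code".
def send_notification_to_group(code, payload):
    from asgiref.sync import async_to_sync
    from channels.layers import get_channel_layer
    channel_layer = get_channel_layer()
    async_to_sync(channel_layer.group_send)(
        code, {"type": "notification.message", "payload": payload}
    )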
|
the-stack_0_23628 | import re
def parse(line):
    m = re.search(r'(\w+)\)(\w+)', line)
    x, y = m.group(1), m.group(2)
    return (x, y)
def mkOrbitsDict(fname):
    orbits = {}
    with open(fname, 'r') as f:
        for l in f:
            (x, y) = parse(l)
            orbits[y] = x
    return orbits
fname = 'input'
def paths_to_root(fname):
orbits = mkOrbitsDict(fname)
planets = set(orbits.keys())
planet_data = {}
all_l = 0
for p in planets:
cursor = p
l = 0
path = [p]
while cursor != 'COM':
# o ) p
o = orbits[cursor]
path.append(o)
l += 1
cursor = o
print('planet {}: path={}, len={}'.format(p,path,l))
planet_data[p] = (path,l)
all_l += l
print('total path lens={}'.format(all_l))
return planet_data
# find first common parent b/t YOU and SAN.
pp = paths_to_root(fname)
you = pp['YOU'][0]
san = pp['SAN'][0]
you.reverse()
san.reverse()
common_parent = 0
while (you[common_parent] == san[common_parent]):
common_parent += 1
common_parent -= 1 # 'cos index from 0
# len of you[i]->you.index(YOU)
# -2 'cos we don't count you[i] and YOU.
len_you = len(you) - you.index(you[common_parent]) - 2
len_san = len(san) - san.index(san[common_parent]) - 2
print('minimum number of orbital transfers: {}'.format(len_you + len_san))
|
the-stack_0_23631 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells.
## Base interface for all RNN Cells
@@RNNCell
## RNN Cells for use with TensorFlow's core RNN methods
@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell
## Classes storing split `RNNCell` state
@@LSTMStateTuple
## RNN Cell wrappers (RNNCells that wrap other RNNCells)
@@MultiRNNCell
@@DropoutWrapper
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _state_size_with_prefix(state_size, prefix=None):
"""Helper function that enables int or TensorShape shape specification.
This function takes a size specification, which can be an integer or a
TensorShape, and converts it into a list of integers. One may specify any
additional dimensions that precede the final state size specification.
Args:
state_size: TensorShape or int that specifies the size of a tensor.
prefix: optional additional list of dimensions to prepend.
Returns:
result_state_size: list of dimensions the resulting tensor size.
"""
result_state_size = tensor_shape.as_shape(state_size).as_list()
if prefix is not None:
if not isinstance(prefix, list):
raise TypeError("prefix of _state_size_with_prefix should be a list.")
result_state_size = prefix + result_state_size
return result_state_size
class RNNCell(object):
"""Abstract object representing an RNN cell.
The definition of cell in this package differs from the definition used in the
literature. In the literature, cell refers to an object with a single scalar
output. The definition in this package refers to a horizontal array of such
units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
tuple of integers, then it results in a tuple of `len(state_size)` state
  matrices, each with a column size corresponding to values in `state_size`.
This module provides a number of basic commonly used RNN cells, such as
LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
  of operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`,
or by calling the `rnn` ops several times. Every `RNNCell` must have the
  properties below and implement `__call__` with the following signature.
"""
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size x input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size x self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size x s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
raise NotImplementedError("Abstract method")
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size x state_size]` filled with zeros.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size x s]` for each s in `state_size`.
"""
state_size = self.state_size
if nest.is_sequence(state_size):
state_size_flat = nest.flatten(state_size)
zeros_flat = [
array_ops.zeros(
array_ops.pack(_state_size_with_prefix(s, prefix=[batch_size])),
dtype=dtype)
for s in state_size_flat]
for s, z in zip(state_size_flat, zeros_flat):
z.set_shape(_state_size_with_prefix(s, prefix=[None]))
zeros = nest.pack_sequence_as(structure=state_size,
flat_sequence=zeros_flat)
else:
zeros_size = _state_size_with_prefix(state_size, prefix=[batch_size])
zeros = array_ops.zeros(array_ops.pack(zeros_size), dtype=dtype)
zeros.set_shape(_state_size_with_prefix(state_size, prefix=[None]))
return zeros
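def _example_single_step(cell, inputs):
  """Illustrative sketch (editor addition, not part of the public API): run one
  step of any `RNNCell` from its zero state, using only the interface documented
  above. `inputs` is a `2-D` `[batch_size x input_size]` tensor."""
  batch_size = array_ops.shape(inputs)[0]
  initial_state = cell.zero_state(batch_size, inputs.dtype)
  return cell(inputs, initial_state)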
class BasicRNNCell(RNNCell):
"""The most basic RNN cell."""
def __init__(self, num_units, input_size=None, activation=tanh):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
with vs.variable_scope(scope or type(self).__name__): # "BasicRNNCell"
output = self._activation(_linear([inputs, state], self._num_units, True))
return output, output
class GRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
def __init__(self, num_units, input_size=None, activation=tanh):
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Gated recurrent unit (GRU) with nunits cells."""
with vs.variable_scope(scope or type(self).__name__): # "GRUCell"
with vs.variable_scope("Gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
r, u = array_ops.split(1, 2, _linear([inputs, state],
2 * self._num_units, True, 1.0))
r, u = sigmoid(r), sigmoid(u)
with vs.variable_scope("Candidate"):
c = self._activation(_linear([inputs, r * state],
self._num_units, True))
new_h = u * state + (1 - u) * c
return new_h, new_h
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))
class LSTMStateTuple(_LSTMStateTuple):
"""Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
Stores two elements: `(c, h)`, in that order.
Only used when `state_is_tuple=True`.
"""
__slots__ = ()
@property
def dtype(self):
(c, h) = self
if not c.dtype == h.dtype:
raise TypeError("Inconsistent internal state: %s vs %s" %
(str(c.dtype), str(h.dtype)))
return c.dtype
class BasicLSTMCell(RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full LSTMCell that follows.
"""
def __init__(self, num_units, forget_bias=1.0, input_size=None,
state_is_tuple=False, activation=tanh):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
input_size: Deprecated and unused.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or type(self).__name__): # "BasicLSTMCell"
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(1, 2, state)
concat = _linear([inputs, h], 4 * self._num_units, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(1, 4, concat)
new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat(1, [new_c, new_h])
return new_h, new_state
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" %
(shape, num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
dtype=dtype))
return shards
class LSTMCell(RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
"""
def __init__(self, num_units, input_size=None,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=1, num_proj_shards=1,
forget_bias=1.0, state_is_tuple=False,
activation=tanh):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
input_size: Deprecated and unused.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
scope: VariableScope for the created subgraph; defaults to "LSTMCell".
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(scope or type(self).__name__,
initializer=self._initializer): # "LSTMCell"
concat_w = _get_concat_variable(
"W", [input_size.value + num_proj, 4 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B", shape=[4 * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(1, [inputs, m_prev])
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(1, 4, lstm_matrix)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
concat_w_proj = _get_concat_variable(
"W_P", [self._num_units, self._num_proj],
dtype, self._num_proj_shards)
m = math_ops.matmul(m, concat_w_proj)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple
else array_ops.concat(1, [c, m]))
return m, new_state
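def _example_projected_lstm(num_units=512, num_proj=128):
  """Illustrative sketch (editor addition, not part of the public API): an LSTM
  with peephole connections and a projection layer, so `output_size` becomes
  `num_proj` (128) rather than `num_units` (512)."""
  return LSTMCell(num_units, use_peepholes=True, num_proj=num_proj,
                  state_is_tuple=True)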
class OutputProjectionWrapper(RNNCell):
"""Operator adding an output projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your outputs in time,
do the projection on this batch-concatenated sequence, then split it
if needed or directly feed into a softmax.
"""
def __init__(self, cell, output_size):
"""Create a cell with output projection.
Args:
cell: an RNNCell, a projection to output_size is added to it.
output_size: integer, the size of the output after projection.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if output_size is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if output_size < 1:
raise ValueError("Parameter output_size must be > 0: %d." % output_size)
self._cell = cell
self._output_size = output_size
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell and output projection on inputs, starting from state."""
output, res_state = self._cell(inputs, state)
# Default scope: "OutputProjectionWrapper"
with vs.variable_scope(scope or type(self).__name__):
projected = _linear(output, self._output_size, True)
return projected, res_state
class InputProjectionWrapper(RNNCell):
"""Operator adding an input projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the projection on this batch-concatenated sequence, then split it.
"""
def __init__(self, cell, num_proj, input_size=None):
"""Create a cell with input projection.
Args:
cell: an RNNCell, a projection of inputs is added before it.
num_proj: Python integer. The dimension to project to.
input_size: Deprecated and unused.
Raises:
TypeError: if cell is not an RNNCell.
"""
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
self._cell = cell
self._num_proj = num_proj
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the input projection and then the cell."""
# Default scope: "InputProjectionWrapper"
with vs.variable_scope(scope or type(self).__name__):
projected = _linear(inputs, self._num_proj, True)
return self._cell(projected, state)
class DropoutWrapper(RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
"""Create a cell with added input and/or output dropout.
Dropout is never used on the state.
Args:
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is float and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is float and 1, no output dropout will be added.
seed: (optional) integer, the randomness seed.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if keep_prob is not between 0 and 1.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not a RNNCell.")
if (isinstance(input_keep_prob, float) and
not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d"
% input_keep_prob)
if (isinstance(output_keep_prob, float) and
not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d"
% output_keep_prob)
self._cell = cell
self._input_keep_prob = input_keep_prob
self._output_keep_prob = output_keep_prob
self._seed = seed
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
if (not isinstance(self._input_keep_prob, float) or
self._input_keep_prob < 1):
inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
output, new_state = self._cell(inputs, state, scope)
if (not isinstance(self._output_keep_prob, float) or
self._output_keep_prob < 1):
output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
return output, new_state
class EmbeddingWrapper(RNNCell):
"""Operator adding input embedding to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the embedding on this batch-concatenated sequence, then split it and
feed into your RNN.
"""
def __init__(self, cell, embedding_classes, embedding_size, initializer=None):
"""Create a cell with an added input embedding.
Args:
cell: an RNNCell, an embedding will be put before its inputs.
embedding_classes: integer, how many symbols will be embedded.
embedding_size: integer, the size of the vectors we embed into.
initializer: an initializer to use when creating the embedding;
if None, the initializer from variable scope or a default one is used.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if embedding_classes is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if embedding_classes <= 0 or embedding_size <= 0:
raise ValueError("Both embedding_classes and embedding_size must be > 0: "
"%d, %d." % (embedding_classes, embedding_size))
self._cell = cell
self._embedding_classes = embedding_classes
self._embedding_size = embedding_size
self._initializer = initializer
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""Run the cell on embedded inputs."""
with vs.variable_scope(scope or type(self).__name__): # "EmbeddingWrapper"
with ops.device("/cpu:0"):
if self._initializer:
initializer = self._initializer
elif vs.get_variable_scope().initializer:
initializer = vs.get_variable_scope().initializer
else:
# Default initializer for embeddings should have variance=1.
sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.
initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
if type(state) is tuple:
data_type = state[0].dtype
else:
data_type = state.dtype
embedding = vs.get_variable(
"embedding", [self._embedding_classes, self._embedding_size],
initializer=initializer,
dtype=data_type)
embedded = embedding_ops.embedding_lookup(
embedding, array_ops.reshape(inputs, [-1]))
return self._cell(embedded, state)
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells."""
def __init__(self, cells, state_is_tuple=False):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. By default (False), the states are all
concatenated along the column axis.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
self._cells = cells
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(nest.is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum([cell.state_size for cell in self._cells])
@property
def output_size(self):
return self._cells[-1].output_size
def __call__(self, inputs, state, scope=None):
"""Run this multi-layer cell on inputs, starting from state."""
with vs.variable_scope(scope or type(self).__name__): # "MultiRNNCell"
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with vs.variable_scope("Cell%d" % i):
if self._state_is_tuple:
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s"
% (len(self.state_size), state))
cur_state = state[i]
else:
cur_state = array_ops.slice(
state, [0, cur_state_pos], [-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple
else array_ops.concat(1, new_states))
return cur_inp, new_states
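def _example_stacked_cell(num_units=64, num_layers=2, keep_prob=0.9):
  """Illustrative sketch (editor addition, not part of the public API): a
  dropout-wrapped two-layer LSTM stack with tuple states, combining the wrappers
  defined above as the deprecation warnings recommend."""
  cells = [DropoutWrapper(BasicLSTMCell(num_units, state_is_tuple=True),
                          output_keep_prob=keep_prob)
           for _ in range(num_layers)]
  return MultiRNNCell(cells, state_is_tuple=True)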
class _SlimRNNCell(RNNCell):
"""A simple wrapper for slim.rnn_cells."""
def __init__(self, cell_fn):
"""Create a SlimRNNCell from a cell_fn.
Args:
cell_fn: a function which takes (inputs, state, scope) and produces the
outputs and the new_state. Additionally when called with inputs=None and
state=None it should return (initial_outputs, initial_state).
Raises:
TypeError: if cell_fn is not callable
ValueError: if cell_fn cannot produce a valid initial state.
"""
if not callable(cell_fn):
raise TypeError("cell_fn %s needs to be callable", cell_fn)
self._cell_fn = cell_fn
self._cell_name = cell_fn.func.__name__
init_output, init_state = self._cell_fn(None, None)
output_shape = init_output.get_shape()
state_shape = init_state.get_shape()
self._output_size = output_shape.with_rank(2)[1].value
self._state_size = state_shape.with_rank(2)[1].value
if self._output_size is None:
raise ValueError("Initial output created by %s has invalid shape %s" %
(self._cell_name, output_shape))
if self._state_size is None:
raise ValueError("Initial state created by %s has invalid shape %s" %
(self._cell_name, state_shape))
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
scope = scope or self._cell_name
output, state = self._cell_fn(inputs, state, scope=scope)
return output, state
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
dtype = [a.dtype for a in args][0]
# Now the computation.
with vs.variable_scope(scope or "Linear"):
matrix = vs.get_variable(
"Matrix", [total_arg_size, output_size], dtype=dtype)
if len(args) == 1:
res = math_ops.matmul(args[0], matrix)
else:
res = math_ops.matmul(array_ops.concat(1, args), matrix)
if not bias:
return res
bias_term = vs.get_variable(
"Bias", [output_size],
dtype=dtype,
initializer=init_ops.constant_initializer(
bias_start, dtype=dtype))
return res + bias_term
|
the-stack_0_23632 | # /*
# MIT License
#
# Copyright (c) [2018] [Xiao Yang]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Original Author: Xiao Yang, UNC Chapel Hill, United States
# Contact: [email protected]
# Contributing Authors:
# Tamlin Pavelsky, UNC Chapel Hill, United States
# George Allen, Texas A&M, United States
# Genna Dontchyts, Deltares, NL
#
# NOTE: THIS IS A PRERELEASE VERSION (Edited on: 2019/02/18)
# */
# /* functions to extract river mask */
# GitHub: https://github.com/seanyx/RivWidthCloudPaper
import ee
import math
import numpy as np
from geemap.common import ee_initialize
def hitOrMiss(image, se1, se2):
"""perform hitOrMiss transform (adapted from [citation])"""
e1 = image.reduceNeighborhood(ee.Reducer.min(), se1)
e2 = image.Not().reduceNeighborhood(ee.Reducer.min(), se2)
return e1.And(e2)
def splitKernel(kernel, value):
"""recalculate the kernel according to the given foreground value"""
kernel = np.array(kernel)
result = kernel
r = 0
while r < kernel.shape[0]:
c = 0
while c < kernel.shape[1]:
if kernel[r][c] == value:
result[r][c] = 1
else:
result[r][c] = 0
c = c + 1
r = r + 1
return result.tolist()
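# Example (editor addition): splitKernel() turns the 0/1/2-coded structuring
# elements used throughout this module into plain binary kernels, e.g.
#   splitKernel([[2, 2, 2], [0, 1, 0], [1, 1, 1]], 1) -> [[0, 0, 0], [0, 1, 0], [1, 1, 1]]
#   splitKernel([[2, 2, 2], [0, 1, 0], [1, 1, 1]], 2) -> [[1, 1, 1], [0, 0, 0], [0, 0, 0]]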
def Skeletonize(image, iterations, method):
"""perform skeletonization"""
se1w = [[2, 2, 2], [0, 1, 0], [1, 1, 1]]
if method == 2:
se1w = [[2, 2, 2], [0, 1, 0], [0, 1, 0]]
se11 = ee.Kernel.fixed(3, 3, splitKernel(se1w, 1))
se12 = ee.Kernel.fixed(3, 3, splitKernel(se1w, 2))
se2w = [[2, 2, 0], [2, 1, 1], [0, 1, 0]]
if method == 2:
se2w = [[2, 2, 0], [2, 1, 1], [0, 1, 1]]
se21 = ee.Kernel.fixed(3, 3, splitKernel(se2w, 1))
se22 = ee.Kernel.fixed(3, 3, splitKernel(se2w, 2))
result = image
i = 0
while i < iterations:
j = 0
while j < 4:
result = result.subtract(hitOrMiss(result, se11, se12))
se11 = se11.rotate(1)
se12 = se12.rotate(1)
result = result.subtract(hitOrMiss(result, se21, se22))
se21 = se21.rotate(1)
se22 = se22.rotate(1)
j = j + 1
i = i + 1
return result.rename(["clRaw"])
def CalcDistanceMap(img, neighborhoodSize, scale):
# // assign each river pixel with the distance (in meter) between itself and the closest non-river pixel
imgD2 = img.focal_max(1.5, "circle", "pixels", 2)
imgD1 = img.focal_max(1.5, "circle", "pixels", 1)
outline = imgD2.subtract(imgD1)
dpixel = outline.fastDistanceTransform(neighborhoodSize).sqrt()
dmeters = dpixel.multiply(scale) # // for a given scale
DM = dmeters.mask(dpixel.lte(neighborhoodSize).And(imgD2))
return DM
def CalcGradientMap(image, gradMethod, scale):
## Calculate the gradient
if gradMethod == 1: # GEE .gradient() method
grad = image.gradient()
dx = grad.select(["x"])
dy = grad.select(["y"])
g = dx.multiply(dx).add(dy.multiply(dy)).sqrt()
if gradMethod == 2: # Gena's method
k_dx = ee.Kernel.fixed(
3,
3,
[
[1.0 / 8, 0.0, -1.0 / 8],
[2.0 / 8, 0.0, -2.0 / 8],
[1.0 / 8, 0.0, -1.0 / 8],
],
)
k_dy = ee.Kernel.fixed(
3,
3,
[
[-1.0 / 8, -2.0 / 8, -1.0 / 8],
[0.0, 0.0, 0.0],
[1.0 / 8, 2.0 / 8, 1.0 / 8],
],
)
dx = image.convolve(k_dx)
dy = image.convolve(k_dy)
g = dx.multiply(dx).add(dy.multiply(dy)).divide(scale.multiply(scale)).sqrt()
if gradMethod == 3: # RivWidth method
k_dx = ee.Kernel.fixed(3, 1, [[-0.5, 0.0, 0.5]])
k_dy = ee.Kernel.fixed(1, 3, [[0.5], [0.0], [-0.5]])
dx = image.convolve(k_dx)
dy = image.convolve(k_dy)
g = dx.multiply(dx).add(dy.multiply(dy)).divide(scale.multiply(scale))
return g
def CalcOnePixelWidthCenterline(img, GM, hGrad):
# /***
# calculate the 1px centerline from:
# 1. fast distance transform of the river banks
# 2. gradient of the distance transform, mask areas where gradient greater than a threshold hGrad
# 3. apply skeletonization twice to get a 1px centerline
    # thresholding gradient map inspired by Pavelsky and Smith, 2008
# ***/
imgD2 = img.focal_max(1.5, "circle", "pixels", 2)
cl = ee.Image(GM).mask(imgD2).lte(hGrad).And(img)
# // apply skeletonization twice
cl1px = Skeletonize(cl, 2, 1)
return cl1px
def ExtractEndpoints(CL1px):
"""calculate end points in the one pixel centerline"""
se1w = [[0, 0, 0], [2, 1, 2], [2, 2, 2]]
se11 = ee.Kernel.fixed(3, 3, splitKernel(se1w, 1))
se12 = ee.Kernel.fixed(3, 3, splitKernel(se1w, 2))
result = CL1px
# // the for loop removes the identified endpoints from the input image
i = 0
while i < 4: # rotate kernels
result = result.subtract(hitOrMiss(CL1px, se11, se12))
se11 = se11.rotate(1)
se12 = se12.rotate(1)
i = i + 1
endpoints = CL1px.subtract(result)
return endpoints
def ExtractCorners(CL1px):
"""calculate corners in the one pixel centerline"""
se1w = [[2, 2, 0], [2, 1, 1], [0, 1, 0]]
se11 = ee.Kernel.fixed(3, 3, splitKernel(se1w, 1))
se12 = ee.Kernel.fixed(3, 3, splitKernel(se1w, 2))
result = CL1px
# // the for loop removes the identified corners from the input image
i = 0
while i < 4: # rotate kernels
result = result.subtract(hitOrMiss(result, se11, se12))
se11 = se11.rotate(1)
se12 = se12.rotate(1)
i = i + 1
cornerPoints = CL1px.subtract(result)
return cornerPoints
def CleanCenterline(cl1px, maxBranchLengthToRemove, rmCorners):
"""clean the 1px centerline:
1. remove branches
    2. remove corners to ensure 1px width (optional)
"""
## find the number of connecting pixels (8-connectivity)
nearbyPoints = cl1px.mask(cl1px).reduceNeighborhood(
reducer=ee.Reducer.count(), kernel=ee.Kernel.circle(1.5), skipMasked=True
)
## define ends
endsByNeighbors = nearbyPoints.lte(2)
## define joint points
joints = nearbyPoints.gte(4)
costMap = (
cl1px.mask(cl1px)
.updateMask(joints.Not())
.cumulativeCost(
source=endsByNeighbors.mask(endsByNeighbors),
maxDistance=maxBranchLengthToRemove,
geodeticDistance=True,
)
)
branchMask = costMap.gte(0).unmask(0)
cl1Cleaned = cl1px.updateMask(branchMask.Not()) # mask short branches;
ends = ExtractEndpoints(cl1Cleaned)
cl1Cleaned = cl1Cleaned.updateMask(ends.Not())
if rmCorners:
corners = ExtractCorners(cl1Cleaned)
cl1Cleaned = cl1Cleaned.updateMask(corners.Not())
return cl1Cleaned
def CalculateAngle(clCleaned):
"""calculate the orthogonal direction of each pixel of the centerline"""
w3 = ee.Kernel.fixed(
9,
9,
[
[135.0, 126.9, 116.6, 104.0, 90.0, 76.0, 63.4, 53.1, 45.0],
[143.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 36.9],
[153.4, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 26.6],
[166.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.0],
[180.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e-5],
[194.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 346.0],
[206.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 333.4],
[216.9, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 323.1],
[225.0, 233.1, 243.4, 256.0, 270.0, 284.0, 296.6, 306.9, 315.0],
],
)
combinedReducer = ee.Reducer.sum().combine(ee.Reducer.count(), None, True)
clAngle = (
clCleaned.mask(clCleaned)
.rename(["clCleaned"])
.reduceNeighborhood(
reducer=combinedReducer, kernel=w3, inputWeight="kernel", skipMasked=True
)
)
## mask calculating when there are more than two inputs into the angle calculation
clAngleNorm = (
clAngle.select("clCleaned_sum")
.divide(clAngle.select("clCleaned_count"))
.mask(clAngle.select("clCleaned_count").gt(2).Not())
)
## if only one input into the angle calculation, rotate it by 90 degrees to get the orthogonal
clAngleNorm = clAngleNorm.where(
clAngle.select("clCleaned_count").eq(1), clAngleNorm.add(ee.Image(90))
)
return clAngleNorm.rename(["orthDegree"])
def GetWidth(clAngleNorm, segmentInfo, endInfo, DM, crs, bound, scale, sceneID, note):
"""calculate the width of the river at each centerline pixel, measured according to the orthgonal direction of the river"""
def GetXsectionEnds(f):
xc = ee.Number(f.get("x"))
yc = ee.Number(f.get("y"))
orthRad = ee.Number(f.get("angle")).divide(180).multiply(math.pi)
width = ee.Number(f.get("toBankDistance")).multiply(1.5)
cosRad = width.multiply(orthRad.cos())
sinRad = width.multiply(orthRad.sin())
p1 = ee.Geometry.Point([xc.add(cosRad), yc.add(sinRad)], crs)
p2 = ee.Geometry.Point([xc.subtract(cosRad), yc.subtract(sinRad)], crs)
xlEnds = ee.Feature(
ee.Geometry.MultiPoint([p1, p2]).buffer(30),
{
"xc": xc,
"yc": yc,
"longitude": f.get("lon"),
"latitude": f.get("lat"),
"orthogonalDirection": orthRad,
"MLength": width.multiply(2),
"p1": p1,
"p2": p2,
"crs": crs,
"image_id": sceneID,
"note": note,
},
)
return xlEnds
def SwitchGeometry(f):
return (
f.setGeometry(
ee.Geometry.LineString(
coords=[f.get("p1"), f.get("p2")], proj=crs, geodesic=False
)
)
.set("p1", None)
.set("p2", None)
) # remove p1 and p2
## convert centerline image to a list. prepare for map function
clPoints = (
clAngleNorm.rename(["angle"])
.addBands(ee.Image.pixelCoordinates(crs))
.addBands(ee.Image.pixelLonLat().rename(["lon", "lat"]))
.addBands(DM.rename(["toBankDistance"]))
.sample(region=bound, scale=scale, projection=None, factor=1, dropNulls=True)
)
## calculate the cross-section lines, returning a featureCollection
xsectionsEnds = clPoints.map(GetXsectionEnds)
## calculate the flags at the xsection line end points
endStat = endInfo.reduceRegions(
collection=xsectionsEnds,
reducer=ee.Reducer.anyNonZero().combine(
ee.Reducer.count(), None, True
), # test endpoints type
scale=scale,
crs=crs,
)
## calculate the width of the river and other flags along the xsection lines
xsections1 = endStat.map(SwitchGeometry)
combinedReducer = ee.Reducer.mean()
xsections = segmentInfo.reduceRegions(
collection=xsections1, reducer=combinedReducer, scale=scale, crs=crs
)
return xsections
def CalculateCenterline(imgIn):
crs = imgIn.get("crs")
scale = ee.Number(imgIn.get("scale"))
riverMask = imgIn.select(["riverMask"])
distM = CalcDistanceMap(riverMask, 256, scale)
gradM = CalcGradientMap(distM, 2, scale)
cl1 = CalcOnePixelWidthCenterline(riverMask, gradM, 0.9)
cl1Cleaned1 = CleanCenterline(cl1, 300, True)
cl1px = CleanCenterline(cl1Cleaned1, 300, False)
imgOut = (
imgIn.addBands(cl1px.rename(["cleanedCL"]))
.addBands(cl1.rename(["rawCL"]))
.addBands(gradM.rename(["gradientMap"]))
.addBands(distM.rename(["distanceMap"]))
)
return imgOut
def CalculateOrthAngle(imgIn):
cl1px = imgIn.select(["cleanedCL"])
angle = CalculateAngle(cl1px)
imgOut = imgIn.addBands(angle)
return imgOut
def prepExport(f):
f = f.set(
{
"width": ee.Number(f.get("MLength")).multiply(f.get("channelMask")),
"endsInWater": ee.Number(f.get("any")).eq(1),
"endsOverEdge": ee.Number(f.get("count")).lt(2),
}
)
fOut = ee.Feature(
ee.Geometry.Point([f.get("longitude"), f.get("latitude")]), {}
).copyProperties(f, None, ["any", "count", "MLength", "xc", "yc", "channelMask"])
return fOut
def CalculateWidth(imgIn):
crs = imgIn.get("crs")
scale = imgIn.get("scale")
imgId = imgIn.get("image_id")
bound = imgIn.select(["riverMask"]).geometry()
angle = imgIn.select(["orthDegree"])
dem = ee.Image("users/eeProject/MERIT")
infoEnds = imgIn.select(["riverMask"])
infoExport = (
imgIn.select("channelMask")
.addBands(imgIn.select("^flag.*"))
.addBands(dem.rename(["flag_elevation"]))
)
dm = imgIn.select(["distanceMap"])
widths = GetWidth(
angle, infoExport, infoEnds, dm, crs, bound, scale, imgId, ""
).map(prepExport)
return widths
def merge_collections_std_bandnames_collection1tier1_sr():
"""merge landsat 5, 7, 8 collection 1 tier 1 SR imageCollections and standardize band names"""
## standardize band names
bn8 = ["B1", "B2", "B3", "B4", "B6", "pixel_qa", "B5", "B7"]
bn7 = ["B1", "B1", "B2", "B3", "B5", "pixel_qa", "B4", "B7"]
bn5 = ["B1", "B1", "B2", "B3", "B5", "pixel_qa", "B4", "B7"]
bns = ["uBlue", "Blue", "Green", "Red", "Swir1", "BQA", "Nir", "Swir2"]
# create a merged collection from landsat 5, 7, and 8
ls5 = ee.ImageCollection("LANDSAT/LT05/C01/T1_SR").select(bn5, bns)
ls7 = (
ee.ImageCollection("LANDSAT/LE07/C01/T1_SR")
.filterDate("1999-04-15", "2003-05-30")
.select(bn7, bns)
)
ls8 = ee.ImageCollection("LANDSAT/LC08/C01/T1_SR").select(bn8, bns)
merged = ls5.merge(ls7).merge(ls8)
return merged
def id2Img(id):
return ee.Image(
merge_collections_std_bandnames_collection1tier1_sr()
.filterMetadata("LANDSAT_ID", "equals", id)
.first()
)
def Unpack(bitBand, startingBit, bitWidth):
# unpacking bit bands
# see: https://groups.google.com/forum/#!starred/google-earth-engine-developers/iSV4LwzIW7A
return (
ee.Image(bitBand)
.rightShift(startingBit)
.bitwiseAnd(ee.Number(2).pow(ee.Number(bitWidth)).subtract(ee.Number(1)).int())
)
def UnpackAllSR(bitBand):
# apply Unpack function for multiple pixel qualities
bitInfoSR = {
"Cloud": [5, 1],
"CloudShadow": [3, 1],
"SnowIce": [4, 1],
"Water": [2, 1],
}
unpackedImage = ee.Image.cat(
[
Unpack(bitBand, bitInfoSR[key][0], bitInfoSR[key][1]).rename([key])
for key in bitInfoSR
]
)
return unpackedImage
def AddFmaskSR(image):
# // add fmask as a separate band to the input image
temp = UnpackAllSR(image.select(["BQA"]))
fmask = (
temp.select(["Water"])
.rename(["fmask"])
.where(temp.select(["SnowIce"]), ee.Image(3))
.where(temp.select(["CloudShadow"]), ee.Image(2))
.where(temp.select(["Cloud"]), ee.Image(4))
.mask(temp.select(["Cloud"]).gte(0))
)
return image.addBands(fmask)
def CalcHillShadowSR(image):
dem = ee.Image("users/eeProject/MERIT").clip(image.geometry().buffer(9000).bounds())
SOLAR_AZIMUTH_ANGLE = ee.Number(image.get("SOLAR_AZIMUTH_ANGLE"))
SOLAR_ZENITH_ANGLE = ee.Number(image.get("SOLAR_ZENITH_ANGLE"))
return (
ee.Terrain.hillShadow(dem, SOLAR_AZIMUTH_ANGLE, SOLAR_ZENITH_ANGLE, 100, True)
.reproject("EPSG:4326", None, 90)
.rename(["hillshadow"])
)
# /* functions to classify water (default) */
def ClassifyWater(imgIn, method="Jones2019"):
if method == "Jones2019":
return ClassifyWaterJones2019(imgIn)
elif method == "Zou2018":
return ClassifyWaterZou2018(imgIn)
# /* water function */
def CalculateWaterAddFlagsSR(imgIn, waterMethod="Jones2019"):
# waterMethod = typeof waterMethod !== 'undefined' ? waterMethod : 'Jones2019';
fmask = AddFmaskSR(imgIn).select(["fmask"])
fmaskUnpacked = (
fmask.eq(4)
.rename("flag_cloud")
.addBands(fmask.eq(2).rename("flag_cldShadow"))
.addBands(fmask.eq(3).rename("flag_snowIce"))
.addBands(fmask.eq(1).rename("flag_water"))
)
water = ClassifyWater(imgIn, waterMethod).where(fmask.gte(2), ee.Image.constant(0))
hillshadow = CalcHillShadowSR(imgIn).Not().rename(["flag_hillshadow"])
imgOut = ee.Image(
water.addBands(fmask)
.addBands(hillshadow)
.addBands(fmaskUnpacked)
.setMulti(
{
"image_id": imgIn.get("LANDSAT_ID"),
"timestamp": imgIn.get("system:time_start"),
"scale": imgIn.projection().nominalScale(),
"crs": imgIn.projection().crs(),
}
)
)
return imgOut
def GetCenterline(clDataset, bound):
# // filter the GRWL centerline based on area of interest
cl = clDataset.filterBounds(bound)
return cl
def ExtractChannel(image, centerline, maxDistance):
# // extract the channel water bodies from the water mask, based on connectivity to the reference centerline.
connectedToCl = (
image.Not()
.cumulativeCost(
source=ee.Image()
.toByte()
.paint(centerline, 1)
.And(image), # // only use the centerline that overlaps with the water mask
maxDistance=maxDistance,
geodeticDistance=False,
)
.eq(0)
)
channel = (
image.updateMask(connectedToCl)
.unmask(0)
.updateMask(image.gte(0))
.rename(["channelMask"])
)
return channel
def RemoveIsland(channel, FILL_SIZE):
# /* fill in island as water if the size (number of pixels) of the island is smaller than FILL_SIZE */
fill = channel.Not().selfMask().connectedPixelCount(FILL_SIZE).lt(FILL_SIZE)
river = channel.where(fill, ee.Image(1)).rename(["riverMask"])
return river
def ExtractRiver(imgIn, clData, maxDist, minIslandRemoval):
waterMask = imgIn.select(["waterMask"])
bound = waterMask.geometry()
cl = GetCenterline(clData, bound)
channelMask = ExtractChannel(waterMask, cl, maxDist)
riverMask = RemoveIsland(channelMask, minIslandRemoval)
return imgIn.addBands(channelMask).addBands(riverMask)
def Mndwi(image):
return image.normalizedDifference(["Green", "Swir1"]).rename("mndwi")
def Mbsrv(image):
return image.select(["Green"]).add(image.select(["Red"])).rename("mbsrv")
def Mbsrn(image):
return image.select(["Nir"]).add(image.select(["Swir1"])).rename("mbsrn")
def Ndvi(image):
return image.normalizedDifference(["Nir", "Red"]).rename("ndvi")
def Awesh(image):
return image.expression(
"Blue + 2.5 * Green + (-1.5) * mbsrn + (-0.25) * Swir2",
{
"Blue": image.select(["Blue"]),
"Green": image.select(["Green"]),
"mbsrn": Mbsrn(image).select(["mbsrn"]),
"Swir2": image.select(["Swir2"]),
},
)
def Evi(image):
# calculate the enhanced vegetation index
evi = image.expression(
"2.5 * (Nir - Red) / (1 + Nir + 6 * Red - 7.5 * Blue)",
{
"Nir": image.select(["Nir"]),
"Red": image.select(["Red"]),
"Blue": image.select(["Blue"]),
},
)
return evi.rename(["evi"])
def Dswe(i):
mndwi = Mndwi(i)
mbsrv = Mbsrv(i)
mbsrn = Mbsrn(i)
awesh = Awesh(i)
swir1 = i.select(["Swir1"])
nir = i.select(["Nir"])
ndvi = Ndvi(i)
blue = i.select(["Blue"])
swir2 = i.select(["Swir2"])
t1 = mndwi.gt(0.124)
t2 = mbsrv.gt(mbsrn)
t3 = awesh.gt(0)
t4 = mndwi.gt(-0.44).And(swir1.lt(900)).And(nir.lt(1500)).And(ndvi.lt(0.7))
t5 = (
mndwi.gt(-0.5)
.And(blue.lt(1000))
.And(swir1.lt(3000))
.And(swir2.lt(1000))
.And(nir.lt(2500))
)
t = (
t1.add(t2.multiply(10))
.add(t3.multiply(100))
.add(t4.multiply(1000))
.add(t5.multiply(10000))
)
noWater = t.eq(0).Or(t.eq(1)).Or(t.eq(10)).Or(t.eq(100)).Or(t.eq(1000))
hWater = (
t.eq(1111)
.Or(t.eq(10111))
.Or(t.eq(11011))
.Or(t.eq(11101))
.Or(t.eq(11110))
.Or(t.eq(11111))
)
mWater = (
t.eq(111)
.Or(t.eq(1011))
.Or(t.eq(1101))
.Or(t.eq(1110))
.Or(t.eq(10011))
.Or(t.eq(10101))
.Or(t.eq(10110))
.Or(t.eq(11001))
.Or(t.eq(11010))
.Or(t.eq(11100))
)
pWetland = t.eq(11000)
lWater = (
t.eq(11)
.Or(t.eq(101))
.Or(t.eq(110))
.Or(t.eq(1001))
.Or(t.eq(1010))
.Or(t.eq(1100))
.Or(t.eq(10000))
.Or(t.eq(10001))
.Or(t.eq(10010))
.Or(t.eq(10100))
)
iDswe = (
noWater.multiply(0)
.add(hWater.multiply(1))
.add(mWater.multiply(2))
.add(pWetland.multiply(3))
.add(lWater.multiply(4))
)
return iDswe.rename(["dswe"])
def ClassifyWaterJones2019(img):
dswe = Dswe(img)
waterMask = dswe.eq(1).Or(dswe.eq(2)).rename(["waterMask"])
return waterMask
def ClassifyWaterZou2018(image):
mndwi = Mndwi(image)
ndvi = Ndvi(image)
evi = Evi(image)
water = (mndwi.gt(ndvi).Or(mndwi.gt(evi))).And(evi.lt(0.1))
return water.rename(["waterMask"])
def rwGenSR(
aoi=None,
WATER_METHOD="Jones2019",
MAXDISTANCE=4000,
FILL_SIZE=333,
MAXDISTANCE_BRANCH_REMOVAL=500,
):
grwl = ee.FeatureCollection("users/eeProject/grwl")
# // generate function based on user choice
def tempFUN(image, aoi=aoi):
aoi = ee.Algorithms.If(aoi, aoi, image.geometry())
image = image.clip(aoi)
# // derive water mask and masks for flags
imgOut = CalculateWaterAddFlagsSR(image, WATER_METHOD)
# // calculate river mask
imgOut = ExtractRiver(imgOut, grwl, MAXDISTANCE, FILL_SIZE)
# // calculate centerline
imgOut = CalculateCenterline(imgOut)
# // calculate orthogonal direction of the centerline
imgOut = CalculateOrthAngle(imgOut)
# // export widths
width_fc = CalculateWidth(imgOut)
return width_fc
return tempFUN
def maximum_no_of_tasks(MaxNActive, waitingPeriod):
"""maintain a maximum number of active tasks"""
import time
import ee
ee.Initialize()
time.sleep(10)
## initialize submitting jobs
ts = list(ee.batch.Task.list())
NActive = 0
for task in ts:
if "RUNNING" in str(task) or "READY" in str(task):
NActive += 1
## wait if the number of current active tasks reach the maximum number
## defined in MaxNActive
while NActive >= MaxNActive:
        time.sleep(
            waitingPeriod
        )  # at or over the maximum number of active tasks: wait waitingPeriod seconds, then check again
ts = list(ee.batch.Task.list())
NActive = 0
for task in ts:
if "RUNNING" in str(task) or "READY" in str(task):
NActive += 1
return ()
def rwc(
img_id,
description=None,
folder="",
file_format="shp",
aoi=None,
water_method="Jones2019",
max_dist=4000,
fill_size=333,
max_dist_branch_removal=500,
return_fc=False,
):
"""Calculate river centerlines and widths for one Landsat SR image.
Args:
img_id (str): LANDSAT_ID for any Landsat 5, 7, and 8 SR scene. For example, LC08_L1TP_022034_20130422_20170310_01_T1.
description (str, optional): File name of the output file. Defaults to None.
folder (str, optional): Folder name within Google Drive to save the exported file. Defaults to "", which is the root directory.
        file_format (str, optional): The supported file formats include shp, csv, json, kml, kmz, and TFRecord. Defaults to "shp".
        aoi (ee.Geometry.Polygon, optional): A polygon (or rectangle) geometry defining the area of interest. Only widths and centerlines from this area will be calculated. Defaults to None.
water_method (str, optional): Water classification method ('Jones2019' or 'Zou2018'). Defaults to "Jones2019".
max_dist (int, optional): Maximum distance (unit: meters) to check water pixel's connectivity to GRWL centerline. Defaults to 4000.
fill_size (int, optional): islands or bars smaller than this value (unit: pixels) will be removed before calculating centerline. Defaults to 333.
        max_dist_branch_removal (int, optional): Length of pruning. Spurious branches of the initial centerline will be removed if shorter than this length (unit: pixels). Defaults to 500.
return_fc(bool, optional): whether to return the result as an ee.FeatureColleciton. Defaults to False.
"""
if description is None:
description = img_id
img = id2Img(img_id)
gen = rwGenSR(
aoi=aoi,
WATER_METHOD=water_method,
MAXDISTANCE=max_dist,
FILL_SIZE=fill_size,
MAXDISTANCE_BRANCH_REMOVAL=max_dist_branch_removal,
)
width_fc = gen(img)
if return_fc:
return width_fc
else:
taskWidth = ee.batch.Export.table.toDrive(
collection=width_fc,
description=description,
folder=folder,
fileFormat=file_format,
)
taskWidth.start()
print(description, "will be exported to", folder, "as", file_format, "file")
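# Example sketch for a single-scene export; the scene ID is the one used in the
# docstring above, while the description and folder below are hypothetical
# placeholders.
#
#   rwc(
#       "LC08_L1TP_022034_20130422_20170310_01_T1",
#       description="rivwidth_022034_20130422",
#       folder="rivwidth_exports",
#       file_format="csv",
#   )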
def rwc_batch(
in_csv,
folder="",
file_format="shp",
aoi=None,
water_method="Jones2019",
max_dist=4000,
fill_size=333,
max_dist_branch_remove=500,
):
"""Calculate river centerlines and widths for multiple Landsat SR images.
Args:
in_csv (str): An input csv file containing a list of Landsat IDs (e.g., LC08_L1TP_022034_20130422_20170310_01_T1)
folder (str, optional): Folder name within Google Drive to save the exported file. Defaults to "", which is the root directory.
file_format (str, optional): Output file format; supported formats include shp, csv, json, kml, kmz, and TFRecord. Defaults to "shp".
aoi (ee.Geometry.Polygon, optional): A polygon (or rectangle) geometry that defines the area of interest. Only widths and centerlines from this area will be calculated. Defaults to None.
water_method (str, optional): Water classification method ('Jones2019' or 'Zou2018'). Defaults to "Jones2019".
max_dist (int, optional): Maximum distance (unit: meters) to check a water pixel's connectivity to the GRWL centerline. Defaults to 4000.
fill_size (int, optional): Islands or bars smaller than this value (unit: pixels) will be removed before calculating the centerline. Defaults to 333.
max_dist_branch_remove (int, optional): Pruning length. Spurious branches of the initial centerline will be pruned by this length (unit: pixels). Defaults to 500.
"""
import pandas as pd
imageInfo = pd.read_csv(
in_csv, dtype={"Point_ID": np.unicode_, "LANDSAT_ID": np.unicode_}
)
sceneIDList = imageInfo["LANDSAT_ID"].values.tolist()
# point_IDList = imageInfo["Point_ID"].values.tolist()
# x = imageInfo["Longitude"].values.tolist()
# y = imageInfo["Latitude"].values.tolist()
for scene in sceneIDList:
rwc(
scene,
scene,
folder,
file_format,
aoi,
water_method,
max_dist,
fill_size,
max_dist_branch_remove,
)
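# Example sketch for a batch run; the CSV path below is a hypothetical
# placeholder. maximum_no_of_tasks() can be combined with a custom loop over the
# scene list if the number of concurrent Earth Engine exports needs to be capped.
#
#   rwc_batch("landsat_scene_list.csv", folder="rivwidth_exports", file_format="csv")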
|
the-stack_0_23634 | from auvlib.data_tools import std_data, xtf_data
#Script that reads multibeam and xtf_data from .cereal files and extracts relevant data
#in a region of the user's choice. Saves a mesh_map of the selected region as well as multibeam
#and xtf_data originating from that region.
#------------------------------------------------------------------------------
#SPECIFY PATHS, FILENAMES, REGION BOUNDS AND MESH_MAP RESOLUTION
#Path to .cereal files with sonar data
xtf_file = "../datasets/20190618_6/processed data/xtf_subset1.cereal" #.cereal file with xtf data
mbes_file = "../datasets/20190618_6/processed data/corrected_mbes.cereal" #.cereal file with multibeam data
#Filenames - output
outputfile_xtf = "xtf_lawnmover_test" + ".cereal" #Filename output xtf file
outputfile_mbes = "mbes_lawnmower_test" + ".cereal" #Filename output multibeam file
#Paths to save location. If a path is "" the file will be saved in the script's location.
save_path_xtf = "../datasets/20190618_6/processed data/" #xtf
save_path_mbes = "../datasets/20190618_6/processed data/" #multibeam
#Specify region bounds.
#To find reasonable region bounds, use generate_mesh_map.py for visualization.
low_x = 650412
high_x = 652245
low_y = 0
high_y = 6471750
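#Note (assumption): the bounds above are compared directly against ping.pos_, so
#they must be expressed in the same projected coordinate system as the sonar
#positions (easting/northing, e.g. UTM); low_y = 0 effectively leaves the
#southern bound open.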
#------------------------------------------------------------------------------
#SAVE XTF DATA FROM SELECTED REGION
#Read xtf data
xtf_data = xtf_data.xtf_sss_ping.read_data(xtf_file)
#Pick out data points from selected region
xtf_region_data = []
for ping in xtf_data:
pos = ping.pos_
if pos[0] > low_x and pos[0] < high_x and pos[1] > low_y and pos[1] < high_y:
xtf_region_data.append(ping)
#Save data points from selected region
xtf_data.write_data(xtf_region_data, save_path_xtf + outputfile_xtf)
#------------------------------------------------------------------------------
#SAVE MULTIBEAM DATA FROM SELECTED REGION
#Read multibeam data
mbes_data = std_data.mbes_ping.read_data(mbes_file)
#Pick out data points from selected region
mbes_region_data = []
for ping in mbes_data:
pos = ping.pos_
if pos[0] > low_x and pos[0] < high_x and pos[1] > low_y and pos[1] < high_y:
mbes_region_data.append(ping)
#Save data points from selected region
std_data.write_data(mbes_region_data, save_path_mbes + outputfile_mbes)
|
the-stack_0_23635 | # This script adds in the county level socioeconomic and altitude data
# Importing required modules
import pandas as pd
# Specifying the path to the data -- update this accordingly!
username = ''
filepath = 'C:/Users/' + username + '/Documents/Data/ultracompetitive/'
# Reading in the data
compdata = pd.read_csv(filepath + 'output.csv')
edu = pd.read_csv(filepath + 'Education.csv', engine = 'python')
labor = pd.read_csv(filepath + 'Unemployment.csv', engine = 'python')
inc = pd.read_csv(filepath + 'Income.csv', engine = 'python')
ccmap = pd.read_csv(filepath + 'ccmap.csv', sep = '|')
cases = pd.read_csv(filepath + 'time_series_covid19_confirmed_US.csv')
# Standardizing ccmap for DC
ccmap = ccmap.replace(to_replace = 'Washington, D.C.', value = 'District of Columbia')
ccmap.City = ccmap.City.str.lower()
# Making county names in cases.csv all lower case
lows = [str(c).lower() for c in cases.Admin2]
lows = pd.Series(lows, name = 'County')
cases = pd.concat([cases, lows], axis = 1)
# Defining a helper function for assigning FIPS via ccmap and cases
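# The lookup proceeds in two steps: the (city, state) pair is mapped to a county
# name via ccmap, and the county name is then normalized (independent Virginia
# cities, "St."/"Saint" spellings, apostrophes, etc.) so that it matches the
# county naming used in the cases table (time_series_covid19_confirmed_US.csv),
# from which the FIPS code is read. Rows that cannot be matched return None.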
def flipsadelphia(inp):
city = inp.City
state = inp.State.upper().strip('"')
sx = list(ccmap['State short']).index(state)
st = ccmap['State full'][sx]
try:
cc = ccmap[ccmap['City'] == city]
cc = cc[cc['State short'] == state]
county = cc.iloc[0]['County'].lower()
except:
county = 'NOPE'
if county != 'NOPE':
if (county[0:5] == 'saint') and (county != 'saint marys'):
back = county[5:]
county = 'st.' + back
elif county == 'virginia beach city':
county = 'virginia beach'
elif county == 'alexandria city':
county = 'alexandria'
elif county == 'norfolk city':
county = 'norfolk'
elif county == 'fredericksburg city':
county = 'fredericksburg'
elif county == 'chesapeake city':
county = 'chesapeake'
elif county == 'lexington city':
county = 'lexington'
elif county == 'falls church city':
county = 'falls church'
elif county == 'staunton city':
county = 'staunton'
elif county == 'la porte':
county = 'laporte'
elif county == 'suffolk city':
county = 'suffolk'
elif county == 'newport news city':
county = 'newport news'
elif county == 'hampton city':
county = 'hampton'
elif county == 'manassas city':
county = 'manassas'
elif county == 'harrisonburg city':
county = 'harrisonburg'
elif county == 'prince georges':
county = "prince george's"
elif county == 'la salle':
county = 'lasalle'
elif county == 'saint marys':
county = "st. mary's"
elif county == 'lynchburg city':
county = 'lynchburg'
elif county == 'portsmouth city':
county = 'portsmouth'
elif county == 'poquoson city':
county = 'poquoson'
elif county == 'queen annes':
county = "queen anne's"
elif county == 'matanuska susitna':
county = 'matanuska-susitna'
elif county == 'st joseph':
county = 'st. joseph'
elif county == 'de kalb':
county = 'dekalb'
elif county == 'waynesboro city':
county = 'waynesboro'
elif county == 'winchester city':
county = 'winchester'
elif county == 'martinsville city':
county = 'martinsville'
elif county == 'danville city':
county = 'danville'
elif county == 'bristol city':
county = 'bristol'
elif county == 'de witt':
county = 'dewitt'
elif county == 'galax city':
county = 'galax'
elif county == 'colonial heights city':
county = 'colonial heights'
try:
tmp = cases[cases['County'] == county]
tmp = tmp[tmp['Province_State'] == st]
flips = int(tmp.iloc[0]['FIPS'])
except:
flips = None
else:
flips = None
return flips
# Using the function to get FIPS codes
fips = []
for i in range(len(compdata)):
print(str(i+1) + ' of ' + str(len(compdata)) + '.......') # Visualize progress
fips.append(flipsadelphia(compdata.iloc[i]))
# Adding FIPS codes to the main dataframe
fips = pd.Series(fips, name = 'FIPS')
compdata = pd.concat([compdata, fips], axis = 1)
# Use FIPS codes to get county level census/ERS data
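# Note: each loop below uses RACE_Year - 1, so the socioeconomic covariates are
# taken from the year preceding the race; the later "_10" blocks pull fixed 2010
# baseline values instead.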
# Education data
ed_cols = ['percentsomehs_', 'percenthsgrad_', 'percentsomecollege_', 'percentassociates_', 'percentbachelors_', 'percentgrad_degree_'] # base of column headings
somehs = []
hs = []
someuni = []
ass = []
bach = []
grad = []
for i in range(len(compdata)):
print('Education :: ' + str(i+1) + ' of ' + str(len(compdata)) + '.......') # Visualize progress
try:
fip = int(compdata.iloc[i]['FIPS']) # Get the location of the event
yr = compdata.iloc[i]['RACE_Year'] - 1 # Get the year of the event
tmp = edu[edu['fips'] == fip].reset_index(drop = True) # Subset for location
somehs.append(tmp[ed_cols[0] + str(yr)][0])
hs.append(tmp[ed_cols[1] + str(yr)][0])
someuni.append(tmp[ed_cols[2] + str(yr)][0])
ass.append(tmp[ed_cols[3] + str(yr)][0])
bach.append(tmp[ed_cols[4] + str(yr)][0])
grad.append(tmp[ed_cols[5] + str(yr)][0])
except:
somehs.append(None)
hs.append(None)
someuni.append(None)
ass.append(None)
bach.append(None)
grad.append(None)
somehs = pd.Series(somehs, name = 'Some_HS')
hs = pd.Series(hs, name = 'HS')
someuni = pd.Series(someuni, name = 'Some_Uni')
ass = pd.Series(ass, name = 'Associate')
bach = pd.Series(bach, name = 'Bachelor')
grad = pd.Series(grad, name = 'Graduate')
compdata = pd.concat([compdata, somehs, hs, someuni, ass, bach, grad], axis = 1)
# Unemployment data
unemp = []
for i in range(len(compdata)):
print('Unemployment :: ' + str(i+1) + ' of ' + str(len(compdata)) + '.......') # Visualize progress
try:
fip = compdata.iloc[i]['FIPS'] # Get the location of the event
yr = compdata.iloc[i]['RACE_Year'] - 1 # Get the year of the event
tmp = labor[labor['fips'] == fip] # Subset for location
tmp = tmp[tmp.year == yr].reset_index(drop = True) # Subset for year
unemp.append(tmp.unemploymentrate[0])
except:
unemp.append(None)
unemp = pd.Series(unemp, name = 'Unemployment_Rate')
compdata = pd.concat([compdata, unemp], axis = 1)
# Income data
hhinc = []
for i in range(len(compdata)):
print('Income :: ' + str(i+1) + ' of ' + str(len(compdata)) + '.......') # Visualize progress
try:
fip = compdata.iloc[i]['FIPS'] # Get the location of the event
yr = compdata.iloc[i]['RACE_Year'] - 1 # Get the year of the event
tmp = inc[inc['countyid'] == fip] # Subset for location
tmp = tmp[tmp['year'] == yr].reset_index(drop = True) # Subset for year
hhinc.append(tmp.medianhouseholdincome[0])
except:
hhinc.append(None)
hhinc = pd.Series(hhinc, name = 'Median_Household_Income')
compdata = pd.concat([compdata, hhinc], axis = 1)
# Initial education data
ed_cols = ['percentsomehs_', 'percenthsgrad_', 'percentsomecollege_', 'percentassociates_', 'percentbachelors_', 'percentgrad_degree_'] # base of column headings
somehs10 = []
hs10 = []
someuni10 = []
ass10 = []
bach10 = []
grad10 = []
for i in range(len(compdata)):
print('Education :: ' + str(i+1) + ' of ' + str(len(compdata)) + '.......') # Visualize progress
try:
fip = int(compdata.iloc[i]['FIPS']) # Get the location of the event
tmp = edu[edu['fips'] == fip].reset_index(drop = True) # Subset for location
somehs10.append(tmp[ed_cols[0] + str(2010)][0])
hs10.append(tmp[ed_cols[1] + str(2010)][0])
someuni10.append(tmp[ed_cols[2] + str(2010)][0])
ass10.append(tmp[ed_cols[3] + str(2010)][0])
bach10.append(tmp[ed_cols[4] + str(2010)][0])
grad10.append(tmp[ed_cols[5] + str(2010)][0])
except:
somehs10.append(None)
hs10.append(None)
someuni10.append(None)
ass10.append(None)
bach10.append(None)
grad10.append(None)
somehs10 = pd.Series(somehs10, name = 'Some_HS_10')
hs10 = pd.Series(hs10, name = 'HS_10')
someuni10 = pd.Series(someuni10, name = 'Some_Uni_10')
ass10 = pd.Series(ass10, name = 'Associate_10')
bach10 = pd.Series(bach10, name = 'Bachelor_10')
grad10 = pd.Series(grad10, name = 'Graduate_10')
compdata = pd.concat([compdata, somehs10, hs10, someuni10, ass10, bach10, grad10], axis = 1)
# Initial unemployment data
unemp10 = []
for i in range(len(compdata)):
print('Unemployment :: ' + str(i+1) + ' of ' + str(len(compdata)) + '.......') # Visualize progress
try:
fip = compdata.iloc[i]['FIPS'] # Get the location of the event
tmp = labor[labor['fips'] == fip] # Subset for location
tmp = tmp[tmp.year == 2010].reset_index(drop = True) # Subset for year
unemp10.append(tmp.unemploymentrate[0])
except:
unemp10.append(None)
unemp10 = pd.Series(unemp10, name = 'Unemployment_Rate_10')
compdata = pd.concat([compdata, unemp10], axis = 1)
# Initial income data
hhinc10 = []
for i in range(len(compdata)):
print('Income :: ' + str(i+1) + ' of ' + str(len(compdata)) + '.......') # Visualize progress
try:
fip = compdata.iloc[i]['FIPS'] # Get the location of the event
tmp = inc[inc['countyid'] == fip] # Subset for location
tmp = tmp[tmp['year'] == 2010].reset_index(drop = True) # Subset for year
hhinc10.append(tmp.medianhouseholdincome[0])
except:
hhinc10.append(None)
hhinc10 = pd.Series(hhinc10, name = 'Median_Household_Income_10')
compdata = pd.concat([compdata, hhinc10], axis = 1)
# Write dataframe to file
compdata.to_csv(filepath + 'output.csv', index = False)
|
the-stack_0_23636 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.apigeeconnect_v1.services.connection_service import pagers
from google.cloud.apigeeconnect_v1.types import connection
from .transports.base import ConnectionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ConnectionServiceGrpcTransport
from .transports.grpc_asyncio import ConnectionServiceGrpcAsyncIOTransport
class ConnectionServiceClientMeta(type):
"""Metaclass for the ConnectionService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ConnectionServiceTransport]]
_transport_registry["grpc"] = ConnectionServiceGrpcTransport
_transport_registry["grpc_asyncio"] = ConnectionServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ConnectionServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ConnectionServiceClient(metaclass=ConnectionServiceClientMeta):
"""Service Interface for the Apigee Connect connection
management APIs.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "apigeeconnect.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConnectionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConnectionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ConnectionServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ConnectionServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def endpoint_path(project: str, endpoint: str,) -> str:
"""Returns a fully-qualified endpoint string."""
return "projects/{project}/endpoints/{endpoint}".format(
project=project, endpoint=endpoint,
)
@staticmethod
def parse_endpoint_path(path: str) -> Dict[str, str]:
"""Parses a endpoint path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/endpoints/(?P<endpoint>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ConnectionServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the connection service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ConnectionServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ConnectionServiceTransport):
# transport is a ConnectionServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def list_connections(
self,
request: Union[connection.ListConnectionsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListConnectionsPager:
r"""Lists connections that are currently active for the
given Apigee Connect endpoint.
Args:
request (Union[google.cloud.apigeeconnect_v1.types.ListConnectionsRequest, dict]):
The request object. The request for
[ListConnections][Management.ListConnections].
parent (str):
Required. Parent name of the form:
``projects/{project_number or project_id}/endpoints/{endpoint}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.apigeeconnect_v1.services.connection_service.pagers.ListConnectionsPager:
The response for
[ListConnections][Management.ListConnections].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a connection.ListConnectionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, connection.ListConnectionsRequest):
request = connection.ListConnectionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_connections]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListConnectionsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
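# A minimal usage sketch (the project and endpoint names below are hypothetical
# placeholders; credentials are resolved from the environment as described in
# __init__ above):
#
#   client = ConnectionServiceClient()
#   parent = client.endpoint_path("my-project", "my-endpoint")
#   for conn in client.list_connections(parent=parent):
#       print(conn)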
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-apigee-connect",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ConnectionServiceClient",)
|
the-stack_0_23637 | """setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
from distutils.filelist import FileList as _FileList
from distutils.errors import DistutilsInternalError
from distutils.util import convert_path
from distutils import log
import distutils.errors
import distutils.filelist
import os
import re
import sys
import io
import warnings
import time
import collections
import six
from six.moves import map
from setuptools import Command
from setuptools.command.sdist import sdist
from setuptools.command.sdist import walk_revctrl
from setuptools.command.setopt import edit_config
from setuptools.command import bdist_egg
from pkg_resources import (
parse_requirements, safe_name, parse_version,
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
import setuptools.unicode_utils as unicode_utils
from setuptools.glob import glob
import packaging
def translate_pattern(glob):
"""
Translate a file path glob like '*.txt' in to a regular expression.
This differs from fnmatch.translate which allows wildcards to match
directory separators. It also knows about '**/' which matches any number of
directories.
"""
pat = ''
# This will split on '/' within [character classes]. This is deliberate.
chunks = glob.split(os.path.sep)
sep = re.escape(os.sep)
valid_char = '[^%s]' % (sep,)
for c, chunk in enumerate(chunks):
last_chunk = c == len(chunks) - 1
# Chunks that are a literal ** are globstars. They match anything.
if chunk == '**':
if last_chunk:
# Match anything if this is the last component
pat += '.*'
else:
# Match '(name/)*'
pat += '(?:%s+%s)*' % (valid_char, sep)
continue # Break here as the whole path component has been handled
# Find any special characters in the remainder
i = 0
chunk_len = len(chunk)
while i < chunk_len:
char = chunk[i]
if char == '*':
# Match any number of name characters
pat += valid_char + '*'
elif char == '?':
# Match a name character
pat += valid_char
elif char == '[':
# Character class
inner_i = i + 1
# Skip initial !/] chars
if inner_i < chunk_len and chunk[inner_i] == '!':
inner_i = inner_i + 1
if inner_i < chunk_len and chunk[inner_i] == ']':
inner_i = inner_i + 1
# Loop till the closing ] is found
while inner_i < chunk_len and chunk[inner_i] != ']':
inner_i = inner_i + 1
if inner_i >= chunk_len:
# Got to the end of the string without finding a closing ]
# Do not treat this as a matching group, but as a literal [
pat += re.escape(char)
else:
# Grab the insides of the [brackets]
inner = chunk[i + 1:inner_i]
char_class = ''
# Class negation
if inner[0] == '!':
char_class = '^'
inner = inner[1:]
char_class += re.escape(inner)
pat += '[%s]' % (char_class,)
# Skip to the end ]
i = inner_i
else:
pat += re.escape(char)
i += 1
# Join each chunk with the dir separator
if not last_chunk:
pat += sep
return re.compile(pat + r'\Z(?ms)')
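# A few illustrative examples of the translation above (assuming a POSIX path
# separator):
#
#   translate_pattern('*.txt')       matches 'a.txt' but not 'dir/a.txt'
#   translate_pattern('dir/*.txt')   matches 'dir/a.txt' but not 'dir/sub/a.txt'
#   translate_pattern('**/*.txt')    matches '.txt' files at any directory depth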
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date']
negative_opt = {
'no-date': 'tag-date',
}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
####################################
# allow the 'tag_svn_revision' to be detected and
# set, supporting sdists built on older Setuptools.
@property
def tag_svn_revision(self):
pass
@tag_svn_revision.setter
def tag_svn_revision(self, value):
pass
####################################
def save_version_info(self, filename):
"""
Materialize the value of date into the
build tag. Install build keys in a deterministic order
to avoid arbitrary reordering on subsequent builds.
"""
# python 2.6 compatibility
odict = getattr(collections, 'OrderedDict', dict)
egg_info = odict()
# follow the order these keys would have been added
# when PYTHONHASHSEED=0
egg_info['tag_build'] = self.tags()
egg_info['tag_date'] = 0
edit_config(filename, dict(egg_info=egg_info))
def finalize_options(self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
parsed_version = parse_version(self.egg_version)
try:
is_version = isinstance(parsed_version, packaging.version.Version)
spec = (
"%s==%s" if is_version else "%s===%s"
)
list(
parse_requirements(spec % (self.egg_name, self.egg_version))
)
except ValueError:
raise distutils.errors.DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name, self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('', os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name) + '.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name:
self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key == self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if six.PY3:
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
ep.require(installer=installer)
writer = ep.resolve()
writer(self, ep.name, os.path.join(self.egg_info, ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
version = ''
if self.tag_build:
version += self.tag_build
if self.tag_date:
version += time.strftime("-%Y%m%d")
return version
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name + '.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-" * 78 + '\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n' + '-' * 78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(_FileList):
# Implementations of the various MANIFEST.in commands
def process_template_line(self, line):
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dir_pattern).
(action, patterns, dir, dir_pattern) = self._parse_template_line(line)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
self.debug_print("include " + ' '.join(patterns))
for pattern in patterns:
if not self.include(pattern):
log.warn("warning: no files found matching '%s'", pattern)
elif action == 'exclude':
self.debug_print("exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.exclude(pattern):
log.warn(("warning: no previously-included files "
"found matching '%s'"), pattern)
elif action == 'global-include':
self.debug_print("global-include " + ' '.join(patterns))
for pattern in patterns:
if not self.global_include(pattern):
log.warn(("warning: no files found matching '%s' "
"anywhere in distribution"), pattern)
elif action == 'global-exclude':
self.debug_print("global-exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.global_exclude(pattern):
log.warn(("warning: no previously-included files matching "
"'%s' found anywhere in distribution"),
pattern)
elif action == 'recursive-include':
self.debug_print("recursive-include %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.recursive_include(dir, pattern):
log.warn(("warning: no files found matching '%s' "
"under directory '%s'"),
pattern, dir)
elif action == 'recursive-exclude':
self.debug_print("recursive-exclude %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.recursive_exclude(dir, pattern):
log.warn(("warning: no previously-included files matching "
"'%s' found under directory '%s'"),
pattern, dir)
elif action == 'graft':
self.debug_print("graft " + dir_pattern)
if not self.graft(dir_pattern):
log.warn("warning: no directories found matching '%s'",
dir_pattern)
elif action == 'prune':
self.debug_print("prune " + dir_pattern)
if not self.prune(dir_pattern):
log.warn(("no previously-included directories found "
"matching '%s'"), dir_pattern)
else:
raise DistutilsInternalError(
"this cannot happen: invalid action '%s'" % action)
def _remove_files(self, predicate):
"""
Remove all files from the file list that match the predicate.
Return True if any matching files were removed
"""
found = False
for i in range(len(self.files) - 1, -1, -1):
if predicate(self.files[i]):
self.debug_print(" removing " + self.files[i])
del self.files[i]
found = True
return found
def include(self, pattern):
"""Include files that match 'pattern'."""
found = [f for f in glob(pattern) if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def exclude(self, pattern):
"""Exclude files that match 'pattern'."""
match = translate_pattern(pattern)
return self._remove_files(match.match)
def recursive_include(self, dir, pattern):
"""
Include all files anywhere in 'dir/' that match the pattern.
"""
full_pattern = os.path.join(dir, '**', pattern)
found = [f for f in glob(full_pattern, recursive=True)
if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def recursive_exclude(self, dir, pattern):
"""
Exclude any file anywhere in 'dir/' that match the pattern.
"""
match = translate_pattern(os.path.join(dir, '**', pattern))
return self._remove_files(match.match)
def graft(self, dir):
"""Include all files from 'dir/'."""
found = [
item
for match_dir in glob(dir)
for item in distutils.filelist.findall(match_dir)
]
self.extend(found)
return bool(found)
def prune(self, dir):
"""Filter out files from 'dir/'."""
match = translate_pattern(os.path.join(dir, '**'))
return self._remove_files(match.match)
def global_include(self, pattern):
"""
Include all files anywhere in the current directory that match the
pattern. This is very inefficient on large file trees.
"""
if self.allfiles is None:
self.findall()
match = translate_pattern(os.path.join('**', pattern))
found = [f for f in self.allfiles if match.match(f)]
self.extend(found)
return bool(found)
def global_exclude(self, pattern):
"""
Exclude all files anywhere that match the pattern.
"""
match = translate_pattern(os.path.join('**', pattern))
return self._remove_files(match.match)
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if self._safe_path(path):
self.files.append(path)
def extend(self, paths):
self.files.extend(filter(self._safe_path, paths))
def _repair(self):
"""
Replace self.files with only safe paths
Because some owners of FileList manipulate the underlying
``files`` attribute directly, this method must be called to
repair those paths.
"""
self.files = list(filter(self._safe_path, self.files))
def _safe_path(self, path):
enc_warn = "'%s' not %s encodable -- skipping"
# To avoid accidental trans-coding errors, first decode to unicode
u_path = unicode_utils.filesys_decode(path)
if u_path is None:
log.warn("'%s' in unexpected encoding -- skipping" % path)
return False
# Must ensure utf-8 encodability
utf8_path = unicode_utils.try_encode(u_path, "utf-8")
if utf8_path is None:
log.warn(enc_warn, path, 'utf-8')
return False
try:
# accept the path if either encoding checks out on disk
if os.path.exists(u_path) or os.path.exists(utf8_path):
return True
# this will catch any encode errors decoding u_path
except UnicodeEncodeError:
log.warn(enc_warn, path, sys.getfilesystemencoding())
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options(self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def _manifest_normalize(self, path):
path = unicode_utils.filesys_decode(path)
return path.replace(os.sep, '/')
def write_manifest(self):
"""
Write the file list in 'self.filelist' to the manifest file
named by 'self.manifest'.
"""
self.filelist._repair()
# After _repair(), the paths are guaranteed encodable, but not necessarily unicode
files = [self._manifest_normalize(f) for f in self.filelist.files]
msg = "writing manifest file '%s'" % self.manifest
self.execute(write_file, (self.manifest, files), msg)
def warn(self, msg):
if not self._should_suppress_warning(msg):
sdist.warn(self, msg)
@staticmethod
def _should_suppress_warning(msg):
"""
suppress missing-file warnings from sdist
"""
return re.match(r"standard file .*not found", msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.graft(ei_cmd.egg_info)
def prune_file_list(self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.prune(build.build_base)
self.filelist.prune(base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
is_regex=1)
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents)
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution, 'zip_safe', None)
bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def _write_requirements(stream, reqs):
lines = yield_lines(reqs or ())
append_cr = lambda line: line + '\n'
lines = map(append_cr, lines)
stream.writelines(lines)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = six.StringIO()
_write_requirements(data, dist.install_requires)
extras_require = dist.extras_require or {}
for extra in sorted(extras_require):
data.write('\n[{extra}]\n'.format(**vars()))
_write_requirements(data, extras_require[extra])
cmd.write_or_delete_file("requirements", filename, data.getvalue())
def write_setup_requirements(cmd, basename, filename):
data = six.StringIO()
_write_requirements(data, cmd.distribution.setup_requires)
cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[
k.split('.', 1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value) + '\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep, six.string_types) or ep is None:
data = ep
elif ep is not None:
data = []
for section, contents in sorted(ep.items()):
if not isinstance(contents, six.string_types):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(sorted(map(str, contents.values())))
data.append('[%s]\n%s\n\n' % (section, contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
"""
Get a -r### off of PKG-INFO Version in case this is an sdist of
a subversion revision.
"""
warnings.warn("get_pkg_info_revision is deprecated.", DeprecationWarning)
if os.path.exists('PKG-INFO'):
with io.open('PKG-INFO') as f:
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
return 0
|
the-stack_0_23638 | import os
import sys
import shutil
from glob import glob
from subprocess import check_call, CalledProcessError
from time import sleep
from charms import layer
from charms.layer.execd import execd_preinstall
def lsb_release():
"""Return /etc/lsb-release in a dict"""
d = {}
with open('/etc/lsb-release', 'r') as lsb:
for l in lsb:
k, v = l.split('=')
d[k.strip()] = v.strip()
return d
def bootstrap_charm_deps():
"""
Set up the base charm dependencies so that the reactive system can run.
"""
# execd must happen first, before any attempt to install packages or
# access the network, because sites use this hook to do bespoke
# configuration and install secrets so the rest of this bootstrap
# and the charm itself can actually succeed. This call does nothing
# unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
execd_preinstall()
# ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts
charm_dir = os.environ['JUJU_CHARM_DIR']
os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
venv = os.path.abspath('../.venv')
vbin = os.path.join(venv, 'bin')
vpip = os.path.join(vbin, 'pip')
vpy = os.path.join(vbin, 'python')
hook_name = os.path.basename(sys.argv[0])
is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
is_charm_upgrade = hook_name == 'upgrade-charm'
is_series_upgrade = hook_name == 'post-series-upgrade'
post_upgrade = os.path.exists('wheelhouse/.upgrade')
is_upgrade = not post_upgrade and (is_charm_upgrade or is_series_upgrade)
if is_bootstrapped and not is_upgrade:
activate_venv()
# the .upgrade file prevents us from getting stuck in a loop
# when re-execing to activate the venv; at this point, we've
# activated the venv, so it's safe to clear it
if post_upgrade:
os.unlink('wheelhouse/.upgrade')
return
if is_series_upgrade and os.path.exists(venv):
# series upgrade should do a full clear of the venv, rather than just
# updating it, to bring in updates to Python itself
shutil.rmtree(venv)
if is_upgrade:
if os.path.exists('wheelhouse/.bootstrapped'):
os.unlink('wheelhouse/.bootstrapped')
open('wheelhouse/.upgrade', 'w').close()
# bootstrap wheelhouse
if os.path.exists('wheelhouse'):
with open('/root/.pydistutils.cfg', 'w') as fp:
# make sure that easy_install also only uses the wheelhouse
# (see https://github.com/pypa/pip/issues/410)
fp.writelines([
"[easy_install]\n",
"allow_hosts = ''\n",
"find_links = file://{}/wheelhouse/\n".format(charm_dir),
])
apt_install([
'python3-pip',
'python3-setuptools',
'python3-yaml',
'python3-dev',
'python3-wheel',
'build-essential',
])
from charms.layer import options
cfg = options.get('basic')
# include packages defined in layer.yaml
apt_install(cfg.get('packages', []))
# if we're using a venv, set it up
if cfg.get('use_venv'):
if not os.path.exists(venv):
series = lsb_release()['DISTRIB_CODENAME']
if series in ('precise', 'trusty'):
apt_install(['python-virtualenv'])
else:
apt_install(['virtualenv'])
cmd = ['virtualenv', '-ppython3', '--never-download', venv]
if cfg.get('include_system_packages'):
cmd.append('--system-site-packages')
check_call(cmd)
os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
pip = vpip
else:
pip = 'pip3'
# save a copy of system pip to prevent `pip3 install -U pip`
# from changing it
if os.path.exists('/usr/bin/pip'):
shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
# need newer pip, to fix spurious Double Requirement error:
# https://github.com/pypa/pip/issues/56
check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
'pip'])
# per https://github.com/juju-solutions/layer-basic/issues/110
# this replaces the setuptools that was copied over from the system on
# venv create with latest setuptools and adds setuptools_scm
check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
'setuptools', 'setuptools-scm'])
# install the rest of the wheelhouse deps
check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse'] +
glob('wheelhouse/*'))
# re-enable installation from pypi
os.remove('/root/.pydistutils.cfg')
# install python packages from layer options
if cfg.get('python_packages'):
check_call([pip, 'install', '-U'] + cfg.get('python_packages'))
if not cfg.get('use_venv'):
# restore system pip to prevent `pip3 install -U pip`
# from changing it
if os.path.exists('/usr/bin/pip.save'):
shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
os.remove('/usr/bin/pip.save')
# setup wrappers to ensure envs are used for scripts
shutil.copy2('bin/charm-env', '/usr/local/sbin/')
for wrapper in ('charms.reactive', 'charms.reactive.sh',
'chlp', 'layer_option'):
src = os.path.join('/usr/local/sbin', 'charm-env')
dst = os.path.join('/usr/local/sbin', wrapper)
if not os.path.exists(dst):
os.symlink(src, dst)
if cfg.get('use_venv'):
shutil.copy2('bin/layer_option', vbin)
else:
shutil.copy2('bin/layer_option', '/usr/local/bin/')
# re-link the charm copy to the wrapper in case charms
# call bin/layer_option directly (as was the old pattern)
os.remove('bin/layer_option')
os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
# flag us as having already bootstrapped so we don't do it again
open('wheelhouse/.bootstrapped', 'w').close()
# Ensure that the newly bootstrapped libs are available.
# Note: this only seems to be an issue with namespace packages.
# Non-namespace-package libs (e.g., charmhelpers) are available
# without having to reload the interpreter. :/
reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
def activate_venv():
"""
Activate the venv if enabled in ``layer.yaml``.
This is handled automatically for normal hooks, but actions might
need to invoke this manually, using something like:
# Load modules from $JUJU_CHARM_DIR/lib
import sys
sys.path.append('lib')
from charms.layer.basic import activate_venv
activate_venv()
This will ensure that modules installed in the charm's
virtual environment are available to the action.
"""
from charms.layer import options
venv = os.path.abspath('../.venv')
vbin = os.path.join(venv, 'bin')
vpy = os.path.join(vbin, 'python')
use_venv = options.get('basic', 'use_venv')
if use_venv and '.venv' not in sys.executable:
# activate the venv
os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
reload_interpreter(vpy)
layer.patch_options_interface()
layer.import_layer_libs()
def reload_interpreter(python):
"""
Reload the python interpreter to ensure that all deps are available.
Newly installed modules in namespace packages sometimes seem not to
be picked up by Python 3.
"""
os.execve(python, [python] + list(sys.argv), os.environ)
def apt_install(packages):
"""
Install apt packages.
This ensures a consistent set of options that are often missed but
should really be set.
"""
if isinstance(packages, (str, bytes)):
packages = [packages]
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
cmd = ['apt-get',
'--option=Dpkg::Options::=--force-confold',
'--assume-yes',
'install']
for attempt in range(3):
try:
check_call(cmd + packages, env=env)
except CalledProcessError:
if attempt == 2: # third attempt
raise
try:
# sometimes apt-get update needs to be run
check_call(['apt-get', 'update'])
except CalledProcessError:
# sometimes it's a dpkg lock issue
pass
sleep(5)
else:
break
def init_config_states():
import yaml
from charmhelpers.core import hookenv
from charms.reactive import set_state
from charms.reactive import toggle_state
config = hookenv.config()
config_defaults = {}
config_defs = {}
config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
if os.path.exists(config_yaml):
with open(config_yaml) as fp:
config_defs = yaml.safe_load(fp).get('options', {})
config_defaults = {key: value.get('default')
for key, value in config_defs.items()}
for opt in config_defs.keys():
if config.changed(opt):
set_state('config.changed')
set_state('config.changed.{}'.format(opt))
toggle_state('config.set.{}'.format(opt), config.get(opt))
toggle_state('config.default.{}'.format(opt),
config.get(opt) == config_defaults[opt])
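# For illustration: a charm config option named e.g. 'port' produces the
# reactive states 'config.changed' and 'config.changed.port' when it changes,
# plus 'config.set.port' (truthy value) and 'config.default.port' (value equals
# the default in config.yaml), as set/toggled in init_config_states() above.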
def clear_config_states():
from charmhelpers.core import hookenv, unitdata
from charms.reactive import remove_state
config = hookenv.config()
remove_state('config.changed')
for opt in config.keys():
remove_state('config.changed.{}'.format(opt))
remove_state('config.set.{}'.format(opt))
remove_state('config.default.{}'.format(opt))
unitdata.kv().flush()
|
the-stack_0_23642 | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import os
import re
import scipy.io
import shutil
import tarfile
import tempfile
import tqdm
from nnabla.utils.image_utils import imresize, imread
def _resize_image(im, width, height, padding):
# resize
h = im.shape[0]
w = im.shape[1]
if w != width or h != height:
# resize image
if not padding:
# trimming mode
if float(h) / w > float(height) / width:
target_h = int(float(w) / width * height)
im = im[(h - target_h) // 2:h -
(h - target_h) // 2, ::]
else:
target_w = int(float(h) / height * width)
im = im[::, (w - target_w) // 2:w -
(w - target_w) // 2]
else:
# padding mode
if float(h) / w < float(height) / width:
target_h = int(float(height) / width * w)
pad = (((target_h - h) // 2, target_h -
(target_h - h) // 2 - h), (0, 0))
else:
target_w = int(float(width) / height * h)
pad = ((0, 0), ((target_w - w) // 2,
target_w - (target_w - w) // 2 - w))
pad = pad + ((0, 0),)
im = np.pad(im, pad, 'constant')
im = imresize(im, (width, height))
x = np.array(im, dtype=np.uint8).transpose((2, 0, 1))
return x
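# Worked example (illustrative sizes): with width=height=320 in trimming mode,
# a 640x480 RGB input has h/w = 0.75 < 1.0, so it is center-cropped to
# im[:, 80:560] (480x480), resized to 320x320, and returned transposed as a
# uint8 array of shape (3, 320, 320).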
def _create_train_cache(archive, output, names, synsets_id, args):
images0 = []
print("Count image in TAR")
pbar = tqdm.tqdm(total=len(names), unit='%')
for name in names:
category = os.path.splitext(name)[0]
marchive = tarfile.open(fileobj=archive.extractfile(name))
for mname in marchive.getnames():
if re.match(r'{}_[0-9]+\.JPEG'.format(category), mname):
images0.append((synsets_id[category], name, marchive, mname))
else:
                print('Invalid file {} included in tar file'.format(mname))
exit(-1)
pbar.update(1)
pbar.close()
# Thinning
images = []
for i, image in enumerate(images0):
if i % args.thinning == 0:
images.append(image)
def _load_func(index):
y, name, marchive, mname = images[index]
im = imread(marchive.extractfile(mname), num_channels=3)
x = _resize_image(im, args.width, args.height, args.mode == 'padding')
return x, np.array([y - 1]).astype(np.int32)
from nnabla.utils.data_source import DataSourceWithFileCache
from nnabla.utils.data_source_implements import SimpleDataSource
from nnabla.logger import logger
logger.info('Num of data : {}'.format(len(images)))
shuffle = True
if args.shuffle == 'False':
shuffle = False
source = SimpleDataSource(_load_func, len(images), shuffle, rng=None)
DataSourceWithFileCache(
source, cache_dir=output, shuffle=args.shuffle)
def _create_validation_cache(archive, output, names, ground_truth, args):
# ILSVRC2012_devkit_t12/readme.txt
# The ground truth of the validation images is in
# data/ILSVRC2012_validation_ground_truth.txt, where each line contains
# one ILSVRC2012_ID for one image, in the ascending alphabetical order
# of the image file names.
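    # e.g. the first line of the ground-truth file corresponds to
    # ILSVRC2012_val_00000001.JPEG, the second to ..._00000002.JPEG, and so on
    # (hence the sorted() below, which reproduces that alphabetical order).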
images0 = sorted(names)
# Thinning
images = []
for i, image in enumerate(images0):
if i % args.thinning == 0:
images.append(image)
def _load_func(index):
y, name = ground_truth[index], images[index]
im = imread(archive.extractfile(name), num_channels=3)
x = _resize_image(im, args.width, args.height, args.mode == 'padding')
return x, np.array([y - 1]).astype(np.int32)
from nnabla.utils.data_source import DataSourceWithFileCache
from nnabla.utils.data_source_implements import SimpleDataSource
from nnabla.logger import logger
logger.info('Num of data : {}'.format(len(images)))
shuffle = False
if args.shuffle == 'True':
shuffle = True
source = SimpleDataSource(_load_func, len(images), shuffle, rng=None)
DataSourceWithFileCache(
source, cache_dir=output, shuffle=args.shuffle)
_pbar = None
_prev_progress = None
def _progress(state, progress=0.0):
global _pbar
global _prev_progress
if state is None:
if _pbar is not None:
_pbar.close()
_pbar = None
_prev_progress = None
else:
if _pbar is None:
_pbar = tqdm.tqdm(desc=state, total=100, unit='%')
else:
if _prev_progress is None:
_prev_progress = 0
update = int((progress - _prev_progress) * 100)
if update > 0:
_pbar.update(update)
_prev_progress = progress
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str, nargs='+',
help='Source file or directory.')
parser.add_argument('output', type=str,
help='Destination directory.')
parser.add_argument('-D', '--devkit', type=str, required=True,
help='Devkit filename')
parser.add_argument('-W', '--width', type=int, default=320,
help='width of output image (default:320)')
parser.add_argument('-H', '--height', type=int, default=320,
help='height of output image (default:320)')
parser.add_argument('-m', '--mode', default='trimming',
choices=['trimming', 'padding'],
help='shaping mode (trimming or padding) (default:trimming)')
parser.add_argument('-S', '--shuffle', choices=['True', 'False'],
help='shuffle mode if not specified, train:True, val:False.' +
' Otherwise specified value will be used for both.')
parser.add_argument('-N', '--file-cache-size', type=int, default=100,
help='num of data in cache file (default:100)')
parser.add_argument('-C', '--cache-type', default='npy',
choices=['h5', 'npy'],
help='cache format (h5 or npy) (default:npy)')
parser.add_argument('--thinning', type=int, default=1,
help='Thinning rate')
args = parser.parse_args()
############################################################################
# Analyze tar
# If it consists only of members corresponding to regular expression
# 'n[0-9]{8}\.tar', it is judged as train data archive.
# If it consists only of members corresponding to regular expression
# 'ILSVRC2012_val_[0-9]{8}\.JPEG', it is judged as validation data archive.
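    # For example, a training archive holds one per-synset member such as
    # 'n01440764.tar' per class, while a validation archive holds members such
    # as 'ILSVRC2012_val_00000001.JPEG' (names here are illustrative).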
archives = {'train': None, 'val': None}
for inputarg in args.input:
print('Checking input file [{}]'.format(inputarg))
archive = tarfile.open(inputarg)
is_train = False
is_val = False
names = []
for name in archive.getnames():
if re.match(r'n[0-9]{8}\.tar', name):
if is_val:
                    print('Train data {} included in validation tar'.format(name))
exit(-1)
is_train = True
elif re.match(r'ILSVRC2012_val_[0-9]{8}\.JPEG', name):
if is_train:
                    print('Validation data {} included in train tar'.format(name))
exit(-1)
is_val = True
else:
                print('Invalid member {} included in tar file'.format(name))
exit(-1)
names.append(name)
if is_train:
if archives['train'] is None:
archives['train'] = (archive, names)
else:
print('Please specify only 1 training tar archive.')
exit(-1)
if is_val:
if archives['val'] is None:
archives['val'] = (archive, names)
else:
print('Please specify only 1 validation tar archive.')
exit(-1)
devkit = tarfile.open(args.devkit)
validation_ground_truth = []
synsets_id = {}
synsets_id_name = {}
synsets_id_word = {}
m = devkit.extractfile('ILSVRC2012_devkit_t12/data/meta.mat')
meta = scipy.io.loadmat(m)
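    # Each entry of meta['synsets'] appears to hold (in this order) the
    # ILSVRC2012_ID, the WNID string (e.g. 'n01440764') that matches the train
    # tar member names, and the human-readable words for the synset.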
for item in meta['synsets']:
sid = item[0][0][0][0]
sname = item[0][1][0]
sword = item[0][2][0]
synsets_id[sname] = sid
synsets_id_name[sid] = sname
synsets_id_word[sid] = sword
m.close()
g = devkit.extractfile(
'ILSVRC2012_devkit_t12/data/ILSVRC2012_validation_ground_truth.txt')
for l in g.readlines():
validation_ground_truth.append(int(l.rstrip()))
g.close()
devkit.close()
############################################################################
# Prepare logging
tmpdir = tempfile.mkdtemp()
logfilename = os.path.join(tmpdir, 'nnabla.log')
# Temporarily chdir to tmpdir just before importing nnabla to reflect nnabla.conf.
cwd = os.getcwd()
os.chdir(tmpdir)
with open('nnabla.conf', 'w') as f:
f.write('[LOG]\n')
f.write('log_file_name = {}\n'.format(logfilename))
f.write('log_file_format = %(funcName)s : %(message)s\n')
f.write('log_console_level = CRITICAL\n')
from nnabla.config import nnabla_config
os.chdir(cwd)
############################################################################
# Data iterator setting
nnabla_config.set('DATA_ITERATOR',
'cache_file_format', '.' + args.cache_type)
nnabla_config.set('DATA_ITERATOR',
'data_source_file_cache_size', str(args.file_cache_size))
nnabla_config.set('DATA_ITERATOR',
'data_source_file_cache_num_of_threads', '1')
if not os.path.isdir(args.output):
os.makedirs(args.output)
############################################################################
# Prepare status monitor
from nnabla.utils.progress import configure_progress
configure_progress(None, _progress)
############################################################################
# Converter
names_csv = open(os.path.join(args.output, 'synsets_id_name.csv'), 'w')
words_csv = open(os.path.join(args.output, 'synsets_id_word.csv'), 'w')
for sid in sorted(synsets_id_word.keys()):
names_csv.write('{},{}\n'.format(sid, synsets_id_name[sid]))
words_csv.write('{},{}\n'.format(sid, ','.join(
['"'+x.strip()+'"' for x in synsets_id_word[sid].split(',')])))
names_csv.close()
words_csv.close()
try:
if archives['train'] is not None:
from nnabla.logger import logger
logger.info('StartCreatingCache')
archive, names = archives['train']
output = os.path.join(args.output, 'train')
if not os.path.isdir(output):
os.makedirs(output)
_create_train_cache(archive, output, names, synsets_id, args)
if archives['val'] is not None:
from nnabla.logger import logger
logger.info('StartCreatingCache')
archive, names = archives['val']
output = os.path.join(args.output, 'val')
if not os.path.isdir(output):
os.makedirs(output)
_create_validation_cache(
archive, output, names, validation_ground_truth, args)
except KeyboardInterrupt:
shutil.rmtree(tmpdir, ignore_errors=True)
        # Even if CTRL-C is pressed, the process does not stop while a
        # thread is still running, so send a signal to ourselves instead.
os.kill(os.getpid(), 9)
############################################################################
# Finish
_finish = True
shutil.rmtree(tmpdir, ignore_errors=True)
if __name__ == '__main__':
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
main()
|
the-stack_0_23643 | """See README.md"""
import json
import os
import pathlib
import sys
import uuid
DATA_ROOT = pathlib.Path(os.path.abspath(__file__)).parent
assert DATA_ROOT.name == "data", "Please make sure this script is inside the data folder"
TO_EXCLUDE = (
"api/surf/crm/organisations",
"api/products",
)
def save_orchestrator_response(url, jsonresponse, dryrun):
"""Given a URL and JSON response create/update the corresponding mockfile."""
endpoint = url.split("/api/")[1].rstrip("/")
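    # Illustrative example: 'https://host/api/subscriptions/<uuid>/' becomes
    # the endpoint 'subscriptions/<uuid>', which is split below into the path
    # 'subscriptions' and the identifier '<uuid>'.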
try:
path, identifier = endpoint.rsplit("/", maxsplit=1)
except ValueError:
path, identifier = None, endpoint
if any(char in identifier for char in "?&="):
# Skip urls with query parameters for now (can be normalized if it's needed)
print(f"Unsupported URL parameters: {url}")
return
if any(pattern in url for pattern in TO_EXCLUDE):
print(f"Excluding URL {url}")
return
def get_id(string):
"""Defines how final URL component can be used as identifier"""
try:
parsed = uuid.UUID(string)
return str(parsed)[:8]
except ValueError:
if string.isnumeric():
return string
return None
try:
response = json.loads(jsonresponse)
except json.JSONDecodeError as e:
print(f"Invalid JSON response: {url} ({e})")
return
if (parsed_id := get_id(identifier)) is None:
# URL ends on a word "products" or "organisations"
filename = f"{identifier}.json"
else:
# URL ends on UUID or integer
if "/domain-model/" in url:
filename_prefix = "".join(c for c in response["product"]["tag"].lower() if c.isalpha())
else:
filename_prefix = ""
filename = f"{filename_prefix}-{parsed_id}.json" if filename_prefix else f"{parsed_id}.json"
if not path:
# Store in data/
fpath = DATA_ROOT / filename
print(
f"{endpoint} -> {'update (if changed)' if fpath.is_file() else 'create'} '{filename}' in root directory"
)
else:
# Store in data/<subfolder>/
dpath = DATA_ROOT / path
fpath = dpath / filename
print(
f"{endpoint} -> {'update (if changed)' if fpath.is_file() else 'create'} '{filename}' "
f"in {'new' if not dpath.is_dir() else 'existing'} directory '{path}'"
)
if not dpath.is_dir() and not dryrun:
dpath.mkdir(parents=True)
if not dryrun:
with fpath.open(mode="w") as handle:
json.dump(response, handle, sort_keys=True, indent=4)
def process_har_entries(har_entries, dryrun):
"""Filter and process each successful and unique orchestrator API request"""
# Keep successful API requests
valid_entries = [
entry
for entry in har_entries
if "/api/" in entry["request"]["url"]
and entry["request"]["method"] == "GET"
and 200 <= entry["response"]["status"] < 300
and "application/json" in entry["response"]["content"]["mimeType"]
]
# Filter duplicates
unique_requests = {}
for entry in valid_entries:
url = entry["request"]["url"]
json_response = entry["response"]["content"]["text"]
if url not in unique_requests:
unique_requests[url] = json_response
else:
assert (
unique_requests[url] == json_response
), f"Request for {url=} occurs twice with different JSON response"
print(f"Going to process {len(unique_requests)}/{len(har_entries)} requests")
if dryrun:
print("--> Dryrun mode, not making changes. Provide second parameter 'update' to create/update the mocks")
for url in sorted(unique_requests.keys()):
save_orchestrator_response(url, unique_requests[url], dryrun)
if dryrun:
print("--> Dryrun mode, not making changes. Provide second parameter 'update' to create/update the mocks")
def process_har():
"""Extract entries from a HAR file and process them."""
try:
filename = sys.argv[1]
dryrun = (sys.argv[2] != "update") if len(sys.argv) > 2 else True
with open(filename) as f:
har = json.load(f)
except (IndexError, FileNotFoundError):
print("Please provide a .har file as first parameter")
except json.JSONDecodeError as e:
print(f"{filename} is not valid json:", e)
else:
process_har_entries(har["log"]["entries"], dryrun)
if __name__ == "__main__":
process_har()
|
the-stack_0_23645 | """Implementation of sample attack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import numpy as np
from scipy.misc import imread
from scipy.misc import imsave
import tensorflow as tf
from tensorflow.contrib.slim.nets import inception
slim = tf.contrib.slim
tf.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.flags.DEFINE_string(
'checkpoint_path', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'input_dir', '', 'Input directory with images.')
tf.flags.DEFINE_string(
'output_dir', '', 'Output directory with images.')
tf.flags.DEFINE_float(
'max_epsilon', 16.0, 'Maximum size of adversarial perturbation.')
tf.flags.DEFINE_integer(
'image_width', 299, 'Width of each input images.')
tf.flags.DEFINE_integer(
'image_height', 299, 'Height of each input images.')
tf.flags.DEFINE_integer(
'batch_size', 16, 'How many images process at one time.')
FLAGS = tf.flags.FLAGS
def load_target_class(input_dir):
"""Loads target classes."""
with tf.gfile.Open(os.path.join(input_dir, 'target_class.csv')) as f:
return {row[0]: int(row[1]) for row in csv.reader(f) if len(row) >= 2}
def load_images(input_dir, batch_shape):
"""Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
    filenames: list of file names without path of each image
      Length of this list could be less than batch_size; in this case only
      the first few images of the result are elements of the minibatch.
images: array with all images from this batch
"""
images = np.zeros(batch_shape)
filenames = []
idx = 0
batch_size = batch_shape[0]
for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
with tf.gfile.Open(filepath) as f:
image = imread(f, mode='RGB').astype(np.float) / 255.0
# Images for inception classifier are normalized to be in [-1, 1] interval.
images[idx, :, :, :] = image * 2.0 - 1.0
filenames.append(os.path.basename(filepath))
idx += 1
if idx == batch_size:
yield filenames, images
filenames = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images
def save_images(images, filenames, output_dir):
"""Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
      If the number of file names in this list is less than the number of
      images in the minibatch, then only the first len(filenames) images
      will be saved.
output_dir: directory where to save images
"""
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
imsave(f, (images[i, :, :, :] + 1.0) * 0.5, format='png')
def main(_):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# eps is a difference between pixels so it should be in [0, 2] interval.
# Renormalizing epsilon from [0, 255] to [0, 2].
eps = 2.0 * FLAGS.max_epsilon / 255.0
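  # e.g. with the default --max_epsilon=16.0 this gives eps = 2*16/255, about 0.125.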
batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
num_classes = 1001
tf.logging.set_verbosity(tf.logging.INFO)
  all_images_target_class = load_target_class(FLAGS.input_dir)
with tf.Graph().as_default():
# Prepare graph
x_input = tf.placeholder(tf.float32, shape=batch_shape)
with slim.arg_scope(inception.inception_v3_arg_scope()):
logits, end_points = inception.inception_v3(
x_input, num_classes=num_classes, is_training=False)
target_class_input = tf.placeholder(tf.int32, shape=[FLAGS.batch_size])
one_hot_target_class = tf.one_hot(target_class_input, num_classes)
cross_entropy = tf.losses.softmax_cross_entropy(one_hot_target_class,
logits,
label_smoothing=0.1,
weights=1.0)
cross_entropy += tf.losses.softmax_cross_entropy(one_hot_target_class,
end_points['AuxLogits'],
label_smoothing=0.1,
weights=0.4)
x_adv = x_input - eps/4.0 * tf.sign(tf.gradients(cross_entropy, x_input)[0])
x_adv = tf.clip_by_value(x_adv, -1.0, 1.0)
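    # Each evaluation of x_adv takes one step of size eps/4 against the
    # gradient of the targeted cross-entropy (pushing the image toward the
    # target class) and clips the result back into the valid [-1, 1] pixel
    # range; the loop further below applies 25 such steps per batch.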
# Run computation
saver = tf.train.Saver(slim.get_model_variables())
session_creator = tf.train.ChiefSessionCreator(
scaffold=tf.train.Scaffold(saver=saver),
checkpoint_filename_with_path=FLAGS.checkpoint_path,
master=FLAGS.master)
with tf.train.MonitoredSession(session_creator=session_creator) as sess:
for filenames, images in load_images(FLAGS.input_dir, batch_shape):
target_class_for_batch = (
            [all_images_target_class[n] for n in filenames]
+ [0] * (FLAGS.batch_size - len(filenames)))
img = images
for i in range(25):
img = sess.run(x_adv,
feed_dict={
x_input: img,
target_class_input: target_class_for_batch
})
adv_images = img
save_images(adv_images, filenames, FLAGS.output_dir)
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_23646 | import datetime
from djmodels.db import connection, models, transaction
from djmodels.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (
Award, AwardNote, Book, Child, Contact, Eaten, Email, File, Food, FooFile,
FooFileProxy, FooImage, FooPhoto, House, Image, Item, Location, Login,
OrderedPerson, OrgUnit, Person, Photo, PlayedWith, PlayedWithNote, Policy,
Researcher, Toy, Version,
)
# Can't run this test under SQLite, because you can't
# get two connections to an in-memory database.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
class DeleteLockingTest(TransactionTestCase):
available_apps = ['delete_regress']
def setUp(self):
# Create a second connection to the default database
self.conn2 = connection.copy()
self.conn2.set_autocommit(False)
def tearDown(self):
# Close down the second connection.
self.conn2.rollback()
self.conn2.close()
def test_concurrent_delete(self):
"""Concurrent deletes don't collide and lock the database (#9479)."""
with transaction.atomic():
Book.objects.create(id=1, pagecount=100)
Book.objects.create(id=2, pagecount=200)
Book.objects.create(id=3, pagecount=300)
with transaction.atomic():
# Start a transaction on the main connection.
self.assertEqual(3, Book.objects.count())
# Delete something using another database connection.
with self.conn2.cursor() as cursor2:
cursor2.execute("DELETE from delete_regress_book WHERE id = 1")
self.conn2.commit()
# In the same transaction on the main connection, perform a
# queryset delete that covers the object deleted with the other
# connection. This causes an infinite loop under MySQL InnoDB
# unless we keep track of already deleted objects.
Book.objects.filter(pagecount__lt=250).delete()
self.assertEqual(1, Book.objects.count())
class DeleteCascadeTests(TestCase):
def test_generic_relation_cascade(self):
"""
Django cascades deletes through generic-related objects to their
reverse relations.
"""
person = Person.objects.create(name='Nelson Mandela')
award = Award.objects.create(name='Nobel', content_object=person)
AwardNote.objects.create(note='a peace prize', award=award)
self.assertEqual(AwardNote.objects.count(), 1)
person.delete()
self.assertEqual(Award.objects.count(), 0)
# first two asserts are just sanity checks, this is the kicker:
self.assertEqual(AwardNote.objects.count(), 0)
def test_fk_to_m2m_through(self):
"""
If an M2M relationship has an explicitly-specified through model, and
some other model has an FK to that through model, deletion is cascaded
from one of the participants in the M2M, to the through model, to its
related model.
"""
juan = Child.objects.create(name='Juan')
paints = Toy.objects.create(name='Paints')
played = PlayedWith.objects.create(child=juan, toy=paints, date=datetime.date.today())
PlayedWithNote.objects.create(played=played, note='the next Jackson Pollock')
self.assertEqual(PlayedWithNote.objects.count(), 1)
paints.delete()
self.assertEqual(PlayedWith.objects.count(), 0)
# first two asserts just sanity checks, this is the kicker:
self.assertEqual(PlayedWithNote.objects.count(), 0)
def test_15776(self):
policy = Policy.objects.create(pk=1, policy_number="1234")
version = Version.objects.create(policy=policy)
location = Location.objects.create(version=version)
Item.objects.create(version=version, location=location)
policy.delete()
class DeleteCascadeTransactionTests(TransactionTestCase):
available_apps = ['delete_regress']
def test_inheritance(self):
"""
Auto-created many-to-many through tables referencing a parent model are
correctly found by the delete cascade when a child of that parent is
deleted.
Refs #14896.
"""
r = Researcher.objects.create()
email = Email.objects.create(
label="office-email", email_address="[email protected]"
)
r.contacts.add(email)
email.delete()
def test_to_field(self):
"""
Cascade deletion works with ForeignKey.to_field set to non-PK.
"""
apple = Food.objects.create(name="apple")
Eaten.objects.create(food=apple, meal="lunch")
apple.delete()
self.assertFalse(Food.objects.exists())
self.assertFalse(Eaten.objects.exists())
class LargeDeleteTests(TestCase):
def test_large_deletes(self):
"Regression for #13309 -- if the number of objects > chunk size, deletion still occurs"
for x in range(300):
Book.objects.create(pagecount=x + 100)
# attach a signal to make sure we will not fast-delete
def noop(*args, **kwargs):
pass
models.signals.post_delete.connect(noop, sender=Book)
Book.objects.all().delete()
models.signals.post_delete.disconnect(noop, sender=Book)
self.assertEqual(Book.objects.count(), 0)
class ProxyDeleteTest(TestCase):
"""
Tests on_delete behavior for proxy models.
See #16128.
"""
def create_image(self):
"""Return an Image referenced by both a FooImage and a FooFile."""
# Create an Image
test_image = Image()
test_image.save()
foo_image = FooImage(my_image=test_image)
foo_image.save()
# Get the Image instance as a File
test_file = File.objects.get(pk=test_image.pk)
foo_file = FooFile(my_file=test_file)
foo_file.save()
return test_image
def test_delete_proxy(self):
"""
Deleting the *proxy* instance bubbles through to its non-proxy and
*all* referring objects are deleted.
"""
self.create_image()
Image.objects.all().delete()
# An Image deletion == File deletion
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Image deletion cascaded and *all* references to it are deleted.
self.assertEqual(len(FooImage.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
def test_delete_proxy_of_proxy(self):
"""
Deleting a proxy-of-proxy instance should bubble through to its proxy
and non-proxy parents, deleting *all* referring objects.
"""
test_image = self.create_image()
# Get the Image as a Photo
test_photo = Photo.objects.get(pk=test_image.pk)
foo_photo = FooPhoto(my_photo=test_photo)
foo_photo.save()
Photo.objects.all().delete()
# A Photo deletion == Image deletion == File deletion
self.assertEqual(len(Photo.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Photo deletion should have cascaded and deleted *all*
# references to it.
self.assertEqual(len(FooPhoto.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_concrete_parent(self):
"""
Deleting an instance of a concrete model should also delete objects
referencing its proxy subclass.
"""
self.create_image()
File.objects.all().delete()
# A File deletion == Image deletion
self.assertEqual(len(File.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
# The File deletion should have cascaded and deleted *all* references
# to it.
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_proxy_pair(self):
"""
If a pair of proxy models are linked by an FK from one concrete parent
to the other, deleting one proxy model cascade-deletes the other, and
the deletion happens in the right order (not triggering an
IntegrityError on databases unable to defer integrity checks).
Refs #17918.
"""
# Create an Image (proxy of File) and FooFileProxy (proxy of FooFile,
# which has an FK to File)
image = Image.objects.create()
as_file = File.objects.get(pk=image.pk)
FooFileProxy.objects.create(my_file=as_file)
Image.objects.all().delete()
self.assertEqual(len(FooFileProxy.objects.all()), 0)
def test_19187_values(self):
msg = 'Cannot call delete() after .values() or .values_list()'
with self.assertRaisesMessage(TypeError, msg):
Image.objects.values().delete()
with self.assertRaisesMessage(TypeError, msg):
Image.objects.values_list().delete()
class Ticket19102Tests(TestCase):
"""
Test different queries which alter the SELECT clause of the query. We
also must be using a subquery for the deletion (that is, the original
query has a join in it). The deletion should be done as "fast-path"
deletion (that is, just one query for the .delete() call).
Note that .values() is not tested here on purpose. .values().delete()
doesn't work for non fast-path deletes at all.
"""
def setUp(self):
self.o1 = OrgUnit.objects.create(name='o1')
self.o2 = OrgUnit.objects.create(name='o2')
self.l1 = Login.objects.create(description='l1', orgunit=self.o1)
self.l2 = Login.objects.create(description='l2', orgunit=self.o2)
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_annotate(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).annotate(
n=models.Count('description')
).filter(
n=1, pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_extra(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).extra(
select={'extraf': '1'}
).filter(
pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_19102_distinct_on(self):
# Both Login objs should have same description so that only the one
# having smaller PK will be deleted.
Login.objects.update(description='description')
with self.assertNumQueries(1):
Login.objects.distinct('description').order_by('pk').filter(
orgunit__name__isnull=False
).delete()
# Assumed that l1 which is created first has smaller PK.
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_select_related(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).select_related('orgunit').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_defer(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).only('id').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
class DeleteTests(TestCase):
def test_meta_ordered_delete(self):
# When a subquery is performed by deletion code, the subquery must be
        # cleared of all ordering. There was a bug that caused _meta ordering
# to be used. Refs #19720.
h = House.objects.create(address='Foo')
OrderedPerson.objects.create(name='Jack', lives_in=h)
OrderedPerson.objects.create(name='Bob', lives_in=h)
OrderedPerson.objects.filter(lives_in__address='Foo').delete()
self.assertEqual(OrderedPerson.objects.count(), 0)
def test_foreign_key_delete_nullifies_correct_columns(self):
"""
With a model (Researcher) that has two foreign keys pointing to the
same model (Contact), deleting an instance of the target model
(contact1) nullifies the correct fields of Researcher.
"""
contact1 = Contact.objects.create(label='Contact 1')
contact2 = Contact.objects.create(label='Contact 2')
researcher1 = Researcher.objects.create(
primary_contact=contact1,
secondary_contact=contact2,
)
researcher2 = Researcher.objects.create(
primary_contact=contact2,
secondary_contact=contact1,
)
contact1.delete()
researcher1.refresh_from_db()
researcher2.refresh_from_db()
self.assertIsNone(researcher1.primary_contact)
self.assertEqual(researcher1.secondary_contact, contact2)
self.assertEqual(researcher2.primary_contact, contact2)
self.assertIsNone(researcher2.secondary_contact)
|
the-stack_0_23647 | from envs.rover_lander_1 import rover_lander_1
from envs.rover_lander_2 import rover_lander_2
import argparse
import time
import tensorflow as tf
import numpy as np
import random
import os
parser = argparse.ArgumentParser()
parser.add_argument("--model", help="Path to model to be used by the agent")
parser.add_argument("--fps", help="Frames per second", type=int, default=20)
parser.add_argument("--env", help="Env name")
parser.add_argument("--save-gif", help="Save gif", action='store_true', default=False)
args = parser.parse_args()
model_path = args.model
fps = args.fps
class Agent:
def __init__(self, model_path=None):
        self.testing = (model_path is None)
if not self.testing:
self.model = self.load_model(model_path)
def load_model(self, model_path):
return tf.keras.models.load_model(model_path)
def qs(self, state):
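        # Returns an action index: with no model loaded ("testing" mode) a
        # random action in [-1, 3] is chosen; otherwise the frame is flattened,
        # scaled to [0, 1], and the argmax over the model's Q-values is taken.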
if self.testing:
return (random.randint(0, 4) - 1)
state = state.reshape(1, np.prod(state.shape[:]))/255
return np.argmax(self.model.predict(state))
if __name__ == '__main__':
agent = Agent(model_path)
if args.env == 'rover_lander_1':
env = rover_lander_1(save_gif=args.save_gif, filename=os.path.basename(model_path).replace(".h5", ""))
elif args.env == 'rover_lander_2':
env = rover_lander_2(save_gif=args.save_gif, filename=os.path.basename(model_path).replace(".h5", ""))
for i in range(5):
state = env.reset()
while True:
time.sleep(1/fps)
env.render()
action = agent.qs(state)
state, reward, done = env.step(action)
print(action, reward, done)
if done:
break
env.export_gif() |
the-stack_0_23650 | from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from builtins import range
from django.http import HttpResponse, HttpResponseForbidden, Http404, HttpResponseRedirect, JsonResponse
from django.contrib.admin.views.decorators import user_passes_test
from django.shortcuts import get_object_or_404, render, redirect
from django.core.cache import caches
from django.db.models import Prefetch
from django.conf import settings
from rest_framework.reverse import reverse
from haystack.query import EmptySearchQuerySet, SearchQuerySet
from isisdata.models import *
from isisdata.tasks import *
import datetime
import pytz
import base64
def authority(request, authority_id):
"""
View for individual Authority entries.
"""
authority = Authority.objects.get(id=authority_id)
# Some authority entries are deleted. These should be hidden from public
# view.
if authority.record_status_value == CuratedMixin.INACTIVE or (authority.record_status == Authority.DELETE and not authority.record_status_value):
raise Http404("No such Authority")
# If the user has been redirected from another Authority entry, this should
# be indicated in the view.
redirect_from_id = request.GET.get('redirect_from')
if redirect_from_id:
redirect_from = Authority.objects.get(pk=redirect_from_id)
else:
redirect_from = None
# There are several authority entries that redirect to other entries,
# usually because the former is a duplicate of the latter.
if (authority.record_status == Authority.REDIRECT or authority.record_status_value == CuratedMixin.REDIRECT) and authority.redirect_to is not None:
redirect_kwargs = {'authority_id': authority.redirect_to.id}
base_url = reverse('authority', kwargs=redirect_kwargs)
redirect_url = base_url + '?redirect_from={0}'.format(authority.id)
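        # The resulting URL is roughly of the form
        # .../authority/<target id>/?redirect_from=<this id> (the exact path
        # depends on the URLconf), so the target page can note which duplicate
        # record the visitor arrived from.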
return HttpResponseRedirect(redirect_url)
if not authority.public:
return HttpResponseForbidden()
show_nr = 3
acrelation_qs = ACRelation.objects.filter(public=True)
related_citations_author = acrelation_qs.filter(authority=authority, type_controlled__in=['AU'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_author_count = acrelation_qs.filter(authority=authority, type_controlled__in=['AU'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_editor = acrelation_qs.filter(authority=authority, type_controlled__in=['ED'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_editor_count = acrelation_qs.filter(authority=authority, type_controlled__in=['ED'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_advisor = acrelation_qs.filter(authority=authority, type_controlled__in=['AD'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_advisor_count = acrelation_qs.filter(authority=authority, type_controlled__in=['AD'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_contributor = acrelation_qs.filter(authority=authority, type_controlled__in=['CO'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_contributor_count = acrelation_qs.filter(authority=authority, type_controlled__in=['CO'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_translator = acrelation_qs.filter(authority=authority, type_controlled__in=['TR'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_translator_count = acrelation_qs.filter(authority=authority, type_controlled__in=['TR'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_subject = acrelation_qs.filter(authority=authority, type_controlled__in=['SU'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_subject_count = acrelation_qs.filter(authority=authority, type_controlled__in=['SU'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_category = acrelation_qs.filter(authority=authority, type_controlled__in=['CA'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_category_count = acrelation_qs.filter(authority=authority, type_controlled__in=['CA'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_publisher = acrelation_qs.filter(authority=authority, type_controlled__in=['PU'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_publisher_count = acrelation_qs.filter(authority=authority, type_controlled__in=['PU'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_school = acrelation_qs.filter(authority=authority, type_controlled__in=['SC'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_school_count = acrelation_qs.filter(authority=authority, type_controlled__in=['SC'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_institution = acrelation_qs.filter(authority=authority, type_controlled__in=['IN'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_institution_count = acrelation_qs.filter(authority=authority, type_controlled__in=['IN'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_meeting = acrelation_qs.filter(authority=authority, type_controlled__in=['ME'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_meeting_count = acrelation_qs.filter(authority=authority, type_controlled__in=['ME'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_periodical = acrelation_qs.filter(authority=authority, type_controlled__in=['PE'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_periodical_count = acrelation_qs.filter(authority=authority, type_controlled__in=['PE'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
related_citations_book_series = acrelation_qs.filter(authority=authority, type_controlled__in=['BS'], citation__public=True)\
.order_by('-citation__publication_date')[:show_nr]
related_citations_book_series_count = acrelation_qs.filter(authority=authority, type_controlled__in=['BS'], citation__public=True)\
.values('citation_id').distinct('citation_id')\
.count()
# Location of authority in REST API
api_view = reverse('authority-detail', args=[authority.id], request=request)
# boxes
sqs =SearchQuerySet().models(Citation).facet('all_contributor_ids', size=100). \
facet('subject_ids', size=100).facet('institution_ids', size=100). \
facet('geographic_ids', size=100).facet('time_period_ids', size=100).\
facet('category_ids', size=100).facet('other_person_ids', size=100).\
facet('publisher_ids', size=100).facet('periodical_ids', size=100).\
facet('concepts_by_subject_ids', size=100).facet('people_by_subject_ids', size=100).\
facet('institutions_by_subject_ids', size=100).facet('dataset_typed_names', size=100).\
facet('events_timeperiods_ids', size=100)
word_cloud_results = sqs.all().exclude(public="false").filter_or(author_ids=authority_id).filter_or(contributor_ids=authority_id) \
.filter_or(editor_ids=authority_id).filter_or(subject_ids=authority_id).filter_or(institution_ids=authority_id) \
.filter_or(category_ids=authority_id).filter_or(advisor_ids=authority_id).filter_or(translator_ids=authority_id) \
.filter_or(publisher_ids=authority_id).filter_or(school_ids=authority_id).filter_or(meeting_ids=authority_id) \
.filter_or(periodical_ids=authority_id).filter_or(book_series_ids=authority_id).filter_or(time_period_ids=authority_id) \
.filter_or(geographic_ids=authority_id).filter_or(about_person_ids=authority_id).filter_or(other_person_ids=authority_id)
related_citations_count = word_cloud_results.count()
author_contributor_count = sqs.all().exclude(public="false").filter_or(author_ids=authority_id).filter_or(contributor_ids=authority_id) \
.filter_or(editor_ids=authority_id).filter_or(advisor_ids=authority_id).filter_or(translator_ids=authority_id).count()
publisher_count = sqs.all().exclude(public="false").filter_or(publisher_ids=authority_id).filter_or(periodical_ids=authority_id).count()
subject_category_count = sqs.all().exclude(public="false").filter_or(subject_ids=authority_id).filter_or(category_ids=authority_id).count()
# the following count was used before, but it seems to be off
#related_citations_count = acrelation_qs.filter(authority=authority, public=True, citation__public=True)\
# .values('citation_id').distinct('citation_id')\
# .count()
subject_ids_facet = word_cloud_results.facet_counts()['fields']['subject_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_contributors_facet = word_cloud_results.facet_counts()['fields']['all_contributor_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_institutions_facet = word_cloud_results.facet_counts()['fields']['institution_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_geographics_facet = word_cloud_results.facet_counts()['fields']['geographic_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_timeperiod_facet = word_cloud_results.facet_counts()['fields']['events_timeperiods_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_categories_facet = word_cloud_results.facet_counts()['fields']['category_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_other_person_facet = word_cloud_results.facet_counts()['fields']['other_person_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_publisher_facet = word_cloud_results.facet_counts()['fields']['publisher_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_journal_facet = word_cloud_results.facet_counts()['fields']['periodical_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_subject_concepts_facet = word_cloud_results.facet_counts()['fields']['concepts_by_subject_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_subject_people_facet = word_cloud_results.facet_counts()['fields']['people_by_subject_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_subject_institutions_facet = word_cloud_results.facet_counts()['fields']['institutions_by_subject_ids'] if 'fields' in word_cloud_results.facet_counts() else []
related_dataset_facet = word_cloud_results.facet_counts()['fields']['dataset_typed_names'] if 'fields' in word_cloud_results.facet_counts() else []
# Provide progression through search results, if present.
last_query = request.GET.get('last_query', None) #request.session.get('last_query', None)
query_string = request.GET.get('query_string', None)
fromsearch = request.GET.get('fromsearch', False)
if query_string:
query_string = query_string.encode('ascii','ignore')
search_key = base64.b64encode(query_string)
else:
search_key = None
# This is the database cache.
user_cache = caches['default']
search_results = user_cache.get('search_results_authority_' + str(search_key))
# make sure we have a session key
if hasattr(request, 'session') and not request.session.session_key:
request.session.save()
request.session.modified = True
session_id = request.session.session_key
page_authority = user_cache.get(session_id + '_page_authority', None)
if search_results and fromsearch and page_authority:
search_count = search_results.count()
prev_search_result = None
if (page_authority > 1):
prev_search_result = search_results[(page_authority - 1)*20 - 1]
# if we got to the last result of the previous page we need to count down the page number
if prev_search_result == 'isisdata.authority.' + authority_id:
page_authority = page_authority - 1
user_cache.set(session_id + '_page_authority', page_authority)
search_results_page = search_results[(page_authority - 1)*20:page_authority*20 + 2]
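        # The slice grabs the 20 results of the current page plus two extra
        # entries so that the previous/next record can still be resolved at
        # the page boundaries below.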
try:
search_index = search_results_page.index('isisdata.authority.' + authority_id) + 1 # +1 for display.
if search_index == 21:
user_cache.set(session_id + '_page_authority', page_authority+1)
except (IndexError, ValueError):
search_index = None
try:
search_next = search_results_page[search_index]
except (IndexError, ValueError, TypeError):
search_next = None
try:
search_previous = search_results_page[search_index - 2]
if search_index - 2 == -1:
search_previous = prev_search_result
# !! Why are we catching all of these errors?
except (IndexError, ValueError, AssertionError, TypeError):
search_previous = None
if search_index:
search_current = search_index + (20* (page_authority - 1))
else:
search_current = None
else:
search_index = None
search_next = None
search_previous = None
search_current = None
search_count = None
context = {
'authority_id': authority_id,
'authority': authority,
'related_citations_count': related_citations_count,
'related_citations_author': related_citations_author,
'related_citations_author_count': related_citations_author_count,
'related_citations_editor': related_citations_editor,
'related_citations_editor_count': related_citations_editor_count,
'related_citations_advisor': related_citations_advisor,
'related_citations_advisor_count': related_citations_advisor_count,
'related_citations_contributor': related_citations_contributor,
'related_citations_contributor_count': related_citations_contributor_count,
'related_citations_translator': related_citations_translator,
'related_citations_translator_count': related_citations_translator_count,
'related_citations_subject': related_citations_subject,
'related_citations_subject_count': related_citations_subject_count,
'related_citations_category': related_citations_category,
'related_citations_category_count': related_citations_category_count,
'related_citations_publisher': related_citations_publisher,
'related_citations_publisher_count': related_citations_publisher_count,
'related_citations_school': related_citations_school,
'related_citations_school_count': related_citations_school_count,
'related_citations_institution': related_citations_institution,
'related_citations_institution_count': related_citations_institution_count,
'related_citations_meeting': related_citations_meeting,
'related_citations_meeting_count': related_citations_meeting_count,
'related_citations_periodical': related_citations_periodical,
'related_citations_periodical_count': related_citations_periodical_count,
'related_citations_book_series': related_citations_book_series,
'related_citations_book_series_count': related_citations_book_series_count,
'author_contributor_count': author_contributor_count,
'publisher_count': publisher_count,
'source_instance_id': authority_id,
'subject_category_count': subject_category_count,
'source_content_type': ContentType.objects.get(model='authority').id,
'api_view': api_view,
'redirect_from': redirect_from,
'search_results': search_results,
'search_index': search_index,
'search_next': search_next,
'search_previous': search_previous,
'search_current': search_current,
'search_count': search_count,
'fromsearch': fromsearch,
'last_query': last_query,
'query_string': query_string,
'subject_ids_facet': subject_ids_facet,
'related_contributors_facet': related_contributors_facet,
'related_institutions_facet': related_institutions_facet,
'related_geographics_facet': related_geographics_facet,
'related_timeperiod_facet': related_timeperiod_facet,
'related_categories_facet': related_categories_facet,
'related_other_person_facet': related_other_person_facet,
'related_publisher_facet': related_publisher_facet,
'related_journal_facet': related_journal_facet,
'related_subject_concepts_facet': related_subject_concepts_facet,
'related_subject_people_facet': related_subject_people_facet,
'related_subject_institutions_facet': related_subject_institutions_facet,
'url_linked_data_name': settings.URL_LINKED_DATA_NAME,
'related_dataset_facet': related_dataset_facet,
}
return render(request, 'isisdata/authority.html', context)
def authority_author_timeline(request, authority_id):
now = datetime.datetime.now()
cached_timelines = CachedTimeline.objects.filter(authority_id=authority_id).order_by('-created_at')
cached_timeline = cached_timelines[0] if cached_timelines else None
timeline_to_display = cached_timeline
# let's show an old one if there is one and current calculation hasn't completed yet
if cached_timeline and not cached_timeline.complete:
if len(cached_timelines) > 1:
timeline_to_display = cached_timelines[1]
refresh_time = settings.AUTHORITY_TIMELINE_REFRESH_TIME
data = {}
    # FIXME: there seems to be a bug here; for some reason this is sometimes not true when it should be
timeline_is_outdated = cached_timeline and ((cached_timeline.created_at + datetime.timedelta(hours=refresh_time) < datetime.datetime.now(tz=pytz.utc)) or cached_timeline.recalculate)
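    # A cached timeline is considered outdated once it is older than
    # AUTHORITY_TIMELINE_REFRESH_TIME hours or has been explicitly flagged
    # for recalculation; in that case a new CachedTimeline is queued below.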
if not cached_timeline or timeline_is_outdated:
print("Refreshing timeline for " + authority_id)
timeline = CachedTimeline()
timeline.authority_id = authority_id
timeline.save()
create_timeline.apply_async(args=[authority_id, timeline.id], queue=settings.CELERY_GRAPH_TASK_QUEUE, routing_key='graph.#')
data.update({
'status': 'generating',
})
if timeline_to_display:
if timeline_to_display.complete:
year_map = { str(year.year) : year for year in timeline_to_display.years.all()}
years = [year for year in range(1970, now.year+1)]
book_count = []
thesis_count = []
chapter_count = []
article_count = []
review_count = []
other_count = []
now = datetime.datetime.now()
titles = {}
# including the current year
for running_year in range(1970, now.year+1):
running_year_str = str(running_year)
if running_year_str in year_map:
year = year_map[running_year_str]
book_count.append(year.book_count)
thesis_count.append(year.thesis_count)
chapter_count.append(year.chapter_count)
article_count.append(year.article_count)
review_count.append(year.review_count)
other_count.append(year.other_count)
titles.update({
running_year_str: {
'books': [title.title for title in year.titles.filter(citation_type=Citation.BOOK)],
'theses': [title.title for title in year.titles.filter(citation_type=Citation.THESIS)],
'chapters': [title.title for title in year.titles.filter(citation_type=Citation.CHAPTER)],
'articles': [title.title for title in year.titles.filter(citation_type=Citation.ARTICLE)],
'reviews': [title.title for title in year.titles.filter(citation_type__in=[Citation.REVIEW, Citation.ESSAY_REVIEW])],
'others': [title.title for title in year.titles.exclude(citation_type__in=[Citation.BOOK, Citation.THESIS, Citation.CHAPTER, Citation.ARTICLE, Citation.REVIEW, Citation.ESSAY_REVIEW])],
}
})
else:
book_count.append(0)
thesis_count.append(0)
chapter_count.append(0)
article_count.append(0)
review_count.append(0)
other_count.append(0)
titles.update({
running_year_str: {
'books': [],
'theses': [],
'chapters': [],
'articles': [],
'reviews': [],
'others': [],
}
})
user_init_refresh_time = settings.AUTHORITY_TIMELINE_REFRESH_TIME_USER_INIT
can_recalculate = cached_timeline.created_at + datetime.timedelta(hours=user_init_refresh_time) < datetime.datetime.now(tz=pytz.utc)
data.update({
'status': 'done',
'generated_on': timeline_to_display.created_at,
'timeline_recalculation': 'running' if timeline_to_display.recalculate or timeline_is_outdated else 'none',
'can_recalculate': can_recalculate,
'years': years,
'books': book_count,
'theses': thesis_count,
'chapters': chapter_count,
'articles': article_count,
'reviews': review_count,
'others': other_count,
'titles': titles,
})
else:
data.update({
'status': 'generating',
})
return JsonResponse(data)
@user_passes_test(lambda u: u.is_authenticated)
def timeline_recalculate(request, authority_id):
if (request.method == 'POST'):
cached_timeline = CachedTimeline.objects.filter(authority_id=authority_id).order_by('-created_at').first()
refresh_time = settings.AUTHORITY_TIMELINE_REFRESH_TIME_USER_INIT
if cached_timeline and cached_timeline.created_at + datetime.timedelta(hours=refresh_time) < datetime.datetime.now(tz=pytz.utc):
cached_timeline.recalculate = True
cached_timeline.save()
return HttpResponseRedirect(reverse('authority', args=[authority_id]))
|
the-stack_0_23653 | import torch
from rkn_cell.RKNCell import RKNCell
nn = torch.nn
class RKNLayer(nn.Module):
def __init__(self, latent_obs_dim, cell_config, dtype=torch.float32):
super().__init__()
self._lod = latent_obs_dim
self._lsd = 2 * latent_obs_dim
self._cell = RKNCell(latent_obs_dim, cell_config, dtype)
def forward(self, latent_obs, obs_vars, initial_mean, initial_cov, obs_valid=None):
"""
        This currently only returns the posteriors. If you also need the priors, uncomment the corresponding parts
:param latent_obs: latent observations
:param obs_vars: uncertainty estimate in latent observations
:param initial_mean: mean of initial belief
:param initial_cov: covariance of initial belief (as 3 vectors)
:param obs_valid: flags indicating which observations are valid, which are not
"""
        # if you need a version that also returns the priors, uncomment the respective parts below
# prepare list for return
#prior_mean_list = []
#prior_cov_list = [[], [], []]
post_mean_list = []
post_cov_list = [[], [], []]
# initialize prior
prior_mean, prior_cov = initial_mean, initial_cov
# actual computation
for i in range(latent_obs.shape[1]):
cur_obs_valid = obs_valid[:, i] if obs_valid is not None else None
post_mean, post_cov, next_prior_mean, next_prior_cov = \
self._cell(prior_mean, prior_cov, latent_obs[:, i], obs_vars[:, i], cur_obs_valid)
post_mean_list.append(post_mean)
[post_cov_list[i].append(post_cov[i]) for i in range(3)]
#prior_mean_list.append(next_prior_mean)
#[prior_cov_list[i].append(next_prior_cov[i]) for i in range(3)]
prior_mean = next_prior_mean
prior_cov = next_prior_cov
# stack results
#prior_means = torch.stack(prior_mean_list, 1)
#prior_covs = [torch.stack(x, 1) for x in prior_cov_list]
post_means = torch.stack(post_mean_list, 1)
post_covs = [torch.stack(x, 1) for x in post_cov_list]
return post_means, post_covs #, prior_means, prior_covs
|
the-stack_0_23655 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddleseg.cvlibs import manager, param_init
from paddleseg.models import layers
from paddleseg.utils import utils
@manager.MODELS.add_component
class SegNet(nn.Layer):
"""
The SegNet implementation based on PaddlePaddle.
The original article refers to
Badrinarayanan, Vijay, et al. "SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation"
(https://arxiv.org/pdf/1511.00561.pdf).
Args:
        num_classes (int): The unique number of target classes.
        pretrained (str, optional): The path or url of the pretrained model. Default: None.
    """
def __init__(self, num_classes, pretrained=None):
super().__init__()
# Encoder Module
self.enco1 = nn.Sequential(
layers.ConvBNReLU(
3, 64, 3, padding=1),
layers.ConvBNReLU(
64, 64, 3, padding=1))
self.enco2 = nn.Sequential(
layers.ConvBNReLU(
64, 128, 3, padding=1),
layers.ConvBNReLU(
128, 128, 3, padding=1))
self.enco3 = nn.Sequential(
layers.ConvBNReLU(
128, 256, 3, padding=1),
layers.ConvBNReLU(
256, 256, 3, padding=1),
layers.ConvBNReLU(
256, 256, 3, padding=1))
self.enco4 = nn.Sequential(
layers.ConvBNReLU(
256, 512, 3, padding=1),
layers.ConvBNReLU(
512, 512, 3, padding=1),
layers.ConvBNReLU(
512, 512, 3, padding=1))
self.enco5 = nn.Sequential(
layers.ConvBNReLU(
512, 512, 3, padding=1),
layers.ConvBNReLU(
512, 512, 3, padding=1),
layers.ConvBNReLU(
512, 512, 3, padding=1))
# Decoder Module
self.deco1 = nn.Sequential(
layers.ConvBNReLU(
512, 512, 3, padding=1),
layers.ConvBNReLU(
512, 512, 3, padding=1),
layers.ConvBNReLU(
512, 512, 3, padding=1))
self.deco2 = nn.Sequential(
layers.ConvBNReLU(
512, 512, 3, padding=1),
layers.ConvBNReLU(
512, 512, 3, padding=1),
layers.ConvBNReLU(
512, 256, 3, padding=1))
self.deco3 = nn.Sequential(
layers.ConvBNReLU(
256, 256, 3, padding=1),
layers.ConvBNReLU(
256, 256, 3, padding=1),
layers.ConvBNReLU(
256, 128, 3, padding=1))
self.deco4 = nn.Sequential(
layers.ConvBNReLU(
128, 128, 3, padding=1),
layers.ConvBNReLU(
128, 128, 3, padding=1),
layers.ConvBNReLU(
128, 64, 3, padding=1))
self.deco5 = nn.Sequential(
layers.ConvBNReLU(
64, 64, 3, padding=1),
nn.Conv2D(
64, num_classes, kernel_size=3, padding=1), )
self.pretrained = pretrained
self.init_weight()
def init_weight(self):
if self.pretrained is not None:
utils.load_entire_model(self, self.pretrained)
def forward(self, x):
logit_list = []
x = self.enco1(x)
x, ind1 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
size1 = x.shape
x = self.enco2(x)
x, ind2 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
size2 = x.shape
x = self.enco3(x)
x, ind3 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
size3 = x.shape
x = self.enco4(x)
x, ind4 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
size4 = x.shape
x = self.enco5(x)
x, ind5 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
size5 = x.shape
x = F.max_unpool2d(
x, indices=ind5, kernel_size=2, stride=2, output_size=size4)
x = self.deco1(x)
x = F.max_unpool2d(
x, indices=ind4, kernel_size=2, stride=2, output_size=size3)
x = self.deco2(x)
x = F.max_unpool2d(
x, indices=ind3, kernel_size=2, stride=2, output_size=size2)
x = self.deco3(x)
x = F.max_unpool2d(
x, indices=ind2, kernel_size=2, stride=2, output_size=size1)
x = self.deco4(x)
x = F.max_unpool2d(x, indices=ind1, kernel_size=2, stride=2)
x = self.deco5(x)
logit_list.append(x)
return logit_list
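if __name__ == '__main__':
    # Quick shape check (added for illustration; not part of the original
    # PaddleSeg file). Input height/width should be divisible by 32 because
    # the encoder applies five 2x2 max-poolings.
    model = SegNet(num_classes=19)
    x = paddle.randn([1, 3, 224, 224])
    logits = model(x)
    print([t.shape for t in logits])  # expected: [[1, 19, 224, 224]]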
|
the-stack_0_23656 | """
Copyright (C) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from argparse import Namespace
import numpy as np
from extensions.front.onnx.dequantize_linear_resolver import DequantizeLinearResolver
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph
nodes1_attributes = {
'input': {'kind': 'op', 'op': 'AnyOp'},
'dequantize': {'kind': 'op', 'op': 'DequantizeLinear'},
'scale_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
'zerop_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
'out': {'kind': 'op', 'op': 'AnyOp'},
}
nodes_ref_attributes = {
'input': {'kind': 'op', 'op': 'AnyOp'},
'cast': {'kind': 'op', 'op': 'Cast', 'type': 'Convert'},
'sub': {'kind': 'op', 'op': 'Sub', 'type': 'Subtract'},
'mul': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'},
'scale_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
'zerop_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
'out': {'kind': 'op', 'op': 'AnyOp'},
}
class TestDequantizeLinearResolver(unittest.TestCase):
def test_dequantize(self):
graph = build_graph(nodes1_attributes,
[('input', 'dequantize'),
('scale_param_dq', 'dequantize'),
('zerop_param_dq', 'dequantize'),
('dequantize', 'out'),
],
{'scale_param_dq': {'shape': np.array([]), 'value': np.float32(1.0 / 255)},
'zerop_param_dq': {'shape': np.array([]), 'value': np.uint8(0)},
}, nodes_with_edges_only=True)
graph.graph['cmd_params'] = Namespace(keep_shape_ops=True, data_type='FP32')
graph_ref = build_graph(nodes_ref_attributes,
[('input', 'cast'),
('cast', 'sub'),
('zerop_param_dq', 'sub'),
('sub', 'mul'),
('scale_param_dq', 'mul'),
('mul', 'out'),
],
{'scale_param_dq': {'shape': np.array([]), 'value': np.float32(1.0 / 255)},
'zerop_param_dq': {'shape': np.array([]), 'value': np.uint8(0)}
}, nodes_with_edges_only=True)
graph.stage = 'front'
DequantizeLinearResolver().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_dequantize_no_zerop(self):
graph = build_graph(nodes1_attributes,
[('input', 'dequantize'),
('scale_param_dq', 'dequantize'),
('dequantize', 'out'),
],
{'scale_param_dq': {'shape': np.array([]), 'value': np.float32(1.0 / 255)},
}, nodes_with_edges_only=True)
graph.graph['cmd_params'] = Namespace(keep_shape_ops=True, data_type='FP32')
graph_ref = build_graph(nodes_ref_attributes,
[('input', 'cast'),
('cast', 'mul'),
('scale_param_dq', 'mul'),
('mul', 'out'),
],
{'scale_param_dq': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}
}, nodes_with_edges_only=True)
graph.stage = 'front'
DequantizeLinearResolver().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True)
self.assertTrue(flag, resp)
|
the-stack_0_23657 | #!/usr/bin/env python3
import json
import sys
import os.path
import fnmatch
def add_ricardian_contracts_to_actions(source_abi_directory, contract_name, abi_actions):
abi_actions_with_ricardian_contracts = []
for abi_action in abi_actions:
action_name = abi_action["name"]
contract_action_filename = '{contract_name}-{action_name}-rc.md'.format(contract_name = contract_name, action_name = action_name)
# check for rc file
rc_contract_path = os.path.join(source_abi_directory, contract_action_filename)
if os.path.exists(rc_contract_path):
print('Importing Contract {contract_action_filename} for {contract_name}:{action_name}'.format(
contract_action_filename = contract_action_filename,
contract_name = contract_name,
action_name = action_name
))
with open(rc_contract_path) as contract_file_handle:
contract_contents = contract_file_handle.read()
abi_action['ricardian_contract'] = contract_contents
else:
            print('Did not find ricardian contract file {contract_action_filename} for {contract_name}:{action_name}, skipping inclusion'.format(
contract_action_filename = contract_action_filename,
contract_name = contract_name,
action_name = action_name
))
abi_actions_with_ricardian_contracts.append(abi_action)
return abi_actions_with_ricardian_contracts
def create_ricardian_clauses_list(source_abi_directory, contract_name):
clause_file_pattern = '*-clause*-rc.md'
clause_files = fnmatch.filter(os.listdir(source_abi_directory), clause_file_pattern)
clause_prefix = 'clause-'
clause_postfix = '-rc.md'
abi_ricardian_clauses = []
for clause_file_name in clause_files:
rc_contract_path = os.path.join(source_abi_directory, clause_file_name)
with open(rc_contract_path) as contract_file_handle:
contract_contents = contract_file_handle.read()
start_of_clause_id = clause_file_name.index( clause_prefix ) + len( clause_prefix )
end_of_clause_id = clause_file_name.rindex(clause_postfix, start_of_clause_id)
clause_id = clause_file_name[start_of_clause_id:end_of_clause_id]
abi_ricardian_clauses.append({
'id': clause_id,
'body': contract_contents
})
return abi_ricardian_clauses
def add_ricardian_contracts_to_abi(source_abi, output_abi):
source_abi_directory = os.path.dirname(source_abi)
contract_name = os.path.split(source_abi)[1].rpartition(".")[0]
print('Creating {output_abi} with ricardian contracts included'.format(output_abi = output_abi))
with open(source_abi, 'r') as source_abi_file:
source_abi_json = json.load(source_abi_file)
source_abi_json['actions'] = add_ricardian_contracts_to_actions(source_abi_directory, contract_name, source_abi_json['actions'])
source_abi_json['ricardian_clauses'] = create_ricardian_clauses_list(source_abi_directory, contract_name)
with open(output_abi, 'w') as output_abi_file:
json.dump(source_abi_json, output_abi_file, indent=2)
def import_ricardian_to_abi(source_abi, output_abi):
if not os.path.exists(source_abi):
print('Source ABI not found in {source_abi}'.format(source_abi = source_abi))
sys.exit(0)
if os.path.exists(output_abi):
overwrite_prompt_response = input('Output ABI {output_abi} already exists, do you want to proceed? (y|n): '.format(output_abi = output_abi))
if overwrite_prompt_response == 'y':
print('Overwriting existing output abi')
add_ricardian_contracts_to_abi(source_abi, output_abi)
sys.exit(0)
else:
print('User aborted, not overwriting existing abi')
sys.exit(0)
else:
add_ricardian_contracts_to_abi(source_abi, output_abi)
def write_rc_file(path, filename, content):
output_filename = os.path.join(path, filename)
write_file = True
if os.path.exists(output_filename):
overwrite_prompt_response = input('Output rc {output_filename} already exists, do you want to proceed? (y|n): '.format(output_filename = output_filename))
if overwrite_prompt_response == 'y':
print('Overwriting existing output rc')
elif overwrite_prompt_response == 'n':
print('Skipping overwrite of {output_filename}'.format(output_filename = output_filename))
write_file = False
if write_file:
with open(output_filename, 'w') as text_file:
print(content, file=text_file)
print('Wrote {output_filename}'.format(output_filename = output_filename))
def export_ricardian_from_abi(source_abi):
source_abi_directory = os.path.dirname(source_abi)
contract_name = os.path.split(source_abi)[1].rpartition(".")[0]
if not os.path.exists(source_abi):
print('Source ABI not found in {source_abi}'.format(source_abi = source_abi))
sys.exit(0)
with open(source_abi, 'r') as source_abi_file:
source_abi_json = json.load(source_abi_file)
for abi_action in source_abi_json['actions']:
output_action_rc_file_name = '{contract_name}-{action_name}-rc.md'.format(contract_name = contract_name, action_name = abi_action['name'])
write_rc_file(source_abi_directory, output_action_rc_file_name, abi_action['ricardian_contract'])
for abi_clause in source_abi_json['ricardian_clauses']:
output_clause_rc_file_name = '{contract_name}-clause-{clause_id}-rc.md'.format(contract_name = contract_name, clause_id = abi_clause['id'])
write_rc_file(source_abi_directory, output_clause_rc_file_name, abi_clause['body'])
def main():
if len(sys.argv) == 1:
print('Please specify an operation of export or import: ./ricardseat.py <import|export>')
sys.exit(1)
if sys.argv[1] == 'import':
if len(sys.argv) != 4:
print('Please specify a source and destination abi:')
print('Usage: ./ricardseat.py import /seat/contracts/contract/mycontract.abi /seat/contracts/contract/withricardian-mycontract.abi')
sys.exit(0)
else:
import_ricardian_to_abi(sys.argv[2], sys.argv[3])
sys.exit(0)
elif sys.argv[1] == 'export':
if len(sys.argv) != 3:
print('Please specify a source abi:')
print('Usage: ./ricardseat.py export /seat/contracts/contract/mycontract.abi')
sys.exit(0)
else:
export_ricardian_from_abi(sys.argv[2])
sys.exit(0)
else:
print('Operation not recognized only import and export operations are supported')
if __name__ == '__main__':
main()
|
the-stack_0_23658 | """
A simple Biothings API implementation.
* Process command line arguments to set up the API.
* Add additional application settings like handlers.
* ``port``: the port to start the API on, **default** 8000
* ``debug``: start the API in debug mode, **default** False
* ``address``: the address to start the API on, **default** 0.0.0.0
* ``autoreload``: restart the server when file changes, **default** False
* ``conf``: choose an alternative setting, **default** config
* ``dir``: path to app directory. **default**: current working directory
"""
import logging
import os
import sys
import tornado.httpclient
import tornado.httpserver
import tornado.ioloop
import tornado.log
import tornado.web
from biothings import __version__
from biothings.web.applications import BiothingsAPI
from biothings.web.settings import configs
from tornado.options import define, options
logger = logging.getLogger(__name__)
class BiothingsAPILauncher():
"""
Configure a Biothings Web API Server.
There are three parts to it:
* A biothings config module that defines the API handlers.
* Additional Tornado handlers and application settings.
* An asyncio event loop to run the tornado application.
The API can be started with:
* An external event loop by calling get_server()
* A default tornado event loop by calling start()
Unless started externally, debug mode:
* Sets proper logging levels for root logger and es,
* Enables debug mode on tornado except for autoreload,
* Disables integrated tracking and error monitoring.
"""
def __init__(self, config=None):
# About debug mode in tornado:
# https://www.tornadoweb.org/en/stable/guide/running.html \
# #debug-mode-and-automatic-reloading
logging.info("Biothings API %s", __version__)
self.config = configs.load(config)
self.handlers = [] # additional handlers
self.settings = dict(debug=False)
self.host = None
@staticmethod
def use_curl():
"""
Use curl implementation for tornado http clients.
More on https://www.tornadoweb.org/en/stable/httpclient.html
"""
tornado.httpclient.AsyncHTTPClient.configure(
"tornado.curl_httpclient.CurlAsyncHTTPClient")
def update(self, **settings):
"""
Update Tornado application settings. More on:
https://www.tornadoweb.org/en/stable/web.html \
#tornado.web.Application.settings
"""
self.settings.update(settings)
def _configure_logging(self):
root_logger = logging.getLogger()
# self.config.configure_logger(root_logger)
# try:
# if self.LOGGING_FORMAT and logger.hasHandlers():
# for handler in logger.handlers:
# if isinstance(handler.formatter, tornado.log.LogFormatter):
# handler.formatter._fmt = self.LOGGING_FORMAT
# except Exception:
# self.logger.exception('Error configuring logger %s.', logger)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
if self.settings['debug']:
root_logger.setLevel(logging.DEBUG)
es_tracer = logging.getLogger('elasticsearch.trace')
es_tracer.setLevel(logging.DEBUG)
es_tracer.addHandler(logging.NullHandler())
else:
root_logger.setLevel(logging.INFO)
def get_server(self):
"""
Run API in an external event loop.
"""
webapp = BiothingsAPI.get_app(self.config, self.settings, self.handlers)
server = tornado.httpserver.HTTPServer(webapp, xheaders=True)
return server
def start(self, port=8000):
"""
Run API in the default event loop.
"""
self._configure_logging()
http_server = self.get_server()
http_server.listen(port, self.host)
logger.info(
'Server is running on "%s:%s"...',
self.host or '0.0.0.0', port
)
loop = tornado.ioloop.IOLoop.instance()
loop.start()
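# Usage sketch (added for illustration; not part of the original module).
# "config" below is assumed to be an importable biothings config module name,
# mirroring the class docstring above:
#
#   launcher = BiothingsAPILauncher("config")
#   launcher.update(debug=True)
#   launcher.start(port=8000)        # run in the default tornado event loop
#
#   # or drive it from an external event loop:
#   server = launcher.get_server()
#   server.listen(8000)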
define("port", default=8000, help="run on the given port")
define("debug", default=False, help="debug settings like logging preferences")
define("address", default=None, help="host address to listen to, default to all interfaces")
define("autoreload", default=False, help="auto reload the web server when file change detected")
define("conf", default='config', help="specify a config module name to import")
define("dir", default=os.getcwd(), help="path to app directory that includes config.py")
def main(app_handlers=None, app_settings=None, use_curl=False):
""" Start a Biothings API Server
:param app_handlers: additional web handlers to add to the app
:param app_settings: `Tornado application settings dictionary
<http://www.tornadoweb.org/en/stable/web.html#tornado.web.Application.settings>`_
    :param use_curl: Override the default simple_httpclient with curl_httpclient
<https://www.tornadoweb.org/en/stable/httpclient.html>
"""
    # TODO: this section is likely to have problems
options.parse_command_line()
_path = os.path.abspath(options.dir)
if _path not in sys.path:
sys.path.append(_path)
del _path
app_handlers = app_handlers or []
app_settings = app_settings or {}
launcher = BiothingsAPILauncher(options.conf)
if app_settings:
launcher.settings.update(app_settings)
if app_handlers:
launcher.handlers = app_handlers
if use_curl:
launcher.use_curl()
launcher.host = options.address
launcher.update(debug=options.debug)
launcher.update(autoreload=options.autoreload)
launcher.start(options.port)
if __name__ == '__main__':
main()
# def configure_logger(self, logger):
# '''
# Configure a logger's formatter to use the format defined in this web setting.
# '''
# try:
# if self.LOGGING_FORMAT and logger.hasHandlers():
# for handler in logger.handlers:
# if isinstance(handler.formatter, tornado.log.LogFormatter):
# handler.formatter._fmt = self.LOGGING_FORMAT
# except Exception:
# self.logger.exception('Error configuring logger %s.', logger)
# async def _initialize(self):
# # failures will be logged concisely
# logging.getLogger('elasticsearch.trace').propagate = False
# await self.connections.log_versions()
# # populate source mappings
# for biothing_type in self.ES_INDICES:
# await self.metadata.refresh(biothing_type)
# # resume normal log flow
# logging.getLogger('elasticsearch.trace').propagate = True
# TODO logging config, add reference to es trace.
|
the-stack_0_23660 | #!/usr/bin/env python
# pylint: disable=C0111
import itertools
import json
import os
import re
from pathlib import Path
from typing import AnyStr, Callable, List, Match, Pattern, Sequence, Set, Tuple, Union
# NB: replacement matching groups should be in the \1 format instead of $1
from ltpylib import strings
def convert_to_path(path: Union[Path, str]) -> Path:
if isinstance(path, str):
return Path(path)
return path
def replace_matches_in_file(
file: Union[str, Path],
search_string: str,
replacement: Union[str, Callable[[Match], str]],
quote_replacement: Union[bool, str] = False,
wrap_replacement_in_function: Union[bool, str] = False,
force_replace: bool = False,
flags: Union[int, re.RegexFlag] = 0,
) -> bool:
if isinstance(quote_replacement, str):
quote_replacement = strings.convert_to_bool(quote_replacement)
if isinstance(wrap_replacement_in_function, str):
wrap_replacement_in_function = strings.convert_to_bool(wrap_replacement_in_function)
if isinstance(force_replace, str):
force_replace = strings.convert_to_bool(force_replace)
if isinstance(flags, str):
flags = strings.convert_to_number(flags)
if quote_replacement and isinstance(replacement, str):
replacement = re.escape(replacement)
elif wrap_replacement_in_function and isinstance(replacement, str):
replacement_content = replacement
def replacement_function(match: Match) -> str:
return replacement_content
replacement = replacement_function
content = read_file(file)
content_new = re.sub(search_string, replacement, content, flags=flags)
if content != content_new:
write_file(file, content_new)
return True
elif force_replace and re.search(search_string, content, flags=flags):
write_file(file, content_new)
return True
return False
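# Example (added for illustration), following the note near the top of this
# module about using "\1"-style replacement groups rather than "$1":
#
#   replace_matches_in_file("settings.ini", r"version=(\d+)", r"version=\1-beta")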
def replace_strings_in_file(
file: Union[str, Path],
search_string: str,
replacement: str,
count: int = -1,
force_replace: bool = False,
) -> bool:
if isinstance(count, str):
count = strings.convert_to_number(count)
if isinstance(force_replace, str):
force_replace = strings.convert_to_bool(force_replace)
content: str = read_file(file)
content_new = content.replace(search_string, replacement, count)
if content != content_new:
write_file(file, content_new)
return True
elif force_replace and search_string in content:
write_file(file, content_new)
return True
return False
def remove_matching_lines_in_file(
file: Union[str, Path],
search_string: str,
quote_search_string: bool = False,
flags: Union[int, re.RegexFlag] = 0,
) -> bool:
if isinstance(quote_search_string, str):
quote_search_string = strings.convert_to_bool(quote_search_string)
if isinstance(flags, str):
flags = strings.convert_to_number(flags)
if quote_search_string:
search_string = re.escape(search_string)
content: str = read_file(file)
matcher = re.compile(search_string, flags=flags)
has_match = False
new_lines = []
for line in content.splitlines():
if matcher.search(line):
has_match = True
else:
new_lines.append(line)
if has_match:
write_file(file, "\n".join(new_lines))
return True
return False
def chmod_proc(perms: str, file: Union[str, Path]) -> int:
import subprocess
file = convert_to_path(file)
return subprocess.call(["chmod", perms, file.as_posix()])
def read_file(file: Union[str, Path]) -> AnyStr:
file = convert_to_path(file)
with open(file.as_posix(), 'r') as fr:
content = fr.read()
return content
def read_json_file(file: Union[str, Path]) -> Union[dict, list]:
file = convert_to_path(file)
with open(file.as_posix(), 'r') as fr:
loaded_json = json.load(fr)
return loaded_json
def read_file_n_lines(file: Union[str, Path], n_lines: int = -1) -> List[str]:
file = convert_to_path(file)
lines: List[str] = []
with open(file.as_posix()) as fr:
if n_lines < 0:
while True:
line = fr.readline()
if not line:
break
lines.append(line.rstrip('\n'))
else:
for n in range(n_lines):
line = fr.readline()
if not line:
break
lines.append(line.rstrip('\n'))
return lines
def write_file(file: Union[str, Path], contents: AnyStr):
file = convert_to_path(file)
with open(file.as_posix(), 'w') as fw:
fw.write(contents)
def append_file(file: Union[str, Path], contents: AnyStr):
file = convert_to_path(file)
with open(file.as_posix(), 'a') as fw:
fw.write(contents)
def list_files(base_dir: Path, globs: List[str] = ('**/*',)) -> List[Path]:
files: Set[Path] = set()
file: Path = None
for file in list(itertools.chain(*[base_dir.glob(glob) for glob in globs])):
if file.is_file():
files.add(file)
files_list = list(files)
files_list.sort()
return files_list
def list_dirs(base_dir: Path, globs: List[str] = ('**/*',)) -> List[Path]:
dirs: Set[Path] = set()
child_dir: Path = None
for child_dir in list(itertools.chain(*[base_dir.glob(glob) for glob in globs])):
if child_dir.is_dir():
dirs.add(child_dir)
dirs_list = list(dirs)
dirs_list.sort()
return dirs_list
def filter_files_with_matching_line(files: List[Union[str, Path]], regexes: List[Union[str, Pattern]], check_n_lines: int = 1) -> List[Path]:
filtered: List[Path] = []
for file in files:
lines = read_file_n_lines(file, check_n_lines)
has_match = False
for line in lines:
for regex in regexes:
if re.search(regex, line):
has_match = True
break
if has_match:
break
if not has_match:
filtered.append(file)
return filtered
def find_children(
base_dir: Union[Path, str],
break_after_match: bool = False,
max_depth: int = -1,
include_dirs: bool = True,
include_files: bool = True,
match_absolute_path: bool = False,
include_patterns: Sequence[str] = None,
exclude_patterns: Sequence[str] = None,
includes: Sequence[str] = None,
excludes: Sequence[str] = None,
recursion_include_patterns: Sequence[str] = None,
recursion_exclude_patterns: Sequence[str] = None,
recursion_includes: Sequence[str] = None,
recursion_excludes: Sequence[str] = None
) -> List[Path]:
if isinstance(base_dir, str):
top = str(base_dir)
else:
top = base_dir.as_posix()
found_dirs = []
return _find_children(
top,
found_dirs,
1,
break_after_match,
max_depth,
include_dirs,
include_files,
match_absolute_path,
include_patterns,
exclude_patterns,
includes,
excludes,
recursion_include_patterns,
recursion_exclude_patterns,
recursion_includes,
recursion_excludes
)[1]
def _find_children(
top: str,
found_dirs: List[Path],
current_depth: int,
break_after_match: bool,
max_depth: int,
include_dirs: bool,
include_files: bool,
match_absolute_path: bool,
include_patterns: Sequence[str],
exclude_patterns: Sequence[str],
includes: Sequence[str],
excludes: Sequence[str],
recursion_include_patterns: Sequence[str],
recursion_exclude_patterns: Sequence[str],
recursion_includes: Sequence[str],
recursion_excludes: Sequence[str]
) -> Tuple[bool, List[Path]]:
from ltpylib import filters
found_match = False
scandir_it = os.scandir(top)
dirs = []
with scandir_it:
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError:
return found_match, found_dirs
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
          # a directory, same behaviour as os.path.isdir().
is_dir = False
if is_dir and not include_dirs:
continue
elif not is_dir and not include_files:
continue
child = entry.name
full_path = os.path.join(top, child)
test_value = child if not match_absolute_path else full_path
include = filters.should_include(
test_value,
include_patterns=include_patterns,
exclude_patterns=exclude_patterns,
includes=includes,
excludes=excludes,
)
if include:
found_match = True
found_dirs.append(Path(full_path))
if break_after_match:
break
if is_dir:
include_child = filters.should_include(
test_value,
include_patterns=recursion_include_patterns,
exclude_patterns=recursion_exclude_patterns,
includes=recursion_includes,
excludes=recursion_excludes,
)
if include_child:
dirs.append(child)
if (max_depth <= -1 or current_depth < max_depth) and (not found_match or not break_after_match):
for dirname in dirs:
_find_children(
os.path.join(top, dirname),
found_dirs,
current_depth + 1,
break_after_match,
max_depth,
include_dirs,
include_files,
match_absolute_path,
include_patterns,
exclude_patterns,
includes,
excludes,
recursion_include_patterns,
recursion_exclude_patterns,
recursion_includes,
recursion_excludes
)
return found_match, found_dirs
def _main():
import sys
result = globals()[sys.argv[1]](*sys.argv[2:])
if result is not None:
print(result)
if __name__ == "__main__":
try:
_main()
except KeyboardInterrupt:
exit(130)
|
the-stack_0_23661 | #!/usr/bin/env python
# encoding: utf-8
# Jérôme Carretero, 2013 (zougloub)
"""
reStructuredText support (experimental)
Example::
def configure(conf):
conf.load('rst')
if not conf.env.RST2HTML:
conf.fatal('The program rst2html is required')
def build(bld):
bld(
features = 'rst',
type = 'rst2html', # rst2html, rst2pdf, ...
source = 'index.rst', # mandatory, the source
deps = 'image.png', # to give additional non-trivial dependencies
)
By default the tool looks for a set of programs in PATH.
The tools are defined in `rst_progs`.
To configure with a special program use::
$ RST2HTML=/path/to/rst2html waf configure
This tool is experimental; don't hesitate to contribute to it.
"""
import re
from waflib import Node, Utils, Task, Errors, Logs
from waflib.TaskGen import feature, before_method
rst_progs = "rst2html rst2xetex rst2latex rst2xml rst2pdf rst2s5 rst2man rst2odt rst2rtf".split()
def parse_rst_node(node, nodes, names, seen):
# TODO add extensibility, to handle custom rst include tags...
if node in seen:
return
seen.append(node)
code = node.read()
re_rst = re.compile(r'^\s*.. ((?P<subst>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$', re.M)
for match in re_rst.finditer(code):
ipath = match.group('file')
itype = match.group('type')
Logs.debug("rst: visiting %s: %s" % (itype, ipath))
found = node.parent.find_resource(ipath)
if found:
nodes.append(found)
if itype == 'include':
parse_rst_node(found, nodes, names, seen)
else:
names.append(ipath)
class docutils(Task.Task):
"""
Compile a rst file.
"""
def scan(self):
"""
A recursive regex-based scanner that finds rst dependencies.
"""
nodes = []
names = []
seen = []
node = self.inputs[0]
if not node:
return (nodes, names)
parse_rst_node(node, nodes, names, seen)
Logs.debug("rst: %s: found the following file deps: %s" % (repr(self), nodes))
if names:
Logs.warn("rst: %s: could not find the following file deps: %s" % (repr(self), names))
return (nodes, names)
def check_status(self, msg, retcode):
"""
Check an exit status and raise an error with a particular message
:param msg: message to display if the code is non-zero
:type msg: string
:param retcode: condition
:type retcode: boolean
"""
if retcode != 0:
raise Errors.WafError("%r command exit status %r" % (msg, retcode))
def run(self):
"""
Runs the rst compilation using docutils
"""
raise NotImplementedError()
class rst2html(docutils):
color = 'BLUE'
def __init__(self, *args, **kw):
docutils.__init__(self, *args, **kw)
self.command = self.generator.env.RST2HTML
self.attributes = ['stylesheet']
def scan(self):
nodes, names = docutils.scan(self)
for attribute in self.attributes:
stylesheet = getattr(self.generator, attribute, None)
if stylesheet is not None:
ssnode = self.generator.to_nodes(stylesheet)[0]
nodes.append(ssnode)
Logs.debug("rst: adding dep to %s %s" % (attribute, stylesheet))
return nodes, names
def run(self):
cwdn = self.outputs[0].parent
src = self.inputs[0].path_from(cwdn)
dst = self.outputs[0].path_from(cwdn)
cmd = self.command + [src, dst]
cmd += Utils.to_list(getattr(self.generator, 'options', []))
for attribute in self.attributes:
stylesheet = getattr(self.generator, attribute, None)
if stylesheet is not None:
stylesheet = self.generator.to_nodes(stylesheet)[0]
cmd += ['--%s' % attribute, stylesheet.path_from(cwdn)]
return self.exec_command(cmd, cwd=cwdn.abspath())
class rst2s5(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2S5
self.attributes = ['stylesheet']
class rst2latex(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2LATEX
self.attributes = ['stylesheet']
class rst2xetex(rst2html):
def __init__(self, *args, **kw):
rst2html.__init__(self, *args, **kw)
self.command = self.generator.env.RST2XETEX
self.attributes = ['stylesheet']
class rst2pdf(docutils):
color = 'BLUE'
def run(self):
cwdn = self.outputs[0].parent
src = self.inputs[0].path_from(cwdn)
dst = self.outputs[0].path_from(cwdn)
cmd = self.generator.env.RST2PDF + [src, '-o', dst]
cmd += Utils.to_list(getattr(self.generator, 'options', []))
return self.exec_command(cmd, cwd=cwdn.abspath())
@feature('rst')
@before_method('process_source')
def apply_rst(self):
"""
Create :py:class:`rst` or other rst-related task objects
"""
if self.target:
if isinstance(self.target, Node.Node):
tgt = self.target
elif isinstance(self.target, str):
tgt = self.path.get_bld().make_node(self.target)
else:
self.bld.fatal("rst: Don't know how to build target name %s which is not a string or Node for %s" % (self.target, self))
else:
tgt = None
tsk_type = getattr(self, 'type', None)
src = self.to_nodes(self.source)
assert len(src) == 1
src = src[0]
if tsk_type is not None and tgt is None:
if tsk_type.startswith('rst2'):
ext = tsk_type[4:]
else:
self.bld.fatal("rst: Could not detect the output file extension for %s" % self)
tgt = src.change_ext('.%s' % ext)
elif tsk_type is None and tgt is not None:
out = tgt.name
ext = out[out.rfind('.')+1:]
self.type = 'rst2' + ext
elif tsk_type is not None and tgt is not None:
# the user knows what he wants
pass
else:
self.bld.fatal("rst: Need to indicate task type or target name for %s" % self)
deps_lst = []
if getattr(self, 'deps', None):
deps = self.to_list(self.deps)
for filename in deps:
n = self.path.find_resource(filename)
if not n:
self.bld.fatal('Could not find %r for %r' % (filename, self))
if not n in deps_lst:
deps_lst.append(n)
try:
task = self.create_task(self.type, src, tgt)
except KeyError:
self.bld.fatal("rst: Task of type %s not implemented (created by %s)" % (self.type, self))
task.env = self.env
# add the manual dependencies
if deps_lst:
try:
lst = self.bld.node_deps[task.uid()]
for n in deps_lst:
if not n in lst:
lst.append(n)
except KeyError:
self.bld.node_deps[task.uid()] = deps_lst
inst_to = getattr(self, 'install_path', None)
if inst_to:
self.install_task = self.bld.install_files(inst_to, task.outputs[:], env=self.env)
self.source = []
def configure(self):
"""
Try to find the rst programs.
Do not raise any error if they are not found.
You'll have to use additional code in configure() to die
if programs were not found.
"""
for p in rst_progs:
self.find_program(p, mandatory=False)
|
the-stack_0_23666 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import unittest
import mock
from client import brain, test_mic
DEFAULT_PROFILE = {
'prefers_email': False,
'location': 'Cape Town',
'timezone': 'US/Eastern',
'phone_number': '012344321'
}
class TestBrain(unittest.TestCase):
@staticmethod
def _emptyBrain():
mic = test_mic.Mic([])
profile = DEFAULT_PROFILE
return brain.Brain(mic, profile)
def testLog(self):
"""Does Brain correctly log errors when raised by modules?"""
my_brain = TestBrain._emptyBrain()
unclear = my_brain.modules[-1]
with mock.patch.object(unclear, 'handle') as mocked_handle:
with mock.patch.object(my_brain._logger, 'error') as mocked_log:
mocked_handle.side_effect = KeyError('foo')
my_brain.query("zzz gibberish zzz")
self.assertTrue(mocked_log.called)
|
the-stack_0_23668 | import argparse
import os
import pymaster as nmt
import healpy
import numpy as np
import sys
sys.path.append("../tools/")
from misc_utils import read_partial_map, file_header
def make_maps(nside, e1, e2, w, idx, rotate=False):
n_pix = healpy.nside2npix(nside)
if rotate:
alpha = np.pi*np.random.rand(len(e1))
e = np.sqrt(e1**2 + e2**2)
e1 = np.cos(2.0*alpha)*e
e2 = np.sin(2.0*alpha)*e
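    # Weighted per-pixel average ellipticities: sum(w * e) / sum(w), accumulated via np.bincount.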
e1_map = np.bincount(idx, weights=w*e1, minlength=n_pix)
e2_map = np.bincount(idx, weights=w*e2, minlength=n_pix)
w_map = np.bincount(idx, weights=w, minlength=n_pix)
good_pixel = w_map > 0
e1_map[good_pixel] /= w_map[good_pixel]
e2_map[good_pixel] /= w_map[good_pixel]
return e1_map, e2_map, w_map
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output-path", required=True)
parser.add_argument("--bin-operator", required=True)
parser.add_argument("--shear-maps", nargs="+")
parser.add_argument("--shear-masks", nargs="+")
parser.add_argument("--shear-auto", action="store_true")
parser.add_argument("--no-cross-shear", action="store_true")
parser.add_argument("--foreground-map")
parser.add_argument("--foreground-mask")
parser.add_argument("--foreground-auto", action="store_true")
parser.add_argument("--foreground-mask-already-applied",
action="store_true")
parser.add_argument("--pymaster-workspace-output-path")
parser.add_argument("--pymaster-workspace-input-path")
parser.add_argument("--compute-covariance", action="store_true")
parser.add_argument("--randomize-shear", action="store_true")
parser.add_argument("--n-iter")
parser.add_argument("--binary-mask", action="store_true")
args = parser.parse_args()
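    # Example invocation (added for illustration; the script name and file
    # paths are placeholders):
    #
    #   python compute_shear_cls.py \
    #       --output-path results/ \
    #       --bin-operator delta_ell_50 \
    #       --shear-maps shear_z1.fits --shear-masks mask_z1.fits \
    #       --foreground-map foreground.fits --foreground-mask foreground_mask.fits \
    #       --compute-covariance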
n_iter = 3
if args.n_iter is not None:
n_iter = int(args.n_iter)
print(f"Using n_iter = {n_iter}")
output_path = args.output_path
os.makedirs(output_path, exist_ok=True)
if args.pymaster_workspace_output_path is None:
pymaster_workspace_output_path = output_path
else:
pymaster_workspace_output_path = args.pymaster_workspace_output_path
os.makedirs(pymaster_workspace_output_path, exist_ok=True)
if args.pymaster_workspace_input_path is None:
print("Creating workspaces and computing coupling matrices")
compute_coupling_matrices = True
else:
print("Reading existing workspaces from ",
args.pymaster_workspace_input_path)
compute_coupling_matrices = False
if (args.shear_maps is not None
and len(args.shear_maps) != len(args.shear_masks)):
raise ValueError("Number of shear masks does not match number of "
"shear masks.")
binary_mask = args.binary_mask
if binary_mask:
print("Using binary mask")
is_foreground_auto = args.foreground_auto
is_shear_auto = args.shear_auto
no_cross_shear = args.no_cross_shear
if is_foreground_auto and is_shear_auto:
raise ValueError("Can only compute auto power spectra of either "
"foreground or shear.")
elif is_foreground_auto:
print("Computing foreground auto spectrum")
elif is_shear_auto:
print("Computing shear auto spectra")
if no_cross_shear:
print("Ignoring cross-bin shear correlations")
if args.shear_maps is not None:
shear_fields = []
print("Loading shear maps")
for shear_map_file, mask_file in zip(args.shear_maps,
args.shear_masks):
print(shear_map_file)
shear_mask = healpy.read_map(mask_file, verbose=False)
shear_mask[shear_mask == healpy.UNSEEN] = 0
nside = healpy.get_nside(shear_mask)
shear_data = read_partial_map(shear_map_file,
fields=[2, 3], fill_value=0,
scale=[1, 1])
if args.randomize_shear:
print(" Randomising shear field")
alpha = np.pi*np.random.rand(shear_data[0].size)
e = np.sqrt(shear_data[0]**2 + shear_data[1]**2)
shear_data[0] = np.cos(2.0*alpha)*e
shear_data[1] = np.sin(2.0*alpha)*e
print(" Creating field object")
field_background = nmt.NmtField(shear_mask,
shear_data,
n_iter=n_iter)
shear_fields.append(field_background)
if args.shear_catalogs is not None:
shear_fields = []
print("Loading shear catalogs")
for shear_catalog_file in args.shear_catalogs:
print(shear_catalog_file)
data = np.load(shear_catalog_file)
e1_map, e2_map, w_map = make_maps(nside, -data["e1"], data["e2"],
data["w"], data["pixel_idx"],
rotate=args.randomize_shear)
if binary_mask:
w_map = w_map > 0
print(" Creating field object")
field_background = nmt.NmtField(w_map,
[e1_map, e2_map],
n_iter=n_iter)
shear_fields.append(field_background)
if args.foreground_map is not None:
print("Loading foreground map")
print(args.foreground_map)
if args.foreground_mask_already_applied:
print(" Mask already applied to map")
foreground_mask_already_applied = True
else:
foreground_mask_already_applied = False
foreground_map = healpy.read_map(args.foreground_map, verbose=False)
foreground_map[foreground_map == healpy.UNSEEN] = 0
foreground_mask = healpy.read_map(args.foreground_mask, verbose=False)
foreground_mask[foreground_mask == healpy.UNSEEN] = 0
nside = healpy.get_nside(foreground_map)
print(" Creating field object")
foreground_field = nmt.NmtField(
foreground_mask,
[foreground_map],
masked_on_input=foreground_mask_already_applied,
n_iter=n_iter)
if args.bin_operator.find("delta_ell_") == 0:
delta_ell = int(args.bin_operator[len("delta_ell_"):])
print("Using linear binning with bin width ", delta_ell)
nmt_bins = nmt.NmtBin.from_nside_linear(
nside=nside,
nlb=delta_ell)
else:
print("Using binning operator from file ", args.bin_operator)
binning_operator = np.loadtxt(args.bin_operator)
ell = np.arange(binning_operator.size)
nmt_bins = nmt.NmtBin(nside=nside,
bpws=binning_operator, ells=ell, weights=2*ell+1)
nmt_workspaces = {}
if is_foreground_auto:
fields_A = [foreground_field]
fields_B = [foreground_field]
field_A_tag = "foreground"
field_B_tag = ""
elif is_shear_auto:
fields_A = shear_fields
fields_B = shear_fields
field_A_tag = "shear_{idx}"
field_B_tag = "shear_{idx}"
else:
fields_A = [foreground_field]
fields_B = shear_fields
field_A_tag = "foreground"
field_B_tag = "shear_{idx}"
print("Getting coupling matrices")
for i, field_A in enumerate(fields_A):
tag_A = field_A_tag.format(idx=i)
print(" Field " + tag_A)
for j, field_B in enumerate(fields_B):
if is_shear_auto:
if j > i:
continue
if no_cross_shear and i != j:
continue
tag_B = field_B_tag.format(idx=j)
print(" Field " + tag_B)
file_tag = tag_A + "_" + tag_B if tag_B != "" else tag_A
nmt_workspace = nmt.NmtWorkspace()
if compute_coupling_matrices:
nmt_workspace.compute_coupling_matrix(fl1=field_A,
fl2=field_B,
bins=nmt_bins,
is_teb=False,
n_iter=n_iter)
nmt_workspaces[(i, j)] = nmt_workspace
np.save(
os.path.join(
output_path,
f"pymaster_bandpower_windows_{file_tag}.npy"),
nmt_workspace.get_bandpower_windows())
np.save(
os.path.join(
output_path,
f"pymaster_coupling_matrix_{file_tag}.npy"),
nmt_workspace.get_coupling_matrix())
nmt_workspace.write_to(os.path.join(
pymaster_workspace_output_path,
f"pymaster_workspace_{file_tag}.fits"))
else:
nmt_workspace.read_from(os.path.join(
args.pymaster_workspace_input_path,
f"pymaster_workspace_{file_tag}.fits"))
nmt_workspaces[(i, j)] = nmt_workspace
print("Computing Cls")
Cls_coupled = {}
Cls_decoupled = {}
header_columns = {}
for i, field_A in enumerate(fields_A):
tag_A = field_A_tag.format(idx=i)
print(" Field " + tag_A)
for j, field_B in enumerate(fields_B):
if is_shear_auto:
if j > i:
continue
if no_cross_shear and i != j:
continue
tag_B = field_B_tag.format(idx=j)
print(" Field " + tag_B)
file_tag = tag_A + "_" + tag_B if tag_B != "" else tag_A
header_columns[(i, j)] = "Cl_" + file_tag
Cl_coupled = nmt.compute_coupled_cell(field_A, field_B)
noise_bias = None
Cl_decoupled = nmt_workspaces[(i, j)].decouple_cell(
cl_in=Cl_coupled,
cl_noise=noise_bias)
Cls_coupled[(i, j)] = Cl_coupled
Cls_decoupled[(i, j)] = Cl_decoupled
ell_nmt = nmt_bins.get_effective_ells()
header = "ell, " + ", ".join(header_columns.values())
header = file_header(header_info=header)
if is_foreground_auto:
spectra = [("TT", 0)]
elif is_shear_auto:
spectra = [("EE", 0), ("EB", 1), ("BE", 2), ("BB", 3)]
else:
spectra = [("TE", 0), ("TB", 1)]
for spectrum, spectrum_idx in spectra:
Cl_data = [Cl[spectrum_idx] for Cl in Cls_decoupled.values()]
np.savetxt(os.path.join(output_path, f"Cl_{spectrum}_decoupled.txt"),
np.vstack((ell_nmt, *Cl_data)).T,
header=header
)
Cl_data = [Cl[spectrum_idx] for Cl in Cls_coupled.values()]
ell_coupled = np.arange(Cl_data[0].size)
np.savetxt(os.path.join(output_path, f"Cl_{spectrum}_coupled.txt"),
np.vstack((ell_coupled, *Cl_data)).T,
header=header
)
if args.compute_covariance:
print("Computing coupling matrices for Gaussian covariance")
if not is_foreground_auto and not is_shear_auto:
for i, shear_field_a in enumerate(shear_fields):
for j, shear_field_b in enumerate(shear_fields[:i+1]):
print(f" Field {i}-{j}")
nmt_cov_workspace = nmt.NmtCovarianceWorkspace()
nmt_cov_workspace.compute_coupling_coefficients(
fla1=foreground_field,
fla2=shear_field_a,
flb1=foreground_field,
flb2=shear_field_b)
nmt_cov_workspace.write_to(
os.path.join(
pymaster_workspace_output_path,
f"pymaster_cov_workspace_foreground_shear_{i}"
f"_foreground_shear_{j}.fits"))
elif is_shear_auto:
field_idx = [(i, j) for i in range(len(shear_fields))
for j in range(i+1)]
for i, (idx_a1, idx_a2) in enumerate(field_idx):
print(f" A {idx_a1}-{idx_a2}")
for idx_b1, idx_b2 in field_idx[:i+1]:
print(f" B {idx_b1}-{idx_b2}")
nmt_cov_workspace = nmt.NmtCovarianceWorkspace()
nmt_cov_workspace.compute_coupling_coefficients(
fla1=shear_fields[idx_a1],
fla2=shear_fields[idx_a2],
flb1=shear_fields[idx_b1],
flb2=shear_fields[idx_b2])
nmt_cov_workspace.write_to(
os.path.join(
pymaster_workspace_output_path,
f"pymaster_cov_workspace"
f"_shear_{idx_a1}_shear_{idx_a2}"
f"_shear_{idx_b1}_shear_{idx_b2}.fits"))
|